From acf86568a7e21176ba2cca15861da231bec6932a Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Thu, 8 Oct 2020 18:45:10 +0300 Subject: [PATCH 001/333] S3 zero copy replication proof of concept --- src/Disks/DiskCacheWrapper.cpp | 18 +- src/Disks/DiskCacheWrapper.h | 6 +- src/Disks/DiskDecorator.cpp | 12 +- src/Disks/DiskDecorator.h | 7 +- src/Disks/DiskLocal.cpp | 6 +- src/Disks/DiskLocal.h | 6 +- src/Disks/DiskMemory.cpp | 6 +- src/Disks/DiskMemory.h | 6 +- src/Disks/IDisk.h | 13 +- src/Disks/S3/DiskS3.cpp | 34 ++- src/Disks/S3/DiskS3.h | 8 +- src/Interpreters/InterserverIOHandler.h | 8 + src/Storages/MergeTree/DataPartsExchange.cpp | 238 +++++++++++++++++- src/Storages/MergeTree/DataPartsExchange.h | 39 ++- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 20 +- src/Storages/MergeTree/IMergeTreeDataPart.h | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 34 ++- 17 files changed, 392 insertions(+), 71 deletions(-) diff --git a/src/Disks/DiskCacheWrapper.cpp b/src/Disks/DiskCacheWrapper.cpp index c60f69920f4..94b15920cee 100644 --- a/src/Disks/DiskCacheWrapper.cpp +++ b/src/Disks/DiskCacheWrapper.cpp @@ -198,11 +198,11 @@ DiskCacheWrapper::writeFile(const String & path, size_t buf_size, WriteMode mode buf_size); } -void DiskCacheWrapper::clearDirectory(const String & path) +void DiskCacheWrapper::clearDirectory(const String & path, bool keep_s3) { if (cache_disk->exists(path)) - cache_disk->clearDirectory(path); - DiskDecorator::clearDirectory(path); + cache_disk->clearDirectory(path, keep_s3); + DiskDecorator::clearDirectory(path, keep_s3); } void DiskCacheWrapper::moveDirectory(const String & from_path, const String & to_path) @@ -251,18 +251,18 @@ void DiskCacheWrapper::copyFile(const String & from_path, const String & to_path DiskDecorator::copyFile(from_path, to_path); } -void DiskCacheWrapper::remove(const String & path) +void DiskCacheWrapper::remove(const String & path, bool keep_s3) { if (cache_disk->exists(path)) - cache_disk->remove(path); - 
DiskDecorator::remove(path); + cache_disk->remove(path, keep_s3); + DiskDecorator::remove(path, keep_s3); } -void DiskCacheWrapper::removeRecursive(const String & path) +void DiskCacheWrapper::removeRecursive(const String & path, bool keep_s3) { if (cache_disk->exists(path)) - cache_disk->removeRecursive(path); - DiskDecorator::removeRecursive(path); + cache_disk->removeRecursive(path, keep_s3); + DiskDecorator::removeRecursive(path, keep_s3); } void DiskCacheWrapper::createHardLink(const String & src_path, const String & dst_path) diff --git a/src/Disks/DiskCacheWrapper.h b/src/Disks/DiskCacheWrapper.h index b0b373d900c..9fca4e02e34 100644 --- a/src/Disks/DiskCacheWrapper.h +++ b/src/Disks/DiskCacheWrapper.h @@ -28,7 +28,7 @@ public: std::function cache_file_predicate_); void createDirectory(const String & path) override; void createDirectories(const String & path) override; - void clearDirectory(const String & path) override; + void clearDirectory(const String & path, bool keep_s3 = false) override; void moveDirectory(const String & from_path, const String & to_path) override; void moveFile(const String & from_path, const String & to_path) override; void replaceFile(const String & from_path, const String & to_path) override; @@ -37,8 +37,8 @@ public: readFile(const String & path, size_t buf_size, size_t estimated_size, size_t aio_threshold, size_t mmap_threshold) const override; std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode, size_t estimated_size, size_t aio_threshold) override; - void remove(const String & path) override; - void removeRecursive(const String & path) override; + void remove(const String & path, bool keep_s3 = false) override; + void removeRecursive(const String & path, bool keep_s3 = false) override; void createHardLink(const String & src_path, const String & dst_path) override; ReservationPtr reserve(UInt64 bytes) override; diff --git a/src/Disks/DiskDecorator.cpp b/src/Disks/DiskDecorator.cpp index 
7f2ea58d7cf..9d61141a162 100644 --- a/src/Disks/DiskDecorator.cpp +++ b/src/Disks/DiskDecorator.cpp @@ -73,9 +73,9 @@ void DiskDecorator::createDirectories(const String & path) delegate->createDirectories(path); } -void DiskDecorator::clearDirectory(const String & path) +void DiskDecorator::clearDirectory(const String & path, bool keep_s3) { - delegate->clearDirectory(path); + delegate->clearDirectory(path, keep_s3); } void DiskDecorator::moveDirectory(const String & from_path, const String & to_path) @@ -130,14 +130,14 @@ DiskDecorator::writeFile(const String & path, size_t buf_size, WriteMode mode, s return delegate->writeFile(path, buf_size, mode, estimated_size, aio_threshold); } -void DiskDecorator::remove(const String & path) +void DiskDecorator::remove(const String & path, bool keep_s3) { - delegate->remove(path); + delegate->remove(path, keep_s3); } -void DiskDecorator::removeRecursive(const String & path) +void DiskDecorator::removeRecursive(const String & path, bool keep_s3) { - delegate->removeRecursive(path); + delegate->removeRecursive(path, keep_s3); } void DiskDecorator::setLastModified(const String & path, const Poco::Timestamp & timestamp) diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h index f1ddfff4952..f1fea043843 100644 --- a/src/Disks/DiskDecorator.h +++ b/src/Disks/DiskDecorator.h @@ -22,7 +22,7 @@ public: size_t getFileSize(const String & path) const override; void createDirectory(const String & path) override; void createDirectories(const String & path) override; - void clearDirectory(const String & path) override; + void clearDirectory(const String & path, bool keep_s3 = false) override; void moveDirectory(const String & from_path, const String & to_path) override; DiskDirectoryIteratorPtr iterateDirectory(const String & path) override; void createFile(const String & path) override; @@ -35,8 +35,8 @@ public: readFile(const String & path, size_t buf_size, size_t estimated_size, size_t aio_threshold, size_t mmap_threshold) 
const override; std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode, size_t estimated_size, size_t aio_threshold) override; - void remove(const String & path) override; - void removeRecursive(const String & path) override; + void remove(const String & path, bool keep_s3 = false) override; + void removeRecursive(const String & path, bool keep_s3 = false) override; void setLastModified(const String & path, const Poco::Timestamp & timestamp) override; Poco::Timestamp getLastModified(const String & path) override; void setReadOnly(const String & path) override; @@ -46,6 +46,7 @@ public: void close(int fd) const override; void sync(int fd) const override; const String getType() const override { return delegate->getType(); } + const String getUniqueId(const String & path) const override { return delegate->getUniqueId(path); } protected: DiskPtr delegate; diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index a09ab7c5ac5..ad85fdf4236 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -180,7 +180,7 @@ void DiskLocal::createDirectories(const String & path) Poco::File(disk_path + path).createDirectories(); } -void DiskLocal::clearDirectory(const String & path) +void DiskLocal::clearDirectory(const String & path, bool) { std::vector files; Poco::File(disk_path + path).list(files); @@ -236,12 +236,12 @@ DiskLocal::writeFile(const String & path, size_t buf_size, WriteMode mode, size_ return createWriteBufferFromFileBase(disk_path + path, estimated_size, aio_threshold, buf_size, flags); } -void DiskLocal::remove(const String & path) +void DiskLocal::remove(const String & path, bool) { Poco::File(disk_path + path).remove(false); } -void DiskLocal::removeRecursive(const String & path) +void DiskLocal::removeRecursive(const String & path, bool) { Poco::File(disk_path + path).remove(true); } diff --git a/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h index 762a8502faa..18e6d072874 100644 --- a/src/Disks/DiskLocal.h +++ 
b/src/Disks/DiskLocal.h @@ -55,7 +55,7 @@ public: void createDirectories(const String & path) override; - void clearDirectory(const String & path) override; + void clearDirectory(const String & path, bool keep_s3 = false) override; void moveDirectory(const String & from_path, const String & to_path) override; @@ -87,9 +87,9 @@ public: size_t estimated_size, size_t aio_threshold) override; - void remove(const String & path) override; + void remove(const String & path, bool keep_s3 = false) override; - void removeRecursive(const String & path) override; + void removeRecursive(const String & path, bool keep_s3 = false) override; void setLastModified(const String & path, const Poco::Timestamp & timestamp) override; diff --git a/src/Disks/DiskMemory.cpp b/src/Disks/DiskMemory.cpp index d185263d48c..fc375707feb 100644 --- a/src/Disks/DiskMemory.cpp +++ b/src/Disks/DiskMemory.cpp @@ -233,7 +233,7 @@ void DiskMemory::createDirectoriesImpl(const String & path) files.emplace(path, FileData{FileType::Directory}); } -void DiskMemory::clearDirectory(const String & path) +void DiskMemory::clearDirectory(const String & path, bool) { std::lock_guard lock(mutex); @@ -348,7 +348,7 @@ std::unique_ptr DiskMemory::writeFile(const String & pa return std::make_unique(this, path, mode, buf_size); } -void DiskMemory::remove(const String & path) +void DiskMemory::remove(const String & path, bool) { std::lock_guard lock(mutex); @@ -368,7 +368,7 @@ void DiskMemory::remove(const String & path) } } -void DiskMemory::removeRecursive(const String & path) +void DiskMemory::removeRecursive(const String & path, bool) { std::lock_guard lock(mutex); diff --git a/src/Disks/DiskMemory.h b/src/Disks/DiskMemory.h index 4d4b947098b..e75d9bff100 100644 --- a/src/Disks/DiskMemory.h +++ b/src/Disks/DiskMemory.h @@ -48,7 +48,7 @@ public: void createDirectories(const String & path) override; - void clearDirectory(const String & path) override; + void clearDirectory(const String & path, bool keep_s3 = false) 
override; void moveDirectory(const String & from_path, const String & to_path) override; @@ -78,9 +78,9 @@ public: size_t estimated_size, size_t aio_threshold) override; - void remove(const String & path) override; + void remove(const String & path, bool keep_s3 = false) override; - void removeRecursive(const String & path) override; + void removeRecursive(const String & path, bool keep_s3 = false) override; void setLastModified(const String &, const Poco::Timestamp &) override {} diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 688c1dfad42..324384fade6 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -105,7 +105,7 @@ public: virtual void createDirectories(const String & path) = 0; /// Remove all files from the directory. Directories are not removed. - virtual void clearDirectory(const String & path) = 0; + virtual void clearDirectory(const String & path, bool keep_s3 = false) = 0; /// Move directory from `from_path` to `to_path`. virtual void moveDirectory(const String & from_path, const String & to_path) = 0; @@ -153,16 +153,16 @@ public: size_t aio_threshold = 0) = 0; /// Remove file or directory. Throws exception if file doesn't exists or if directory is not empty. - virtual void remove(const String & path) = 0; + virtual void remove(const String & path, bool keep_s3 = false) = 0; /// Remove file or directory with all children. Use with extra caution. Throws exception if file doesn't exists. - virtual void removeRecursive(const String & path) = 0; + virtual void removeRecursive(const String & path, bool keep_s3 = false) = 0; /// Remove file or directory if it exists. - void removeIfExists(const String & path) + void removeIfExists(const String & path, bool keep_s3 = false) { if (exists(path)) - remove(path); + remove(path, keep_s3); } /// Set last modified time to file or directory at `path`. @@ -195,6 +195,9 @@ public: /// Invoked when Global Context is shutdown. 
virtual void shutdown() { } + /// Return some uniq string for file, overrided for S3 + virtual const String getUniqueId(const String & path) const { return path; } + private: /// Returns executor to perform asynchronous operations. Executor & getExecutor() { return *executor; } diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index 6abb72efeb0..8b6c3c8465c 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -535,16 +535,25 @@ void DiskS3::createDirectories(const String & path) Poco::File(metadata_path + path).createDirectories(); } +const String DiskS3::getUniqueId(const String & path) const +{ + Metadata metadata(s3_root_path, metadata_path, path); + String id; + if (!metadata.s3_objects.empty()) + id = metadata.s3_objects[0].first; + return id; +} + DiskDirectoryIteratorPtr DiskS3::iterateDirectory(const String & path) { return std::make_unique(metadata_path + path, path); } -void DiskS3::clearDirectory(const String & path) +void DiskS3::clearDirectory(const String & path, bool keep_s3) { for (auto it{iterateDirectory(path)}; it->isValid(); it->next()) if (isFile(it->path())) - remove(it->path()); + remove(it->path(), keep_s3); } void DiskS3::moveFile(const String & from_path, const String & to_path) @@ -634,7 +643,7 @@ std::unique_ptr DiskS3::writeFile(const String & path, } } -void DiskS3::remove(const String & path) +void DiskS3::remove(const String & path, bool keep_s3) { LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Remove file by path: {}", backQuote(metadata_path + path)); @@ -647,13 +656,16 @@ void DiskS3::remove(const String & path) if (metadata.ref_count == 0) { file.remove(); - for (const auto & [s3_object_path, _] : metadata.s3_objects) + if (!keep_s3) { - /// TODO: Make operation idempotent. Do not throw exception if key is already deleted. 
- Aws::S3::Model::DeleteObjectRequest request; - request.SetBucket(bucket); - request.SetKey(s3_root_path + s3_object_path); - throwIfError(client->DeleteObject(request)); + for (const auto & [s3_object_path, _] : metadata.s3_objects) + { + /// TODO: Make operation idempotent. Do not throw exception if key is already deleted. + Aws::S3::Model::DeleteObjectRequest request; + request.SetBucket(bucket); + request.SetKey(s3_root_path + s3_object_path); + throwIfError(client->DeleteObject(request)); + } } } else /// In other case decrement number of references, save metadata and delete file. @@ -667,7 +679,7 @@ void DiskS3::remove(const String & path) file.remove(); } -void DiskS3::removeRecursive(const String & path) +void DiskS3::removeRecursive(const String & path, bool keep_s3) { checkStackSize(); /// This is needed to prevent stack overflow in case of cyclic symlinks. @@ -679,7 +691,7 @@ void DiskS3::removeRecursive(const String & path) else { for (auto it{iterateDirectory(path)}; it->isValid(); it->next()) - removeRecursive(it->path()); + removeRecursive(it->path(), keep_s3); file.remove(); } } diff --git a/src/Disks/S3/DiskS3.h b/src/Disks/S3/DiskS3.h index 2d9c7f79865..48644dcccf0 100644 --- a/src/Disks/S3/DiskS3.h +++ b/src/Disks/S3/DiskS3.h @@ -58,7 +58,7 @@ public: void createDirectories(const String & path) override; - void clearDirectory(const String & path) override; + void clearDirectory(const String & path, bool keep_s3 = false) override; void moveDirectory(const String & from_path, const String & to_path) override { moveFile(from_path, to_path); } @@ -86,9 +86,9 @@ public: size_t estimated_size, size_t aio_threshold) override; - void remove(const String & path) override; + void remove(const String & path, bool keep_s3 = false) override; - void removeRecursive(const String & path) override; + void removeRecursive(const String & path, bool keep_s3 = false) override; void createHardLink(const String & src_path, const String & dst_path) override; @@ -108,6 
+108,8 @@ public: void shutdown() override; + const String getUniqueId(const String & path) const override; + private: bool tryReserve(UInt64 bytes); diff --git a/src/Interpreters/InterserverIOHandler.h b/src/Interpreters/InterserverIOHandler.h index 6d62c9651ca..bcb0e8736f0 100644 --- a/src/Interpreters/InterserverIOHandler.h +++ b/src/Interpreters/InterserverIOHandler.h @@ -16,6 +16,12 @@ namespace Poco { namespace Net { class HTTPServerResponse; } } +namespace zkutil +{ + class ZooKeeper; + using ZooKeeperPtr = std::shared_ptr; +} + namespace DB { @@ -34,6 +40,8 @@ public: virtual void processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & body, WriteBuffer & out, Poco::Net::HTTPServerResponse & response) = 0; virtual ~InterserverIOEndpoint() = default; + virtual void setZooKeeper(const zkutil::ZooKeeperPtr &zookeeper_, const String & zookeeper_path_, const String & replica_name_) = 0; + /// You need to stop the data transfer if blocker is activated. ActionBlocker blocker; std::shared_mutex rwlock; diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index f9fb157942a..d9a37a01585 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -3,11 +3,15 @@ #include #include #include +#include #include #include #include +#include #include #include +#include +#include #include #include #include @@ -34,6 +38,7 @@ namespace ErrorCodes extern const int INSECURE_PATH; extern const int CORRUPTED_DATA; extern const int LOGICAL_ERROR; + extern const int S3_ERROR; } namespace DataPartsExchange @@ -45,6 +50,7 @@ constexpr auto REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE = 1; constexpr auto REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE_AND_TTL_INFOS = 2; constexpr auto REPLICATION_PROTOCOL_VERSION_WITH_PARTS_TYPE = 3; constexpr auto REPLICATION_PROTOCOL_VERSION_WITH_PARTS_DEFAULT_COMPRESSION = 4; +constexpr auto REPLICATION_PROTOCOL_VERSION_WITH_PARTS_S3_COPY = 5; 
std::string getEndpointId(const std::string & node_id) @@ -85,7 +91,7 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo } /// We pretend to work as older server version, to be sure that client will correctly process our version - response.addCookie({"server_protocol_version", toString(std::min(client_protocol_version, REPLICATION_PROTOCOL_VERSION_WITH_PARTS_DEFAULT_COMPRESSION))}); + response.addCookie({"server_protocol_version", toString(std::min(client_protocol_version, REPLICATION_PROTOCOL_VERSION_WITH_PARTS_S3_COPY))}); ++total_sends; SCOPE_EXIT({--total_sends;}); @@ -118,8 +124,30 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo sendPartFromMemory(part, out); else { - bool send_default_compression_file = client_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_DEFAULT_COMPRESSION; - sendPartFromDisk(part, out, send_default_compression_file); + bool try_use_s3_copy = false; + + if (client_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_S3_COPY) + { /// if source and destination are in the same S3 storage we try to use S3 CopyObject request first + int send_s3_metadata = parse(params.get("send_s3_metadata", "0")); + if (send_s3_metadata == 1) + { + auto disk = part->volume->getDisk(); + if (disk->getType() == "s3") + { + try_use_s3_copy = true; + } + } + } + if (try_use_s3_copy) + { + response.addCookie({"send_s3_metadata", "1"}); + sendPartS3Metadata(part, out); + } + else + { + bool send_default_compression_file = client_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_DEFAULT_COMPRESSION; + sendPartFromDisk(part, out, send_default_compression_file); + } } } catch (const NetException &) @@ -199,6 +227,62 @@ void Service::sendPartFromDisk(const MergeTreeData::DataPartPtr & part, WriteBuf part->checksums.checkEqual(data_checksums, false); } +void Service::sendPartS3Metadata(const MergeTreeData::DataPartPtr & part, WriteBuffer & out) +{ + /// We'll take a list of 
files from the list of checksums. + MergeTreeData::DataPart::Checksums checksums = part->checksums; + /// Add files that are not in the checksum list. + auto file_names_without_checksums = part->getFileNamesWithoutChecksums(); + for (const auto & file_name : file_names_without_checksums) + checksums.files[file_name] = {}; + + auto disk = part->volume->getDisk(); + if (disk->getType() != "s3") + throw Exception("S3 disk is not S3 anymore", ErrorCodes::LOGICAL_ERROR); + + String id = disk->getUniqueId(part->getFullRelativePath() + "checksums.txt"); + + if (id.empty()) + throw Exception("Can't lock part on S3 storage", ErrorCodes::LOGICAL_ERROR); + + String zookeeper_node = zookeeper_path + "/zero_copy_s3/" + id + "/" + replica_name; + + LOG_TRACE(log, "Set zookeeper lock {}", id); + + zookeeper->createAncestors(zookeeper_node); + zookeeper->createIfNotExists(zookeeper_node, "lock"); + + writeBinary(checksums.files.size(), out); + for (const auto & it : checksums.files) + { + String file_name = it.first; + + String metadata_file = disk->getPath() + part->getFullRelativePath() + file_name; + + Poco::File metadata(metadata_file); + + if (!metadata.exists()) + throw Exception("S3 metadata '" + file_name + "' is not exists", ErrorCodes::LOGICAL_ERROR); + if (!metadata.isFile()) + throw Exception("S3 metadata '" + file_name + "' is not a file", ErrorCodes::LOGICAL_ERROR); + UInt64 file_size = metadata.getSize(); + + writeStringBinary(it.first, out); + writeBinary(file_size, out); + + auto file_in = createReadBufferFromFileBase(metadata_file, 0, 0, 0, DBMS_DEFAULT_BUFFER_SIZE); + HashingWriteBuffer hashing_out(out); + copyData(*file_in, hashing_out, blocker.getCounter()); + if (blocker.isCancelled()) + throw Exception("Transferring part to replica was cancelled", ErrorCodes::ABORTED); + + if (hashing_out.count() != file_size) + throw Exception("Unexpected size of file " + metadata_file, ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART); + + writePODBinary(hashing_out.getHash(), 
out); + } +} + MergeTreeData::DataPartPtr Service::findPart(const String & name) { /// It is important to include PreCommitted and Outdated parts here because remote replicas cannot reliably @@ -222,7 +306,8 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( const String & password, const String & interserver_scheme, bool to_detached, - const String & tmp_prefix_) + const String & tmp_prefix_, + bool try_use_s3_copy) { if (blocker.isCancelled()) throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); @@ -239,10 +324,29 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( { {"endpoint", getEndpointId(replica_path)}, {"part", part_name}, - {"client_protocol_version", toString(REPLICATION_PROTOCOL_VERSION_WITH_PARTS_DEFAULT_COMPRESSION)}, + {"client_protocol_version", toString(REPLICATION_PROTOCOL_VERSION_WITH_PARTS_S3_COPY)}, {"compress", "false"} }); + ReservationPtr reservationS3; + + if (try_use_s3_copy) + { + /// TODO: Make a normal check for S3 Disk + reservationS3 = data.makeEmptyReservationOnLargestDisk(); + auto disk = reservationS3->getDisk(); + + if (disk->getType() != "s3") + { + try_use_s3_copy = false; + } + } + + if (try_use_s3_copy) + { + uri.addQueryParameter("send_s3_metadata", "1"); + } + Poco::Net::HTTPBasicCredentials creds{}; if (!user.empty()) { @@ -263,6 +367,40 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( int server_protocol_version = parse(in.getResponseCookie("server_protocol_version", "0")); + int send_s3 = parse(in.getResponseCookie("send_s3_metadata", "0")); + + if (send_s3 == 1) + { + if (server_protocol_version < REPLICATION_PROTOCOL_VERSION_WITH_PARTS_S3_COPY) + throw Exception("Got 'send_s3_metadata' cookie with old protocol version", ErrorCodes::LOGICAL_ERROR); + if (!try_use_s3_copy) + throw Exception("Got 'send_s3_metadata' cookie when was not requested", ErrorCodes::LOGICAL_ERROR); + + size_t sum_files_size = 0; + readBinary(sum_files_size, in); + IMergeTreeDataPart::TTLInfos ttl_infos; + /// 
Skip ttl infos, not required for S3 metadata + String ttl_infos_string; + readBinary(ttl_infos_string, in); + String part_type = "Wide"; + readStringBinary(part_type, in); + if (part_type == "InMemory") + throw Exception("Got 'send_s3_metadata' cookie for in-memory partition", ErrorCodes::LOGICAL_ERROR); + + try + { + return downloadPartToS3(part_name, replica_path, to_detached, tmp_prefix_, sync, std::move(reservationS3), in); + } + catch(const Exception& e) + { + if (e.code() != ErrorCodes::S3_ERROR) + throw; + /// Try again but without S3 copy + return fetchPart(metadata_snapshot, part_name, replica_path, host, port, timeouts, + user, password, interserver_scheme, to_detached, tmp_prefix_, false); + } + } + ReservationPtr reservation; size_t sum_files_size = 0; if (server_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_SIZE) @@ -418,6 +556,96 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk( return new_data_part; } +MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( + const String & part_name, + const String & replica_path, + bool to_detached, + const String & tmp_prefix_, + bool ,//sync, + const ReservationPtr reservation, + PooledReadWriteBufferFromHTTP & in + ) +{ + auto disk = reservation->getDisk(); + if (disk->getType() != "s3") + throw Exception("S3 disk is not S3 anymore", ErrorCodes::LOGICAL_ERROR); + + static const String TMP_PREFIX = "tmp_fetch_"; + String tmp_prefix = tmp_prefix_.empty() ? TMP_PREFIX : tmp_prefix_; + + String part_relative_path = String(to_detached ? 
"detached/" : "") + tmp_prefix + part_name; + String part_download_path = data.getRelativeDataPath() + part_relative_path + "/"; + + if (disk->exists(part_download_path)) + throw Exception("Directory " + fullPath(disk, part_download_path) + " already exists.", ErrorCodes::DIRECTORY_ALREADY_EXISTS); + + CurrentMetrics::Increment metric_increment{CurrentMetrics::ReplicatedFetch}; + + disk->createDirectories(part_download_path); + + size_t files; + readBinary(files, in); + + auto volume = std::make_shared("volume_" + part_name, disk); + MergeTreeData::MutableDataPartPtr new_data_part = data.createPart(part_name, volume, part_relative_path); + + for (size_t i = 0; i < files; ++i) + { + String file_name; + UInt64 file_size; + + readStringBinary(file_name, in); + readBinary(file_size, in); + + String metadata_file = disk->getPath() + new_data_part->getFullRelativePath() + file_name; + + auto file_out = createWriteBufferFromFileBase(metadata_file, 0, 0, DBMS_DEFAULT_BUFFER_SIZE, -1); + + HashingWriteBuffer hashing_out(*file_out); + + copyData(in, hashing_out, file_size, blocker.getCounter()); + + if (blocker.isCancelled()) + { + /// NOTE The is_cancelled flag also makes sense to check every time you read over the network, + /// performing a poll with a not very large timeout. + /// And now we check it only between read chunks (in the `copyData` function). 
+ throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); + } + + MergeTreeDataPartChecksum::uint128 expected_hash; + readPODBinary(expected_hash, in); + + if (expected_hash != hashing_out.getHash()) + { + throw Exception("Checksum mismatch for file " + metadata_file + " transferred from " + replica_path, + ErrorCodes::CHECKSUM_DOESNT_MATCH); + } + } + + assertEOF(in); + + new_data_part->is_temp = true; + new_data_part->modification_time = time(nullptr); + new_data_part->loadColumnsChecksumsIndexes(true, false); + + + String id = disk->getUniqueId(new_data_part->getFullRelativePath() + "checksums.txt"); + + if (id.empty()) + throw Exception("Can't lock part on S3 storage", ErrorCodes::LOGICAL_ERROR); + + String zookeeper_node = zookeeper_path + "/zero_copy_s3/" + id + "/" + replica_name; + + LOG_TRACE(log, "Set zookeeper lock {}", id); + + zookeeper->createAncestors(zookeeper_node); + zookeeper->createIfNotExists(zookeeper_node, "lock"); + + + return new_data_part; +} + } } diff --git a/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h index 52a34a2239a..e2e7b2adf4f 100644 --- a/src/Storages/MergeTree/DataPartsExchange.h +++ b/src/Storages/MergeTree/DataPartsExchange.h @@ -9,6 +9,12 @@ #include +namespace zkutil +{ + class ZooKeeper; + using ZooKeeperPtr = std::shared_ptr; +} + namespace DB { @@ -29,16 +35,27 @@ public: std::string getId(const std::string & node_id) const override; void processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & body, WriteBuffer & out, Poco::Net::HTTPServerResponse & response) override; + void setZooKeeper(const zkutil::ZooKeeperPtr & zookeeper_, const String & zookeeper_path_, const String & replica_name_) override + { + zookeeper = zookeeper_; + zookeeper_path = zookeeper_path_; + replica_name = replica_name_; + } + private: MergeTreeData::DataPartPtr findPart(const String & name); void sendPartFromMemory(const MergeTreeData::DataPartPtr & part, WriteBuffer & out); void 
sendPartFromDisk(const MergeTreeData::DataPartPtr & part, WriteBuffer & out, bool send_default_compression_file); + void sendPartS3Metadata(const MergeTreeData::DataPartPtr & part, WriteBuffer & out); private: /// StorageReplicatedMergeTree::shutdown() waits for all parts exchange handlers to finish, /// so Service will never access dangling reference to storage MergeTreeData & data; Poco::Logger * log; + zkutil::ZooKeeperPtr zookeeper; + String zookeeper_path; + String replica_name; }; /** Client for getting the parts from the table *MergeTree. @@ -63,11 +80,19 @@ public: const String & password, const String & interserver_scheme, bool to_detached = false, - const String & tmp_prefix_ = ""); + const String & tmp_prefix_ = "", + bool try_use_s3_copy = true); /// You need to stop the data transfer. ActionBlocker blocker; + void setZooKeeper(const zkutil::ZooKeeperPtr & zookeeper_, const String & zookeeper_path_, const String & replica_name_) + { + zookeeper = zookeeper_; + zookeeper_path = zookeeper_path_; + replica_name = replica_name_; + } + private: MergeTreeData::MutableDataPartPtr downloadPartToDisk( const String & part_name, @@ -84,8 +109,20 @@ private: ReservationPtr reservation, PooledReadWriteBufferFromHTTP & in); + MergeTreeData::MutableDataPartPtr downloadPartToS3( + const String & part_name, + const String & replica_path, + bool to_detached, + const String & tmp_prefix_, + bool sync, + const ReservationPtr reservation, + PooledReadWriteBufferFromHTTP & in); + MergeTreeData & data; Poco::Logger * log; + zkutil::ZooKeeperPtr zookeeper; + String zookeeper_path; + String replica_name; }; } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 486e444763d..23fe60b44e5 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -262,7 +262,7 @@ void IMergeTreeDataPart::removeIfNeeded() } } - remove(); + remove(false); if (state == 
State::DeleteOnDestroy) { @@ -809,7 +809,7 @@ void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_ } -void IMergeTreeDataPart::remove() const +void IMergeTreeDataPart::remove(bool keep_s3) const { if (!isStoredOnDisk()) return; @@ -839,7 +839,7 @@ void IMergeTreeDataPart::remove() const try { - volume->getDisk()->removeRecursive(to + "/"); + volume->getDisk()->removeRecursive(to + "/", keep_s3); } catch (...) { @@ -862,7 +862,7 @@ void IMergeTreeDataPart::remove() const if (checksums.empty()) { /// If the part is not completely written, we cannot use fast path by listing files. - volume->getDisk()->removeRecursive(to + "/"); + volume->getDisk()->removeRecursive(to + "/", keep_s3); } else { @@ -875,18 +875,18 @@ void IMergeTreeDataPart::remove() const # pragma GCC diagnostic ignored "-Wunused-variable" #endif for (const auto & [file, _] : checksums.files) - volume->getDisk()->remove(to + "/" + file); + volume->getDisk()->remove(to + "/" + file, keep_s3); #if !__clang__ # pragma GCC diagnostic pop #endif for (const auto & file : {"checksums.txt", "columns.txt"}) - volume->getDisk()->remove(to + "/" + file); + volume->getDisk()->remove(to + "/" + file, keep_s3); - volume->getDisk()->removeIfExists(to + "/" + DEFAULT_COMPRESSION_CODEC_FILE_NAME); - volume->getDisk()->removeIfExists(to + "/" + DELETE_ON_DESTROY_MARKER_FILE_NAME); + volume->getDisk()->removeIfExists(to + "/" + DEFAULT_COMPRESSION_CODEC_FILE_NAME, keep_s3); + volume->getDisk()->removeIfExists(to + "/" + DELETE_ON_DESTROY_MARKER_FILE_NAME, keep_s3); - volume->getDisk()->remove(to); + volume->getDisk()->remove(to, keep_s3); } catch (...) { @@ -894,7 +894,7 @@ void IMergeTreeDataPart::remove() const LOG_ERROR(storage.log, "Cannot quickly remove directory {} by removing files; fallback to recursive removal. 
Reason: {}", fullPath(volume->getDisk(), to), getCurrentExceptionMessage(false)); - volume->getDisk()->removeRecursive(to + "/"); + volume->getDisk()->removeRecursive(to + "/", keep_s3); } } } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 78daf6c9017..3e7b03b2903 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -124,7 +124,7 @@ public: /// Throws an exception if part is not stored in on-disk format. void assertOnDisk() const; - void remove() const; + void remove(bool keep_s3 = false) const; /// Initialize columns (from columns.txt if exists, or create from column files if not). /// Load checksums from checksums.txt if exists. Load index if required. diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 9613bd5111d..dbbf8645d36 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -370,6 +370,8 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( } createNewZooKeeperNodes(); + + fetcher.setZooKeeper(current_zookeeper, zookeeper_path, replica_name); } @@ -3364,6 +3366,7 @@ void StorageReplicatedMergeTree::startup() queue.initialize(getDataParts()); data_parts_exchange_endpoint = std::make_shared(*this); + data_parts_exchange_endpoint->setZooKeeper(tryGetZooKeeper(), zookeeper_path, replica_name); global_context.getInterserverIOHandler().addEndpoint(data_parts_exchange_endpoint->getId(replica_path), data_parts_exchange_endpoint); /// In this thread replica will be activated. 
@@ -5010,13 +5013,40 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() } parts.clear(); - auto remove_parts_from_filesystem = [log=log] (const DataPartsVector & parts_to_remove) + auto remove_parts_from_filesystem = [log=log,&zookeeper=zookeeper,&zookeeper_path=zookeeper_path,&replica_name=replica_name] (const DataPartsVector & parts_to_remove) { for (const auto & part : parts_to_remove) { try { - part->remove(); + bool keep_s3 = false; + + auto disk = part->volume->getDisk(); + + if (disk->getType() == "s3") + { + String id = disk->getUniqueId(part->getFullRelativePath() + "checksums.txt"); + + if (!id.empty()) + { + String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/" + id; + String zookeeper_node = zookeeper_part_node + "/" + replica_name; + + LOG_TRACE(log, "Remove zookeeper lock for {}", id); + + zookeeper->remove(zookeeper_node); + + Strings children; + zookeeper->tryGetChildren(zookeeper_part_node, children); + if (!children.empty()) + { + LOG_TRACE(log, "Found zookeper locks for {}", id); + keep_s3 = true; + } + } + } + + part->remove(keep_s3); } catch (...) { From a4adb39b2576eb26accc8336a27d6ab9eca4e1b4 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Thu, 8 Oct 2020 19:23:04 +0300 Subject: [PATCH 002/333] S3 zero copy replication proof of concept - description --- S3ZeroCopyReplication.md | 47 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 S3ZeroCopyReplication.md diff --git a/S3ZeroCopyReplication.md b/S3ZeroCopyReplication.md new file mode 100644 index 00000000000..7e7709ff5a7 --- /dev/null +++ b/S3ZeroCopyReplication.md @@ -0,0 +1,47 @@ +# ClickHouse S3 Zero Copy Replication + +Говнокод просто для теста, не production-ready ни разу. 
+ +[Коммит](https://github.com/ianton-ru/ClickHouse/commit/acf86568a7e21176ba2cca15861da231bec6932a) + +[Ветка](https://github.com/ianton-ru/ClickHouse/tree/s3_zero_copy_replication) + +## Как сделано + +При fetch-е парта при репликации в случае, если источник хранит, а приемник собирается хранить парт в S3, вместо данных пересылаются только метаданные S3, приемник кладет их локально себе +и испольузет общие с источником данные на S3. Для того, чтобы не удалить такие пошареные данные, делается пометка в ZooKeeper. + +Введена новая версия протокола REPLICATION_PROTOCOL_VERSION_WITH_PARTS_S3_COPY. В запросе новый параметр send_s3_metadata, если 1, то приемних просит у источника метаданные вместо данных, если это возможно. +Приемник в ответ отсылает куку send_s3_metadata=1 в случае, если идут метаданные. В остальных случаях отсылаются данные, как и прежде. + +Применик перед запросом смотрит, будет ли хранить данные в S3. Провеока сейчас кривая - запрашивается резервирование на диске с наибольшим доступным местом, а потом смотрится, не на S3 ли оно. +Если на S3, то отсылает в запросе send_s3_metadata=1. + +Источник при получении такого запроса смотрит, лежит ли парт на S3. Если да, то в Зукипере ставит метку по пути `<путь к данным таблицы>/zero_copy_s3/<некий ID парта>/`, +ставит в ответ куку send_s3_metadata=1 и вместо файлов с данными отсылает только файлы метаданных. + +Приемник при получении ответа с send_s3_metadata=1 создает только файлики с идентичными меаданными, которые в итоге будут ссылаться на те же ключи в S3, ставит в зукипере аналогичную метку, +только со своим ID реплики, и работает с этим. + +При желании удалить парт нода удаляет в Зукипере ключ `<путь к данным таблицы>/zero_copy_s3/<некий ID парта>/`, потом получает все подключи `<путь к данным таблицы>/zero_copy_s3/<некий ID парта>`. +Если список не пустой, то считает, что данные использует другая нода и удаляет только локальные метаданные, если пустой, то удаляет и данные в S3. 
+ +## Костыли и недоработки, коих много + +* Никакой проверки, один и тот же S3 у нод или разный сейчас нет, если будет несколько разных S3, работать не будет. + +* В качестве ID парта берется имя первого S3-ключа от файла checksums.txt. + +* Не нашел удобного способа прокидывать в коде зукипер, прокинул хадркодом. + +* При удалении класс диска ничего не знает про парты, прокинул флаг, что надо оставлять данные в S3 параметром, это очень криво получилось. + +* Возможна гонка, если источник отошлет метаданные про парт и тут же решит его удалить до того, как приемник поставит в зукипер пометку. + +* В протоколе репликации обмен инфой через параметр запрос в одну сторону и куку в другую мне не нравится, хотя так сделан обмен версиями репликации. + +* При ошибке должно пытаться реплицироваться по старому, но хз, всегда ли сработает + +* Не будет обратной совместимости, если образуются такие шареные парты, откатиться на старую версию кликхауса не получится, иначе нода может удалить используемые другой данные. 
+ +* И вообще From 9272ed06b427f017d1b95e0d20ff6132f5cb06a2 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Fri, 9 Oct 2020 17:24:10 +0300 Subject: [PATCH 003/333] Move Zookeeper lock for S3 shared part in IMergeTreeDataPart --- src/Storages/MergeTree/DataPartsExchange.cpp | 27 +--------- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 52 +++++++++++++++++++ src/Storages/MergeTree/IMergeTreeDataPart.h | 14 +++++ src/Storages/StorageReplicatedMergeTree.cpp | 28 +--------- 4 files changed, 69 insertions(+), 52 deletions(-) diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 2708373d1a4..da5acdbefcd 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include @@ -240,17 +239,7 @@ void Service::sendPartS3Metadata(const MergeTreeData::DataPartPtr & part, WriteB if (disk->getType() != "s3") throw Exception("S3 disk is not S3 anymore", ErrorCodes::LOGICAL_ERROR); - String id = disk->getUniqueId(part->getFullRelativePath() + "checksums.txt"); - - if (id.empty()) - throw Exception("Can't lock part on S3 storage", ErrorCodes::LOGICAL_ERROR); - - String zookeeper_node = zookeeper_path + "/zero_copy_s3/" + id + "/" + replica_name; - - LOG_TRACE(log, "Set zookeeper lock {}", id); - - zookeeper->createAncestors(zookeeper_node); - zookeeper->createIfNotExists(zookeeper_node, "lock"); + part->lockSharedData(zookeeper_path, replica_name, zookeeper); writeBinary(checksums.files.size(), out); for (const auto & it : checksums.files) @@ -629,19 +618,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( new_data_part->modification_time = time(nullptr); new_data_part->loadColumnsChecksumsIndexes(true, false); - - String id = disk->getUniqueId(new_data_part->getFullRelativePath() + "checksums.txt"); - - if (id.empty()) - throw Exception("Can't lock part on S3 storage", 
ErrorCodes::LOGICAL_ERROR); - - String zookeeper_node = zookeeper_path + "/zero_copy_s3/" + id + "/" + replica_name; - - LOG_TRACE(log, "Set zookeeper lock {}", id); - - zookeeper->createAncestors(zookeeper_node); - zookeeper->createIfNotExists(zookeeper_node, "lock"); - + new_data_part->lockSharedData(zookeeper_path, replica_name, zookeeper); return new_data_part; } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 40a6569cd46..786bc056702 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -1079,6 +1080,56 @@ bool IMergeTreeDataPart::checkAllTTLCalculated(const StorageMetadataPtr & metada return true; } +void IMergeTreeDataPart::lockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const +{ + auto disk = volume->getDisk(); + + if (disk->getType() != "s3") + return; + + String id = disk->getUniqueId(getFullRelativePath() + "checksums.txt"); + + if (id.empty()) + throw Exception("Can't lock part on S3 storage", ErrorCodes::LOGICAL_ERROR); + + String zookeeper_node = zookeeper_path + "/zero_copy_s3/" + id + "/" + replica_name; + + LOG_TRACE(storage.log, "Set zookeeper lock {}", id); + + zookeeper->createAncestors(zookeeper_node); + zookeeper->createIfNotExists(zookeeper_node, "lock"); +} + +bool IMergeTreeDataPart::unlockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const +{ + auto disk = volume->getDisk(); + + if (disk->getType() != "s3") + return true; + + String id = disk->getUniqueId(getFullRelativePath() + "checksums.txt"); + + if (id.empty()) + return true; + + String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/" + id; + String zookeeper_node = zookeeper_part_node + "/" + replica_name; + + LOG_TRACE(storage.log, "Remove zookeeper lock for 
{}", id); + + zookeeper->remove(zookeeper_node); + + Strings children; + zookeeper->tryGetChildren(zookeeper_part_node, children); + + if (!children.empty()) + { + LOG_TRACE(storage.log, "Found zookeper locks for {}", id); + } + + return children.empty(); +} + bool isCompactPart(const MergeTreeDataPartPtr & data_part) { return (data_part && data_part->getType() == MergeTreeDataPartType::COMPACT); @@ -1095,3 +1146,4 @@ bool isInMemoryPart(const MergeTreeDataPartPtr & data_part) } } + diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 3e7b03b2903..d40ff40f157 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -22,6 +22,12 @@ #include +namespace zkutil +{ + class ZooKeeper; + using ZooKeeperPtr = std::shared_ptr; +} + namespace DB { @@ -349,6 +355,14 @@ public: /// part creation (using alter query with materialize_ttl setting). bool checkAllTTLCalculated(const StorageMetadataPtr & metadata_snapshot) const; + /// Lock part in zookeeper for use common S3 data in several nodes + void lockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const; + + /// Unlock common S3 data part in zookeeper + /// Return true if data unlocked + /// Return false if data is still used by another node + bool unlockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const; + protected: /// Total size of all columns, calculated once in calcuateColumnSizesOnDisk diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index b1c7c754637..6355894d59e 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5111,33 +5111,7 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() { try { - bool keep_s3 = false; - - auto disk = part->volume->getDisk(); - - if 
(disk->getType() == "s3") - { - String id = disk->getUniqueId(part->getFullRelativePath() + "checksums.txt"); - - if (!id.empty()) - { - String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/" + id; - String zookeeper_node = zookeeper_part_node + "/" + replica_name; - - LOG_TRACE(log, "Remove zookeeper lock for {}", id); - - zookeeper->remove(zookeeper_node); - - Strings children; - zookeeper->tryGetChildren(zookeeper_part_node, children); - if (!children.empty()) - { - LOG_TRACE(log, "Found zookeper locks for {}", id); - keep_s3 = true; - } - } - } - + bool keep_s3 = !part->unlockSharedData(zookeeper_path, replica_name, zookeeper); part->remove(keep_s3); } catch (...) From b877459cf78fbff327c3b75481220e39ea8ee9a6 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Wed, 14 Oct 2020 18:05:59 +0300 Subject: [PATCH 004/333] Zero copy replication over S3: check s3 storage --- src/Disks/DiskDecorator.h | 1 + src/Disks/IDisk.h | 3 + src/Disks/S3/DiskS3.cpp | 38 +++++++ src/Disks/S3/DiskS3.h | 2 + src/Disks/StoragePolicy.cpp | 11 ++ src/Disks/StoragePolicy.h | 3 + src/Storages/MergeTree/DataPartsExchange.cpp | 106 +++++++++++++------ src/Storages/MergeTree/DataPartsExchange.h | 2 +- src/Storages/MergeTree/MergeTreeData.h | 2 + 9 files changed, 136 insertions(+), 32 deletions(-) diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h index f1fea043843..86d842ce2cf 100644 --- a/src/Disks/DiskDecorator.h +++ b/src/Disks/DiskDecorator.h @@ -47,6 +47,7 @@ public: void sync(int fd) const override; const String getType() const override { return delegate->getType(); } const String getUniqueId(const String & path) const override { return delegate->getUniqueId(path); } + bool checkFile(const String & path) const override { return delegate->checkFile(path); } protected: DiskPtr delegate; diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 324384fade6..96a2e5e4669 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -198,6 +198,9 @@ public: /// Return some 
uniq string for file, overrided for S3 virtual const String getUniqueId(const String & path) const { return path; } + /// Check file, overrided for S3 only + virtual bool checkFile(const String & path) const { return exists(path); } + private: /// Returns executor to perform asynchronous operations. Executor & getExecutor() { return *executor; } diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index 9cb3178350c..b563c84094a 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include @@ -83,6 +84,16 @@ namespace } } + template + void throwIfError(const Aws::Utils::Outcome & response) + { + if (!response.IsSuccess()) + { + const auto & err = response.GetError(); + throw Exception(err.GetMessage(), static_cast(err.GetErrorType())); + } + } + /** * S3 metadata file layout: * Number of S3 objects, Total size of all S3 objects. @@ -835,4 +846,31 @@ void DiskS3::shutdown() client->DisableRequestProcessing(); } +bool DiskS3::checkFile(const String & path) const +{ + Metadata metadata(s3_root_path, metadata_path, path); + + /// empty s3_objects list for empty file + if (metadata.s3_objects.empty()) + return true; + + String object = metadata.s3_root_path + metadata.s3_objects[0].first; + + Aws::S3::Model::ListObjectsRequest request; + request.SetBucket(bucket); + request.SetPrefix(object); + auto resp = client->ListObjects(request); + throwIfError(resp); + Aws::Vector object_list = resp.GetResult().GetContents(); + + /// Should be only one object with name equal to prefix + if (object_list.size() != 1) + return false; + + if (object_list[0].GetKey() != object) + return false; + return true; +} + + } diff --git a/src/Disks/S3/DiskS3.h b/src/Disks/S3/DiskS3.h index 7808f5a8007..07348c53417 100644 --- a/src/Disks/S3/DiskS3.h +++ b/src/Disks/S3/DiskS3.h @@ -112,6 +112,8 @@ public: const String getUniqueId(const String & path) const override; + bool checkFile(const String & path) const override; 
+ private: bool tryReserve(UInt64 bytes); diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index 1aa20301bc0..746438bc72c 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -123,6 +123,17 @@ Disks StoragePolicy::getDisks() const } +Disks StoragePolicy::getDisksByType(const String & type) const +{ + Disks res; + for (const auto & volume : volumes) + for (const auto & disk : volume->getDisks()) + if (disk->getType() == type) + res.push_back(disk); + return res; +} + + DiskPtr StoragePolicy::getAnyDisk() const { /// StoragePolicy must contain at least one Volume diff --git a/src/Disks/StoragePolicy.h b/src/Disks/StoragePolicy.h index 0e0795d8bf1..b42886afcb2 100644 --- a/src/Disks/StoragePolicy.h +++ b/src/Disks/StoragePolicy.h @@ -41,6 +41,9 @@ public: /// Returns disks ordered by volumes priority Disks getDisks() const; + /// Returns disks by type ordered by volumes priority + Disks getDisksByType(const String & type) const; + /// Returns any disk /// Used when it's not important, for example for /// mutations files diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index da5acdbefcd..678acc2d848 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -317,18 +317,13 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( {"compress", "false"} }); - ReservationPtr reservationS3; + Disks disksS3; if (try_use_s3_copy) { - /// TODO: Make a normal check for S3 Disk - reservationS3 = data.makeEmptyReservationOnLargestDisk(); - auto disk = reservationS3->getDisk(); - - if (disk->getType() != "s3") - { + disksS3 = data.getDisksByType("s3"); + if (disksS3.empty()) try_use_s3_copy = false; - } } if (try_use_s3_copy) @@ -378,7 +373,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( try { - return downloadPartToS3(part_name, replica_path, to_detached, tmp_prefix_, sync, std::move(reservationS3), in); + return 
downloadPartToS3(part_name, replica_path, to_detached, tmp_prefix_, sync, std::move(disksS3), in); } catch(const Exception& e) { @@ -551,13 +546,14 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( bool to_detached, const String & tmp_prefix_, bool ,//sync, - const ReservationPtr reservation, + const Disks & disksS3, PooledReadWriteBufferFromHTTP & in ) { - auto disk = reservation->getDisk(); - if (disk->getType() != "s3") - throw Exception("S3 disk is not S3 anymore", ErrorCodes::LOGICAL_ERROR); + if (disksS3.empty()) + throw Exception("No S3 disks anymore", ErrorCodes::LOGICAL_ERROR); + + auto disk = disksS3[0]; static const String TMP_PREFIX = "tmp_fetch_"; String tmp_prefix = tmp_prefix_.empty() ? TMP_PREFIX : tmp_prefix_; @@ -586,29 +582,77 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( readStringBinary(file_name, in); readBinary(file_size, in); - String metadata_file = disk->getPath() + new_data_part->getFullRelativePath() + file_name; + String data_path = new_data_part->getFullRelativePath() + file_name; + String metadata_file = fullPath(disk, data_path); - auto file_out = createWriteBufferFromFileBase(metadata_file, 0, 0, DBMS_DEFAULT_BUFFER_SIZE, -1); - - HashingWriteBuffer hashing_out(*file_out); - - copyData(in, hashing_out, file_size, blocker.getCounter()); - - if (blocker.isCancelled()) { - /// NOTE The is_cancelled flag also makes sense to check every time you read over the network, - /// performing a poll with a not very large timeout. - /// And now we check it only between read chunks (in the `copyData` function). 
- throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); + auto file_out = createWriteBufferFromFileBase(metadata_file, 0, 0, DBMS_DEFAULT_BUFFER_SIZE, -1); + + HashingWriteBuffer hashing_out(*file_out); + + copyData(in, hashing_out, file_size, blocker.getCounter()); + + if (blocker.isCancelled()) + { + /// NOTE The is_cancelled flag also makes sense to check every time you read over the network, + /// performing a poll with a not very large timeout. + /// And now we check it only between read chunks (in the `copyData` function). + disk->removeRecursive(part_download_path, true); + throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); + } + + MergeTreeDataPartChecksum::uint128 expected_hash; + readPODBinary(expected_hash, in); + + if (expected_hash != hashing_out.getHash()) + { + throw Exception("Checksum mismatch for file " + metadata_file + " transferred from " + replica_path, + ErrorCodes::CHECKSUM_DOESNT_MATCH); + } } - MergeTreeDataPartChecksum::uint128 expected_hash; - readPODBinary(expected_hash, in); + if (!i) + { /// Check access for first s3 object of first file + if (!disk->checkFile(data_path)) + { /// Wrong S3 disk + Poco::File metadata(metadata_file); - if (expected_hash != hashing_out.getHash()) - { - throw Exception("Checksum mismatch for file " + metadata_file + " transferred from " + replica_path, - ErrorCodes::CHECKSUM_DOESNT_MATCH); + size_t disk_id = 1; + while (true) + { + if (disk_id >= disksS3.size()) + { /// No more S3 disks + disk->removeRecursive(part_download_path, true); + /// After catch this exception replication continues with full data copy + throw Exception("Can't find S3 drive for shared data", ErrorCodes::S3_ERROR); + } + + /// Try next S3 disk + auto next_disk = disksS3[disk_id]; + + auto next_volume = std::make_shared("volume_" + part_name, next_disk); + MergeTreeData::MutableDataPartPtr next_new_data_part = data.createPart(part_name, next_volume, part_relative_path); + + 
next_disk->createDirectories(part_download_path); + + String next_data_path = next_new_data_part->getFullRelativePath() + file_name; + String next_metadata_file = fullPath(next_disk, next_data_path); + metadata.copyTo(next_metadata_file); + if (next_disk->checkFile(next_data_path)) + { /// Right disk found + disk->removeRecursive(part_download_path, true); + disk = next_disk; + volume = next_volume; + data_path = next_data_path; + new_data_part = next_new_data_part; + break; + } + + /// Wrong disk again + next_disk->removeRecursive(part_download_path, true); + ++disk_id; + } + } } } diff --git a/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h index e2e7b2adf4f..7e59e81d6dc 100644 --- a/src/Storages/MergeTree/DataPartsExchange.h +++ b/src/Storages/MergeTree/DataPartsExchange.h @@ -115,7 +115,7 @@ private: bool to_detached, const String & tmp_prefix_, bool sync, - const ReservationPtr reservation, + const Disks & disksS3, PooledReadWriteBufferFromHTTP & in); MergeTreeData & data; diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 5c18661dad1..1b620b3bdae 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -651,6 +651,8 @@ public: /// Reserves 0 bytes ReservationPtr makeEmptyReservationOnLargestDisk() { return getStoragePolicy()->makeEmptyReservationOnLargestDisk(); } + Disks getDisksByType(const String & type) const { return getStoragePolicy()->getDisksByType(type); } + /// Return alter conversions for part which must be applied on fly. 
AlterConversions getAlterConversionsForPart(const MergeTreeDataPartPtr part) const; /// Returns destination disk or volume for the TTL rule according to current storage policy From 14a78f87b03141721ad5978793d22c3d8fc36baa Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Thu, 15 Oct 2020 18:23:20 +0300 Subject: [PATCH 005/333] Zero copy replication over S3: fetch instead of merge --- S3ZeroCopyReplication.md | 33 ++++++++++++------- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 4 +-- src/Storages/StorageReplicatedMergeTree.cpp | 14 ++++++++ 3 files changed, 38 insertions(+), 13 deletions(-) diff --git a/S3ZeroCopyReplication.md b/S3ZeroCopyReplication.md index 7e7709ff5a7..0744460012a 100644 --- a/S3ZeroCopyReplication.md +++ b/S3ZeroCopyReplication.md @@ -2,8 +2,6 @@ Говнокод просто для теста, не production-ready ни разу. -[Коммит](https://github.com/ianton-ru/ClickHouse/commit/acf86568a7e21176ba2cca15861da231bec6932a) - [Ветка](https://github.com/ianton-ru/ClickHouse/tree/s3_zero_copy_replication) ## Как сделано @@ -14,21 +12,24 @@ Введена новая версия протокола REPLICATION_PROTOCOL_VERSION_WITH_PARTS_S3_COPY. В запросе новый параметр send_s3_metadata, если 1, то приемних просит у источника метаданные вместо данных, если это возможно. Приемник в ответ отсылает куку send_s3_metadata=1 в случае, если идут метаданные. В остальных случаях отсылаются данные, как и прежде. -Применик перед запросом смотрит, будет ли хранить данные в S3. Провеока сейчас кривая - запрашивается резервирование на диске с наибольшим доступным местом, а потом смотрится, не на S3 ли оно. -Если на S3, то отсылает в запросе send_s3_metadata=1. +Применик перед запросом смотрит, будет ли хранить данные в S3. Проверка сейчас кривая - если в сторадже есть S3, то считаем, что будет S3. +Если да S3, то отсылает в запросе send_s3_metadata=1. -Источник при получении такого запроса смотрит, лежит ли парт на S3. 
Если да, то в Зукипере ставит метку по пути `<путь к данным таблицы>/zero_copy_s3/<некий ID парта>/`, +Источник при получении такого запроса смотрит, лежит ли парт на S3. Если да, то в Зукипере ставит метку по пути `<путь к данным таблицы>/zero_copy_s3/shared/<некий ID парта>/`, ставит в ответ куку send_s3_metadata=1 и вместо файлов с данными отсылает только файлы метаданных. Приемник при получении ответа с send_s3_metadata=1 создает только файлики с идентичными меаданными, которые в итоге будут ссылаться на те же ключи в S3, ставит в зукипере аналогичную метку, -только со своим ID реплики, и работает с этим. +только со своим ID реплики, и работает с этим. Для первого фалйа из списка проверяет наличие первого ы3-объекта (просто наличие), если объект с таким именем найден, то все ок, если нет, то откат на старую версию. +(Сейчас есть еще код на случай наличия более одного диска S3, тогда перебирает все и если на каком-то файл найден, то использует его, но мы внутри команды MDB смотрим на такую конфигурацию как на странную. +Планируем ограничить функционал только случаем одного S3 диска.) -При желании удалить парт нода удаляет в Зукипере ключ `<путь к данным таблицы>/zero_copy_s3/<некий ID парта>/`, потом получает все подключи `<путь к данным таблицы>/zero_copy_s3/<некий ID парта>`. +При желании удалить парт нода удаляет в Зукипере ключ `<путь к данным таблицы>/zero_copy_s3/shared/<некий ID парта>/`, потом получает все подключи `<путь к данным таблицы>/zero_copy_s3/shared/<некий ID парта>`. Если список не пустой, то считает, что данные использует другая нода и удаляет только локальные метаданные, если пустой, то удаляет и данные в S3. -## Костыли и недоработки, коих много +При мерже если реузльтат будет на S3, нода ставит эфемерную метку в Zookeeper по пути `<путь к данным таблицы>/zero_copy_s3/merged/<имя нового парта>`. Если такая метка уже есть, то считает, что другая нода +уже помержила или мержит сейчас, и надо сделать fetch вместо мержа самой. 
-* Никакой проверки, один и тот же S3 у нод или разный сейчас нет, если будет несколько разных S3, работать не будет. +## Костыли и недоработки, коих много * В качестве ID парта берется имя первого S3-ключа от файла checksums.txt. @@ -40,8 +41,18 @@ * В протоколе репликации обмен инфой через параметр запрос в одну сторону и куку в другую мне не нравится, хотя так сделан обмен версиями репликации. -* При ошибке должно пытаться реплицироваться по старому, но хз, всегда ли сработает +* При ошибке должно пытаться реплицироваться по старому, но не уверен, всегда ли сработает * Не будет обратной совместимости, если образуются такие шареные парты, откатиться на старую версию кликхауса не получится, иначе нода может удалить используемые другой данные. -* И вообще +* Возможны все же дублирования партов. Пример - нода делает мерж, падает. Другая нода незавимо делает мерж, первая нода поднимается. В итоге есть две копии померженого парта. + +* ... много их. Честно. + +## TODO, чего еще вообще не делалось + +* Флаг в конфиге для включения функционала, по умолчанию будет выключен. + +* Для гибридного хранилища сделать проверку и fetch при переезде парта с локального диска в S3. + +* Тесты. 
diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 786bc056702..d9098aec1dc 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1092,7 +1092,7 @@ void IMergeTreeDataPart::lockSharedData(const String & zookeeper_path, const Str if (id.empty()) throw Exception("Can't lock part on S3 storage", ErrorCodes::LOGICAL_ERROR); - String zookeeper_node = zookeeper_path + "/zero_copy_s3/" + id + "/" + replica_name; + String zookeeper_node = zookeeper_path + "/zero_copy_s3/shared/" + id + "/" + replica_name; LOG_TRACE(storage.log, "Set zookeeper lock {}", id); @@ -1112,7 +1112,7 @@ bool IMergeTreeDataPart::unlockSharedData(const String & zookeeper_path, const S if (id.empty()) return true; - String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/" + id; + String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/shared/" + id; String zookeeper_node = zookeeper_part_node + "/" + replica_name; LOG_TRACE(storage.log, "Remove zookeeper lock for {}", id); diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 6355894d59e..c8e8388028b 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1436,6 +1436,20 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) future_merged_part.updatePath(*this, reserved_space); future_merged_part.merge_type = entry.merge_type; + { + auto disk = reserved_space->getDisk(); + if (disk->getType() == "s3") + { + auto zookeeper = getZooKeeper(); + String zookeeper_node = zookeeper_path + "/zero_copy_s3/merged/" + entry.new_part_name; + zookeeper->createAncestors(zookeeper_node); + auto code = zookeeper->tryCreate(zookeeper_node, "lock", zkutil::CreateMode::Ephemeral); + /// Someone else created or started create this merge + if (code == Coordination::Error::ZNODEEXISTS) + return false; + } + } + 
auto table_id = getStorageID(); MergeList::EntryPtr merge_entry = global_context.getMergeList().insert(table_id.database_name, table_id.table_name, future_merged_part); From fb178ef2139d56775f60d5b9d6cd2401aee6dd8c Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Mon, 19 Oct 2020 15:20:45 +0300 Subject: [PATCH 006/333] Zero copy replication over S3: base tests --- S3ZeroCopyReplication.md | 2 +- .../test_s3_zero_copy_replication/__init__.py | 0 .../configs/config.d/s3.xml | 49 +++++++++++ .../test_s3_zero_copy_replication/test.py | 84 +++++++++++++++++++ 4 files changed, 134 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_s3_zero_copy_replication/__init__.py create mode 100644 tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml create mode 100644 tests/integration/test_s3_zero_copy_replication/test.py diff --git a/S3ZeroCopyReplication.md b/S3ZeroCopyReplication.md index 0744460012a..1e152977753 100644 --- a/S3ZeroCopyReplication.md +++ b/S3ZeroCopyReplication.md @@ -1,6 +1,6 @@ # ClickHouse S3 Zero Copy Replication -Говнокод просто для теста, не production-ready ни разу. +Код просто для теста, не production-ready ни разу. [Ветка](https://github.com/ianton-ru/ClickHouse/tree/s3_zero_copy_replication) diff --git a/tests/integration/test_s3_zero_copy_replication/__init__.py b/tests/integration/test_s3_zero_copy_replication/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml new file mode 100644 index 00000000000..24a3fb95c53 --- /dev/null +++ b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml @@ -0,0 +1,49 @@ + + + + + + s3 + http://minio1:9001/root/data/ + minio + minio123 + + + + + +
+ s31 +
+
+
+
+
+ + + 0 + 2 + + + + + + + node1 + 9000 + + + + + node2 + 9000 + + + + + + + test_cluster + + +
diff --git a/tests/integration/test_s3_zero_copy_replication/test.py b/tests/integration/test_s3_zero_copy_replication/test.py new file mode 100644 index 00000000000..278559f73f1 --- /dev/null +++ b/tests/integration/test_s3_zero_copy_replication/test.py @@ -0,0 +1,84 @@ +import logging +import time + +import pytest +from helpers.cluster import ClickHouseCluster + +logging.getLogger().setLevel(logging.INFO) +logging.getLogger().addHandler(logging.StreamHandler()) + + +@pytest.fixture(scope="module") +def cluster(): + try: + cluster = ClickHouseCluster(__file__) + cluster.add_instance("node1", main_configs=["configs/config.d/s3.xml"], macros={'replica': '1'}, + with_minio=True, + with_zookeeper=True) + cluster.add_instance("node2", main_configs=["configs/config.d/s3.xml"], macros={'replica': '2'}, + with_minio=True, + with_zookeeper=True) + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + cluster.shutdown() + + +def get_large_objects_count(cluster, size=100): + minio = cluster.minio_client + counter = 0 + for obj in minio.list_objects(cluster.minio_bucket, 'data/'): + if obj.size >= size: + counter = counter + 1 + return counter + + +@pytest.mark.parametrize( + "policy", ["s3"] +) +def test_s3_zero_copy_replication(cluster, policy): + node1 = cluster.instances["node1"] + node2 = cluster.instances["node2"] + + node1.query( + """ + CREATE TABLE s3_test ON CLUSTER test_cluster (id UInt32, value String) + ENGINE=ReplicatedMergeTree('/clickhouse/tables/s3_test', '{}') + ORDER BY id + SETTINGS storage_policy='{}' + """ + .format('{replica}', policy) + ) + + node1.query("INSERT INTO s3_test VALUES (0,'data'),(1,'data')") + assert node1.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')" + assert node2.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')" + + # Based on version 20.x - should be only one file with size 100+ (checksums.txt), used 
by both nodes + assert get_large_objects_count(cluster) == 1 + + node2.query("INSERT INTO s3_test VALUES (2,'data'),(3,'data')") + assert node2.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data'),(2,'data'),(3,'data')" + assert node1.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data'),(2,'data'),(3,'data')" + + # Based on version 20.x - two parts + assert get_large_objects_count(cluster) == 2 + + node1.query("OPTIMIZE TABLE s3_test") + + time.sleep(1) + + # Based on version 20.x - after merge, two old parts and one merged + assert get_large_objects_count(cluster) == 3 + + time.sleep(60) + + # Based on version 20.x - after cleanup - only one merged part + assert get_large_objects_count(cluster) == 1 + + node1.query("DROP TABLE IF EXISTS s3_test NO DELAY") + node2.query("DROP TABLE IF EXISTS s3_test NO DELAY") + From 652c56e74e7fcb560c535f6695845c6b16ab32a4 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Thu, 22 Oct 2020 12:32:05 +0300 Subject: [PATCH 007/333] Fix style, fix build --- src/Disks/DiskDecorator.h | 2 +- src/Disks/IDisk.h | 2 +- src/Disks/S3/DiskS3.cpp | 2 +- src/Disks/S3/DiskS3.h | 2 +- src/Storages/MergeTree/DataPartsExchange.cpp | 12 +++++------- src/Storages/MergeTree/DataPartsExchange.h | 7 +++---- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 2 +- src/Storages/MergeTree/IMergeTreeDataPart.h | 2 +- 8 files changed, 14 insertions(+), 17 deletions(-) diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h index d230d49b400..8dcdb64ead5 100644 --- a/src/Disks/DiskDecorator.h +++ b/src/Disks/DiskDecorator.h @@ -50,7 +50,7 @@ public: void close(int fd) const override; void sync(int fd) const override; const String getType() const override { return delegate->getType(); } - const String getUniqueId(const String & path) const override { return delegate->getUniqueId(path); } + String getUniqueId(const String & path) const override { return delegate->getUniqueId(path); } bool 
checkFile(const String & path) const override { return delegate->checkFile(path); } Executor & getExecutor() override; diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 143b094fb38..63432bc226a 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -196,7 +196,7 @@ public: virtual void shutdown() { } /// Return some uniq string for file, overrided for S3 - virtual const String getUniqueId(const String & path) const { return path; } + virtual String getUniqueId(const String & path) const { return path; } /// Check file, overrided for S3 only virtual bool checkFile(const String & path) const { return exists(path); } diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index b563c84094a..8e5e230d9db 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -572,7 +572,7 @@ void DiskS3::createDirectories(const String & path) Poco::File(metadata_path + path).createDirectories(); } -const String DiskS3::getUniqueId(const String & path) const +String DiskS3::getUniqueId(const String & path) const { Metadata metadata(s3_root_path, metadata_path, path); String id; diff --git a/src/Disks/S3/DiskS3.h b/src/Disks/S3/DiskS3.h index 07348c53417..cc52722f973 100644 --- a/src/Disks/S3/DiskS3.h +++ b/src/Disks/S3/DiskS3.h @@ -110,7 +110,7 @@ public: void shutdown() override; - const String getUniqueId(const String & path) const override; + String getUniqueId(const String & path) const override; bool checkFile(const String & path) const override; diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index c6568340620..265d855ba31 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -3,7 +3,6 @@ #include #include #include -#include #include #include #include @@ -269,7 +268,7 @@ void Service::sendPartS3Metadata(const MergeTreeData::DataPartPtr & part, WriteB throw Exception("Unexpected size of file " + metadata_file, 
ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART); writePODBinary(hashing_out.getHash(), out); - } + } } MergeTreeData::DataPartPtr Service::findPart(const String & name) @@ -359,7 +358,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( throw Exception("Got 'send_s3_metadata' cookie with old protocol version", ErrorCodes::LOGICAL_ERROR); if (!try_use_s3_copy) throw Exception("Got 'send_s3_metadata' cookie when was not requested", ErrorCodes::LOGICAL_ERROR); - + size_t sum_files_size = 0; readBinary(sum_files_size, in); IMergeTreeDataPart::TTLInfos ttl_infos; @@ -373,14 +372,14 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( try { - return downloadPartToS3(part_name, replica_path, to_detached, tmp_prefix_, sync, std::move(disksS3), in); + return downloadPartToS3(part_name, replica_path, to_detached, tmp_prefix_, std::move(disksS3), in); } - catch(const Exception& e) + catch (const Exception & e) { if (e.code() != ErrorCodes::S3_ERROR) throw; /// Try again but without S3 copy - return fetchPart(metadata_snapshot, part_name, replica_path, host, port, timeouts, + return fetchPart(metadata_snapshot, part_name, replica_path, host, port, timeouts, user, password, interserver_scheme, to_detached, tmp_prefix_, false); } } @@ -545,7 +544,6 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( const String & replica_path, bool to_detached, const String & tmp_prefix_, - bool ,//sync, const Disks & disksS3, PooledReadWriteBufferFromHTTP & in ) diff --git a/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h index 7e59e81d6dc..ac591c2046a 100644 --- a/src/Storages/MergeTree/DataPartsExchange.h +++ b/src/Storages/MergeTree/DataPartsExchange.h @@ -35,8 +35,8 @@ public: std::string getId(const std::string & node_id) const override; void processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & body, WriteBuffer & out, Poco::Net::HTTPServerResponse & response) override; - void setZooKeeper(const zkutil::ZooKeeperPtr & 
zookeeper_, const String & zookeeper_path_, const String & replica_name_) override - { + void setZooKeeper(const zkutil::ZooKeeperPtr & zookeeper_, const String & zookeeper_path_, const String & replica_name_) override + { zookeeper = zookeeper_; zookeeper_path = zookeeper_path_; replica_name = replica_name_; @@ -87,7 +87,7 @@ public: ActionBlocker blocker; void setZooKeeper(const zkutil::ZooKeeperPtr & zookeeper_, const String & zookeeper_path_, const String & replica_name_) - { + { zookeeper = zookeeper_; zookeeper_path = zookeeper_path_; replica_name = replica_name_; @@ -114,7 +114,6 @@ private: const String & replica_path, bool to_detached, const String & tmp_prefix_, - bool sync, const Disks & disksS3, PooledReadWriteBufferFromHTTP & in); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 93f424cf0d1..badfb32cf58 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1106,7 +1106,7 @@ void IMergeTreeDataPart::lockSharedData(const String & zookeeper_path, const Str if (id.empty()) throw Exception("Can't lock part on S3 storage", ErrorCodes::LOGICAL_ERROR); - + String zookeeper_node = zookeeper_path + "/zero_copy_s3/shared/" + id + "/" + replica_name; LOG_TRACE(storage.log, "Set zookeeper lock {}", id); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 9213578c831..8d21f5856fc 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -361,7 +361,7 @@ public: /// Lock part in zookeeper for use common S3 data in several nodes void lockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const; - + /// Unlock common S3 data part in zookeeper /// Return true if data unlocked /// Return false if data is still used by another node From 1ffe0b1d03db9fedafe4918489b4ca5598553480 Mon Sep 17 
00:00:00 2001 From: Anton Ivashkin Date: Fri, 23 Oct 2020 13:01:40 +0300 Subject: [PATCH 008/333] S3 zero copy replication: fix tests --- .../configs/config.d/storage_conf.xml | 1 + .../__init__.py | 0 .../configs/config.d/storage_conf.xml | 50 +++++++++ .../test.py | 105 ++++++++++++++++++ .../configs/config.d/s3.xml | 1 + .../test_s3_zero_copy_replication/test.py | 2 + 6 files changed, 159 insertions(+) create mode 100644 tests/integration/test_replicated_merge_tree_s3_zero_copy/__init__.py create mode 100644 tests/integration/test_replicated_merge_tree_s3_zero_copy/configs/config.d/storage_conf.xml create mode 100644 tests/integration/test_replicated_merge_tree_s3_zero_copy/test.py diff --git a/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml b/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml index 20b750ffff3..1f75a4efeae 100644 --- a/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml +++ b/tests/integration/test_replicated_merge_tree_s3/configs/config.d/storage_conf.xml @@ -21,6 +21,7 @@ 0 + 0 diff --git a/tests/integration/test_replicated_merge_tree_s3_zero_copy/__init__.py b/tests/integration/test_replicated_merge_tree_s3_zero_copy/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_replicated_merge_tree_s3_zero_copy/configs/config.d/storage_conf.xml b/tests/integration/test_replicated_merge_tree_s3_zero_copy/configs/config.d/storage_conf.xml new file mode 100644 index 00000000000..d8c7f49fc49 --- /dev/null +++ b/tests/integration/test_replicated_merge_tree_s3_zero_copy/configs/config.d/storage_conf.xml @@ -0,0 +1,50 @@ + + + + + s3 + http://minio1:9001/root/data/ + minio + minio123 + + + + + +
+ s3 +
+
+
+
+
+ + + 0 + 1 + + + + + + + node1 + 9000 + + + node2 + 9000 + + + node3 + 9000 + + + + + + + 0 + + +
diff --git a/tests/integration/test_replicated_merge_tree_s3_zero_copy/test.py b/tests/integration/test_replicated_merge_tree_s3_zero_copy/test.py new file mode 100644 index 00000000000..793abc53566 --- /dev/null +++ b/tests/integration/test_replicated_merge_tree_s3_zero_copy/test.py @@ -0,0 +1,105 @@ +import logging +import random +import string + +import pytest +from helpers.cluster import ClickHouseCluster + +logging.getLogger().setLevel(logging.INFO) +logging.getLogger().addHandler(logging.StreamHandler()) + + +@pytest.fixture(scope="module") +def cluster(): + try: + cluster = ClickHouseCluster(__file__) + + cluster.add_instance("node1", main_configs=["configs/config.d/storage_conf.xml"], macros={'replica': '1'}, + with_minio=True, with_zookeeper=True) + cluster.add_instance("node2", main_configs=["configs/config.d/storage_conf.xml"], macros={'replica': '2'}, + with_zookeeper=True) + cluster.add_instance("node3", main_configs=["configs/config.d/storage_conf.xml"], macros={'replica': '3'}, + with_zookeeper=True) + + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + cluster.shutdown() + + +FILES_OVERHEAD = 1 +FILES_OVERHEAD_PER_COLUMN = 2 # Data and mark files +FILES_OVERHEAD_PER_PART_WIDE = FILES_OVERHEAD_PER_COLUMN * 3 + 2 + 6 + 1 +FILES_OVERHEAD_PER_PART_COMPACT = 10 + 1 + + +def random_string(length): + letters = string.ascii_letters + return ''.join(random.choice(letters) for i in range(length)) + + +def generate_values(date_str, count, sign=1): + data = [[date_str, sign * (i + 1), random_string(10)] for i in range(count)] + data.sort(key=lambda tup: tup[1]) + return ",".join(["('{}',{},'{}')".format(x, y, z) for x, y, z in data]) + + +def create_table(cluster, additional_settings=None): + create_table_statement = """ + CREATE TABLE s3_test ON CLUSTER cluster( + dt Date, + id Int64, + data String, + INDEX min_max (id) TYPE minmax GRANULARITY 3 + ) ENGINE=ReplicatedMergeTree() + PARTITION BY 
dt + ORDER BY (dt, id) + SETTINGS storage_policy='s3' + """ + if additional_settings: + create_table_statement += "," + create_table_statement += additional_settings + + list(cluster.instances.values())[0].query(create_table_statement) + + +@pytest.fixture(autouse=True) +def drop_table(cluster): + yield + for node in list(cluster.instances.values()): + node.query("DROP TABLE IF EXISTS s3_test") + + minio = cluster.minio_client + # Remove extra objects to prevent tests cascade failing + for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')): + minio.remove_object(cluster.minio_bucket, obj.object_name) + +@pytest.mark.parametrize( + "min_rows_for_wide_part,files_per_part", + [ + (0, FILES_OVERHEAD_PER_PART_WIDE), + (8192, FILES_OVERHEAD_PER_PART_COMPACT) + ] +) +def test_insert_select_replicated(cluster, min_rows_for_wide_part, files_per_part): + create_table(cluster, additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part)) + + all_values = "" + for node_idx in range(1, 4): + node = cluster.instances["node" + str(node_idx)] + values = generate_values("2020-01-0" + str(node_idx), 4096) + node.query("INSERT INTO s3_test VALUES {}".format(values), settings={"insert_quorum": 3}) + if node_idx != 1: + all_values += "," + all_values += values + + for node_idx in range(1, 4): + node = cluster.instances["node" + str(node_idx)] + assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values", + settings={"select_sequential_consistency": 1}) == all_values + + minio = cluster.minio_client + assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == (3 * FILES_OVERHEAD) + (files_per_part * 3) diff --git a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml index 24a3fb95c53..285ade3f727 100644 --- a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml +++ 
b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml @@ -23,6 +23,7 @@ 0 2 + 1 diff --git a/tests/integration/test_s3_zero_copy_replication/test.py b/tests/integration/test_s3_zero_copy_replication/test.py index 278559f73f1..88d038e357b 100644 --- a/tests/integration/test_s3_zero_copy_replication/test.py +++ b/tests/integration/test_s3_zero_copy_replication/test.py @@ -54,6 +54,7 @@ def test_s3_zero_copy_replication(cluster, policy): ) node1.query("INSERT INTO s3_test VALUES (0,'data'),(1,'data')") + time.sleep(1) assert node1.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')" assert node2.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')" @@ -61,6 +62,7 @@ def test_s3_zero_copy_replication(cluster, policy): assert get_large_objects_count(cluster) == 1 node2.query("INSERT INTO s3_test VALUES (2,'data'),(3,'data')") + time.sleep(1) assert node2.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data'),(2,'data'),(3,'data')" assert node1.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data'),(2,'data'),(3,'data')" From e3879afa69672d28686b591dc3b088d1bf451b7a Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Fri, 23 Oct 2020 15:01:50 +0300 Subject: [PATCH 009/333] S3 zero copy replication: fix virtual method default parameter --- src/Disks/DiskCacheWrapper.cpp | 32 +++++++++++++------ src/Disks/DiskCacheWrapper.h | 8 +++-- src/Disks/DiskDecorator.cpp | 22 +++++++++---- src/Disks/DiskDecorator.h | 8 +++-- src/Disks/DiskLocal.cpp | 6 ++-- src/Disks/DiskLocal.h | 6 ++-- src/Disks/DiskMemory.cpp | 6 ++-- src/Disks/DiskMemory.h | 6 ++-- src/Disks/IDisk.h | 19 ++++++++--- src/Disks/S3/DiskS3.cpp | 8 ++--- src/Disks/S3/DiskS3.h | 10 ++++-- src/Storages/MergeTree/DataPartsExchange.cpp | 32 +++++++++++-------- src/Storages/MergeTree/DataPartsExchange.h | 2 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 16 +++++----- 
src/Storages/MergeTree/MergeTreeSettings.h | 1 + 15 files changed, 114 insertions(+), 68 deletions(-) diff --git a/src/Disks/DiskCacheWrapper.cpp b/src/Disks/DiskCacheWrapper.cpp index 79e615d3609..8e0f77eed6d 100644 --- a/src/Disks/DiskCacheWrapper.cpp +++ b/src/Disks/DiskCacheWrapper.cpp @@ -199,11 +199,11 @@ DiskCacheWrapper::writeFile(const String & path, size_t buf_size, WriteMode mode buf_size); } -void DiskCacheWrapper::clearDirectory(const String & path, bool keep_s3) +void DiskCacheWrapper::clearDirectory(const String & path) { if (cache_disk->exists(path)) - cache_disk->clearDirectory(path, keep_s3); - DiskDecorator::clearDirectory(path, keep_s3); + cache_disk->clearDirectory(path); + DiskDecorator::clearDirectory(path); } void DiskCacheWrapper::moveDirectory(const String & from_path, const String & to_path) @@ -252,18 +252,32 @@ void DiskCacheWrapper::copyFile(const String & from_path, const String & to_path DiskDecorator::copyFile(from_path, to_path); } -void DiskCacheWrapper::remove(const String & path, bool keep_s3) +void DiskCacheWrapper::remove(const String & path) { if (cache_disk->exists(path)) - cache_disk->remove(path, keep_s3); - DiskDecorator::remove(path, keep_s3); + cache_disk->remove(path); + DiskDecorator::remove(path); } -void DiskCacheWrapper::removeRecursive(const String & path, bool keep_s3) +void DiskCacheWrapper::removeRecursive(const String & path) { if (cache_disk->exists(path)) - cache_disk->removeRecursive(path, keep_s3); - DiskDecorator::removeRecursive(path, keep_s3); + cache_disk->removeRecursive(path); + DiskDecorator::removeRecursive(path); +} + +void DiskCacheWrapper::removeShared(const String & path, bool keep_s3) +{ + if (cache_disk->exists(path)) + cache_disk->removeShared(path, keep_s3); + DiskDecorator::removeShared(path, keep_s3); +} + +void DiskCacheWrapper::removeSharedRecursive(const String & path, bool keep_s3) +{ + if (cache_disk->exists(path)) + cache_disk->removeSharedRecursive(path, keep_s3); + 
DiskDecorator::removeSharedRecursive(path, keep_s3); } void DiskCacheWrapper::createHardLink(const String & src_path, const String & dst_path) diff --git a/src/Disks/DiskCacheWrapper.h b/src/Disks/DiskCacheWrapper.h index 9fca4e02e34..6722d5bd1a5 100644 --- a/src/Disks/DiskCacheWrapper.h +++ b/src/Disks/DiskCacheWrapper.h @@ -28,7 +28,7 @@ public: std::function cache_file_predicate_); void createDirectory(const String & path) override; void createDirectories(const String & path) override; - void clearDirectory(const String & path, bool keep_s3 = false) override; + void clearDirectory(const String & path) override; void moveDirectory(const String & from_path, const String & to_path) override; void moveFile(const String & from_path, const String & to_path) override; void replaceFile(const String & from_path, const String & to_path) override; @@ -37,8 +37,10 @@ public: readFile(const String & path, size_t buf_size, size_t estimated_size, size_t aio_threshold, size_t mmap_threshold) const override; std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode, size_t estimated_size, size_t aio_threshold) override; - void remove(const String & path, bool keep_s3 = false) override; - void removeRecursive(const String & path, bool keep_s3 = false) override; + void remove(const String & path) override; + void removeRecursive(const String & path) override; + void removeShared(const String & path, bool keep_s3) override; + void removeSharedRecursive(const String & path, bool keep_s3) override; void createHardLink(const String & src_path, const String & dst_path) override; ReservationPtr reserve(UInt64 bytes) override; diff --git a/src/Disks/DiskDecorator.cpp b/src/Disks/DiskDecorator.cpp index 4ad71a67f95..e7a5beeaff1 100644 --- a/src/Disks/DiskDecorator.cpp +++ b/src/Disks/DiskDecorator.cpp @@ -73,9 +73,9 @@ void DiskDecorator::createDirectories(const String & path) delegate->createDirectories(path); } -void DiskDecorator::clearDirectory(const String & 
path, bool keep_s3) +void DiskDecorator::clearDirectory(const String & path) { - delegate->clearDirectory(path, keep_s3); + delegate->clearDirectory(path); } void DiskDecorator::moveDirectory(const String & from_path, const String & to_path) @@ -130,14 +130,24 @@ DiskDecorator::writeFile(const String & path, size_t buf_size, WriteMode mode, s return delegate->writeFile(path, buf_size, mode, estimated_size, aio_threshold); } -void DiskDecorator::remove(const String & path, bool keep_s3) +void DiskDecorator::remove(const String & path) { - delegate->remove(path, keep_s3); + delegate->remove(path); } -void DiskDecorator::removeRecursive(const String & path, bool keep_s3) +void DiskDecorator::removeRecursive(const String & path) { - delegate->removeRecursive(path, keep_s3); + delegate->removeRecursive(path); +} + +void DiskDecorator::removeShared(const String & path, bool keep_s3) +{ + delegate->removeShared(path, keep_s3); +} + +void DiskDecorator::removeSharedRecursive(const String & path, bool keep_s3) +{ + delegate->removeSharedRecursive(path, keep_s3); } void DiskDecorator::setLastModified(const String & path, const Poco::Timestamp & timestamp) diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h index 8dcdb64ead5..4bc7879ffd3 100644 --- a/src/Disks/DiskDecorator.h +++ b/src/Disks/DiskDecorator.h @@ -26,7 +26,7 @@ public: size_t getFileSize(const String & path) const override; void createDirectory(const String & path) override; void createDirectories(const String & path) override; - void clearDirectory(const String & path, bool keep_s3 = false) override; + void clearDirectory(const String & path) override; void moveDirectory(const String & from_path, const String & to_path) override; DiskDirectoryIteratorPtr iterateDirectory(const String & path) override; void createFile(const String & path) override; @@ -39,8 +39,10 @@ public: readFile(const String & path, size_t buf_size, size_t estimated_size, size_t aio_threshold, size_t mmap_threshold) const 
override; std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode, size_t estimated_size, size_t aio_threshold) override; - void remove(const String & path, bool keep_s3 = false) override; - void removeRecursive(const String & path, bool keep_s3 = false) override; + void remove(const String & path) override; + void removeRecursive(const String & path) override; + void removeShared(const String & path, bool keep_s3) override; + void removeSharedRecursive(const String & path, bool keep_s3) override; void setLastModified(const String & path, const Poco::Timestamp & timestamp) override; Poco::Timestamp getLastModified(const String & path) override; void setReadOnly(const String & path) override; diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index ad85fdf4236..a09ab7c5ac5 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -180,7 +180,7 @@ void DiskLocal::createDirectories(const String & path) Poco::File(disk_path + path).createDirectories(); } -void DiskLocal::clearDirectory(const String & path, bool) +void DiskLocal::clearDirectory(const String & path) { std::vector files; Poco::File(disk_path + path).list(files); @@ -236,12 +236,12 @@ DiskLocal::writeFile(const String & path, size_t buf_size, WriteMode mode, size_ return createWriteBufferFromFileBase(disk_path + path, estimated_size, aio_threshold, buf_size, flags); } -void DiskLocal::remove(const String & path, bool) +void DiskLocal::remove(const String & path) { Poco::File(disk_path + path).remove(false); } -void DiskLocal::removeRecursive(const String & path, bool) +void DiskLocal::removeRecursive(const String & path) { Poco::File(disk_path + path).remove(true); } diff --git a/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h index 18e6d072874..762a8502faa 100644 --- a/src/Disks/DiskLocal.h +++ b/src/Disks/DiskLocal.h @@ -55,7 +55,7 @@ public: void createDirectories(const String & path) override; - void clearDirectory(const String & path, bool keep_s3 = false) 
override; + void clearDirectory(const String & path) override; void moveDirectory(const String & from_path, const String & to_path) override; @@ -87,9 +87,9 @@ public: size_t estimated_size, size_t aio_threshold) override; - void remove(const String & path, bool keep_s3 = false) override; + void remove(const String & path) override; - void removeRecursive(const String & path, bool keep_s3 = false) override; + void removeRecursive(const String & path) override; void setLastModified(const String & path, const Poco::Timestamp & timestamp) override; diff --git a/src/Disks/DiskMemory.cpp b/src/Disks/DiskMemory.cpp index fc375707feb..d185263d48c 100644 --- a/src/Disks/DiskMemory.cpp +++ b/src/Disks/DiskMemory.cpp @@ -233,7 +233,7 @@ void DiskMemory::createDirectoriesImpl(const String & path) files.emplace(path, FileData{FileType::Directory}); } -void DiskMemory::clearDirectory(const String & path, bool) +void DiskMemory::clearDirectory(const String & path) { std::lock_guard lock(mutex); @@ -348,7 +348,7 @@ std::unique_ptr DiskMemory::writeFile(const String & pa return std::make_unique(this, path, mode, buf_size); } -void DiskMemory::remove(const String & path, bool) +void DiskMemory::remove(const String & path) { std::lock_guard lock(mutex); @@ -368,7 +368,7 @@ void DiskMemory::remove(const String & path, bool) } } -void DiskMemory::removeRecursive(const String & path, bool) +void DiskMemory::removeRecursive(const String & path) { std::lock_guard lock(mutex); diff --git a/src/Disks/DiskMemory.h b/src/Disks/DiskMemory.h index e75d9bff100..4d4b947098b 100644 --- a/src/Disks/DiskMemory.h +++ b/src/Disks/DiskMemory.h @@ -48,7 +48,7 @@ public: void createDirectories(const String & path) override; - void clearDirectory(const String & path, bool keep_s3 = false) override; + void clearDirectory(const String & path) override; void moveDirectory(const String & from_path, const String & to_path) override; @@ -78,9 +78,9 @@ public: size_t estimated_size, size_t aio_threshold) 
override; - void remove(const String & path, bool keep_s3 = false) override; + void remove(const String & path) override; - void removeRecursive(const String & path, bool keep_s3 = false) override; + void removeRecursive(const String & path) override; void setLastModified(const String &, const Poco::Timestamp &) override {} diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 63432bc226a..915c6da5a21 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -105,7 +105,7 @@ public: virtual void createDirectories(const String & path) = 0; /// Remove all files from the directory. Directories are not removed. - virtual void clearDirectory(const String & path, bool keep_s3 = false) = 0; + virtual void clearDirectory(const String & path) = 0; /// Move directory from `from_path` to `to_path`. virtual void moveDirectory(const String & from_path, const String & to_path) = 0; @@ -153,18 +153,27 @@ public: size_t aio_threshold = 0) = 0; /// Remove file or directory. Throws exception if file doesn't exists or if directory is not empty. - virtual void remove(const String & path, bool keep_s3 = false) = 0; + virtual void remove(const String & path) = 0; /// Remove file or directory with all children. Use with extra caution. Throws exception if file doesn't exists. - virtual void removeRecursive(const String & path, bool keep_s3 = false) = 0; + virtual void removeRecursive(const String & path) = 0; /// Remove file or directory if it exists. - void removeIfExists(const String & path, bool keep_s3 = false) + void removeIfExists(const String & path) { if (exists(path)) - remove(path, keep_s3); + remove(path); } + /// Remove file or directory. Throws exception if file doesn't exists or if directory is not empty. + virtual void removeShared(const String & path, bool) { remove(path); } + + /// Remove file or directory with all children. Use with extra caution. Throws exception if file doesn't exists. 
+ virtual void removeSharedRecursive(const String & path, bool) { removeRecursive(path); } + + /// Remove file or directory if it exists. + void removeSharedIfExists(const String & path, bool) { removeIfExists(path); } + /// Set last modified time to file or directory at `path`. virtual void setLastModified(const String & path, const Poco::Timestamp & timestamp) = 0; diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index 8e5e230d9db..7334a5b8a9b 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -586,11 +586,11 @@ DiskDirectoryIteratorPtr DiskS3::iterateDirectory(const String & path) return std::make_unique(metadata_path + path, path); } -void DiskS3::clearDirectory(const String & path, bool keep_s3) +void DiskS3::clearDirectory(const String & path) { for (auto it{iterateDirectory(path)}; it->isValid(); it->next()) if (isFile(it->path())) - remove(it->path(), keep_s3); + remove(it->path()); } void DiskS3::moveFile(const String & from_path, const String & to_path) @@ -744,7 +744,7 @@ void DiskS3::removeAws(const AwsS3KeyKeeper & keys) } } -void DiskS3::remove(const String & path, bool keep_s3) +void DiskS3::removeShared(const String & path, bool keep_s3) { AwsS3KeyKeeper keys; removeMeta(path, keys); @@ -752,7 +752,7 @@ void DiskS3::remove(const String & path, bool keep_s3) removeAws(keys); } -void DiskS3::removeRecursive(const String & path, bool keep_s3) +void DiskS3::removeSharedRecursive(const String & path, bool keep_s3) { AwsS3KeyKeeper keys; removeMetaRecursive(path, keys); diff --git a/src/Disks/S3/DiskS3.h b/src/Disks/S3/DiskS3.h index cc52722f973..80752fa8253 100644 --- a/src/Disks/S3/DiskS3.h +++ b/src/Disks/S3/DiskS3.h @@ -60,7 +60,7 @@ public: void createDirectories(const String & path) override; - void clearDirectory(const String & path, bool keep_s3 = false) override; + void clearDirectory(const String & path) override; void moveDirectory(const String & from_path, const String & to_path) override { moveFile(from_path, 
to_path); } @@ -88,9 +88,13 @@ public: size_t estimated_size, size_t aio_threshold) override; - void remove(const String & path, bool keep_s3 = false) override; + void remove(const String & path) override { removeShared(path, false); } - void removeRecursive(const String & path, bool keep_s3 = false) override; + void removeRecursive(const String & path) override { removeSharedRecursive(path, false); } + + void removeShared(const String & path, bool keep_s3) override; + + void removeSharedRecursive(const String & path, bool keep_s3) override; void createHardLink(const String & src_path, const String & dst_path) override; diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 265d855ba31..d2bd3c21173 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -124,7 +124,8 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo { bool try_use_s3_copy = false; - if (client_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_S3_COPY) + if (data_settings->allow_s3_zero_copy_replication + && client_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_S3_COPY) { /// if source and destination are in the same S3 storage we try to use S3 CopyObject request first int send_s3_metadata = parse(params.get("send_s3_metadata", "0")); if (send_s3_metadata == 1) @@ -316,12 +317,15 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( {"compress", "false"} }); - Disks disksS3; + Disks disks_s3; + + if (!data_settings->allow_s3_zero_copy_replication) + try_use_s3_copy = false; if (try_use_s3_copy) { - disksS3 = data.getDisksByType("s3"); - if (disksS3.empty()) + disks_s3 = data.getDisksByType("s3"); + if (disks_s3.empty()) try_use_s3_copy = false; } @@ -372,7 +376,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( try { - return downloadPartToS3(part_name, replica_path, to_detached, tmp_prefix_, std::move(disksS3), in); + 
return downloadPartToS3(part_name, replica_path, to_detached, tmp_prefix_, std::move(disks_s3), in); } catch (const Exception & e) { @@ -544,14 +548,14 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( const String & replica_path, bool to_detached, const String & tmp_prefix_, - const Disks & disksS3, + const Disks & disks_s3, PooledReadWriteBufferFromHTTP & in ) { - if (disksS3.empty()) + if (disks_s3.empty()) throw Exception("No S3 disks anymore", ErrorCodes::LOGICAL_ERROR); - auto disk = disksS3[0]; + auto disk = disks_s3[0]; static const String TMP_PREFIX = "tmp_fetch_"; String tmp_prefix = tmp_prefix_.empty() ? TMP_PREFIX : tmp_prefix_; @@ -595,7 +599,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( /// NOTE The is_cancelled flag also makes sense to check every time you read over the network, /// performing a poll with a not very large timeout. /// And now we check it only between read chunks (in the `copyData` function). - disk->removeRecursive(part_download_path, true); + disk->removeSharedRecursive(part_download_path, true); throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); } @@ -618,15 +622,15 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( size_t disk_id = 1; while (true) { - if (disk_id >= disksS3.size()) + if (disk_id >= disks_s3.size()) { /// No more S3 disks - disk->removeRecursive(part_download_path, true); + disk->removeSharedRecursive(part_download_path, true); /// After catch this exception replication continues with full data copy throw Exception("Can't find S3 drive for shared data", ErrorCodes::S3_ERROR); } /// Try next S3 disk - auto next_disk = disksS3[disk_id]; + auto next_disk = disks_s3[disk_id]; auto next_volume = std::make_shared("volume_" + part_name, next_disk); MergeTreeData::MutableDataPartPtr next_new_data_part = data.createPart(part_name, next_volume, part_relative_path); @@ -638,7 +642,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( 
metadata.copyTo(next_metadata_file); if (next_disk->checkFile(next_data_path)) { /// Right disk found - disk->removeRecursive(part_download_path, true); + disk->removeSharedRecursive(part_download_path, true); disk = next_disk; volume = next_volume; data_path = next_data_path; @@ -647,7 +651,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( } /// Wrong disk again - next_disk->removeRecursive(part_download_path, true); + next_disk->removeSharedRecursive(part_download_path, true); ++disk_id; } } diff --git a/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h index ac591c2046a..91edc3ba6d4 100644 --- a/src/Storages/MergeTree/DataPartsExchange.h +++ b/src/Storages/MergeTree/DataPartsExchange.h @@ -114,7 +114,7 @@ private: const String & replica_path, bool to_detached, const String & tmp_prefix_, - const Disks & disksS3, + const Disks & disks_s3, PooledReadWriteBufferFromHTTP & in); MergeTreeData & data; diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index badfb32cf58..be2f88e74e5 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -851,7 +851,7 @@ void IMergeTreeDataPart::remove(bool keep_s3) const try { - volume->getDisk()->removeRecursive(to + "/", keep_s3); + volume->getDisk()->removeSharedRecursive(to + "/", keep_s3); } catch (...) { @@ -874,7 +874,7 @@ void IMergeTreeDataPart::remove(bool keep_s3) const if (checksums.empty()) { /// If the part is not completely written, we cannot use fast path by listing files. 
- volume->getDisk()->removeRecursive(to + "/", keep_s3); + volume->getDisk()->removeSharedRecursive(to + "/", keep_s3); } else { @@ -887,18 +887,18 @@ void IMergeTreeDataPart::remove(bool keep_s3) const # pragma GCC diagnostic ignored "-Wunused-variable" #endif for (const auto & [file, _] : checksums.files) - volume->getDisk()->remove(to + "/" + file, keep_s3); + volume->getDisk()->removeShared(to + "/" + file, keep_s3); #if !__clang__ # pragma GCC diagnostic pop #endif for (const auto & file : {"checksums.txt", "columns.txt"}) - volume->getDisk()->remove(to + "/" + file, keep_s3); + volume->getDisk()->removeShared(to + "/" + file, keep_s3); - volume->getDisk()->removeIfExists(to + "/" + DEFAULT_COMPRESSION_CODEC_FILE_NAME, keep_s3); - volume->getDisk()->removeIfExists(to + "/" + DELETE_ON_DESTROY_MARKER_FILE_NAME, keep_s3); + volume->getDisk()->removeSharedIfExists(to + "/" + DEFAULT_COMPRESSION_CODEC_FILE_NAME, keep_s3); + volume->getDisk()->removeSharedIfExists(to + "/" + DELETE_ON_DESTROY_MARKER_FILE_NAME, keep_s3); - volume->getDisk()->remove(to, keep_s3); + volume->getDisk()->removeShared(to, keep_s3); } catch (...) { @@ -906,7 +906,7 @@ void IMergeTreeDataPart::remove(bool keep_s3) const LOG_ERROR(storage.log, "Cannot quickly remove directory {} by removing files; fallback to recursive removal. 
Reason: {}", fullPath(volume->getDisk(), to), getCurrentExceptionMessage(false)); - volume->getDisk()->removeRecursive(to + "/", keep_s3); + volume->getDisk()->removeSharedRecursive(to + "/", keep_s3); } } } diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 97bc73caf5b..d39d212c5fc 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -105,6 +105,7 @@ struct Settings; M(UInt64, concurrent_part_removal_threshold, 100, "Activate concurrent part removal (see 'max_part_removal_threads') only if the number of inactive data parts is at least this.", 0) \ M(String, storage_policy, "default", "Name of storage disk policy", 0) \ M(Bool, allow_nullable_key, false, "Allow Nullable types as primary keys.", 0) \ + M(Bool, allow_s3_zero_copy_replication, true, "Allow Zero-copy replication over S3", 0) \ \ /** Settings for testing purposes */ \ M(Bool, randomize_part_type, false, "For testing purposes only. Randomizes part type between wide and compact", 0) \ From 78021714f1cbbf54246d09383bdf2a4d06389fa3 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Tue, 3 Nov 2020 11:58:26 +0300 Subject: [PATCH 010/333] S3 zero copy replication: more simple s3 check --- S3ZeroCopyReplication.md | 22 ++++--- src/Disks/DiskDecorator.h | 2 +- src/Disks/IDisk.h | 2 +- src/Disks/S3/DiskS3.cpp | 27 +++----- src/Disks/S3/DiskS3.h | 2 +- src/Storages/MergeTree/DataPartsExchange.cpp | 64 ++++++------------- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 29 ++++----- src/Storages/MergeTree/IMergeTreeDataPart.h | 2 + 8 files changed, 60 insertions(+), 90 deletions(-) diff --git a/S3ZeroCopyReplication.md b/S3ZeroCopyReplication.md index 1e152977753..22c01caa90c 100644 --- a/S3ZeroCopyReplication.md +++ b/S3ZeroCopyReplication.md @@ -12,23 +12,26 @@ Введена новая версия протокола REPLICATION_PROTOCOL_VERSION_WITH_PARTS_S3_COPY. 
В запросе новый параметр send_s3_metadata, если 1, то приемних просит у источника метаданные вместо данных, если это возможно. Приемник в ответ отсылает куку send_s3_metadata=1 в случае, если идут метаданные. В остальных случаях отсылаются данные, как и прежде.
 
+В новой версии протокола перед полем с количеством файлов добавлена еще одна строка. Абстрактно это некий ID, по которому ноды могут понять, с одним S3 они работают или с разными.
+Практически сейчас это имя первого объекта файла checksums.txt. Эта же строка используется в качестве ID парта в зукипере.
+
 Применик перед запросом смотрит, будет ли хранить данные в S3. Проверка сейчас кривая - если в сторадже есть S3, то считаем, что будет S3. Если да S3, то отсылает в запросе send_s3_metadata=1.
 
-Источник при получении такого запроса смотрит, лежит ли парт на S3. Если да, то в Зукипере ставит метку по пути `<путь к данным таблицы>/zero_copy_s3/shared/<некий ID парта>/`,
+Источник при получении такого запроса смотрит, лежит ли парт на S3. Если да, то в Зукипере ставит метку по пути `<путь к данным таблицы>/zero_copy_s3/shared//`,
 ставит в ответ куку send_s3_metadata=1 и вместо файлов с данными отсылает только файлы метаданных.
+Приемник при получении ответа с send_s3_metadata=1 проверяет доступность по переданному ключу (первый объект checksums.txt) создает только файлики с идентичными меаданными, которые в итоге будут ссылаться на те же ключи в S3, ставит в зукипере аналогичную метку, +только со своим ID реплики, и работает с этим. -При желании удалить парт нода удаляет в Зукипере ключ `<путь к данным таблицы>/zero_copy_s3/shared/<некий ID парта>/`, потом получает все подключи `<путь к данным таблицы>/zero_copy_s3/shared/<некий ID парта>`. +При желании удалить парт нода удаляет в Зукипере ключ `<путь к данным таблицы>/zero_copy_s3/shared//`, потом получает все подключи `<путь к данным таблицы>/zero_copy_s3/shared/`. Если список не пустой, то считает, что данные использует другая нода и удаляет только локальные метаданные, если пустой, то удаляет и данные в S3. -При мерже если реузльтат будет на S3, нода ставит эфемерную метку в Zookeeper по пути `<путь к данным таблицы>/zero_copy_s3/merged/<имя нового парта>`. Если такая метка уже есть, то считает, что другая нода +При мерже если реузльтат будет на S3, нода ставит эфемерную метку в Zookeeper по пути `<путь к данным таблицы>/zero_copy_s3/merged/<имя нового парта>` (!! НЕ !!). Если такая метка уже есть, то считает, что другая нода уже помержила или мержит сейчас, и надо сделать fetch вместо мержа самой. +В конфиг добавлен флаг, по которому включается функционал нового протокола репликации - merge_tree->allow_s3_zero_copy_replication. Сейчас стоит в true - это времеменно, чтобы все тесты сейчас проходили с включенным флагом, перед финальным мержем надо не забыть заменить на false. + ## Костыли и недоработки, коих много * В качестве ID парта берется имя первого S3-ключа от файла checksums.txt. @@ -47,12 +50,11 @@ * Возможны все же дублирования партов. Пример - нода делает мерж, падает. Другая нода незавимо делает мерж, первая нода поднимается. В итоге есть две копии померженого парта. +* Тесты пока только самые базовые. + * ... много их. 
Честно. ## TODO, чего еще вообще не делалось -* Флаг в конфиге для включения функционала, по умолчанию будет выключен. - * Для гибридного хранилища сделать проверку и fetch при переезде парта с локального диска в S3. -* Тесты. diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h index 4bc7879ffd3..71d75b92ab6 100644 --- a/src/Disks/DiskDecorator.h +++ b/src/Disks/DiskDecorator.h @@ -53,7 +53,7 @@ public: void sync(int fd) const override; const String getType() const override { return delegate->getType(); } String getUniqueId(const String & path) const override { return delegate->getUniqueId(path); } - bool checkFile(const String & path) const override { return delegate->checkFile(path); } + bool checkUniqueId(const String & id) const override { return delegate->checkUniqueId(id); } Executor & getExecutor() override; protected: diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 915c6da5a21..e05b52c4a78 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -208,7 +208,7 @@ public: virtual String getUniqueId(const String & path) const { return path; } /// Check file, overrided for S3 only - virtual bool checkFile(const String & path) const { return exists(path); } + virtual bool checkUniqueId(const String & id) const { return exists(id); } /// Returns executor to perform asynchronous operations. 
virtual Executor & getExecutor() { return *executor; } diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index 7334a5b8a9b..01221d7c1a2 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -577,7 +577,7 @@ String DiskS3::getUniqueId(const String & path) const Metadata metadata(s3_root_path, metadata_path, path); String id; if (!metadata.s3_objects.empty()) - id = metadata.s3_objects[0].first; + id = metadata.s3_root_path + metadata.s3_objects[0].first; return id; } @@ -846,30 +846,23 @@ void DiskS3::shutdown() client->DisableRequestProcessing(); } -bool DiskS3::checkFile(const String & path) const +bool DiskS3::checkUniqueId(const String & id) const { - Metadata metadata(s3_root_path, metadata_path, path); - - /// empty s3_objects list for empty file - if (metadata.s3_objects.empty()) - return true; - - String object = metadata.s3_root_path + metadata.s3_objects[0].first; - + /// Check that we have right s3 and have access rights + /// Actually interprets id as s3 object name and checks if it exists Aws::S3::Model::ListObjectsRequest request; request.SetBucket(bucket); - request.SetPrefix(object); + request.SetPrefix(id); auto resp = client->ListObjects(request); throwIfError(resp); Aws::Vector object_list = resp.GetResult().GetContents(); - /// Should be only one object with name equal to prefix - if (object_list.size() != 1) + if (object_list.size() < 1) return false; - - if (object_list[0].GetKey() != object) - return false; - return true; + for (const auto & object : object_list) + if (object.GetKey() == id) + return true; + return false; } diff --git a/src/Disks/S3/DiskS3.h b/src/Disks/S3/DiskS3.h index 80752fa8253..43cec7838eb 100644 --- a/src/Disks/S3/DiskS3.h +++ b/src/Disks/S3/DiskS3.h @@ -116,7 +116,7 @@ public: String getUniqueId(const String & path) const override; - bool checkFile(const String & path) const override; + bool checkUniqueId(const String & path) const override; private: bool tryReserve(UInt64 bytes); diff 
--git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index d2bd3c21173..2a1da0e0eaf 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -241,6 +241,9 @@ void Service::sendPartS3Metadata(const MergeTreeData::DataPartPtr & part, WriteB part->lockSharedData(zookeeper_path, replica_name, zookeeper); + String part_id = part->getUniqueId(); + writeStringBinary(part_id, out); + writeBinary(checksums.files.size(), out); for (const auto & it : checksums.files) { @@ -555,7 +558,22 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( if (disks_s3.empty()) throw Exception("No S3 disks anymore", ErrorCodes::LOGICAL_ERROR); - auto disk = disks_s3[0]; + String part_id; + readStringBinary(part_id, in); + + DiskPtr disk = disks_s3[0]; + + for (const auto & disk_ : disks_s3) + { + if (disk_->checkUniqueId(part_id)) + { + disk = disk_; + break; + } + } + + if (!disk) + throw Exception("Can't find S3 disk", ErrorCodes::S3_ERROR); static const String TMP_PREFIX = "tmp_fetch_"; String tmp_prefix = tmp_prefix_.empty() ? 
TMP_PREFIX : tmp_prefix_; @@ -612,50 +630,6 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( ErrorCodes::CHECKSUM_DOESNT_MATCH); } } - - if (!i) - { /// Check access for first s3 object of first file - if (!disk->checkFile(data_path)) - { /// Wrong S3 disk - Poco::File metadata(metadata_file); - - size_t disk_id = 1; - while (true) - { - if (disk_id >= disks_s3.size()) - { /// No more S3 disks - disk->removeSharedRecursive(part_download_path, true); - /// After catch this exception replication continues with full data copy - throw Exception("Can't find S3 drive for shared data", ErrorCodes::S3_ERROR); - } - - /// Try next S3 disk - auto next_disk = disks_s3[disk_id]; - - auto next_volume = std::make_shared("volume_" + part_name, next_disk); - MergeTreeData::MutableDataPartPtr next_new_data_part = data.createPart(part_name, next_volume, part_relative_path); - - next_disk->createDirectories(part_download_path); - - String next_data_path = next_new_data_part->getFullRelativePath() + file_name; - String next_metadata_file = fullPath(next_disk, next_data_path); - metadata.copyTo(next_metadata_file); - if (next_disk->checkFile(next_data_path)) - { /// Right disk found - disk->removeSharedRecursive(part_download_path, true); - disk = next_disk; - volume = next_volume; - data_path = next_data_path; - new_data_part = next_new_data_part; - break; - } - - /// Wrong disk again - next_disk->removeSharedRecursive(part_download_path, true); - ++disk_id; - } - } - } } assertEOF(in); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index be2f88e74e5..85c2d5e4ab4 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1095,17 +1095,24 @@ bool IMergeTreeDataPart::checkAllTTLCalculated(const StorageMetadataPtr & metada return true; } -void IMergeTreeDataPart::lockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr 
zookeeper) const +String IMergeTreeDataPart::getUniqueId() const { + String id; + auto disk = volume->getDisk(); - if (disk->getType() != "s3") - return; - - String id = disk->getUniqueId(getFullRelativePath() + "checksums.txt"); + if (disk->getType() == "s3") + id = disk->getUniqueId(getFullRelativePath() + "checksums.txt"); if (id.empty()) - throw Exception("Can't lock part on S3 storage", ErrorCodes::LOGICAL_ERROR); + throw Exception("Can't get unique S3 object", ErrorCodes::LOGICAL_ERROR); + + return id; +} + +void IMergeTreeDataPart::lockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const +{ + String id = getUniqueId(); String zookeeper_node = zookeeper_path + "/zero_copy_s3/shared/" + id + "/" + replica_name; @@ -1117,15 +1124,7 @@ void IMergeTreeDataPart::lockSharedData(const String & zookeeper_path, const Str bool IMergeTreeDataPart::unlockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const { - auto disk = volume->getDisk(); - - if (disk->getType() != "s3") - return true; - - String id = disk->getUniqueId(getFullRelativePath() + "checksums.txt"); - - if (id.empty()) - return true; + String id = getUniqueId(); String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/shared/" + id; String zookeeper_node = zookeeper_part_node + "/" + replica_name; diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 8d21f5856fc..f948cbaa18c 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -359,6 +359,8 @@ public: /// part creation (using alter query with materialize_ttl setting). 
bool checkAllTTLCalculated(const StorageMetadataPtr & metadata_snapshot) const; + String getUniqueId() const; + /// Lock part in zookeeper for use common S3 data in several nodes void lockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const; From eba98b04b0322f02139f7553c2fab61b84a514e8 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Thu, 14 Jan 2021 19:26:56 +0300 Subject: [PATCH 011/333] Zero copy replication over S3: Hybrid storage support --- S3ZeroCopyReplication.md | 17 +- src/Storages/MergeTree/DataPartsExchange.cpp | 26 ++- src/Storages/MergeTree/DataPartsExchange.h | 3 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 218 ++++++++++++++++-- src/Storages/MergeTree/IMergeTreeDataPart.h | 11 +- src/Storages/MergeTree/MergeTreeData.cpp | 5 +- src/Storages/MergeTree/MergeTreeData.h | 3 +- .../MergeTree/MergeTreeDataMergerMutator.cpp | 1 + .../MergeTree/MergedBlockOutputStream.cpp | 1 + .../MergeTree/ReplicatedMergeTreeLogEntry.h | 40 ++-- src/Storages/StorageReplicatedMergeTree.cpp | 176 ++++++++++---- src/Storages/StorageReplicatedMergeTree.h | 24 +- 12 files changed, 431 insertions(+), 94 deletions(-) diff --git a/S3ZeroCopyReplication.md b/S3ZeroCopyReplication.md index 22c01caa90c..bfb39addcd2 100644 --- a/S3ZeroCopyReplication.md +++ b/S3ZeroCopyReplication.md @@ -18,9 +18,14 @@ Применик перед запросом смотрит, будет ли хранить данные в S3. Проверка сейчас кривая - если в сторадже есть S3, то считаем, что будет S3. Если да S3, то отсылает в запросе send_s3_metadata=1. -Источник при получении такого запроса смотрит, лежит ли парт на S3. Если да, то в Зукипере ставит метку по пути `<путь к данным таблицы>/zero_copy_s3/shared//`, +Источник при получении такого запроса смотрит, лежит ли парт на S3. 
Если да, то в Зукипере ставит метку по пути `<путь к данным таблицы>/zero_copy_s3/shared/<имя парта>//<Путь парта>/`,
 ставит в ответ куку send_s3_metadata=1 и вместо файлов с данными отсылает только файлы метаданных.
 
+Путь получился сложным, потому что требуется
+* по имени парта получить, на каких репликах он уже есть на S3 (нужно для гибридного хранилища)
+* по уникальному пути понимать, используется ли эта копия парта другими репликами
+* для правильного времени жизни лока различать лок основного варианта (all_0_0_0) от временного (tmp_fetch_all_0_0_0)
+
 Приемник при получении ответа с send_s3_metadata=1 проверяет доступность по переданному ключу (первый объект checksums.txt) создает только файлики с идентичными меаданными, которые в итоге будут ссылаться на те же ключи в S3, ставит в зукипере аналогичную метку,
 только со своим ID реплики, и работает с этим.
@@ -30,14 +35,14 @@
 При мерже если реузльтат будет на S3, нода ставит эфемерную метку в Zookeeper по пути `<путь к данным таблицы>/zero_copy_s3/merged/<имя нового парта>` (!! НЕ !!). Если такая метка уже есть, то считает, что другая нода
 уже помержила или мержит сейчас, и надо сделать fetch вместо мержа самой.
 
+В гибридном хранилище если парт переносится на S3, нода через ZK проверяет, не был ли парт перенесен другой нодой, если был, то делает fetch (модифицированный по сравнению с обычным fetch'ем).
+
 В конфиг добавлен флаг, по которому включается функционал нового протокола репликации - merge_tree->allow_s3_zero_copy_replication. Сейчас стоит в true - это времеменно, чтобы все тесты сейчас проходили с включенным флагом, перед финальным мержем надо не забыть заменить на false.
 
 ## Костыли и недоработки, коих много
 
 * В качестве ID парта берется имя первого S3-ключа от файла checksums.txt.
 
-* Не нашел удобного способа прокидывать в коде зукипер, прокинул хадркодом.
-
 * При удалении класс диска ничего не знает про парты, прокинул флаг, что надо оставлять данные в S3 параметром, это очень криво получилось.
* Возможна гонка, если источник отошлет метаданные про парт и тут же решит его удалить до того, как приемник поставит в зукипер пометку. @@ -52,9 +57,5 @@ * Тесты пока только самые базовые. -* ... много их. Честно. - -## TODO, чего еще вообще не делалось - -* Для гибридного хранилища сделать проверку и fetch при переезде парта с локального диска в S3. +* Для гибридного хранилища если две ноды решают одновременно перенести парт на S3, обе проверяют, что его там еще нет и обе переносят. diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index f8f5bfb5a3b..884dd22c295 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -267,7 +267,7 @@ void Service::sendPartS3Metadata(const MergeTreeData::DataPartPtr & part, WriteB if (disk->getType() != "s3") throw Exception("S3 disk is not S3 anymore", ErrorCodes::LOGICAL_ERROR); - part->lockSharedData(zookeeper_path, replica_name, zookeeper); + part->lockSharedData(); String part_id = part->getUniqueId(); writeStringBinary(part_id, out); @@ -327,7 +327,8 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( const String & interserver_scheme, bool to_detached, const String & tmp_prefix_, - bool try_use_s3_copy) + bool try_use_s3_copy, + const DiskPtr disk_s3) { if (blocker.isCancelled()) throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); @@ -348,6 +349,9 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( {"compress", "false"} }); + if (try_use_s3_copy && disk_s3 && disk_s3->getType() != "s3") + throw Exception("Try to fetch shared s3 part on non-s3 disk", ErrorCodes::LOGICAL_ERROR); + Disks disks_s3; if (!data_settings->allow_s3_zero_copy_replication) @@ -355,9 +359,15 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( if (try_use_s3_copy) { - disks_s3 = data.getDisksByType("s3"); - if (disks_s3.empty()) - try_use_s3_copy = false; + if (disk_s3) + disks_s3.push_back(disk_s3); + else + 
{ + disks_s3 = data.getDisksByType("s3"); + + if (disks_s3.empty()) + try_use_s3_copy = false; + } } if (try_use_s3_copy) @@ -405,6 +415,10 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( if (part_type == "InMemory") throw Exception("Got 'send_s3_metadata' cookie for in-memory partition", ErrorCodes::LOGICAL_ERROR); + UUID part_uuid = UUIDHelpers::Nil; + if (server_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_UUID) + readUUIDText(part_uuid, in); + try { return downloadPartToS3(part_name, replica_path, to_detached, tmp_prefix_, std::move(disks_s3), in); @@ -680,7 +694,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( new_data_part->modification_time = time(nullptr); new_data_part->loadColumnsChecksumsIndexes(true, false); - new_data_part->lockSharedData(zookeeper_path, replica_name, zookeeper); + new_data_part->lockSharedData(); return new_data_part; } diff --git a/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h index c5bc891b550..f0297aa1d28 100644 --- a/src/Storages/MergeTree/DataPartsExchange.h +++ b/src/Storages/MergeTree/DataPartsExchange.h @@ -81,7 +81,8 @@ public: const String & interserver_scheme, bool to_detached = false, const String & tmp_prefix_ = "", - bool try_use_s3_copy = true); + bool try_use_s3_copy = true, + const DiskPtr disk_s3 = nullptr); /// You need to stop the data transfer. ActionBlocker blocker; diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 5f017972a47..f4635208cda 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,7 @@ namespace DB { + namespace ErrorCodes { extern const int DIRECTORY_ALREADY_EXISTS; @@ -773,7 +775,8 @@ void IMergeTreeDataPart::loadColumns(bool require) { /// We can get list of columns only from columns.txt in compact parts. 
if (require || part_type == Type::COMPACT) - throw Exception("No columns.txt in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART); + throw Exception("No columns.txt in part " + name + ", expected path " + path + " on drive " + volume->getDisk()->getName(), + ErrorCodes::NO_FILE_IN_DATA_PART); /// If there is no file with a list of columns, write it down. for (const NameAndTypePair & column : metadata_snapshot->getColumns().getAllPhysical()) @@ -855,7 +858,10 @@ void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_ volume->getDisk()->setLastModified(from, Poco::Timestamp::fromEpochTime(time(nullptr))); volume->getDisk()->moveFile(from, to); + String old_relative_path = relative_path; relative_path = new_relative_path; + lockSharedData(); + unlockSharedData(old_relative_path); } @@ -1010,7 +1016,15 @@ void IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & di } disk->createDirectories(path_to_clone); - volume->getDisk()->copy(getFullRelativePath(), disk, path_to_clone); + bool is_fetched = false; + + if (disk->getType() == "s3") + { + is_fetched = tryToFetchIfShared(disk, path_to_clone + "/" + name); + } + + if (!is_fetched) + volume->getDisk()->copy(getFullRelativePath(), disk, path_to_clone); volume->getDisk()->removeIfExists(path_to_clone + '/' + DELETE_ON_DESTROY_MARKER_FILE_NAME); } @@ -1148,38 +1162,212 @@ String IMergeTreeDataPart::getUniqueId() const return id; } -void IMergeTreeDataPart::lockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const +void IMergeTreeDataPart::lockSharedData() const { + if (!volume) + return; + DiskPtr disk = volume->getDisk(); + if (!disk) + return; + if (disk->getType() != "s3") + return; + + const StorageReplicatedMergeTree *replicated_storage = dynamic_cast(&storage); + if (!replicated_storage) + return; + + StorageReplicatedMergeTree::ZooKeeperAccessData zk = replicated_storage->getZooKeeperAccessData(); + if (!zk.zookeeper) 
+ return; + String id = getUniqueId(); + boost::replace_all(id, "/", "_"); + String norm_path = relative_path; + boost::replace_all(norm_path, "/", "_"); - String zookeeper_node = zookeeper_path + "/zero_copy_s3/shared/" + id + "/" + replica_name; + String zookeeper_node = zk.zookeeper_path + "/zero_copy_s3/shared/" + name + "/" + id + "/" + norm_path + "/" + zk.replica_name; - LOG_TRACE(storage.log, "Set zookeeper lock {}", id); + LOG_TRACE(storage.log, "Set zookeeper lock {}", zookeeper_node); - zookeeper->createAncestors(zookeeper_node); - zookeeper->createIfNotExists(zookeeper_node, "lock"); + zk.zookeeper->createAncestors(zookeeper_node); + zk.zookeeper->createIfNotExists(zookeeper_node, "lock"); } -bool IMergeTreeDataPart::unlockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const +bool IMergeTreeDataPart::unlockSharedData() const { + return unlockSharedData(relative_path); +} + +bool IMergeTreeDataPart::unlockSharedData(const String & path) const +{ + if (!volume) + return true; + DiskPtr disk = volume->getDisk(); + if (!disk) + return true; + if (disk->getType() != "s3") + return true; + + const StorageReplicatedMergeTree *replicated_storage = dynamic_cast(&storage); + if (!replicated_storage) + return true; + + StorageReplicatedMergeTree::ZooKeeperAccessData zk = replicated_storage->getZooKeeperAccessData(); + if (!zk.zookeeper) + return true; + String id = getUniqueId(); + boost::replace_all(id, "/", "_"); + String norm_path = path; + boost::replace_all(norm_path, "/", "_"); - String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/shared/" + id; - String zookeeper_node = zookeeper_part_node + "/" + replica_name; + String zookeeper_part_node = zk.zookeeper_path + "/zero_copy_s3/shared/" + name; + String zookeeper_part_uniq_node = zookeeper_part_node + "/" + id; + String zookeeper_part_path_node = zookeeper_part_uniq_node + "/" + norm_path; + String zookeeper_node = zookeeper_part_path_node + "/" 
+ zk.replica_name; - LOG_TRACE(storage.log, "Remove zookeeper lock for {}", id); + LOG_TRACE(storage.log, "Remove zookeeper lock {}", zookeeper_node); - zookeeper->remove(zookeeper_node); + zk.zookeeper->tryRemove(zookeeper_node); Strings children; - zookeeper->tryGetChildren(zookeeper_part_node, children); + zk.zookeeper->tryGetChildren(zookeeper_part_path_node, children); + if (!children.empty()) + { + LOG_TRACE(storage.log, "Found zookeper locks for {}", zookeeper_part_path_node); + return false; + } + + zk.zookeeper->tryRemove(zookeeper_part_path_node); + + children.clear(); + zk.zookeeper->tryGetChildren(zookeeper_part_uniq_node, children); if (!children.empty()) { - LOG_TRACE(storage.log, "Found zookeper locks for {}", id); + LOG_TRACE(storage.log, "Found zookeper locks for {}", zookeeper_part_uniq_node); + return false; } - return children.empty(); + zk.zookeeper->tryRemove(zookeeper_part_uniq_node); + + /// Even when we have lock with same part name, but with different uniq, we can remove files on S3 + children.clear(); + zk.zookeeper->tryGetChildren(zookeeper_part_node, children); + if (children.empty()) + /// Cleanup after last uniq removing + zk.zookeeper->tryRemove(zookeeper_part_node); + + return true; +} + +String IMergeTreeDataPart::getSharedDataReplica( + const String & zookeeper_path, + zkutil::ZooKeeperPtr zookeeper, + const String & replica_name) const +{ + String norm_path = relative_path; + boost::replace_all(norm_path, "/", "_"); + String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/shared/" + name; + + Strings ids; + zookeeper->tryGetChildren(zookeeper_part_node, ids); + + Strings replicas; + for (const auto & id : ids) + { + String zookeeper_part_uniq_node = zookeeper_part_node + "/" + id; + Strings paths; + zookeeper->tryGetChildren(zookeeper_part_uniq_node, paths); + for (const auto &path : paths) + { + String zookeeper_node = zookeeper_part_uniq_node + "/" + path; + Strings id_replicas; + zookeeper->tryGetChildren(zookeeper_node, 
id_replicas); + LOG_TRACE(storage.log, "Found zookeper replicas for {}: {}", zookeeper_node, id_replicas.size()); + replicas.insert(replicas.end(), id_replicas.begin(), id_replicas.end()); + } + } + + LOG_TRACE(storage.log, "Found zookeper replicas for part {}: {}", name, replicas.size()); + + String best_replica; + Strings active_replicas; + + /// TODO: Move best replica choose in common method (here is the same code as in StorageReplicatedMergeTree::fetchPartition) + + /// Leave only active replicas. + active_replicas.reserve(replicas.size()); + + for (const String & replica : replicas) + if ((replica != replica_name) && (zookeeper->exists(zookeeper_path + "/replicas/" + replica + "/is_active"))) + active_replicas.push_back(replica); + + LOG_TRACE(storage.log, "Found zookeper active replicas for part {}: {}", name, active_replicas.size()); + + if (active_replicas.empty()) + return best_replica; + + /** You must select the best (most relevant) replica. + * This is a replica with the maximum `log_pointer`, then with the minimum `queue` size. + * NOTE This is not exactly the best criteria. It does not make sense to download old partitions, + * and it would be nice to be able to choose the replica closest by network. + * NOTE Of course, there are data races here. You can solve it by retrying. + */ + Int64 max_log_pointer = -1; + UInt64 min_queue_size = std::numeric_limits::max(); + + for (const String & replica : active_replicas) + { + String current_replica_path = zookeeper_path + "/replicas/" + replica; + + String log_pointer_str = zookeeper->get(current_replica_path + "/log_pointer"); + Int64 log_pointer = log_pointer_str.empty() ? 
0 : parse(log_pointer_str); + + Coordination::Stat stat; + zookeeper->get(current_replica_path + "/queue", &stat); + size_t queue_size = stat.numChildren; + + if (log_pointer > max_log_pointer + || (log_pointer == max_log_pointer && queue_size < min_queue_size)) + { + max_log_pointer = log_pointer; + min_queue_size = queue_size; + best_replica = replica; + } + } + + return best_replica; +} + +bool IMergeTreeDataPart::tryToFetchIfShared(const DiskPtr & disk, const String & path) const +{ + const StorageReplicatedMergeTree *replicated_storage = dynamic_cast(&storage); + if (!replicated_storage) + return false; + + StorageReplicatedMergeTree::ZooKeeperAccessData zk = replicated_storage->getZooKeeperAccessData(); + if (!zk.zookeeper) + return false; + + String replica = getSharedDataReplica(zk.zookeeper_path, zk.zookeeper, zk.replica_name); + + /// We can't fetch part when none replicas have this part on S3 + if (replica.empty()) + return false; + + ReplicatedMergeTreeLogEntry log_entry; + log_entry.type = ReplicatedMergeTreeLogEntry::FETCH_SHARED_PART; + log_entry.source_replica = replica; + log_entry.new_part_name = name;//part_name; + log_entry.create_time = 0;//part_create_time; + log_entry.disk = disk; + log_entry.path = path; + + /// TODO: !!! Fix const usage !!! 
+ StorageReplicatedMergeTree *replicated_storage_nc = const_cast(replicated_storage); + + return replicated_storage_nc->executeFetchShared(log_entry); } bool isCompactPart(const MergeTreeDataPartPtr & data_part) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 746d95fe78e..cfe3d7da263 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -369,12 +369,13 @@ public: String getUniqueId() const; /// Lock part in zookeeper for use common S3 data in several nodes - void lockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const; + void lockSharedData() const; /// Unlock common S3 data part in zookeeper /// Return true if data unlocked /// Return false if data is still used by another node - bool unlockSharedData(const String & zookeeper_path, const String & replica_name, zkutil::ZooKeeperPtr zookeeper) const; + bool unlockSharedData() const; + bool unlockSharedData(const String & path) const; protected: @@ -439,6 +440,12 @@ private: /// Found column without specific compression and return codec /// for this column with default parameters. 
CompressionCodecPtr detectDefaultCompressionCodec() const; + + /// Fetch part only if some replica has it on shared storage like S3 + bool tryToFetchIfShared(const DiskPtr & disk, const String & path) const; + + /// Get best replica having this partition on S3 + String getSharedDataReplica(const String & zookeeper_path, zkutil::ZooKeeperPtr zookeeper, const String & replica_name) const; }; using MergeTreeDataPartState = IMergeTreeDataPart::State; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index c3a599665bb..37f7187585c 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1833,7 +1833,8 @@ MergeTreeData::DataPartsVector MergeTreeData::getActivePartsToReplace( const MergeTreePartInfo & new_part_info, const String & new_part_name, DataPartPtr & out_covering_part, - DataPartsLock & /* data_parts_lock */) const + DataPartsLock & /* data_parts_lock */, + bool allow_duplicate) const { /// Parts contained in the part are consecutive in data_parts, intersecting the insertion place for the part itself. auto it_middle = data_parts_by_state_and_info.lower_bound(DataPartStateAndInfo{DataPartState::Committed, new_part_info}); @@ -1867,7 +1868,7 @@ MergeTreeData::DataPartsVector MergeTreeData::getActivePartsToReplace( DataPartIteratorByStateAndInfo end = it_middle; while (end != committed_parts_range.end()) { - if ((*end)->info == new_part_info) + if ((*end)->info == new_part_info && !allow_duplicate) throw Exception("Unexpected duplicate part " + (*end)->getNameWithState() + ". 
It is a bug.", ErrorCodes::LOGICAL_ERROR); if (!new_part_info.contains((*end)->info)) diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index e65d486d46f..53902688f1f 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -870,7 +870,8 @@ protected: const MergeTreePartInfo & new_part_info, const String & new_part_name, DataPartPtr & out_covering_part, - DataPartsLock & data_parts_lock) const; + DataPartsLock & data_parts_lock, + bool allow_duplicate = false) const; /// Checks whether the column is in the primary key, possibly wrapped in a chain of functions with single argument. bool isPrimaryOrMinMaxKeyColumnPossiblyWrappedInFunctions(const ASTPtr & node, const StorageMetadataPtr & metadata_snapshot) const; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 1065b992396..807d1e9eed2 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -1876,6 +1876,7 @@ void MergeTreeDataMergerMutator::finalizeMutatedPart( MergeTreeData::DataPart::calculateTotalSizeOnDisk(new_data_part->volume->getDisk(), new_data_part->getFullRelativePath())); new_data_part->default_codec = codec; new_data_part->calculateColumnsSizesOnDisk(); + new_data_part->lockSharedData(); } diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 00a4c37c60d..255526eca11 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -126,6 +126,7 @@ void MergedBlockOutputStream::writeSuffixAndFinalizePart( new_part->calculateColumnsSizesOnDisk(); if (default_codec != nullptr) new_part->default_codec = default_codec; + new_part->lockSharedData(); } void MergedBlockOutputStream::finalizePartOnDisk( diff --git 
a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h index 4b384171dde..e9e3d15c5ff 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -29,29 +30,31 @@ struct ReplicatedMergeTreeLogEntryData { enum Type { - EMPTY, /// Not used. - GET_PART, /// Get the part from another replica. - MERGE_PARTS, /// Merge the parts. - DROP_RANGE, /// Delete the parts in the specified partition in the specified number range. - CLEAR_COLUMN, /// NOTE: Deprecated. Drop specific column from specified partition. - CLEAR_INDEX, /// NOTE: Deprecated. Drop specific index from specified partition. - REPLACE_RANGE, /// Drop certain range of partitions and replace them by new ones - MUTATE_PART, /// Apply one or several mutations to the part. - ALTER_METADATA, /// Apply alter modification according to global /metadata and /columns paths + EMPTY, /// Not used. + GET_PART, /// Get the part from another replica. + MERGE_PARTS, /// Merge the parts. + DROP_RANGE, /// Delete the parts in the specified partition in the specified number range. + CLEAR_COLUMN, /// NOTE: Deprecated. Drop specific column from specified partition. + CLEAR_INDEX, /// NOTE: Deprecated. Drop specific index from specified partition. + REPLACE_RANGE, /// Drop certain range of partitions and replace them by new ones + MUTATE_PART, /// Apply one or several mutations to the part. 
+ ALTER_METADATA, /// Apply alter modification according to global /metadata and /columns paths + FETCH_SHARED_PART, /// Get the part from other replica only if it on shared S3 storade }; static String typeToString(Type type) { switch (type) { - case ReplicatedMergeTreeLogEntryData::GET_PART: return "GET_PART"; - case ReplicatedMergeTreeLogEntryData::MERGE_PARTS: return "MERGE_PARTS"; - case ReplicatedMergeTreeLogEntryData::DROP_RANGE: return "DROP_RANGE"; - case ReplicatedMergeTreeLogEntryData::CLEAR_COLUMN: return "CLEAR_COLUMN"; - case ReplicatedMergeTreeLogEntryData::CLEAR_INDEX: return "CLEAR_INDEX"; - case ReplicatedMergeTreeLogEntryData::REPLACE_RANGE: return "REPLACE_RANGE"; - case ReplicatedMergeTreeLogEntryData::MUTATE_PART: return "MUTATE_PART"; - case ReplicatedMergeTreeLogEntryData::ALTER_METADATA: return "ALTER_METADATA"; + case ReplicatedMergeTreeLogEntryData::GET_PART: return "GET_PART"; + case ReplicatedMergeTreeLogEntryData::MERGE_PARTS: return "MERGE_PARTS"; + case ReplicatedMergeTreeLogEntryData::DROP_RANGE: return "DROP_RANGE"; + case ReplicatedMergeTreeLogEntryData::CLEAR_COLUMN: return "CLEAR_COLUMN"; + case ReplicatedMergeTreeLogEntryData::CLEAR_INDEX: return "CLEAR_INDEX"; + case ReplicatedMergeTreeLogEntryData::REPLACE_RANGE: return "REPLACE_RANGE"; + case ReplicatedMergeTreeLogEntryData::MUTATE_PART: return "MUTATE_PART"; + case ReplicatedMergeTreeLogEntryData::ALTER_METADATA: return "ALTER_METADATA"; + case ReplicatedMergeTreeLogEntryData::FETCH_SHARED_PART: return "FETCH_SHARED_PART"; default: throw Exception("Unknown log entry type: " + DB::toString(type), ErrorCodes::LOGICAL_ERROR); } @@ -191,6 +194,9 @@ struct ReplicatedMergeTreeLogEntry : public ReplicatedMergeTreeLogEntryData, std std::condition_variable execution_complete; /// Awake when currently_executing becomes false. 
static Ptr parse(const String & s, const Coordination::Stat & stat); + + DiskPtr disk; + String path; }; using ReplicatedMergeTreeLogEntryPtr = std::shared_ptr; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 977a485f758..59312737a39 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1891,6 +1891,60 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry) } +bool StorageReplicatedMergeTree::executeFetchShared(ReplicatedMergeTreeLogEntry & entry) +{ + if (entry.type != LogEntry::FETCH_SHARED_PART) + { + throw Exception("Wrong entry.type in executeFetchShared", ErrorCodes::LOGICAL_ERROR); + } + + if (entry.source_replica.empty()) + { + LOG_INFO(log, "No active replica has part {} on S3.", entry.new_part_name); + return false; + } + + const auto storage_settings_ptr = getSettings(); + auto metadata_snapshot = getInMemoryMetadataPtr(); + + static std::atomic_uint total_fetches {0}; + if (storage_settings_ptr->replicated_max_parallel_fetches && total_fetches >= storage_settings_ptr->replicated_max_parallel_fetches) + { + throw Exception("Too many total fetches from replicas, maximum: " + storage_settings_ptr->replicated_max_parallel_fetches.toString(), + ErrorCodes::TOO_MANY_FETCHES); + } + + ++total_fetches; + SCOPE_EXIT({--total_fetches;}); + + if (storage_settings_ptr->replicated_max_parallel_fetches_for_table + && current_table_fetches >= storage_settings_ptr->replicated_max_parallel_fetches_for_table) + { + throw Exception("Too many fetches from replicas for table, maximum: " + storage_settings_ptr->replicated_max_parallel_fetches_for_table.toString(), + ErrorCodes::TOO_MANY_FETCHES); + } + + ++current_table_fetches; + SCOPE_EXIT({--current_table_fetches;}); + + try + { + if (!fetchPart(entry.new_part_name, metadata_snapshot, zookeeper_path + "/replicas/" + entry.source_replica, false, entry.quorum, + nullptr, true, entry.disk, 
entry.path)) + return false; + } + catch (Exception & e) + { + if (e.code() == ErrorCodes::RECEIVED_ERROR_TOO_MANY_REQUESTS) + e.addMessage("Too busy replica. Will try later."); + tryLogCurrentException(log, __PRETTY_FUNCTION__); + throw; + } + + return true; +} + + void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) { auto drop_range_info = MergeTreePartInfo::fromPartName(entry.new_part_name, format_version); @@ -3133,6 +3187,29 @@ String StorageReplicatedMergeTree::findReplicaHavingPart(const String & part_nam return {}; } +String StorageReplicatedMergeTree::findReplicaHavingSharedPart(const String & part_name, bool active) +{ + auto zookeeper = getZooKeeper(); + Strings replicas = zookeeper->getChildren(zookeeper_path + "/replicas"); + + /// Select replicas in uniformly random order. + std::shuffle(replicas.begin(), replicas.end(), thread_local_rng); + + for (const String & replica : replicas) + { + /// We don't interested in ourself. + if (replica == replica_name) + continue; + + if (checkReplicaHavePart(replica, part_name) && + (!active || zookeeper->exists(zookeeper_path + "/replicas/" + replica + "/is_active"))) + return replica; + + /// Obviously, replica could become inactive or even vanish after return from this method. 
+ } + + return {}; +} String StorageReplicatedMergeTree::findReplicaHavingCoveringPart(LogEntry & entry, bool active) { @@ -3330,7 +3407,6 @@ void StorageReplicatedMergeTree::updateQuorum(const String & part_name, bool is_ } } - void StorageReplicatedMergeTree::cleanLastPartNode(const String & partition_id) { auto zookeeper = getZooKeeper(); @@ -3382,7 +3458,6 @@ void StorageReplicatedMergeTree::cleanLastPartNode(const String & partition_id) } } - bool StorageReplicatedMergeTree::partIsInsertingWithParallelQuorum(const MergeTreePartInfo & part_info) const { auto zookeeper = getZooKeeper(); @@ -3411,7 +3486,8 @@ bool StorageReplicatedMergeTree::partIsLastQuorumPart(const MergeTreePartInfo & } bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const StorageMetadataPtr & metadata_snapshot, - const String & source_replica_path, bool to_detached, size_t quorum, zkutil::ZooKeeper::Ptr zookeeper_) + const String & source_replica_path, bool to_detached, size_t quorum, zkutil::ZooKeeper::Ptr zookeeper_, bool replace_exists, + DiskPtr replaced_disk, String replaced_part_path) { auto zookeeper = zookeeper_ ? zookeeper_ : getZooKeeper(); const auto part_info = MergeTreePartInfo::fromPartName(part_name, format_version); @@ -3461,6 +3537,8 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora }; DataPartPtr part_to_clone; + + if (!replace_exists) { /// If the desired part is a result of a part mutation, try to find the source part and compare /// its checksums to the checksums of the desired part. If they match, we can just clone the local part. 
@@ -3520,7 +3598,8 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora return fetcher.fetchPart( metadata_snapshot, part_name, source_replica_path, address.host, address.replication_port, - timeouts, user_password.first, user_password.second, interserver_scheme, to_detached); + timeouts, user_password.first, user_password.second, interserver_scheme, to_detached, "", true, + replace_exists ? replaced_disk : nullptr); }; } @@ -3530,46 +3609,56 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora if (!to_detached) { - Transaction transaction(*this); - renameTempPartAndReplace(part, nullptr, &transaction); - - /** NOTE - * Here, an error occurs if ALTER occurred with a change in the column type or column deletion, - * and the part on remote server has not yet been modified. - * After a while, one of the following attempts to make `fetchPart` succeed. - */ - replaced_parts = checkPartChecksumsAndCommit(transaction, part); - - /** If a quorum is tracked for this part, you must update it. - * If you do not have time, in case of losing the session, when you restart the server - see the `ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart` method. - */ - if (quorum) + if (replace_exists) { - /// Check if this quorum insert is parallel or not - if (zookeeper->exists(zookeeper_path + "/quorum/parallel/" + part_name)) - updateQuorum(part_name, true); - else if (zookeeper->exists(zookeeper_path + "/quorum/status")) - updateQuorum(part_name, false); + if (part->volume->getDisk()->getName() != replaced_disk->getName()) + throw Exception("Part " + part->name + " fetched on wrong disk " + part->volume->getDisk()->getName(), ErrorCodes::LOGICAL_ERROR); + replaced_disk->removeIfExists(replaced_part_path); + replaced_disk->moveDirectory(part->getFullRelativePath(), replaced_part_path); } - - /// merged parts that are still inserted with quorum. 
if it only contains one block, it hasn't been merged before - if (part_info.level != 0 || part_info.mutation != 0) + else { - Strings quorum_parts = zookeeper->getChildren(zookeeper_path + "/quorum/parallel"); - for (const String & quorum_part : quorum_parts) + Transaction transaction(*this); + renameTempPartAndReplace(part, nullptr, &transaction); + + /** NOTE + * Here, an error occurs if ALTER occurred with a change in the column type or column deletion, + * and the part on remote server has not yet been modified. + * After a while, one of the following attempts to make `fetchPart` succeed. + */ + replaced_parts = checkPartChecksumsAndCommit(transaction, part); + + /** If a quorum is tracked for this part, you must update it. + * If you do not have time, in case of losing the session, when you restart the server - see the `ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart` method. + */ + if (quorum) { - auto quorum_part_info = MergeTreePartInfo::fromPartName(quorum_part, format_version); - if (part_info.contains(quorum_part_info)) - updateQuorum(quorum_part, true); + /// Check if this quorum insert is parallel or not + if (zookeeper->exists(zookeeper_path + "/quorum/parallel/" + part_name)) + updateQuorum(part_name, true); + else if (zookeeper->exists(zookeeper_path + "/quorum/status")) + updateQuorum(part_name, false); } - } - merge_selecting_task->schedule(); + /// merged parts that are still inserted with quorum. 
if it only contains one block, it hasn't been merged before + if (part_info.level != 0 || part_info.mutation != 0) + { + Strings quorum_parts = zookeeper->getChildren(zookeeper_path + "/quorum/parallel"); + for (const String & quorum_part : quorum_parts) + { + auto quorum_part_info = MergeTreePartInfo::fromPartName(quorum_part, format_version); + if (part_info.contains(quorum_part_info)) + updateQuorum(quorum_part, true); + } + } - for (const auto & replaced_part : replaced_parts) - { - LOG_DEBUG(log, "Part {} is rendered obsolete by fetching part {}", replaced_part->name, part_name); - ProfileEvents::increment(ProfileEvents::ObsoleteReplicatedParts); + merge_selecting_task->schedule(); + + for (const auto & replaced_part : replaced_parts) + { + LOG_DEBUG(log, "Part {} is rendered obsolete by fetching part {}", replaced_part->name, part_name); + ProfileEvents::increment(ProfileEvents::ObsoleteReplicatedParts); + } } write_part_log({}); @@ -5315,13 +5404,13 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() } parts.clear(); - auto remove_parts_from_filesystem = [log=log,&zookeeper=zookeeper,&zookeeper_path=zookeeper_path,&replica_name=replica_name] (const DataPartsVector & parts_to_remove) + auto remove_parts_from_filesystem = [log=log] (const DataPartsVector & parts_to_remove) { for (const auto & part : parts_to_remove) { try { - bool keep_s3 = !part->unlockSharedData(zookeeper_path, replica_name, zookeeper); + bool keep_s3 = !part->unlockSharedData(); part->remove(keep_s3); } catch (...) 
@@ -6271,4 +6360,13 @@ void StorageReplicatedMergeTree::startBackgroundMovesIfNeeded() background_moves_executor.start(); } +StorageReplicatedMergeTree::ZooKeeperAccessData StorageReplicatedMergeTree::getZooKeeperAccessData() const +{ + ZooKeeperAccessData res; + res.zookeeper = tryGetZooKeeper(); + res.zookeeper_path = zookeeper_path; + res.replica_name = replica_name; + return res; +} + } diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index d396f32dcca..11dc475257e 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -211,6 +211,18 @@ public: /// is not overloaded bool canExecuteFetch(const ReplicatedMergeTreeLogEntry & entry, String & disable_reason) const; + struct ZooKeeperAccessData + { + zkutil::ZooKeeperPtr zookeeper; + String zookeeper_path; + String replica_name; + }; + + ZooKeeperAccessData getZooKeeperAccessData() const; + + /// Fetch part only when it stored on shared storage like S3 + bool executeFetchShared(ReplicatedMergeTreeLogEntry & entry); + private: /// Get a sequential consistent view of current parts. ReplicatedMergeTreeQuorumAddedParts::PartitionIdToMaxBlock getMaxAddedBlocks() const; @@ -369,8 +381,7 @@ private: String getChecksumsForZooKeeper(const MergeTreeDataPartChecksums & checksums) const; /// Accepts a PreComitted part, atomically checks its checksums with ones on other replicas and commit the part - DataPartsVector checkPartChecksumsAndCommit(Transaction & transaction, - const DataPartPtr & part); + DataPartsVector checkPartChecksumsAndCommit(Transaction & transaction, const DataPartPtr & part); bool partIsAssignedToBackgroundOperation(const DataPartPtr & part) const override; @@ -487,6 +498,10 @@ private: */ String findReplicaHavingPart(const String & part_name, bool active); + /** Returns a replica with part on shared storage like S3. 
+ */ + String findReplicaHavingSharedPart(const String & part_name, bool active); + bool checkReplicaHavePart(const String & replica, const String & part_name); /** Find replica having specified part or any part that covers it. @@ -508,7 +523,10 @@ private: const String & replica_path, bool to_detached, size_t quorum, - zkutil::ZooKeeper::Ptr zookeeper_ = nullptr); + zkutil::ZooKeeper::Ptr zookeeper_ = nullptr, + bool replace_exists = false, + DiskPtr replaced_disk = nullptr, + String replaced_part_path = ""); /// Required only to avoid races between executeLogEntry and fetchPartition std::unordered_set currently_fetching_parts; From df6c882aab57882f78c15baae200d593b3dad7e6 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Wed, 20 Jan 2021 12:48:22 +0300 Subject: [PATCH 012/333] Fix build after merge --- S3ZeroCopyReplication.md | 2 +- src/Disks/DiskCacheWrapper.cpp | 6 ++-- src/Disks/DiskCacheWrapper.h | 2 +- src/Disks/DiskDecorator.cpp | 4 +-- src/Disks/DiskDecorator.h | 2 +- src/Disks/IDisk.h | 4 +-- src/Disks/S3/DiskS3.cpp | 2 -- src/Disks/S3/DiskS3.h | 2 +- src/Storages/MergeTree/DataPartsExchange.cpp | 9 +++--- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 25 ++++++++++++--- src/Storages/MergeTree/MergeTreeSettings.h | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 31 ++++++++++++++----- 12 files changed, 61 insertions(+), 30 deletions(-) diff --git a/S3ZeroCopyReplication.md b/S3ZeroCopyReplication.md index bfb39addcd2..5230640ebcc 100644 --- a/S3ZeroCopyReplication.md +++ b/S3ZeroCopyReplication.md @@ -37,7 +37,7 @@ В гибридном хранилище если парт переносится на S3, нода через ZK проверяет, нет был ли парт перенесен другой нодой, если был, то делает fetch (модифицированный по сравнению с обычным fetch'ем). -В конфиг добавлен флаг, по которому включается функционал нового протокола репликации - merge_tree->allow_s3_zero_copy_replication. 
Сейчас стоит в true - это времеменно, чтобы все тесты сейчас проходили с включенным флагом, перед финальным мержем надо не забыть заменить на false. +В конфиг добавлен флаг, по которому включается функционал нового протокола репликации - merge_tree->allow_s3_zero_copy_replication. Сейчас стоит в false. ## Костыли и недоработки, коих много diff --git a/src/Disks/DiskCacheWrapper.cpp b/src/Disks/DiskCacheWrapper.cpp index c26fa7623a4..df30af769e1 100644 --- a/src/Disks/DiskCacheWrapper.cpp +++ b/src/Disks/DiskCacheWrapper.cpp @@ -278,11 +278,11 @@ void DiskCacheWrapper::removeRecursive(const String & path) DiskDecorator::removeRecursive(path); } -void DiskCacheWrapper::removeShared(const String & path, bool keep_s3) +void DiskCacheWrapper::removeSharedFile(const String & path, bool keep_s3) { if (cache_disk->exists(path)) - cache_disk->removeShared(path, keep_s3); - DiskDecorator::removeShared(path, keep_s3); + cache_disk->removeSharedFile(path, keep_s3); + DiskDecorator::removeSharedFile(path, keep_s3); } void DiskCacheWrapper::removeSharedRecursive(const String & path, bool keep_s3) diff --git a/src/Disks/DiskCacheWrapper.h b/src/Disks/DiskCacheWrapper.h index fc7ccaaa345..8995bf1936d 100644 --- a/src/Disks/DiskCacheWrapper.h +++ b/src/Disks/DiskCacheWrapper.h @@ -41,7 +41,7 @@ public: void removeFileIfExists(const String & path) override; void removeDirectory(const String & path) override; void removeRecursive(const String & path) override; - void removeShared(const String & path, bool keep_s3) override; + void removeSharedFile(const String & path, bool keep_s3) override; void removeSharedRecursive(const String & path, bool keep_s3) override; void createHardLink(const String & src_path, const String & dst_path) override; ReservationPtr reserve(UInt64 bytes) override; diff --git a/src/Disks/DiskDecorator.cpp b/src/Disks/DiskDecorator.cpp index 9c8c7859b8b..96d2e8278e3 100644 --- a/src/Disks/DiskDecorator.cpp +++ b/src/Disks/DiskDecorator.cpp @@ -150,9 +150,9 @@ 
void DiskDecorator::removeRecursive(const String & path) delegate->removeRecursive(path); } -void DiskDecorator::removeShared(const String & path, bool keep_s3) +void DiskDecorator::removeSharedFile(const String & path, bool keep_s3) { - delegate->removeShared(path, keep_s3); + delegate->removeSharedFile(path, keep_s3); } void DiskDecorator::removeSharedRecursive(const String & path, bool keep_s3) diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h index edba993639a..d069f8a84b6 100644 --- a/src/Disks/DiskDecorator.h +++ b/src/Disks/DiskDecorator.h @@ -43,7 +43,7 @@ public: void removeFileIfExists(const String & path) override; void removeDirectory(const String & path) override; void removeRecursive(const String & path) override; - void removeShared(const String & path, bool keep_s3) override; + void removeSharedFile(const String & path, bool keep_s3) override; void removeSharedRecursive(const String & path, bool keep_s3) override; void setLastModified(const String & path, const Poco::Timestamp & timestamp) override; Poco::Timestamp getLastModified(const String & path) override; diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 612c5ef88ee..a5a886c9c9f 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -201,10 +201,10 @@ public: /// Invoked when Global Context is shutdown. virtual void shutdown() { } - /// Return some uniq string for file, overrided for S3 + /// Return some uniq string for file, overrode for S3 virtual String getUniqueId(const String & path) const { return path; } - /// Check file, overrided for S3 only + /// Check file, overrode for S3 only virtual bool checkUniqueId(const String & id) const { return exists(id); } /// Returns executor to perform asynchronous operations. 
diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index bbedb2af8f6..aadfcfa82d6 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -935,8 +935,6 @@ bool DiskS3::checkUniqueId(const String & id) const throwIfError(resp); Aws::Vector object_list = resp.GetResult().GetContents(); - if (object_list.size() < 1) - return false; for (const auto & object : object_list) if (object.GetKey() == id) return true; diff --git a/src/Disks/S3/DiskS3.h b/src/Disks/S3/DiskS3.h index acfb75f681d..165f09ff1e4 100644 --- a/src/Disks/S3/DiskS3.h +++ b/src/Disks/S3/DiskS3.h @@ -118,7 +118,7 @@ public: String getUniqueId(const String & path) const override; - bool checkUniqueId(const String & path) const override; + bool checkUniqueId(const String & id) const override; private: bool tryReserve(UInt64 bytes); diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index f2ae78c85ce..7041cfd5ad2 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -619,11 +618,11 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( DiskPtr disk = disks_s3[0]; - for (const auto & disk_ : disks_s3) + for (const auto & disk_s3 : disks_s3) { - if (disk_->checkUniqueId(part_id)) + if (disk_s3->checkUniqueId(part_id)) { - disk = disk_; + disk = disk_s3; break; } } @@ -662,7 +661,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( String metadata_file = fullPath(disk, data_path); { - auto file_out = createWriteBufferFromFileBase(metadata_file, 0, 0, DBMS_DEFAULT_BUFFER_SIZE, -1); + auto file_out = std::make_unique(metadata_file, DBMS_DEFAULT_BUFFER_SIZE, -1, 0666, nullptr, 0); HashingWriteBuffer hashing_out(*file_out); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 89ec68e5068..69710311af3 100644 --- 
a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1132,7 +1132,9 @@ void IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & di if (disk->getType() == "s3") { - is_fetched = tryToFetchIfShared(disk, path_to_clone + "/" + name); + auto data_settings = storage.getSettings(); + if (data_settings->allow_s3_zero_copy_replication) + is_fetched = tryToFetchIfShared(disk, path_to_clone + "/" + name); } if (!is_fetched) @@ -1301,8 +1303,23 @@ void IMergeTreeDataPart::lockSharedData() const LOG_TRACE(storage.log, "Set zookeeper lock {}", zookeeper_node); - zk.zookeeper->createAncestors(zookeeper_node); - zk.zookeeper->createIfNotExists(zookeeper_node, "lock"); + /// In rare case other replica can remove path between createAncestors and createIfNotExists + /// So we make up to 5 attempts + for (int attempts = 5; attempts > 0; --attempts) + { + try + { + zk.zookeeper->createAncestors(zookeeper_node); + zk.zookeeper->createIfNotExists(zookeeper_node, "lock"); + break; + } + catch (const zkutil::KeeperException & e) + { + if (e.code == Coordination::Error::ZNONODE) + continue; + throw; + } + } } bool IMergeTreeDataPart::unlockSharedData() const @@ -1476,7 +1493,7 @@ bool IMergeTreeDataPart::tryToFetchIfShared(const DiskPtr & disk, const String & log_entry.disk = disk; log_entry.path = path; - /// TODO: !!! Fix const usage !!! 
+ /// TODO: Fix const usage StorageReplicatedMergeTree *replicated_storage_nc = const_cast(replicated_storage); return replicated_storage_nc->executeFetchShared(log_entry); diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 68c69c3687e..ab6e2cc995e 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -108,7 +108,7 @@ struct Settings; M(UInt64, concurrent_part_removal_threshold, 100, "Activate concurrent part removal (see 'max_part_removal_threads') only if the number of inactive data parts is at least this.", 0) \ M(String, storage_policy, "default", "Name of storage disk policy", 0) \ M(Bool, allow_nullable_key, false, "Allow Nullable types as primary keys.", 0) \ - M(Bool, allow_s3_zero_copy_replication, true, "Allow Zero-copy replication over S3", 0) \ + M(Bool, allow_s3_zero_copy_replication, false, "Allow Zero-copy replication over S3", 0) \ M(Bool, remove_empty_parts, true, "Remove empty parts after they were pruned by TTL, mutation, or collapsing merge algorithm", 0) \ M(Bool, assign_part_uuids, false, "Generate UUIDs for parts. Before enabling check that all replicas support new format.", 0) \ M(Int64, max_partitions_to_read, -1, "Limit the max number of partitions that can be accessed in one query. <= 0 means unlimited. 
This setting is the default that can be overridden by the query-level setting with the same name.", 0) \ diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index c3b8731cbe8..2002c124a66 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1498,11 +1498,28 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) { auto zookeeper = getZooKeeper(); String zookeeper_node = zookeeper_path + "/zero_copy_s3/merged/" + entry.new_part_name; - zookeeper->createAncestors(zookeeper_node); - auto code = zookeeper->tryCreate(zookeeper_node, "lock", zkutil::CreateMode::Ephemeral); - /// Someone else created or started create this merge - if (code == Coordination::Error::ZNODEEXISTS) - return false; + + /// In rare case other replica can remove path between createAncestors and tryCreate + /// So we make up to 5 attempts to make a lock + for (int attempts = 5; attempts > 0; --attempts) + { + try + { + zookeeper->createAncestors(zookeeper_node); + auto code = zookeeper->tryCreate(zookeeper_node, "lock", zkutil::CreateMode::Ephemeral); + /// Someone else created or started create this merge + if (code == Coordination::Error::ZNODEEXISTS) + return false; + if (code != Coordination::Error::ZNONODE) + break; + } + catch (const zkutil::KeeperException & e) + { + if (e.code == Coordination::Error::ZNONODE) + continue; + throw; + } + } } } @@ -1930,7 +1947,7 @@ bool StorageReplicatedMergeTree::executeFetchShared(ReplicatedMergeTreeLogEntry try { - if (!fetchPart(entry.new_part_name, metadata_snapshot, zookeeper_path + "/replicas/" + entry.source_replica, false, entry.quorum, + if (!fetchPart(entry.new_part_name, metadata_snapshot, zookeeper_path + "/replicas/" + entry.source_replica, false, entry.quorum, nullptr, true, entry.disk, entry.path)) return false; } @@ -3624,7 +3641,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora { 
if (part->volume->getDisk()->getName() != replaced_disk->getName()) throw Exception("Part " + part->name + " fetched on wrong disk " + part->volume->getDisk()->getName(), ErrorCodes::LOGICAL_ERROR); - replaced_disk->removeIfExists(replaced_part_path); + replaced_disk->removeFileIfExists(replaced_part_path); replaced_disk->moveDirectory(part->getFullRelativePath(), replaced_part_path); } else From cd94f708a16dc1135637fa7d9bf852317531798d Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Fri, 5 Feb 2021 20:13:44 +0300 Subject: [PATCH 013/333] Fix build after merge --- src/Disks/DiskDecorator.h | 7 +++---- src/Disks/IStoragePolicy.h | 2 ++ src/Disks/S3/DiskS3.cpp | 8 ++------ src/Disks/StoragePolicy.h | 2 +- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h index 18f43e1b9b6..d5ac6f0fda0 100644 --- a/src/Disks/DiskDecorator.h +++ b/src/Disks/DiskDecorator.h @@ -49,10 +49,9 @@ public: void setReadOnly(const String & path) override; void createHardLink(const String & src_path, const String & dst_path) override; void truncateFile(const String & path, size_t size) override; - int open(const String & path, mode_t mode) const override; - void close(int fd) const override; - void sync(int fd) const override; - const String getType() const override { return delegate->getType(); } + int open(const String & path, mode_t mode) const; + void close(int fd) const; + void sync(int fd) const; String getUniqueId(const String & path) const override { return delegate->getUniqueId(path); } bool checkUniqueId(const String & id) const override { return delegate->checkUniqueId(id); } DiskType::Type getType() const override { return delegate->getType(); } diff --git a/src/Disks/IStoragePolicy.h b/src/Disks/IStoragePolicy.h index a41ea87c328..957021441b8 100644 --- a/src/Disks/IStoragePolicy.h +++ b/src/Disks/IStoragePolicy.h @@ -36,6 +36,7 @@ public: /// mutations files virtual DiskPtr getAnyDisk() const = 0; virtual 
DiskPtr getDiskByName(const String & disk_name) const = 0; + virtual Disks getDisksByType(const String & type) const = 0; /// Get free space from most free disk virtual UInt64 getMaxUnreservedFreeSpace() const = 0; /// Reserves space on any volume with index > min_volume_index or returns nullptr @@ -57,6 +58,7 @@ public: /// Check if we have any volume with stopped merges virtual bool hasAnyVolumeWithDisabledMerges() const = 0; virtual bool containsVolume(const String & volume_name) const = 0; + /// Returns disks by type ordered by volumes priority }; } diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index f223e423256..56789cf6327 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -23,12 +23,8 @@ #include #include #include -<<<<<<< HEAD -#include -======= #include #include ->>>>>>> master #include @@ -985,10 +981,10 @@ bool DiskS3::checkUniqueId(const String & id) const { /// Check that we have right s3 and have access rights /// Actually interprets id as s3 object name and checks if it exists - Aws::S3::Model::ListObjectsRequest request; + Aws::S3::Model::ListObjectsV2Request request; request.SetBucket(bucket); request.SetPrefix(id); - auto resp = client->ListObjects(request); + auto resp = client->ListObjectsV2(request); throwIfError(resp); Aws::Vector object_list = resp.GetResult().GetContents(); diff --git a/src/Disks/StoragePolicy.h b/src/Disks/StoragePolicy.h index 5cc92e1ede7..7e72fcda8b1 100644 --- a/src/Disks/StoragePolicy.h +++ b/src/Disks/StoragePolicy.h @@ -48,7 +48,7 @@ public: Disks getDisks() const override; /// Returns disks by type ordered by volumes priority - Disks getDisksByType(const String & type) const; + Disks getDisksByType(const String & type) const override; /// Returns any disk /// Used when it's not important, for example for From 4d44d75bc74666c11e08ccabfb11b34a1d093558 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Mon, 8 Feb 2021 14:45:10 +0300 Subject: [PATCH 014/333] Fix build after merge one 
more time --- src/Disks/StoragePolicy.cpp | 2 +- src/Storages/MergeTree/DataPartsExchange.cpp | 6 +++--- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 8 ++++---- src/Storages/StorageReplicatedMergeTree.cpp | 6 ++---- 4 files changed, 10 insertions(+), 12 deletions(-) diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index 55ccc39c58a..be40a5ae72d 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -164,7 +164,7 @@ Disks StoragePolicy::getDisksByType(const String & type) const Disks res; for (const auto & volume : volumes) for (const auto & disk : volume->getDisks()) - if (disk->getType() == type) + if (DB::DiskType::toString(disk->getType()) == type) res.push_back(disk); return res; } diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index d93e16fe154..d031989bfcd 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -157,7 +157,7 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo if (send_s3_metadata == 1) { auto disk = part->volume->getDisk(); - if (disk->getType() == "s3") + if (disk->getType() == DB::DiskType::Type::S3) { try_use_s3_copy = true; } @@ -262,7 +262,7 @@ void Service::sendPartS3Metadata(const MergeTreeData::DataPartPtr & part, WriteB checksums.files[file_name] = {}; auto disk = part->volume->getDisk(); - if (disk->getType() != "s3") + if (disk->getType() != DB::DiskType::Type::S3) throw Exception("S3 disk is not S3 anymore", ErrorCodes::LOGICAL_ERROR); part->lockSharedData(); @@ -347,7 +347,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( {"compress", "false"} }); - if (try_use_s3_copy && disk_s3 && disk_s3->getType() != "s3") + if (try_use_s3_copy && disk_s3 && disk_s3->getType() != DB::DiskType::Type::S3) throw Exception("Try to fetch shared s3 part on non-s3 disk", ErrorCodes::LOGICAL_ERROR); Disks disks_s3; diff --git 
a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index b5ab3c84558..5c35a8d0af3 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1168,7 +1168,7 @@ void IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & di bool is_fetched = false; - if (disk->getType() == "s3") + if (disk->getType() == DB::DiskType::Type::S3) { auto data_settings = storage.getSettings(); if (data_settings->allow_s3_zero_copy_replication) @@ -1317,7 +1317,7 @@ String IMergeTreeDataPart::getUniqueId() const auto disk = volume->getDisk(); - if (disk->getType() == "s3") + if (disk->getType() == DB::DiskType::Type::S3) id = disk->getUniqueId(getFullRelativePath() + "checksums.txt"); if (id.empty()) @@ -1333,7 +1333,7 @@ void IMergeTreeDataPart::lockSharedData() const DiskPtr disk = volume->getDisk(); if (!disk) return; - if (disk->getType() != "s3") + if (disk->getType() != DB::DiskType::Type::S3) return; const StorageReplicatedMergeTree *replicated_storage = dynamic_cast(&storage); @@ -1384,7 +1384,7 @@ bool IMergeTreeDataPart::unlockSharedData(const String & path) const DiskPtr disk = volume->getDisk(); if (!disk) return true; - if (disk->getType() != "s3") + if (disk->getType() != DB::DiskType::Type::S3) return true; const StorageReplicatedMergeTree *replicated_storage = dynamic_cast(&storage); diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 6bea0bbd7f3..25d379e2960 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1491,10 +1491,9 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) future_merged_part.updatePath(*this, reserved_space); future_merged_part.merge_type = entry.merge_type; -<<<<<<< HEAD { auto disk = reserved_space->getDisk(); - if (disk->getType() == "s3") + if (disk->getType() == DB::DiskType::Type::S3) { auto 
zookeeper = getZooKeeper(); String zookeeper_node = zookeeper_path + "/zero_copy_s3/merged/" + entry.new_part_name; @@ -1522,11 +1521,10 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) } } } -======= + /// Account TTL merge if (isTTLMergeType(future_merged_part.merge_type)) global_context.getMergeList().bookMergeWithTTL(); ->>>>>>> master auto table_id = getStorageID(); /// Add merge to list From f0163c2acfe41c78124e49582301e896ee3f8240 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Fri, 12 Feb 2021 17:02:04 +0300 Subject: [PATCH 015/333] Don't create empty parts on INSERT --- src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp | 6 ++++++ src/Storages/MergeTree/MergeTreeDataWriter.cpp | 5 +++++ .../MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp | 5 +++++ .../queries/0_stateless/01560_optimize_on_insert.reference | 1 + tests/queries/0_stateless/01560_optimize_on_insert.sql | 7 +++++++ 5 files changed, 24 insertions(+) diff --git a/src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp b/src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp index 904081cc1df..bb5644567ae 100644 --- a/src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergeTreeBlockOutputStream.cpp @@ -29,6 +29,12 @@ void MergeTreeBlockOutputStream::write(const Block & block) Stopwatch watch; MergeTreeData::MutableDataPartPtr part = storage.writer.writeTempPart(current_block, metadata_snapshot, optimize_on_insert); + + /// If optimize_on_insert setting is true, current_block could become empty after merge + /// and we didn't create part. 
+ if (!part) + continue; + storage.renameTempPartAndAdd(part, &storage.increment); PartLog::addNewPart(storage.global_context, part, watch.elapsed()); diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 5a9bdd90bc8..5929293d714 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -327,6 +327,11 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataWriter::writeTempPart(BlockWithPa /// Size of part would not be greater than block.bytes() + epsilon size_t expected_size = block.bytes(); + /// If optimize_on_insert is true, block may become empty after merge. + /// There is no need to create empty part. + if (expected_size == 0) + return nullptr; + DB::IMergeTreeDataPart::TTLInfos move_ttl_infos; const auto & move_ttl_entries = metadata_snapshot->getMoveTTLs(); for (const auto & ttl_entry : move_ttl_entries) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp index 7046a510f75..6f90d9f00a9 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp @@ -144,6 +144,11 @@ void ReplicatedMergeTreeBlockOutputStream::write(const Block & block) MergeTreeData::MutableDataPartPtr part = storage.writer.writeTempPart(current_block, metadata_snapshot, optimize_on_insert); + /// If optimize_on_insert setting is true, current_block could become empty after merge + /// and we didn't create part. 
+ if (!part) + continue; + String block_id; if (deduplicate) diff --git a/tests/queries/0_stateless/01560_optimize_on_insert.reference b/tests/queries/0_stateless/01560_optimize_on_insert.reference index 7ace2043be0..477f48be7a9 100644 --- a/tests/queries/0_stateless/01560_optimize_on_insert.reference +++ b/tests/queries/0_stateless/01560_optimize_on_insert.reference @@ -11,3 +11,4 @@ Summing Merge Tree Aggregating Merge Tree 1 5 2020-01-01 00:00:00 2 5 2020-01-02 00:00:00 +Check creating empty parts diff --git a/tests/queries/0_stateless/01560_optimize_on_insert.sql b/tests/queries/0_stateless/01560_optimize_on_insert.sql index 9f6dac686bb..f64f4c75cfe 100644 --- a/tests/queries/0_stateless/01560_optimize_on_insert.sql +++ b/tests/queries/0_stateless/01560_optimize_on_insert.sql @@ -33,3 +33,10 @@ INSERT INTO aggregating_merge_tree VALUES (1, 1, '2020-01-01'), (2, 1, '2020-01- SELECT * FROM aggregating_merge_tree ORDER BY key; DROP TABLE aggregating_merge_tree; +SELECT 'Check creating empty parts'; +DROP TABLE IF EXISTS empty; +CREATE TABLE empty (key UInt32, val UInt32, date Datetime) ENGINE=SummingMergeTree(val) PARTITION BY date ORDER BY key; +INSERT INTO empty VALUES (1, 1, '2020-01-01'), (1, 1, '2020-01-01'), (1, -2, '2020-01-01'); +SELECT * FROM empty ORDER BY key; +SELECT table, partition, active FROM system.parts where table = 'empty' and active = 1; +DROP TABLE empty; From 6eeef74d4389d97fcd614d3ae0b49025c6ac1a91 Mon Sep 17 00:00:00 2001 From: George Date: Tue, 16 Feb 2021 00:32:39 +0300 Subject: [PATCH 016/333] first draft --- docs/en/sql-reference/statements/detach.md | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index 62a7c0cc1e0..f3f8b053724 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -5,7 +5,9 @@ toc_title: DETACH # DETACH Statement {#detach} -Deletes 
information about the ‘name’ table from the server. The server stops knowing about the table’s existence. +Deletes information about the `name` table from the server. The server stops knowing about the table’s existence. + +Syntax: ``` sql DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] @@ -13,4 +15,20 @@ DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] This does not delete the table’s data or metadata. On the next server launch, the server will read the metadata and find out about the table again. -Similarly, a “detached” table can be re-attached using the `ATTACH` query (with the exception of system tables, which do not have metadata stored for them). +Similarly, a “detached” table can be re-attached using the [ATTACH](../../sql-reference/statements/attach) query (with the exception of system tables, which do not have metadata stored for them). + +## DETACH PERMAMENTLY {#detach-permamently} + +Deletes information about `name` table or view from the server. Permamently detached tables won't automatically reappear after the server restart. + +Syntax: + +``` sql +DETACH TABLE/VIEW [IF EXISTS] [db.]name PERMAMENTLY [ON CLUSTER cluster] +``` + +This statement does not delete the table’s data or metadata. + +Permamently detached table or view can be reattached with [ATTACH](../../sql-reference/statements/attach) query and can be shown with [SHOW CREATE TABLE](../../sql-reference/statements/show.md#show-create-table) query. 
+ +[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/detach/) From 341e7bc8482e99478a0e40ea1afa446ca15f9312 Mon Sep 17 00:00:00 2001 From: George Date: Tue, 16 Feb 2021 13:23:52 +0300 Subject: [PATCH 017/333] Fixed links --- docs/en/sql-reference/statements/detach.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index f3f8b053724..b2720acaaa5 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -15,7 +15,7 @@ DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] This does not delete the table’s data or metadata. On the next server launch, the server will read the metadata and find out about the table again. -Similarly, a “detached” table can be re-attached using the [ATTACH](../../sql-reference/statements/attach) query (with the exception of system tables, which do not have metadata stored for them). +Similarly, a “detached” table can be re-attached using the [ATTACH](../../sql-reference/statements/attach.md) query (with the exception of system tables, which do not have metadata stored for them). ## DETACH PERMAMENTLY {#detach-permamently} @@ -29,6 +29,6 @@ DETACH TABLE/VIEW [IF EXISTS] [db.]name PERMAMENTLY [ON CLUSTER cluster] This statement does not delete the table’s data or metadata. -Permamently detached table or view can be reattached with [ATTACH](../../sql-reference/statements/attach) query and can be shown with [SHOW CREATE TABLE](../../sql-reference/statements/show.md#show-create-table) query. +Permamently detached table or view can be reattached with [ATTACH](../../sql-reference/statements/attach.md) query and can be shown with [SHOW CREATE TABLE](../../sql-reference/statements/show.md#show-create-table) query. 
[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/detach/) From 17d7a49106342536a0348c020ca92e1cafc52434 Mon Sep 17 00:00:00 2001 From: George Date: Tue, 16 Feb 2021 13:33:20 +0300 Subject: [PATCH 018/333] Fixed typos --- docs/en/sql-reference/statements/detach.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index b2720acaaa5..adb2df570d7 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -17,9 +17,9 @@ This does not delete the table’s data or metadata. On the next server launch, Similarly, a “detached” table can be re-attached using the [ATTACH](../../sql-reference/statements/attach.md) query (with the exception of system tables, which do not have metadata stored for them). -## DETACH PERMAMENTLY {#detach-permamently} +## DETACH PERMANENTLY {#detach-permanently} -Deletes information about `name` table or view from the server. Permamently detached tables won't automatically reappear after the server restart. +Deletes information about `name` table or view from the server. Permanently detached tables won't automatically reappear after the server restart. Syntax: @@ -29,6 +29,6 @@ DETACH TABLE/VIEW [IF EXISTS] [db.]name PERMAMENTLY [ON CLUSTER cluster] This statement does not delete the table’s data or metadata. -Permamently detached table or view can be reattached with [ATTACH](../../sql-reference/statements/attach.md) query and can be shown with [SHOW CREATE TABLE](../../sql-reference/statements/show.md#show-create-table) query. +Permanently detached table or view can be reattached with [ATTACH](../../sql-reference/statements/attach.md) query and can be shown with [SHOW CREATE TABLE](../../sql-reference/statements/show.md#show-create-table) query. 
[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/detach/) From 2d03d330bcc400a0b61c8028b01587de072aa60e Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Fri, 17 Apr 2020 16:26:44 +0300 Subject: [PATCH 019/333] Extended range of DateTime64 to years 1925 - 2238 The Year 1925 is a starting point because most of the timezones switched to saner (mostly 15-minutes based) offsets somewhere during 1924 or before. And that significantly simplifies implementation. 2238 is to simplify arithmetics for sanitizing LUT index access; there are less than 0x1ffff days from 1925. * Extended DateLUTImpl internal LUT to 0x1ffff items, some of which represent negative (pre-1970) time values. As a collateral benefit, Date now correctly supports dates up to 2149 (instead of 2106). * Added a new strong typedef ExtendedDayNum, which represents dates pre-1970 and post 2149. * Functions that used to return DayNum now return ExtendedDayNum. * Refactored DateLUTImpl to untie DayNum from the dual role of being a value and an index (due to negative time). Index is now a different type LUTIndex with explicit conversion functions from DatNum, time_t, and ExtendedDayNum. * Updated DateLUTImpl to properly support values close to epoch start (1970-01-01 00:00), including negative ones. * Reduced resolution of DateLUTImpl::Values::time_at_offset_change to multiple of 15-minutes to allow storing 64-bits of time_t in DateLUTImpl::Value while keeping same size. * Minor performance updates to DateLUTImpl when building month LUT by skipping non-start-of-month days. * Fixed extractTimeZoneFromFunctionArguments to work correctly with DateTime64. * New unit-tests and stateless integration tests for both DateTime and DateTime64. 
--- base/common/DateLUT.h | 1 - base/common/DateLUTImpl.cpp | 42 +- base/common/DateLUTImpl.h | 683 +++++++++++------- base/common/DayNum.h | 5 + base/common/LocalDate.h | 3 +- base/common/strong_typedef.h | 1 + base/common/tests/CMakeLists.txt | 4 +- base/common/tests/gtest_DateLutImpl.cpp | 515 +++++++++++++ programs/client/Client.cpp | 2 +- src/Core/DecimalFunctions.h | 18 +- src/Core/MySQL/MySQLReplication.cpp | 8 +- src/Core/tests/gtest_DecimalFunctions.cpp | 2 +- src/DataStreams/MongoDBBlockInputStream.cpp | 4 +- src/DataTypes/DataTypeDateTime64.h | 61 -- src/Functions/CustomWeekTransforms.h | 24 +- src/Functions/DateTimeTransforms.h | 225 +++++- src/Functions/FunctionCustomWeekToSomething.h | 1 + .../FunctionDateOrDateTimeAddInterval.h | 143 ++-- .../FunctionDateOrDateTimeToSomething.h | 3 +- src/Functions/FunctionsConversion.h | 106 +-- src/Functions/TransformDateTime64.h | 92 +++ src/Functions/dateDiff.cpp | 4 +- .../extractTimeZoneFromFunctionArguments.cpp | 5 +- src/Functions/formatDateTime.cpp | 6 +- src/Functions/now64.cpp | 2 +- src/Functions/toStartOfInterval.cpp | 50 +- src/Functions/today.cpp | 2 +- src/IO/ReadHelpers.h | 10 +- src/IO/WriteHelpers.h | 16 +- src/IO/parseDateTimeBestEffort.cpp | 15 +- src/IO/parseDateTimeBestEffort.h | 1 + src/Interpreters/CrashLog.cpp | 2 +- src/Interpreters/MetricLog.cpp | 2 +- src/Interpreters/OpenTelemetrySpanLog.cpp | 2 +- src/Interpreters/PartLog.cpp | 2 +- src/Interpreters/QueryLog.cpp | 2 +- src/Interpreters/QueryThreadLog.cpp | 2 +- src/Interpreters/TextLog.cpp | 2 +- src/Interpreters/TraceLog.cpp | 2 +- src/Interpreters/convertFieldToType.cpp | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- src/Storages/tests/part_name.cpp | 2 +- .../00921_datetime64_compatibility.python | 5 +- .../00921_datetime64_compatibility.reference | 152 ---- .../00921_datetime64_compatibility.sh | 2 +- .../01252_weird_time_zone.reference | 14 +- .../0_stateless/01252_weird_time_zone.sql | 28 +- 
.../01440_to_date_monotonicity.reference | 2 +- .../01561_Date_and_DateTime64_comparision.sql | 6 +- ...1_date_overflow_as_partition_key.reference | 4 +- .../01631_date_overflow_as_partition_key.sql | 2 +- .../01691_DateTime64_clamp.reference | 23 + .../0_stateless/01691_DateTime64_clamp.sql | 7 + .../convert-month-partitioned-parts/main.cpp | 5 +- 54 files changed, 1585 insertions(+), 741 deletions(-) create mode 100644 base/common/tests/gtest_DateLutImpl.cpp create mode 100644 src/Functions/TransformDateTime64.h diff --git a/base/common/DateLUT.h b/base/common/DateLUT.h index 93c6cb403e2..378b4360f3b 100644 --- a/base/common/DateLUT.h +++ b/base/common/DateLUT.h @@ -32,7 +32,6 @@ public: return date_lut.getImplementation(time_zone); } - static void setDefaultTimezone(const std::string & time_zone) { auto & date_lut = getInstance(); diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index 50620e21b8f..906f88fa90f 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -46,19 +46,26 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) if (&inside_main) assert(inside_main); - size_t i = 0; - time_t start_of_day = 0; cctz::time_zone cctz_time_zone; if (!cctz::load_time_zone(time_zone, &cctz_time_zone)) throw Poco::Exception("Cannot load time zone " + time_zone_); - cctz::time_zone::absolute_lookup start_of_epoch_lookup = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(start_of_day)); - offset_at_start_of_epoch = start_of_epoch_lookup.offset; + const cctz::civil_day epoch{1970, 1, 1}; + const cctz::civil_day lut_start{DATE_LUT_MIN_YEAR, 1, 1}; + time_t start_of_day = std::chrono::system_clock::to_time_t(cctz_time_zone.lookup(lut_start).pre); + time_offset_epoch = cctz::convert(cctz::civil_second(lut_start), cctz_time_zone).time_since_epoch().count(); + + // Note validated this against all timezones in the system. 
+ assert((epoch - lut_start) == daynum_offset_epoch); + + offset_at_start_of_epoch = cctz_time_zone.lookup(cctz_time_zone.lookup(epoch).pre).offset; + offset_at_start_of_lut = cctz_time_zone.lookup(cctz_time_zone.lookup(lut_start).pre).offset; offset_is_whole_number_of_hours_everytime = true; - cctz::civil_day date{1970, 1, 1}; + cctz::civil_day date = lut_start; + UInt32 i = 0; do { cctz::time_zone::civil_lookup lookup = cctz_time_zone.lookup(date); @@ -72,7 +79,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) values.day_of_week = getDayOfWeek(date); values.date = start_of_day; - assert(values.year >= DATE_LUT_MIN_YEAR && values.year <= DATE_LUT_MAX_YEAR); + assert(values.year >= DATE_LUT_MIN_YEAR && values.year <= DATE_LUT_MAX_YEAR + 1); assert(values.month >= 1 && values.month <= 12); assert(values.day_of_month >= 1 && values.day_of_month <= 31); assert(values.day_of_week >= 1 && values.day_of_week <= 7); @@ -85,10 +92,13 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) else values.days_in_month = i != 0 ? lut[i - 1].days_in_month : 31; - values.time_at_offset_change = 0; - values.amount_of_offset_change = 0; + values.time_at_offset_change_value = 0; + values.amount_of_offset_change_value = 0; - if (start_of_day % 3600) + // TODO: this partially ignores fractional pre-epoch offsets, which may cause incorrect toRelativeHourNum() results for some timezones, namelly Europe\Minsk + // when pre-May 2 1924 it had an offset of UTC+1:50, and after it was UTC+2h. + // https://www.timeanddate.com/time/zone/belarus/minsk?syear=1900 + if (start_of_day > 0 && start_of_day % 3600) offset_is_whole_number_of_hours_everytime = false; /// If UTC offset was changed in previous day. 
@@ -97,7 +107,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) auto amount_of_offset_change_at_prev_day = 86400 - (lut[i].date - lut[i - 1].date); if (amount_of_offset_change_at_prev_day) { - lut[i - 1].amount_of_offset_change = amount_of_offset_change_at_prev_day; + lut[i - 1].amount_of_offset_change_value = amount_of_offset_change_at_prev_day / Values::OffsetChangeFactor; const auto utc_offset_at_beginning_of_day = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(lut[i - 1].date)).offset; @@ -116,11 +126,11 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) time_at_offset_change += 900; } - lut[i - 1].time_at_offset_change = time_at_offset_change; + lut[i - 1].time_at_offset_change_value = time_at_offset_change / Values::OffsetChangeFactor; - /// We doesn't support cases when time change results in switching to previous day. - if (static_cast(lut[i - 1].time_at_offset_change) + static_cast(lut[i - 1].amount_of_offset_change) < 0) - lut[i - 1].time_at_offset_change = -lut[i - 1].amount_of_offset_change; + /// We don't support cases when time change results in switching to previous day. + if (static_cast(lut[i - 1].time_at_offset_change()) + static_cast(lut[i - 1].amount_of_offset_change()) < 0) + lut[i - 1].time_at_offset_change_value = -lut[i - 1].amount_of_offset_change_value; } } @@ -128,7 +138,9 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) ++date; ++i; } - while (start_of_day <= DATE_LUT_MAX && i <= DATE_LUT_MAX_DAY_NUM); + while (i < DATE_LUT_SIZE && lut[i - 1].year <= DATE_LUT_MAX_YEAR); + +// date_lut_max = start_of_day; /// Fill excessive part of lookup table. This is needed only to simplify handling of overflow cases. 
while (i < DATE_LUT_SIZE) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 064787fb64e..adfffb04681 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -11,9 +11,9 @@ #define DATE_LUT_MAX (0xFFFFFFFFU - 86400) #define DATE_LUT_MAX_DAY_NUM (0xFFFFFFFFU / 86400) /// Table size is bigger than DATE_LUT_MAX_DAY_NUM to fill all indices within UInt16 range: this allows to remove extra check. -#define DATE_LUT_SIZE 0x10000 -#define DATE_LUT_MIN_YEAR 1970 -#define DATE_LUT_MAX_YEAR 2106 /// Last supported year (incomplete) +#define DATE_LUT_SIZE 0x20000 +#define DATE_LUT_MIN_YEAR 1925 /// 1925 since wast majority of timezones changed to 15-minute aligned offsets somewhere in 1924 or earlier. +#define DATE_LUT_MAX_YEAR 2283 /// Last supported year (complete) #define DATE_LUT_YEARS (1 + DATE_LUT_MAX_YEAR - DATE_LUT_MIN_YEAR) /// Number of years in lookup table #if defined(__PPC__) @@ -45,14 +45,68 @@ public: DateLUTImpl(const DateLUTImpl &&) = delete; DateLUTImpl & operator=(const DateLUTImpl &&) = delete; + // Normalized and bound-checked index of element in lut, + // has to be a separate type to support overloading + // TODO: make sure that any arithmetic on LUTIndex actually results in valid LUTIndex. 
+ STRONG_TYPEDEF(UInt32, LUTIndex) + template + friend inline LUTIndex operator+(const LUTIndex & index, const T v) + { + return LUTIndex{(index.toUnderType() + v) & date_lut_mask}; + } + template + friend inline LUTIndex operator+(const T v, const LUTIndex & index) + { + return LUTIndex{(v + index.toUnderType()) & date_lut_mask}; + } + friend inline LUTIndex operator+(const LUTIndex & index, const LUTIndex & v) + { + return LUTIndex{(index.toUnderType() + v.toUnderType()) & date_lut_mask}; + } + + template + friend inline LUTIndex operator-(const LUTIndex & index, const T v) + { + return LUTIndex{(index.toUnderType() - v) & date_lut_mask}; + } + template + friend inline LUTIndex operator-(const T v, const LUTIndex & index) + { + return LUTIndex{(v - index.toUnderType()) & date_lut_mask}; + } + friend inline LUTIndex operator-(const LUTIndex & index, const LUTIndex & v) + { + return LUTIndex{(index.toUnderType() - v.toUnderType()) & date_lut_mask}; + } + + template + friend inline LUTIndex operator*(const LUTIndex & index, const T v) + { + return LUTIndex{(index.toUnderType() * v) & date_lut_mask}; + } + template + friend inline LUTIndex operator*(const T v, const LUTIndex & index) + { + return LUTIndex{(v * index.toUnderType()) & date_lut_mask}; + } + + template + friend inline LUTIndex operator/(const LUTIndex & index, const T v) + { + return LUTIndex{(index.toUnderType() / v) & date_lut_mask}; + } + template + friend inline LUTIndex operator/(const T v, const LUTIndex & index) + { + return LUTIndex{(v / index.toUnderType()) & date_lut_mask}; + } + public: /// The order of fields matters for alignment and sizeof. struct Values { - /// Least significat 32 bits from time_t at beginning of the day. - /// If the unix timestamp of beginning of the day is negative (example: 1970-01-01 MSK, where time_t == -10800), then value will overflow. - /// Change to time_t; change constants above; and recompile the sources if you need to support time after 2105 year. 
- UInt32 date; + /// Least significat 64 bits from time_t at beginning of the day. + Int64 date; /// Properties of the day. UInt16 year; @@ -65,107 +119,175 @@ public: UInt8 days_in_month; /// For days, when offset from UTC was changed due to daylight saving time or permanent change, following values could be non zero. - Int16 amount_of_offset_change; /// Usually -3600 or 3600, but look at Lord Howe Island. - UInt32 time_at_offset_change; /// In seconds from beginning of the day. + Int8 amount_of_offset_change_value; /// Usually -3600 or 3600, but look at Lord Howe Island. multiply by OffsetChangeFactor + UInt8 time_at_offset_change_value; /// In seconds from beginning of the day. multiply by OffsetChangeFactor + + inline Int32 amount_of_offset_change() const + { + return static_cast(amount_of_offset_change_value) * OffsetChangeFactor; + } + + inline UInt32 time_at_offset_change() const + { + return static_cast(time_at_offset_change_value) * OffsetChangeFactor; + } + + /// Since most of the modern timezones have a DST change aligned to 15 minutes, to save as much space as possible inside Value, + /// we are dividing any offset change related value by this factor before setting it to Value, + /// hence it has to be explicitly multiplied back by this factor before being used. + static const UInt16 OffsetChangeFactor = 900; }; static_assert(sizeof(Values) == 16); private: - /// Lookup table is indexed by DayNum. + + // Mask is all-ones to allow efficient protection against overflow. + static const UInt32 date_lut_mask = 0x1ffff; + static_assert(date_lut_mask == DATE_LUT_SIZE - 1); + + const UInt32 daynum_offset_epoch = 16436; // offset to epoch in days (ExtendedDayNum) of the first day in LUT. + + /// Lookup table is indexed by LUTIndex. /// Day nums are the same in all time zones. 1970-01-01 is 0 and so on. /// Table is relatively large, so better not to place the object on stack. /// In comparison to std::vector, plain array is cheaper by one indirection. 
- Values lut[DATE_LUT_SIZE]; + Values lut[DATE_LUT_SIZE + 1]; - /// Year number after DATE_LUT_MIN_YEAR -> day num for start of year. - DayNum years_lut[DATE_LUT_YEARS]; + /// Year number after DATE_LUT_MIN_YEAR -> LUTIndex in lut for start of year. + LUTIndex years_lut[DATE_LUT_YEARS]; /// Year number after DATE_LUT_MIN_YEAR * month number starting at zero -> day num for first day of month - DayNum years_months_lut[DATE_LUT_YEARS * 12]; + LUTIndex years_months_lut[DATE_LUT_YEARS * 12]; /// UTC offset at beginning of the Unix epoch. The same as unix timestamp of 1970-01-01 00:00:00 local time. time_t offset_at_start_of_epoch; + time_t offset_at_start_of_lut; bool offset_is_whole_number_of_hours_everytime; + time_t time_offset_epoch; /// Time zone name. std::string time_zone; - - /// We can correctly process only timestamps that less DATE_LUT_MAX (i.e. up to 2105 year inclusively) - /// We don't care about overflow. - inline DayNum findIndex(time_t t) const + inline LUTIndex findIndex(time_t t) const { /// First guess. - DayNum guess(t / 86400); + const UInt32 guess = ((t / 86400) + daynum_offset_epoch) & date_lut_mask; /// UTC offset is from -12 to +14 in all known time zones. This requires checking only three indices. - - if ((guess == 0 || t >= lut[guess].date) && t < lut[DayNum(guess + 1)].date) - return guess; + if ((guess == daynum_offset_epoch || t >= lut[guess].date) && t < lut[UInt32(guess + 1)].date) + return LUTIndex{guess}; /// Time zones that have offset 0 from UTC do daylight saving time change (if any) towards increasing UTC offset (example: British Standard Time). 
- if (t >= lut[DayNum(guess + 1)].date) - return DayNum(guess + 1); + if (t >= lut[UInt32(guess + 1)].date) + return LUTIndex(guess + 1); - return DayNum(guess - 1); + if (lut[guess - 1].date <= t) + return LUTIndex(guess - 1); + return LUTIndex(guess - 2); } - inline const Values & find(time_t t) const + inline LUTIndex toLUTIndex(DayNum d) const { - return lut[findIndex(t)]; + return LUTIndex{(d + daynum_offset_epoch) & date_lut_mask}; + } + + inline LUTIndex toLUTIndex(ExtendedDayNum d) const + { + return LUTIndex{static_cast(d + daynum_offset_epoch) & date_lut_mask}; + } + + inline LUTIndex toLUTIndex(time_t t) const + { + return findIndex(t); + } + + inline LUTIndex toLUTIndex(LUTIndex i) const + { + return i; + } + +// template +// inline LUTIndex toLUTIndex(T t) const +// { +// return LUTIndex{static_cast(t) & date_lut_mask}; +// } + + template + inline const Values & find(V v) const + { + return lut[toLUTIndex(v)]; } public: const std::string & getTimeZone() const { return time_zone; } + // Methods only for unit-testing, it makes very little sense to use it from user code. + auto getOffsetAtStartOfEpoch() const { return offset_at_start_of_epoch; } + auto getOffsetIsWholNumberOfHoursEveryWhere() const { return offset_is_whole_number_of_hours_everytime; } + auto getTimeOffsetEpoch() const { return time_offset_epoch; } + auto getTimeOffsetAtStartOfLUT() const { return offset_at_start_of_lut; } + /// All functions below are thread-safe; arguments are not checked. 
- inline time_t toDate(time_t t) const { return find(t).date; } - inline unsigned toMonth(time_t t) const { return find(t).month; } - inline unsigned toQuarter(time_t t) const { return (find(t).month - 1) / 3 + 1; } - inline unsigned toYear(time_t t) const { return find(t).year; } - inline unsigned toDayOfWeek(time_t t) const { return find(t).day_of_week; } - inline unsigned toDayOfMonth(time_t t) const { return find(t).day_of_month; } + inline ExtendedDayNum toDayNum(ExtendedDayNum d) const + { + return d; + } + + template + inline ExtendedDayNum toDayNum(V v) const + { + return ExtendedDayNum{static_cast(toLUTIndex(v).toUnderType()) - daynum_offset_epoch}; + } /// Round down to start of monday. - inline time_t toFirstDayOfWeek(time_t t) const + template + inline time_t toFirstDayOfWeek(V v) const { - DayNum index = findIndex(t); - return lut[DayNum(index - (lut[index].day_of_week - 1))].date; + const auto i = toLUTIndex(v); + return lut[i - (lut[i].day_of_week - 1)].date; } - inline DayNum toFirstDayNumOfWeek(DayNum d) const + template + inline ExtendedDayNum toFirstDayNumOfWeek(V v) const { - return DayNum(d - (lut[d].day_of_week - 1)); - } - - inline DayNum toFirstDayNumOfWeek(time_t t) const - { - return toFirstDayNumOfWeek(toDayNum(t)); + const auto i = toLUTIndex(v); + return toDayNum(i - (lut[i].day_of_week - 1)); } /// Round down to start of month. 
- inline time_t toFirstDayOfMonth(time_t t) const + template + inline time_t toFirstDayOfMonth(V v) const { - DayNum index = findIndex(t); - return lut[index - (lut[index].day_of_month - 1)].date; + const auto i = toLUTIndex(v); + return lut[i - (lut[i].day_of_month - 1)].date; } - inline DayNum toFirstDayNumOfMonth(DayNum d) const + template + inline ExtendedDayNum toFirstDayNumOfMonth(V v) const { - return DayNum(d - (lut[d].day_of_month - 1)); + const auto i = toLUTIndex(v); + return toDayNum(i - (lut[i].day_of_month - 1)); } - inline DayNum toFirstDayNumOfMonth(time_t t) const - { - return toFirstDayNumOfMonth(toDayNum(t)); - } +// inline DayNum toFirstDayNumOfMonth(time_t t) const +// { +// return toFirstDayNumOfMonth(toDayNum(t)); +// } /// Round down to start of quarter. - inline DayNum toFirstDayNumOfQuarter(DayNum d) const + template + inline ExtendedDayNum toFirstDayNumOfQuarter(V v) const { - DayNum index = d; + return toDayNum(toFirstDayOfQuarterIndex(v)); + } + + template + inline LUTIndex toFirstDayOfQuarterIndex(V v) const + { + //return fromDayNum(toFirstDayNumOfQuarter(v)); + auto index = toLUTIndex(v); size_t month_inside_quarter = (lut[index].month - 1) % 3; index -= lut[index].day_of_month; @@ -175,17 +297,13 @@ public: --month_inside_quarter; } - return DayNum(index + 1); + return index + 1; } - inline DayNum toFirstDayNumOfQuarter(time_t t) const + template + inline time_t toFirstDayOfQuarter(V v) const { - return toFirstDayNumOfQuarter(toDayNum(t)); - } - - inline time_t toFirstDayOfQuarter(time_t t) const - { - return fromDayNum(toFirstDayNumOfQuarter(t)); + return toDate(toFirstDayOfQuarterIndex(v)); } /// Round down to start of year. 
@@ -194,48 +312,47 @@ public: return lut[years_lut[lut[findIndex(t)].year - DATE_LUT_MIN_YEAR]].date; } - inline DayNum toFirstDayNumOfYear(DayNum d) const + template + inline LUTIndex toFirstDayNumOfYearIndex(V v) const { - return years_lut[lut[d].year - DATE_LUT_MIN_YEAR]; + return years_lut[lut[toLUTIndex(v)].year - DATE_LUT_MIN_YEAR]; } - inline DayNum toFirstDayNumOfYear(time_t t) const + template + inline ExtendedDayNum toFirstDayNumOfYear(V v) const { - return toFirstDayNumOfYear(toDayNum(t)); + return toDayNum(toFirstDayNumOfYearIndex(v)); } inline time_t toFirstDayOfNextMonth(time_t t) const { - DayNum index = findIndex(t); + auto index = findIndex(t); index += 32 - lut[index].day_of_month; return lut[index - (lut[index].day_of_month - 1)].date; } inline time_t toFirstDayOfPrevMonth(time_t t) const { - DayNum index = findIndex(t); + auto index = findIndex(t); index -= lut[index].day_of_month; return lut[index - (lut[index].day_of_month - 1)].date; } - inline UInt8 daysInMonth(DayNum d) const + template + inline UInt8 daysInMonth(V v) const { - return lut[d].days_in_month; + const auto i = toLUTIndex(v); + return lut[i].days_in_month; } - inline UInt8 daysInMonth(time_t t) const - { - return find(t).days_in_month; - } - - inline UInt8 daysInMonth(UInt16 year, UInt8 month) const + inline UInt8 daysInMonth(Int16 year, UInt8 month) const { UInt16 idx = year - DATE_LUT_MIN_YEAR; if (unlikely(idx >= DATE_LUT_YEARS)) return 31; /// Implementation specific behaviour on overflow. /// 32 makes arithmetic more simple. 
- DayNum any_day_of_month = DayNum(years_lut[idx] + 32 * (month - 1)); + const auto any_day_of_month = years_lut[year - DATE_LUT_MIN_YEAR] + 32 * (month - 1); return lut[any_day_of_month].days_in_month; } @@ -243,37 +360,38 @@ public: */ inline time_t toDateAndShift(time_t t, Int32 days) const { - return lut[DayNum(findIndex(t) + days)].date; + return lut[findIndex(t) + days].date; } inline time_t toTime(time_t t) const { - DayNum index = findIndex(t); + auto index = findIndex(t); - if (unlikely(index == 0 || index > DATE_LUT_MAX_DAY_NUM)) + if (unlikely(index == daynum_offset_epoch || index > DATE_LUT_MAX_DAY_NUM)) return t + offset_at_start_of_epoch; time_t res = t - lut[index].date; - if (res >= lut[index].time_at_offset_change) - res += lut[index].amount_of_offset_change; + if (res >= lut[index].time_at_offset_change()) + res += lut[index].amount_of_offset_change(); return res - offset_at_start_of_epoch; /// Starting at 1970-01-01 00:00:00 local time. } inline unsigned toHour(time_t t) const { - DayNum index = findIndex(t); + auto index = findIndex(t); /// If it is overflow case, - /// then limit number of hours to avoid insane results like 1970-01-01 89:28:15 - if (unlikely(index == 0 || index > DATE_LUT_MAX_DAY_NUM)) + /// than limit number of hours to avoid insane results like 1970-01-01 89:28:15 + if (unlikely(index == daynum_offset_epoch || index > DATE_LUT_MAX_DAY_NUM)) return static_cast((t + offset_at_start_of_epoch) / 3600) % 24; time_t time = t - lut[index].date; - if (time >= lut[index].time_at_offset_change) - time += lut[index].amount_of_offset_change; + /// Data is cleaned to avoid possibility of underflow. + if (time >= lut[index].time_at_offset_change()) + time += lut[index].amount_of_offset_change(); unsigned res = time / 3600; return res <= 23 ? res : 0; @@ -286,24 +404,32 @@ public: */ inline time_t timezoneOffset(time_t t) const { - DayNum index = findIndex(t); + const auto index = findIndex(t); /// Calculate daylight saving offset first. 
/// Because the "amount_of_offset_change" in LUT entry only exists in the change day, it's costly to scan it from the very begin. /// but we can figure out all the accumulated offsets from 1970-01-01 to that day just by get the whole difference between lut[].date, /// and then, we can directly subtract multiple 86400s to get the real DST offsets for the leap seconds is not considered now. - time_t res = (lut[index].date - lut[0].date) % 86400; + time_t res = (lut[index].date - lut[daynum_offset_epoch].date) % 86400; /// As so far to know, the maximal DST offset couldn't be more than 2 hours, so after the modulo operation the remainder /// will sits between [-offset --> 0 --> offset] which respectively corresponds to moving clock forward or backward. res = res > 43200 ? (86400 - res) : (0 - res); /// Check if has a offset change during this day. Add the change when cross the line - if (lut[index].amount_of_offset_change != 0 && t >= lut[index].date + lut[index].time_at_offset_change) - res += lut[index].amount_of_offset_change; + if (lut[index].amount_of_offset_change() != 0 && t >= lut[index].date + lut[index].time_at_offset_change()) + res += lut[index].amount_of_offset_change(); return res + offset_at_start_of_epoch; } + static inline time_t toSecondsSinceTheDayStart(time_t t) + { + t %= 86400; + t = (t < 0 ? t + 86400 : t); + + return t; + } + /** Only for time zones with/when offset from UTC is multiple of five minutes. * This is true for all time zones: right now, all time zones have an offset that is multiple of 15 minutes. * @@ -314,13 +440,15 @@ public: * Also please note, that unix timestamp doesn't count "leap seconds": * each minute, with added or subtracted leap second, spans exactly 60 unix timestamps. 
*/ - - inline unsigned toSecond(time_t t) const { return UInt32(t) % 60; } + inline unsigned toSecond(time_t t) const + { + return toSecondsSinceTheDayStart(t) % 60; + } inline unsigned toMinute(time_t t) const { if (offset_is_whole_number_of_hours_everytime) - return (UInt32(t) / 60) % 60; + return (toSecondsSinceTheDayStart(t) / 60) % 60; UInt32 date = find(t).date; return (UInt32(t) - date) / 60 % 60; @@ -348,80 +476,85 @@ public: * because the same calendar day starts/ends at different timestamps in different time zones) */ - inline DayNum toDayNum(time_t t) const { return findIndex(t); } - inline time_t fromDayNum(DayNum d) const { return lut[d].date; } +// inline DayNum toDayNum(time_t t) const { return DayNum{findIndex(t) - daynum_offset_epoch}; } +// inline ExtendedDayNum toExtendedDayNum(time_t t) const { return ExtendedDayNum{findIndex(t) - daynum_offset_epoch}; } + inline time_t fromDayNum(DayNum d) const { return lut[toLUTIndex(d)].date; } + inline time_t fromDayNum(ExtendedDayNum d) const { return lut[toLUTIndex(d)].date; } - inline time_t toDate(DayNum d) const { return lut[d].date; } - inline unsigned toMonth(DayNum d) const { return lut[d].month; } - inline unsigned toQuarter(DayNum d) const { return (lut[d].month - 1) / 3 + 1; } - inline unsigned toYear(DayNum d) const { return lut[d].year; } - inline unsigned toDayOfWeek(DayNum d) const { return lut[d].day_of_week; } - inline unsigned toDayOfMonth(DayNum d) const { return lut[d].day_of_month; } - inline unsigned toDayOfYear(DayNum d) const { return d + 1 - toFirstDayNumOfYear(d); } - - inline unsigned toDayOfYear(time_t t) const { return toDayOfYear(toDayNum(t)); } + template + inline time_t toDate(V v) const { return lut[toLUTIndex(v)].date; } + template + inline unsigned toMonth(V v) const { return lut[toLUTIndex(v)].month; } + template + inline unsigned toQuarter(V v) const { return (lut[toLUTIndex(v)].month - 1) / 3 + 1; } + template + inline Int16 toYear(V v) const { return 
lut[toLUTIndex(v)].year; } + template + inline unsigned toDayOfWeek(V v) const { return lut[toLUTIndex(v)].day_of_week; } + template + inline unsigned toDayOfMonth(V v) const { return lut[toLUTIndex(v)].day_of_month; } + template + inline unsigned toDayOfYear(V v) const + { + // TODO: different overload for ExtendedDayNum + const auto i = toLUTIndex(v); + return i + 1 - toFirstDayNumOfYearIndex(i); + } /// Number of week from some fixed moment in the past. Week begins at monday. /// (round down to monday and divide DayNum by 7; we made an assumption, /// that in domain of the function there was no weeks with any other number of days than 7) - inline unsigned toRelativeWeekNum(DayNum d) const + template + inline unsigned toRelativeWeekNum(V v) const { + const auto i = toLUTIndex(v); /// We add 8 to avoid underflow at beginning of unix epoch. - return (d + 8 - toDayOfWeek(d)) / 7; - } - - inline unsigned toRelativeWeekNum(time_t t) const - { - return toRelativeWeekNum(toDayNum(t)); + return toDayNum(i + 8 - toDayOfWeek(i)) / 7; } /// Get year that contains most of the current week. Week begins at monday. - inline unsigned toISOYear(DayNum d) const + template + inline unsigned toISOYear(V v) const { + const auto i = toLUTIndex(v); /// That's effectively the year of thursday of current week. - return toYear(DayNum(d + 4 - toDayOfWeek(d))); - } - - inline unsigned toISOYear(time_t t) const - { - return toISOYear(toDayNum(t)); + return toYear(toLUTIndex(i + 4 - toDayOfWeek(i))); } /// ISO year begins with a monday of the week that is contained more than by half in the corresponding calendar year. /// Example: ISO year 2019 begins at 2018-12-31. And ISO year 2017 begins at 2017-01-02. 
/// https://en.wikipedia.org/wiki/ISO_week_date - inline DayNum toFirstDayNumOfISOYear(DayNum d) const + template + inline LUTIndex toFirstDayNumOfISOYearIndex(V v) const { - auto iso_year = toISOYear(d); + const auto i = toLUTIndex(v); + auto iso_year = toISOYear(i); - DayNum first_day_of_year = years_lut[iso_year - DATE_LUT_MIN_YEAR]; + const auto first_day_of_year = years_lut[iso_year - DATE_LUT_MIN_YEAR]; auto first_day_of_week_of_year = lut[first_day_of_year].day_of_week; - return DayNum(first_day_of_week_of_year <= 4 + return LUTIndex{first_day_of_week_of_year <= 4 ? first_day_of_year + 1 - first_day_of_week_of_year - : first_day_of_year + 8 - first_day_of_week_of_year); + : first_day_of_year + 8 - first_day_of_week_of_year}; } - inline DayNum toFirstDayNumOfISOYear(time_t t) const + template + inline ExtendedDayNum toFirstDayNumOfISOYear(V v) const { - return toFirstDayNumOfISOYear(toDayNum(t)); + return toDayNum(toFirstDayNumOfISOYearIndex(v)); } inline time_t toFirstDayOfISOYear(time_t t) const { - return fromDayNum(toFirstDayNumOfISOYear(t)); + return lut[toFirstDayNumOfISOYearIndex(t)].date; } /// ISO 8601 week number. Week begins at monday. /// The week number 1 is the first week in year that contains 4 or more days (that's more than half). - inline unsigned toISOWeek(DayNum d) const + template + inline unsigned toISOWeek(V v) const { - return 1 + DayNum(toFirstDayNumOfWeek(d) - toFirstDayNumOfISOYear(d)) / 7; - } - - inline unsigned toISOWeek(time_t t) const - { - return toISOWeek(toDayNum(t)); + return 1 + (toFirstDayNumOfWeek(v) - toFirstDayNumOfISOYear(v)) / 7; } /* @@ -457,30 +590,33 @@ public: Otherwise it is the last week of the previous year, and the next week is week 1. 
*/ - inline YearWeek toYearWeek(DayNum d, UInt8 week_mode) const + template + inline YearWeek toYearWeek(V v, UInt8 week_mode) const { - bool newyear_day_mode = week_mode & static_cast(WeekModeFlag::NEWYEAR_DAY); + const bool newyear_day_mode = week_mode & static_cast(WeekModeFlag::NEWYEAR_DAY); week_mode = check_week_mode(week_mode); - bool monday_first_mode = week_mode & static_cast(WeekModeFlag::MONDAY_FIRST); + const bool monday_first_mode = week_mode & static_cast(WeekModeFlag::MONDAY_FIRST); bool week_year_mode = week_mode & static_cast(WeekModeFlag::YEAR); - bool first_weekday_mode = week_mode & static_cast(WeekModeFlag::FIRST_WEEKDAY); + const bool first_weekday_mode = week_mode & static_cast(WeekModeFlag::FIRST_WEEKDAY); + + const auto i = toLUTIndex(v); // Calculate week number of WeekModeFlag::NEWYEAR_DAY mode if (newyear_day_mode) { - return toYearWeekOfNewyearMode(d, monday_first_mode); + return toYearWeekOfNewyearMode(i, monday_first_mode); } - YearWeek yw(toYear(d), 0); + YearWeek yw(toYear(i), 0); UInt16 days = 0; - UInt16 daynr = makeDayNum(yw.first, toMonth(d), toDayOfMonth(d)); - UInt16 first_daynr = makeDayNum(yw.first, 1, 1); + const auto daynr = makeDayNum(yw.first, toMonth(i), toDayOfMonth(i)); + auto first_daynr = makeDayNum(yw.first, 1, 1); // 0 for monday, 1 for tuesday ... // get weekday from first day in year. 
- UInt16 weekday = calc_weekday(DayNum(first_daynr), !monday_first_mode); + UInt16 weekday = calc_weekday(first_daynr, !monday_first_mode); - if (toMonth(d) == 1 && toDayOfMonth(d) <= static_cast(7 - weekday)) + if (toMonth(i) == 1 && toDayOfMonth(i) <= static_cast(7 - weekday)) { if (!week_year_mode && ((first_weekday_mode && weekday != 0) || (!first_weekday_mode && weekday >= 4))) return yw; @@ -511,30 +647,34 @@ public: /// Calculate week number of WeekModeFlag::NEWYEAR_DAY mode /// The week number 1 is the first week in year that contains January 1, - inline YearWeek toYearWeekOfNewyearMode(DayNum d, bool monday_first_mode) const + template + inline YearWeek toYearWeekOfNewyearMode(V v, bool monday_first_mode) const { YearWeek yw(0, 0); UInt16 offset_day = monday_first_mode ? 0U : 1U; + const auto i = LUTIndex(v); + // Checking the week across the year - yw.first = toYear(DayNum(d + 7 - toDayOfWeek(DayNum(d + offset_day)))); + yw.first = toYear(i + 7 - toDayOfWeek(i + offset_day)); - DayNum first_day = makeDayNum(yw.first, 1, 1); - DayNum this_day = d; + auto first_day = makeLUTIndex(yw.first, 1, 1); + auto this_day = i; + //TODO: do not perform calculations in terms of DayNum, since that would under/overflow for extended range. if (monday_first_mode) { // Rounds down a date to the nearest Monday. first_day = toFirstDayNumOfWeek(first_day); - this_day = toFirstDayNumOfWeek(d); + this_day = toFirstDayNumOfWeek(i); } else { // Rounds down a date to the nearest Sunday. 
if (toDayOfWeek(first_day) != 7) - first_day = DayNum(first_day - toDayOfWeek(first_day)); - if (toDayOfWeek(d) != 7) - this_day = DayNum(d - toDayOfWeek(d)); + first_day = ExtendedDayNum(first_day - toDayOfWeek(first_day)); + if (toDayOfWeek(i) != 7) + this_day = ExtendedDayNum(i - toDayOfWeek(i)); } yw.second = (this_day - first_day) / 7 + 1; return yw; @@ -543,16 +683,17 @@ public: /** * get first day of week with week_mode, return Sunday or Monday */ - inline DayNum toFirstDayNumOfWeek(DayNum d, UInt8 week_mode) const + template + inline ExtendedDayNum toFirstDayNumOfWeek(V v, UInt8 week_mode) const { bool monday_first_mode = week_mode & static_cast(WeekModeFlag::MONDAY_FIRST); if (monday_first_mode) { - return toFirstDayNumOfWeek(d); + return toFirstDayNumOfWeek(v); } else { - return (toDayOfWeek(d) != 7) ? DayNum(d - toDayOfWeek(d)) : d; + return (toDayOfWeek(v) != 7) ? ExtendedDayNum(v - toDayOfWeek(v)) : toDayNum(v); } } @@ -568,39 +709,35 @@ public: /** Calculate weekday from d. * Returns 0 for monday, 1 for tuesday... */ - inline unsigned calc_weekday(DayNum d, bool sunday_first_day_of_week) const + template + inline unsigned calc_weekday(V v, bool sunday_first_day_of_week) const { + const auto i = toLUTIndex(v); if (!sunday_first_day_of_week) - return toDayOfWeek(d) - 1; + return toDayOfWeek(i) - 1; else - return toDayOfWeek(DayNum(d + 1)) - 1; + return toDayOfWeek(i + 1) - 1; } /// Calculate days in one year. - inline unsigned calc_days_in_year(UInt16 year) const + inline unsigned calc_days_in_year(Int32 year) const { return ((year & 3) == 0 && (year % 100 || (year % 400 == 0 && year)) ? 
366 : 365); } /// Number of month from some fixed moment in the past (year * 12 + month) - inline unsigned toRelativeMonthNum(DayNum d) const + template + inline unsigned toRelativeMonthNum(V v) const { - return lut[d].year * 12 + lut[d].month; + const auto i = toLUTIndex(v); + return lut[i].year * 12 + lut[i].month; } - inline unsigned toRelativeMonthNum(time_t t) const + template + inline unsigned toRelativeQuarterNum(V v) const { - return toRelativeMonthNum(toDayNum(t)); - } - - inline unsigned toRelativeQuarterNum(DayNum d) const - { - return lut[d].year * 4 + (lut[d].month - 1) / 3; - } - - inline unsigned toRelativeQuarterNum(time_t t) const - { - return toRelativeQuarterNum(toDayNum(t)); + const auto i = toLUTIndex(v); + return lut[i].year * 4 + (lut[i].month - 1) / 3; } /// We count all hour-length intervals, unrelated to offset changes. @@ -614,9 +751,10 @@ public: return (t + 86400 - offset_at_start_of_epoch) / 3600; } - inline time_t toRelativeHourNum(DayNum d) const + template + inline time_t toRelativeHourNum(V v) const { - return toRelativeHourNum(lut[d].date); + return toRelativeHourNum(lut[toLUTIndex(v)].date); } inline time_t toRelativeMinuteNum(time_t t) const @@ -624,48 +762,52 @@ public: return t / 60; } - inline time_t toRelativeMinuteNum(DayNum d) const + template + inline time_t toRelativeMinuteNum(V v) const { - return toRelativeMinuteNum(lut[d].date); + return toRelativeMinuteNum(lut[toLUTIndex(v)].date); } - inline DayNum toStartOfYearInterval(DayNum d, UInt64 years) const + template + inline ExtendedDayNum toStartOfYearInterval(V v, UInt64 years) const { if (years == 1) - return toFirstDayNumOfYear(d); - return years_lut[(lut[d].year - DATE_LUT_MIN_YEAR) / years * years]; + return toFirstDayNumOfYear(v); + + const auto i = toLUTIndex(v); + return toDayNum(years_lut[lut[i].year / years * years - DATE_LUT_MIN_YEAR]); } - inline DayNum toStartOfQuarterInterval(DayNum d, UInt64 quarters) const + inline ExtendedDayNum 
toStartOfQuarterInterval(ExtendedDayNum d, UInt64 quarters) const { if (quarters == 1) return toFirstDayNumOfQuarter(d); return toStartOfMonthInterval(d, quarters * 3); } - inline DayNum toStartOfMonthInterval(DayNum d, UInt64 months) const + inline ExtendedDayNum toStartOfMonthInterval(ExtendedDayNum d, UInt64 months) const { if (months == 1) return toFirstDayNumOfMonth(d); - const auto & date = lut[d]; + const auto & date = lut[toLUTIndex(d)]; UInt32 month_total_index = (date.year - DATE_LUT_MIN_YEAR) * 12 + date.month - 1; - return years_months_lut[month_total_index / months * months]; + return toDayNum(years_months_lut[month_total_index / months * months]); } - inline DayNum toStartOfWeekInterval(DayNum d, UInt64 weeks) const + inline ExtendedDayNum toStartOfWeekInterval(ExtendedDayNum d, UInt64 weeks) const { if (weeks == 1) return toFirstDayNumOfWeek(d); UInt64 days = weeks * 7; // January 1st 1970 was Thursday so we need this 4-days offset to make weeks start on Monday. - return DayNum(4 + (d - 4) / days * days); + return ExtendedDayNum(4 + (d - 4) / days * days); } - inline time_t toStartOfDayInterval(DayNum d, UInt64 days) const + inline time_t toStartOfDayInterval(ExtendedDayNum d, UInt64 days) const { if (days == 1) return toDate(d); - return lut[d / days * days].date; + return lut[toLUTIndex(ExtendedDayNum(d / days * days))].date; } inline time_t toStartOfHourInterval(time_t t, UInt64 hours) const @@ -694,33 +836,41 @@ public: return t / seconds * seconds; } - /// Create DayNum from year, month, day of month. - inline DayNum makeDayNum(UInt16 year, UInt8 month, UInt8 day_of_month) const + inline LUTIndex makeLUTIndex(Int16 year, UInt8 month, UInt8 day_of_month) const { if (unlikely(year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || day_of_month < 1 || day_of_month > 31)) - return DayNum(0); // TODO (nemkov, DateTime64 phase 2): implement creating real date for year outside of LUT range. 
+ return LUTIndex(0); - // The day after 2106-02-07 will not stored fully as struct Values, so just overflow it as 0 - if (unlikely(year == DATE_LUT_MAX_YEAR && (month > 2 || (month == 2 && day_of_month > 7)))) - return DayNum(0); - - return DayNum(years_months_lut[(year - DATE_LUT_MIN_YEAR) * 12 + month - 1] + day_of_month - 1); + return LUTIndex{years_months_lut[(year - DATE_LUT_MIN_YEAR) * 12 + month - 1] + day_of_month - 1}; } - inline time_t makeDate(UInt16 year, UInt8 month, UInt8 day_of_month) const + /// Create DayNum from year, month, day of month. + inline ExtendedDayNum makeDayNum(Int16 year, UInt8 month, UInt8 day_of_month) const { - return lut[makeDayNum(year, month, day_of_month)].date; + if (unlikely(year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || day_of_month < 1 || day_of_month > 31)) + return ExtendedDayNum(0); + + // The day after 2283 are not stored fully as struct Values, so just overflow it as 0 + if (unlikely(year > DATE_LUT_MAX_YEAR)) + return ExtendedDayNum(0); + + return toDayNum(makeLUTIndex(year, month, day_of_month)); + } + + inline time_t makeDate(Int16 year, UInt8 month, UInt8 day_of_month) const + { + return lut[makeLUTIndex(year, month, day_of_month)].date; } /** Does not accept daylight saving time as argument: in case of ambiguity, it choose greater timestamp. 
*/ - inline time_t makeDateTime(UInt16 year, UInt8 month, UInt8 day_of_month, UInt8 hour, UInt8 minute, UInt8 second) const + inline time_t makeDateTime(Int16 year, UInt8 month, UInt8 day_of_month, UInt8 hour, UInt8 minute, UInt8 second) const { - size_t index = makeDayNum(year, month, day_of_month); + size_t index = makeLUTIndex(year, month, day_of_month); UInt32 time_offset = hour * 3600 + minute * 60 + second; - if (time_offset >= lut[index].time_at_offset_change) - time_offset -= lut[index].amount_of_offset_change; + if (time_offset >= lut[index].time_at_offset_change()) + time_offset -= lut[index].amount_of_offset_change(); UInt32 res = lut[index].date + time_offset; @@ -730,30 +880,20 @@ public: return res; } - inline const Values & getValues(DayNum d) const { return lut[d]; } - inline const Values & getValues(time_t t) const { return lut[findIndex(t)]; } + template + inline const Values & getValues(V v) const { return lut[toLUTIndex(v)]; } - inline UInt32 toNumYYYYMM(time_t t) const + template + inline UInt32 toNumYYYYMM(V v) const { - const Values & values = find(t); + const Values & values = getValues(v); return values.year * 100 + values.month; } - inline UInt32 toNumYYYYMM(DayNum d) const + template + inline UInt32 toNumYYYYMMDD(V v) const { - const Values & values = lut[d]; - return values.year * 100 + values.month; - } - - inline UInt32 toNumYYYYMMDD(time_t t) const - { - const Values & values = find(t); - return values.year * 10000 + values.month * 100 + values.day_of_month; - } - - inline UInt32 toNumYYYYMMDD(DayNum d) const - { - const Values & values = lut[d]; + const Values & values = getValues(v); return values.year * 10000 + values.month * 100 + values.day_of_month; } @@ -762,7 +902,7 @@ public: return makeDate(num / 10000, num / 100 % 100, num % 100); } - inline DayNum YYYYMMDDToDayNum(UInt32 num) const + inline ExtendedDayNum YYYYMMDDToDayNum(UInt32 num) const { return makeDayNum(num / 10000, num / 100 % 100, num % 100); } @@ -796,13 +936,14 
@@ public: inline NO_SANITIZE_UNDEFINED time_t addDays(time_t t, Int64 delta) const { - DayNum index = findIndex(t); + auto index = findIndex(t); time_t time_offset = toHour(t) * 3600 + toMinute(t) * 60 + toSecond(t); index += delta; + index &= date_lut_mask; - if (time_offset >= lut[index].time_at_offset_change) - time_offset -= lut[index].amount_of_offset_change; + if (time_offset >= lut[index].time_at_offset_change()) + time_offset -= lut[index].amount_of_offset_change(); return lut[index].date + time_offset; } @@ -812,7 +953,7 @@ public: return addDays(t, delta * 7); } - inline UInt8 saturateDayOfMonth(UInt16 year, UInt8 month, UInt8 day_of_month) const + inline UInt8 saturateDayOfMonth(Int16 year, UInt8 month, UInt8 day_of_month) const { if (likely(day_of_month <= 28)) return day_of_month; @@ -825,23 +966,10 @@ public: return day_of_month; } - /// If resulting month has less deys than source month, then saturation can happen. - /// Example: 31 Aug + 1 month = 30 Sep. - inline time_t addMonths(time_t t, Int64 delta) const + template + inline LUTIndex addMonthsIndex(V v, Int64 delta) const { - DayNum result_day = addMonths(toDayNum(t), delta); - - time_t time_offset = toHour(t) * 3600 + toMinute(t) * 60 + toSecond(t); - - if (time_offset >= lut[result_day].time_at_offset_change) - time_offset -= lut[result_day].amount_of_offset_change; - - return lut[result_day].date + time_offset; - } - - inline NO_SANITIZE_UNDEFINED DayNum addMonths(DayNum d, Int64 delta) const - { - const Values & values = lut[d]; + const Values & values = lut[toLUTIndex(v)]; Int64 month = static_cast(values.month) + delta; @@ -851,7 +979,7 @@ public: month = ((month - 1) % 12) + 1; auto day_of_month = saturateDayOfMonth(year, month, values.day_of_month); - return makeDayNum(year, month, day_of_month); + return makeLUTIndex(year, month, day_of_month); } else { @@ -859,36 +987,43 @@ public: month = 12 - (-month % 12); auto day_of_month = saturateDayOfMonth(year, month, values.day_of_month); - 
return makeDayNum(year, month, day_of_month); + return makeLUTIndex(year, month, day_of_month); } } - inline NO_SANITIZE_UNDEFINED time_t addQuarters(time_t t, Int64 delta) const + /// If resulting month has less deys than source month, then saturation can happen. + /// Example: 31 Aug + 1 month = 30 Sep. + inline time_t NO_SANITIZE_UNDEFINED addMonths(time_t t, Int64 delta) const { - return addMonths(t, delta * 3); - } - - inline NO_SANITIZE_UNDEFINED DayNum addQuarters(DayNum d, Int64 delta) const - { - return addMonths(d, delta * 3); - } - - /// Saturation can occur if 29 Feb is mapped to non-leap year. - inline NO_SANITIZE_UNDEFINED time_t addYears(time_t t, Int64 delta) const - { - DayNum result_day = addYears(toDayNum(t), delta); + const auto result_day = addMonthsIndex(t, delta); time_t time_offset = toHour(t) * 3600 + toMinute(t) * 60 + toSecond(t); - if (time_offset >= lut[result_day].time_at_offset_change) - time_offset -= lut[result_day].amount_of_offset_change; + if (time_offset >= lut[result_day].time_at_offset_change()) + time_offset -= lut[result_day].amount_of_offset_change(); return lut[result_day].date + time_offset; } - inline NO_SANITIZE_UNDEFINED DayNum addYears(DayNum d, Int64 delta) const + inline ExtendedDayNum NO_SANITIZE_UNDEFINED addMonths(ExtendedDayNum d, Int64 delta) const { - const Values & values = lut[d]; + return toDayNum(addMonthsIndex(d, delta)); + } + + inline time_t NO_SANITIZE_UNDEFINED addQuarters(time_t t, Int64 delta) const + { + return addMonths(t, delta * 3); + } + + inline ExtendedDayNum addQuarters(ExtendedDayNum d, Int64 delta) const + { + return addMonths(d, delta * 3); + } + + template + inline LUTIndex NO_SANITIZE_UNDEFINED addYearsIndex(V v, Int64 delta) const + { + const Values & values = lut[toLUTIndex(v)]; auto year = values.year + delta; auto month = values.month; @@ -898,13 +1033,31 @@ public: if (unlikely(day_of_month == 29 && month == 2)) day_of_month = saturateDayOfMonth(year, month, day_of_month); - return 
makeDayNum(year, month, day_of_month); + return makeLUTIndex(year, month, day_of_month); + } + + /// Saturation can occur if 29 Feb is mapped to non-leap year. + inline time_t addYears(time_t t, Int64 delta) const + { + auto result_day = addYearsIndex(t, delta); + + time_t time_offset = toHour(t) * 3600 + toMinute(t) * 60 + toSecond(t); + + if (time_offset >= lut[result_day].time_at_offset_change()) + time_offset -= lut[result_day].amount_of_offset_change(); + + return lut[result_day].date + time_offset; + } + + inline ExtendedDayNum addYears(ExtendedDayNum d, Int64 delta) const + { + return toDayNum(addYearsIndex(d, delta)); } inline std::string timeToString(time_t t) const { - const Values & values = find(t); + const Values & values = getValues(t); std::string s {"0000-00-00 00:00:00"}; @@ -933,7 +1086,7 @@ public: inline std::string dateToString(time_t t) const { - const Values & values = find(t); + const Values & values = getValues(t); std::string s {"0000-00-00"}; @@ -949,9 +1102,9 @@ public: return s; } - inline std::string dateToString(DayNum d) const + inline std::string dateToString(ExtendedDayNum d) const { - const Values & values = lut[d]; + const Values & values = getValues(d); std::string s {"0000-00-00"}; diff --git a/base/common/DayNum.h b/base/common/DayNum.h index a4ef0c43b69..5cf4d4635c8 100644 --- a/base/common/DayNum.h +++ b/base/common/DayNum.h @@ -7,3 +7,8 @@ * See DateLUTImpl for usage examples. 
*/ STRONG_TYPEDEF(UInt16, DayNum) + +/** Represent number of days since 1970-01-01 but in extended range, + * for dates before 1970-01-01 and after 2105 + */ +STRONG_TYPEDEF(Int32, ExtendedDayNum) diff --git a/base/common/LocalDate.h b/base/common/LocalDate.h index e5ebe877bc5..7e1260c1385 100644 --- a/base/common/LocalDate.h +++ b/base/common/LocalDate.h @@ -105,7 +105,8 @@ public: DayNum getDayNum() const { - return DateLUT::instance().makeDayNum(m_year, m_month, m_day); + const auto & lut = DateLUT::instance(); + return DayNum(lut.makeDayNum(m_year, m_month, m_day).toUnderType()); } operator DayNum() const diff --git a/base/common/strong_typedef.h b/base/common/strong_typedef.h index d9850a25c37..77b83bfa6e5 100644 --- a/base/common/strong_typedef.h +++ b/base/common/strong_typedef.h @@ -12,6 +12,7 @@ private: T t; public: + using UnderlyingType = T; template ::type> explicit StrongTypedef(const T & t_) : t(t_) {} template ::type> diff --git a/base/common/tests/CMakeLists.txt b/base/common/tests/CMakeLists.txt index b7082ee9900..b335b302cb0 100644 --- a/base/common/tests/CMakeLists.txt +++ b/base/common/tests/CMakeLists.txt @@ -16,7 +16,9 @@ target_link_libraries (realloc-perf PRIVATE common) add_check(local_date_time_comparison) if(USE_GTEST) - add_executable(unit_tests_libcommon gtest_json_test.cpp gtest_strong_typedef.cpp gtest_find_symbols.cpp) + add_executable(unit_tests_libcommon gtest_json_test.cpp gtest_strong_typedef.cpp gtest_find_symbols.cpp gtest_DateLutImpl.cpp + ${CMAKE_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp + ) target_link_libraries(unit_tests_libcommon PRIVATE common ${GTEST_MAIN_LIBRARIES} ${GTEST_LIBRARIES}) add_check(unit_tests_libcommon) endif() diff --git a/base/common/tests/gtest_DateLutImpl.cpp b/base/common/tests/gtest_DateLutImpl.cpp new file mode 100644 index 00000000000..395e2eddb00 --- /dev/null +++ b/base/common/tests/gtest_DateLutImpl.cpp @@ -0,0 +1,515 @@ +#include +#include + +#include + +#include 
+#include + +/// For the expansion of gtest macros. +#if defined(__clang__) + #pragma clang diagnostic ignored "-Wused-but-marked-unused" +#endif + +// All timezones present at build time and embedded into CH binary. +extern const char * auto_time_zones[]; + +namespace +{ + +cctz::civil_day YYYYMMDDToDay(unsigned value) +{ + return cctz::civil_day( + value / 10000, // year + (value % 10000) / 100, // month + value % 100); // day +} + +cctz::civil_second YYYYMMDDHMMSSToSecond(std::uint64_t value) +{ + return cctz::civil_second( + value / 10000000000, + value / 100000000 % 100, + value / 1000000 % 100, + value / 10000 % 100, + value / 100 % 100, + value % 100); +} + + +std::vector allTimezones() +{ + std::vector result; + + auto timezone_name = auto_time_zones; + while (*timezone_name) + { + result.push_back(*timezone_name); + ++timezone_name; + } + + return result; +} + +struct FailuresCount +{ + size_t non_fatal = 0; + size_t fatal = 0; + size_t total = 0; +}; + +FailuresCount countFailures(const ::testing::TestResult & test_result) +{ + FailuresCount failures{0, 0, 0}; + const size_t count = test_result.total_part_count(); + for (size_t i = 0; i < count; ++i) + { + const auto & part = test_result.GetTestPartResult(i); + if (part.nonfatally_failed()) + { + ++failures.non_fatal; + ++failures.total; + } + if (part.fatally_failed()) + { + ++failures.fatal; + ++failures.total; + } + } + + return failures; +} + +} + +TEST(YYYYMMDDToDay, Test) +{ + std::cerr << YYYYMMDDHMMSSToSecond(19700101'00'00'00) << std::endl; +} + +TEST(DateLUTTest, TimeValuesInMiddleOfRange) +{ + const DateLUTImpl lut("Europe/Minsk"); + const time_t time = 1568650811; // 2019-09-16 19:20:11 (Monday) + + EXPECT_EQ(lut.getTimeZone(), "Europe/Minsk"); + EXPECT_EQ(lut.getOffsetAtStartOfEpoch(), 3600*3); // UTC-3 + + EXPECT_EQ(lut.toDate(time), 1568581200); + EXPECT_EQ(lut.toMonth(time), 9); + EXPECT_EQ(lut.toQuarter(time), 3); + EXPECT_EQ(lut.toYear(time), 2019); + EXPECT_EQ(lut.toDayOfMonth(time), 
16); + + EXPECT_EQ(lut.toFirstDayOfWeek(time), 1568581200 /*time_t*/); + EXPECT_EQ(lut.toFirstDayNumOfWeek(time), DayNum(18155) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayOfMonth(time), 1567285200 /*time_t*/); + EXPECT_EQ(lut.toFirstDayNumOfMonth(time), DayNum(18140) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayNumOfQuarter(time), DayNum(18078) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayOfQuarter(time), 1561928400 /*time_t*/); + EXPECT_EQ(lut.toFirstDayOfYear(time), 1546290000 /*time_t*/); + EXPECT_EQ(lut.toFirstDayNumOfYear(time), DayNum(17897) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayOfNextMonth(time), 1569877200 /*time_t*/); + EXPECT_EQ(lut.toFirstDayOfPrevMonth(time), 1564606800 /*time_t*/); + EXPECT_EQ(lut.daysInMonth(time), 30 /*UInt8*/); + EXPECT_EQ(lut.toDateAndShift(time, 10), 1569445200 /*time_t*/); + EXPECT_EQ(lut.toTime(time), 58811 /*time_t*/); + EXPECT_EQ(lut.toHour(time), 19 /*unsigned*/); + EXPECT_EQ(lut.toSecond(time), 11 /*unsigned*/); + EXPECT_EQ(lut.toMinute(time), 20 /*unsigned*/); + EXPECT_EQ(lut.toStartOfMinute(time), 1568650800 /*time_t*/); + EXPECT_EQ(lut.toStartOfFiveMinute(time), 1568650800 /*time_t*/); + EXPECT_EQ(lut.toStartOfFifteenMinutes(time), 1568650500 /*time_t*/); + EXPECT_EQ(lut.toStartOfTenMinutes(time), 1568650800 /*time_t*/); + EXPECT_EQ(lut.toStartOfHour(time), 1568649600 /*time_t*/); + EXPECT_EQ(lut.toDayNum(time), DayNum(18155) /*DayNum*/); + EXPECT_EQ(lut.toDayOfYear(time), 259 /*unsigned*/); + EXPECT_EQ(lut.toRelativeWeekNum(time), 2594 /*unsigned*/); + EXPECT_EQ(lut.toISOYear(time), 2019 /*unsigned*/); + EXPECT_EQ(lut.toFirstDayNumOfISOYear(time), DayNum(17896) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayOfISOYear(time), 1546203600 /*time_t*/); + EXPECT_EQ(lut.toISOWeek(time), 38 /*unsigned*/); + EXPECT_EQ(lut.toRelativeMonthNum(time), 24237 /*unsigned*/); + EXPECT_EQ(lut.toRelativeQuarterNum(time), 8078 /*unsigned*/); + EXPECT_EQ(lut.toRelativeHourNum(time), 435736 /*time_t*/); + EXPECT_EQ(lut.toRelativeMinuteNum(time), 26144180 
/*time_t*/); + EXPECT_EQ(lut.toStartOfHourInterval(time, 5), 1568646000 /*time_t*/); + EXPECT_EQ(lut.toStartOfMinuteInterval(time, 6), 1568650680 /*time_t*/); + EXPECT_EQ(lut.toStartOfSecondInterval(time, 7), 1568650811 /*time_t*/); + EXPECT_EQ(lut.toNumYYYYMM(time), 201909 /*UInt32*/); + EXPECT_EQ(lut.toNumYYYYMMDD(time), 20190916 /*UInt32*/); + EXPECT_EQ(lut.toNumYYYYMMDDhhmmss(time), 20190916192011 /*UInt64*/); + EXPECT_EQ(lut.addDays(time, 100), 1577290811 /*time_t*/); + EXPECT_EQ(lut.addWeeks(time, 100), 1629130811 /*time_t*/); + EXPECT_EQ(lut.addMonths(time, 100), 1831652411 /*time_t*/); + EXPECT_EQ(lut.addQuarters(time, 100), 2357655611 /*time_t*/); + EXPECT_EQ(lut.addYears(time, 10), 1884270011 /*time_t*/); + EXPECT_EQ(lut.timeToString(time), "2019-09-16 19:20:11" /*std::string*/); + EXPECT_EQ(lut.dateToString(time), "2019-09-16" /*std::string*/); +} + + +TEST(DateLUTTest, TimeValuesAtLeftBoderOfRange) +{ + const DateLUTImpl lut("UTC"); + const time_t time = 0; // 1970-01-01 00:00:00 (Thursday) + + EXPECT_EQ(lut.getTimeZone(), "UTC"); + + EXPECT_EQ(lut.toDate(time), 0); + EXPECT_EQ(lut.toMonth(time), 1); + EXPECT_EQ(lut.toQuarter(time), 1); + EXPECT_EQ(lut.toYear(time), 1970); + EXPECT_EQ(lut.toDayOfMonth(time), 1); + + EXPECT_EQ(lut.toFirstDayOfWeek(time), -259200 /*time_t*/); // 1969-12-29 00:00:00 + EXPECT_EQ(lut.toFirstDayNumOfWeek(time), ExtendedDayNum(-3) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayOfMonth(time), 0 /*time_t*/); + EXPECT_EQ(lut.toFirstDayNumOfMonth(time), DayNum(0) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayNumOfQuarter(time), DayNum(0) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayOfQuarter(time), 0 /*time_t*/); + EXPECT_EQ(lut.toFirstDayOfYear(time), 0 /*time_t*/); + EXPECT_EQ(lut.toFirstDayNumOfYear(time), DayNum(0) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayOfNextMonth(time), 2678400 /*time_t*/); + EXPECT_EQ(lut.toFirstDayOfPrevMonth(time), -2678400 /*time_t*/); // 1969-12-01 00:00:00 + EXPECT_EQ(lut.daysInMonth(time), 31 /*UInt8*/); + 
EXPECT_EQ(lut.toDateAndShift(time, 10), 864000 /*time_t*/); + EXPECT_EQ(lut.toTime(time), 0 /*time_t*/); + EXPECT_EQ(lut.toHour(time), 0 /*unsigned*/); + EXPECT_EQ(lut.toSecond(time), 0 /*unsigned*/); + EXPECT_EQ(lut.toMinute(time), 0 /*unsigned*/); + EXPECT_EQ(lut.toStartOfMinute(time), 0 /*time_t*/); + EXPECT_EQ(lut.toStartOfFiveMinute(time), 0 /*time_t*/); + EXPECT_EQ(lut.toStartOfFifteenMinutes(time), 0 /*time_t*/); + EXPECT_EQ(lut.toStartOfTenMinutes(time), 0 /*time_t*/); + EXPECT_EQ(lut.toStartOfHour(time), 0 /*time_t*/); + EXPECT_EQ(lut.toDayNum(time), DayNum(0) /*DayNum*/); + EXPECT_EQ(lut.toDayOfYear(time), 1 /*unsigned*/); + EXPECT_EQ(lut.toRelativeWeekNum(time), 0 /*unsigned*/); + EXPECT_EQ(lut.toISOYear(time), 1970 /*unsigned*/); + EXPECT_EQ(lut.toFirstDayNumOfISOYear(time), ExtendedDayNum(-3) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayOfISOYear(time), -259200 /*time_t*/); // 1969-12-29 00:00:00 + EXPECT_EQ(lut.toISOWeek(time), 1 /*unsigned*/); + EXPECT_EQ(lut.toRelativeMonthNum(time), 23641 /*unsigned*/); // ? + EXPECT_EQ(lut.toRelativeQuarterNum(time), 7880 /*unsigned*/); // ? 
+ EXPECT_EQ(lut.toRelativeHourNum(time), 0 /*time_t*/); + EXPECT_EQ(lut.toRelativeMinuteNum(time), 0 /*time_t*/); + EXPECT_EQ(lut.toStartOfHourInterval(time, 5), 0 /*time_t*/); + EXPECT_EQ(lut.toStartOfMinuteInterval(time, 6), 0 /*time_t*/); + EXPECT_EQ(lut.toStartOfSecondInterval(time, 7), 0 /*time_t*/); + EXPECT_EQ(lut.toNumYYYYMM(time), 197001 /*UInt32*/); + EXPECT_EQ(lut.toNumYYYYMMDD(time), 19700101 /*UInt32*/); + EXPECT_EQ(lut.toNumYYYYMMDDhhmmss(time), 19700101000000 /*UInt64*/); + EXPECT_EQ(lut.addDays(time, 100), 8640000 /*time_t*/); + EXPECT_EQ(lut.addWeeks(time, 100), 60480000 /*time_t*/); + EXPECT_EQ(lut.addMonths(time, 100), 262828800 /*time_t*/); + EXPECT_EQ(lut.addQuarters(time, 100), 788918400 /*time_t*/); + EXPECT_EQ(lut.addYears(time, 10), 315532800 /*time_t*/); + EXPECT_EQ(lut.timeToString(time), "1970-01-01 00:00:00" /*std::string*/); + EXPECT_EQ(lut.dateToString(time), "1970-01-01" /*std::string*/); +} + +TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOLDLut) +{ + // Value is at the right border of the OLD (small) LUT, and provides meaningful values where OLD LUT would provide garbage. 
+ const DateLUTImpl lut("UTC"); + + const time_t time = 4294343873; // 2106-01-31T01:17:53 (Sunday) + + EXPECT_EQ(lut.getTimeZone(), "UTC"); + + EXPECT_EQ(lut.toDate(time), 4294339200); + EXPECT_EQ(lut.toMonth(time), 1); + EXPECT_EQ(lut.toQuarter(time), 1); + EXPECT_EQ(lut.toYear(time), 2106); + EXPECT_EQ(lut.toDayOfMonth(time), 31); + + EXPECT_EQ(lut.toFirstDayOfWeek(time), 4293820800 /*time_t*/); + EXPECT_EQ(lut.toFirstDayNumOfWeek(time), DayNum(49697)); + EXPECT_EQ(lut.toFirstDayOfMonth(time), 4291747200 /*time_t*/); // 2016-01-01 + EXPECT_EQ(lut.toFirstDayNumOfMonth(time), DayNum(49673)); + EXPECT_EQ(lut.toFirstDayNumOfQuarter(time), DayNum(49673) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayOfQuarter(time), 4291747200 /*time_t*/); + EXPECT_EQ(lut.toFirstDayOfYear(time), 4291747200 /*time_t*/); + EXPECT_EQ(lut.toFirstDayNumOfYear(time), DayNum(49673) /*DayNum*/); + EXPECT_EQ(lut.toFirstDayOfNextMonth(time), 4294425600 /*time_t*/); // 2106-02-01 + EXPECT_EQ(lut.toFirstDayOfPrevMonth(time), 4289068800 /*time_t*/); // 2105-12-01 + EXPECT_EQ(lut.daysInMonth(time), 31 /*UInt8*/); + EXPECT_EQ(lut.toDateAndShift(time, 10), 4295203200 /*time_t*/); // 2106-02-10 + EXPECT_EQ(lut.toTime(time), 4673 /*time_t*/); + EXPECT_EQ(lut.toHour(time), 1 /*unsigned*/); + EXPECT_EQ(lut.toMinute(time), 17 /*unsigned*/); + EXPECT_EQ(lut.toSecond(time), 53 /*unsigned*/); + EXPECT_EQ(lut.toStartOfMinute(time), 4294343820 /*time_t*/); + EXPECT_EQ(lut.toStartOfFiveMinute(time), 4294343700 /*time_t*/); + EXPECT_EQ(lut.toStartOfFifteenMinutes(time), 4294343700 /*time_t*/); + EXPECT_EQ(lut.toStartOfTenMinutes(time), 4294343400 /*time_t*/); + EXPECT_EQ(lut.toStartOfHour(time), 4294342800 /*time_t*/); + EXPECT_EQ(lut.toDayNum(time), DayNum(49703) /*DayNum*/); + EXPECT_EQ(lut.toDayOfYear(time), 31 /*unsigned*/); + EXPECT_EQ(lut.toRelativeWeekNum(time), 7100 /*unsigned*/); + EXPECT_EQ(lut.toISOYear(time), 2106 /*unsigned*/); + EXPECT_EQ(lut.toFirstDayNumOfISOYear(time), DayNum(49676) /*DayNum*/); // 
2106-01-04 + EXPECT_EQ(lut.toFirstDayOfISOYear(time), 4292006400 /*time_t*/); + EXPECT_EQ(lut.toISOWeek(time), 4 /*unsigned*/); + EXPECT_EQ(lut.toRelativeMonthNum(time), 25273 /*unsigned*/); + EXPECT_EQ(lut.toRelativeQuarterNum(time), 8424 /*unsigned*/); + EXPECT_EQ(lut.toRelativeHourNum(time), 1192873 /*time_t*/); + EXPECT_EQ(lut.toRelativeMinuteNum(time), 71572397 /*time_t*/); + EXPECT_EQ(lut.toStartOfHourInterval(time, 5), 4294332000 /*time_t*/); + EXPECT_EQ(lut.toStartOfMinuteInterval(time, 6), 4294343520 /*time_t*/); + EXPECT_EQ(lut.toStartOfSecondInterval(time, 7), 4294343872 /*time_t*/); + EXPECT_EQ(lut.toNumYYYYMM(time), 210601 /*UInt32*/); + EXPECT_EQ(lut.toNumYYYYMMDD(time), 21060131 /*UInt32*/); + EXPECT_EQ(lut.toNumYYYYMMDDhhmmss(time), 21060131011753 /*UInt64*/); + EXPECT_EQ(lut.addDays(time, 100), 4302983873 /*time_t*/); + EXPECT_EQ(lut.addWeeks(time, 10), 4300391873 /*time_t*/); + EXPECT_EQ(lut.addMonths(time, 10), 4320523073 /*time_t*/); // 2106-11-30 01:17:53 + EXPECT_EQ(lut.addQuarters(time, 10), 4373140673 /*time_t*/); // 2108-07-31 01:17:53 + EXPECT_EQ(lut.addYears(time, 10), 4609876673 /*time_t*/); // 2116-01-31 01:17:53 + + EXPECT_EQ(lut.timeToString(time), "2106-01-31 01:17:53" /*std::string*/); + EXPECT_EQ(lut.dateToString(time), "2106-01-31" /*std::string*/); +} + + +class DateLUT_TimeZone : public ::testing::TestWithParam +{}; + +TEST_P(DateLUT_TimeZone, DISABLED_LoadAllTimeZones) +{ + // There are some assumptions and assertions about TZ data made in DateLUTImpl which are verified upon loading, + // to make sure that those assertions are true for all timezones we are going to load all of them one by one. + DateLUTImpl{GetParam()}; +} + +// Another long running test, shouldn't be run to often +TEST_P(DateLUT_TimeZone, VaidateTimeComponentsAroundEpoch) +{ + // Converting time around 1970-01-01 to hour-minute-seconds time components + // could be problematic. 
+ const size_t max_failures_per_tz = 3; + const auto timezone_name = GetParam(); + + const auto * test_info = ::testing::UnitTest::GetInstance()->current_test_info(); + const auto lut = DateLUTImpl(timezone_name); + + for (time_t i = -856147870; i < 86400 * 10000; i += 11 * 13 * 17 * 19) + { + SCOPED_TRACE(::testing::Message() + << "\n\tTimezone: " << timezone_name + << "\n\ttimestamp: " << i + << "\n\t offset at start of epoch : " << lut.getOffsetAtStartOfEpoch() + << "\n\t offset_is_whole_number_of_hours_everytime : " << lut.getOffsetIsWholNumberOfHoursEveryWhere() + << "\n\t time_offset_epoch : " << lut.getTimeOffsetEpoch() + << "\n\t offset_at_start_of_lut : " << lut.getTimeOffsetAtStartOfLUT()); + + EXPECT_GE(24, lut.toHour(i)); + EXPECT_GT(60, lut.toMinute(i)); + EXPECT_GT(60, lut.toSecond(i)); + + const auto current_failures = countFailures(*test_info->result()); + if (current_failures.total > 0) + { + if (i < 0) + i = -1; + } + + if (current_failures.total >= max_failures_per_tz) + break; + } +} + +TEST_P(DateLUT_TimeZone, getTimeZone) +{ + const auto & lut = DateLUT::instance(GetParam()); + + EXPECT_EQ(GetParam(), lut.getTimeZone()); +} + +TEST_P(DateLUT_TimeZone, ZeroTime) +{ + const auto & lut = DateLUT::instance(GetParam()); + + EXPECT_EQ(0, lut.toDayNum(time_t{0})); + EXPECT_EQ(0, lut.toDayNum(DayNum{0})); + EXPECT_EQ(0, lut.toDayNum(ExtendedDayNum{0})); +} + +// Group of tests for timezones that have or had some time ago an offset which is not multiple of 15 minutes. 
+INSTANTIATE_TEST_SUITE_P(ExoticTimezones, + DateLUT_TimeZone, + ::testing::ValuesIn(std::initializer_list{ + "Africa/El_Aaiun", + "Pacific/Apia", + "Pacific/Enderbury", + "Pacific/Fakaofo", + "Pacific/Kiritimati", + }) +); + +INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimeZones, + DateLUT_TimeZone, + ::testing::ValuesIn(allTimezones()) +); + +std::ostream & operator<<(std::ostream & ostr, const DateLUTImpl::Values & v) +{ + return ostr << "DateLUTImpl::Values{" + << "\n\t date : " << v.date + << "\n\t year : " << static_cast(v.year) + << "\n\t month : " << static_cast(v.month) + << "\n\t day : " << static_cast(v.day_of_month) + << "\n\t weekday : " << static_cast(v.day_of_week) + << "\n\t days in month : " << static_cast(v.days_in_month) + << "\n\t offset change : " << v.amount_of_offset_change() + << "\n\t offfset change at : " << v.time_at_offset_change() + << "\n}"; +} + +struct TimeRangeParam +{ + const cctz::civil_second begin; + const cctz::civil_second end; + const int step_in_seconds; +}; + +std::ostream & operator<<(std::ostream & ostr, const TimeRangeParam & param) +{ + const auto approximate_step = [](const int step) -> std::string + { + // Convert seconds to a string of seconds or fractional count of minutes/hours/days. 
+ static const size_t multipliers[] = {1 /*seconds to seconds*/, 60 /*seconds to minutes*/, 60 /*minutes to hours*/, 24 /*hours to days*/, 0 /*terminator*/}; + static const char* names[] = {"s", "m", "h", "d", nullptr}; + double result = step; + size_t i = 0; + for (; i < sizeof(multipliers)/sizeof(multipliers[0]) && result > multipliers[i]; ++i) + result /= multipliers[i]; + + char buffer[256] = {'\0'}; + std::snprintf(buffer, sizeof(buffer), "%.1f%s", result, names[i - 1]); + return std::string{buffer}; + }; + + return ostr << param.begin << " : " << param.end << " step: " << param.step_in_seconds << "s (" << approximate_step(param.step_in_seconds) << ")"; +} + +class DateLUT_Timezone_TimeRange : public ::testing::TestWithParam> +{}; + +// refactored test from tests/date_lut3.cpp +TEST_P(DateLUT_Timezone_TimeRange, InRange) +{ + // for a time_t values in range [begin, end) to match with reference obtained from cctz: + // compare date and time components: year, month, day, hours, minutes, seconds, formatted time string. 
+ const auto & [timezone_name, range_data] = GetParam(); + const auto & [begin, end, step] = range_data; + + const auto * test_info = ::testing::UnitTest::GetInstance()->current_test_info(); + static const size_t max_failures_per_case = 3; + cctz::time_zone tz; + ASSERT_TRUE(cctz::load_time_zone(timezone_name, &tz)); + + const auto & lut = DateLUT::instance(timezone_name); + const auto start = cctz::convert(begin, tz).time_since_epoch().count(); + const auto stop = cctz::convert(end, tz).time_since_epoch().count(); + + for (time_t expected_time_t = start; expected_time_t < stop; expected_time_t += step) + { + SCOPED_TRACE(expected_time_t); + + const auto tz_time = cctz::convert(std::chrono::system_clock::from_time_t(expected_time_t), tz); + + EXPECT_EQ(tz_time.year(), lut.toYear(expected_time_t)); + EXPECT_EQ(tz_time.month(), lut.toMonth(expected_time_t)); + EXPECT_EQ(tz_time.day(), lut.toDayOfMonth(expected_time_t)); + EXPECT_EQ(static_cast(cctz::get_weekday(tz_time)) + 1, lut.toDayOfWeek(expected_time_t)); // tm.tm_wday Sunday is 0, while for DateLUTImpl it is 7 + EXPECT_EQ(cctz::get_yearday(tz_time), lut.toDayOfYear(expected_time_t)); + EXPECT_EQ(tz_time.hour(), lut.toHour(expected_time_t)); + EXPECT_EQ(tz_time.minute(), lut.toMinute(expected_time_t)); + EXPECT_EQ(tz_time.second(), lut.toSecond(expected_time_t)); + + const auto time_string = cctz::format("%E4Y-%m-%d %H:%M:%S", std::chrono::system_clock::from_time_t(expected_time_t), tz); + EXPECT_EQ(time_string, lut.timeToString(expected_time_t)); + + // it makes sense to let test execute all checks above to simplify debugging, + // but once we've found a bad apple, no need to dig deeper. + if (countFailures(*test_info->result()).total >= max_failures_per_case) + break; + } +} + +/** Next tests are disabled due to following reasons: + * 1. They are huge and take enormous amount of time to run + * 2. 
Current implementation of DateLUTImpl is inprecise and some cases fail and it seems impractical to try to fix those. + * 3. Many failures (~300) were fixed while refactoring, about ~40 remain the same and 3 new introduced: + * "Asia/Gaza" + * "Pacific/Enderbury" + * "Pacific/Kiritimati" + * So it would be tricky to skip knonw failures to allow all unit tests to pass. + */ +INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010, + DateLUT_Timezone_TimeRange, + ::testing::Combine( + ::testing::ValuesIn(allTimezones()), + ::testing::ValuesIn(std::initializer_list{ + // Values from tests/date_lut3.cpp + {YYYYMMDDToDay(20101031), YYYYMMDDToDay(20101101), 15 * 60}, + {YYYYMMDDToDay(20100328), YYYYMMDDToDay(20100330), 15 * 60} + })) +); + +INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year1970_WHOLE, + DateLUT_Timezone_TimeRange, + ::testing::Combine( + ::testing::ValuesIn(allTimezones()), + ::testing::ValuesIn(std::initializer_list{ + // Values from tests/date_lut3.cpp + {YYYYMMDDToDay(19700101), YYYYMMDDToDay(19701231), 3191 /*53m 11s*/}, + })) +); + +INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010_WHOLE, + DateLUT_Timezone_TimeRange, + ::testing::Combine( + ::testing::ValuesIn(allTimezones()), + ::testing::ValuesIn(std::initializer_list{ + // Values from tests/date_lut3.cpp + {YYYYMMDDToDay(20100101), YYYYMMDDToDay(20101231), 3191 /*53m 11s*/}, + })) +); + +INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2020_WHOLE, + DateLUT_Timezone_TimeRange, + ::testing::Combine( + ::testing::ValuesIn(allTimezones()), + ::testing::ValuesIn(std::initializer_list{ + // Values from tests/date_lut3.cpp + {YYYYMMDDToDay(20200101), YYYYMMDDToDay(20201231), 3191 /*53m 11s*/}, + })) +); + +INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_PreEpoch, + DateLUT_Timezone_TimeRange, + ::testing::Combine( + ::testing::ValuesIn(allTimezones()), + ::testing::ValuesIn(std::initializer_list{ + {YYYYMMDDToDay(19500101), YYYYMMDDToDay(19600101), 15 * 60}, + {YYYYMMDDToDay(19300101), 
YYYYMMDDToDay(19350101), 11 * 15 * 60} + })) +); + +INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year1970, + DateLUT_Timezone_TimeRange, + ::testing::Combine( + ::testing::ValuesIn(allTimezones()), + ::testing::ValuesIn(std::initializer_list{ + {YYYYMMDDToDay(19700101), YYYYMMDDToDay(19700201), 15 * 60}, + {YYYYMMDDToDay(19700101), YYYYMMDDToDay(19701231), 11 * 13 * 17} +// // 11 was chosen as a number which can't divide product of 2-combinarions of (7, 24, 60), +// // to reduce likelehood of hitting same hour/minute/second values for different days. +// // + 12 is just to make sure that last day is covered fully. +// {0, 0 + 11 * 3600 * 24 + 12, 11}, + })) +); + diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 3c27908741c..63fa5d8a5c9 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -390,7 +390,7 @@ private: for (auto d : chineseNewYearIndicators) { /// Let's celebrate until Lantern Festival - if (d <= days && d + 25u >= days) + if (d <= days && d + 25 >= days) return true; else if (d > days) return false; diff --git a/src/Core/DecimalFunctions.h b/src/Core/DecimalFunctions.h index 355cf1d378a..2cd50ab8d08 100644 --- a/src/Core/DecimalFunctions.h +++ b/src/Core/DecimalFunctions.h @@ -50,9 +50,10 @@ inline auto scaleMultiplier(UInt32 scale) * whole - represents whole part of decimal, can be negative or positive. * fractional - for fractional part of decimal, always positive. 
*/ -template +template struct DecimalComponents { + using T = typename DecimalType::NativeType; T whole; T fractional; }; @@ -106,6 +107,15 @@ inline DecimalType decimalFromComponentsWithMultiplier( return DecimalType(value); } +template +inline DecimalType decimalFromComponentsWithMultiplier( + const DecimalComponents & components, + typename DecimalType::NativeType scale_multiplier) +{ + return decimalFromComponentsWithMultiplier(components.whole, components.fractional, scale_multiplier); +} + + /** Make a decimal value from whole and fractional components with given scale. * * @see `decimalFromComponentsWithMultiplier` for details. @@ -126,7 +136,7 @@ inline DecimalType decimalFromComponents( */ template inline DecimalType decimalFromComponents( - const DecimalComponents & components, + const DecimalComponents & components, UInt32 scale) { return decimalFromComponents(components.whole, components.fractional, scale); @@ -136,7 +146,7 @@ inline DecimalType decimalFromComponents( * This is an optimization to reduce number of calls to scaleMultiplier on known scale. */ template -inline DecimalComponents splitWithScaleMultiplier( +inline DecimalComponents splitWithScaleMultiplier( const DecimalType & decimal, typename DecimalType::NativeType scale_multiplier) { @@ -151,7 +161,7 @@ inline DecimalComponents splitWithScaleMultipl /// Split decimal into components: whole and fractional part, @see `DecimalComponents` for details. 
template -inline DecimalComponents split(const DecimalType & decimal, UInt32 scale) +inline DecimalComponents split(const DecimalType & decimal, UInt32 scale) { if (scale == 0) { diff --git a/src/Core/MySQL/MySQLReplication.cpp b/src/Core/MySQL/MySQLReplication.cpp index 1b202c4edb4..3e9c5230955 100644 --- a/src/Core/MySQL/MySQLReplication.cpp +++ b/src/Core/MySQL/MySQLReplication.cpp @@ -420,8 +420,8 @@ namespace MySQLReplication UInt32 i24 = 0; payload.readStrict(reinterpret_cast(&i24), 3); - DayNum date_day_number = DateLUT::instance().makeDayNum( - static_cast((i24 >> 9) & 0x7fff), static_cast((i24 >> 5) & 0xf), static_cast(i24 & 0x1f)); + const DayNum date_day_number{DateLUT::instance().makeDayNum( + static_cast((i24 >> 9) & 0x7fff), static_cast((i24 >> 5) & 0xf), static_cast(i24 & 0x1f)).toUnderType()}; row.push_back(Field(date_day_number.toUnderType())); break; @@ -443,7 +443,7 @@ namespace MySQLReplication row.push_back(Field{UInt32(date_time)}); else { - DB::DecimalUtils::DecimalComponents components{ + DB::DecimalUtils::DecimalComponents components{ static_cast(date_time), 0}; components.fractional = fsp; @@ -462,7 +462,7 @@ namespace MySQLReplication row.push_back(Field{sec}); else { - DB::DecimalUtils::DecimalComponents components{ + DB::DecimalUtils::DecimalComponents components{ static_cast(sec), 0}; components.fractional = fsp; diff --git a/src/Core/tests/gtest_DecimalFunctions.cpp b/src/Core/tests/gtest_DecimalFunctions.cpp index be64661176b..1069a810d64 100644 --- a/src/Core/tests/gtest_DecimalFunctions.cpp +++ b/src/Core/tests/gtest_DecimalFunctions.cpp @@ -14,7 +14,7 @@ struct DecimalUtilsSplitAndCombineTestParam Decimal64 decimal_value; uint8_t scale; - DecimalUtils::DecimalComponents components; + DecimalUtils::DecimalComponents components; }; std::ostream & operator << (std::ostream & ostr, const DecimalUtilsSplitAndCombineTestParam & param) diff --git a/src/DataStreams/MongoDBBlockInputStream.cpp b/src/DataStreams/MongoDBBlockInputStream.cpp 
index 5463d95151b..e4ddcd09ede 100644 --- a/src/DataStreams/MongoDBBlockInputStream.cpp +++ b/src/DataStreams/MongoDBBlockInputStream.cpp @@ -270,8 +270,8 @@ namespace throw Exception{"Type mismatch, expected Timestamp, got type id = " + toString(value.type()) + " for column " + name, ErrorCodes::TYPE_MISMATCH}; - assert_cast(column).getData().push_back(UInt16{DateLUT::instance().toDayNum( - static_cast &>(value).value().epochTime())}); + assert_cast(column).getData().push_back(static_cast(DateLUT::instance().toDayNum( + static_cast &>(value).value().epochTime()))); break; } diff --git a/src/DataTypes/DataTypeDateTime64.h b/src/DataTypes/DataTypeDateTime64.h index 198c3739f58..ec3f2fde889 100644 --- a/src/DataTypes/DataTypeDateTime64.h +++ b/src/DataTypes/DataTypeDateTime64.h @@ -48,66 +48,5 @@ public: bool canBePromoted() const override { return false; } }; -/** Tansform-type wrapper for DateTime64, applies given Transform to DateTime64 value or only to a whole part of it. - * - * Depending on what overloads of Transform::execute() are available, when called with DateTime64 value, - * invokes Transform::execute() with: - * * whole part of DateTime64 value, discarding fractional part. - * * DateTime64 value and scale factor. - * - * Suitable Transfotm-types are commonly used in Date/DateTime manipulation functions, - * and should implement static (or const) function with following signatures: - * R execute(UInt32 whole_value, ... , const TimeZoneImpl &) - * OR - * R execute(DateTime64 value, Int64 scale_factor, ... , const TimeZoneImpl &) - * - * Where R and T could be arbitrary types. -*/ -template -class TransformDateTime64 : public Transform -{ -private: - // Detect if Transform::execute is const or static method - // with signature defined by template args (ignoring result type). 
- template - struct TransformHasExecuteOverload : std::false_type {}; - - template - struct TransformHasExecuteOverload().execute(std::declval()...))>, Args...> - : std::true_type {}; - - template - static constexpr bool TransformHasExecuteOverload_v = TransformHasExecuteOverload::value; - -public: - static constexpr auto name = Transform::name; - - using Transform::execute; - - // non-explicit constructor to allow creating from scale value (or with no scale at all), indispensable in some contexts. - TransformDateTime64(UInt32 scale_ = 0) - : scale_multiplier(DecimalUtils::scaleMultiplier(scale_)) - {} - - template - inline auto execute(const DateTime64 & t, Args && ... args) const - { - const auto transform = static_cast(this); - - if constexpr (TransformHasExecuteOverload_v) - { - return transform->execute(t, scale_multiplier, std::forward(args)...); - } - else - { - const auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier); - return transform->execute(static_cast(components.whole), std::forward(args)...); - } - } - -private: - DateTime64::NativeType scale_multiplier = 1; -}; - } diff --git a/src/Functions/CustomWeekTransforms.h b/src/Functions/CustomWeekTransforms.h index afcbadc835c..28da546eb93 100644 --- a/src/Functions/CustomWeekTransforms.h +++ b/src/Functions/CustomWeekTransforms.h @@ -33,14 +33,21 @@ static inline UInt32 dateIsNotSupported(const char * name) /// This factor transformation will say that the function is monotone everywhere. 
struct ZeroTransform { - static inline UInt16 execute(UInt32, UInt8, const DateLUTImpl &) { return 0; } static inline UInt16 execute(UInt16, UInt8, const DateLUTImpl &) { return 0; } + static inline UInt16 execute(UInt32, UInt8, const DateLUTImpl &) { return 0; } + static inline UInt16 execute(Int64, UInt8, const DateLUTImpl &) { return 0; } }; struct ToWeekImpl { static constexpr auto name = "toWeek"; + static inline UInt8 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone) + { + // TODO: ditch conversion to DayNum, since it doesn't support extended range. + YearWeek yw = time_zone.toYearWeek(time_zone.toDayNum(t), week_mode); + return yw.second; + } static inline UInt8 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone) { YearWeek yw = time_zone.toYearWeek(time_zone.toDayNum(t), week_mode); @@ -59,6 +66,13 @@ struct ToYearWeekImpl { static constexpr auto name = "toYearWeek"; + static inline UInt32 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone) + { + // TODO: ditch toDayNum() + YearWeek yw = time_zone.toYearWeek(time_zone.toDayNum(t), week_mode | static_cast(WeekModeFlag::YEAR)); + return yw.first * 100 + yw.second; + } + static inline UInt32 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone) { YearWeek yw = time_zone.toYearWeek(time_zone.toDayNum(t), week_mode | static_cast(WeekModeFlag::YEAR)); @@ -77,13 +91,19 @@ struct ToStartOfWeekImpl { static constexpr auto name = "toStartOfWeek"; + static inline UInt16 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone) + { + return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); +// return time_zone.toFirstDayNumOfWeek(t, week_mode); + } static inline UInt16 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone) { return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); +// return time_zone.toFirstDayNumOfWeek(t, week_mode); } static inline UInt16 execute(UInt16 d, UInt8 week_mode, const DateLUTImpl & 
time_zone) { - return time_zone.toFirstDayNumOfWeek(DayNum(d), week_mode); + return time_zone.toFirstDayNumOfWeek(ExtendedDayNum(d), week_mode); } using FactorTransform = ZeroTransform; diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 333b397312d..c299b9c4169 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -3,6 +3,7 @@ #include #include #include +//#include #include #include #include @@ -33,14 +34,15 @@ namespace ErrorCodes * factor-transformation F is "round to the nearest month" (2015-02-03 -> 2015-02-01). */ -static inline UInt32 dateIsNotSupported(const char * name) -{ - throw Exception("Illegal type Date of argument for function " + std::string(name), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); -} + static inline UInt32 dateIsNotSupported(const char * name) + { + throw Exception("Illegal type Date of argument for function " + std::string(name), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } /// This factor transformation will say that the function is monotone everywhere. struct ZeroTransform { + static inline UInt16 execute(Int64, const DateLUTImpl &) { return 0; } static inline UInt16 execute(UInt32, const DateLUTImpl &) { return 0; } static inline UInt16 execute(UInt16, const DateLUTImpl &) { return 0; } }; @@ -49,6 +51,10 @@ struct ToDateImpl { static constexpr auto name = "toDate"; + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + { + return UInt16(time_zone.toDayNum(t)); + } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return UInt16(time_zone.toDayNum(t)); @@ -65,13 +71,18 @@ struct ToStartOfDayImpl { static constexpr auto name = "toStartOfDay"; + //TODO: right now it is hardcoded to produce DateTime only, needs fixing later. See date_and_time_type_details::ResultDataTypeMap for deduction of result type example. 
+ static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) + { + return time_zone.toDate(static_cast(t.whole)); + } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toDate(t); } static inline UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toDate(DayNum(d)); + return time_zone.toDate(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -81,13 +92,19 @@ struct ToMondayImpl { static constexpr auto name = "toMonday"; + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + { + //return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t)); + return time_zone.toFirstDayNumOfWeek(t); + } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t)); + //return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t)); + return time_zone.toFirstDayNumOfWeek(t); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfWeek(DayNum(d)); + return time_zone.toFirstDayNumOfWeek(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -97,13 +114,17 @@ struct ToStartOfMonthImpl { static constexpr auto name = "toStartOfMonth"; + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toFirstDayNumOfMonth(time_zone.toDayNum(t)); + } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toFirstDayNumOfMonth(time_zone.toDayNum(t)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfMonth(DayNum(d)); + return time_zone.toFirstDayNumOfMonth(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -113,13 +134,17 @@ struct ToStartOfQuarterImpl { static constexpr auto name = "toStartOfQuarter"; + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + { + return 
time_zone.toFirstDayNumOfQuarter(time_zone.toDayNum(t)); + } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toFirstDayNumOfQuarter(time_zone.toDayNum(t)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfQuarter(DayNum(d)); + return time_zone.toFirstDayNumOfQuarter(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -129,13 +154,17 @@ struct ToStartOfYearImpl { static constexpr auto name = "toStartOfYear"; + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toFirstDayNumOfYear(time_zone.toDayNum(t)); + } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toFirstDayNumOfYear(time_zone.toDayNum(t)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfYear(DayNum(d)); + return time_zone.toFirstDayNumOfYear(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -144,9 +173,13 @@ struct ToStartOfYearImpl struct ToTimeImpl { + /// When transforming to time, the date will be equated to 1970-01-01. static constexpr auto name = "toTime"; - /// When transforming to time, the date will be equated to 1970-01-02. 
+ static UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) + { + return time_zone.toTime(t.whole) + 86400; + } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toTime(t) + 86400; @@ -164,6 +197,10 @@ struct ToStartOfMinuteImpl { static constexpr auto name = "toStartOfMinute"; + static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) + { + return time_zone.toStartOfMinute(t.whole); + } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toStartOfMinute(t); @@ -215,6 +252,10 @@ struct ToStartOfFiveMinuteImpl { static constexpr auto name = "toStartOfFiveMinute"; + static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) + { + return time_zone.toStartOfFiveMinute(t.whole); + } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toStartOfFiveMinute(t); @@ -231,6 +272,10 @@ struct ToStartOfTenMinutesImpl { static constexpr auto name = "toStartOfTenMinutes"; + static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) + { + return time_zone.toStartOfTenMinutes(t.whole); + } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toStartOfTenMinutes(t); @@ -247,6 +292,10 @@ struct ToStartOfFifteenMinutesImpl { static constexpr auto name = "toStartOfFifteenMinutes"; + static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) + { + return time_zone.toStartOfFifteenMinutes(t.whole); + } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toStartOfFifteenMinutes(t); @@ -264,6 +313,12 @@ struct TimeSlotImpl { static constexpr auto name = "timeSlot"; + //static inline DecimalUtils::DecimalComponents execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl &) + static 
inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl &) + { + return t.whole / 1800 * 1800; + } + static inline UInt32 execute(UInt32 t, const DateLUTImpl &) { return t / 1800 * 1800; @@ -281,6 +336,11 @@ struct ToStartOfHourImpl { static constexpr auto name = "toStartOfHour"; + static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) + { + return time_zone.toStartOfHour(t.whole); + } + static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toStartOfHour(t); @@ -298,13 +358,17 @@ struct ToYearImpl { static constexpr auto name = "toYear"; + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toYear(t); + } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toYear(t); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toYear(DayNum(d)); + return time_zone.toYear(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -314,13 +378,17 @@ struct ToQuarterImpl { static constexpr auto name = "toQuarter"; + static inline UInt8 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toQuarter(t); + } static inline UInt8 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toQuarter(t); } static inline UInt8 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toQuarter(DayNum(d)); + return time_zone.toQuarter(ExtendedDayNum(d)); } using FactorTransform = ToStartOfYearImpl; @@ -330,13 +398,17 @@ struct ToMonthImpl { static constexpr auto name = "toMonth"; + static inline UInt8 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toMonth(t); + } static inline UInt8 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toMonth(t); } static inline UInt8 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toMonth(DayNum(d)); + return 
time_zone.toMonth(ExtendedDayNum(d)); } using FactorTransform = ToStartOfYearImpl; @@ -346,13 +418,17 @@ struct ToDayOfMonthImpl { static constexpr auto name = "toDayOfMonth"; + static inline UInt8 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toDayOfMonth(t); + } static inline UInt8 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toDayOfMonth(t); } static inline UInt8 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toDayOfMonth(DayNum(d)); + return time_zone.toDayOfMonth(ExtendedDayNum(d)); } using FactorTransform = ToStartOfMonthImpl; @@ -362,13 +438,17 @@ struct ToDayOfWeekImpl { static constexpr auto name = "toDayOfWeek"; + static inline UInt8 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toDayOfWeek(t); + } static inline UInt8 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toDayOfWeek(t); } static inline UInt8 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toDayOfWeek(DayNum(d)); + return time_zone.toDayOfWeek(ExtendedDayNum(d)); } using FactorTransform = ToMondayImpl; @@ -378,13 +458,17 @@ struct ToDayOfYearImpl { static constexpr auto name = "toDayOfYear"; + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toDayOfYear(t); + } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toDayOfYear(t); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toDayOfYear(DayNum(d)); + return time_zone.toDayOfYear(ExtendedDayNum(d)); } using FactorTransform = ToStartOfYearImpl; @@ -394,6 +478,10 @@ struct ToHourImpl { static constexpr auto name = "toHour"; + static inline UInt8 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toHour(t); + } static inline UInt8 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toHour(t); @@ -411,6 +499,11 @@ struct TimezoneOffsetImpl { static 
constexpr auto name = "timezoneOffset"; + static inline time_t execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.timezoneOffset(t); + } + static inline time_t execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.timezoneOffset(t); @@ -428,6 +521,10 @@ struct ToMinuteImpl { static constexpr auto name = "toMinute"; + static inline UInt8 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toMinute(t); + } static inline UInt8 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toMinute(t); @@ -444,6 +541,10 @@ struct ToSecondImpl { static constexpr auto name = "toSecond"; + static inline UInt8 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toSecond(t); + } static inline UInt8 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toSecond(t); @@ -460,13 +561,17 @@ struct ToISOYearImpl { static constexpr auto name = "toISOYear"; + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toISOYear(time_zone.toDayNum(t)); + } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toISOYear(time_zone.toDayNum(t)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toISOYear(DayNum(d)); + return time_zone.toISOYear(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -476,13 +581,17 @@ struct ToStartOfISOYearImpl { static constexpr auto name = "toStartOfISOYear"; + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toFirstDayNumOfISOYear(time_zone.toDayNum(t)); + } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toFirstDayNumOfISOYear(time_zone.toDayNum(t)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfISOYear(DayNum(d)); + return time_zone.toFirstDayNumOfISOYear(ExtendedDayNum(d)); } using FactorTransform 
= ZeroTransform; @@ -492,13 +601,17 @@ struct ToISOWeekImpl { static constexpr auto name = "toISOWeek"; + static inline UInt8 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toISOWeek(time_zone.toDayNum(t)); + } static inline UInt8 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toISOWeek(time_zone.toDayNum(t)); } static inline UInt8 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toISOWeek(DayNum(d)); + return time_zone.toISOWeek(ExtendedDayNum(d)); } using FactorTransform = ToISOYearImpl; @@ -508,13 +621,17 @@ struct ToRelativeYearNumImpl { static constexpr auto name = "toRelativeYearNum"; - static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { return time_zone.toYear(t); } + static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) + { + return time_zone.toYear(static_cast(t)); + } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toYear(DayNum(d)); + return time_zone.toYear(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -524,13 +641,17 @@ struct ToRelativeQuarterNumImpl { static constexpr auto name = "toRelativeQuarterNum"; - static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { return time_zone.toRelativeQuarterNum(t); } + static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) + { + return time_zone.toRelativeQuarterNum(static_cast(t)); + } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeQuarterNum(DayNum(d)); + return time_zone.toRelativeQuarterNum(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -540,13 +661,17 @@ struct ToRelativeMonthNumImpl { static constexpr auto name = "toRelativeMonthNum"; - static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) + 
static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { return time_zone.toRelativeMonthNum(t); } + static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) + { + return time_zone.toRelativeMonthNum(static_cast(t)); + } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeMonthNum(DayNum(d)); + return time_zone.toRelativeMonthNum(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -556,13 +681,17 @@ struct ToRelativeWeekNumImpl { static constexpr auto name = "toRelativeWeekNum"; - static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { return time_zone.toRelativeWeekNum(t); } + static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) + { + return time_zone.toRelativeWeekNum(static_cast(t)); + } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeWeekNum(DayNum(d)); + return time_zone.toRelativeWeekNum(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -572,10 +701,14 @@ struct ToRelativeDayNumImpl { static constexpr auto name = "toRelativeDayNum"; - static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) + static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { return time_zone.toDayNum(t); } + static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) + { + return time_zone.toDayNum(static_cast(t)); + } static inline UInt16 execute(UInt16 d, const DateLUTImpl &) { return static_cast(d); @@ -589,13 +722,17 @@ struct ToRelativeHourNumImpl { static constexpr auto name = "toRelativeHourNum"; - static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) + static inline UInt32 execute(Int64 t, const DateLUTImpl & time_zone) { return time_zone.toRelativeHourNum(t); } + static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) + { + return 
time_zone.toRelativeHourNum(static_cast(t)); + } static inline UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeHourNum(DayNum(d)); + return time_zone.toRelativeHourNum(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -605,13 +742,17 @@ struct ToRelativeMinuteNumImpl { static constexpr auto name = "toRelativeMinuteNum"; - static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) + static inline UInt32 execute(Int64 t, const DateLUTImpl & time_zone) { return time_zone.toRelativeMinuteNum(t); } + static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) + { + return time_zone.toRelativeMinuteNum(static_cast(t)); + } static inline UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeMinuteNum(DayNum(d)); + return time_zone.toRelativeMinuteNum(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -621,13 +762,17 @@ struct ToRelativeSecondNumImpl { static constexpr auto name = "toRelativeSecondNum"; + static inline Int64 execute(Int64 t, const DateLUTImpl &) + { + return t; + } static inline UInt32 execute(UInt32 t, const DateLUTImpl &) { return t; } static inline UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.fromDayNum(DayNum(d)); + return time_zone.fromDayNum(ExtendedDayNum(d)); } using FactorTransform = ZeroTransform; @@ -637,6 +782,10 @@ struct ToYYYYMMImpl { static constexpr auto name = "toYYYYMM"; + static inline UInt32 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toNumYYYYMM(t); + } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toNumYYYYMM(t); @@ -653,6 +802,10 @@ struct ToYYYYMMDDImpl { static constexpr auto name = "toYYYYMMDD"; + static inline UInt32 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toNumYYYYMMDD(t); + } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { return 
time_zone.toNumYYYYMMDD(t); @@ -669,6 +822,10 @@ struct ToYYYYMMDDhhmmssImpl { static constexpr auto name = "toYYYYMMDDhhmmss"; + static inline UInt64 execute(Int64 t, const DateLUTImpl & time_zone) + { + return time_zone.toNumYYYYMMDDhhmmss(t); + } static inline UInt64 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toNumYYYYMMDDhhmmss(t); diff --git a/src/Functions/FunctionCustomWeekToSomething.h b/src/Functions/FunctionCustomWeekToSomething.h index 8a343cffb95..5634ea11584 100644 --- a/src/Functions/FunctionCustomWeekToSomething.h +++ b/src/Functions/FunctionCustomWeekToSomething.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include diff --git a/src/Functions/FunctionDateOrDateTimeAddInterval.h b/src/Functions/FunctionDateOrDateTimeAddInterval.h index 5f964b899b4..2b0082f4334 100644 --- a/src/Functions/FunctionDateOrDateTimeAddInterval.h +++ b/src/Functions/FunctionDateOrDateTimeAddInterval.h @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -25,31 +26,6 @@ namespace ErrorCodes extern const int ILLEGAL_COLUMN; } -/// AddOnDateTime64DefaultImpl provides default implementation of add-X functionality for DateTime64. -/// -/// Default implementation is not to change fractional part, but only modify whole part as if it was DateTime. -/// That means large whole values (for scale less than 9) might not fit into UInt32-range, -/// and hence default implementation will produce incorrect results. -template -struct AddOnDateTime64DefaultImpl -{ - AddOnDateTime64DefaultImpl(UInt32 scale_ = 0) - : scale_multiplier(DecimalUtils::scaleMultiplier(scale_)) - {} - - // Default implementation for add/sub on DateTime64: do math on whole part (the same way as for DateTime), leave fractional as it is. 
- inline DateTime64 execute(const DateTime64 & t, Int64 delta, const DateLUTImpl & time_zone) const - { - const auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier); - - const auto whole = static_cast(this)->execute(static_cast(components.whole), delta, time_zone); - return DecimalUtils::decimalFromComponentsWithMultiplier(static_cast(whole), components.fractional, scale_multiplier); - } - - UInt32 scale_multiplier = 1; -}; - - /// Type of first argument of 'execute' function overload defines what INPUT DataType it is used for. /// Return type defines what is the OUTPUT (return) type of the CH function. /// Corresponding types: @@ -60,14 +36,15 @@ struct AddOnDateTime64DefaultImpl /// - 'AddSecondsImpl::execute(UInt32, ...) -> UInt32' is available to the ClickHouse users as 'addSeconds(DateTime, ...) -> DateTime' /// - 'AddSecondsImpl::execute(UInt16, ...) -> UInt32' is available to the ClickHouse users as 'addSeconds(Date, ...) -> DateTime' -struct AddSecondsImpl : public AddOnDateTime64DefaultImpl +struct AddSecondsImpl { - using Base = AddOnDateTime64DefaultImpl; - using Base::Base; - using Base::execute; - static constexpr auto name = "addSeconds"; + static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &) + { + return {t.whole + delta, t.fractional}; + } + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &) { return t + delta; @@ -75,18 +52,19 @@ struct AddSecondsImpl : public AddOnDateTime64DefaultImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) { - return time_zone.fromDayNum(DayNum(d)) + delta; + return time_zone.fromDayNum(ExtendedDayNum(d)) + delta; } }; -struct AddMinutesImpl : public AddOnDateTime64DefaultImpl +struct AddMinutesImpl { - using Base = AddOnDateTime64DefaultImpl; - using Base::Base; - using Base::execute; - static constexpr 
auto name = "addMinutes"; + static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &) + { + return {t.whole + delta * 60, t.fractional}; + } + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &) { return t + delta * 60; @@ -94,18 +72,18 @@ struct AddMinutesImpl : public AddOnDateTime64DefaultImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) { - return time_zone.fromDayNum(DayNum(d)) + delta * 60; + return time_zone.fromDayNum(ExtendedDayNum(d)) + delta * 60; } }; -struct AddHoursImpl : public AddOnDateTime64DefaultImpl +struct AddHoursImpl { - using Base = AddOnDateTime64DefaultImpl; - using Base::Base; - using Base::execute; - static constexpr auto name = "addHours"; + static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &) + { + return {t.whole + delta * 3600, t.fractional}; + } static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &) { return t + delta * 3600; @@ -113,19 +91,20 @@ struct AddHoursImpl : public AddOnDateTime64DefaultImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) { - return time_zone.fromDayNum(DayNum(d)) + delta * 3600; + return time_zone.fromDayNum(ExtendedDayNum(d)) + delta * 3600; } }; -struct AddDaysImpl : public AddOnDateTime64DefaultImpl +struct AddDaysImpl { - using Base = AddOnDateTime64DefaultImpl; - using Base::Base; - using Base::execute; - static constexpr auto name = "addDays"; - static inline UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone) + { + return {time_zone.addDays(t.whole, 
delta), t.fractional}; + } + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone) { return time_zone.addDays(t, delta); } @@ -136,14 +115,15 @@ struct AddDaysImpl : public AddOnDateTime64DefaultImpl } }; -struct AddWeeksImpl : public AddOnDateTime64DefaultImpl +struct AddWeeksImpl { - using Base = AddOnDateTime64DefaultImpl; - using Base::Base; - using Base::execute; - static constexpr auto name = "addWeeks"; + static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone) + { + return {time_zone.addWeeks(t.whole, delta), t.fractional}; + } + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone) { return time_zone.addWeeks(t, delta); @@ -155,14 +135,15 @@ struct AddWeeksImpl : public AddOnDateTime64DefaultImpl } }; -struct AddMonthsImpl : public AddOnDateTime64DefaultImpl +struct AddMonthsImpl { - using Base = AddOnDateTime64DefaultImpl; - using Base::Base; - using Base::execute; - static constexpr auto name = "addMonths"; + static inline DecimalUtils::DecimalComponents execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone) + { + return {time_zone.addMonths(t.whole, delta), t.fractional}; + } + static inline UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone) { return time_zone.addMonths(t, delta); @@ -170,18 +151,19 @@ struct AddMonthsImpl : public AddOnDateTime64DefaultImpl static inline UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) { - return time_zone.addMonths(DayNum(d), delta); + return time_zone.addMonths(ExtendedDayNum(d), delta); } }; -struct AddQuartersImpl : public AddOnDateTime64DefaultImpl +struct AddQuartersImpl { - using Base = AddOnDateTime64DefaultImpl; - using Base::Base; - using Base::execute; - static constexpr auto name = "addQuarters"; + static inline 
DecimalUtils::DecimalComponents execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone) + { + return {time_zone.addQuarters(t.whole, delta), t.fractional}; + } + static inline UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone) { return time_zone.addQuarters(t, delta); @@ -189,18 +171,19 @@ struct AddQuartersImpl : public AddOnDateTime64DefaultImpl static inline UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) { - return time_zone.addQuarters(DayNum(d), delta); + return time_zone.addQuarters(ExtendedDayNum(d), delta); } }; -struct AddYearsImpl : public AddOnDateTime64DefaultImpl +struct AddYearsImpl { - using Base = AddOnDateTime64DefaultImpl; - using Base::Base; - using Base::execute; - static constexpr auto name = "addYears"; + static inline DecimalUtils::DecimalComponents execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone) + { + return {time_zone.addYears(t.whole, delta), t.fractional}; + } + static inline UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone) { return time_zone.addYears(t, delta); @@ -208,7 +191,7 @@ struct AddYearsImpl : public AddOnDateTime64DefaultImpl static inline UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) { - return time_zone.addYears(DayNum(d), delta); + return time_zone.addYears(ExtendedDayNum(d), delta); } }; @@ -351,6 +334,7 @@ template <> struct ResultDataTypeMap { using ResultDataType = DataTy template <> struct ResultDataTypeMap { using ResultDataType = DataTypeDateTime; }; template <> struct ResultDataTypeMap { using ResultDataType = DataTypeDateTime; }; template <> struct ResultDataTypeMap { using ResultDataType = DataTypeDateTime64; }; +template <> struct ResultDataTypeMap { using ResultDataType = DataTypeDateTime64; }; } template @@ -417,10 +401,18 @@ public: } } + // TransformDateTime64 helps choosing correct overload of exec and does some transformations + // on input and output 
parameters to simplify support of DateTime64 in concrete Transform. + template + using TransformType = std::conditional_t< + std::is_same_v, + TransformDateTime64, + Transform>; + /// Helper templates to deduce return type based on argument type, since some overloads may promote or denote types, /// e.g. addSeconds(Date, 1) => DateTime template - using TransformExecuteReturnType = decltype(std::declval().execute(FieldType(), 0, std::declval())); + using TransformExecuteReturnType = decltype(std::declval>().execute(FieldType(), 0, std::declval())); // Deduces RETURN DataType from INPUT DataType, based on return type of Transform{}.execute(INPUT_TYPE, UInt64, DateLUTImpl). // e.g. for Transform-type that has execute()-overload with 'UInt16' input and 'UInt32' return, @@ -475,8 +467,9 @@ public: } else if (const auto * datetime64_type = assert_cast(from_type)) { - return DateTimeAddIntervalImpl, Transform>::execute( - Transform{datetime64_type->getScale()}, arguments, result_type); + using WrappedTransformType = TransformType; + return DateTimeAddIntervalImpl, WrappedTransformType>::execute( + WrappedTransformType{datetime64_type->getScale()}, arguments, result_type); } else throw Exception("Illegal type " + arguments[0].type->getName() + " of first argument of function " + getName(), diff --git a/src/Functions/FunctionDateOrDateTimeToSomething.h b/src/Functions/FunctionDateOrDateTimeToSomething.h index e0676f3dc0f..abe859e2f29 100644 --- a/src/Functions/FunctionDateOrDateTimeToSomething.h +++ b/src/Functions/FunctionDateOrDateTimeToSomething.h @@ -5,6 +5,7 @@ #include #include #include +#include #include @@ -107,6 +108,7 @@ public: else if (which.isDateTime64()) { const auto scale = static_cast(from_type)->getScale(); + const TransformDateTime64 transformer(scale); return DateTimeTransformImpl::execute(arguments, result_type, input_rows_count, transformer); } @@ -133,7 +135,6 @@ public: /// This method is called only if the function has one argument. 
Therefore, we do not care about the non-local time zone. const DateLUTImpl & date_lut = DateLUT::instance(); - if (left.isNull() || right.isNull()) return is_not_monotonic; diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 2e2a4ce9cfa..62577b8b402 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -310,10 +311,15 @@ struct ToDateTimeImpl return time_zone.fromDayNum(DayNum(d)); } - // no-op conversion from DateTime to DateTime, used in DateTime64 to DateTime conversion. - static inline UInt32 execute(UInt32 d, const DateLUTImpl & /*time_zone*/) + static inline UInt32 execute(UInt32 dt, const DateLUTImpl & /*time_zone*/) { - return d; + return dt; + } + + // TODO: return UInt32 ??? + static inline Int64 execute(Int64 dt64, const DateLUTImpl & /*time_zone*/) + { + return dt64; } }; @@ -329,6 +335,7 @@ struct ToDateTransform32Or64 static inline NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { + // since converting to Date, no need in values outside of default LUT range. return (from < 0xFFFF) ? from : time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF))); @@ -342,6 +349,7 @@ struct ToDateTransform32Or64Signed static inline NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { + // TODO: decide narrow or extended range based on FromType /// The function should be monotonic (better for query optimizations), so we saturate instead of overflow. if (from < 0) return 0; @@ -447,35 +455,8 @@ template struct ConvertImpl struct ConvertImpl : DateTimeTransformImpl> {}; - -/** Conversion of Date or DateTime to DateTime64: add zero sub-second part. 
- */ -struct ToDateTime64Transform -{ - static constexpr auto name = "toDateTime64"; - - const DateTime64::NativeType scale_multiplier = 1; - - ToDateTime64Transform(UInt32 scale = 0) - : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) - {} - - inline DateTime64::NativeType execute(UInt16 d, const DateLUTImpl & time_zone) const - { - const auto dt = ToDateTimeImpl::execute(d, time_zone); - return execute(dt, time_zone); - } - - inline DateTime64::NativeType execute(UInt32 dt, const DateLUTImpl & /*time_zone*/) const - { - return DecimalUtils::decimalFromComponentsWithMultiplier(dt, 0, scale_multiplier); - } -}; - -template struct ConvertImpl - : DateTimeTransformImpl {}; -template struct ConvertImpl - : DateTimeTransformImpl {}; +const time_t LUT_MIN_TIME = -1420070400l; // 1925-01-01 UTC +const time_t LUT_MAX_TIME = 9877248000l; // 2282-12-31 UTC /** Conversion of numeric to DateTime64 */ @@ -493,7 +474,7 @@ struct ToDateTime64TransformUnsigned inline NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const { - from = std::min(time_t(from), time_t(0xFFFFFFFF)); + from = std::min(from, LUT_MAX_TIME); return DecimalUtils::decimalFromComponentsWithMultiplier(from, 0, scale_multiplier); } }; @@ -510,9 +491,8 @@ struct ToDateTime64TransformSigned inline NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const { - if (from < 0) - return 0; - from = std::min(time_t(from), time_t(0xFFFFFFFF)); + from = std::max(from, LUT_MIN_TIME); + from = std::min(from, LUT_MAX_TIME); return DecimalUtils::decimalFromComponentsWithMultiplier(from, 0, scale_multiplier); } }; @@ -551,6 +531,7 @@ template struct ConvertImpl struct ConvertImpl : DateTimeTransformImpl> {}; + /** Conversion of DateTime64 to Date or DateTime: discards fractional part. */ template @@ -571,10 +552,41 @@ struct FromDateTime64Transform } }; +/** Conversion of DateTime64 to Date or DateTime: discards fractional part. 
+ */ template struct ConvertImpl - : DateTimeTransformImpl> {}; + : DateTimeTransformImpl> {}; template struct ConvertImpl - : DateTimeTransformImpl> {}; + : DateTimeTransformImpl> {}; + +struct ToDateTime64Transform +{ + static constexpr auto name = "toDateTime64"; + + const DateTime64::NativeType scale_multiplier = 1; + + ToDateTime64Transform(UInt32 scale = 0) + : scale_multiplier(DecimalUtils::scaleMultiplier(scale)) + {} + + inline DateTime64::NativeType execute(UInt16 d, const DateLUTImpl & time_zone) const + { + const auto dt = ToDateTimeImpl::execute(d, time_zone); + return execute(dt, time_zone); + } + + inline DateTime64::NativeType execute(UInt32 dt, const DateLUTImpl & /*time_zone*/) const + { + return DecimalUtils::decimalFromComponentsWithMultiplier(dt, 0, scale_multiplier); + } +}; + +/** Conversion of Date or DateTime to DateTime64: add zero sub-second part. + */ +template struct ConvertImpl + : DateTimeTransformImpl {}; +template struct ConvertImpl + : DateTimeTransformImpl {}; /** Transformation of numbers, dates, datetimes to strings: through formatting. @@ -658,7 +670,6 @@ struct ConvertImpl(*col_with_type_and_name.type); const DateLUTImpl * time_zone = nullptr; - /// For argument of DateTime type, second argument with time zone could be specified. if constexpr (std::is_same_v || std::is_same_v) time_zone = &extractTimeZoneFromFunctionArguments(arguments, 1, 0); @@ -754,6 +765,7 @@ inline void parseImpl(DataTypeDate::FieldType & x, ReadBuffer & rb x = tmp; } +// NOTE: no need of extra overload of DateTime64, since readDateTimeText64 has different signature and that case is explicitly handled in the calling code. 
template <> inline void parseImpl(DataTypeDateTime::FieldType & x, ReadBuffer & rb, const DateLUTImpl * time_zone) { @@ -762,6 +774,7 @@ inline void parseImpl(DataTypeDateTime::FieldType & x, ReadBuf x = tmp; } + template <> inline void parseImpl(DataTypeUUID::FieldType & x, ReadBuffer & rb, const DateLUTImpl *) { @@ -989,9 +1002,18 @@ struct ConvertThroughParsing } else if constexpr (parsing_mode == ConvertFromStringParsingMode::BestEffortUS) { - time_t res; - parseDateTimeBestEffortUS(res, read_buffer, *local_time_zone, *utc_time_zone); - vec_to[i] = res; + if constexpr (to_datetime64) + { + DateTime64 res = 0; + parseDateTime64BestEffortUS(res, vec_to.getScale(), read_buffer, *local_time_zone, *utc_time_zone); + vec_to[i] = res; + } + else + { + time_t res; + parseDateTimeBestEffortUS(res, read_buffer, *local_time_zone, *utc_time_zone); + vec_to[i] = res; + } } else { diff --git a/src/Functions/TransformDateTime64.h b/src/Functions/TransformDateTime64.h new file mode 100644 index 00000000000..0a5e36cd2bd --- /dev/null +++ b/src/Functions/TransformDateTime64.h @@ -0,0 +1,92 @@ +#pragma once + +#include +#include + +namespace DB +{ +/** Tansform-type wrapper for DateTime64, simplifies DateTime64 support for given Transform. + * + * Depending on what overloads of Transform::execute() are available, when called with DateTime64 value, + * invokes Transform::execute() with either: + * * whole part of DateTime64 value, discarding fractional part (1) + * * DateTime64 value and scale factor (2) + * * DateTime64 broken down to components, result of execute is then re-assembled back into DateTime64 value (3) + * + * Suitable Transfotm-types are commonly used in Date/DateTime manipulation functions, + * and should implement static (or const) function with following signatures: + * 1: + * R execute(Int64 whole_value, ... ) + * 2: + * R execute(DateTime64 value, Int64 scale_multiplier, ... ) + * 3: + * R execute(DecimalUtils::DecimalComponents components, ... 
) + * + * Where R could be of arbitrary type, in case of (3) if R is DecimalUtils::DecimalComponents, result is re-assembed back into DateTime64. +*/ +template +class TransformDateTime64 +{ +private: + // Detect if Transform::execute is const or static method + // with signature defined by template args (ignoring result type). + template + struct TransformHasExecuteOverload : std::false_type {}; + + template + struct TransformHasExecuteOverload().execute(std::declval()...))>, Args...> + : std::true_type {}; + + template + static constexpr bool TransformHasExecuteOverload_v = TransformHasExecuteOverload::value; + +public: + static constexpr auto name = Transform::name; + + // non-explicit constructor to allow creating from scale value (or with no scale at all), indispensable in some contexts. + TransformDateTime64(UInt32 scale_ = 0) + : scale_multiplier(DecimalUtils::scaleMultiplier(scale_)) + {} + + template + inline auto execute(const DateTime64 & t, Args && ... args) const + { + if constexpr (TransformHasExecuteOverload_v) + { + return wrapped_transform.execute(t, scale_multiplier, std::forward(args)...); + } + else if constexpr (TransformHasExecuteOverload_v, Args...>) + { + auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier); + + const auto result = wrapped_transform.execute(components, std::forward(args)...); + using ResultType = std::decay_t; + + if constexpr (std::is_same_v, ResultType>) + { + return DecimalUtils::decimalFromComponentsWithMultiplier(result, scale_multiplier); + } + else + { + return result; + } + } + else + { + const auto components = DecimalUtils::splitWithScaleMultiplier(t, scale_multiplier); + return wrapped_transform.execute(static_cast(components.whole), std::forward(args)...); + } + } + + template >>> + inline auto execute(const T & t, Args && ... 
args) const + { + return wrapped_transform.execute(t, std::forward(args)...); + } + +private: + DateTime64::NativeType scale_multiplier = 1; + Transform wrapped_transform = {}; +}; + +} diff --git a/src/Functions/dateDiff.cpp b/src/Functions/dateDiff.cpp index 54833eb359f..f660b92efc5 100644 --- a/src/Functions/dateDiff.cpp +++ b/src/Functions/dateDiff.cpp @@ -97,8 +97,8 @@ public: size_t rows = input_rows_count; auto res = ColumnInt64::create(rows); - const DateLUTImpl & timezone_x = extractTimeZoneFromFunctionArguments(arguments, 3, 1); - const DateLUTImpl & timezone_y = extractTimeZoneFromFunctionArguments(arguments, 3, 2); + const auto & timezone_x = extractTimeZoneFromFunctionArguments(arguments, 3, 1); + const auto & timezone_y = extractTimeZoneFromFunctionArguments(arguments, 3, 2); if (unit == "year" || unit == "yy" || unit == "yyyy") dispatchForColumns(x, y, timezone_x, timezone_y, res->getData()); diff --git a/src/Functions/extractTimeZoneFromFunctionArguments.cpp b/src/Functions/extractTimeZoneFromFunctionArguments.cpp index 9d6e54a599e..0ba08b3c612 100644 --- a/src/Functions/extractTimeZoneFromFunctionArguments.cpp +++ b/src/Functions/extractTimeZoneFromFunctionArguments.cpp @@ -66,10 +66,11 @@ const DateLUTImpl & extractTimeZoneFromFunctionArguments(const ColumnsWithTypeAn if (arguments.empty()) return DateLUT::instance(); + const auto & dt_arg = arguments[datetime_arg_num].type.get(); /// If time zone is attached to an argument of type DateTime. 
- if (const auto * type = checkAndGetDataType(arguments[datetime_arg_num].type.get())) + if (const auto * type = checkAndGetDataType(dt_arg)) return type->getTimeZone(); - if (const auto * type = checkAndGetDataType(arguments[datetime_arg_num].type.get())) + if (const auto * type = checkAndGetDataType(dt_arg)) return type->getTimeZone(); return DateLUT::instance(); diff --git a/src/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp index fd909ed6fce..5128f077c5a 100644 --- a/src/Functions/formatDateTime.cpp +++ b/src/Functions/formatDateTime.cpp @@ -46,9 +46,8 @@ template <> struct ActionValueTypeMap { using ActionValueTyp template <> struct ActionValueTypeMap { using ActionValueType = UInt32; }; template <> struct ActionValueTypeMap { using ActionValueType = UInt16; }; template <> struct ActionValueTypeMap { using ActionValueType = UInt32; }; -// TODO(vnemkov): once there is support for Int64 in LUT, make that Int64. // TODO(vnemkov): to add sub-second format instruction, make that DateTime64 and do some math in Action. 
-template <> struct ActionValueTypeMap { using ActionValueType = UInt32; }; +template <> struct ActionValueTypeMap { using ActionValueType = Int64; }; /** formatDateTime(time, 'pattern') @@ -434,7 +433,6 @@ public: time_zone_tmp = &DateLUT::instance(); const DateLUTImpl & time_zone = *time_zone_tmp; - const auto & vec = times->getData(); UInt32 scale [[maybe_unused]] = 0; @@ -519,6 +517,8 @@ public: { if constexpr (std::is_same_v) instructions.emplace_back(func, shift); + else if constexpr (std::is_same_v) + instructions.emplace_back(func, shift); else add_shift(shift); }; diff --git a/src/Functions/now64.cpp b/src/Functions/now64.cpp index feb821fde82..32c7a95de17 100644 --- a/src/Functions/now64.cpp +++ b/src/Functions/now64.cpp @@ -30,7 +30,7 @@ Field nowSubsecond(UInt32 scale) if (clock_gettime(CLOCK_REALTIME, &spec)) throwFromErrno("Cannot clock_gettime.", ErrorCodes::CANNOT_CLOCK_GETTIME); - DecimalUtils::DecimalComponents components{spec.tv_sec, spec.tv_nsec}; + DecimalUtils::DecimalComponents components{spec.tv_sec, spec.tv_nsec}; // clock_gettime produces subsecond part in nanoseconds, but decimalFromComponents fractional is scale-dependent. // Andjust fractional to scale, e.g. 
for 123456789 nanoseconds: diff --git a/src/Functions/toStartOfInterval.cpp b/src/Functions/toStartOfInterval.cpp index 6f5a52ca182..f194da166aa 100644 --- a/src/Functions/toStartOfInterval.cpp +++ b/src/Functions/toStartOfInterval.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -35,13 +36,18 @@ namespace static UInt16 execute(UInt16 d, UInt64 years, const DateLUTImpl & time_zone) { - return time_zone.toStartOfYearInterval(DayNum(d), years); + return time_zone.toStartOfYearInterval(ExtendedDayNum(d), years); } static UInt16 execute(UInt32 t, UInt64 years, const DateLUTImpl & time_zone) { return time_zone.toStartOfYearInterval(time_zone.toDayNum(t), years); } + + static UInt16 execute(Int64 t, UInt64 years, const DateLUTImpl & time_zone) + { + return time_zone.toStartOfYearInterval(time_zone.toDayNum(t), years); + } }; template <> @@ -51,13 +57,18 @@ namespace static UInt16 execute(UInt16 d, UInt64 quarters, const DateLUTImpl & time_zone) { - return time_zone.toStartOfQuarterInterval(DayNum(d), quarters); + return time_zone.toStartOfQuarterInterval(ExtendedDayNum(d), quarters); } static UInt16 execute(UInt32 t, UInt64 quarters, const DateLUTImpl & time_zone) { return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t), quarters); } + + static UInt16 execute(Int64 t, UInt64 quarters, const DateLUTImpl & time_zone) + { + return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t), quarters); + } }; template <> @@ -67,13 +78,18 @@ namespace static UInt16 execute(UInt16 d, UInt64 months, const DateLUTImpl & time_zone) { - return time_zone.toStartOfMonthInterval(DayNum(d), months); + return time_zone.toStartOfMonthInterval(ExtendedDayNum(d), months); } static UInt16 execute(UInt32 t, UInt64 months, const DateLUTImpl & time_zone) { return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t), months); } + + static UInt16 execute(Int64 t, UInt64 months, const DateLUTImpl & time_zone) + { + return 
time_zone.toStartOfMonthInterval(time_zone.toDayNum(t), months); + } }; template <> @@ -83,13 +99,18 @@ namespace static UInt16 execute(UInt16 d, UInt64 weeks, const DateLUTImpl & time_zone) { - return time_zone.toStartOfWeekInterval(DayNum(d), weeks); + return time_zone.toStartOfWeekInterval(ExtendedDayNum(d), weeks); } static UInt16 execute(UInt32 t, UInt64 weeks, const DateLUTImpl & time_zone) { return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t), weeks); } + + static UInt16 execute(Int64 t, UInt64 weeks, const DateLUTImpl & time_zone) + { + return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t), weeks); + } }; template <> @@ -99,13 +120,18 @@ namespace static UInt32 execute(UInt16 d, UInt64 days, const DateLUTImpl & time_zone) { - return time_zone.toStartOfDayInterval(DayNum(d), days); + return time_zone.toStartOfDayInterval(ExtendedDayNum(d), days); } static UInt32 execute(UInt32 t, UInt64 days, const DateLUTImpl & time_zone) { return time_zone.toStartOfDayInterval(time_zone.toDayNum(t), days); } + + static UInt32 execute(Int64 t, UInt64 days, const DateLUTImpl & time_zone) + { + return time_zone.toStartOfDayInterval(time_zone.toDayNum(t), days); + } }; template <> @@ -114,8 +140,8 @@ namespace static constexpr auto name = function_name; static UInt32 execute(UInt16, UInt64, const DateLUTImpl &) { return dateIsNotSupported(function_name); } - static UInt32 execute(UInt32 t, UInt64 hours, const DateLUTImpl & time_zone) { return time_zone.toStartOfHourInterval(t, hours); } + static UInt32 execute(Int64 t, UInt64 hours, const DateLUTImpl & time_zone) { return time_zone.toStartOfHourInterval(t, hours); } }; template <> @@ -129,6 +155,11 @@ namespace { return time_zone.toStartOfMinuteInterval(t, minutes); } + + static UInt32 execute(Int64 t, UInt64 minutes, const DateLUTImpl & time_zone) + { + return time_zone.toStartOfMinuteInterval(t, minutes); + } }; template <> @@ -142,6 +173,11 @@ namespace { return time_zone.toStartOfSecondInterval(t, seconds); 
} + + static Int64 execute(Int64 t, UInt64 seconds, const DateLUTImpl & time_zone) + { + return time_zone.toStartOfSecondInterval(t, seconds); + } }; @@ -230,7 +266,7 @@ public: { const auto & time_column = arguments[0]; const auto & interval_column = arguments[1]; - const DateLUTImpl & time_zone = extractTimeZoneFromFunctionArguments(arguments, 2, 0); + const auto & time_zone = extractTimeZoneFromFunctionArguments(arguments, 2, 0); auto result_column = dispatchForColumns(time_column, interval_column, time_zone); return result_column; } diff --git a/src/Functions/today.cpp b/src/Functions/today.cpp index 65373058540..511af881d73 100644 --- a/src/Functions/today.cpp +++ b/src/Functions/today.cpp @@ -77,7 +77,7 @@ public: FunctionBaseImplPtr build(const ColumnsWithTypeAndName &, const DataTypePtr &) const override { - return std::make_unique(DateLUT::instance().toDayNum(time(nullptr))); + return std::make_unique(DayNum(DateLUT::instance().toDayNum(time(nullptr)).toUnderType())); } }; diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index e33de04f322..369237f329d 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -747,7 +747,7 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re return ReturnType(false); } - DB::DecimalUtils::DecimalComponents components{static_cast(whole), 0}; + DB::DecimalUtils::DecimalComponents components{static_cast(whole), 0}; if (!buf.eof() && *buf.position() == '.') { @@ -791,9 +791,9 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re return ReturnType(true); } -inline void readDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) +inline void readDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { - readDateTimeTextImpl(datetime, buf, date_lut); + readDateTimeTextImpl(datetime, buf, time_zone); } inline void readDateTime64Text(DateTime64 & datetime64, UInt32 
scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) @@ -801,9 +801,9 @@ inline void readDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer readDateTimeTextImpl(datetime64, scale, buf, date_lut); } -inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) +inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { - return readDateTimeTextImpl(datetime, buf, date_lut); + return readDateTimeTextImpl(datetime, buf, time_zone); } inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index a382ae13cdd..72c6b69114f 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -819,12 +819,12 @@ inline void writeDateTimeText(const LocalDateTime & datetime, WriteBuffer & buf) /// In the format YYYY-MM-DD HH:MM:SS, according to the specified time zone. template -inline void writeDateTimeText(time_t datetime, WriteBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) +inline void writeDateTimeText(time_t datetime, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { - const auto & values = date_lut.getValues(datetime); + const auto & values = time_zone.getValues(datetime); writeDateTimeText( LocalDateTime(values.year, values.month, values.day_of_month, - date_lut.toHour(datetime), date_lut.toMinute(datetime), date_lut.toSecond(datetime)), buf); + time_zone.toHour(datetime), time_zone.toMinute(datetime), time_zone.toSecond(datetime)), buf); } /// In the format YYYY-MM-DD HH:MM:SS.NNNNNNNNN, according to the specified time zone. @@ -849,9 +849,9 @@ inline void writeDateTimeText(DateTime64 datetime64, UInt32 scale, WriteBuffer & /// In the RFC 1123 format: "Tue, 03 Dec 2019 00:11:50 GMT". You must provide GMT DateLUT. 
/// This is needed for HTTP requests. -inline void writeDateTimeTextRFC1123(time_t datetime, WriteBuffer & buf, const DateLUTImpl & date_lut) +inline void writeDateTimeTextRFC1123(time_t datetime, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { - const auto & values = date_lut.getValues(datetime); + const auto & values = time_zone.getValues(datetime); static const char week_days[3 * 8 + 1] = "XXX" "Mon" "Tue" "Wed" "Thu" "Fri" "Sat" "Sun"; static const char months[3 * 13 + 1] = "XXX" "Jan" "Feb" "Mar" "Apr" "May" "Jun" "Jul" "Aug" "Sep" "Oct" "Nov" "Dec"; @@ -865,11 +865,11 @@ inline void writeDateTimeTextRFC1123(time_t datetime, WriteBuffer & buf, const D buf.write(&digits100[values.year / 100 * 2], 2); buf.write(&digits100[values.year % 100 * 2], 2); buf.write(' '); - buf.write(&digits100[date_lut.toHour(datetime) * 2], 2); + buf.write(&digits100[time_zone.toHour(datetime) * 2], 2); buf.write(':'); - buf.write(&digits100[date_lut.toMinute(datetime) * 2], 2); + buf.write(&digits100[time_zone.toMinute(datetime) * 2], 2); buf.write(':'); - buf.write(&digits100[date_lut.toSecond(datetime) * 2], 2); + buf.write(&digits100[time_zone.toSecond(datetime) * 2], 2); buf.write(" GMT", 4); } diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index 47a298ede29..26745a8f138 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -600,7 +600,7 @@ ReturnType parseDateTimeBestEffortImpl( return ReturnType(true); } -template +template ReturnType parseDateTime64BestEffortImpl(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) { time_t whole; @@ -608,12 +608,12 @@ ReturnType parseDateTime64BestEffortImpl(DateTime64 & res, UInt32 scale, ReadBuf if constexpr (std::is_same_v) { - if (!parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond)) + if (!parseDateTimeBestEffortImpl(whole, in, 
local_time_zone, utc_time_zone, &subsecond)) return false; } else { - parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond); + parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond); } @@ -661,12 +661,17 @@ bool tryParseDateTimeBestEffortUS(time_t & res, ReadBuffer & in, const DateLUTIm void parseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) { - return parseDateTime64BestEffortImpl(res, scale, in, local_time_zone, utc_time_zone); + return parseDateTime64BestEffortImpl(res, scale, in, local_time_zone, utc_time_zone); +} + +void parseDateTime64BestEffortUS(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) +{ + return parseDateTime64BestEffortImpl(res, scale, in, local_time_zone, utc_time_zone); } bool tryParseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) { - return parseDateTime64BestEffortImpl(res, scale, in, local_time_zone, utc_time_zone); + return parseDateTime64BestEffortImpl(res, scale, in, local_time_zone, utc_time_zone); } } diff --git a/src/IO/parseDateTimeBestEffort.h b/src/IO/parseDateTimeBestEffort.h index 65e92cbee42..fe3da24a797 100644 --- a/src/IO/parseDateTimeBestEffort.h +++ b/src/IO/parseDateTimeBestEffort.h @@ -61,6 +61,7 @@ bool tryParseDateTimeBestEffort(time_t & res, ReadBuffer & in, const DateLUTImpl void parseDateTimeBestEffortUS(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone); bool tryParseDateTimeBestEffortUS(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone); void parseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone); +void 
parseDateTime64BestEffortUS(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone); bool tryParseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone); } diff --git a/src/Interpreters/CrashLog.cpp b/src/Interpreters/CrashLog.cpp index bf81a2e8aba..5067acd4a5c 100644 --- a/src/Interpreters/CrashLog.cpp +++ b/src/Interpreters/CrashLog.cpp @@ -40,7 +40,7 @@ void CrashLogElement::appendToBlock(MutableColumns & columns) const { size_t i = 0; - columns[i++]->insert(DateLUT::instance().toDayNum(event_time)); + columns[i++]->insert(DateLUT::instance().toDayNum(event_time).toUnderType()); columns[i++]->insert(event_time); columns[i++]->insert(timestamp_ns); columns[i++]->insert(signal); diff --git a/src/Interpreters/MetricLog.cpp b/src/Interpreters/MetricLog.cpp index ce5d5793b87..fd1c120f18c 100644 --- a/src/Interpreters/MetricLog.cpp +++ b/src/Interpreters/MetricLog.cpp @@ -41,7 +41,7 @@ void MetricLogElement::appendToBlock(MutableColumns & columns) const { size_t column_idx = 0; - columns[column_idx++]->insert(DateLUT::instance().toDayNum(event_time)); + columns[column_idx++]->insert(DateLUT::instance().toDayNum(event_time).toUnderType()); columns[column_idx++]->insert(event_time); columns[column_idx++]->insert(event_time_microseconds); columns[column_idx++]->insert(milliseconds); diff --git a/src/Interpreters/OpenTelemetrySpanLog.cpp b/src/Interpreters/OpenTelemetrySpanLog.cpp index e1df145cf51..f9ae6518af0 100644 --- a/src/Interpreters/OpenTelemetrySpanLog.cpp +++ b/src/Interpreters/OpenTelemetrySpanLog.cpp @@ -49,7 +49,7 @@ void OpenTelemetrySpanLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(operation_name); columns[i++]->insert(start_time_us); columns[i++]->insert(finish_time_us); - columns[i++]->insert(DateLUT::instance().toDayNum(finish_time_us / 1000000)); + 
columns[i++]->insert(DateLUT::instance().toDayNum(finish_time_us / 1000000).toUnderType()); columns[i++]->insert(attribute_names); // The user might add some ints values, and we will have Int Field, and the // insert will fail because the column requires Strings. Convert the fields diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index 860666a0035..c180a4dd254 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -71,7 +71,7 @@ void PartLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(query_id); columns[i++]->insert(event_type); - columns[i++]->insert(DateLUT::instance().toDayNum(event_time)); + columns[i++]->insert(DateLUT::instance().toDayNum(event_time).toUnderType()); columns[i++]->insert(event_time); columns[i++]->insert(event_time_microseconds); columns[i++]->insert(duration_ms); diff --git a/src/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp index 82b957f895b..b6902468242 100644 --- a/src/Interpreters/QueryLog.cpp +++ b/src/Interpreters/QueryLog.cpp @@ -119,7 +119,7 @@ void QueryLogElement::appendToBlock(MutableColumns & columns) const size_t i = 0; columns[i++]->insert(type); - columns[i++]->insert(DateLUT::instance().toDayNum(event_time)); + columns[i++]->insert(DateLUT::instance().toDayNum(event_time).toUnderType()); columns[i++]->insert(event_time); columns[i++]->insert(event_time_microseconds); columns[i++]->insert(query_start_time); diff --git a/src/Interpreters/QueryThreadLog.cpp b/src/Interpreters/QueryThreadLog.cpp index f1cce1a3da9..31f1fddc87f 100644 --- a/src/Interpreters/QueryThreadLog.cpp +++ b/src/Interpreters/QueryThreadLog.cpp @@ -76,7 +76,7 @@ void QueryThreadLogElement::appendToBlock(MutableColumns & columns) const { size_t i = 0; - columns[i++]->insert(DateLUT::instance().toDayNum(event_time)); + columns[i++]->insert(DateLUT::instance().toDayNum(event_time).toUnderType()); columns[i++]->insert(event_time); 
columns[i++]->insert(event_time_microseconds); columns[i++]->insert(query_start_time); diff --git a/src/Interpreters/TextLog.cpp b/src/Interpreters/TextLog.cpp index f60b6acae6f..489bb302ad0 100644 --- a/src/Interpreters/TextLog.cpp +++ b/src/Interpreters/TextLog.cpp @@ -55,7 +55,7 @@ void TextLogElement::appendToBlock(MutableColumns & columns) const { size_t i = 0; - columns[i++]->insert(DateLUT::instance().toDayNum(event_time)); + columns[i++]->insert(DateLUT::instance().toDayNum(event_time).toUnderType()); columns[i++]->insert(event_time); columns[i++]->insert(event_time_microseconds); columns[i++]->insert(microseconds); diff --git a/src/Interpreters/TraceLog.cpp b/src/Interpreters/TraceLog.cpp index 40bcc0db445..fe7512f2f00 100644 --- a/src/Interpreters/TraceLog.cpp +++ b/src/Interpreters/TraceLog.cpp @@ -42,7 +42,7 @@ void TraceLogElement::appendToBlock(MutableColumns & columns) const { size_t i = 0; - columns[i++]->insert(DateLUT::instance().toDayNum(event_time)); + columns[i++]->insert(DateLUT::instance().toDayNum(event_time).toUnderType()); columns[i++]->insert(event_time); columns[i++]->insert(event_time_microseconds); columns[i++]->insert(timestamp_ns); diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 1d93ef56dea..d47f64cb1dc 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -141,7 +141,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID /// Conversion between Date and DateTime and vice versa. 
if (which_type.isDate() && which_from_type.isDateTime()) { - return static_cast(*from_type_hint).getTimeZone().toDayNum(src.get()); + return static_cast(static_cast(*from_type_hint).getTimeZone().toDayNum(src.get()).toUnderType()); } else if (which_type.isDateTime() && which_from_type.isDate()) { diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index f2c88cdedd9..8bf785afa0f 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -4369,7 +4369,7 @@ static String getPartNamePossiblyFake(MergeTreeDataFormatVersion format_version, /// The date range is all month long. const auto & lut = DateLUT::instance(); time_t start_time = lut.YYYYMMDDToDate(parse(part_info.partition_id + "01")); - DayNum left_date = lut.toDayNum(start_time); + DayNum left_date = DayNum{lut.toDayNum(start_time).toUnderType()}; DayNum right_date = DayNum(static_cast(left_date) + lut.daysInMonth(start_time) - 1); return part_info.getPartNameV0(left_date, right_date); } diff --git a/src/Storages/tests/part_name.cpp b/src/Storages/tests/part_name.cpp index 79c5578a8ca..aeadfd208cc 100644 --- a/src/Storages/tests/part_name.cpp +++ b/src/Storages/tests/part_name.cpp @@ -5,7 +5,7 @@ int main(int, char **) { - DayNum today = DateLUT::instance().toDayNum(time(nullptr)); + const DayNum today{DateLUT::instance().toDayNum(time(nullptr)).toUnderType()}; for (DayNum date = today; DayNum(date + 10) > today; --date) { diff --git a/tests/queries/0_stateless/00921_datetime64_compatibility.python b/tests/queries/0_stateless/00921_datetime64_compatibility.python index bf0ae8a72ac..c8b9620629d 100644 --- a/tests/queries/0_stateless/00921_datetime64_compatibility.python +++ b/tests/queries/0_stateless/00921_datetime64_compatibility.python @@ -86,8 +86,7 @@ CAST(N as DateTime64(9, 'Europe/Minsk')) formatDateTime(N, '%C %d %D %e %F %H %I %j %m %M %p %R %S %T %u %V %w %y %Y %%') """.splitlines() -# Expanded later to 
cartesian product of all arguments. -# NOTE: {N} to be turned into N after str.format() for keys (format string), but not for list of values! +# Expanded later to cartesian product of all arguments, using format string. extra_ops = [ # With same type: ( @@ -179,7 +178,7 @@ def escape_string(s): def execute_functions_for_types(functions, types): - # TODO: use string.Template here to allow lines that do not contain type, like: SELECT CAST(toDateTime64(1234567890), 'DateTime64') + # NOTE: use string.Template here to allow lines with missing keys, like type, e.g. SELECT CAST(toDateTime64(1234567890), 'DateTime64') for func in functions: print(("""SELECT 'SELECT {func}';""".format(func=escape_string(func)))) for dt in types: diff --git a/tests/queries/0_stateless/00921_datetime64_compatibility.reference b/tests/queries/0_stateless/00921_datetime64_compatibility.reference index 004f4f5e824..67413512e06 100644 --- a/tests/queries/0_stateless/00921_datetime64_compatibility.reference +++ b/tests/queries/0_stateless/00921_datetime64_compatibility.reference @@ -1,5 +1,4 @@ SELECT toTimeZone(N, \'UTC\') - Code: 43 "DateTime('UTC')","2019-09-16 16:20:11" "DateTime64(3, 'UTC')","2019-09-16 16:20:11.234" @@ -35,25 +34,21 @@ SELECT toDayOfWeek(N) "UInt8",1 ------------------------------------------ SELECT toHour(N) - Code: 43 "UInt8",19 "UInt8",19 ------------------------------------------ SELECT toMinute(N) - Code: 43 "UInt8",20 "UInt8",20 ------------------------------------------ SELECT toSecond(N) - Code: 43 "UInt8",11 "UInt8",11 ------------------------------------------ SELECT toUnixTimestamp(N) - Code: 44 "UInt32",1568650811 "UInt32",1568650811 @@ -94,31 +89,26 @@ SELECT toStartOfDay(N) "DateTime('Europe/Minsk')","2019-09-16 00:00:00" ------------------------------------------ SELECT toStartOfHour(N) - Code: 43 "DateTime('Europe/Minsk')","2019-09-16 19:00:00" "DateTime('Europe/Minsk')","2019-09-16 19:00:00" ------------------------------------------ SELECT 
toStartOfMinute(N) - Code: 43 "DateTime('Europe/Minsk')","2019-09-16 19:20:00" "DateTime('Europe/Minsk')","2019-09-16 19:20:00" ------------------------------------------ SELECT toStartOfFiveMinute(N) - Code: 43 "DateTime('Europe/Minsk')","2019-09-16 19:20:00" "DateTime('Europe/Minsk')","2019-09-16 19:20:00" ------------------------------------------ SELECT toStartOfTenMinutes(N) - Code: 43 "DateTime('Europe/Minsk')","2019-09-16 19:20:00" "DateTime('Europe/Minsk')","2019-09-16 19:20:00" ------------------------------------------ SELECT toStartOfFifteenMinutes(N) - Code: 43 "DateTime('Europe/Minsk')","2019-09-16 19:15:00" "DateTime('Europe/Minsk')","2019-09-16 19:15:00" @@ -139,7 +129,6 @@ SELECT toStartOfInterval(N, INTERVAL 1 day) "DateTime('Europe/Minsk')","2019-09-16 00:00:00" ------------------------------------------ SELECT toStartOfInterval(N, INTERVAL 15 minute) - Code: 43 "DateTime('Europe/Minsk')","2019-09-16 19:15:00" "DateTime('Europe/Minsk')","2019-09-16 19:15:00" @@ -160,13 +149,11 @@ SELECT date_trunc(\'day\', N) "DateTime('Europe/Minsk')","2019-09-16 00:00:00" ------------------------------------------ SELECT date_trunc(\'minute\', N) - Code: 43 "DateTime('Europe/Minsk')","2019-09-16 19:20:00" "DateTime('Europe/Minsk')","2019-09-16 19:20:00" ------------------------------------------ SELECT toTime(N) - Code: 43 "DateTime('Europe/Minsk')","1970-01-02 19:20:11" "DateTime('Europe/Minsk')","1970-01-02 19:20:11" @@ -232,7 +219,6 @@ SELECT toYearWeek(N) "UInt32",201937 ------------------------------------------ SELECT timeSlot(N) - Code: 43 "DateTime('Europe/Minsk')","2019-09-16 19:00:00" "DateTime('Europe/Minsk')","2019-09-16 19:00:00" @@ -375,15 +361,11 @@ SELECT formatDateTime(N, \'%C %d %D %e %F %H %I %j %m %M %p %R %S %T %u %V %w %y SELECT N - N "Int32",0 "Int32",0 - Code: 43 ------------------------------------------ SELECT N + N - Code: 43 - Code: 43 - Code: 43 ------------------------------------------ SELECT N != N @@ -417,47 +399,33 @@ SELECT N 
>= N "UInt8",1 ------------------------------------------ SELECT N - DT - Code: 43 "Int32",0 - Code: 43 ------------------------------------------ SELECT DT - N - Code: 43 "Int32",0 - Code: 43 ------------------------------------------ SELECT N - D "Int32",0 - Code: 43 - Code: 43 ------------------------------------------ SELECT D - N "Int32",0 - Code: 43 - Code: 43 ------------------------------------------ SELECT N - DT64 - Code: 43 - Code: 43 - Code: 43 ------------------------------------------ SELECT DT64 - N - Code: 43 - Code: 43 - Code: 43 ------------------------------------------ SELECT N != DT @@ -726,11 +694,8 @@ SELECT N - toUInt8(1) "DateTime64(3, 'Europe/Minsk')","2019-09-16 19:20:10.234" ------------------------------------------ SELECT toUInt8(1) - N - Code: 43 - Code: 43 - Code: 43 ------------------------------------------ SELECT N - toInt8(-1) @@ -739,11 +704,8 @@ SELECT N - toInt8(-1) "DateTime64(3, 'Europe/Minsk')","2019-09-16 19:20:12.234" ------------------------------------------ SELECT toInt8(-1) - N - Code: 43 - Code: 43 - Code: 43 ------------------------------------------ SELECT N - toUInt16(1) @@ -752,11 +714,8 @@ SELECT N - toUInt16(1) "DateTime64(3, 'Europe/Minsk')","2019-09-16 19:20:10.234" ------------------------------------------ SELECT toUInt16(1) - N - Code: 43 - Code: 43 - Code: 43 ------------------------------------------ SELECT N - toInt16(-1) @@ -765,11 +724,8 @@ SELECT N - toInt16(-1) "DateTime64(3, 'Europe/Minsk')","2019-09-16 19:20:12.234" ------------------------------------------ SELECT toInt16(-1) - N - Code: 43 - Code: 43 - Code: 43 ------------------------------------------ SELECT N - toUInt32(1) @@ -778,11 +734,8 @@ SELECT N - toUInt32(1) "DateTime64(3, 'Europe/Minsk')","2019-09-16 19:20:10.234" ------------------------------------------ SELECT toUInt32(1) - N - Code: 43 - Code: 43 - Code: 43 ------------------------------------------ SELECT N - toInt32(-1) @@ -791,11 +744,8 @@ SELECT N - toInt32(-1) "DateTime64(3, 
'Europe/Minsk')","2019-09-16 19:20:12.234" ------------------------------------------ SELECT toInt32(-1) - N - Code: 43 - Code: 43 - Code: 43 ------------------------------------------ SELECT N - toUInt64(1) @@ -804,11 +754,8 @@ SELECT N - toUInt64(1) "DateTime64(3, 'Europe/Minsk')","2019-09-16 19:20:10.234" ------------------------------------------ SELECT toUInt64(1) - N - Code: 43 - Code: 43 - Code: 43 ------------------------------------------ SELECT N - toInt64(-1) @@ -817,585 +764,486 @@ SELECT N - toInt64(-1) "DateTime64(3, 'Europe/Minsk')","2019-09-16 19:20:12.234" ------------------------------------------ SELECT toInt64(-1) - N - Code: 43 - Code: 43 - Code: 43 ------------------------------------------ SELECT N == toUInt8(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toUInt8(1) == N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N == toInt8(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt8(-1) == N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N == toUInt16(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toUInt16(1) == N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N == toInt16(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt16(-1) == N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N == toUInt32(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toUInt32(1) == N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N == toInt32(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt32(-1) == N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N == toUInt64(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT 
toUInt64(1) == N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N == toInt64(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt64(-1) == N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N != toUInt8(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt8(1) != N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N != toInt8(-1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toInt8(-1) != N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N != toUInt16(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt16(1) != N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N != toInt16(-1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toInt16(-1) != N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N != toUInt32(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt32(1) != N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N != toInt32(-1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toInt32(-1) != N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N != toUInt64(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt64(1) != N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N != toInt64(-1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toInt64(-1) != N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N < toUInt8(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toUInt8(1) < N - Code: 43 "UInt8",1 "UInt8",1 
------------------------------------------ SELECT N < toInt8(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt8(-1) < N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N < toUInt16(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toUInt16(1) < N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N < toInt16(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt16(-1) < N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N < toUInt32(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toUInt32(1) < N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N < toInt32(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt32(-1) < N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N < toUInt64(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toUInt64(1) < N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N < toInt64(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt64(-1) < N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N <= toUInt8(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toUInt8(1) <= N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N <= toInt8(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt8(-1) <= N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N <= toUInt16(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toUInt16(1) <= N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N <= 
toInt16(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt16(-1) <= N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N <= toUInt32(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toUInt32(1) <= N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N <= toInt32(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt32(-1) <= N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N <= toUInt64(1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toUInt64(1) <= N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N <= toInt64(-1) - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT toInt64(-1) <= N - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT N > toUInt8(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt8(1) > N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N > toInt8(-1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toInt8(-1) > N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N > toUInt16(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt16(1) > N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N > toInt16(-1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toInt16(-1) > N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N > toUInt32(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt32(1) > N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N > toInt32(-1) - Code: 43 "UInt8",1 "UInt8",1 
------------------------------------------ SELECT toInt32(-1) > N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N > toUInt64(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt64(1) > N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N > toInt64(-1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toInt64(-1) > N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N >= toUInt8(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt8(1) >= N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N >= toInt8(-1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toInt8(-1) >= N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N >= toUInt16(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt16(1) >= N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N >= toInt16(-1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toInt16(-1) >= N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N >= toUInt32(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt32(1) >= N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N >= toInt32(-1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toInt32(-1) >= N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N >= toUInt64(1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ SELECT toUInt64(1) >= N - Code: 43 "UInt8",0 "UInt8",0 ------------------------------------------ SELECT N >= toInt64(-1) - Code: 43 "UInt8",1 "UInt8",1 ------------------------------------------ 
SELECT toInt64(-1) >= N - Code: 43 "UInt8",0 "UInt8",0 diff --git a/tests/queries/0_stateless/00921_datetime64_compatibility.sh b/tests/queries/0_stateless/00921_datetime64_compatibility.sh index 1617e5b1f77..5f5034819e4 100755 --- a/tests/queries/0_stateless/00921_datetime64_compatibility.sh +++ b/tests/queries/0_stateless/00921_datetime64_compatibility.sh @@ -13,4 +13,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) python3 "${CURDIR}"/00921_datetime64_compatibility.python \ | ${CLICKHOUSE_CLIENT} --ignore-error -T -nm --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \ - | sed 's/Received exception .*//g; s/^\(Code: [0-9]\+\).*$/\1/g' + | grep -v 'Received exception .*$' | sed 's/^\(Code: [0-9]\+\).*$/\1/g' diff --git a/tests/queries/0_stateless/01252_weird_time_zone.reference b/tests/queries/0_stateless/01252_weird_time_zone.reference index f2968d4efa6..90f5bf0e30d 100644 --- a/tests/queries/0_stateless/01252_weird_time_zone.reference +++ b/tests/queries/0_stateless/01252_weird_time_zone.reference @@ -1,7 +1,7 @@ -2020-01-02 03:04:05 2020-01-02 00:00:00 3 -2020-01-02 03:04:05 2020-01-02 00:00:00 3 -2020-01-02 03:04:05 2020-01-02 00:00:00 3 -2020-01-02 03:04:05 2020-01-02 00:00:00 3 -2020-01-02 03:04:05 2020-01-02 00:00:00 3 -2020-01-02 03:04:05 2020-01-02 00:00:00 3 -2020-01-02 03:04:05 2020-01-02 00:00:00 3 +Pacific/Kiritimati 2020-01-02 03:04:05 2020-01-02 00:00:00 3 +Africa/El_Aaiun 2020-01-02 03:04:05 2020-01-02 00:00:00 3 +Asia/Pyongyang 2020-01-02 03:04:05 2020-01-02 00:00:00 3 +Pacific/Kwajalein 2020-01-02 03:04:05 2020-01-02 00:00:00 3 +Pacific/Apia 2020-01-02 03:04:05 2020-01-02 00:00:00 3 +Pacific/Enderbury 2020-01-02 03:04:05 2020-01-02 00:00:00 3 +Pacific/Fakaofo 2020-01-02 03:04:05 2020-01-02 00:00:00 3 diff --git a/tests/queries/0_stateless/01252_weird_time_zone.sql b/tests/queries/0_stateless/01252_weird_time_zone.sql index 68ea903a797..c4919ca4fe0 100644 --- a/tests/queries/0_stateless/01252_weird_time_zone.sql +++ 
b/tests/queries/0_stateless/01252_weird_time_zone.sql @@ -1,15 +1,15 @@ -SELECT toDateTime('2020-01-02 03:04:05', 'Pacific/Kiritimati') AS x, toStartOfDay(x), toHour(x); -SELECT toDateTime('2020-01-02 03:04:05', 'Africa/El_Aaiun') AS x, toStartOfDay(x), toHour(x); -SELECT toDateTime('2020-01-02 03:04:05', 'Asia/Pyongyang') AS x, toStartOfDay(x), toHour(x); -SELECT toDateTime('2020-01-02 03:04:05', 'Pacific/Kwajalein') AS x, toStartOfDay(x), toHour(x); -SELECT toDateTime('2020-01-02 03:04:05', 'Pacific/Apia') AS x, toStartOfDay(x), toHour(x); -SELECT toDateTime('2020-01-02 03:04:05', 'Pacific/Enderbury') AS x, toStartOfDay(x), toHour(x); -SELECT toDateTime('2020-01-02 03:04:05', 'Pacific/Fakaofo') AS x, toStartOfDay(x), toHour(x); +SELECT 'Pacific/Kiritimati', toDateTime('2020-01-02 03:04:05', 'Pacific/Kiritimati') AS x, toStartOfDay(x), toHour(x); +SELECT 'Africa/El_Aaiun', toDateTime('2020-01-02 03:04:05', 'Africa/El_Aaiun') AS x, toStartOfDay(x), toHour(x); +SELECT 'Asia/Pyongyang', toDateTime('2020-01-02 03:04:05', 'Asia/Pyongyang') AS x, toStartOfDay(x), toHour(x); +SELECT 'Pacific/Kwajalein', toDateTime('2020-01-02 03:04:05', 'Pacific/Kwajalein') AS x, toStartOfDay(x), toHour(x); +SELECT 'Pacific/Apia', toDateTime('2020-01-02 03:04:05', 'Pacific/Apia') AS x, toStartOfDay(x), toHour(x); +SELECT 'Pacific/Enderbury', toDateTime('2020-01-02 03:04:05', 'Pacific/Enderbury') AS x, toStartOfDay(x), toHour(x); +SELECT 'Pacific/Fakaofo', toDateTime('2020-01-02 03:04:05', 'Pacific/Fakaofo') AS x, toStartOfDay(x), toHour(x); -SELECT toHour(toDateTime(rand(), 'Pacific/Kiritimati') AS t) AS h, t FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; -SELECT toHour(toDateTime(rand(), 'Africa/El_Aaiun') AS t) AS h, t FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; -SELECT toHour(toDateTime(rand(), 'Asia/Pyongyang') AS t) AS h, t FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; -SELECT toHour(toDateTime(rand(), 
'Pacific/Kwajalein') AS t) AS h, t FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; -SELECT toHour(toDateTime(rand(), 'Pacific/Apia') AS t) AS h, t FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; -SELECT toHour(toDateTime(rand(), 'Pacific/Enderbury') AS t) AS h, t FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; -SELECT toHour(toDateTime(rand(), 'Pacific/Fakaofo') AS t) AS h, t FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Pacific/Kiritimati', rand() as r, toHour(toDateTime(r, 'Pacific/Kiritimati') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Africa/El_Aaiun', rand() as r, toHour(toDateTime(r, 'Africa/El_Aaiun') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Asia/Pyongyang', rand() as r, toHour(toDateTime(r, 'Asia/Pyongyang') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Pacific/Kwajalein', rand() as r, toHour(toDateTime(r, 'Pacific/Kwajalein') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Pacific/Apia', rand() as r, toHour(toDateTime(r, 'Pacific/Apia') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Pacific/Enderbury', rand() as r, toHour(toDateTime(r, 'Pacific/Enderbury') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Pacific/Fakaofo', rand() as r, toHour(toDateTime(r, 'Pacific/Fakaofo') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; diff --git a/tests/queries/0_stateless/01440_to_date_monotonicity.reference b/tests/queries/0_stateless/01440_to_date_monotonicity.reference index 96732e5996c..74716fe6223 100644 --- 
a/tests/queries/0_stateless/01440_to_date_monotonicity.reference +++ b/tests/queries/0_stateless/01440_to_date_monotonicity.reference @@ -1,4 +1,4 @@ 0 -1970-01-01 2106-02-07 1970-04-11 1970-01-01 2106-02-07 +1970-01-01 2106-02-07 1970-04-11 1970-01-01 2149-06-06 1970-01-01 03:00:00 2106-02-07 09:28:15 1970-01-01 03:16:40 2000-01-01 13:12:12 diff --git a/tests/queries/0_stateless/01561_Date_and_DateTime64_comparision.sql b/tests/queries/0_stateless/01561_Date_and_DateTime64_comparision.sql index 7e75d871e07..a61bcff4db7 100644 --- a/tests/queries/0_stateless/01561_Date_and_DateTime64_comparision.sql +++ b/tests/queries/0_stateless/01561_Date_and_DateTime64_comparision.sql @@ -6,7 +6,7 @@ SELECT dt64 < d, toDate(dt64) < d, dt64 < toDateTime64(d, 1, 'UTC'), - + '<=', dt64 <= d, toDate(dt64) <= d, @@ -16,7 +16,7 @@ SELECT dt64 = d, toDate(dt64) = d, dt64 = toDateTime64(d, 1, 'UTC'), - + '>=', dt64 >= d, toDate(dt64) >= d, @@ -31,7 +31,7 @@ SELECT dt64 != d, toDate(dt64) != d, dt64 != toDateTime64(d, 1, 'UTC') -FROM +FROM ( WITH toDateTime('2019-09-16 19:20:11') as val SELECT diff --git a/tests/queries/0_stateless/01631_date_overflow_as_partition_key.reference b/tests/queries/0_stateless/01631_date_overflow_as_partition_key.reference index dbcd92da11c..62f620f3ba9 100644 --- a/tests/queries/0_stateless/01631_date_overflow_as_partition_key.reference +++ b/tests/queries/0_stateless/01631_date_overflow_as_partition_key.reference @@ -1,2 +1,2 @@ -1970-01-01 1 -1970-01-01 1 +2106-11-11 1 +2106-11-12 1 diff --git a/tests/queries/0_stateless/01631_date_overflow_as_partition_key.sql b/tests/queries/0_stateless/01631_date_overflow_as_partition_key.sql index f252e10806a..9a8d37084fb 100644 --- a/tests/queries/0_stateless/01631_date_overflow_as_partition_key.sql +++ b/tests/queries/0_stateless/01631_date_overflow_as_partition_key.sql @@ -6,6 +6,6 @@ insert into dt_overflow values('2106-11-11', 1); insert into dt_overflow values('2106-11-12', 1); -select * from dt_overflow; 
+select * from dt_overflow ORDER BY d; drop table if exists dt_overflow; diff --git a/tests/queries/0_stateless/01691_DateTime64_clamp.reference b/tests/queries/0_stateless/01691_DateTime64_clamp.reference index 3adc9a17e5c..da80de59e50 100644 --- a/tests/queries/0_stateless/01691_DateTime64_clamp.reference +++ b/tests/queries/0_stateless/01691_DateTime64_clamp.reference @@ -1,4 +1,5 @@ -- { echo } +<<<<<<< HEAD SELECT toTimeZone(toDateTime(-2, 2), 'Europe/Moscow'); 1970-01-01 03:00:00.00 SELECT toDateTime64(-2, 2, 'Europe/Moscow'); @@ -15,3 +16,25 @@ SELECT toDateTime64(-2., 2, 'Europe/Moscow'); SELECT toDateTime64(toFloat32(bitShiftLeft(toUInt64(1),33)), 2, 'Europe/Moscow'); 2106-02-07 09:00:00.00 SELECT toDateTime64(toFloat64(bitShiftLeft(toUInt64(1),33)), 2, 'Europe/Moscow') FORMAT Null; +======= +-- These values are within the extended range of DateTime64 [1925-01-01, 2284-01-01) +SELECT toDateTime(-2, 2); +1970-01-01 02:59:58.00 +SELECT toDateTime64(-2, 2); +1970-01-01 02:59:58.00 +SELECT CAST(-1 AS DateTime64); +1970-01-01 02:59:59.000 +SELECT CAST('2020-01-01 00:00:00.3' AS DateTime64); +2020-01-01 00:00:00.300 +SELECT toDateTime64(bitShiftLeft(toUInt64(1),33), 2); +2242-03-16 15:56:32.00 +-- These are outsize of extended range and hence clamped +SELECT toDateTime64(-1 * bitShiftLeft(toUInt64(1),35), 2); +1925-01-01 02:00:00.00 +SELECT CAST(-1 * bitShiftLeft(toUInt64(1),35) AS DateTime64); +1925-01-01 02:00:00.000 +SELECT CAST(bitShiftLeft(toUInt64(1),35) AS DateTime64); +2282-12-31 03:00:00.000 +SELECT toDateTime64(bitShiftLeft(toUInt64(1),35), 2); +2282-12-31 03:00:00.00 +>>>>>>> af31042451... 
Extended range of DateTime64 to years 1925 - 2238 diff --git a/tests/queries/0_stateless/01691_DateTime64_clamp.sql b/tests/queries/0_stateless/01691_DateTime64_clamp.sql index 92d5a33328f..958de4edada 100644 --- a/tests/queries/0_stateless/01691_DateTime64_clamp.sql +++ b/tests/queries/0_stateless/01691_DateTime64_clamp.sql @@ -1,4 +1,5 @@ -- { echo } +-- These values are within the extended range of DateTime64 [1925-01-01, 2284-01-01) SELECT toTimeZone(toDateTime(-2, 2), 'Europe/Moscow'); SELECT toDateTime64(-2, 2, 'Europe/Moscow'); SELECT CAST(-1 AS DateTime64(0, 'Europe/Moscow')); @@ -8,3 +9,9 @@ SELECT toTimeZone(toDateTime(-2., 2), 'Europe/Moscow'); SELECT toDateTime64(-2., 2, 'Europe/Moscow'); SELECT toDateTime64(toFloat32(bitShiftLeft(toUInt64(1),33)), 2, 'Europe/Moscow'); SELECT toDateTime64(toFloat64(bitShiftLeft(toUInt64(1),33)), 2, 'Europe/Moscow') FORMAT Null; + +-- These are outsize of extended range and hence clamped +SELECT toDateTime64(-1 * bitShiftLeft(toUInt64(1),35), 2); +SELECT CAST(-1 * bitShiftLeft(toUInt64(1),35) AS DateTime64); +SELECT CAST(bitShiftLeft(toUInt64(1),35) AS DateTime64); +SELECT toDateTime64(bitShiftLeft(toUInt64(1),35), 2); diff --git a/utils/convert-month-partitioned-parts/main.cpp b/utils/convert-month-partitioned-parts/main.cpp index 0a697937eb6..a6829d79726 100644 --- a/utils/convert-month-partitioned-parts/main.cpp +++ b/utils/convert-month-partitioned-parts/main.cpp @@ -47,8 +47,9 @@ void run(String part_path, String date_column, String dest_path) DayNum max_date; MergeTreePartInfo::parseMinMaxDatesFromPartName(old_part_name, min_date, max_date); - UInt32 yyyymm = DateLUT::instance().toNumYYYYMM(min_date); - if (yyyymm != DateLUT::instance().toNumYYYYMM(max_date)) + const auto & time_zone = DateLUT::instance(); + UInt32 yyyymm = time_zone.toNumYYYYMM(min_date); + if (yyyymm != time_zone.toNumYYYYMM(max_date)) throw Exception("Part " + old_part_name + " spans different months", ErrorCodes::BAD_DATA_PART_NAME); From 
4fcc23ec9ae35eff445089858804dc92d465b499 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Sun, 14 Feb 2021 13:00:40 +0200 Subject: [PATCH 020/333] Fixed build for GCC-10 --- base/common/DateLUTImpl.h | 2 +- src/Core/MySQL/MySQLReplication.cpp | 4 ++-- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- src/Storages/tests/part_name.cpp | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index adfffb04681..f10c62e9865 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -238,7 +238,7 @@ public: template inline ExtendedDayNum toDayNum(V v) const { - return ExtendedDayNum{static_cast(toLUTIndex(v).toUnderType()) - daynum_offset_epoch}; + return ExtendedDayNum{static_cast(toLUTIndex(v).toUnderType() - daynum_offset_epoch)}; } /// Round down to start of monday. diff --git a/src/Core/MySQL/MySQLReplication.cpp b/src/Core/MySQL/MySQLReplication.cpp index 3e9c5230955..4cb885d4c34 100644 --- a/src/Core/MySQL/MySQLReplication.cpp +++ b/src/Core/MySQL/MySQLReplication.cpp @@ -420,8 +420,8 @@ namespace MySQLReplication UInt32 i24 = 0; payload.readStrict(reinterpret_cast(&i24), 3); - const DayNum date_day_number{DateLUT::instance().makeDayNum( - static_cast((i24 >> 9) & 0x7fff), static_cast((i24 >> 5) & 0xf), static_cast(i24 & 0x1f)).toUnderType()}; + const DayNum date_day_number(DateLUT::instance().makeDayNum( + static_cast((i24 >> 9) & 0x7fff), static_cast((i24 >> 5) & 0xf), static_cast(i24 & 0x1f)).toUnderType()); row.push_back(Field(date_day_number.toUnderType())); break; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 8bf785afa0f..150f2aa577f 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -4369,7 +4369,7 @@ static String getPartNamePossiblyFake(MergeTreeDataFormatVersion format_version, /// The date range is all month long. 
const auto & lut = DateLUT::instance(); time_t start_time = lut.YYYYMMDDToDate(parse(part_info.partition_id + "01")); - DayNum left_date = DayNum{lut.toDayNum(start_time).toUnderType()}; + DayNum left_date = DayNum(lut.toDayNum(start_time).toUnderType()); DayNum right_date = DayNum(static_cast(left_date) + lut.daysInMonth(start_time) - 1); return part_info.getPartNameV0(left_date, right_date); } diff --git a/src/Storages/tests/part_name.cpp b/src/Storages/tests/part_name.cpp index aeadfd208cc..227e19cf17c 100644 --- a/src/Storages/tests/part_name.cpp +++ b/src/Storages/tests/part_name.cpp @@ -5,7 +5,7 @@ int main(int, char **) { - const DayNum today{DateLUT::instance().toDayNum(time(nullptr)).toUnderType()}; + const DayNum today(DateLUT::instance().toDayNum(time(nullptr)).toUnderType()); for (DayNum date = today; DayNum(date + 10) > today; --date) { From 7a53daaefdbfd760cee1099e8f706e5684aac948 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Tue, 16 Feb 2021 12:41:08 +0200 Subject: [PATCH 021/333] Fixed issues reported by PVS-Studio and Clang11/GCC10 --- base/common/DateLUTImpl.h | 4 --- base/common/tests/gtest_DateLutImpl.cpp | 48 +++++++------------------ 2 files changed, 12 insertions(+), 40 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index f10c62e9865..898fb7bf843 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -850,10 +850,6 @@ public: if (unlikely(year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || day_of_month < 1 || day_of_month > 31)) return ExtendedDayNum(0); - // The day after 2283 are not stored fully as struct Values, so just overflow it as 0 - if (unlikely(year > DATE_LUT_MAX_YEAR)) - return ExtendedDayNum(0); - return toDayNum(makeLUTIndex(year, month, day_of_month)); } diff --git a/base/common/tests/gtest_DateLutImpl.cpp b/base/common/tests/gtest_DateLutImpl.cpp index 395e2eddb00..2355c117328 100644 --- a/base/common/tests/gtest_DateLutImpl.cpp +++ 
b/base/common/tests/gtest_DateLutImpl.cpp @@ -25,18 +25,6 @@ cctz::civil_day YYYYMMDDToDay(unsigned value) value % 100); // day } -cctz::civil_second YYYYMMDDHMMSSToSecond(std::uint64_t value) -{ - return cctz::civil_second( - value / 10000000000, - value / 100000000 % 100, - value / 1000000 % 100, - value / 10000 % 100, - value / 100 % 100, - value % 100); -} - - std::vector allTimezones() { std::vector result; @@ -82,14 +70,17 @@ FailuresCount countFailures(const ::testing::TestResult & test_result) } -TEST(YYYYMMDDToDay, Test) +TEST(DateLUTTest, Test_makeDayNum) { - std::cerr << YYYYMMDDHMMSSToSecond(19700101'00'00'00) << std::endl; + const DateLUTImpl & lut = DateLUT::instance("UTC"); + EXPECT_EQ(0, lut.makeDayNum(2500, 12, 25)); + EXPECT_EQ(0, lut.makeDayNum(1924, 12, 31)); } + TEST(DateLUTTest, TimeValuesInMiddleOfRange) { - const DateLUTImpl lut("Europe/Minsk"); + const DateLUTImpl & lut = DateLUT::instance("Europe/Minsk"); const time_t time = 1568650811; // 2019-09-16 19:20:11 (Monday) EXPECT_EQ(lut.getTimeZone(), "Europe/Minsk"); @@ -151,7 +142,7 @@ TEST(DateLUTTest, TimeValuesInMiddleOfRange) TEST(DateLUTTest, TimeValuesAtLeftBoderOfRange) { - const DateLUTImpl lut("UTC"); + const DateLUTImpl & lut = DateLUT::instance("UTC"); const time_t time = 0; // 1970-01-01 00:00:00 (Thursday) EXPECT_EQ(lut.getTimeZone(), "UTC"); @@ -212,7 +203,7 @@ TEST(DateLUTTest, TimeValuesAtLeftBoderOfRange) TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOLDLut) { // Value is at the right border of the OLD (small) LUT, and provides meaningful values where OLD LUT would provide garbage. 
- const DateLUTImpl lut("UTC"); + const DateLUTImpl & lut = DateLUT::instance("UTC"); const time_t time = 4294343873; // 2106-01-31T01:17:53 (Sunday) @@ -276,11 +267,11 @@ TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOLDLut) class DateLUT_TimeZone : public ::testing::TestWithParam {}; -TEST_P(DateLUT_TimeZone, DISABLED_LoadAllTimeZones) +TEST_P(DateLUT_TimeZone, DISABLED_LoadLut) { // There are some assumptions and assertions about TZ data made in DateLUTImpl which are verified upon loading, // to make sure that those assertions are true for all timezones we are going to load all of them one by one. - DateLUTImpl{GetParam()}; + DateLUT::instance(GetParam()); } // Another long running test, shouldn't be run to often @@ -292,7 +283,7 @@ TEST_P(DateLUT_TimeZone, VaidateTimeComponentsAroundEpoch) const auto timezone_name = GetParam(); const auto * test_info = ::testing::UnitTest::GetInstance()->current_test_info(); - const auto lut = DateLUTImpl(timezone_name); + const DateLUTImpl & lut = DateLUT::instance(timezone_name); for (time_t i = -856147870; i < 86400 * 10000; i += 11 * 13 * 17 * 19) { @@ -376,22 +367,7 @@ struct TimeRangeParam std::ostream & operator<<(std::ostream & ostr, const TimeRangeParam & param) { - const auto approximate_step = [](const int step) -> std::string - { - // Convert seconds to a string of seconds or fractional count of minutes/hours/days. 
- static const size_t multipliers[] = {1 /*seconds to seconds*/, 60 /*seconds to minutes*/, 60 /*minutes to hours*/, 24 /*hours to days*/, 0 /*terminator*/}; - static const char* names[] = {"s", "m", "h", "d", nullptr}; - double result = step; - size_t i = 0; - for (; i < sizeof(multipliers)/sizeof(multipliers[0]) && result > multipliers[i]; ++i) - result /= multipliers[i]; - - char buffer[256] = {'\0'}; - std::snprintf(buffer, sizeof(buffer), "%.1f%s", result, names[i - 1]); - return std::string{buffer}; - }; - - return ostr << param.begin << " : " << param.end << " step: " << param.step_in_seconds << "s (" << approximate_step(param.step_in_seconds) << ")"; + return ostr << param.begin << " : " << param.end << " step: " << param.step_in_seconds << "s"; } class DateLUT_Timezone_TimeRange : public ::testing::TestWithParam> From d321c13cbf688f3fd526b1ec36ae296e8939ab2e Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Thu, 18 Feb 2021 00:33:34 +0200 Subject: [PATCH 022/333] Fixed special build --- base/common/DateLUTImpl.cpp | 2 +- base/common/tests/gtest_DateLutImpl.cpp | 36 ++++++++++++------------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index 906f88fa90f..6f4fb3dd5fc 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -53,7 +53,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) const cctz::civil_day epoch{1970, 1, 1}; const cctz::civil_day lut_start{DATE_LUT_MIN_YEAR, 1, 1}; - time_t start_of_day = std::chrono::system_clock::to_time_t(cctz_time_zone.lookup(lut_start).pre); + time_t start_of_day; time_offset_epoch = cctz::convert(cctz::civil_second(lut_start), cctz_time_zone).time_since_epoch().count(); // Note validated this against all timezones in the system. 
diff --git a/base/common/tests/gtest_DateLutImpl.cpp b/base/common/tests/gtest_DateLutImpl.cpp index 2355c117328..9169d9e768f 100644 --- a/base/common/tests/gtest_DateLutImpl.cpp +++ b/base/common/tests/gtest_DateLutImpl.cpp @@ -29,7 +29,7 @@ std::vector allTimezones() { std::vector result; - auto timezone_name = auto_time_zones; + const auto * timezone_name = auto_time_zones; while (*timezone_name) { result.push_back(*timezone_name); @@ -70,7 +70,7 @@ FailuresCount countFailures(const ::testing::TestResult & test_result) } -TEST(DateLUTTest, Test_makeDayNum) +TEST(DateLUTTest, makeDayNumTest) { const DateLUTImpl & lut = DateLUT::instance("UTC"); EXPECT_EQ(0, lut.makeDayNum(2500, 12, 25)); @@ -264,10 +264,10 @@ TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOLDLut) } -class DateLUT_TimeZone : public ::testing::TestWithParam +class DateLUTWithTimeZone : public ::testing::TestWithParam {}; -TEST_P(DateLUT_TimeZone, DISABLED_LoadLut) +TEST_P(DateLUTWithTimeZone, DISABLED_LoadLut) { // There are some assumptions and assertions about TZ data made in DateLUTImpl which are verified upon loading, // to make sure that those assertions are true for all timezones we are going to load all of them one by one. @@ -275,12 +275,12 @@ TEST_P(DateLUT_TimeZone, DISABLED_LoadLut) } // Another long running test, shouldn't be run to often -TEST_P(DateLUT_TimeZone, VaidateTimeComponentsAroundEpoch) +TEST_P(DateLUTWithTimeZone, VaidateTimeComponentsAroundEpoch) { // Converting time around 1970-01-01 to hour-minute-seconds time components // could be problematic. 
const size_t max_failures_per_tz = 3; - const auto timezone_name = GetParam(); + const auto * timezone_name = GetParam(); const auto * test_info = ::testing::UnitTest::GetInstance()->current_test_info(); const DateLUTImpl & lut = DateLUT::instance(timezone_name); @@ -311,14 +311,14 @@ TEST_P(DateLUT_TimeZone, VaidateTimeComponentsAroundEpoch) } } -TEST_P(DateLUT_TimeZone, getTimeZone) +TEST_P(DateLUTWithTimeZone, getTimeZone) { const auto & lut = DateLUT::instance(GetParam()); EXPECT_EQ(GetParam(), lut.getTimeZone()); } -TEST_P(DateLUT_TimeZone, ZeroTime) +TEST_P(DateLUTWithTimeZone, ZeroTime) { const auto & lut = DateLUT::instance(GetParam()); @@ -329,7 +329,7 @@ TEST_P(DateLUT_TimeZone, ZeroTime) // Group of tests for timezones that have or had some time ago an offset which is not multiple of 15 minutes. INSTANTIATE_TEST_SUITE_P(ExoticTimezones, - DateLUT_TimeZone, + DateLUTWithTimeZone, ::testing::ValuesIn(std::initializer_list{ "Africa/El_Aaiun", "Pacific/Apia", @@ -340,7 +340,7 @@ INSTANTIATE_TEST_SUITE_P(ExoticTimezones, ); INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimeZones, - DateLUT_TimeZone, + DateLUTWithTimeZone, ::testing::ValuesIn(allTimezones()) ); @@ -370,11 +370,11 @@ std::ostream & operator<<(std::ostream & ostr, const TimeRangeParam & param) return ostr << param.begin << " : " << param.end << " step: " << param.step_in_seconds << "s"; } -class DateLUT_Timezone_TimeRange : public ::testing::TestWithParam> +class DateLUTWithTimeZoneAndTimeRange : public ::testing::TestWithParam> {}; // refactored test from tests/date_lut3.cpp -TEST_P(DateLUT_Timezone_TimeRange, InRange) +TEST_P(DateLUTWithTimeZoneAndTimeRange, InRange) { // for a time_t values in range [begin, end) to match with reference obtained from cctz: // compare date and time components: year, month, day, hours, minutes, seconds, formatted time string. @@ -425,7 +425,7 @@ TEST_P(DateLUT_Timezone_TimeRange, InRange) * So it would be tricky to skip knonw failures to allow all unit tests to pass. 
*/ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010, - DateLUT_Timezone_TimeRange, + DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones()), ::testing::ValuesIn(std::initializer_list{ @@ -436,7 +436,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010, ); INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year1970_WHOLE, - DateLUT_Timezone_TimeRange, + DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones()), ::testing::ValuesIn(std::initializer_list{ @@ -446,7 +446,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year1970_WHOLE, ); INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010_WHOLE, - DateLUT_Timezone_TimeRange, + DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones()), ::testing::ValuesIn(std::initializer_list{ @@ -456,7 +456,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010_WHOLE, ); INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2020_WHOLE, - DateLUT_Timezone_TimeRange, + DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones()), ::testing::ValuesIn(std::initializer_list{ @@ -466,7 +466,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2020_WHOLE, ); INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_PreEpoch, - DateLUT_Timezone_TimeRange, + DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones()), ::testing::ValuesIn(std::initializer_list{ @@ -476,7 +476,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_PreEpoch, ); INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year1970, - DateLUT_Timezone_TimeRange, + DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones()), ::testing::ValuesIn(std::initializer_list{ From d568ba5ec7f966842075a66fc9ff554db8430701 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Sun, 21 Feb 2021 12:04:29 +0200 Subject: [PATCH 023/333] Marked 00921_datetime64_compatibility as long test --- ...bility.python => 
00921_datetime64_compatibility_long.python} | 0 ....reference => 00921_datetime64_compatibility_long.reference} | 0 ..._compatibility.sh => 00921_datetime64_compatibility_long.sh} | 2 +- 3 files changed, 1 insertion(+), 1 deletion(-) rename tests/queries/0_stateless/{00921_datetime64_compatibility.python => 00921_datetime64_compatibility_long.python} (100%) rename tests/queries/0_stateless/{00921_datetime64_compatibility.reference => 00921_datetime64_compatibility_long.reference} (100%) rename tests/queries/0_stateless/{00921_datetime64_compatibility.sh => 00921_datetime64_compatibility_long.sh} (91%) diff --git a/tests/queries/0_stateless/00921_datetime64_compatibility.python b/tests/queries/0_stateless/00921_datetime64_compatibility_long.python similarity index 100% rename from tests/queries/0_stateless/00921_datetime64_compatibility.python rename to tests/queries/0_stateless/00921_datetime64_compatibility_long.python diff --git a/tests/queries/0_stateless/00921_datetime64_compatibility.reference b/tests/queries/0_stateless/00921_datetime64_compatibility_long.reference similarity index 100% rename from tests/queries/0_stateless/00921_datetime64_compatibility.reference rename to tests/queries/0_stateless/00921_datetime64_compatibility_long.reference diff --git a/tests/queries/0_stateless/00921_datetime64_compatibility.sh b/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh similarity index 91% rename from tests/queries/0_stateless/00921_datetime64_compatibility.sh rename to tests/queries/0_stateless/00921_datetime64_compatibility_long.sh index 5f5034819e4..52a29c19be1 100755 --- a/tests/queries/0_stateless/00921_datetime64_compatibility.sh +++ b/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh @@ -11,6 +11,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # ${CURDIR}/00921_datetime64_compatibility.python -python3 "${CURDIR}"/00921_datetime64_compatibility.python \ +python3 "${CURDIR}"/00921_datetime64_compatibility_long.python 
\ | ${CLICKHOUSE_CLIENT} --ignore-error -T -nm --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \ | grep -v 'Received exception .*$' | sed 's/^\(Code: [0-9]\+\).*$/\1/g' From b8b916008c75e041c7d44c69d65fa0c1135d124d Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Mon, 22 Feb 2021 20:50:17 +0300 Subject: [PATCH 024/333] Update DateLUTImpl.h --- base/common/DateLUTImpl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 898fb7bf843..1c897080e3a 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -105,7 +105,7 @@ public: /// The order of fields matters for alignment and sizeof. struct Values { - /// Least significat 64 bits from time_t at beginning of the day. + /// time_t at beginning of the day. Int64 date; /// Properties of the day. From d5757c67e612b1846e71536a965fde7efda2ac2a Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Mon, 22 Feb 2021 22:45:55 +0300 Subject: [PATCH 025/333] Update DateLUTImpl.h --- base/common/DateLUTImpl.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 1c897080e3a..5a12ad5dc13 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -119,8 +119,9 @@ public: UInt8 days_in_month; /// For days, when offset from UTC was changed due to daylight saving time or permanent change, following values could be non zero. - Int8 amount_of_offset_change_value; /// Usually -3600 or 3600, but look at Lord Howe Island. multiply by OffsetChangeFactor - UInt8 time_at_offset_change_value; /// In seconds from beginning of the day. multiply by OffsetChangeFactor + /// All in OffsetChangeFactor (15 minute) intervals. + Int8 amount_of_offset_change_value; /// Usually -4 or 4, but look at Lord Howe Island. Multiply by OffsetChangeFactor + UInt8 time_at_offset_change_value; /// In seconds from beginning of the day. 
Multiply by OffsetChangeFactor inline Int32 amount_of_offset_change() const { From b331b03d3aad1255b340352e80e3b7e325af9531 Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 25 Feb 2021 16:07:09 +0500 Subject: [PATCH 026/333] fix indvalid links, add some PR re-created from scratch https://github.com/ClickHouse/ClickHouse/pull/21122 --- .../system-tables/replication_queue.md | 2 +- docs/en/sql-reference/operators/in.md | 2 +- docs/ru/operations/settings/settings.md | 11 +++++++--- .../system-tables/replication_queue.md | 2 +- docs/ru/sql-reference/operators/in.md | 22 +++++++++++++++++++ 5 files changed, 33 insertions(+), 6 deletions(-) diff --git a/docs/en/operations/system-tables/replication_queue.md b/docs/en/operations/system-tables/replication_queue.md index aa379caa46c..e2e606ef075 100644 --- a/docs/en/operations/system-tables/replication_queue.md +++ b/docs/en/operations/system-tables/replication_queue.md @@ -76,6 +76,6 @@ last_postpone_time: 1970-01-01 03:00:00 **See Also** -- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md/#query-language-system-replicated) +- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md#query-language-system-replicated) [Original article](https://clickhouse.tech/docs/en/operations/system_tables/replication_queue) diff --git a/docs/en/sql-reference/operators/in.md b/docs/en/sql-reference/operators/in.md index bfa8b3d1003..100d00356b1 100644 --- a/docs/en/sql-reference/operators/in.md +++ b/docs/en/sql-reference/operators/in.md @@ -203,7 +203,7 @@ It also makes sense to specify a local table in the `GLOBAL IN` clause, in case When max_parallel_replicas is greater than 1, distributed queries are further transformed. 
For example, the following: ```sql -SEELECT CounterID, count() FROM distributed_table_1 WHERE UserID IN (SELECT UserID FROM local_table_2 WHERE CounterID < 100) +SELECT CounterID, count() FROM distributed_table_1 WHERE UserID IN (SELECT UserID FROM local_table_2 WHERE CounterID < 100) SETTINGS max_parallel_replicas=3 ``` diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 7322b6c9184..deda437e933 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1086,9 +1086,14 @@ load_balancing = round_robin ## max_parallel_replicas {#settings-max_parallel_replicas} -Максимальное количество используемых реплик каждого шарда при выполнении запроса. -Для консистентности (чтобы получить разные части одного и того же разбиения), эта опция работает только при заданном ключе сэмплирования. -Отставание реплик не контролируется. +Максимальное кол-во реплик для каждого шарда во время исполениня запроса из distributed. В некоторых случаях, это может привести к более быстрому исполнению запроса за счет выполнения на большем кол-ве серверов. Эта настройка полезна только для реплицируемых таблиц созданных с использованием SAMPLING KEY выражения. Есть случаи когда производительность не улучшится или даже ухудшится: + +- позиция ключа семплирования в ключе партицирования не позволяет делать эффективные сканирования по диапозонам +- добавление семплирующего ключа к таблице, делает фильтрацию других колонок менее эффективной +- выражение используемое для вычисления ключа семплирования требует больших вычислительных затрат +- Распределение сетевых задержек внутри кластера имеет длинный хвост, так что запрос большего количества серверов может увеличить общую задержку запроса + +Кроме того, эта настройка может привести к некорректным результатам когда используются join или подзапросы и все таблицы не соответсвуют определенным условиям. 
Подробнее [Распределенные подзапросы и max_parallel_replicas](../../sql-reference/operators/in.md#max_parallel_replica-subqueries) for more details. ## compile {#compile} diff --git a/docs/ru/operations/system-tables/replication_queue.md b/docs/ru/operations/system-tables/replication_queue.md index 47f64aea55d..1ba4b2b8a36 100644 --- a/docs/ru/operations/system-tables/replication_queue.md +++ b/docs/ru/operations/system-tables/replication_queue.md @@ -76,6 +76,6 @@ last_postpone_time: 1970-01-01 03:00:00 **Смотрите также** -- [Управление таблицами ReplicatedMergeTree](../../sql-reference/statements/system.md/#query-language-system-replicated) +- [Управление таблицами ReplicatedMergeTree](../../sql-reference/statements/system.md#query-language-system-replicated) [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/replication_queue) diff --git a/docs/ru/sql-reference/operators/in.md b/docs/ru/sql-reference/operators/in.md index 4c1290df166..ee63d09fc66 100644 --- a/docs/ru/sql-reference/operators/in.md +++ b/docs/ru/sql-reference/operators/in.md @@ -197,3 +197,25 @@ SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID GLOBAL 5. Если в GLOBAL IN есть частая необходимость, то спланируйте размещение кластера ClickHouse таким образом, чтобы в каждом дата-центре была хотя бы одна реплика каждого шарда, и среди них была быстрая сеть - чтобы запрос целиком можно было бы выполнить, передавая данные в пределах одного дата-центра. В секции `GLOBAL IN` также имеет смысл указывать локальную таблицу - в случае, если эта локальная таблица есть только на сервере-инициаторе запроса, и вы хотите воспользоваться данными из неё на удалённых серверах. + +### Распределенные подзапросы и max_parallel_replicas {#max_parallel_replica-subqueries} + +Когда настройка max_parallel_replicas больше чем 1, распределенные запросы преобразуются. 
Например, следующий запрос: + +```sql +SELECT CounterID, count() FROM distributed_table_1 WHERE UserID IN (SELECT UserID FROM local_table_2 WHERE CounterID < 100) +SETTINGS max_parallel_replicas=3 +``` + +преобразуются на каждом сервере в + +```sql +SELECT CounterID, count() FROM local_table_1 WHERE UserID IN (SELECT UserID FROM local_table_2 WHERE CounterID < 100) +SETTINGS parallel_replicas_count=3, parallel_replicas_offset=M +``` + +где M значение между 1 и 3 зависящее от того на какой реплике выполняется локальный запрос. Эти параметры влияют на каждую таблицу семейства MergeTree в запросе и имеют тот же эффект, что и применение `SAMPLE 1/3 OFFSET (M-1)/3` для каждой таблицы. + +Поэтому применение настройки max_parallel_replicas даст корректные результаты если обе таблицы имеют одинаковую схему репликации и семплированы по UserID выражению от UserID. В частности, если local_table_2 не имеет семплирующего ключа, будут получены неверные результаты. Тоже правило применяется для JOIN. + +Один из способов избежать этого, если local_table_2 не удовлетворяет требованиям, использовать `GLOBAL IN` или `GLOBAL JOIN`. 
From c891cf4557efbf10cc0312a716b756acb927d74c Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Fri, 26 Feb 2021 12:48:57 +0300 Subject: [PATCH 027/333] Fixes by review response --- src/Common/ErrorCodes.cpp | 1 + src/Disks/DiskType.h | 32 +++ src/Disks/IDisk.h | 35 +-- src/Disks/IStoragePolicy.h | 5 +- src/Disks/S3/DiskS3.h | 4 + src/Disks/StoragePolicy.cpp | 4 +- src/Disks/StoragePolicy.h | 2 +- src/Interpreters/InterserverIOHandler.h | 2 - src/Storages/MergeTree/DataPartsExchange.cpp | 13 +- src/Storages/MergeTree/DataPartsExchange.h | 20 -- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 237 +--------------- src/Storages/MergeTree/IMergeTreeDataPart.h | 15 - src/Storages/MergeTree/MergeTreeData.h | 15 +- .../MergeTree/MergeTreeDataMergerMutator.cpp | 3 +- .../MergeTree/MergedBlockOutputStream.cpp | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 267 +++++++++++++++--- src/Storages/StorageReplicatedMergeTree.h | 28 +- .../configs/config.d/s3.xml | 2 +- .../test_s3_zero_copy_replication/test.py | 8 +- 19 files changed, 330 insertions(+), 365 deletions(-) create mode 100644 src/Disks/DiskType.h diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 09e5945f2b5..8dd05615a19 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -534,6 +534,7 @@ M(565, TOO_MANY_PARTITIONS) \ M(566, CANNOT_RMDIR) \ M(567, DUPLICATED_PART_UUIDS) \ + M(568, INCORRECT_PART_TYPE) \ \ M(999, KEEPER_EXCEPTION) \ M(1000, POCO_EXCEPTION) \ diff --git a/src/Disks/DiskType.h b/src/Disks/DiskType.h new file mode 100644 index 00000000000..4e0ae226af4 --- /dev/null +++ b/src/Disks/DiskType.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +namespace DB +{ + +struct DiskType +{ + enum class Type + { + Local, + RAM, + S3 + }; + static String toString(Type disk_type) + { + switch (disk_type) + { + case Type::Local: + return "local"; + case Type::RAM: + return "memory"; + case Type::S3: + return "s3"; + } + __builtin_unreachable(); + } +}; + +} + diff --git 
a/src/Disks/IDisk.h b/src/Disks/IDisk.h index ac9425a8b3a..44c4fe73d37 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -57,29 +58,6 @@ public: using SpacePtr = std::shared_ptr; -struct DiskType -{ - enum class Type - { - Local, - RAM, - S3 - }; - static String toString(Type disk_type) - { - switch (disk_type) - { - case Type::Local: - return "local"; - case Type::RAM: - return "memory"; - case Type::S3: - return "s3"; - } - __builtin_unreachable(); - } -}; - /** * A guard, that should synchronize file's or directory's state * with storage device (e.g. fsync in POSIX) in its destructor. @@ -196,12 +174,18 @@ public: virtual void removeRecursive(const String & path) = 0; /// Remove file. Throws exception if file doesn't exists or if directory is not empty. + /// Differs from removeFile for S3 disks + /// Second bool param is a flag to remove (true) or keep (false) shared data on S3 virtual void removeSharedFile(const String & path, bool) { removeFile(path); } /// Remove file or directory with all children. Use with extra caution. Throws exception if file doesn't exists. + /// Differs from removeRecursive for S3 disks + /// Second bool param is a flag to remove (true) or keep (false) shared data on S3 virtual void removeSharedRecursive(const String & path, bool) { removeRecursive(path); } /// Remove file or directory if it exists. + /// Differs from removeFileIfExists for S3 disks + /// Second bool param is a flag to remove (true) or keep (false) shared data on S3 virtual void removeSharedFileIfExists(const String & path, bool) { removeFileIfExists(path); } /// Set last modified time to file or directory at `path`. 
@@ -226,9 +210,12 @@ public: virtual void shutdown() { } /// Return some uniq string for file, overrode for S3 + /// Required to distinguish different copies of the same part on S3 virtual String getUniqueId(const String & path) const { return path; } - /// Check file, overrode for S3 only + /// Check file exists and ClickHouse has an access to it + /// Overridden in DiskS3 + /// Required for S3 to ensure that replica has access to data written by other node virtual bool checkUniqueId(const String & id) const { return exists(id); } /// Returns executor to perform asynchronous operations. diff --git a/src/Disks/IStoragePolicy.h b/src/Disks/IStoragePolicy.h index 957021441b8..59cff3c85d5 100644 --- a/src/Disks/IStoragePolicy.h +++ b/src/Disks/IStoragePolicy.h @@ -1,4 +1,7 @@ #pragma once + +#include + #include #include #include @@ -36,7 +39,7 @@ public: /// mutations files virtual DiskPtr getAnyDisk() const = 0; virtual DiskPtr getDiskByName(const String & disk_name) const = 0; - virtual Disks getDisksByType(const String & type) const = 0; + virtual Disks getDisksByType(DiskType::Type type) const = 0; /// Get free space from most free disk virtual UInt64 getMaxUnreservedFreeSpace() const = 0; /// Reserves space on any volume with index > min_volume_index or returns nullptr diff --git a/src/Disks/S3/DiskS3.h b/src/Disks/S3/DiskS3.h index 4eef1512752..5d9effa16fa 100644 --- a/src/Disks/S3/DiskS3.h +++ b/src/Disks/S3/DiskS3.h @@ -118,8 +118,12 @@ public: void shutdown() override; + /// Return some uniq string for file + /// Required to distinguish different copies of the same part on S3 String getUniqueId(const String & path) const override; + /// Check file exists and ClickHouse has an access to it + /// Required for S3 to ensure that replica has access to data written by other node bool checkUniqueId(const String & id) const override; /// Actions performed after disk creation. 
diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index be40a5ae72d..cff2685ca24 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -159,12 +159,12 @@ Disks StoragePolicy::getDisks() const } -Disks StoragePolicy::getDisksByType(const String & type) const +Disks StoragePolicy::getDisksByType(DiskType::Type type) const { Disks res; for (const auto & volume : volumes) for (const auto & disk : volume->getDisks()) - if (DB::DiskType::toString(disk->getType()) == type) + if (disk->getType() == type) res.push_back(disk); return res; } diff --git a/src/Disks/StoragePolicy.h b/src/Disks/StoragePolicy.h index 7e72fcda8b1..71773e91f70 100644 --- a/src/Disks/StoragePolicy.h +++ b/src/Disks/StoragePolicy.h @@ -48,7 +48,7 @@ public: Disks getDisks() const override; /// Returns disks by type ordered by volumes priority - Disks getDisksByType(const String & type) const override; + Disks getDisksByType(DiskType::Type type) const override; /// Returns any disk /// Used when it's not important, for example for diff --git a/src/Interpreters/InterserverIOHandler.h b/src/Interpreters/InterserverIOHandler.h index bcb0e8736f0..f8b0f4d1247 100644 --- a/src/Interpreters/InterserverIOHandler.h +++ b/src/Interpreters/InterserverIOHandler.h @@ -40,8 +40,6 @@ public: virtual void processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & body, WriteBuffer & out, Poco::Net::HTTPServerResponse & response) = 0; virtual ~InterserverIOEndpoint() = default; - virtual void setZooKeeper(const zkutil::ZooKeeperPtr &zookeeper_, const String & zookeeper_path_, const String & replica_name_) = 0; - /// You need to stop the data transfer if blocker is activated. 
ActionBlocker blocker; std::shared_mutex rwlock; diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index d031989bfcd..511cc82a870 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -35,6 +35,7 @@ namespace ErrorCodes extern const int CORRUPTED_DATA; extern const int LOGICAL_ERROR; extern const int S3_ERROR; + extern const int INCORRECT_PART_TYPE; } namespace DataPartsExchange @@ -265,7 +266,7 @@ void Service::sendPartS3Metadata(const MergeTreeData::DataPartPtr & part, WriteB if (disk->getType() != DB::DiskType::Type::S3) throw Exception("S3 disk is not S3 anymore", ErrorCodes::LOGICAL_ERROR); - part->lockSharedData(); + part->storage.lockSharedData(*part); String part_id = part->getUniqueId(); writeStringBinary(part_id, out); @@ -280,9 +281,9 @@ void Service::sendPartS3Metadata(const MergeTreeData::DataPartPtr & part, WriteB Poco::File metadata(metadata_file); if (!metadata.exists()) - throw Exception("S3 metadata '" + file_name + "' is not exists", ErrorCodes::LOGICAL_ERROR); + throw Exception("S3 metadata '" + file_name + "' is not exists", ErrorCodes::CORRUPTED_DATA); if (!metadata.isFile()) - throw Exception("S3 metadata '" + file_name + "' is not a file", ErrorCodes::LOGICAL_ERROR); + throw Exception("S3 metadata '" + file_name + "' is not a file", ErrorCodes::CORRUPTED_DATA); UInt64 file_size = metadata.getSize(); writeStringBinary(it.first, out); @@ -361,7 +362,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( disks_s3.push_back(disk_s3); else { - disks_s3 = data.getDisksByType("s3"); + disks_s3 = data.getDisksByType(DiskType::Type::S3); if (disks_s3.empty()) try_use_s3_copy = false; @@ -411,7 +412,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( String part_type = "Wide"; readStringBinary(part_type, in); if (part_type == "InMemory") - throw Exception("Got 'send_s3_metadata' cookie for in-memory partition", 
ErrorCodes::LOGICAL_ERROR); + throw Exception("Got 'send_s3_metadata' cookie for in-memory part", ErrorCodes::INCORRECT_PART_TYPE); UUID part_uuid = UUIDHelpers::Nil; if (server_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_UUID) @@ -692,7 +693,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( new_data_part->modification_time = time(nullptr); new_data_part->loadColumnsChecksumsIndexes(true, false); - new_data_part->lockSharedData(); + new_data_part->storage.lockSharedData(*new_data_part); return new_data_part; } diff --git a/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h index f0297aa1d28..1fcee1242e3 100644 --- a/src/Storages/MergeTree/DataPartsExchange.h +++ b/src/Storages/MergeTree/DataPartsExchange.h @@ -35,13 +35,6 @@ public: std::string getId(const std::string & node_id) const override; void processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & body, WriteBuffer & out, Poco::Net::HTTPServerResponse & response) override; - void setZooKeeper(const zkutil::ZooKeeperPtr & zookeeper_, const String & zookeeper_path_, const String & replica_name_) override - { - zookeeper = zookeeper_; - zookeeper_path = zookeeper_path_; - replica_name = replica_name_; - } - private: MergeTreeData::DataPartPtr findPart(const String & name); void sendPartFromMemory(const MergeTreeData::DataPartPtr & part, WriteBuffer & out); @@ -53,9 +46,6 @@ private: /// so Service will never access dangling reference to storage MergeTreeData & data; Poco::Logger * log; - zkutil::ZooKeeperPtr zookeeper; - String zookeeper_path; - String replica_name; }; /** Client for getting the parts from the table *MergeTree. @@ -87,13 +77,6 @@ public: /// You need to stop the data transfer. 
ActionBlocker blocker; - void setZooKeeper(const zkutil::ZooKeeperPtr & zookeeper_, const String & zookeeper_path_, const String & replica_name_) - { - zookeeper = zookeeper_; - zookeeper_path = zookeeper_path_; - replica_name = replica_name_; - } - private: MergeTreeData::MutableDataPartPtr downloadPartToDisk( const String & part_name, @@ -121,9 +104,6 @@ private: MergeTreeData & data; Poco::Logger * log; - zkutil::ZooKeeperPtr zookeeper; - String zookeeper_path; - String replica_name; }; } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 5c35a8d0af3..03dbac0cb68 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1010,8 +1010,8 @@ void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_ if (storage.getSettings()->fsync_part_directory) sync_guard = volume->getDisk()->getDirectorySyncGuard(to); - lockSharedData(); - unlockSharedData(old_relative_path); + storage.lockSharedData(*this); + storage.unlockSharedData(*this, old_relative_path); } @@ -1166,15 +1166,7 @@ void IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & di } disk->createDirectories(path_to_clone); - bool is_fetched = false; - - if (disk->getType() == DB::DiskType::Type::S3) - { - auto data_settings = storage.getSettings(); - if (data_settings->allow_s3_zero_copy_replication) - is_fetched = tryToFetchIfShared(disk, path_to_clone + "/" + name); - } - + bool is_fetched = storage.tryToFetchIfShared(*this, disk, path_to_clone + "/" + name); if (!is_fetched) volume->getDisk()->copy(getFullRelativePath(), disk, path_to_clone); volume->getDisk()->removeFileIfExists(path_to_clone + '/' + DELETE_ON_DESTROY_MARKER_FILE_NAME); @@ -1326,229 +1318,6 @@ String IMergeTreeDataPart::getUniqueId() const return id; } -void IMergeTreeDataPart::lockSharedData() const -{ - if (!volume) - return; - DiskPtr disk = volume->getDisk(); - if (!disk) - return; 
- if (disk->getType() != DB::DiskType::Type::S3) - return; - - const StorageReplicatedMergeTree *replicated_storage = dynamic_cast(&storage); - if (!replicated_storage) - return; - - StorageReplicatedMergeTree::ZooKeeperAccessData zk = replicated_storage->getZooKeeperAccessData(); - if (!zk.zookeeper) - return; - - String id = getUniqueId(); - boost::replace_all(id, "/", "_"); - String norm_path = relative_path; - boost::replace_all(norm_path, "/", "_"); - - String zookeeper_node = zk.zookeeper_path + "/zero_copy_s3/shared/" + name + "/" + id + "/" + norm_path + "/" + zk.replica_name; - - LOG_TRACE(storage.log, "Set zookeeper lock {}", zookeeper_node); - - /// In rare case other replica can remove path between createAncestors and createIfNotExists - /// So we make up to 5 attempts - for (int attempts = 5; attempts > 0; --attempts) - { - try - { - zk.zookeeper->createAncestors(zookeeper_node); - zk.zookeeper->createIfNotExists(zookeeper_node, "lock"); - break; - } - catch (const zkutil::KeeperException & e) - { - if (e.code == Coordination::Error::ZNONODE) - continue; - throw; - } - } -} - -bool IMergeTreeDataPart::unlockSharedData() const -{ - return unlockSharedData(relative_path); -} - -bool IMergeTreeDataPart::unlockSharedData(const String & path) const -{ - if (!volume) - return true; - DiskPtr disk = volume->getDisk(); - if (!disk) - return true; - if (disk->getType() != DB::DiskType::Type::S3) - return true; - - const StorageReplicatedMergeTree *replicated_storage = dynamic_cast(&storage); - if (!replicated_storage) - return true; - - StorageReplicatedMergeTree::ZooKeeperAccessData zk = replicated_storage->getZooKeeperAccessData(); - if (!zk.zookeeper) - return true; - - String id = getUniqueId(); - boost::replace_all(id, "/", "_"); - String norm_path = path; - boost::replace_all(norm_path, "/", "_"); - - String zookeeper_part_node = zk.zookeeper_path + "/zero_copy_s3/shared/" + name; - String zookeeper_part_uniq_node = zookeeper_part_node + "/" + id; - 
String zookeeper_part_path_node = zookeeper_part_uniq_node + "/" + norm_path; - String zookeeper_node = zookeeper_part_path_node + "/" + zk.replica_name; - - LOG_TRACE(storage.log, "Remove zookeeper lock {}", zookeeper_node); - - zk.zookeeper->tryRemove(zookeeper_node); - - Strings children; - zk.zookeeper->tryGetChildren(zookeeper_part_path_node, children); - if (!children.empty()) - { - LOG_TRACE(storage.log, "Found zookeper locks for {}", zookeeper_part_path_node); - return false; - } - - zk.zookeeper->tryRemove(zookeeper_part_path_node); - - children.clear(); - zk.zookeeper->tryGetChildren(zookeeper_part_uniq_node, children); - - if (!children.empty()) - { - LOG_TRACE(storage.log, "Found zookeper locks for {}", zookeeper_part_uniq_node); - return false; - } - - zk.zookeeper->tryRemove(zookeeper_part_uniq_node); - - /// Even when we have lock with same part name, but with different uniq, we can remove files on S3 - children.clear(); - zk.zookeeper->tryGetChildren(zookeeper_part_node, children); - if (children.empty()) - /// Cleanup after last uniq removing - zk.zookeeper->tryRemove(zookeeper_part_node); - - return true; -} - -String IMergeTreeDataPart::getSharedDataReplica( - const String & zookeeper_path, - zkutil::ZooKeeperPtr zookeeper, - const String & replica_name) const -{ - String norm_path = relative_path; - boost::replace_all(norm_path, "/", "_"); - String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/shared/" + name; - - Strings ids; - zookeeper->tryGetChildren(zookeeper_part_node, ids); - - Strings replicas; - for (const auto & id : ids) - { - String zookeeper_part_uniq_node = zookeeper_part_node + "/" + id; - Strings paths; - zookeeper->tryGetChildren(zookeeper_part_uniq_node, paths); - for (const auto &path : paths) - { - String zookeeper_node = zookeeper_part_uniq_node + "/" + path; - Strings id_replicas; - zookeeper->tryGetChildren(zookeeper_node, id_replicas); - LOG_TRACE(storage.log, "Found zookeper replicas for {}: {}", zookeeper_node, 
id_replicas.size()); - replicas.insert(replicas.end(), id_replicas.begin(), id_replicas.end()); - } - } - - LOG_TRACE(storage.log, "Found zookeper replicas for part {}: {}", name, replicas.size()); - - String best_replica; - Strings active_replicas; - - /// TODO: Move best replica choose in common method (here is the same code as in StorageReplicatedMergeTree::fetchPartition) - - /// Leave only active replicas. - active_replicas.reserve(replicas.size()); - - for (const String & replica : replicas) - if ((replica != replica_name) && (zookeeper->exists(zookeeper_path + "/replicas/" + replica + "/is_active"))) - active_replicas.push_back(replica); - - LOG_TRACE(storage.log, "Found zookeper active replicas for part {}: {}", name, active_replicas.size()); - - if (active_replicas.empty()) - return best_replica; - - /** You must select the best (most relevant) replica. - * This is a replica with the maximum `log_pointer`, then with the minimum `queue` size. - * NOTE This is not exactly the best criteria. It does not make sense to download old partitions, - * and it would be nice to be able to choose the replica closest by network. - * NOTE Of course, there are data races here. You can solve it by retrying. - */ - Int64 max_log_pointer = -1; - UInt64 min_queue_size = std::numeric_limits::max(); - - for (const String & replica : active_replicas) - { - String current_replica_path = zookeeper_path + "/replicas/" + replica; - - String log_pointer_str = zookeeper->get(current_replica_path + "/log_pointer"); - Int64 log_pointer = log_pointer_str.empty() ? 
0 : parse(log_pointer_str); - - Coordination::Stat stat; - zookeeper->get(current_replica_path + "/queue", &stat); - size_t queue_size = stat.numChildren; - - if (log_pointer > max_log_pointer - || (log_pointer == max_log_pointer && queue_size < min_queue_size)) - { - max_log_pointer = log_pointer; - min_queue_size = queue_size; - best_replica = replica; - } - } - - return best_replica; -} - -bool IMergeTreeDataPart::tryToFetchIfShared(const DiskPtr & disk, const String & path) const -{ - const StorageReplicatedMergeTree *replicated_storage = dynamic_cast(&storage); - if (!replicated_storage) - return false; - - StorageReplicatedMergeTree::ZooKeeperAccessData zk = replicated_storage->getZooKeeperAccessData(); - if (!zk.zookeeper) - return false; - - String replica = getSharedDataReplica(zk.zookeeper_path, zk.zookeeper, zk.replica_name); - - /// We can't fetch part when none replicas have this part on S3 - if (replica.empty()) - return false; - - ReplicatedMergeTreeLogEntry log_entry; - log_entry.type = ReplicatedMergeTreeLogEntry::FETCH_SHARED_PART; - log_entry.source_replica = replica; - log_entry.new_part_name = name;//part_name; - log_entry.create_time = 0;//part_create_time; - log_entry.disk = disk; - log_entry.path = path; - - /// TODO: Fix const usage - StorageReplicatedMergeTree *replicated_storage_nc = const_cast(replicated_storage); - - return replicated_storage_nc->executeFetchShared(log_entry); -} - bool isCompactPart(const MergeTreeDataPartPtr & data_part) { return (data_part && data_part->getType() == MergeTreeDataPartType::COMPACT); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 2c54cf4e096..f9b5f616f70 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -369,15 +369,6 @@ public: String getUniqueId() const; - /// Lock part in zookeeper for use common S3 data in several nodes - void lockSharedData() const; - - /// Unlock common S3 
data part in zookeeper - /// Return true if data unlocked - /// Return false if data is still used by another node - bool unlockSharedData() const; - bool unlockSharedData(const String & path) const; - protected: /// Total size of all columns, calculated once in calcuateColumnSizesOnDisk @@ -442,12 +433,6 @@ private: /// for this column with default parameters. CompressionCodecPtr detectDefaultCompressionCodec() const; - /// Fetch part only if some replica has it on shared storage like S3 - bool tryToFetchIfShared(const DiskPtr & disk, const String & path) const; - - /// Get best replica having this partition on S3 - String getSharedDataReplica(const String & zookeeper_path, zkutil::ZooKeeperPtr zookeeper, const String & replica_name) const; - mutable State state{State::Temporary}; }; diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index c99ad621c88..916cdeaf692 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -662,7 +662,7 @@ public: /// Reserves 0 bytes ReservationPtr makeEmptyReservationOnLargestDisk() { return getStoragePolicy()->makeEmptyReservationOnLargestDisk(); } - Disks getDisksByType(const String & type) const { return getStoragePolicy()->getDisksByType(type); } + Disks getDisksByType(DiskType::Type type) const { return getStoragePolicy()->getDisksByType(type); } /// Return alter conversions for part which must be applied on fly. 
AlterConversions getAlterConversionsForPart(const MergeTreeDataPartPtr part) const; @@ -735,6 +735,19 @@ public: std::optional getDataMovingJob(); bool areBackgroundMovesNeeded() const; + /// Lock part in zookeeper to use common S3 data in several nodes + /// Overridden in StorageReplicatedMergeTree + virtual void lockSharedData(const IMergeTreeDataPart &) const {} + + /// Unlock common S3 data part in zookeeper + /// Overridden in StorageReplicatedMergeTree + virtual bool unlockSharedData(const IMergeTreeDataPart &) const { return true; } + virtual bool unlockSharedData(const IMergeTreeDataPart &, const String &) const { return true; } + + /// Fetch part only if some replica has it on shared storage like S3 + /// Overridden in StorageReplicatedMergeTree + virtual bool tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) const { return false; } + protected: friend class IMergeTreeDataPart; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 1e870486314..a4b0e49e1a9 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -1894,10 +1894,9 @@ void MergeTreeDataMergerMutator::finalizeMutatedPart( MergeTreeData::DataPart::calculateTotalSizeOnDisk(new_data_part->volume->getDisk(), new_data_part->getFullRelativePath())); new_data_part->default_codec = codec; new_data_part->calculateColumnsSizesOnDisk(); - new_data_part->lockSharedData(); + new_data_part->storage.lockSharedData(*new_data_part); } - bool MergeTreeDataMergerMutator::checkOperationIsNotCanceled(const MergeListEntry & merge_entry) const { if (merges_blocker.isCancelled() || merge_entry->is_cancelled) diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 945c047f30e..1b852622efc 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ 
b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -91,7 +91,7 @@ void MergedBlockOutputStream::writeSuffixAndFinalizePart( new_part->calculateColumnsSizesOnDisk(); if (default_codec != nullptr) new_part->default_codec = default_codec; - new_part->lockSharedData(); + new_part->storage.lockSharedData(*new_part); } void MergedBlockOutputStream::finalizePartOnDisk( diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 25d379e2960..ec2242291cd 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -421,8 +421,6 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( } createNewZooKeeperNodes(); - - fetcher.setZooKeeper(current_zookeeper, zookeeper_path, replica_name); } @@ -1491,6 +1489,7 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) future_merged_part.updatePath(*this, reserved_space); future_merged_part.merge_type = entry.merge_type; + if (storage_settings_ptr->allow_s3_zero_copy_replication) { auto disk = reserved_space->getDisk(); if (disk->getType() == DB::DiskType::Type::S3) @@ -3228,30 +3227,6 @@ String StorageReplicatedMergeTree::findReplicaHavingPart(const String & part_nam return {}; } -String StorageReplicatedMergeTree::findReplicaHavingSharedPart(const String & part_name, bool active) -{ - auto zookeeper = getZooKeeper(); - Strings replicas = zookeeper->getChildren(zookeeper_path + "/replicas"); - - /// Select replicas in uniformly random order. - std::shuffle(replicas.begin(), replicas.end(), thread_local_rng); - - for (const String & replica : replicas) - { - /// We don't interested in ourself. - if (replica == replica_name) - continue; - - if (checkReplicaHavePart(replica, part_name) && - (!active || zookeeper->exists(zookeeper_path + "/replicas/" + replica + "/is_active"))) - return replica; - - /// Obviously, replica could become inactive or even vanish after return from this method. 
- } - - return {}; -} - String StorageReplicatedMergeTree::findReplicaHavingCoveringPart(LogEntry & entry, bool active) { auto zookeeper = getZooKeeper(); @@ -3668,11 +3643,6 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora Transaction transaction(*this); renameTempPartAndReplace(part, nullptr, &transaction); - /** NOTE - * Here, an error occurs if ALTER occurred with a change in the column type or column deletion, - * and the part on remote server has not yet been modified. - * After a while, one of the following attempts to make `fetchPart` succeed. - */ replaced_parts = checkPartChecksumsAndCommit(transaction, part); /** If a quorum is tracked for this part, you must update it. @@ -3757,7 +3727,6 @@ void StorageReplicatedMergeTree::startup() InterserverIOEndpointPtr data_parts_exchange_ptr = std::make_shared(*this); [[maybe_unused]] auto prev_ptr = std::atomic_exchange(&data_parts_exchange_endpoint, data_parts_exchange_ptr); assert(prev_ptr == nullptr); - data_parts_exchange_ptr->setZooKeeper(tryGetZooKeeper(), zookeeper_path, replica_name); global_context.getInterserverIOHandler().addEndpoint(data_parts_exchange_ptr->getId(replica_path), data_parts_exchange_ptr); /// In this thread replica will be activated. @@ -5464,13 +5433,13 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() } parts.clear(); - auto remove_parts_from_filesystem = [log=log] (const DataPartsVector & parts_to_remove) + auto remove_parts_from_filesystem = [log=log, this] (const DataPartsVector & parts_to_remove) { for (const auto & part : parts_to_remove) { try { - bool keep_s3 = !part->unlockSharedData(); + bool keep_s3 = !this->unlockSharedData(*part); part->remove(keep_s3); } catch (...) 
@@ -6405,6 +6374,7 @@ CheckResults StorageReplicatedMergeTree::checkData(const ASTPtr & query, const C return results; } + bool StorageReplicatedMergeTree::canUseAdaptiveGranularity() const { const auto storage_settings_ptr = getSettings(); @@ -6419,19 +6389,236 @@ MutationCommands StorageReplicatedMergeTree::getFirstAlterMutationCommandsForPar return queue.getFirstAlterMutationCommandsForPart(part); } + void StorageReplicatedMergeTree::startBackgroundMovesIfNeeded() { if (areBackgroundMovesNeeded()) background_moves_executor.start(); } -StorageReplicatedMergeTree::ZooKeeperAccessData StorageReplicatedMergeTree::getZooKeeperAccessData() const + +void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part) const { - ZooKeeperAccessData res; - res.zookeeper = tryGetZooKeeper(); - res.zookeeper_path = zookeeper_path; - res.replica_name = replica_name; - return res; + if (!part.volume) + return; + DiskPtr disk = part.volume->getDisk(); + if (!disk) + return; + if (disk->getType() != DB::DiskType::Type::S3) + return; + + zkutil::ZooKeeperPtr zookeeper = tryGetZooKeeper(); + if (!zookeeper) + return; + + String id = part.getUniqueId(); + boost::replace_all(id, "/", "_"); + String norm_path = part.relative_path; + boost::replace_all(norm_path, "/", "_"); + + String zookeeper_node = zookeeper_path + "/zero_copy_s3/shared/" + part.name + "/" + id + "/" + norm_path + "/" + replica_name; + + LOG_TRACE(log, "Set zookeeper lock {}", zookeeper_node); + + /// In rare case other replica can remove path between createAncestors and createIfNotExists + /// So we make up to 5 attempts + for (int attempts = 5; attempts > 0; --attempts) + { + try + { + zookeeper->createAncestors(zookeeper_node); + zookeeper->createIfNotExists(zookeeper_node, "lock"); + break; + } + catch (const zkutil::KeeperException & e) + { + if (e.code == Coordination::Error::ZNONODE) + continue; + throw; + } + } +} + + +bool StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & 
part) const +{ + return unlockSharedData(part, part.relative_path); +} + + +bool StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & part, const String & path) const +{ + if (!part.volume) + return true; + DiskPtr disk = part.volume->getDisk(); + if (!disk) + return true; + if (disk->getType() != DB::DiskType::Type::S3) + return true; + + zkutil::ZooKeeperPtr zookeeper = tryGetZooKeeper(); + if (!zookeeper) + return true; + + String id = part.getUniqueId(); + boost::replace_all(id, "/", "_"); + String norm_path = path; + boost::replace_all(norm_path, "/", "_"); + + String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/shared/" + part.name; + String zookeeper_part_uniq_node = zookeeper_part_node + "/" + id; + String zookeeper_part_path_node = zookeeper_part_uniq_node + "/" + norm_path; + String zookeeper_node = zookeeper_part_path_node + "/" + replica_name; + + LOG_TRACE(log, "Remove zookeeper lock {}", zookeeper_node); + + zookeeper->tryRemove(zookeeper_node); + + Strings children; + zookeeper->tryGetChildren(zookeeper_part_path_node, children); + if (!children.empty()) + { + LOG_TRACE(log, "Found zookeper locks for {}", zookeeper_part_path_node); + return false; + } + + zookeeper->tryRemove(zookeeper_part_path_node); + + children.clear(); + zookeeper->tryGetChildren(zookeeper_part_uniq_node, children); + + if (!children.empty()) + { + LOG_TRACE(log, "Found zookeper locks for {}", zookeeper_part_uniq_node); + return false; + } + + zookeeper->tryRemove(zookeeper_part_uniq_node); + + /// Even when we have lock with same part name, but with different uniq, we can remove files on S3 + children.clear(); + zookeeper->tryGetChildren(zookeeper_part_node, children); + if (children.empty()) + /// Cleanup after last uniq removing + zookeeper->tryRemove(zookeeper_part_node); + + return true; +} + + +bool StorageReplicatedMergeTree::tryToFetchIfShared( + const IMergeTreeDataPart & part, + const DiskPtr & disk, + const String & path) const +{ + const 
auto data_settings = getSettings(); + if (!data_settings->allow_s3_zero_copy_replication) + return false; + + if (disk->getType() != DB::DiskType::Type::S3) + return false; + + String replica = getSharedDataReplica(part); + + /// We can't fetch part when none replicas have this part on S3 + if (replica.empty()) + return false; + + ReplicatedMergeTreeLogEntry log_entry; + log_entry.type = ReplicatedMergeTreeLogEntry::FETCH_SHARED_PART; + log_entry.source_replica = replica; + log_entry.new_part_name = part.name; + log_entry.create_time = 0; + log_entry.disk = disk; + log_entry.path = path; + + /// TODO: Fix const usage + StorageReplicatedMergeTree * replicated_storage_nc = const_cast(this); + + return replicated_storage_nc->executeFetchShared(log_entry); +} + + +String StorageReplicatedMergeTree::getSharedDataReplica( + const IMergeTreeDataPart & part) const +{ + String best_replica; + + zkutil::ZooKeeperPtr zookeeper = tryGetZooKeeper(); + if (!zookeeper) + return best_replica; + + String norm_path = part.relative_path; + boost::replace_all(norm_path, "/", "_"); + String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/shared/" + part.name; + + Strings ids; + zookeeper->tryGetChildren(zookeeper_part_node, ids); + + Strings replicas; + for (const auto & id : ids) + { + String zookeeper_part_uniq_node = zookeeper_part_node + "/" + id; + Strings paths; + zookeeper->tryGetChildren(zookeeper_part_uniq_node, paths); + for (const auto & path : paths) + { + String zookeeper_node = zookeeper_part_uniq_node + "/" + path; + Strings id_replicas; + zookeeper->tryGetChildren(zookeeper_node, id_replicas); + LOG_TRACE(log, "Found zookeper replicas for {}: {}", zookeeper_node, id_replicas.size()); + replicas.insert(replicas.end(), id_replicas.begin(), id_replicas.end()); + } + } + + LOG_TRACE(log, "Found zookeper replicas for part {}: {}", part.name, replicas.size()); + + Strings active_replicas; + + /// TODO: Move best replica choose in common method (here is the same code as 
in StorageReplicatedMergeTree::fetchPartition) + + /// Leave only active replicas. + active_replicas.reserve(replicas.size()); + + for (const String & replica : replicas) + if ((replica != replica_name) && (zookeeper->exists(zookeeper_path + "/replicas/" + replica + "/is_active"))) + active_replicas.push_back(replica); + + LOG_TRACE(log, "Found zookeper active replicas for part {}: {}", part.name, active_replicas.size()); + + if (active_replicas.empty()) + return best_replica; + + /** You must select the best (most relevant) replica. + * This is a replica with the maximum `log_pointer`, then with the minimum `queue` size. + * NOTE This is not exactly the best criteria. It does not make sense to download old partitions, + * and it would be nice to be able to choose the replica closest by network. + * NOTE Of course, there are data races here. You can solve it by retrying. + */ + Int64 max_log_pointer = -1; + UInt64 min_queue_size = std::numeric_limits::max(); + + for (const String & replica : active_replicas) + { + String current_replica_path = zookeeper_path + "/replicas/" + replica; + + String log_pointer_str = zookeeper->get(current_replica_path + "/log_pointer"); + Int64 log_pointer = log_pointer_str.empty() ? 
0 : parse(log_pointer_str); + + Coordination::Stat stat; + zookeeper->get(current_replica_path + "/queue", &stat); + size_t queue_size = stat.numChildren; + + if (log_pointer > max_log_pointer + || (log_pointer == max_log_pointer && queue_size < min_queue_size)) + { + max_log_pointer = log_pointer; + min_queue_size = queue_size; + best_replica = replica; + } + } + + return best_replica; } } diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 177245d68af..32e5da0d04d 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -212,18 +212,24 @@ public: /// is not overloaded bool canExecuteFetch(const ReplicatedMergeTreeLogEntry & entry, String & disable_reason) const; - struct ZooKeeperAccessData - { - zkutil::ZooKeeperPtr zookeeper; - String zookeeper_path; - String replica_name; - }; - - ZooKeeperAccessData getZooKeeperAccessData() const; - /// Fetch part only when it stored on shared storage like S3 bool executeFetchShared(ReplicatedMergeTreeLogEntry & entry); + /// Lock part in zookeeper for use common S3 data in several nodes + void lockSharedData(const IMergeTreeDataPart & part) const override; + + /// Unlock common S3 data part in zookeeper + /// Return true if data unlocked + /// Return false if data is still used by another node + bool unlockSharedData(const IMergeTreeDataPart & part) const override; + bool unlockSharedData(const IMergeTreeDataPart & part, const String & path) const override; + + /// Fetch part only if some replica has it on shared storage like S3 + bool tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) const override; + + /// Get best replica having this partition on S3 + String getSharedDataReplica(const IMergeTreeDataPart & part) const; + private: /// Get a sequential consistent view of current parts. 
ReplicatedMergeTreeQuorumAddedParts::PartitionIdToMaxBlock getMaxAddedBlocks() const; @@ -503,10 +509,6 @@ private: */ String findReplicaHavingPart(const String & part_name, bool active); - /** Returns a replica with part on shared storage like S3. - */ - String findReplicaHavingSharedPart(const String & part_name, bool active); - bool checkReplicaHavePart(const String & replica, const String & part_name); /** Find replica having specified part or any part that covers it. diff --git a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml index 285ade3f727..7d8492ed68c 100644 --- a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml +++ b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml @@ -22,7 +22,7 @@ 0 - 2 + 1 1 diff --git a/tests/integration/test_s3_zero_copy_replication/test.py b/tests/integration/test_s3_zero_copy_replication/test.py index 88d038e357b..6a7336b9090 100644 --- a/tests/integration/test_s3_zero_copy_replication/test.py +++ b/tests/integration/test_s3_zero_copy_replication/test.py @@ -76,9 +76,13 @@ def test_s3_zero_copy_replication(cluster, policy): # Based on version 20.x - after merge, two old parts and one merged assert get_large_objects_count(cluster) == 3 - time.sleep(60) - # Based on version 20.x - after cleanup - only one merged part + countdown = 60 + while countdown > 0: + if get_large_objects_count(cluster) == 1: + break + time.sleep(1) + countdown -= 1 assert get_large_objects_count(cluster) == 1 node1.query("DROP TABLE IF EXISTS s3_test NO DELAY") From 3c11d444940e8b7c59d93a24366c9b2fb096ec2c Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Fri, 26 Feb 2021 14:06:24 +0300 Subject: [PATCH 028/333] Add description for getUniqueId method, fix typos --- src/Storages/MergeTree/IMergeTreeDataPart.h | 2 ++ src/Storages/MergeTree/MergeTreeData.h | 6 +++--- src/Storages/StorageReplicatedMergeTree.cpp | 6 
++++-- src/Storages/StorageReplicatedMergeTree.h | 3 +++ 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index f9b5f616f70..83f8c672001 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -367,6 +367,8 @@ public: /// part creation (using alter query with materialize_ttl setting). bool checkAllTTLCalculated(const StorageMetadataPtr & metadata_snapshot) const; + /// Return some uniq string for file + /// Required for distinguish different copies of the same part on S3 String getUniqueId() const; protected: diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 81e977d7ecc..3f41cc04217 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -739,16 +739,16 @@ public: bool areBackgroundMovesNeeded() const; /// Lock part in zookeeper for use common S3 data in several nodes - /// Overrided in StorageReplicatedMergeTree + /// Overridden in StorageReplicatedMergeTree virtual void lockSharedData(const IMergeTreeDataPart &) const {} /// Unlock common S3 data part in zookeeper - /// Overrided in StorageReplicatedMergeTree + /// Overridden in StorageReplicatedMergeTree virtual bool unlockSharedData(const IMergeTreeDataPart &) const { return true; } virtual bool unlockSharedData(const IMergeTreeDataPart &, const String &) const { return true; } /// Fetch part only if some replica has it on shared storage like S3 - /// Overrided in StorageReplicatedMergeTree + /// Overridden in StorageReplicatedMergeTree virtual bool tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) const { return false; } protected: diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index b94d3dd3f89..8c5636d2aa0 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ 
b/src/Storages/StorageReplicatedMergeTree.cpp @@ -142,6 +142,10 @@ static const auto MERGE_SELECTING_SLEEP_MS = 5 * 1000; static const auto MUTATIONS_FINALIZING_SLEEP_MS = 1 * 1000; static const auto MUTATIONS_FINALIZING_IDLE_SLEEP_MS = 5 * 1000; + +std::atomic_uint StorageReplicatedMergeTree::total_fetches {0}; + + void StorageReplicatedMergeTree::setZooKeeper() { std::lock_guard lock(current_zookeeper_mutex); @@ -1730,7 +1734,6 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry) const auto storage_settings_ptr = getSettings(); auto metadata_snapshot = getInMemoryMetadataPtr(); - static std::atomic_uint total_fetches {0}; if (storage_settings_ptr->replicated_max_parallel_fetches && total_fetches >= storage_settings_ptr->replicated_max_parallel_fetches) { throw Exception("Too many total fetches from replicas, maximum: " + storage_settings_ptr->replicated_max_parallel_fetches.toString(), @@ -1934,7 +1937,6 @@ bool StorageReplicatedMergeTree::executeFetchShared(ReplicatedMergeTreeLogEntry const auto storage_settings_ptr = getSettings(); auto metadata_snapshot = getInMemoryMetadataPtr(); - static std::atomic_uint total_fetches {0}; if (storage_settings_ptr->replicated_max_parallel_fetches && total_fetches >= storage_settings_ptr->replicated_max_parallel_fetches) { throw Exception("Too many total fetches from replicas, maximum: " + storage_settings_ptr->replicated_max_parallel_fetches.toString(), diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 70a83145da6..58bedfc0408 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -307,6 +307,9 @@ private: /// Event that is signalled (and is reset) by the restarting_thread when the ZooKeeper session expires. 
Poco::Event partial_shutdown_event {false}; /// Poco::Event::EVENT_MANUALRESET + /// Limiting parallel fetches per node + static std::atomic_uint total_fetches; + /// Limiting parallel fetches per one table std::atomic_uint current_table_fetches {0}; From d01cc968aae88f032eca3f4d0211ca55541f8f84 Mon Sep 17 00:00:00 2001 From: ikarishinjieva Date: Thu, 25 Feb 2021 17:57:00 +0800 Subject: [PATCH 029/333] fix #21170 --- src/Interpreters/InterpreterSelectWithUnionQuery.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index b894db79c7b..5f2728804ca 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -53,9 +53,9 @@ struct CustomizeASTSelectWithUnionQueryNormalize auto & select_list = ast.list_of_selects->children; int i; + /// Rewrite UNION Mode for (i = union_modes.size() - 1; i >= 0; --i) { - /// Rewrite UNION Mode if (union_modes[i] == ASTSelectWithUnionQuery::Mode::Unspecified) { if (union_default_mode == UnionMode::ALL) @@ -67,7 +67,10 @@ struct CustomizeASTSelectWithUnionQueryNormalize "Expected ALL or DISTINCT in SelectWithUnion query, because setting (union_default_mode) is empty", DB::ErrorCodes::EXPECTED_ALL_OR_DISTINCT); } + } + for (i = union_modes.size() - 1; i >= 0; --i) + { if (union_modes[i] == ASTSelectWithUnionQuery::Mode::ALL) { if (auto * inner_union = select_list[i + 1]->as()) From d7bbffa410c2faddb3aff0393ac8cd7f76507e18 Mon Sep 17 00:00:00 2001 From: ikarishinjieva Date: Fri, 26 Feb 2021 13:38:59 +0800 Subject: [PATCH 030/333] fix #21170: use copy instead of move --- .../InterpreterSelectWithUnionQuery.cpp | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index 5f2728804ca..1d5b05ddd6b 100644 --- 
a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -43,7 +43,7 @@ struct CustomizeASTSelectWithUnionQueryNormalize return; } - selects.push_back(std::move(ast_select)); + selects.push_back(ast_select); } void visit(ASTSelectWithUnionQuery & ast, ASTPtr &) const @@ -53,9 +53,9 @@ struct CustomizeASTSelectWithUnionQueryNormalize auto & select_list = ast.list_of_selects->children; int i; - /// Rewrite UNION Mode for (i = union_modes.size() - 1; i >= 0; --i) { + /// Rewrite UNION Mode if (union_modes[i] == ASTSelectWithUnionQuery::Mode::Unspecified) { if (union_default_mode == UnionMode::ALL) @@ -67,10 +67,7 @@ struct CustomizeASTSelectWithUnionQueryNormalize "Expected ALL or DISTINCT in SelectWithUnion query, because setting (union_default_mode) is empty", DB::ErrorCodes::EXPECTED_ALL_OR_DISTINCT); } - } - for (i = union_modes.size() - 1; i >= 0; --i) - { if (union_modes[i] == ASTSelectWithUnionQuery::Mode::ALL) { if (auto * inner_union = select_list[i + 1]->as()) @@ -79,10 +76,10 @@ struct CustomizeASTSelectWithUnionQueryNormalize for (auto child = inner_union->list_of_selects->children.rbegin(); child != inner_union->list_of_selects->children.rend(); ++child) - selects.push_back(std::move(*child)); + selects.push_back(*child); } else - selects.push_back(std::move(select_list[i + 1])); + selects.push_back(select_list[i + 1]); } /// flatten all left nodes and current node to a UNION DISTINCT list else if (union_modes[i] == ASTSelectWithUnionQuery::Mode::DISTINCT) @@ -111,10 +108,10 @@ struct CustomizeASTSelectWithUnionQueryNormalize /// Inner_union is an UNION ALL list, just lift it up for (auto child = inner_union->list_of_selects->children.rbegin(); child != inner_union->list_of_selects->children.rend(); ++child) - selects.push_back(std::move(*child)); + selects.push_back(*child); } else - selects.push_back(std::move(select_list[0])); + selects.push_back(select_list[0]); } // reverse children list 
From 6fb68cd454919535b9009705c5ff26b79522caec Mon Sep 17 00:00:00 2001 From: ikarishinjieva Date: Mon, 1 Mar 2021 10:22:39 +0800 Subject: [PATCH 031/333] add functional test on fix #21170 --- tests/queries/0_stateless/01732_union_and_union_all.reference | 0 tests/queries/0_stateless/01732_union_and_union_all.sql | 1 + 2 files changed, 1 insertion(+) create mode 100644 tests/queries/0_stateless/01732_union_and_union_all.reference create mode 100644 tests/queries/0_stateless/01732_union_and_union_all.sql diff --git a/tests/queries/0_stateless/01732_union_and_union_all.reference b/tests/queries/0_stateless/01732_union_and_union_all.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01732_union_and_union_all.sql b/tests/queries/0_stateless/01732_union_and_union_all.sql new file mode 100644 index 00000000000..2de6daa5bb9 --- /dev/null +++ b/tests/queries/0_stateless/01732_union_and_union_all.sql @@ -0,0 +1 @@ +select 1 UNION select 1 UNION ALL select 1; -- { serverError 558 } From f4b2cbc30f66371ab83eb0d59a7ecb55825c99f1 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Sat, 27 Feb 2021 13:21:31 +0200 Subject: [PATCH 032/333] Fixed tests --- .../01691_DateTime64_clamp.reference | 23 ++++--------------- ..._toDateTime_from_string_clamping.reference | 8 +++---- 2 files changed, 9 insertions(+), 22 deletions(-) diff --git a/tests/queries/0_stateless/01691_DateTime64_clamp.reference b/tests/queries/0_stateless/01691_DateTime64_clamp.reference index da80de59e50..f29a9e2d1d5 100644 --- a/tests/queries/0_stateless/01691_DateTime64_clamp.reference +++ b/tests/queries/0_stateless/01691_DateTime64_clamp.reference @@ -1,11 +1,11 @@ -- { echo } -<<<<<<< HEAD +-- These values are within the extended range of DateTime64 [1925-01-01, 2284-01-01) SELECT toTimeZone(toDateTime(-2, 2), 'Europe/Moscow'); -1970-01-01 03:00:00.00 +1970-01-01 02:59:58.00 SELECT toDateTime64(-2, 2, 'Europe/Moscow'); -1970-01-01 03:00:00.00 +1970-01-01 
02:59:58.00 SELECT CAST(-1 AS DateTime64(0, 'Europe/Moscow')); -1970-01-01 03:00:00 +1970-01-01 02:59:59 SELECT CAST('2020-01-01 00:00:00.3' AS DateTime64(0, 'Europe/Moscow')); 2020-01-01 00:00:00 SELECT toDateTime64(bitShiftLeft(toUInt64(1), 33), 2, 'Europe/Moscow') FORMAT Null; @@ -14,20 +14,8 @@ SELECT toTimeZone(toDateTime(-2., 2), 'Europe/Moscow'); SELECT toDateTime64(-2., 2, 'Europe/Moscow'); 1970-01-01 03:00:00.00 SELECT toDateTime64(toFloat32(bitShiftLeft(toUInt64(1),33)), 2, 'Europe/Moscow'); -2106-02-07 09:00:00.00 +2106-02-07 09:28:16.00 SELECT toDateTime64(toFloat64(bitShiftLeft(toUInt64(1),33)), 2, 'Europe/Moscow') FORMAT Null; -======= --- These values are within the extended range of DateTime64 [1925-01-01, 2284-01-01) -SELECT toDateTime(-2, 2); -1970-01-01 02:59:58.00 -SELECT toDateTime64(-2, 2); -1970-01-01 02:59:58.00 -SELECT CAST(-1 AS DateTime64); -1970-01-01 02:59:59.000 -SELECT CAST('2020-01-01 00:00:00.3' AS DateTime64); -2020-01-01 00:00:00.300 -SELECT toDateTime64(bitShiftLeft(toUInt64(1),33), 2); -2242-03-16 15:56:32.00 -- These are outsize of extended range and hence clamped SELECT toDateTime64(-1 * bitShiftLeft(toUInt64(1),35), 2); 1925-01-01 02:00:00.00 @@ -37,4 +25,3 @@ SELECT CAST(bitShiftLeft(toUInt64(1),35) AS DateTime64); 2282-12-31 03:00:00.000 SELECT toDateTime64(bitShiftLeft(toUInt64(1),35), 2); 2282-12-31 03:00:00.00 ->>>>>>> af31042451... 
Extended range of DateTime64 to years 1925 - 2238 diff --git a/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference b/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference index 228086615da..92639948fbc 100644 --- a/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference +++ b/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference @@ -1,9 +1,9 @@ -- { echo } SELECT toString(toDateTime('-922337203.6854775808', 1)); -2106-02-07 15:41:33.6 +1940-10-09 22:13:17.6 SELECT toString(toDateTime('9922337203.6854775808', 1)); -2104-12-30 00:50:11.6 +1925-07-26 00:46:43.6 SELECT toDateTime64(CAST('10000000000.1' AS Decimal64(1)), 1); -2106-02-07 20:50:08.1 +1928-01-11 00:46:40.1 SELECT toDateTime64(CAST('-10000000000.1' AS Decimal64(1)), 1); -2011-12-23 00:38:08.1 +2011-12-22 00:13:20.1 From 6e6ae1db5706e497a27e700772c835b032b9d3ad Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Tue, 2 Mar 2021 15:55:26 +0200 Subject: [PATCH 033/333] Nudge CI From 04672a4365409c3eedbce5d79b3f20692fa7f226 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 3 Mar 2021 22:29:26 +0300 Subject: [PATCH 034/333] Update DateLUTImpl.cpp --- base/common/DateLUTImpl.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index 6f4fb3dd5fc..d9dde3a8d0d 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -56,7 +56,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) time_t start_of_day; time_offset_epoch = cctz::convert(cctz::civil_second(lut_start), cctz_time_zone).time_since_epoch().count(); - // Note validated this against all timezones in the system. + // Note: it's validated against all timezones in the system. 
assert((epoch - lut_start) == daynum_offset_epoch); offset_at_start_of_epoch = cctz_time_zone.lookup(cctz_time_zone.lookup(epoch).pre).offset; From e412bcb490c3519373fab7a69369be82bbf248bf Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 3 Mar 2021 22:30:12 +0300 Subject: [PATCH 035/333] Update DateLUTImpl.cpp --- base/common/DateLUTImpl.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index d9dde3a8d0d..563b744e073 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -95,7 +95,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) values.time_at_offset_change_value = 0; values.amount_of_offset_change_value = 0; - // TODO: this partially ignores fractional pre-epoch offsets, which may cause incorrect toRelativeHourNum() results for some timezones, namelly Europe\Minsk + // TODO: this partially ignores fractional pre-epoch offsets, which may cause incorrect toRelativeHourNum() results for some timezones, namelly Europe/Minsk // when pre-May 2 1924 it had an offset of UTC+1:50, and after it was UTC+2h. // https://www.timeanddate.com/time/zone/belarus/minsk?syear=1900 if (start_of_day > 0 && start_of_day % 3600) From 80c17d55376e29c3a73aa231aac78d341af03271 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 3 Mar 2021 22:41:10 +0300 Subject: [PATCH 036/333] Update DateLUTImpl.cpp --- base/common/DateLUTImpl.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index 563b744e073..68a3aa97e51 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -140,8 +140,6 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) } while (i < DATE_LUT_SIZE && lut[i - 1].year <= DATE_LUT_MAX_YEAR); -// date_lut_max = start_of_day; - /// Fill excessive part of lookup table. This is needed only to simplify handling of overflow cases. 
while (i < DATE_LUT_SIZE) { From 2f23f1b123517251b5661a971e7499f0e9fc99b7 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 3 Mar 2021 22:49:20 +0300 Subject: [PATCH 037/333] Update DateLUTImpl.h --- base/common/DateLUTImpl.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 5a12ad5dc13..72786e31cbc 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -136,18 +136,20 @@ public: /// Since most of the modern timezones have a DST change aligned to 15 minutes, to save as much space as possible inside Value, /// we are dividing any offset change related value by this factor before setting it to Value, /// hence it has to be explicitly multiplied back by this factor before being used. - static const UInt16 OffsetChangeFactor = 900; + static constexpr UInt16 OffsetChangeFactor = 900; }; static_assert(sizeof(Values) == 16); private: - // Mask is all-ones to allow efficient protection against overflow. - static const UInt32 date_lut_mask = 0x1ffff; + /// Mask is all-ones to allow efficient protection against overflow. + static constexpr UInt32 date_lut_mask = 0x1ffff; static_assert(date_lut_mask == DATE_LUT_SIZE - 1); - const UInt32 daynum_offset_epoch = 16436; // offset to epoch in days (ExtendedDayNum) of the first day in LUT. + /// Offset to epoch in days (ExtendedDayNum) of the first day in LUT. + static constexpr UInt32 daynum_offset_epoch = 16436; + static_assert(daynum_offset_epoch == (DATE_LUT_MIN_YEAR - 1970) * 365 + (1970 - DATE_LUT_MIN_YEAR / 4 * 4) / 4); /// Lookup table is indexed by LUTIndex. /// Day nums are the same in all time zones. 1970-01-01 is 0 and so on. 
From 0889e0da87ea511cc30669b6c2aec888052d00ee Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 3 Mar 2021 22:52:07 +0300 Subject: [PATCH 038/333] Update DateLUTImpl.h --- base/common/DateLUTImpl.h | 1 + 1 file changed, 1 insertion(+) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 72786e31cbc..9b36c60eb5e 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -148,6 +148,7 @@ private: static_assert(date_lut_mask == DATE_LUT_SIZE - 1); /// Offset to epoch in days (ExtendedDayNum) of the first day in LUT. + /// "epoch" is the Unix Epoch (starts at unix timestamp zero) static constexpr UInt32 daynum_offset_epoch = 16436; static_assert(daynum_offset_epoch == (DATE_LUT_MIN_YEAR - 1970) * 365 + (1970 - DATE_LUT_MIN_YEAR / 4 * 4) / 4); From 3e19f4a00fc03d466b3648305e3b236653485e8b Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 3 Mar 2021 23:09:59 +0300 Subject: [PATCH 039/333] Update DateLUTImpl.h --- base/common/DateLUTImpl.h | 1 + 1 file changed, 1 insertion(+) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 9b36c60eb5e..79bf1a9f127 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -166,6 +166,7 @@ private: /// UTC offset at beginning of the Unix epoch. The same as unix timestamp of 1970-01-01 00:00:00 local time. time_t offset_at_start_of_epoch; + /// UTC offset at the beginning of the first supported year. 
time_t offset_at_start_of_lut; bool offset_is_whole_number_of_hours_everytime; time_t time_offset_epoch; From cf0912ba02c8e89fd1d913318a9c117ab336a5df Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 3 Mar 2021 23:14:33 +0300 Subject: [PATCH 040/333] Remove unused field --- base/common/DateLUTImpl.cpp | 1 - base/common/DateLUTImpl.h | 2 -- base/common/tests/gtest_DateLutImpl.cpp | 1 - 3 files changed, 4 deletions(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index 68a3aa97e51..bf180acb835 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -54,7 +54,6 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) const cctz::civil_day epoch{1970, 1, 1}; const cctz::civil_day lut_start{DATE_LUT_MIN_YEAR, 1, 1}; time_t start_of_day; - time_offset_epoch = cctz::convert(cctz::civil_second(lut_start), cctz_time_zone).time_since_epoch().count(); // Note: it's validated against all timezones in the system. assert((epoch - lut_start) == daynum_offset_epoch); diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 79bf1a9f127..429db332b49 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -169,7 +169,6 @@ private: /// UTC offset at the beginning of the first supported year. time_t offset_at_start_of_lut; bool offset_is_whole_number_of_hours_everytime; - time_t time_offset_epoch; /// Time zone name. std::string time_zone; @@ -230,7 +229,6 @@ public: // Methods only for unit-testing, it makes very little sense to use it from user code. auto getOffsetAtStartOfEpoch() const { return offset_at_start_of_epoch; } auto getOffsetIsWholNumberOfHoursEveryWhere() const { return offset_is_whole_number_of_hours_everytime; } - auto getTimeOffsetEpoch() const { return time_offset_epoch; } auto getTimeOffsetAtStartOfLUT() const { return offset_at_start_of_lut; } /// All functions below are thread-safe; arguments are not checked. 
diff --git a/base/common/tests/gtest_DateLutImpl.cpp b/base/common/tests/gtest_DateLutImpl.cpp index 9169d9e768f..8cc4fbbbfb0 100644 --- a/base/common/tests/gtest_DateLutImpl.cpp +++ b/base/common/tests/gtest_DateLutImpl.cpp @@ -292,7 +292,6 @@ TEST_P(DateLUTWithTimeZone, VaidateTimeComponentsAroundEpoch) << "\n\ttimestamp: " << i << "\n\t offset at start of epoch : " << lut.getOffsetAtStartOfEpoch() << "\n\t offset_is_whole_number_of_hours_everytime : " << lut.getOffsetIsWholNumberOfHoursEveryWhere() - << "\n\t time_offset_epoch : " << lut.getTimeOffsetEpoch() << "\n\t offset_at_start_of_lut : " << lut.getTimeOffsetAtStartOfLUT()); EXPECT_GE(24, lut.toHour(i)); From aaef0c5ebd1246fa633371ee2d3635b2539bdcee Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 3 Mar 2021 23:19:26 +0300 Subject: [PATCH 041/333] Fix build --- base/common/DateLUTImpl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 429db332b49..5275cc83abb 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -150,7 +150,7 @@ private: /// Offset to epoch in days (ExtendedDayNum) of the first day in LUT. /// "epoch" is the Unix Epoch (starts at unix timestamp zero) static constexpr UInt32 daynum_offset_epoch = 16436; - static_assert(daynum_offset_epoch == (DATE_LUT_MIN_YEAR - 1970) * 365 + (1970 - DATE_LUT_MIN_YEAR / 4 * 4) / 4); + static_assert(daynum_offset_epoch == (1970 - DATE_LUT_MIN_YEAR) * 365 + (1970 - DATE_LUT_MIN_YEAR / 4 * 4) / 4); /// Lookup table is indexed by LUTIndex. /// Day nums are the same in all time zones. 1970-01-01 is 0 and so on. 
From ed0099a11308719bc8319c9aac5707e092379f29 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 3 Mar 2021 23:24:56 +0300 Subject: [PATCH 042/333] Maybe unused condition --- base/common/DateLUTImpl.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 5275cc83abb..66df4744b72 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -179,10 +179,11 @@ private: const UInt32 guess = ((t / 86400) + daynum_offset_epoch) & date_lut_mask; /// UTC offset is from -12 to +14 in all known time zones. This requires checking only three indices. - if ((guess == daynum_offset_epoch || t >= lut[guess].date) && t < lut[UInt32(guess + 1)].date) + if (t >= lut[guess].date && t < lut[UInt32(guess + 1)].date) return LUTIndex{guess}; - /// Time zones that have offset 0 from UTC do daylight saving time change (if any) towards increasing UTC offset (example: British Standard Time). + /// Time zones that have offset 0 from UTC do daylight saving time change (if any) + /// towards increasing UTC offset (example: British Standard Time). 
if (t >= lut[UInt32(guess + 1)].date) return LUTIndex(guess + 1); From b1b3db09d765c008bdb053bdc2693d02f546055e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 3 Mar 2021 23:34:15 +0300 Subject: [PATCH 043/333] Remove commented out code --- base/common/DateLUTImpl.h | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 66df4744b72..890ed3ae173 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -189,6 +189,7 @@ private: if (lut[guess - 1].date <= t) return LUTIndex(guess - 1); + return LUTIndex(guess - 2); } @@ -212,12 +213,6 @@ private: return i; } -// template -// inline LUTIndex toLUTIndex(T t) const -// { -// return LUTIndex{static_cast(t) & date_lut_mask}; -// } - template inline const Values & find(V v) const { @@ -275,11 +270,6 @@ public: return toDayNum(i - (lut[i].day_of_month - 1)); } -// inline DayNum toFirstDayNumOfMonth(time_t t) const -// { -// return toFirstDayNumOfMonth(toDayNum(t)); -// } - /// Round down to start of quarter. 
template inline ExtendedDayNum toFirstDayNumOfQuarter(V v) const From 6f5877abdfcf686913526c013ad09921c995d6df Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 3 Mar 2021 23:53:52 +0300 Subject: [PATCH 044/333] Fix build after merge with master --- base/common/DateLUTImpl.h | 221 +++++++++++++++++++------------------- 1 file changed, 111 insertions(+), 110 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 2fdf293cef4..740411b7113 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -213,8 +213,8 @@ private: return i; } - template - inline const Values & find(V v) const + template + inline const Values & find(DateOrTime v) const { return lut[toLUTIndex(v)]; } @@ -234,54 +234,53 @@ public: return d; } - template - inline ExtendedDayNum toDayNum(V v) const + template + inline ExtendedDayNum toDayNum(DateOrTime v) const { return ExtendedDayNum{static_cast(toLUTIndex(v).toUnderType() - daynum_offset_epoch)}; } /// Round down to start of monday. - template - inline time_t toFirstDayOfWeek(V v) const + template + inline time_t toFirstDayOfWeek(DateOrTime v) const { - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); return lut[i - (lut[i].day_of_week - 1)].date; } - template - inline ExtendedDayNum toFirstDayNumOfWeek(V v) const + template + inline ExtendedDayNum toFirstDayNumOfWeek(DateOrTime v) const { - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); return toDayNum(i - (lut[i].day_of_week - 1)); } /// Round down to start of month. 
- template - inline time_t toFirstDayOfMonth(V v) const + template + inline time_t toFirstDayOfMonth(DateOrTime v) const { - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); return lut[i - (lut[i].day_of_month - 1)].date; } - template - inline ExtendedDayNum toFirstDayNumOfMonth(V v) const + template + inline ExtendedDayNum toFirstDayNumOfMonth(DateOrTime v) const { - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); return toDayNum(i - (lut[i].day_of_month - 1)); } /// Round down to start of quarter. - template - inline ExtendedDayNum toFirstDayNumOfQuarter(V v) const + template + inline ExtendedDayNum toFirstDayNumOfQuarter(DateOrTime v) const { return toDayNum(toFirstDayOfQuarterIndex(v)); } - template - inline LUTIndex toFirstDayOfQuarterIndex(V v) const + template + inline LUTIndex toFirstDayOfQuarterIndex(DateOrTime v) const { - //return fromDayNum(toFirstDayNumOfQuarter(v)); - auto index = toLUTIndex(v); + LUTIndex index = toLUTIndex(v); size_t month_inside_quarter = (lut[index].month - 1) % 3; index -= lut[index].day_of_month; @@ -294,8 +293,8 @@ public: return index + 1; } - template - inline time_t toFirstDayOfQuarter(V v) const + template + inline time_t toFirstDayOfQuarter(DateOrTime v) const { return toDate(toFirstDayOfQuarterIndex(v)); } @@ -306,36 +305,36 @@ public: return lut[years_lut[lut[findIndex(t)].year - DATE_LUT_MIN_YEAR]].date; } - template - inline LUTIndex toFirstDayNumOfYearIndex(V v) const + template + inline LUTIndex toFirstDayNumOfYearIndex(DateOrTime v) const { return years_lut[lut[toLUTIndex(v)].year - DATE_LUT_MIN_YEAR]; } - template - inline ExtendedDayNum toFirstDayNumOfYear(V v) const + template + inline ExtendedDayNum toFirstDayNumOfYear(DateOrTime v) const { return toDayNum(toFirstDayNumOfYearIndex(v)); } inline time_t toFirstDayOfNextMonth(time_t t) const { - auto index = findIndex(t); + LUTIndex index = findIndex(t); index += 32 - lut[index].day_of_month; return lut[index - 
(lut[index].day_of_month - 1)].date; } inline time_t toFirstDayOfPrevMonth(time_t t) const { - auto index = findIndex(t); + LUTIndex index = findIndex(t); index -= lut[index].day_of_month; return lut[index - (lut[index].day_of_month - 1)].date; } - template - inline UInt8 daysInMonth(V v) const + template + inline UInt8 daysInMonth(DateOrTime value) const { - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(value); return lut[i].days_in_month; } @@ -359,7 +358,7 @@ public: inline time_t toTime(time_t t) const { - auto index = findIndex(t); + const LUTIndex index = findIndex(t); if (unlikely(index == daynum_offset_epoch || index > DATE_LUT_MAX_DAY_NUM)) return t + offset_at_start_of_epoch; @@ -374,7 +373,7 @@ public: inline unsigned toHour(time_t t) const { - auto index = findIndex(t); + const LUTIndex index = findIndex(t); /// If it is overflow case, /// than limit number of hours to avoid insane results like 1970-01-01 89:28:15 @@ -398,7 +397,7 @@ public: */ inline time_t timezoneOffset(time_t t) const { - const auto index = findIndex(t); + const LUTIndex index = findIndex(t); /// Calculate daylight saving offset first. /// Because the "amount_of_offset_change" in LUT entry only exists in the change day, it's costly to scan it from the very begin. @@ -446,10 +445,10 @@ public: /// To consider the DST changing situation within this day. 
/// also make the special timezones with no whole hour offset such as 'Australia/Lord_Howe' been taken into account - DayNum index = findIndex(t); + LUTIndex index = findIndex(t); UInt32 res = t - lut[index].date; - if (lut[index].amount_of_offset_change != 0 && t >= lut[index].date + lut[index].time_at_offset_change) - res += lut[index].amount_of_offset_change; + if (lut[index].amount_of_offset_change() != 0 && t >= lut[index].date + lut[index].time_at_offset_change()) + res += lut[index].amount_of_offset_change(); return res / 60 % 60; } @@ -476,47 +475,51 @@ public: * because the same calendar day starts/ends at different timestamps in different time zones) */ -// inline DayNum toDayNum(time_t t) const { return DayNum{findIndex(t) - daynum_offset_epoch}; } -// inline ExtendedDayNum toExtendedDayNum(time_t t) const { return ExtendedDayNum{findIndex(t) - daynum_offset_epoch}; } inline time_t fromDayNum(DayNum d) const { return lut[toLUTIndex(d)].date; } inline time_t fromDayNum(ExtendedDayNum d) const { return lut[toLUTIndex(d)].date; } - template - inline time_t toDate(V v) const { return lut[toLUTIndex(v)].date; } - template - inline unsigned toMonth(V v) const { return lut[toLUTIndex(v)].month; } - template - inline unsigned toQuarter(V v) const { return (lut[toLUTIndex(v)].month - 1) / 3 + 1; } - template - inline Int16 toYear(V v) const { return lut[toLUTIndex(v)].year; } - template - inline unsigned toDayOfWeek(V v) const { return lut[toLUTIndex(v)].day_of_week; } - template - inline unsigned toDayOfMonth(V v) const { return lut[toLUTIndex(v)].day_of_month; } - template - inline unsigned toDayOfYear(V v) const + template + inline time_t toDate(DateOrTime v) const { return lut[toLUTIndex(v)].date; } + + template + inline unsigned toMonth(DateOrTime v) const { return lut[toLUTIndex(v)].month; } + + template + inline unsigned toQuarter(DateOrTime v) const { return (lut[toLUTIndex(v)].month - 1) / 3 + 1; } + + template + inline Int16 toYear(DateOrTime v) const { 
return lut[toLUTIndex(v)].year; } + + template + inline unsigned toDayOfWeek(DateOrTime v) const { return lut[toLUTIndex(v)].day_of_week; } + + template + inline unsigned toDayOfMonth(DateOrTime v) const { return lut[toLUTIndex(v)].day_of_month; } + + template + inline unsigned toDayOfYear(DateOrTime v) const { // TODO: different overload for ExtendedDayNum - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); return i + 1 - toFirstDayNumOfYearIndex(i); } /// Number of week from some fixed moment in the past. Week begins at monday. /// (round down to monday and divide DayNum by 7; we made an assumption, /// that in domain of the function there was no weeks with any other number of days than 7) - template - inline unsigned toRelativeWeekNum(V v) const + template + inline unsigned toRelativeWeekNum(DateOrTime v) const { - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); /// We add 8 to avoid underflow at beginning of unix epoch. return toDayNum(i + 8 - toDayOfWeek(i)) / 7; } /// Get year that contains most of the current week. Week begins at monday. - template - inline unsigned toISOYear(V v) const + template + inline unsigned toISOYear(DateOrTime v) const { - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); /// That's effectively the year of thursday of current week. return toYear(toLUTIndex(i + 4 - toDayOfWeek(i))); } @@ -524,10 +527,10 @@ public: /// ISO year begins with a monday of the week that is contained more than by half in the corresponding calendar year. /// Example: ISO year 2019 begins at 2018-12-31. And ISO year 2017 begins at 2017-01-02. 
/// https://en.wikipedia.org/wiki/ISO_week_date - template - inline LUTIndex toFirstDayNumOfISOYearIndex(V v) const + template + inline LUTIndex toFirstDayNumOfISOYearIndex(DateOrTime v) const { - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); auto iso_year = toISOYear(i); const auto first_day_of_year = years_lut[iso_year - DATE_LUT_MIN_YEAR]; @@ -538,8 +541,8 @@ public: : first_day_of_year + 8 - first_day_of_week_of_year}; } - template - inline ExtendedDayNum toFirstDayNumOfISOYear(V v) const + template + inline ExtendedDayNum toFirstDayNumOfISOYear(DateOrTime v) const { return toDayNum(toFirstDayNumOfISOYearIndex(v)); } @@ -551,8 +554,8 @@ public: /// ISO 8601 week number. Week begins at monday. /// The week number 1 is the first week in year that contains 4 or more days (that's more than half). - template - inline unsigned toISOWeek(V v) const + template + inline unsigned toISOWeek(DateOrTime v) const { return 1 + (toFirstDayNumOfWeek(v) - toFirstDayNumOfISOYear(v)) / 7; } @@ -590,8 +593,8 @@ public: Otherwise it is the last week of the previous year, and the next week is week 1. 
*/ - template - inline YearWeek toYearWeek(V v, UInt8 week_mode) const + template + inline YearWeek toYearWeek(DateOrTime v, UInt8 week_mode) const { const bool newyear_day_mode = week_mode & static_cast(WeekModeFlag::NEWYEAR_DAY); week_mode = check_week_mode(week_mode); @@ -599,7 +602,7 @@ public: bool week_year_mode = week_mode & static_cast(WeekModeFlag::YEAR); const bool first_weekday_mode = week_mode & static_cast(WeekModeFlag::FIRST_WEEKDAY); - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); // Calculate week number of WeekModeFlag::NEWYEAR_DAY mode if (newyear_day_mode) @@ -647,13 +650,13 @@ public: /// Calculate week number of WeekModeFlag::NEWYEAR_DAY mode /// The week number 1 is the first week in year that contains January 1, - template - inline YearWeek toYearWeekOfNewyearMode(V v, bool monday_first_mode) const + template + inline YearWeek toYearWeekOfNewyearMode(DateOrTime v, bool monday_first_mode) const { YearWeek yw(0, 0); UInt16 offset_day = monday_first_mode ? 0U : 1U; - const auto i = LUTIndex(v); + const LUTIndex i = LUTIndex(v); // Checking the week across the year yw.first = toYear(i + 7 - toDayOfWeek(i + offset_day)); @@ -661,7 +664,7 @@ public: auto first_day = makeLUTIndex(yw.first, 1, 1); auto this_day = i; - //TODO: do not perform calculations in terms of DayNum, since that would under/overflow for extended range. + // TODO: do not perform calculations in terms of DayNum, since that would under/overflow for extended range. if (monday_first_mode) { // Rounds down a date to the nearest Monday. 
@@ -680,11 +683,9 @@ public: return yw; } - /** - * get first day of week with week_mode, return Sunday or Monday - */ - template - inline ExtendedDayNum toFirstDayNumOfWeek(V v, UInt8 week_mode) const + /// Get first day of week with week_mode, return Sunday or Monday + template + inline ExtendedDayNum toFirstDayNumOfWeek(DateOrTime v, UInt8 week_mode) const { bool monday_first_mode = week_mode & static_cast(WeekModeFlag::MONDAY_FIRST); if (monday_first_mode) @@ -709,10 +710,10 @@ public: /** Calculate weekday from d. * Returns 0 for monday, 1 for tuesday... */ - template - inline unsigned calc_weekday(V v, bool sunday_first_day_of_week) const + template + inline unsigned calc_weekday(DateOrTime v, bool sunday_first_day_of_week) const { - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); if (!sunday_first_day_of_week) return toDayOfWeek(i) - 1; else @@ -726,17 +727,17 @@ public: } /// Number of month from some fixed moment in the past (year * 12 + month) - template - inline unsigned toRelativeMonthNum(V v) const + template + inline unsigned toRelativeMonthNum(DateOrTime v) const { - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); return lut[i].year * 12 + lut[i].month; } - template - inline unsigned toRelativeQuarterNum(V v) const + template + inline unsigned toRelativeQuarterNum(DateOrTime v) const { - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); return lut[i].year * 4 + (lut[i].month - 1) / 3; } @@ -751,8 +752,8 @@ public: return (t + 86400 - offset_at_start_of_epoch) / 3600; } - template - inline time_t toRelativeHourNum(V v) const + template + inline time_t toRelativeHourNum(DateOrTime v) const { return toRelativeHourNum(lut[toLUTIndex(v)].date); } @@ -762,19 +763,19 @@ public: return t / 60; } - template - inline time_t toRelativeMinuteNum(V v) const + template + inline time_t toRelativeMinuteNum(DateOrTime v) const { return toRelativeMinuteNum(lut[toLUTIndex(v)].date); } - template - inline 
ExtendedDayNum toStartOfYearInterval(V v, UInt64 years) const + template + inline ExtendedDayNum toStartOfYearInterval(DateOrTime v, UInt64 years) const { if (years == 1) return toFirstDayNumOfYear(v); - const auto i = toLUTIndex(v); + const LUTIndex i = toLUTIndex(v); return toDayNum(years_lut[lut[i].year / years * years - DATE_LUT_MIN_YEAR]); } @@ -789,8 +790,8 @@ public: { if (months == 1) return toFirstDayNumOfMonth(d); - const auto & date = lut[toLUTIndex(d)]; - UInt32 month_total_index = (date.year - DATE_LUT_MIN_YEAR) * 12 + date.month - 1; + const Values & values = lut[toLUTIndex(d)]; + UInt32 month_total_index = (values.year - DATE_LUT_MIN_YEAR) * 12 + values.month - 1; return toDayNum(years_months_lut[month_total_index / months * months]); } @@ -876,18 +877,18 @@ public: return res; } - template - inline const Values & getValues(V v) const { return lut[toLUTIndex(v)]; } + template + inline const Values & getValues(DateOrTime v) const { return lut[toLUTIndex(v)]; } - template - inline UInt32 toNumYYYYMM(V v) const + template + inline UInt32 toNumYYYYMM(DateOrTime v) const { const Values & values = getValues(v); return values.year * 100 + values.month; } - template - inline UInt32 toNumYYYYMMDD(V v) const + template + inline UInt32 toNumYYYYMMDD(DateOrTime v) const { const Values & values = getValues(v); return values.year * 10000 + values.month * 100 + values.day_of_month; @@ -932,7 +933,7 @@ public: inline NO_SANITIZE_UNDEFINED time_t addDays(time_t t, Int64 delta) const { - auto index = findIndex(t); + LUTIndex index = findIndex(t); time_t time_offset = toHour(t) * 3600 + toMinute(t) * 60 + toSecond(t); index += delta; @@ -962,8 +963,8 @@ public: return day_of_month; } - template - inline LUTIndex addMonthsIndex(V v, Int64 delta) const + template + inline LUTIndex addMonthsIndex(DateOrTime v, Int64 delta) const { const Values & values = lut[toLUTIndex(v)]; @@ -1016,8 +1017,8 @@ public: return addMonths(d, delta * 3); } - template - inline LUTIndex 
NO_SANITIZE_UNDEFINED addYearsIndex(V v, Int64 delta) const + template + inline LUTIndex NO_SANITIZE_UNDEFINED addYearsIndex(DateOrTime v, Int64 delta) const { const Values & values = lut[toLUTIndex(v)]; From fdc00beb772a0c83032f6c63e61da0d5b6ac9ff9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 3 Mar 2021 23:56:59 +0300 Subject: [PATCH 045/333] Whitespaces --- base/common/DateLUTImpl.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 740411b7113..159219bab83 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -49,16 +49,19 @@ public: // has to be a separate type to support overloading // TODO: make sure that any arithmetic on LUTIndex actually results in valid LUTIndex. STRONG_TYPEDEF(UInt32, LUTIndex) + template friend inline LUTIndex operator+(const LUTIndex & index, const T v) { return LUTIndex{(index.toUnderType() + v) & date_lut_mask}; } + template friend inline LUTIndex operator+(const T v, const LUTIndex & index) { return LUTIndex{(v + index.toUnderType()) & date_lut_mask}; } + friend inline LUTIndex operator+(const LUTIndex & index, const LUTIndex & v) { return LUTIndex{(index.toUnderType() + v.toUnderType()) & date_lut_mask}; @@ -69,11 +72,13 @@ public: { return LUTIndex{(index.toUnderType() - v) & date_lut_mask}; } + template friend inline LUTIndex operator-(const T v, const LUTIndex & index) { return LUTIndex{(v - index.toUnderType()) & date_lut_mask}; } + friend inline LUTIndex operator-(const LUTIndex & index, const LUTIndex & v) { return LUTIndex{(index.toUnderType() - v.toUnderType()) & date_lut_mask}; @@ -84,6 +89,7 @@ public: { return LUTIndex{(index.toUnderType() * v) & date_lut_mask}; } + template friend inline LUTIndex operator*(const T v, const LUTIndex & index) { @@ -95,6 +101,7 @@ public: { return LUTIndex{(index.toUnderType() / v) & date_lut_mask}; } + template friend inline LUTIndex operator/(const T v, const LUTIndex & index) { From 
51d51e474858cddaadc5600995efec37ce6eaa67 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 00:44:58 +0300 Subject: [PATCH 046/333] Return private --- base/common/DateLUT.cpp | 2 +- base/common/DateLUTImpl.h | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/base/common/DateLUT.cpp b/base/common/DateLUT.cpp index 6ff0884701c..d14b63cd70a 100644 --- a/base/common/DateLUT.cpp +++ b/base/common/DateLUT.cpp @@ -152,7 +152,7 @@ const DateLUTImpl & DateLUT::getImplementation(const std::string & time_zone) co auto it = impls.emplace(time_zone, nullptr).first; if (!it->second) - it->second = std::make_unique(time_zone); + it->second = std::unique_ptr(new DateLUTImpl(time_zone)); return *it->second; } diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 159219bab83..8c2bbb3262d 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -37,7 +37,8 @@ using YearWeek = std::pair; */ class DateLUTImpl { -public: +private: + friend class DateLUT; explicit DateLUTImpl(const std::string & time_zone); DateLUTImpl(const DateLUTImpl &) = delete; From 2632b568ae2ce56a635c40d2a4e119c731d2b91c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 01:34:30 +0300 Subject: [PATCH 047/333] Move tests to appropriate place --- base/common/tests/CMakeLists.txt | 14 ----- base/common/tests/date_lut2.cpp | 53 ---------------- base/common/tests/date_lut3.cpp | 62 ------------------- .../tests/date_lut_default_timezone.cpp | 31 ---------- src/CMakeLists.txt | 11 +++- .../Common}/tests/gtest_DateLutImpl.cpp | 2 +- .../Common}/tests/gtest_find_symbols.cpp | 0 .../gtest_global_register_functions.h.bak | 17 +++++ .../Common}/tests/gtest_json_test.cpp | 10 +-- .../Common}/tests/gtest_strong_typedef.cpp | 0 10 files changed, 33 insertions(+), 167 deletions(-) delete mode 100644 base/common/tests/date_lut2.cpp delete mode 100644 base/common/tests/date_lut3.cpp delete mode 100644 
base/common/tests/date_lut_default_timezone.cpp rename {base/common => src/Common}/tests/gtest_DateLutImpl.cpp (99%) rename {base/common => src/Common}/tests/gtest_find_symbols.cpp (100%) create mode 100644 src/Common/tests/gtest_global_register_functions.h.bak rename {base/common => src/Common}/tests/gtest_json_test.cpp (99%) rename {base/common => src/Common}/tests/gtest_strong_typedef.cpp (100%) diff --git a/base/common/tests/CMakeLists.txt b/base/common/tests/CMakeLists.txt index b335b302cb0..92be2f67c94 100644 --- a/base/common/tests/CMakeLists.txt +++ b/base/common/tests/CMakeLists.txt @@ -1,27 +1,13 @@ include (${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake) -add_executable (date_lut2 date_lut2.cpp) -add_executable (date_lut3 date_lut3.cpp) -add_executable (date_lut_default_timezone date_lut_default_timezone.cpp) add_executable (local_date_time_comparison local_date_time_comparison.cpp) add_executable (realloc-perf allocator.cpp) set(PLATFORM_LIBS ${CMAKE_DL_LIBS}) -target_link_libraries (date_lut2 PRIVATE common ${PLATFORM_LIBS}) -target_link_libraries (date_lut3 PRIVATE common ${PLATFORM_LIBS}) -target_link_libraries (date_lut_default_timezone PRIVATE common ${PLATFORM_LIBS}) target_link_libraries (local_date_time_comparison PRIVATE common) target_link_libraries (realloc-perf PRIVATE common) add_check(local_date_time_comparison) -if(USE_GTEST) - add_executable(unit_tests_libcommon gtest_json_test.cpp gtest_strong_typedef.cpp gtest_find_symbols.cpp gtest_DateLutImpl.cpp - ${CMAKE_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp - ) - target_link_libraries(unit_tests_libcommon PRIVATE common ${GTEST_MAIN_LIBRARIES} ${GTEST_LIBRARIES}) - add_check(unit_tests_libcommon) -endif() - add_executable (dump_variable dump_variable.cpp) target_link_libraries (dump_variable PRIVATE clickhouse_common_io) diff --git a/base/common/tests/date_lut2.cpp b/base/common/tests/date_lut2.cpp deleted file mode 100644 index 6dcf5e8adf2..00000000000 --- 
a/base/common/tests/date_lut2.cpp +++ /dev/null @@ -1,53 +0,0 @@ -#include -#include - -#include - - -static std::string toString(time_t Value) -{ - struct tm tm; - char buf[96]; - - localtime_r(&Value, &tm); - snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d", - tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - - return buf; -} - -static time_t orderedIdentifierToDate(unsigned value) -{ - struct tm tm; - - memset(&tm, 0, sizeof(tm)); - - tm.tm_year = value / 10000 - 1900; - tm.tm_mon = (value % 10000) / 100 - 1; - tm.tm_mday = value % 100; - tm.tm_isdst = -1; - - return mktime(&tm); -} - - -void loop(time_t begin, time_t end, int step) -{ - const auto & date_lut = DateLUT::instance(); - - for (time_t t = begin; t < end; t += step) - std::cout << toString(t) - << ", " << toString(date_lut.toTime(t)) - << ", " << date_lut.toHour(t) - << std::endl; -} - - -int main(int, char **) -{ - loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60); - loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60); - loop(orderedIdentifierToDate(20141020), orderedIdentifierToDate(20141106), 15 * 60); - - return 0; -} diff --git a/base/common/tests/date_lut3.cpp b/base/common/tests/date_lut3.cpp deleted file mode 100644 index 411765d2b2a..00000000000 --- a/base/common/tests/date_lut3.cpp +++ /dev/null @@ -1,62 +0,0 @@ -#include -#include - -#include - -#include - - -static std::string toString(time_t Value) -{ - struct tm tm; - char buf[96]; - - localtime_r(&Value, &tm); - snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d", - tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - - return buf; -} - -static time_t orderedIdentifierToDate(unsigned value) -{ - struct tm tm; - - memset(&tm, 0, sizeof(tm)); - - tm.tm_year = value / 10000 - 1900; - tm.tm_mon = (value % 10000) / 100 - 1; - tm.tm_mday = value % 100; - tm.tm_isdst = -1; - - return mktime(&tm); -} 
- - -void loop(time_t begin, time_t end, int step) -{ - const auto & date_lut = DateLUT::instance(); - - for (time_t t = begin; t < end; t += step) - { - time_t t2 = date_lut.makeDateTime(date_lut.toYear(t), date_lut.toMonth(t), date_lut.toDayOfMonth(t), - date_lut.toHour(t), date_lut.toMinute(t), date_lut.toSecond(t)); - - std::string s1 = toString(t); - std::string s2 = toString(t2); - - std::cerr << s1 << ", " << s2 << std::endl; - - if (s1 != s2) - throw Poco::Exception("Test failed."); - } -} - - -int main(int, char **) -{ - loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60); - loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60); - - return 0; -} diff --git a/base/common/tests/date_lut_default_timezone.cpp b/base/common/tests/date_lut_default_timezone.cpp deleted file mode 100644 index b8e5aa08931..00000000000 --- a/base/common/tests/date_lut_default_timezone.cpp +++ /dev/null @@ -1,31 +0,0 @@ -#include -#include -#include - -int main(int, char **) -{ - try - { - const auto & date_lut = DateLUT::instance(); - std::cout << "Detected default timezone: `" << date_lut.getTimeZone() << "'" << std::endl; - time_t now = time(nullptr); - std::cout << "Current time: " << date_lut.timeToString(now) - << ", UTC: " << DateLUT::instance("UTC").timeToString(now) << std::endl; - } - catch (const Poco::Exception & e) - { - std::cerr << e.displayText() << std::endl; - return 1; - } - catch (std::exception & e) - { - std::cerr << "std::exception: " << e.what() << std::endl; - return 2; - } - catch (...) 
- { - std::cerr << "Some exception" << std::endl; - return 3; - } - return 0; -} diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b80bcfdf4d4..a6a7d280479 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -478,6 +478,15 @@ if (ENABLE_TESTS AND USE_GTEST) -Wno-gnu-zero-variadic-macro-arguments ) - target_link_libraries(unit_tests_dbms PRIVATE ${GTEST_BOTH_LIBRARIES} clickhouse_functions clickhouse_aggregate_functions clickhouse_parsers dbms clickhouse_common_zookeeper string_utils) + target_link_libraries(unit_tests_dbms PRIVATE + ${GTEST_BOTH_LIBRARIES} + clickhouse_functions + clickhouse_aggregate_functions + clickhouse_parsers + clickhouse_storages_system + dbms + clickhouse_common_zookeeper + string_utils) + add_check(unit_tests_dbms) endif () diff --git a/base/common/tests/gtest_DateLutImpl.cpp b/src/Common/tests/gtest_DateLutImpl.cpp similarity index 99% rename from base/common/tests/gtest_DateLutImpl.cpp rename to src/Common/tests/gtest_DateLutImpl.cpp index 8cc4fbbbfb0..3a0da1ee1ee 100644 --- a/base/common/tests/gtest_DateLutImpl.cpp +++ b/src/Common/tests/gtest_DateLutImpl.cpp @@ -11,7 +11,7 @@ #pragma clang diagnostic ignored "-Wused-but-marked-unused" #endif -// All timezones present at build time and embedded into CH binary. +// All timezones present at build time and embedded into ClickHouse binary. 
extern const char * auto_time_zones[]; namespace diff --git a/base/common/tests/gtest_find_symbols.cpp b/src/Common/tests/gtest_find_symbols.cpp similarity index 100% rename from base/common/tests/gtest_find_symbols.cpp rename to src/Common/tests/gtest_find_symbols.cpp diff --git a/src/Common/tests/gtest_global_register_functions.h.bak b/src/Common/tests/gtest_global_register_functions.h.bak new file mode 100644 index 00000000000..197ce5838b9 --- /dev/null +++ b/src/Common/tests/gtest_global_register_functions.h.bak @@ -0,0 +1,17 @@ +#include +#include + +struct RegisteredFunctionsState +{ + RegisteredFunctionsState() + { + DB::registerFunctions(); + } + + RegisteredFunctionsState(RegisteredFunctionsState &&) = default; +}; + +inline void tryRegisterFunctions() +{ + static RegisteredFunctionsState registered_functions_state; +} diff --git a/base/common/tests/gtest_json_test.cpp b/src/Common/tests/gtest_json_test.cpp similarity index 99% rename from base/common/tests/gtest_json_test.cpp rename to src/Common/tests/gtest_json_test.cpp index 189a1a03d99..7b810504952 100644 --- a/base/common/tests/gtest_json_test.cpp +++ b/src/Common/tests/gtest_json_test.cpp @@ -500,14 +500,14 @@ TEST(JSONSuite, SimpleTest) { R"("detail")", ResultType::Return, "detail" }, { R"("actionField")", ResultType::Return, "actionField" }, { R"("list")", ResultType::Return, "list" }, - { "\0\"", ResultType::Throw, "JSON: expected \", got \0" }, + { "\0\"", ResultType::Throw, "JSON: begin >= end." }, { "\"/igrushki/konstruktory\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"/Творчество/Рисование/Инструменты и кра\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\0t", ResultType::Throw, "JSON: expected \", got \0" }, + { "\0t", ResultType::Throw, "JSON: begin >= end." }, { "\"/Хозтовары/Хранение вещей и организа\xD1\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"/Хозтовары/Товары для стир\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"li\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, @@ -572,10 +572,10 @@ TEST(JSONSuite, SimpleTest) { "\"/Игр\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"/Игрушки/Игрушки для девочек/Игровые модули дл\xD1\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"Крупная бытовая техника/Стиральные машины/С фронт\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\0 ", ResultType::Throw, "JSON: expected \", got \0" }, + { "\0 ", ResultType::Throw, "JSON: begin >= end." }, { "\"Светодиодная лента SMD3528, 5 м. IP33, 60LED, зеленый, 4,8W/мет\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"Сантехника/Мебель для ванных комнат/Стол\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, - { "\0o", ResultType::Throw, "JSON: expected \", got \0" }, + { "\0o", ResultType::Throw, "JSON: begin >= end." }, { "\"/igrushki/konstruktory\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"/posuda/kuhonnye-prinadlezhnosti-i-instrumenty/kuhonnye-pr\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, @@ -583,7 +583,7 @@ TEST(JSONSuite, SimpleTest) { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\0 ", ResultType::Throw, "JSON: expected \", got \0" }, + { "\0 ", ResultType::Throw, "JSON: begin >= end." }, { "\"/Хозтовары/Хранение вещей и организа\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"/Хозтовары/Товары для стир\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, { "\"li\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, diff --git a/base/common/tests/gtest_strong_typedef.cpp b/src/Common/tests/gtest_strong_typedef.cpp similarity index 100% rename from base/common/tests/gtest_strong_typedef.cpp rename to src/Common/tests/gtest_strong_typedef.cpp From 329074bd029a397adbda1fa67751e2a35cb2d48b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 01:55:56 +0300 Subject: [PATCH 048/333] Fix test --- src/Common/tests/gtest_json_test.cpp | 1230 +++++++++++++------------- 1 file changed, 615 insertions(+), 615 deletions(-) diff --git a/src/Common/tests/gtest_json_test.cpp b/src/Common/tests/gtest_json_test.cpp index 7b810504952..726fb836030 100644 --- a/src/Common/tests/gtest_json_test.cpp +++ b/src/Common/tests/gtest_json_test.cpp @@ -1,14 +1,13 @@ #include #include #include +#include #include #include - -using namespace std::literals::string_literals; - #include + enum class ResultType { Return, @@ -17,620 +16,622 @@ enum class ResultType struct GetStringTestRecord { - const char * input; + std::string_view input; ResultType result_type; - const char * result; + std::string_view result; }; TEST(JSONSuite, SimpleTest) { + using namespace std::literals; + std::vector test_data = { - { R"("name")", ResultType::Return, "name" }, - { R"("Вафельница Vitek WX-1102 FL")", ResultType::Return, "Вафельница Vitek WX-1102 FL" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("184509")", ResultType::Return, "184509" }, - { R"("category")", ResultType::Return, "category" }, - { R"("Все для детей/Детская техника/Vitek")", ResultType::Return, "Все для детей/Детская техника/Vitek" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("В наличии")", ResultType::Return, "В наличии" }, - { R"("price")", ResultType::Return, "price" }, - { R"("2390.00")", ResultType::Return, "2390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("Карточка")", ResultType::Return, "Карточка" }, - { R"("position")", ResultType::Return, "position" }, - { R"("detail")", 
ResultType::Return, "detail" }, - { R"("actionField")", ResultType::Return, "actionField" }, - { R"("list")", ResultType::Return, "list" }, - { R"("http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc")", ResultType::Return, "http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc" }, - { R"("action")", ResultType::Return, "action" }, - { R"("detail")", ResultType::Return, "detail" }, - { R"("products")", ResultType::Return, "products" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Вафельница Vitek WX-1102 FL")", ResultType::Return, "Вафельница Vitek WX-1102 FL" }, - { R"("id")", ResultType::Return, "id" }, - { R"("184509")", ResultType::Return, "184509" }, - { R"("price")", ResultType::Return, "price" }, - { R"("2390.00")", ResultType::Return, "2390.00" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("Vitek")", ResultType::Return, "Vitek" }, - { R"("category")", ResultType::Return, "category" }, - { R"("Все для детей/Детская техника/Vitek")", ResultType::Return, "Все для детей/Детская техника/Vitek" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("В наличии")", ResultType::Return, "В наличии" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("experiments")", ResultType::Return, "experiments" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("los_portal")", ResultType::Return, "los_portal" }, - { R"("los_level")", ResultType::Return, "los_level" }, - { R"("none")", ResultType::Return, "none" }, - { R"("isAuthorized")", ResultType::Return, "isAuthorized" }, - { R"("isSubscriber")", ResultType::Return, "isSubscriber" }, - { R"("postType")", ResultType::Return, "postType" }, - { R"("Новости")", ResultType::Return, "Новости" }, - { R"("experiments")", ResultType::Return, "experiments" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("los_portal")", ResultType::Return, "los_portal" }, - { R"("los_level")", 
ResultType::Return, "los_level" }, - { R"("none")", ResultType::Return, "none" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("Электроплита GEFEST Брест ЭПНД 5140-01 0001")", ResultType::Return, "Электроплита GEFEST Брест ЭПНД 5140-01 0001" }, - { R"("price")", ResultType::Return, "price" }, - { R"("currencyCode")", ResultType::Return, "currencyCode" }, - { R"("RUB")", ResultType::Return, "RUB" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("experiments")", ResultType::Return, "experiments" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("los_portal")", ResultType::Return, "los_portal" }, - { R"("los_level")", ResultType::Return, "los_level" }, - { R"("none")", ResultType::Return, "none" }, - { R"("trash_login")", ResultType::Return, "trash_login" }, - { R"("novikoff")", ResultType::Return, "novikoff" }, - { R"("trash_cat_link")", ResultType::Return, "trash_cat_link" }, - { R"("progs")", ResultType::Return, "progs" }, - { R"("trash_parent_link")", ResultType::Return, "trash_parent_link" }, - { R"("content")", ResultType::Return, "content" }, - { R"("trash_posted_parent")", ResultType::Return, "trash_posted_parent" }, - { R"("content.01.2016")", ResultType::Return, "content.01.2016" }, - { R"("trash_posted_cat")", ResultType::Return, "trash_posted_cat" }, - { R"("progs.01.2016")", ResultType::Return, "progs.01.2016" }, - { R"("trash_virus_count")", ResultType::Return, "trash_virus_count" }, - { R"("trash_is_android")", ResultType::Return, "trash_is_android" }, - { R"("trash_is_wp8")", ResultType::Return, "trash_is_wp8" }, - { R"("trash_is_ios")", ResultType::Return, "trash_is_ios" }, - { R"("trash_posted")", ResultType::Return, "trash_posted" }, - { R"("01.2016")", ResultType::Return, "01.2016" }, - { R"("experiments")", ResultType::Return, "experiments" }, - { R"("lang")", ResultType::Return, "lang" }, - { 
R"("ru")", ResultType::Return, "ru" }, - { R"("los_portal")", ResultType::Return, "los_portal" }, - { R"("los_level")", ResultType::Return, "los_level" }, - { R"("none")", ResultType::Return, "none" }, - { R"("merchantId")", ResultType::Return, "merchantId" }, - { R"("13694_49246")", ResultType::Return, "13694_49246" }, - { R"("cps-source")", ResultType::Return, "cps-source" }, - { R"("wargaming")", ResultType::Return, "wargaming" }, - { R"("cps_provider")", ResultType::Return, "cps_provider" }, - { R"("default")", ResultType::Return, "default" }, - { R"("errorReason")", ResultType::Return, "errorReason" }, - { R"("no errors")", ResultType::Return, "no errors" }, - { R"("scid")", ResultType::Return, "scid" }, - { R"("isAuthPayment")", ResultType::Return, "isAuthPayment" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("rubric")", ResultType::Return, "rubric" }, - { R"("")", ResultType::Return, "" }, - { R"("rubric")", ResultType::Return, "rubric" }, - { R"("Мир")", ResultType::Return, "Мир" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("experiments")", ResultType::Return, "experiments" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("los_portal")", ResultType::Return, "los_portal" }, - { R"("los_level")", ResultType::Return, "los_level" }, - { R"("none")", ResultType::Return, "none" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("__ym")", ResultType::Return, "__ym" }, - { R"("ecommerce")", ResultType::Return, "ecommerce" }, - { R"("impressions")", ResultType::Return, "impressions" }, - { R"("id")", ResultType::Return, "id" }, - { R"("863813")", ResultType::Return, "863813" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Happy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Happy, возраст 1-2 года, 
трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("863839")", ResultType::Return, "863839" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("863847")", ResultType::Return, "863847" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж")", 
ResultType::Return, "Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("911480")", ResultType::Return, "911480" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Puppy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Puppy, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("911484")", ResultType::Return, "911484" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка 
детская 3D Little bears, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Little bears, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("911489")", ResultType::Return, "911489" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж")", ResultType::Return, "Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("911496")", ResultType::Return, "911496" }, - { 
R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Pretty, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Pretty, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("911504")", ResultType::Return, "911504" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { 
R"("911508")", ResultType::Return, "911508" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Kittens, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Kittens, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("911512")", ResultType::Return, "911512" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" 
}, - { R"("id")", ResultType::Return, "id" }, - { R"("911516")", ResultType::Return, "911516" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("911520")", ResultType::Return, "911520" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, 
"/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("911524")", ResultType::Return, "911524" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("911528")", ResultType::Return, "911528" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Футболка детская 3D Turtle, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Turtle, возраст 1-2 года, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("390.00")", ResultType::Return, "390.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { 
R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("888616")", ResultType::Return, "888616" }, - { R"("name")", ResultType::Return, "name" }, - { "\"3Д Футболка мужская \\\"Collorista\\\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж\"", ResultType::Return, "3Д Футболка мужская \"Collorista\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Одежда и обувь/Мужская одежда/Футболки/")", ResultType::Return, "/Одежда и обувь/Мужская одежда/Футболки/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("406.60")", ResultType::Return, "406.60" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("913361")", ResultType::Return, "913361" }, - { R"("name")", ResultType::Return, "name" }, - { R"("3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("470.00")", ResultType::Return, "470.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", 
ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("913364")", ResultType::Return, "913364" }, - { R"("name")", ResultType::Return, "name" }, - { R"("3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("470.00")", ResultType::Return, "470.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("913367")", ResultType::Return, "913367" }, - { R"("name")", ResultType::Return, "name" }, - { R"("3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("470.00")", ResultType::Return, "470.00" }, - { 
R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("913385")", ResultType::Return, "913385" }, - { R"("name")", ResultType::Return, "name" }, - { R"("3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, "price" }, - { R"("470.00")", ResultType::Return, "470.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("id")", ResultType::Return, "id" }, - { R"("913391")", ResultType::Return, "913391" }, - { R"("name")", ResultType::Return, "name" }, - { R"("3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж" }, - { R"("category")", ResultType::Return, "category" }, - { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("")", ResultType::Return, "" }, - { R"("price")", ResultType::Return, 
"price" }, - { R"("470.00")", ResultType::Return, "470.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" }, - { R"("position")", ResultType::Return, "position" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" }, - { R"("usertype")", ResultType::Return, "usertype" }, - { R"("visitor")", ResultType::Return, "visitor" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("__ym")", ResultType::Return, "__ym" }, - { R"("ecommerce")", ResultType::Return, "ecommerce" }, - { R"("impressions")", ResultType::Return, "impressions" }, - { R"("experiments")", ResultType::Return, "experiments" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("los_portal")", ResultType::Return, "los_portal" }, - { R"("los_level")", ResultType::Return, "los_level" }, - { R"("none")", ResultType::Return, "none" }, - { R"("experiments")", ResultType::Return, "experiments" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("los_portal")", ResultType::Return, "los_portal" }, - { R"("los_level")", ResultType::Return, "los_level" }, - { R"("none")", ResultType::Return, "none" }, - { R"("experiments")", ResultType::Return, "experiments" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("los_portal")", ResultType::Return, "los_portal" }, - { R"("los_level")", ResultType::Return, "los_level" }, - { R"("none")", ResultType::Return, "none" }, - { R"("experiments")", ResultType::Return, "experiments" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("los_portal")", ResultType::Return, "los_portal" }, - { R"("los_level")", 
ResultType::Return, "los_level" }, - { R"("none")", ResultType::Return, "none" }, - { R"("experiments")", ResultType::Return, "experiments" }, - { R"("lang")", ResultType::Return, "lang" }, - { R"("ru")", ResultType::Return, "ru" }, - { R"("los_portal")", ResultType::Return, "los_portal" }, - { R"("los_level")", ResultType::Return, "los_level" }, - { R"("none")", ResultType::Return, "none" }, - { R"("__ym")", ResultType::Return, "__ym" }, - { R"("ecommerce")", ResultType::Return, "ecommerce" }, - { R"("currencyCode")", ResultType::Return, "currencyCode" }, - { R"("RUR")", ResultType::Return, "RUR" }, - { R"("impressions")", ResultType::Return, "impressions" }, - { R"("name")", ResultType::Return, "name" }, - { R"("Чайник электрический Mystery MEK-1627, белый")", ResultType::Return, "Чайник электрический Mystery MEK-1627, белый" }, - { R"("brand")", ResultType::Return, "brand" }, - { R"("Mystery")", ResultType::Return, "Mystery" }, - { R"("id")", ResultType::Return, "id" }, - { R"("187180")", ResultType::Return, "187180" }, - { R"("category")", ResultType::Return, "category" }, - { R"("Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery")", ResultType::Return, "Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery" }, - { R"("variant")", ResultType::Return, "variant" }, - { R"("В наличии")", ResultType::Return, "В наличии" }, - { R"("price")", ResultType::Return, "price" }, - { R"("1630.00")", ResultType::Return, "1630.00" }, - { R"("list")", ResultType::Return, "list" }, - { R"("Карточка")", ResultType::Return, "Карточка" }, - { R"("position")", ResultType::Return, "position" }, - { R"("detail")", ResultType::Return, "detail" }, - { R"("actionField")", ResultType::Return, "actionField" }, - { R"("list")", ResultType::Return, "list" }, - { "\0\"", ResultType::Throw, "JSON: begin >= end." }, - { "\"/igrushki/konstruktory\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, - { "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Творчество/Рисование/Инструменты и кра\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\0t", ResultType::Throw, "JSON: begin >= end." }, - { "\"/Хозтовары/Хранение вещей и организа\xD1\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Хозтовары/Товары для стир\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"li\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/kosmetika-i-parfyum/parfyumeriya/mu\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/ko\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "", ResultType::Throw, "JSON: begin >= end." }, - { "\"/stroitelstvo-i-remont/stroit\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, - { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/s\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Строительство и ремонт/Строительный инструмент/Изм\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/avto/soputstvuy\0l", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/str\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xFF", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Мелкая бытовая техника/Мелки\xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Пряжа \\\"Бамбук стрейч\\0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Карандаш чёрнографитны\xD0\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0l", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"ca\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"ca\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, - { "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Канцтовары/Ежедневники и блокн\xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/kanctovary/ezhednevniki-i-blok\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Стакан \xD0\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\x80", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"c\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Органайзер для хранения аксессуаров, \0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"quantity\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Сменный блок для тетрадей на кольцах А5, 160 листов клетка, офсет \xE2\x84\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Сувениры/Ф\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"\0\"", ResultType::Return, "\0" }, - { "\"\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"va\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"ca\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, - { "\"В \0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/letnie-tovary/z\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Посудомоечная машина Ha\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Крупная бытов\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Полочная акустическая система Magnat Needl\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"brand\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"pos\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"c\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"var\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Телевизоры и видеотехника/Всё для домашних кинотеатр\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Флеш-диск Transcend JetFlash 620 8GB (TS8GJF62\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Табурет Мег\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"variant\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Катал\xD0\0\"", ResultType::Return, "Катал\xD0\0" }, - { "\"К\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, - { "\"Полочная акустическая система Magnat Needl\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"brand\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"pos\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"c\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"17\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/igrushki/razvivayusc\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Ключница \\\"\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Игр\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Игрушки/Игрушки для девочек/Игровые модули дл\xD1\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Крупная бытовая техника/Стиральные машины/С фронт\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\0 ", ResultType::Throw, "JSON: begin >= end." }, - { "\"Светодиодная лента SMD3528, 5 м. IP33, 60LED, зеленый, 4,8W/мет\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Сантехника/Мебель для ванных комнат/Стол\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\0o", ResultType::Throw, "JSON: begin >= end." }, - { "\"/igrushki/konstruktory\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, - { "\"/posuda/kuhonnye-prinadlezhnosti-i-instrumenty/kuhonnye-pr\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Творчество/Рисование/Инструменты и кра\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\0 ", ResultType::Throw, "JSON: begin >= end." }, - { "\"/Хозтовары/Хранение вещей и организа\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Хозтовары/Товары для стир\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"li\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/igrushki/igrus\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/kosmetika-i-parfyum/parfyumeriya/mu\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, - { "\"/ko\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/avto/avtomobilnyy\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/stroitelstvo-i-remont/stroit\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/s\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Строительство и ремонт/Строительный инструмент/Изм\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/avto/soputstvuy\0\"", ResultType::Return, "/avto/soputstvuy\0" }, - { "\"/str\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Чайник электрический Vitesse\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Мелкая бытовая техника/Мелки\xD0\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Пряжа \\\"Бамбук стрейч\\0о", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Карандаш чёрнографитны\xD0\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, - { "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0\"", ResultType::Return, "/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0" }, - { "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"ca\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Подаро\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Средство для прочис\xD1\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"i\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/p\0\"", ResultType::Return, "/p\0" }, - { "\"/Сувениры/Магниты, н\xD0\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Дерев\xD0\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/prazdniki/svadba/svadebnaya-c\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Канцт\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Праздники/То\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"v\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Косметика \xD0\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Спорт и отдых/Настольные игры/Покер, руле\xD1\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"categ\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, - { "\"/retailr\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/retailrocket\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Ежедневник недат А5 140л кл,ляссе,обл пв\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/432809/ezhednevnik-organayzer-sredniy-s-remeshkom-na-knopke-v-oblozhke-kalkulyator-kalendar-do-\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/Канцтовары/Ежедневники и блокн\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"/kanctovary/ezhednevniki-i-blok\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Стакан \xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." }, - { "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." 
}, - { "\"c\0\"", ResultType::Return, "c\0" }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Вафельница Vitek WX-1102 FL")"sv, ResultType::Return, "Вафельница Vitek WX-1102 FL"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("184509")"sv, ResultType::Return, "184509"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("Все для детей/Детская техника/Vitek")"sv, ResultType::Return, "Все для детей/Детская техника/Vitek"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("В наличии")"sv, ResultType::Return, "В наличии"sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("2390.00")"sv, ResultType::Return, "2390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("Карточка")"sv, ResultType::Return, "Карточка"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("detail")"sv, ResultType::Return, "detail"sv }, + { R"("actionField")"sv, ResultType::Return, "actionField"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc")"sv, ResultType::Return, "http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc"sv }, + { R"("action")"sv, ResultType::Return, "action"sv }, + { R"("detail")"sv, ResultType::Return, "detail"sv }, + { R"("products")"sv, ResultType::Return, "products"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Вафельница Vitek WX-1102 FL")"sv, ResultType::Return, "Вафельница Vitek WX-1102 FL"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("184509")"sv, ResultType::Return, "184509"sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("2390.00")"sv, ResultType::Return, "2390.00"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("Vitek")"sv, ResultType::Return, "Vitek"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("Все для детей/Детская техника/Vitek")"sv, ResultType::Return, "Все для детей/Детская 
техника/Vitek"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("В наличии")"sv, ResultType::Return, "В наличии"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("experiments")"sv, ResultType::Return, "experiments"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("los_portal")"sv, ResultType::Return, "los_portal"sv }, + { R"("los_level")"sv, ResultType::Return, "los_level"sv }, + { R"("none")"sv, ResultType::Return, "none"sv }, + { R"("isAuthorized")"sv, ResultType::Return, "isAuthorized"sv }, + { R"("isSubscriber")"sv, ResultType::Return, "isSubscriber"sv }, + { R"("postType")"sv, ResultType::Return, "postType"sv }, + { R"("Новости")"sv, ResultType::Return, "Новости"sv }, + { R"("experiments")"sv, ResultType::Return, "experiments"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("los_portal")"sv, ResultType::Return, "los_portal"sv }, + { R"("los_level")"sv, ResultType::Return, "los_level"sv }, + { R"("none")"sv, ResultType::Return, "none"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("Электроплита GEFEST Брест ЭПНД 5140-01 0001")"sv, ResultType::Return, "Электроплита GEFEST Брест ЭПНД 5140-01 0001"sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("currencyCode")"sv, ResultType::Return, "currencyCode"sv }, + { R"("RUB")"sv, ResultType::Return, "RUB"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("experiments")"sv, ResultType::Return, "experiments"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("los_portal")"sv, ResultType::Return, "los_portal"sv }, + { R"("los_level")"sv, ResultType::Return, "los_level"sv }, + { R"("none")"sv, ResultType::Return, "none"sv }, + { R"("trash_login")"sv, ResultType::Return, 
"trash_login"sv }, + { R"("novikoff")"sv, ResultType::Return, "novikoff"sv }, + { R"("trash_cat_link")"sv, ResultType::Return, "trash_cat_link"sv }, + { R"("progs")"sv, ResultType::Return, "progs"sv }, + { R"("trash_parent_link")"sv, ResultType::Return, "trash_parent_link"sv }, + { R"("content")"sv, ResultType::Return, "content"sv }, + { R"("trash_posted_parent")"sv, ResultType::Return, "trash_posted_parent"sv }, + { R"("content.01.2016")"sv, ResultType::Return, "content.01.2016"sv }, + { R"("trash_posted_cat")"sv, ResultType::Return, "trash_posted_cat"sv }, + { R"("progs.01.2016")"sv, ResultType::Return, "progs.01.2016"sv }, + { R"("trash_virus_count")"sv, ResultType::Return, "trash_virus_count"sv }, + { R"("trash_is_android")"sv, ResultType::Return, "trash_is_android"sv }, + { R"("trash_is_wp8")"sv, ResultType::Return, "trash_is_wp8"sv }, + { R"("trash_is_ios")"sv, ResultType::Return, "trash_is_ios"sv }, + { R"("trash_posted")"sv, ResultType::Return, "trash_posted"sv }, + { R"("01.2016")"sv, ResultType::Return, "01.2016"sv }, + { R"("experiments")"sv, ResultType::Return, "experiments"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("los_portal")"sv, ResultType::Return, "los_portal"sv }, + { R"("los_level")"sv, ResultType::Return, "los_level"sv }, + { R"("none")"sv, ResultType::Return, "none"sv }, + { R"("merchantId")"sv, ResultType::Return, "merchantId"sv }, + { R"("13694_49246")"sv, ResultType::Return, "13694_49246"sv }, + { R"("cps-source")"sv, ResultType::Return, "cps-source"sv }, + { R"("wargaming")"sv, ResultType::Return, "wargaming"sv }, + { R"("cps_provider")"sv, ResultType::Return, "cps_provider"sv }, + { R"("default")"sv, ResultType::Return, "default"sv }, + { R"("errorReason")"sv, ResultType::Return, "errorReason"sv }, + { R"("no errors")"sv, ResultType::Return, "no errors"sv }, + { R"("scid")"sv, ResultType::Return, "scid"sv }, + { R"("isAuthPayment")"sv, ResultType::Return, 
"isAuthPayment"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("rubric")"sv, ResultType::Return, "rubric"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("rubric")"sv, ResultType::Return, "rubric"sv }, + { R"("Мир")"sv, ResultType::Return, "Мир"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("experiments")"sv, ResultType::Return, "experiments"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("los_portal")"sv, ResultType::Return, "los_portal"sv }, + { R"("los_level")"sv, ResultType::Return, "los_level"sv }, + { R"("none")"sv, ResultType::Return, "none"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("__ym")"sv, ResultType::Return, "__ym"sv }, + { R"("ecommerce")"sv, ResultType::Return, "ecommerce"sv }, + { R"("impressions")"sv, ResultType::Return, "impressions"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("863813")"sv, ResultType::Return, "863813"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Happy, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Happy, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { 
R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("863839")"sv, ResultType::Return, "863839"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("863847")"sv, ResultType::Return, "863847"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv 
}, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("911480")"sv, ResultType::Return, "911480"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Puppy, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Puppy, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("911484")"sv, ResultType::Return, "911484"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Little bears, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Little bears, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, 
ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("911489")"sv, ResultType::Return, "911489"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("911496")"sv, ResultType::Return, "911496"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Pretty, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Pretty, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { 
R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("911504")"sv, ResultType::Return, "911504"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("911508")"sv, ResultType::Return, "911508"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Kittens, возраст 
1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Kittens, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("911512")"sv, ResultType::Return, "911512"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, 
ResultType::Return, "id"sv }, + { R"("911516")"sv, ResultType::Return, "911516"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("911520")"sv, ResultType::Return, "911520"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { 
R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("911524")"sv, ResultType::Return, "911524"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("911528")"sv, ResultType::Return, "911528"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Футболка детская 3D Turtle, возраст 1-2 года, трикотаж")"sv, ResultType::Return, "Футболка детская 3D Turtle, возраст 1-2 года, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("390.00")"sv, ResultType::Return, "390.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { 
R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("888616")"sv, ResultType::Return, "888616"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { "\"3Д Футболка мужская \\\"Collorista\\\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж\""sv, ResultType::Return, "3Д Футболка мужская \"Collorista\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Одежда и обувь/Мужская одежда/Футболки/")"sv, ResultType::Return, "/Одежда и обувь/Мужская одежда/Футболки/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("406.60")"sv, ResultType::Return, "406.60"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("913361")"sv, ResultType::Return, "913361"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж")"sv, ResultType::Return, "3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { 
R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("470.00")"sv, ResultType::Return, "470.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("913364")"sv, ResultType::Return, "913364"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж")"sv, ResultType::Return, "3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("470.00")"sv, ResultType::Return, "470.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("913367")"sv, ResultType::Return, "913367"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж")"sv, ResultType::Return, "3Д Футболка детская Winter tale р-р 8-10, 100% 
хлопок, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("470.00")"sv, ResultType::Return, "470.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("913385")"sv, ResultType::Return, "913385"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж")"sv, ResultType::Return, "3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("470.00")"sv, ResultType::Return, "470.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("913391")"sv, ResultType::Return, "913391"sv }, + 
{ R"("name")"sv, ResultType::Return, "name"sv }, + { R"("3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж")"sv, ResultType::Return, "3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("/Летние товары/Летний текстиль/")"sv, ResultType::Return, "/Летние товары/Летний текстиль/"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("")"sv, ResultType::Return, ""sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("470.00")"sv, ResultType::Return, "470.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("/retailrocket/")"sv, ResultType::Return, "/retailrocket/"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")"sv, ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/"sv }, + { R"("usertype")"sv, ResultType::Return, "usertype"sv }, + { R"("visitor")"sv, ResultType::Return, "visitor"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("__ym")"sv, ResultType::Return, "__ym"sv }, + { R"("ecommerce")"sv, ResultType::Return, "ecommerce"sv }, + { R"("impressions")"sv, ResultType::Return, "impressions"sv }, + { R"("experiments")"sv, ResultType::Return, "experiments"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("los_portal")"sv, ResultType::Return, "los_portal"sv }, + { R"("los_level")"sv, ResultType::Return, "los_level"sv }, + { R"("none")"sv, ResultType::Return, "none"sv }, + { R"("experiments")"sv, ResultType::Return, "experiments"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("los_portal")"sv, ResultType::Return, "los_portal"sv }, + { R"("los_level")"sv, ResultType::Return, "los_level"sv 
}, + { R"("none")"sv, ResultType::Return, "none"sv }, + { R"("experiments")"sv, ResultType::Return, "experiments"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("los_portal")"sv, ResultType::Return, "los_portal"sv }, + { R"("los_level")"sv, ResultType::Return, "los_level"sv }, + { R"("none")"sv, ResultType::Return, "none"sv }, + { R"("experiments")"sv, ResultType::Return, "experiments"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("los_portal")"sv, ResultType::Return, "los_portal"sv }, + { R"("los_level")"sv, ResultType::Return, "los_level"sv }, + { R"("none")"sv, ResultType::Return, "none"sv }, + { R"("experiments")"sv, ResultType::Return, "experiments"sv }, + { R"("lang")"sv, ResultType::Return, "lang"sv }, + { R"("ru")"sv, ResultType::Return, "ru"sv }, + { R"("los_portal")"sv, ResultType::Return, "los_portal"sv }, + { R"("los_level")"sv, ResultType::Return, "los_level"sv }, + { R"("none")"sv, ResultType::Return, "none"sv }, + { R"("__ym")"sv, ResultType::Return, "__ym"sv }, + { R"("ecommerce")"sv, ResultType::Return, "ecommerce"sv }, + { R"("currencyCode")"sv, ResultType::Return, "currencyCode"sv }, + { R"("RUR")"sv, ResultType::Return, "RUR"sv }, + { R"("impressions")"sv, ResultType::Return, "impressions"sv }, + { R"("name")"sv, ResultType::Return, "name"sv }, + { R"("Чайник электрический Mystery MEK-1627, белый")"sv, ResultType::Return, "Чайник электрический Mystery MEK-1627, белый"sv }, + { R"("brand")"sv, ResultType::Return, "brand"sv }, + { R"("Mystery")"sv, ResultType::Return, "Mystery"sv }, + { R"("id")"sv, ResultType::Return, "id"sv }, + { R"("187180")"sv, ResultType::Return, "187180"sv }, + { R"("category")"sv, ResultType::Return, "category"sv }, + { R"("Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery")"sv, ResultType::Return, "Мелкая бытовая техника/Мелкие кухонные приборы/Чайники 
электрические/Mystery"sv }, + { R"("variant")"sv, ResultType::Return, "variant"sv }, + { R"("В наличии")"sv, ResultType::Return, "В наличии"sv }, + { R"("price")"sv, ResultType::Return, "price"sv }, + { R"("1630.00")"sv, ResultType::Return, "1630.00"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { R"("Карточка")"sv, ResultType::Return, "Карточка"sv }, + { R"("position")"sv, ResultType::Return, "position"sv }, + { R"("detail")"sv, ResultType::Return, "detail"sv }, + { R"("actionField")"sv, ResultType::Return, "actionField"sv }, + { R"("list")"sv, ResultType::Return, "list"sv }, + { "\0\""sv, ResultType::Throw, "JSON: expected \", got \0"sv }, + { "\"/igrushki/konstruktory\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0a"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Творчество/Рисование/Инструменты и кра\0a"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0a"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0t"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0k"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\0t"sv, ResultType::Throw, "JSON: expected \", got \0"sv }, + { "\"/Хозтовары/Хранение вещей и организа\xD1\0t"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Хозтовары/Товары для стир\0a"sv, ResultType::Throw, "JSON: incorrect 
syntax (expected end of string, found end of JSON)."sv }, + { "\"li\0a"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0k"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/kosmetika-i-parfyum/parfyumeriya/mu\0t"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/ko\0\x04"sv, ResultType::Throw, "JSON: begin >= end."sv }, + { ""sv, ResultType::Throw, "JSON: expected \", got \0"sv }, + { "\"/stroitelstvo-i-remont/stroit\0t"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0k"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/s\0a"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Строительство и ремонт/Строительный инструмент/Изм\0e"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/avto/soputstvuy\0l"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/str\0\xD0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xFF"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Мелкая бытовая техника/Мелки\xD0\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Пряжа \\\"Бамбук стрейч\\0\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, 
found end of JSON)."sv }, + { "\"Карандаш чёрнографитны\xD0\0\xD0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0l"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0e"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"ca\0e"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"ca\0e"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0t"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0d"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Канцтовары/Ежедневники и блокн\xD0\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/kanctovary/ezhednevniki-i-blok\0a"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Стакан \xD0\0a"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\x80"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"c\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Органайзер для хранения аксессуаров, \0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"quantity\00"sv, 
ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Сменный блок для тетрадей на кольцах А5, 160 листов клетка, офсет \xE2\x84\0="sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Сувениры/Ф\xD0\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"\0\""sv, ResultType::Return, "\0"sv }, + { "\"\0\x04"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"va\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"ca\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"В \0\x04"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/letnie-tovary/z\0\x04"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Посудомоечная машина Ha\0="sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Крупная бытов\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Полочная акустическая система Magnat Needl\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"brand\00"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"\0d"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"pos\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"c\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"var\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { 
"\"Телевизоры и видеотехника/Всё для домашних кинотеатр\0="sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Флеш-диск Transcend JetFlash 620 8GB (TS8GJF62\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Табурет Мег\0\xD0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"variant\0\x04"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Катал\xD0\0\""sv, ResultType::Return, "Катал\xD0\0"sv }, + { "\"К\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Полочная акустическая система Magnat Needl\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"brand\00"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"\0d"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"pos\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"c\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"17\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/igrushki/razvivayusc\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Ключница \\\"\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Игр\xD1\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Игрушки/Игрушки для девочек/Игровые модули дл\xD1\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Крупная бытовая 
техника/Стиральные машины/С фронт\xD0\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\0 "sv, ResultType::Throw, "JSON: expected \", got \0"sv }, + { "\"Светодиодная лента SMD3528, 5 м. IP33, 60LED, зеленый, 4,8W/мет\xD1\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Сантехника/Мебель для ванных комнат/Стол\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\0o"sv, ResultType::Throw, "JSON: expected \", got \0"sv }, + { "\"/igrushki/konstruktory\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/posuda/kuhonnye-prinadlezhnosti-i-instrumenty/kuhonnye-pr\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Творчество/Рисование/Инструменты и кра\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0d"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\0 "sv, ResultType::Throw, "JSON: expected \", got \0"sv }, + { "\"/Хозтовары/Хранение вещей и организа\xD1\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found 
end of JSON)."sv }, + { "\"/Хозтовары/Товары для стир\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"li\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/igrushki/igrus\0d"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/kosmetika-i-parfyum/parfyumeriya/mu\00"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/ko\0\0"sv, ResultType::Throw, "JSON: begin >= end."sv }, + { "\"/avto/avtomobilnyy\0\0"sv, ResultType::Throw, "JSON: begin >= end."sv }, + { "\"/stroitelstvo-i-remont/stroit\00"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/s\0d"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Строительство и ремонт/Строительный инструмент/Изм\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/avto/soputstvuy\0\""sv, ResultType::Return, "/avto/soputstvuy\0"sv }, + { "\"/str\0k"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xD0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0="sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Чайник электрический Vitesse\0="sv, ResultType::Throw, "JSON: 
incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Мелкая бытовая техника/Мелки\xD0\0\xD0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Пряжа \\\"Бамбук стрейч\\0о"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Карандаш чёрнографитны\xD0\0k"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0\""sv, ResultType::Return, "/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0"sv }, + { "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"ca\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Подаро\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Средство для прочис\xD1\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"i\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/p\0\""sv, ResultType::Return, "/p\0"sv }, + { "\"/Сувениры/Магниты, н\xD0\0k"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Дерев\xD0\0="sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/prazdniki/svadba/svadebnaya-c\0\xD0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Канцт\0d"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Праздники/То\xD0\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"v\0 
"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Косметика \xD0\0d"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Спорт и отдых/Настольные игры/Покер, руле\xD1\0\xD0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"categ\0="sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/retailr\0k"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/retailrocket\0k"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Ежедневник недат А5 140л кл,ляссе,обл пв\0="sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/432809/ezhednevnik-organayzer-sredniy-s-remeshkom-na-knopke-v-oblozhke-kalkulyator-kalendar-do-\0\xD0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0d"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0 "sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/Канцтовары/Ежедневники и блокн\xD0\0o"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"/kanctovary/ezhednevniki-i-blok\00"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Стакан \xD0\0\0"sv, ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)."sv }, + { "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\0"sv, ResultType::Throw, "JSON: incorrect syntax 
(expected end of string, found end of JSON)."sv }, + { "\"c\0\""sv, ResultType::Return, "c\0"sv }, }; for (auto i : boost::irange(0, 1/*00000*/)) @@ -641,15 +642,14 @@ TEST(JSONSuite, SimpleTest) { try { - JSON j(r.input, r.input + strlen(r.input)); + JSON j(r.input.data(), r.input.data() + r.input.size()); ASSERT_EQ(j.getString(), r.result); - ASSERT_TRUE(r.result_type == ResultType::Return); + ASSERT_EQ(r.result_type, ResultType::Return); } - catch (JSONException & e) + catch (const JSONException &) { - ASSERT_TRUE(r.result_type == ResultType::Throw); - ASSERT_EQ(e.message(), r.result); + ASSERT_EQ(r.result_type, ResultType::Throw); } } } From ffc39574f18cd77c50ab1c1c063aa5c5a199e91e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 02:04:01 +0300 Subject: [PATCH 049/333] Removed very old example (for jemalloc performance regression) --- base/common/tests/CMakeLists.txt | 4 --- base/common/tests/allocator.cpp | 47 -------------------------------- 2 files changed, 51 deletions(-) delete mode 100644 base/common/tests/allocator.cpp diff --git a/base/common/tests/CMakeLists.txt b/base/common/tests/CMakeLists.txt index 92be2f67c94..402a924baa9 100644 --- a/base/common/tests/CMakeLists.txt +++ b/base/common/tests/CMakeLists.txt @@ -1,12 +1,8 @@ include (${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake) add_executable (local_date_time_comparison local_date_time_comparison.cpp) -add_executable (realloc-perf allocator.cpp) - -set(PLATFORM_LIBS ${CMAKE_DL_LIBS}) target_link_libraries (local_date_time_comparison PRIVATE common) -target_link_libraries (realloc-perf PRIVATE common) add_check(local_date_time_comparison) add_executable (dump_variable dump_variable.cpp) diff --git a/base/common/tests/allocator.cpp b/base/common/tests/allocator.cpp deleted file mode 100644 index 03f6228e0f5..00000000000 --- a/base/common/tests/allocator.cpp +++ /dev/null @@ -1,47 +0,0 @@ -#include -#include -#include -#include - - -void thread_func() -{ - for (size_t i = 0; i < 
100; ++i) - { - size_t size = 4096; - - void * buf = malloc(size); - if (!buf) - abort(); - memset(buf, 0, size); - - while (size < 1048576) - { - size_t next_size = size * 4; - - void * new_buf = realloc(buf, next_size); - if (!new_buf) - abort(); - buf = new_buf; - - memset(reinterpret_cast(buf) + size, 0, next_size - size); - size = next_size; - } - - free(buf); - } -} - - -int main(int, char **) -{ - std::vector threads(16); - for (size_t i = 0; i < 1000; ++i) - { - for (auto & thread : threads) - thread = std::thread(thread_func); - for (auto & thread : threads) - thread.join(); - } - return 0; -} From 1096fc328ad5cb187bc0910272d3604e31da58fa Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 02:11:27 +0300 Subject: [PATCH 050/333] Make some tests automated --- base/common/tests/CMakeLists.txt | 7 ------- src/CMakeLists.txt | 2 ++ .../Common/tests/gtest_local_date_time_comparison.cpp | 8 ++++---- src/IO/tests/CMakeLists.txt | 2 -- src/Interpreters/tests/CMakeLists.txt | 1 - 5 files changed, 6 insertions(+), 14 deletions(-) rename base/common/tests/local_date_time_comparison.cpp => src/Common/tests/gtest_local_date_time_comparison.cpp (79%) diff --git a/base/common/tests/CMakeLists.txt b/base/common/tests/CMakeLists.txt index 402a924baa9..2a07a94055f 100644 --- a/base/common/tests/CMakeLists.txt +++ b/base/common/tests/CMakeLists.txt @@ -1,9 +1,2 @@ -include (${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake) - -add_executable (local_date_time_comparison local_date_time_comparison.cpp) - -target_link_libraries (local_date_time_comparison PRIVATE common) -add_check(local_date_time_comparison) - add_executable (dump_variable dump_variable.cpp) target_link_libraries (dump_variable PRIVATE clickhouse_common_io) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a6a7d280479..82d84e42364 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -458,6 +458,8 @@ endif() dbms_target_link_libraries(PRIVATE _boost_context) +include 
(${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake) + if (ENABLE_TESTS AND USE_GTEST) macro (grep_gtest_sources BASE_DIR DST_VAR) # Cold match files that are not in tests/ directories diff --git a/base/common/tests/local_date_time_comparison.cpp b/src/Common/tests/gtest_local_date_time_comparison.cpp similarity index 79% rename from base/common/tests/local_date_time_comparison.cpp rename to src/Common/tests/gtest_local_date_time_comparison.cpp index 5492ec31004..f75c2647100 100644 --- a/base/common/tests/local_date_time_comparison.cpp +++ b/src/Common/tests/gtest_local_date_time_comparison.cpp @@ -1,5 +1,6 @@ #include #include +#include #include @@ -16,14 +17,13 @@ void checkComparison() LocalDateTime a("2018-07-18 01:02:03"); LocalDateTime b("2018-07-18 01:02:03"); - if (a != b) - throw std::runtime_error("Test failed"); + EXPECT_EQ(a, b); + EXPECT_FALSE(a != b); } -int main(int, char **) +TEST(LocalDateTime, Comparison) { fillStackWithGarbage(); checkComparison(); - return 0; } diff --git a/src/IO/tests/CMakeLists.txt b/src/IO/tests/CMakeLists.txt index 79800d8339c..bcd0a8bba24 100644 --- a/src/IO/tests/CMakeLists.txt +++ b/src/IO/tests/CMakeLists.txt @@ -39,11 +39,9 @@ target_link_libraries (o_direct_and_dirty_pages PRIVATE clickhouse_common_io) add_executable (hashing_write_buffer hashing_write_buffer.cpp) target_link_libraries (hashing_write_buffer PRIVATE clickhouse_common_io) -add_check(hashing_write_buffer) add_executable (hashing_read_buffer hashing_read_buffer.cpp) target_link_libraries (hashing_read_buffer PRIVATE clickhouse_common_io) -add_check (hashing_read_buffer) add_executable (io_operators io_operators.cpp) target_link_libraries (io_operators PRIVATE clickhouse_common_io) diff --git a/src/Interpreters/tests/CMakeLists.txt b/src/Interpreters/tests/CMakeLists.txt index 1bc9d7fbacb..8905d2fe6e6 100644 --- a/src/Interpreters/tests/CMakeLists.txt +++ b/src/Interpreters/tests/CMakeLists.txt @@ -38,7 +38,6 @@ target_link_libraries (two_level_hash_map 
PRIVATE dbms) add_executable (in_join_subqueries_preprocessor in_join_subqueries_preprocessor.cpp) target_link_libraries (in_join_subqueries_preprocessor PRIVATE clickhouse_aggregate_functions dbms clickhouse_parsers) -add_check(in_join_subqueries_preprocessor) if (OS_LINUX) add_executable (internal_iotop internal_iotop.cpp) From 3eb3830dd8c409939223f5649c06f30f80de5363 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 02:13:49 +0300 Subject: [PATCH 051/333] Remove trash --- .../tests/gtest_global_register_functions.h.bak | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 src/Common/tests/gtest_global_register_functions.h.bak diff --git a/src/Common/tests/gtest_global_register_functions.h.bak b/src/Common/tests/gtest_global_register_functions.h.bak deleted file mode 100644 index 197ce5838b9..00000000000 --- a/src/Common/tests/gtest_global_register_functions.h.bak +++ /dev/null @@ -1,17 +0,0 @@ -#include -#include - -struct RegisteredFunctionsState -{ - RegisteredFunctionsState() - { - DB::registerFunctions(); - } - - RegisteredFunctionsState(RegisteredFunctionsState &&) = default; -}; - -inline void tryRegisterFunctions() -{ - static RegisteredFunctionsState registered_functions_state; -} From 78cc70881647aea920bedebb45a74cd2134fd612 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 02:15:33 +0300 Subject: [PATCH 052/333] Fix bad whitespaces in test --- tests/queries/0_stateless/01691_DateTime64_clamp.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01691_DateTime64_clamp.sql b/tests/queries/0_stateless/01691_DateTime64_clamp.sql index 958de4edada..c77a66febb3 100644 --- a/tests/queries/0_stateless/01691_DateTime64_clamp.sql +++ b/tests/queries/0_stateless/01691_DateTime64_clamp.sql @@ -11,7 +11,7 @@ SELECT toDateTime64(toFloat32(bitShiftLeft(toUInt64(1),33)), 2, 'Europe/Moscow') SELECT toDateTime64(toFloat64(bitShiftLeft(toUInt64(1),33)), 2, 
'Europe/Moscow') FORMAT Null; -- These are outsize of extended range and hence clamped -SELECT toDateTime64(-1 * bitShiftLeft(toUInt64(1),35), 2); -SELECT CAST(-1 * bitShiftLeft(toUInt64(1),35) AS DateTime64); -SELECT CAST(bitShiftLeft(toUInt64(1),35) AS DateTime64); -SELECT toDateTime64(bitShiftLeft(toUInt64(1),35), 2); +SELECT toDateTime64(-1 * bitShiftLeft(toUInt64(1), 35), 2); +SELECT CAST(-1 * bitShiftLeft(toUInt64(1), 35) AS DateTime64); +SELECT CAST(bitShiftLeft(toUInt64(1), 35) AS DateTime64); +SELECT toDateTime64(bitShiftLeft(toUInt64(1), 35), 2); From 4f08539754f4cc4065241fb00d9e4a3e7cefd567 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 03:01:08 +0300 Subject: [PATCH 053/333] Remove strange code --- base/common/DateLUTImpl.h | 44 ++++++++++++++++++++++++++------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 8c2bbb3262d..4dee7eb5d55 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -109,6 +109,26 @@ private: return LUTIndex{(v / index.toUnderType()) & date_lut_mask}; } + /// Remainder of division in the sense of modular arithmetic: + /// the difference between x and the maximal divisable number not greater than x. + /// Example: -1 % 10 = 9, because -10 is the maximal number not greater than -1 that is divisable by 10. + /// Why do we need this: + /// - because the unix timestamp -1 is 1969-12-31 23:59:59 in UTC. + template + static constexpr inline auto mod(T x, U divisor) + { + /// This is the C++ way of modulo of division: + /// x % y is the number that: (x / y) * y + x % y == x + /// For example, -1 % 10 = -1 + /// Note that both variants are "correct" in the mathematical sense. They are just different operations. + auto res = x % divisor; + + if (unlikely(res < 0)) + res += divisor; + + return res; + } + public: /// The order of fields matters for alignment and sizeof. 
struct Values @@ -368,7 +388,7 @@ public: { const LUTIndex index = findIndex(t); - if (unlikely(index == daynum_offset_epoch || index > DATE_LUT_MAX_DAY_NUM)) + if (unlikely(index > DATE_LUT_MAX_DAY_NUM)) return t + offset_at_start_of_epoch; time_t res = t - lut[index].date; @@ -385,7 +405,7 @@ public: /// If it is overflow case, /// than limit number of hours to avoid insane results like 1970-01-01 89:28:15 - if (unlikely(index == daynum_offset_epoch || index > DATE_LUT_MAX_DAY_NUM)) + if (unlikely(index > DATE_LUT_MAX_DAY_NUM)) return static_cast((t + offset_at_start_of_epoch) / 3600) % 24; time_t time = t - lut[index].date; @@ -399,10 +419,10 @@ public: } /** Calculating offset from UTC in seconds. - * which means Using the same literal time of "t" to get the corresponding timestamp in UTC, - * then subtract the former from the latter to get the offset result. - * The boundaries when meets DST(daylight saving time) change should be handled very carefully. - */ + * which means Using the same literal time of "t" to get the corresponding timestamp in UTC, + * then subtract the former from the latter to get the offset result. + * The boundaries when meets DST(daylight saving time) change should be handled very carefully. + */ inline time_t timezoneOffset(time_t t) const { const LUTIndex index = findIndex(t); @@ -412,6 +432,7 @@ public: /// but we can figure out all the accumulated offsets from 1970-01-01 to that day just by get the whole difference between lut[].date, /// and then, we can directly subtract multiple 86400s to get the real DST offsets for the leap seconds is not considered now. time_t res = (lut[index].date - lut[daynum_offset_epoch].date) % 86400; + /// As so far to know, the maximal DST offset couldn't be more than 2 hours, so after the modulo operation the remainder /// will sits between [-offset --> 0 --> offset] which respectively corresponds to moving clock forward or backward. res = res > 43200 ? 
(86400 - res) : (0 - res); @@ -423,13 +444,6 @@ public: return res + offset_at_start_of_epoch; } - static inline time_t toSecondsSinceTheDayStart(time_t t) - { - t %= 86400; - t = (t < 0 ? t + 86400 : t); - - return t; - } /** Only for time zones with/when offset from UTC is multiple of five minutes. * This is true for all time zones: right now, all time zones have an offset that is multiple of 15 minutes. @@ -443,13 +457,13 @@ public: */ inline unsigned toSecond(time_t t) const { - return toSecondsSinceTheDayStart(t) % 60; + return mod(t, 60); } inline unsigned toMinute(time_t t) const { if (offset_is_whole_number_of_hours_everytime) - return (toSecondsSinceTheDayStart(t) / 60) % 60; + return mod((t / 60), 60); /// To consider the DST changing situation within this day. /// also make the special timezones with no whole hour offset such as 'Australia/Lord_Howe' been taken into account From 7f1ae506fc3dcd145c3384db9ff065fd91c88419 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 03:11:40 +0300 Subject: [PATCH 054/333] The code is too complicated --- src/Functions/TransformDateTime64.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/TransformDateTime64.h b/src/Functions/TransformDateTime64.h index 0a5e36cd2bd..e42c3155327 100644 --- a/src/Functions/TransformDateTime64.h +++ b/src/Functions/TransformDateTime64.h @@ -78,7 +78,7 @@ public: } } - template >>> + template >> inline auto execute(const T & t, Args && ... 
args) const { return wrapped_transform.execute(t, std::forward(args)...); From 9f3760e1e369839e3e0cd73a02e7e8325060323a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 03:18:48 +0300 Subject: [PATCH 055/333] Bad filename --- src/Common/tests/{gtest_DateLutImpl.cpp => gtest_DateLUTImpl.cpp} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/Common/tests/{gtest_DateLutImpl.cpp => gtest_DateLUTImpl.cpp} (100%) diff --git a/src/Common/tests/gtest_DateLutImpl.cpp b/src/Common/tests/gtest_DateLUTImpl.cpp similarity index 100% rename from src/Common/tests/gtest_DateLutImpl.cpp rename to src/Common/tests/gtest_DateLUTImpl.cpp From 31010624d6e3729dc2715216cc60a905865c90d8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 03:35:23 +0300 Subject: [PATCH 056/333] Attempt to simplify code --- base/common/DateLUTImpl.h | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 4dee7eb5d55..eb34053ca6e 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -8,20 +8,23 @@ #include -#define DATE_LUT_MAX (0xFFFFFFFFU - 86400) -#define DATE_LUT_MAX_DAY_NUM (0xFFFFFFFFU / 86400) -/// Table size is bigger than DATE_LUT_MAX_DAY_NUM to fill all indices within UInt16 range: this allows to remove extra check. -#define DATE_LUT_SIZE 0x20000 #define DATE_LUT_MIN_YEAR 1925 /// 1925 since wast majority of timezones changed to 15-minute aligned offsets somewhere in 1924 or earlier. #define DATE_LUT_MAX_YEAR 2283 /// Last supported year (complete) #define DATE_LUT_YEARS (1 + DATE_LUT_MAX_YEAR - DATE_LUT_MIN_YEAR) /// Number of years in lookup table +#define DATE_LUT_SIZE 0x20000 + +#define DATE_LUT_MAX (0xFFFFFFFFU - 86400) +#define DATE_LUT_MAX_DAY_NUM 0xFFFF + + #if defined(__PPC__) #if !__clang__ #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #endif #endif + /// Flags for toYearWeek() function. 
enum class WeekModeFlag : UInt8 { @@ -208,17 +211,14 @@ private: /// UTC offset is from -12 to +14 in all known time zones. This requires checking only three indices. if (t >= lut[guess].date && t < lut[UInt32(guess + 1)].date) - return LUTIndex{guess}; + return LUTIndex(guess); /// Time zones that have offset 0 from UTC do daylight saving time change (if any) /// towards increasing UTC offset (example: British Standard Time). if (t >= lut[UInt32(guess + 1)].date) return LUTIndex(guess + 1); - if (lut[guess - 1].date <= t) - return LUTIndex(guess - 1); - - return LUTIndex(guess - 2); + return LUTIndex(guess - 1); } inline LUTIndex toLUTIndex(DayNum d) const @@ -388,9 +388,6 @@ public: { const LUTIndex index = findIndex(t); - if (unlikely(index > DATE_LUT_MAX_DAY_NUM)) - return t + offset_at_start_of_epoch; - time_t res = t - lut[index].date; if (res >= lut[index].time_at_offset_change()) @@ -403,11 +400,6 @@ public: { const LUTIndex index = findIndex(t); - /// If it is overflow case, - /// than limit number of hours to avoid insane results like 1970-01-01 89:28:15 - if (unlikely(index > DATE_LUT_MAX_DAY_NUM)) - return static_cast((t + offset_at_start_of_epoch) / 3600) % 24; - time_t time = t - lut[index].date; /// Data is cleaned to avoid possibility of underflow. 
From 6dc21cd09869da972fb9b87388d499fc2973a484 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 03:48:37 +0300 Subject: [PATCH 057/333] Remove discrepances --- src/Common/tests/gtest_DateLUTImpl.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Common/tests/gtest_DateLUTImpl.cpp b/src/Common/tests/gtest_DateLUTImpl.cpp index 3a0da1ee1ee..0cb619e19f7 100644 --- a/src/Common/tests/gtest_DateLUTImpl.cpp +++ b/src/Common/tests/gtest_DateLUTImpl.cpp @@ -200,9 +200,9 @@ TEST(DateLUTTest, TimeValuesAtLeftBoderOfRange) EXPECT_EQ(lut.dateToString(time), "1970-01-01" /*std::string*/); } -TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOLDLut) +TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOldLUT) { - // Value is at the right border of the OLD (small) LUT, and provides meaningful values where OLD LUT would provide garbage. + // Value is at the right border of the old (small) LUT, and provides meaningful values where old LUT would provide garbage. const DateLUTImpl & lut = DateLUT::instance("UTC"); const time_t time = 4294343873; // 2106-01-31T01:17:53 (Sunday) @@ -267,7 +267,7 @@ TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOLDLut) class DateLUTWithTimeZone : public ::testing::TestWithParam {}; -TEST_P(DateLUTWithTimeZone, DISABLED_LoadLut) +TEST_P(DateLUTWithTimeZone, DISABLED_LoadLUT) { // There are some assumptions and assertions about TZ data made in DateLUTImpl which are verified upon loading, // to make sure that those assertions are true for all timezones we are going to load all of them one by one. 
From b9a8509f79b3aa96e46c05dc6be9f12eea5efbf5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 05:18:17 +0300 Subject: [PATCH 058/333] Adjustments --- base/common/DateLUTImpl.h | 5 ++--- src/Common/tests/gtest_DateLUTImpl.cpp | 10 +--------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index eb34053ca6e..050951b2409 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -402,7 +402,6 @@ public: time_t time = t - lut[index].date; - /// Data is cleaned to avoid possibility of underflow. if (time >= lut[index].time_at_offset_change()) time += lut[index].amount_of_offset_change(); @@ -454,8 +453,8 @@ public: inline unsigned toMinute(time_t t) const { - if (offset_is_whole_number_of_hours_everytime) - return mod((t / 60), 60); + if (offset_is_whole_number_of_hours_everytime && t >= 0) + return (t / 60) % 60; /// To consider the DST changing situation within this day. /// also make the special timezones with no whole hour offset such as 'Australia/Lord_Howe' been taken into account diff --git a/src/Common/tests/gtest_DateLUTImpl.cpp b/src/Common/tests/gtest_DateLUTImpl.cpp index 0cb619e19f7..91c1f40fdcb 100644 --- a/src/Common/tests/gtest_DateLUTImpl.cpp +++ b/src/Common/tests/gtest_DateLUTImpl.cpp @@ -267,7 +267,7 @@ TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOldLUT) class DateLUTWithTimeZone : public ::testing::TestWithParam {}; -TEST_P(DateLUTWithTimeZone, DISABLED_LoadLUT) +TEST_P(DateLUTWithTimeZone, LoadLUT) { // There are some assumptions and assertions about TZ data made in DateLUTImpl which are verified upon loading, // to make sure that those assertions are true for all timezones we are going to load all of them one by one. 
@@ -317,14 +317,6 @@ TEST_P(DateLUTWithTimeZone, getTimeZone) EXPECT_EQ(GetParam(), lut.getTimeZone()); } -TEST_P(DateLUTWithTimeZone, ZeroTime) -{ - const auto & lut = DateLUT::instance(GetParam()); - - EXPECT_EQ(0, lut.toDayNum(time_t{0})); - EXPECT_EQ(0, lut.toDayNum(DayNum{0})); - EXPECT_EQ(0, lut.toDayNum(ExtendedDayNum{0})); -} // Group of tests for timezones that have or had some time ago an offset which is not multiple of 15 minutes. INSTANTIATE_TEST_SUITE_P(ExoticTimezones, From ed492ccf5dba666e34b60d5ed045bab4e2b42bca Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 05:36:36 +0300 Subject: [PATCH 059/333] Fix Africa/Juba --- base/common/DateLUTImpl.cpp | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index bf180acb835..71d17dbab2b 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -94,9 +94,10 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) values.time_at_offset_change_value = 0; values.amount_of_offset_change_value = 0; - // TODO: this partially ignores fractional pre-epoch offsets, which may cause incorrect toRelativeHourNum() results for some timezones, namelly Europe/Minsk - // when pre-May 2 1924 it had an offset of UTC+1:50, and after it was UTC+2h. - // https://www.timeanddate.com/time/zone/belarus/minsk?syear=1900 + /// TODO: This partially ignores fractional pre-epoch offsets, + /// which may cause incorrect toRelativeHourNum() results for some timezones, namelly Europe/Minsk + /// when pre-May 2 1924 it had an offset of UTC+1:50, and after it was UTC+2h. 
+ /// https://www.timeanddate.com/time/zone/belarus/minsk?syear=1900 if (start_of_day > 0 && start_of_day % 3600) offset_is_whole_number_of_hours_everytime = false; @@ -113,16 +114,28 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) /// Find a time (timestamp offset from beginning of day), /// when UTC offset was changed. Search is performed with 15-minute granularity, assuming it is enough. - time_t time_at_offset_change = 900; - while (time_at_offset_change < 86400) + time_t time_at_offset_change = 0; + + /// If offset was changed just at midnight. + if (utc_offset_at_beginning_of_day != cctz_time_zone.lookup( + std::chrono::system_clock::from_time_t(lut[i - 1].date - 1)).offset) { - auto utc_offset_at_current_time = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t( - lut[i - 1].date + time_at_offset_change)).offset; + /// time_at_offset_change is zero. + } + else + { + time_at_offset_change = 900; + while (time_at_offset_change < 86400) + { + auto utc_offset_at_current_time = cctz_time_zone.lookup( + std::chrono::system_clock::from_time_t( + lut[i - 1].date + time_at_offset_change)).offset; - if (utc_offset_at_current_time != utc_offset_at_beginning_of_day) - break; + if (utc_offset_at_current_time != utc_offset_at_beginning_of_day) + break; - time_at_offset_change += 900; + time_at_offset_change += 900; + } } lut[i - 1].time_at_offset_change_value = time_at_offset_change / Values::OffsetChangeFactor; From 9416f5901492c31d4a7ed2c6f9bf88760ffdddea Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 06:48:24 +0300 Subject: [PATCH 060/333] Some progress on tests --- src/Common/tests/gtest_DateLUTImpl.cpp | 46 ++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 7 deletions(-) diff --git a/src/Common/tests/gtest_DateLUTImpl.cpp b/src/Common/tests/gtest_DateLUTImpl.cpp index 91c1f40fdcb..95ac055e237 100644 --- a/src/Common/tests/gtest_DateLUTImpl.cpp +++ b/src/Common/tests/gtest_DateLUTImpl.cpp @@ -4,8 +4,11 @@ 
#include #include +#include + #include + /// For the expansion of gtest macros. #if defined(__clang__) #pragma clang diagnostic ignored "-Wused-but-marked-unused" @@ -25,14 +28,17 @@ cctz::civil_day YYYYMMDDToDay(unsigned value) value % 100); // day } -std::vector allTimezones() +std::vector allTimezones(bool with_weird_offsets = true) { std::vector result; const auto * timezone_name = auto_time_zones; while (*timezone_name) { - result.push_back(*timezone_name); + bool weird_offsets = (std::string_view(*timezone_name) == "Africa/Monrovia"); + + if (!weird_offsets || with_weird_offsets) + result.push_back(*timezone_name); ++timezone_name; } @@ -345,7 +351,7 @@ std::ostream & operator<<(std::ostream & ostr, const DateLUTImpl::Values & v) << "\n\t weekday : " << static_cast(v.day_of_week) << "\n\t days in month : " << static_cast(v.days_in_month) << "\n\t offset change : " << v.amount_of_offset_change() - << "\n\t offfset change at : " << v.time_at_offset_change() + << "\n\t offset change at : " << v.time_at_offset_change() << "\n}"; } @@ -387,6 +393,32 @@ TEST_P(DateLUTWithTimeZoneAndTimeRange, InRange) const auto tz_time = cctz::convert(std::chrono::system_clock::from_time_t(expected_time_t), tz); + /// Weird offset, not supported. + /// Example: Africa/Monrovia has offset UTC-0:44:30 in year 1970. + if (tz.lookup(std::chrono::system_clock::from_time_t(expected_time_t)).offset % 900) + continue; + + /// Unsupported timezone transitions - not in 15-minute time point or to different day. 
+ /// Example: America/Goose_Bay decided to go back one hour at 00:01: + /// $ seq 1289097900 30 1289103600 | TZ=America/Goose_Bay LC_ALL=C xargs -I{} date -d @{} + /// Sat Nov 6 23:59:00 ADT 2010 + /// Sat Nov 6 23:59:30 ADT 2010 + /// Sun Nov 7 00:00:00 ADT 2010 + /// Sun Nov 7 00:00:30 ADT 2010 + /// Sat Nov 6 23:01:00 AST 2010 + /// Sat Nov 6 23:01:30 AST 2010 + cctz::time_zone::civil_transition transition{}; + if (tz.next_transition(std::chrono::system_clock::from_time_t(expected_time_t), &transition) + && transition.from.day() == tz_time.day() + && (transition.from.second() != 0 || transition.from.minute() % 900 != 0 + || (transition.from.day() != transition.to.day() + && (transition.from.hour() != 0 && transition.from.minute() != 0 && transition.from.second() != 0)))) + { + std::cerr << "Skipping " << timezone_name << " " << tz_time + << " because of unsupported timezone transition from " << transition.from << " to " << transition.to << "\n"; + continue; + } + EXPECT_EQ(tz_time.year(), lut.toYear(expected_time_t)); EXPECT_EQ(tz_time.month(), lut.toMonth(expected_time_t)); EXPECT_EQ(tz_time.day(), lut.toDayOfMonth(expected_time_t)); @@ -429,7 +461,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010, INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year1970_WHOLE, DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( - ::testing::ValuesIn(allTimezones()), + ::testing::ValuesIn(allTimezones(false)), ::testing::ValuesIn(std::initializer_list{ // Values from tests/date_lut3.cpp {YYYYMMDDToDay(19700101), YYYYMMDDToDay(19701231), 3191 /*53m 11s*/}, @@ -439,7 +471,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year1970_WHOLE, INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010_WHOLE, DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( - ::testing::ValuesIn(allTimezones()), + ::testing::ValuesIn(allTimezones(false)), ::testing::ValuesIn(std::initializer_list{ // Values from tests/date_lut3.cpp {YYYYMMDDToDay(20100101), YYYYMMDDToDay(20101231), 3191 
/*53m 11s*/}, @@ -459,7 +491,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2020_WHOLE, INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_PreEpoch, DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( - ::testing::ValuesIn(allTimezones()), + ::testing::ValuesIn(allTimezones(false)), ::testing::ValuesIn(std::initializer_list{ {YYYYMMDDToDay(19500101), YYYYMMDDToDay(19600101), 15 * 60}, {YYYYMMDDToDay(19300101), YYYYMMDDToDay(19350101), 11 * 15 * 60} @@ -469,7 +501,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_PreEpoch, INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year1970, DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( - ::testing::ValuesIn(allTimezones()), + ::testing::ValuesIn(allTimezones(false)), ::testing::ValuesIn(std::initializer_list{ {YYYYMMDDToDay(19700101), YYYYMMDDToDay(19700201), 15 * 60}, {YYYYMMDDToDay(19700101), YYYYMMDDToDay(19701231), 11 * 13 * 17} From 66f495b6904c81f53b61e920e1a0f1d5fe4dd097 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Mar 2021 06:49:46 +0300 Subject: [PATCH 061/333] Fix build --- programs/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index 9adca58b55a..6b322df5ffd 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -325,7 +325,7 @@ else () endif () if (ENABLE_TESTS AND USE_GTEST) - set (CLICKHOUSE_UNIT_TESTS_TARGETS unit_tests_libcommon unit_tests_dbms) + set (CLICKHOUSE_UNIT_TESTS_TARGETS unit_tests_dbms) add_custom_target (clickhouse-tests ALL DEPENDS ${CLICKHOUSE_UNIT_TESTS_TARGETS}) add_dependencies(clickhouse-bundle clickhouse-tests) endif() From 7cc471f9b90c2adc6c32bb339d39c5ec9da5df7f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 5 Mar 2021 04:22:12 +0300 Subject: [PATCH 062/333] Simplify code --- base/common/DateLUTImpl.cpp | 65 ++++++++++++++----------------------- 1 file changed, 24 insertions(+), 41 deletions(-) diff --git a/base/common/DateLUTImpl.cpp 
b/base/common/DateLUTImpl.cpp index 71d17dbab2b..e2bf60fff4e 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -69,7 +69,8 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) { cctz::time_zone::civil_lookup lookup = cctz_time_zone.lookup(date); - start_of_day = std::chrono::system_clock::to_time_t(lookup.pre); /// Ambiguity is possible. + /// Ambiguity is possible. + start_of_day = std::chrono::system_clock::to_time_t(lookup.pre); Values & values = lut[i]; values.year = date.year(); @@ -94,56 +95,38 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) values.time_at_offset_change_value = 0; values.amount_of_offset_change_value = 0; - /// TODO: This partially ignores fractional pre-epoch offsets, + /// TODO: This partially ignores fractional offsets, /// which may cause incorrect toRelativeHourNum() results for some timezones, namelly Europe/Minsk /// when pre-May 2 1924 it had an offset of UTC+1:50, and after it was UTC+2h. /// https://www.timeanddate.com/time/zone/belarus/minsk?syear=1900 if (start_of_day > 0 && start_of_day % 3600) offset_is_whole_number_of_hours_everytime = false; - /// If UTC offset was changed in previous day. - if (i != 0) + /// If UTC offset was changed this day. + /// Change in time zone without transition is possible, e.g. 
Moscow 1991 Sun, 31 Mar, 02:00 MSK to EEST + cctz::time_zone::civil_transition transition{}; + if (cctz_time_zone.next_transition(lookup.pre, &transition) + && transition.from.year() == date.year() + && transition.from.month() == date.month() + && transition.from.day() == date.day() + && transition.from != transition.to) { - auto amount_of_offset_change_at_prev_day = 86400 - (lut[i].date - lut[i - 1].date); - if (amount_of_offset_change_at_prev_day) - { - lut[i - 1].amount_of_offset_change_value = amount_of_offset_change_at_prev_day / Values::OffsetChangeFactor; + values.time_at_offset_change_value = (transition.from - cctz::civil_second(date)) / Values::OffsetChangeFactor; + values.amount_of_offset_change_value = (transition.to - transition.from) / Values::OffsetChangeFactor; - const auto utc_offset_at_beginning_of_day = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(lut[i - 1].date)).offset; +// std::cerr << time_zone << ", " << date << ": change from " << transition.from << " to " << transition.to << "\n"; +// std::cerr << time_zone << ", " << date << ": change at " << values.time_at_offset_change() << " with " << values.amount_of_offset_change() << "\n"; - /// Find a time (timestamp offset from beginning of day), - /// when UTC offset was changed. Search is performed with 15-minute granularity, assuming it is enough. + /// We don't support too large changes. + if (values.amount_of_offset_change_value > 24 * 4) + values.amount_of_offset_change_value = 24 * 4; + else if (values.amount_of_offset_change_value < -24 * 4) + values.amount_of_offset_change_value = -24 * 4; - time_t time_at_offset_change = 0; - - /// If offset was changed just at midnight. - if (utc_offset_at_beginning_of_day != cctz_time_zone.lookup( - std::chrono::system_clock::from_time_t(lut[i - 1].date - 1)).offset) - { - /// time_at_offset_change is zero. 
- } - else - { - time_at_offset_change = 900; - while (time_at_offset_change < 86400) - { - auto utc_offset_at_current_time = cctz_time_zone.lookup( - std::chrono::system_clock::from_time_t( - lut[i - 1].date + time_at_offset_change)).offset; - - if (utc_offset_at_current_time != utc_offset_at_beginning_of_day) - break; - - time_at_offset_change += 900; - } - } - - lut[i - 1].time_at_offset_change_value = time_at_offset_change / Values::OffsetChangeFactor; - - /// We don't support cases when time change results in switching to previous day. - if (static_cast(lut[i - 1].time_at_offset_change()) + static_cast(lut[i - 1].amount_of_offset_change()) < 0) - lut[i - 1].time_at_offset_change_value = -lut[i - 1].amount_of_offset_change_value; - } + /// We don't support cases when time change results in switching to previous day. + /// Shift the point of time change later. + if (values.time_at_offset_change_value + values.amount_of_offset_change_value < 0) + values.time_at_offset_change_value = -values.amount_of_offset_change_value; } /// Going to next day. From 1722978c2b05feb15a1ae35e902d4ac31af70a92 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 5 Mar 2021 04:45:12 +0300 Subject: [PATCH 063/333] Comments and corrections --- base/common/DateLUTImpl.cpp | 9 ++++++--- base/common/DateLUTImpl.h | 40 +++++++++++++++++++++++++++---------- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index e2bf60fff4e..0685b37365b 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -69,8 +69,11 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) { cctz::time_zone::civil_lookup lookup = cctz_time_zone.lookup(date); - /// Ambiguity is possible. 
- start_of_day = std::chrono::system_clock::to_time_t(lookup.pre); + /// Ambiguity is possible if time was changed backwards at the midnight + /// (or after midnight time has been changed to the previous day, for example two hours backwards at 01:00). + /// Then midnight appears twice. Usually time change happens exactly at 00:00. + /// Then we should use the second midnight as the start of the day. + start_of_day = std::chrono::system_clock::to_time_t(lookup.post); Values & values = lut[i]; values.year = date.year(); @@ -105,7 +108,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) /// If UTC offset was changed this day. /// Change in time zone without transition is possible, e.g. Moscow 1991 Sun, 31 Mar, 02:00 MSK to EEST cctz::time_zone::civil_transition transition{}; - if (cctz_time_zone.next_transition(lookup.pre, &transition) + if (cctz_time_zone.next_transition(lookup.post, &transition) && transition.from.year() == date.year() && transition.from.month() == date.month() && transition.from.day() == date.day() diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 050951b2409..d798cffab63 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -406,7 +406,9 @@ public: time += lut[index].amount_of_offset_change(); unsigned res = time / 3600; - return res <= 23 ? res : 0; + + /// In case time was changed backwards at the start of next day, we will repeat the hour 23. + return res <= 23 ? res : 23; } /** Calculating offset from UTC in seconds. @@ -456,28 +458,35 @@ public: if (offset_is_whole_number_of_hours_everytime && t >= 0) return (t / 60) % 60; - /// To consider the DST changing situation within this day. 
- /// also make the special timezones with no whole hour offset such as 'Australia/Lord_Howe' been taken into account - LUTIndex index = findIndex(t); - UInt32 res = t - lut[index].date; - if (lut[index].amount_of_offset_change() != 0 && t >= lut[index].date + lut[index].time_at_offset_change()) - res += lut[index].amount_of_offset_change(); + /// To consider the DST changing situation within this day + /// also make the special timezones with no whole hour offset such as 'Australia/Lord_Howe' been taken into account. - return res / 60 % 60; + LUTIndex index = findIndex(t); + UInt32 time = t - lut[index].date; + + if (time >= lut[index].time_at_offset_change()) + time += lut[index].amount_of_offset_change(); + + return time / 60 % 60; } + /// NOTE: These functions are wrong for negative time_t. + /// NOTE: Assuming timezone offset is a multiple of 15 minutes. inline time_t toStartOfMinute(time_t t) const { return t / 60 * 60; } inline time_t toStartOfFiveMinute(time_t t) const { return t / 300 * 300; } inline time_t toStartOfFifteenMinutes(time_t t) const { return t / 900 * 900; } + + /// NOTE: This most likely wrong for Nepal - it has offset 05:45. Australia/Eucla is also unfortunate. inline time_t toStartOfTenMinutes(time_t t) const { return t / 600 * 600; } + /// NOTE: Assuming timezone transitions are multiple of hours. Lord Howe Island in Australia is a notable exception. inline time_t toStartOfHour(time_t t) const { - if (offset_is_whole_number_of_hours_everytime) + if (offset_is_whole_number_of_hours_everytime && t >= 0) return t / 3600 * 3600; - UInt32 date = find(t).date; - return date + (UInt32(t) - date) / 3600 * 3600; + Int64 date = find(t).date; + return date + (t - date) / 3600 * 3600; } /** Number of calendar day since the beginning of UNIX epoch (1970-01-01 is zero) @@ -757,6 +766,7 @@ public: /// We count all hour-length intervals, unrelated to offset changes. 
inline time_t toRelativeHourNum(time_t t) const { + /// NOTE: This is also wrong for negative time_t. if (offset_is_whole_number_of_hours_everytime) return t / 3600; @@ -771,6 +781,7 @@ public: return toRelativeHourNum(lut[toLUTIndex(v)].date); } + /// NOTE: This is wrong for negative time_t. inline time_t toRelativeMinuteNum(time_t t) const { return t / 60; @@ -829,7 +840,10 @@ public: if (hours == 1) return toStartOfHour(t); UInt64 seconds = hours * 3600; + + /// NOTE: This is wrong for negative time_t. t = t / seconds * seconds; + if (offset_is_whole_number_of_hours_everytime) return t; return toStartOfHour(t); @@ -840,6 +854,8 @@ public: if (minutes == 1) return toStartOfMinute(t); UInt64 seconds = 60 * minutes; + + /// NOTE: This is wrong for negative time_t. return t / seconds * seconds; } @@ -847,6 +863,8 @@ public: { if (seconds == 1) return t; + + /// NOTE: This is wrong for negative time_t. return t / seconds * seconds; } From 790aa8697ce48257d913802f698ca886da11ec7b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 5 Mar 2021 04:47:50 +0300 Subject: [PATCH 064/333] Fix typos --- base/common/DateLUTImpl.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index d798cffab63..ec47fea60f6 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -113,8 +113,8 @@ private: } /// Remainder of division in the sense of modular arithmetic: - /// the difference between x and the maximal divisable number not greater than x. - /// Example: -1 % 10 = 9, because -10 is the maximal number not greater than -1 that is divisable by 10. + /// the difference between x and the maximal divisible number not greater than x. + /// Example: -1 % 10 = 9, because -10 is the maximal number not greater than -1 that is divisible by 10. /// Why do we need this: /// - because the unix timestamp -1 is 1969-12-31 23:59:59 in UTC. 
template From ecee81b714ec212b7a4c61c8dbb133d89ed9165c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 5 Mar 2021 04:50:16 +0300 Subject: [PATCH 065/333] Fix clang-tidy --- base/common/DateLUTImpl.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index 0685b37365b..46fc88aeb4d 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -46,17 +46,16 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) if (&inside_main) assert(inside_main); - cctz::time_zone cctz_time_zone; if (!cctz::load_time_zone(time_zone, &cctz_time_zone)) throw Poco::Exception("Cannot load time zone " + time_zone_); - const cctz::civil_day epoch{1970, 1, 1}; - const cctz::civil_day lut_start{DATE_LUT_MIN_YEAR, 1, 1}; + constexpr cctz::civil_day epoch{1970, 1, 1}; + constexpr cctz::civil_day lut_start{DATE_LUT_MIN_YEAR, 1, 1}; time_t start_of_day; - // Note: it's validated against all timezones in the system. - assert((epoch - lut_start) == daynum_offset_epoch); + /// Note: it's validated against all timezones in the system. 
+ static_assert((epoch - lut_start) == daynum_offset_epoch); offset_at_start_of_epoch = cctz_time_zone.lookup(cctz_time_zone.lookup(epoch).pre).offset; offset_at_start_of_lut = cctz_time_zone.lookup(cctz_time_zone.lookup(lut_start).pre).offset; From 738d106874de5e61c943bbb022c1053fdbca10b0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 5 Mar 2021 05:03:55 +0300 Subject: [PATCH 066/333] Support negative time_t in more functions --- base/common/DateLUTImpl.h | 60 +++++++++++++-------------------------- 1 file changed, 19 insertions(+), 41 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index ec47fea60f6..8da45fa6622 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -17,6 +17,10 @@ #define DATE_LUT_MAX (0xFFFFFFFFU - 86400) #define DATE_LUT_MAX_DAY_NUM 0xFFFF +/// A constant to add to time_t so every supported time point becomes non-negative and still has the same remainder of division by 3600. +/// If we treat "remainder of division" operation in the sense of modular arithmetic (not like in C++). +#define DATE_LUT_ADD (1970 - DATE_LUT_MIN_YEAR) * 366 * 86400 + #if defined(__PPC__) #if !__clang__ @@ -112,26 +116,6 @@ private: return LUTIndex{(v / index.toUnderType()) & date_lut_mask}; } - /// Remainder of division in the sense of modular arithmetic: - /// the difference between x and the maximal divisible number not greater than x. - /// Example: -1 % 10 = 9, because -10 is the maximal number not greater than -1 that is divisible by 10. - /// Why do we need this: - /// - because the unix timestamp -1 is 1969-12-31 23:59:59 in UTC. - template - static constexpr inline auto mod(T x, U divisor) - { - /// This is the C++ way of modulo of division: - /// x % y is the number that: (x / y) * y + x % y == x - /// For example, -1 % 10 = -1 - /// Note that both variants are "correct" in the mathematical sense. They are just different operations. 
- auto res = x % divisor; - - if (unlikely(res < 0)) - res += divisor; - - return res; - } - public: /// The order of fields matters for alignment and sizeof. struct Values @@ -450,13 +434,13 @@ public: */ inline unsigned toSecond(time_t t) const { - return mod(t, 60); + return (t + DATE_LUT_ADD) % 60; } inline unsigned toMinute(time_t t) const { - if (offset_is_whole_number_of_hours_everytime && t >= 0) - return (t / 60) % 60; + if (offset_is_whole_number_of_hours_everytime) + return ((t + DATE_LUT_ADD) / 60) % 60; /// To consider the DST changing situation within this day /// also make the special timezones with no whole hour offset such as 'Australia/Lord_Howe' been taken into account. @@ -470,20 +454,19 @@ public: return time / 60 % 60; } - /// NOTE: These functions are wrong for negative time_t. /// NOTE: Assuming timezone offset is a multiple of 15 minutes. - inline time_t toStartOfMinute(time_t t) const { return t / 60 * 60; } - inline time_t toStartOfFiveMinute(time_t t) const { return t / 300 * 300; } - inline time_t toStartOfFifteenMinutes(time_t t) const { return t / 900 * 900; } + inline time_t toStartOfMinute(time_t t) const { return (t + DATE_LUT_ADD) / 60 * 60 - DATE_LUT_ADD; } + inline time_t toStartOfFiveMinute(time_t t) const { return (t + DATE_LUT_ADD) / 300 * 300 - DATE_LUT_ADD; } + inline time_t toStartOfFifteenMinutes(time_t t) const { return (t + DATE_LUT_ADD) / 900 * 900 - DATE_LUT_ADD; } /// NOTE: This most likely wrong for Nepal - it has offset 05:45. Australia/Eucla is also unfortunate. - inline time_t toStartOfTenMinutes(time_t t) const { return t / 600 * 600; } + inline time_t toStartOfTenMinutes(time_t t) const { return (t + DATE_LUT_ADD) / 600 * 600 - DATE_LUT_ADD; } /// NOTE: Assuming timezone transitions are multiple of hours. Lord Howe Island in Australia is a notable exception. 
inline time_t toStartOfHour(time_t t) const { - if (offset_is_whole_number_of_hours_everytime && t >= 0) - return t / 3600 * 3600; + if (offset_is_whole_number_of_hours_everytime) + return (t + DATE_LUT_ADD) / 3600 * 3600 - DATE_LUT_ADD; Int64 date = find(t).date; return date + (t - date) / 3600 * 3600; @@ -766,13 +749,12 @@ public: /// We count all hour-length intervals, unrelated to offset changes. inline time_t toRelativeHourNum(time_t t) const { - /// NOTE: This is also wrong for negative time_t. if (offset_is_whole_number_of_hours_everytime) - return t / 3600; + return (t + DATE_LUT_ADD) / 3600 - (DATE_LUT_ADD / 3600); /// Assume that if offset was fractional, then the fraction is the same as at the beginning of epoch. /// NOTE This assumption is false for "Pacific/Pitcairn" and "Pacific/Kiritimati" time zones. - return (t + 86400 - offset_at_start_of_epoch) / 3600; + return (t + DATE_LUT_ADD + 86400 - offset_at_start_of_epoch) / 3600 - (DATE_LUT_ADD / 3600); } template @@ -781,10 +763,9 @@ public: return toRelativeHourNum(lut[toLUTIndex(v)].date); } - /// NOTE: This is wrong for negative time_t. inline time_t toRelativeMinuteNum(time_t t) const { - return t / 60; + return (t + DATE_LUT_ADD) / 60 - (DATE_LUT_ADD / 60); } template @@ -841,8 +822,7 @@ public: return toStartOfHour(t); UInt64 seconds = hours * 3600; - /// NOTE: This is wrong for negative time_t. - t = t / seconds * seconds; + t = (t + DATE_LUT_ADD) / seconds * seconds - DATE_LUT_ADD; if (offset_is_whole_number_of_hours_everytime) return t; @@ -855,8 +835,7 @@ public: return toStartOfMinute(t); UInt64 seconds = 60 * minutes; - /// NOTE: This is wrong for negative time_t. - return t / seconds * seconds; + return (t + DATE_LUT_ADD) / seconds * seconds - DATE_LUT_ADD; } inline time_t toStartOfSecondInterval(time_t t, UInt64 seconds) const @@ -864,8 +843,7 @@ public: if (seconds == 1) return t; - /// NOTE: This is wrong for negative time_t. 
- return t / seconds * seconds; + return (t + DATE_LUT_ADD) / seconds * seconds - DATE_LUT_ADD; } inline LUTIndex makeLUTIndex(Int16 year, UInt8 month, UInt8 day_of_month) const From d276fac135aa2ea2866d867a73ac93657b5ef088 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 5 Mar 2021 05:04:23 +0300 Subject: [PATCH 067/333] Support negative time_t in more functions --- base/common/DateLUTImpl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 8da45fa6622..2534f20838f 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -19,7 +19,7 @@ /// A constant to add to time_t so every supported time point becomes non-negative and still has the same remainder of division by 3600. /// If we treat "remainder of division" operation in the sense of modular arithmetic (not like in C++). -#define DATE_LUT_ADD (1970 - DATE_LUT_MIN_YEAR) * 366 * 86400 +#define DATE_LUT_ADD ((1970 - DATE_LUT_MIN_YEAR) * 366 * 86400) #if defined(__PPC__) From 7c8d17045cc806c5b1ca190d18510a79204525c0 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 5 Mar 2021 06:15:20 +0300 Subject: [PATCH 068/333] fix frame formatting error --- programs/client/Client.cpp | 30 ++++++++++++++++++++++-- src/Parsers/ASTWindowDefinition.cpp | 2 +- src/Parsers/ExpressionElementParsers.cpp | 7 ++---- 3 files changed, 31 insertions(+), 8 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 3c27908741c..f213c81e298 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1317,7 +1317,10 @@ private: auto base_after_fuzz = fuzz_base->formatForErrorMessage(); - // Debug AST cloning errors. + // Check that the source AST didn't change after fuzzing. This + // helps debug AST cloning errors, where the cloned AST doesn't + // clone all its children, and erroneously points to some source + // child elements. 
if (base_before_fuzz != base_after_fuzz) { fmt::print(stderr, @@ -1334,7 +1337,7 @@ private: fmt::print(stderr, "IAST::clone() is broken for some AST node. This is a bug. The original AST ('dump before fuzz') and its cloned copy ('dump of cloned AST') refer to the same nodes, which must never happen. This means that their parent node doesn't implement clone() correctly."); - assert(false); + exit(1); } auto fuzzed_text = ast_to_process->formatForErrorMessage(); @@ -1344,6 +1347,29 @@ private: continue; } + // Check that the query is formatted properly and we can parse + // it back and format again and get the same result. Unfortunately + // we can't compare the ASTs, which would be more sensitive to + // errors. This double formatting check doesn't catch all errors, + // e.g. we can format query incorrectly, but to a valid SQL that + // we can then parse and format into the same SQL. + { + const auto * tmp_pos = fuzzed_text.c_str(); + auto parsed_formatted_query = parseQuery(tmp_pos, + tmp_pos + fuzzed_text.size(), + false /* allow_multi_statements */); + const auto formatted_twice + = parsed_formatted_query->formatForErrorMessage(); + + if (formatted_twice != fuzzed_text) + { + fmt::print(stderr, "The query formatting is broken. 
Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", + formatted_twice, fuzzed_text); + + exit(1); + } + } + parsed_query = ast_to_process; query_to_send = parsed_query->formatForErrorMessage(); diff --git a/src/Parsers/ASTWindowDefinition.cpp b/src/Parsers/ASTWindowDefinition.cpp index dba2935e630..aee951fc1f3 100644 --- a/src/Parsers/ASTWindowDefinition.cpp +++ b/src/Parsers/ASTWindowDefinition.cpp @@ -81,7 +81,7 @@ void ASTWindowDefinition::formatImpl(const FormatSettings & settings, } else if (frame.end_type == WindowFrame::BoundaryType::Unbounded) { - settings.ostr << "UNBOUNDED PRECEDING"; + settings.ostr << "UNBOUNDED FOLLOWING"; } else { diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 7a426e7774d..da63ea6e658 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -533,6 +533,7 @@ static bool tryParseFrameDefinition(ASTWindowDefinition * node, IParser::Pos & p ParserKeyword keyword_groups("GROUPS"); ParserKeyword keyword_range("RANGE"); + node->frame.is_default = false; if (keyword_rows.ignore(pos, expected)) { node->frame.type = WindowFrame::FrameType::Rows; @@ -548,6 +549,7 @@ static bool tryParseFrameDefinition(ASTWindowDefinition * node, IParser::Pos & p else { /* No frame clause. 
*/ + node->frame.is_default = true; return true; } @@ -699,11 +701,6 @@ static bool tryParseFrameDefinition(ASTWindowDefinition * node, IParser::Pos & p } } - if (!(node->frame == WindowFrame{})) - { - node->frame.is_default = false; - } - return true; } From 6b9d1f67a382d198f283a79260ffe485aaff9efb Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 5 Mar 2021 10:05:27 +0300 Subject: [PATCH 069/333] make NULL case consistent w/FieldVisitorToString --- src/Core/Field.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Field.cpp b/src/Core/Field.cpp index 93107d7bb2c..8aa79b0bbe1 100644 --- a/src/Core/Field.cpp +++ b/src/Core/Field.cpp @@ -452,7 +452,7 @@ template <> bool decimalLessOrEqual(DateTime64 x, DateTime64 y, UInt32 x_scale, inline void writeText(const Null &, WriteBuffer & buf) { - writeText(std::string("Null"), buf); + writeText(std::string("NULL"), buf); } String toString(const Field & x) From 70dfcdac8d1c90019674cc932e5e0e61ded563f8 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 5 Mar 2021 12:59:50 +0300 Subject: [PATCH 070/333] ignore some errors --- programs/client/Client.cpp | 48 +++++++++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 11 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index f213c81e298..c4954fa51cc 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1354,19 +1354,45 @@ private: // e.g. we can format query incorrectly, but to a valid SQL that // we can then parse and format into the same SQL. { - const auto * tmp_pos = fuzzed_text.c_str(); - auto parsed_formatted_query = parseQuery(tmp_pos, - tmp_pos + fuzzed_text.size(), - false /* allow_multi_statements */); - const auto formatted_twice - = parsed_formatted_query->formatForErrorMessage(); - - if (formatted_twice != fuzzed_text) + ASTPtr parsed_formatted_query; + try { - fmt::print(stderr, "The query formatting is broken. 
Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", - formatted_twice, fuzzed_text); + const auto * tmp_pos = fuzzed_text.c_str(); + parsed_formatted_query = parseQuery(tmp_pos, + tmp_pos + fuzzed_text.size(), + false /* allow_multi_statements */); + } + catch (Exception & e) + { + // Some complicated cases where we can generate the SQL + // which we can't parse: + // * first argument of lambda() replaced by fuzzer with + // something else, leading to constructs such as + // arrayMap((min(x) + 3) -> x + 1, ....) + // * internals of Enum replaced, leading to: + // Enum(equals(someFunction(y), 3)). + // We could filter them on case-by-case basis, but they + // are probably also helpful in that they test the parsing + // errors, so let's just ignore them in this check and + // send them to the server normally. + if (e.code() != ErrorCodes::SYNTAX_ERROR) + { + throw; + } + } - exit(1); + if (parsed_formatted_query) + { + const auto formatted_twice + = parsed_formatted_query->formatForErrorMessage(); + + if (formatted_twice != fuzzed_text) + { + fmt::print(stderr, "The query formatting is broken. 
Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", + formatted_twice, fuzzed_text); + + exit(1); + } } } From 9ac39dcda9b1b6877634695f67cd7c3995e20dac Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Fri, 5 Mar 2021 15:36:19 +0300 Subject: [PATCH 071/333] Update docs/en/sql-reference/statements/detach.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/statements/detach.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index adb2df570d7..8a1a14ee007 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -10,7 +10,7 @@ Deletes information about the `name` table from the server. The server stops kno Syntax: ``` sql -DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] +DETACH TABLE|VIEW [IF EXISTS] [db.]name [PERMANENTLY] [ON CLUSTER cluster] ``` This does not delete the table’s data or metadata. On the next server launch, the server will read the metadata and find out about the table again. From 59b16ecb3c5b79133a09788ab83df3374054496d Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Fri, 5 Mar 2021 15:36:31 +0300 Subject: [PATCH 072/333] Update docs/en/sql-reference/statements/detach.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/statements/detach.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index 8a1a14ee007..f27f61a933a 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -5,7 +5,7 @@ toc_title: DETACH # DETACH Statement {#detach} -Deletes information about the `name` table from the server. 
The server stops knowing about the table’s existence. +Deletes information about the table or view from the server. The server stops knowing about their existence. Syntax: From e996e2be8eeb6021d6bc056c459b3cc24bce328f Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Fri, 5 Mar 2021 15:37:34 +0300 Subject: [PATCH 073/333] Update docs/en/sql-reference/statements/detach.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/statements/detach.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index f27f61a933a..0bd4f730364 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -13,7 +13,15 @@ Syntax: DETACH TABLE|VIEW [IF EXISTS] [db.]name [PERMANENTLY] [ON CLUSTER cluster] ``` -This does not delete the table’s data or metadata. On the next server launch, the server will read the metadata and find out about the table again. +Detaching does not delete the data or metadata for the table or view. If the table or view was not detached `PERMANENTLY`, on the next server launch the server will read the metadata and recall the table/view again. If the table or view was detached `PERMANENTLY`, there will be no automatic recall. + +Whether the table was detached permanently or not, in both cases you can reattach it using the [ATTACH](../../sql-reference/statements/attach.md) query (with the exception of system tables, which do not have metadata stored for them). + +`ATTACH MATERIALIZED VIEW` doesn't work with short syntax (without `SELECT`), but you can attach it using the `ATTACH TABLE` query. + +Note that you can not detach permanently the table which is already detached (temporary). But you can attach it back and then detach permanently again. 
+ +Also you can not [DROP](../../sql-reference/statements/drop.md#drop-table) the detached table, or [CREATE TABLE](../../sql-reference/statements/create/table.md) with the same name as detached permanently, or replace it with the other table with [RENAME TABLE](../../sql-reference/statements/rename.md) query. Similarly, a “detached” table can be re-attached using the [ATTACH](../../sql-reference/statements/attach.md) query (with the exception of system tables, which do not have metadata stored for them). From d08b4816601b6e693789ca5e4f7425305eaed18a Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Fri, 5 Mar 2021 02:10:20 +0300 Subject: [PATCH 074/333] Fixes by review responces --- S3ZeroCopyReplication.md | 61 ---------------- src/Storages/MergeTree/DataPartsExchange.cpp | 3 - src/Storages/MergeTree/IMergeTreeDataPart.cpp | 2 - src/Storages/MergeTree/MergeTreeData.cpp | 5 +- src/Storages/MergeTree/MergeTreeData.h | 4 +- .../MergeTree/ReplicatedMergeTreeLogEntry.h | 5 -- src/Storages/StorageReplicatedMergeTree.cpp | 70 +++++-------------- src/Storages/StorageReplicatedMergeTree.h | 3 +- 8 files changed, 20 insertions(+), 133 deletions(-) delete mode 100644 S3ZeroCopyReplication.md diff --git a/S3ZeroCopyReplication.md b/S3ZeroCopyReplication.md deleted file mode 100644 index 5230640ebcc..00000000000 --- a/S3ZeroCopyReplication.md +++ /dev/null @@ -1,61 +0,0 @@ -# ClickHouse S3 Zero Copy Replication - -Код просто для теста, не production-ready ни разу. - -[Ветка](https://github.com/ianton-ru/ClickHouse/tree/s3_zero_copy_replication) - -## Как сделано - -При fetch-е парта при репликации в случае, если источник хранит, а приемник собирается хранить парт в S3, вместо данных пересылаются только метаданные S3, приемник кладет их локально себе -и испольузет общие с источником данные на S3. Для того, чтобы не удалить такие пошареные данные, делается пометка в ZooKeeper. - -Введена новая версия протокола REPLICATION_PROTOCOL_VERSION_WITH_PARTS_S3_COPY. 
В запросе новый параметр send_s3_metadata, если 1, то приемних просит у источника метаданные вместо данных, если это возможно. -Приемник в ответ отсылает куку send_s3_metadata=1 в случае, если идут метаданные. В остальных случаях отсылаются данные, как и прежде. - -В новой версии протокола перед полем с количеством файлов добавлена еще одна строка. Абстрактно это некий ID, по которому ноды могу понять, с одним S3 они работают или с разными. -Практически сейчас это один имя первого объекта файла checksums.txt. Эта же строка используется в качестве ID парта в зукипере. - -Применик перед запросом смотрит, будет ли хранить данные в S3. Проверка сейчас кривая - если в сторадже есть S3, то считаем, что будет S3. -Если да S3, то отсылает в запросе send_s3_metadata=1. - -Источник при получении такого запроса смотрит, лежит ли парт на S3. Если да, то в Зукипере ставит метку по пути `<путь к данным таблицы>/zero_copy_s3/shared/<имя парта>//<Путь парта>/`, -ставит в ответ куку send_s3_metadata=1 и вместо файлов с данными отсылает только файлы метаданных. - -Путь получился сложным, потому что требуется -* по имени парта получить, на каких репликах он уже есть на S3 (нужно для гибридного хранилища) -* по уникальному пути понимать, используелся ли эта копия парта другими репликами -* для павильного времени жизни лока различать лок основного варианта (all_0_0_0) от временного (tmp_fetch_all_0_0_0) - -Приемник при получении ответа с send_s3_metadata=1 проверяет доступность по переданному ключу (первый объект checksums.txt) создает только файлики с идентичными меаданными, которые в итоге будут ссылаться на те же ключи в S3, ставит в зукипере аналогичную метку, -только со своим ID реплики, и работает с этим. - -При желании удалить парт нода удаляет в Зукипере ключ `<путь к данным таблицы>/zero_copy_s3/shared//`, потом получает все подключи `<путь к данным таблицы>/zero_copy_s3/shared/`. 
-Если список не пустой, то считает, что данные использует другая нода и удаляет только локальные метаданные, если пустой, то удаляет и данные в S3. - -При мерже если реузльтат будет на S3, нода ставит эфемерную метку в Zookeeper по пути `<путь к данным таблицы>/zero_copy_s3/merged/<имя нового парта>` (!! НЕ !!). Если такая метка уже есть, то считает, что другая нода -уже помержила или мержит сейчас, и надо сделать fetch вместо мержа самой. - -В гибридном хранилище если парт переносится на S3, нода через ZK проверяет, нет был ли парт перенесен другой нодой, если был, то делает fetch (модифицированный по сравнению с обычным fetch'ем). - -В конфиг добавлен флаг, по которому включается функционал нового протокола репликации - merge_tree->allow_s3_zero_copy_replication. Сейчас стоит в false. - -## Костыли и недоработки, коих много - -* В качестве ID парта берется имя первого S3-ключа от файла checksums.txt. - -* При удалении класс диска ничего не знает про парты, прокинул флаг, что надо оставлять данные в S3 параметром, это очень криво получилось. - -* Возможна гонка, если источник отошлет метаданные про парт и тут же решит его удалить до того, как приемник поставит в зукипер пометку. - -* В протоколе репликации обмен инфой через параметр запрос в одну сторону и куку в другую мне не нравится, хотя так сделан обмен версиями репликации. - -* При ошибке должно пытаться реплицироваться по старому, но не уверен, всегда ли сработает - -* Не будет обратной совместимости, если образуются такие шареные парты, откатиться на старую версию кликхауса не получится, иначе нода может удалить используемые другой данные. - -* Возможны все же дублирования партов. Пример - нода делает мерж, падает. Другая нода незавимо делает мерж, первая нода поднимается. В итоге есть две копии померженого парта. - -* Тесты пока только самые базовые. - -* Для гибридного хранилища если две ноды решают одновременно перенести парт на S3, обе проверяют, что его там еще нет и обе переносят. 
- diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 6a033690912..4c355465ea0 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -630,9 +630,6 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3( } } - if (!disk) - throw Exception("Can't find S3 disk", ErrorCodes::S3_ERROR); - static const String TMP_PREFIX = "tmp_fetch_"; String tmp_prefix = tmp_prefix_.empty() ? TMP_PREFIX : tmp_prefix_; diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index dd33761070e..591987404b5 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1014,7 +1014,6 @@ void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_ volume->getDisk()->setLastModified(from, Poco::Timestamp::fromEpochTime(time(nullptr))); volume->getDisk()->moveFile(from, to); - String old_relative_path = relative_path; relative_path = new_relative_path; SyncGuardPtr sync_guard; @@ -1022,7 +1021,6 @@ void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_ sync_guard = volume->getDisk()->getDirectorySyncGuard(to); storage.lockSharedData(*this); - storage.unlockSharedData(*this, old_relative_path); } diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 7b310aec707..0c22d5fbc0f 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1921,8 +1921,7 @@ MergeTreeData::DataPartsVector MergeTreeData::getActivePartsToReplace( const MergeTreePartInfo & new_part_info, const String & new_part_name, DataPartPtr & out_covering_part, - DataPartsLock & /* data_parts_lock */, - bool allow_duplicate) const + DataPartsLock & /* data_parts_lock */) const { /// Parts contained in the part are consecutive in data_parts, intersecting the insertion place 
for the part itself. auto it_middle = data_parts_by_state_and_info.lower_bound(DataPartStateAndInfo{DataPartState::Committed, new_part_info}); @@ -1956,7 +1955,7 @@ MergeTreeData::DataPartsVector MergeTreeData::getActivePartsToReplace( DataPartIteratorByStateAndInfo end = it_middle; while (end != committed_parts_range.end()) { - if ((*end)->info == new_part_info && !allow_duplicate) + if ((*end)->info == new_part_info) throw Exception("Unexpected duplicate part " + (*end)->getNameWithState() + ". It is a bug.", ErrorCodes::LOGICAL_ERROR); if (!new_part_info.contains((*end)->info)) diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index da5ff283420..679518f8d5d 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -750,7 +750,6 @@ public: /// Unlock common S3 data part in zookeeper /// Overridden in StorageReplicatedMergeTree virtual bool unlockSharedData(const IMergeTreeDataPart &) const { return true; } - virtual bool unlockSharedData(const IMergeTreeDataPart &, const String &) const { return true; } /// Fetch part only if some replica has it on shared storage like S3 /// Overridden in StorageReplicatedMergeTree @@ -897,8 +896,7 @@ protected: const MergeTreePartInfo & new_part_info, const String & new_part_name, DataPartPtr & out_covering_part, - DataPartsLock & data_parts_lock, - bool allow_duplicate = false) const; + DataPartsLock & data_parts_lock) const; /// Checks whether the column is in the primary key, possibly wrapped in a chain of functions with single argument. 
bool isPrimaryOrMinMaxKeyColumnPossiblyWrappedInFunctions(const ASTPtr & node, const StorageMetadataPtr & metadata_snapshot) const; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h index d18256f8515..afd8c963943 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h @@ -39,7 +39,6 @@ struct ReplicatedMergeTreeLogEntryData REPLACE_RANGE, /// Drop certain range of partitions and replace them by new ones MUTATE_PART, /// Apply one or several mutations to the part. ALTER_METADATA, /// Apply alter modification according to global /metadata and /columns paths - FETCH_SHARED_PART, /// Get the part from other replica only if it on shared S3 storade }; static String typeToString(Type type) @@ -54,7 +53,6 @@ struct ReplicatedMergeTreeLogEntryData case ReplicatedMergeTreeLogEntryData::REPLACE_RANGE: return "REPLACE_RANGE"; case ReplicatedMergeTreeLogEntryData::MUTATE_PART: return "MUTATE_PART"; case ReplicatedMergeTreeLogEntryData::ALTER_METADATA: return "ALTER_METADATA"; - case ReplicatedMergeTreeLogEntryData::FETCH_SHARED_PART: return "FETCH_SHARED_PART"; default: throw Exception("Unknown log entry type: " + DB::toString(type), ErrorCodes::LOGICAL_ERROR); } @@ -195,9 +193,6 @@ struct ReplicatedMergeTreeLogEntry : public ReplicatedMergeTreeLogEntryData, std std::condition_variable execution_complete; /// Awake when currently_executing becomes false. 
static Ptr parse(const String & s, const Coordination::Stat & stat); - - DiskPtr disk; - String path; }; using ReplicatedMergeTreeLogEntryPtr = std::shared_ptr; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 3acffdcad9f..1f2bd4f4775 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1955,16 +1955,15 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry) } -bool StorageReplicatedMergeTree::executeFetchShared(ReplicatedMergeTreeLogEntry & entry) +bool StorageReplicatedMergeTree::executeFetchShared( + const String & source_replica, + const String & new_part_name, + const DiskPtr & disk, + const String & path) { - if (entry.type != LogEntry::FETCH_SHARED_PART) + if (source_replica.empty()) { - throw Exception("Wrong entry.type in executeFetchShared", ErrorCodes::LOGICAL_ERROR); - } - - if (entry.source_replica.empty()) - { - LOG_INFO(log, "No active replica has part {} on S3.", entry.new_part_name); + LOG_INFO(log, "No active replica has part {} on S3.", new_part_name); return false; } @@ -1992,8 +1991,8 @@ bool StorageReplicatedMergeTree::executeFetchShared(ReplicatedMergeTreeLogEntry try { - if (!fetchPart(entry.new_part_name, metadata_snapshot, zookeeper_path + "/replicas/" + entry.source_replica, false, entry.quorum, - nullptr, true, entry.disk, entry.path)) + if (!fetchPart(new_part_name, metadata_snapshot, zookeeper_path + "/replicas/" + source_replica, false, 0, + nullptr, true, disk, path)) return false; } catch (Exception & e) @@ -6478,10 +6477,8 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part) String id = part.getUniqueId(); boost::replace_all(id, "/", "_"); - String norm_path = part.relative_path; - boost::replace_all(norm_path, "/", "_"); - String zookeeper_node = zookeeper_path + "/zero_copy_s3/shared/" + part.name + "/" + id + "/" + norm_path + "/" + replica_name; + String zookeeper_node = 
zookeeper_path + "/zero_copy_s3/shared/" + part.name + "/" + id + "/" + replica_name; LOG_TRACE(log, "Set zookeeper lock {}", zookeeper_node); @@ -6506,12 +6503,6 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part) bool StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & part) const -{ - return unlockSharedData(part, part.relative_path); -} - - -bool StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & part, const String & path) const { if (!part.volume) return true; @@ -6527,29 +6518,16 @@ bool StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & par String id = part.getUniqueId(); boost::replace_all(id, "/", "_"); - String norm_path = path; - boost::replace_all(norm_path, "/", "_"); String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/shared/" + part.name; String zookeeper_part_uniq_node = zookeeper_part_node + "/" + id; - String zookeeper_part_path_node = zookeeper_part_uniq_node + "/" + norm_path; - String zookeeper_node = zookeeper_part_path_node + "/" + replica_name; + String zookeeper_node = zookeeper_part_uniq_node + "/" + replica_name; LOG_TRACE(log, "Remove zookeeper lock {}", zookeeper_node); zookeeper->tryRemove(zookeeper_node); Strings children; - zookeeper->tryGetChildren(zookeeper_part_path_node, children); - if (!children.empty()) - { - LOG_TRACE(log, "Found zookeper locks for {}", zookeeper_part_path_node); - return false; - } - - zookeeper->tryRemove(zookeeper_part_path_node); - - children.clear(); zookeeper->tryGetChildren(zookeeper_part_uniq_node, children); if (!children.empty()) @@ -6589,18 +6567,10 @@ bool StorageReplicatedMergeTree::tryToFetchIfShared( if (replica.empty()) return false; - ReplicatedMergeTreeLogEntry log_entry; - log_entry.type = ReplicatedMergeTreeLogEntry::FETCH_SHARED_PART; - log_entry.source_replica = replica; - log_entry.new_part_name = part.name; - log_entry.create_time = 0; - log_entry.disk = disk; - log_entry.path = path; 
- /// TODO: Fix const usage StorageReplicatedMergeTree * replicated_storage_nc = const_cast(this); - return replicated_storage_nc->executeFetchShared(log_entry); + return replicated_storage_nc->executeFetchShared(replica, part.name, disk, path); } @@ -6613,8 +6583,6 @@ String StorageReplicatedMergeTree::getSharedDataReplica( if (!zookeeper) return best_replica; - String norm_path = part.relative_path; - boost::replace_all(norm_path, "/", "_"); String zookeeper_part_node = zookeeper_path + "/zero_copy_s3/shared/" + part.name; Strings ids; @@ -6624,16 +6592,10 @@ String StorageReplicatedMergeTree::getSharedDataReplica( for (const auto & id : ids) { String zookeeper_part_uniq_node = zookeeper_part_node + "/" + id; - Strings paths; - zookeeper->tryGetChildren(zookeeper_part_uniq_node, paths); - for (const auto & path : paths) - { - String zookeeper_node = zookeeper_part_uniq_node + "/" + path; - Strings id_replicas; - zookeeper->tryGetChildren(zookeeper_node, id_replicas); - LOG_TRACE(log, "Found zookeper replicas for {}: {}", zookeeper_node, id_replicas.size()); - replicas.insert(replicas.end(), id_replicas.begin(), id_replicas.end()); - } + Strings id_replicas; + zookeeper->tryGetChildren(zookeeper_part_uniq_node, id_replicas); + LOG_TRACE(log, "Found zookeper replicas for {}: {}", zookeeper_part_uniq_node, id_replicas.size()); + replicas.insert(replicas.end(), id_replicas.begin(), id_replicas.end()); } LOG_TRACE(log, "Found zookeper replicas for part {}: {}", part.name, replicas.size()); diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 58bedfc0408..5bd10d93c8e 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -213,7 +213,7 @@ public: bool canExecuteFetch(const ReplicatedMergeTreeLogEntry & entry, String & disable_reason) const; /// Fetch part only when it stored on shared storage like S3 - bool executeFetchShared(ReplicatedMergeTreeLogEntry & entry); + 
bool executeFetchShared(const String & source_replica, const String & new_part_name, const DiskPtr & disk, const String & path); /// Lock part in zookeeper for use common S3 data in several nodes void lockSharedData(const IMergeTreeDataPart & part) const override; @@ -222,7 +222,6 @@ public: /// Return true if data unlocked /// Return false if data is still used by another node bool unlockSharedData(const IMergeTreeDataPart & part) const override; - bool unlockSharedData(const IMergeTreeDataPart & part, const String & path) const override; /// Fetch part only if some replica has it on shared storage like S3 bool tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) const override; From 5b3161e0b532f25a0984cb9a98bf6994ce22ceda Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 5 Mar 2021 20:24:06 +0300 Subject: [PATCH 075/333] Get rid of const_cast --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 5 +-- src/Storages/MergeTree/MergeTreeData.h | 2 +- .../MergeTree/MergeTreePartsMover.cpp | 35 ++++++++++++++++--- src/Storages/StorageReplicatedMergeTree.cpp | 7 ++-- src/Storages/StorageReplicatedMergeTree.h | 2 +- 5 files changed, 35 insertions(+), 16 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 591987404b5..1f18c894465 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1174,10 +1174,7 @@ void IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & di disk->removeRecursive(path_to_clone + relative_path + '/'); } disk->createDirectories(path_to_clone); - - bool is_fetched = storage.tryToFetchIfShared(*this, disk, path_to_clone + "/" + name); - if (!is_fetched) - volume->getDisk()->copy(getFullRelativePath(), disk, path_to_clone); + volume->getDisk()->copy(getFullRelativePath(), disk, path_to_clone); volume->getDisk()->removeFileIfExists(path_to_clone + '/' + 
DELETE_ON_DESTROY_MARKER_FILE_NAME); } diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 679518f8d5d..1f1505fe552 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -753,7 +753,7 @@ public: /// Fetch part only if some replica has it on shared storage like S3 /// Overridden in StorageReplicatedMergeTree - virtual bool tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) const { return false; } + virtual bool tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) { return false; } protected: diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index 7b8c88b1bff..41eae7fed38 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -194,15 +194,40 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt if (moves_blocker.isCancelled()) throw Exception("Cancelled moving parts.", ErrorCodes::ABORTED); - LOG_TRACE(log, "Cloning part {}", moving_part.part->name); + auto settings = data->getSettings(); + auto part = moving_part.part; + LOG_TRACE(log, "Cloning part {}", part->name); + + auto disk = moving_part.reserved_space->getDisk(); const String directory_to_move = "moving"; - moving_part.part->makeCloneOnDisk(moving_part.reserved_space->getDisk(), directory_to_move); + if (settings->allow_s3_zero_copy_replication) + { + /// Try to fetch part from S3 without copy and fallback to default copy + /// if it's not possible + moving_part.part->assertOnDisk(); + String path_to_clone = data->getRelativeDataPath() + directory_to_move + '/'; + String relative_path = part->relative_path; + if (disk->exists(path_to_clone + relative_path)) + { + LOG_WARNING(log, "Path " + fullPath(disk, path_to_clone + relative_path) + " already exists. 
Will remove it and clone again."); + disk->removeRecursive(path_to_clone + relative_path + '/'); + } + disk->createDirectories(path_to_clone); + bool is_fetched = data->tryToFetchIfShared(*part, disk, path_to_clone + "/" + part->name); + if (!is_fetched) + part->volume->getDisk()->copy(data->getRelativeDataPath() + relative_path, disk, path_to_clone); + part->volume->getDisk()->removeFileIfExists(path_to_clone + '/' + IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME); + } + else + { + part->makeCloneOnDisk(disk, directory_to_move); + } - auto single_disk_volume = std::make_shared("volume_" + moving_part.part->name, moving_part.reserved_space->getDisk(), 0); + auto single_disk_volume = std::make_shared("volume_" + part->name, moving_part.reserved_space->getDisk(), 0); MergeTreeData::MutableDataPartPtr cloned_part = - data->createPart(moving_part.part->name, single_disk_volume, directory_to_move + '/' + moving_part.part->name); - LOG_TRACE(log, "Part {} was cloned to {}", moving_part.part->name, cloned_part->getFullPath()); + data->createPart(part->name, single_disk_volume, directory_to_move + '/' + part->name); + LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part->getFullPath()); cloned_part->loadColumnsChecksumsIndexes(true, true); return cloned_part; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 1f2bd4f4775..ddc63793640 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -6552,7 +6552,7 @@ bool StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & par bool StorageReplicatedMergeTree::tryToFetchIfShared( const IMergeTreeDataPart & part, const DiskPtr & disk, - const String & path) const + const String & path) { const auto data_settings = getSettings(); if (!data_settings->allow_s3_zero_copy_replication) @@ -6567,10 +6567,7 @@ bool StorageReplicatedMergeTree::tryToFetchIfShared( if (replica.empty()) return 
false; - /// TODO: Fix const usage - StorageReplicatedMergeTree * replicated_storage_nc = const_cast(this); - - return replicated_storage_nc->executeFetchShared(replica, part.name, disk, path); + return executeFetchShared(replica, part.name, disk, path); } diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 5bd10d93c8e..e3d7e6b2556 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -224,7 +224,7 @@ public: bool unlockSharedData(const IMergeTreeDataPart & part) const override; /// Fetch part only if some replica has it on shared storage like S3 - bool tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) const override; + bool tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) override; /// Get best replica having this partition on S3 String getSharedDataReplica(const IMergeTreeDataPart & part) const; From 60cb84d41755a8c7e61074c8e586d18267c4145d Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 5 Mar 2021 20:24:50 +0300 Subject: [PATCH 076/333] more debug info --- programs/client/Client.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index c4954fa51cc..c5b579f2046 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1390,6 +1390,9 @@ private: { fmt::print(stderr, "The query formatting is broken. 
Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", formatted_twice, fuzzed_text); + fmt::print(stderr, "AST parsed back:\n'{}'\nSource AST:\n'{}'\n", + parsed_formatted_query->dumpTree(), + ast_to_process->dumpTree()); exit(1); } From 8d5d1b76fbc08a337bd4390dfc2e9c54883569d9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 7 Mar 2021 00:36:29 +0300 Subject: [PATCH 077/333] Fix error --- base/common/DateLUTImpl.h | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 2534f20838f..6e8b424ab9d 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -5,7 +5,9 @@ #include "types.h" #include +#include #include +#include #define DATE_LUT_MIN_YEAR 1925 /// 1925 since wast majority of timezones changed to 15-minute aligned offsets somewhere in 1924 or earlier. @@ -231,6 +233,21 @@ private: return lut[toLUTIndex(v)]; } + template + static inline T roundDown(T x, Divisor divisor) + { + static_assert(std::is_integral_v && std::is_integral_v); + assert(divisor > 0); + + if (likely(x >= 0)) + return x / divisor * divisor; + + /// Integer division for negative numbers rounds them towards zero (up). + /// We will shift the number so it will be rounded towards -inf (down). + + return (x + 1 - divisor) / divisor * divisor; + } + public: const std::string & getTimeZone() const { return time_zone; } @@ -822,10 +839,12 @@ public: return toStartOfHour(t); UInt64 seconds = hours * 3600; - t = (t + DATE_LUT_ADD) / seconds * seconds - DATE_LUT_ADD; + t = roundDown(t, seconds); if (offset_is_whole_number_of_hours_everytime) return t; + + /// TODO check if it's correct. 
return toStartOfHour(t); } @@ -833,9 +852,9 @@ public: { if (minutes == 1) return toStartOfMinute(t); - UInt64 seconds = 60 * minutes; - return (t + DATE_LUT_ADD) / seconds * seconds - DATE_LUT_ADD; + UInt64 seconds = 60 * minutes; + return roundDown(t, seconds); } inline time_t toStartOfSecondInterval(time_t t, UInt64 seconds) const @@ -843,7 +862,7 @@ public: if (seconds == 1) return t; - return (t + DATE_LUT_ADD) / seconds * seconds - DATE_LUT_ADD; + return roundDown(t, seconds); } inline LUTIndex makeLUTIndex(Int16 year, UInt8 month, UInt8 day_of_month) const From 0dbadc8d8eec1e23afdd635609aaf8fd612110a6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 7 Mar 2021 01:35:23 +0300 Subject: [PATCH 078/333] Fix error --- base/common/DateLUTImpl.cpp | 28 ++++++++++++++++------------ base/common/DateLUTImpl.h | 19 +++++++++++++------ 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index 46fc88aeb4d..a111a21d8fd 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -69,10 +69,18 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) cctz::time_zone::civil_lookup lookup = cctz_time_zone.lookup(date); /// Ambiguity is possible if time was changed backwards at the midnight - /// (or after midnight time has been changed to the previous day, for example two hours backwards at 01:00). - /// Then midnight appears twice. Usually time change happens exactly at 00:00. - /// Then we should use the second midnight as the start of the day. - start_of_day = std::chrono::system_clock::to_time_t(lookup.post); + /// or after midnight time has been changed back to midnight, for example one hour backwards at 01:00 + /// or after midnight time has been changed to the previous day, for example two hours backwards at 01:00 + /// Then midnight appears twice. Usually time change happens exactly at 00:00 or 01:00. 
+ + /// If transition did not involve previous day, we should use the first midnight as the start of the day, + /// otherwise it's better to use the second midnight. + + std::chrono::time_point start_of_day_time_point = lookup.trans < lookup.post + ? lookup.post /* Second midnight appears after transition, so there was a piece of previous day after transition */ + : lookup.pre; + + start_of_day = std::chrono::system_clock::to_time_t(start_of_day_time_point); Values & values = lut[i]; values.year = date.year(); @@ -97,17 +105,13 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) values.time_at_offset_change_value = 0; values.amount_of_offset_change_value = 0; - /// TODO: This partially ignores fractional offsets, - /// which may cause incorrect toRelativeHourNum() results for some timezones, namelly Europe/Minsk - /// when pre-May 2 1924 it had an offset of UTC+1:50, and after it was UTC+2h. - /// https://www.timeanddate.com/time/zone/belarus/minsk?syear=1900 - if (start_of_day > 0 && start_of_day % 3600) + if (offset_is_whole_number_of_hours_everytime && start_of_day > 0 && start_of_day % 3600) offset_is_whole_number_of_hours_everytime = false; /// If UTC offset was changed this day. /// Change in time zone without transition is possible, e.g. 
Moscow 1991 Sun, 31 Mar, 02:00 MSK to EEST cctz::time_zone::civil_transition transition{}; - if (cctz_time_zone.next_transition(lookup.post, &transition) + if (cctz_time_zone.next_transition(start_of_day_time_point - std::chrono::seconds(1), &transition) && transition.from.year() == date.year() && transition.from.month() == date.month() && transition.from.day() == date.day() @@ -116,8 +120,8 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) values.time_at_offset_change_value = (transition.from - cctz::civil_second(date)) / Values::OffsetChangeFactor; values.amount_of_offset_change_value = (transition.to - transition.from) / Values::OffsetChangeFactor; -// std::cerr << time_zone << ", " << date << ": change from " << transition.from << " to " << transition.to << "\n"; -// std::cerr << time_zone << ", " << date << ": change at " << values.time_at_offset_change() << " with " << values.amount_of_offset_change() << "\n"; + std::cerr << time_zone << ", " << date << ": change from " << transition.from << " to " << transition.to << "\n"; + std::cerr << time_zone << ", " << date << ": change at " << values.time_at_offset_change() << " with " << values.amount_of_offset_change() << "\n"; /// We don't support too large changes. if (values.amount_of_offset_change_value > 24 * 4) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 6e8b424ab9d..e558a3b45ce 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -472,18 +472,25 @@ public: } /// NOTE: Assuming timezone offset is a multiple of 15 minutes. 
- inline time_t toStartOfMinute(time_t t) const { return (t + DATE_LUT_ADD) / 60 * 60 - DATE_LUT_ADD; } - inline time_t toStartOfFiveMinute(time_t t) const { return (t + DATE_LUT_ADD) / 300 * 300 - DATE_LUT_ADD; } - inline time_t toStartOfFifteenMinutes(time_t t) const { return (t + DATE_LUT_ADD) / 900 * 900 - DATE_LUT_ADD; } + inline time_t toStartOfMinute(time_t t) const { return roundDown(t, 60); } + inline time_t toStartOfFiveMinute(time_t t) const { return roundDown(t, 300); } + inline time_t toStartOfFifteenMinutes(time_t t) const { return roundDown(t, 900); } - /// NOTE: This most likely wrong for Nepal - it has offset 05:45. Australia/Eucla is also unfortunate. - inline time_t toStartOfTenMinutes(time_t t) const { return (t + DATE_LUT_ADD) / 600 * 600 - DATE_LUT_ADD; } + inline time_t toStartOfTenMinutes(time_t t) const + { + if (offset_is_whole_number_of_hours_everytime) + return roundDown(t, 600); + + /// More complex logic is for Nepal - it has offset 05:45. Australia/Eucla is also unfortunate. + Int64 date = find(t).date; + return date + (t - date) / 600 * 600; + } /// NOTE: Assuming timezone transitions are multiple of hours. Lord Howe Island in Australia is a notable exception. 
inline time_t toStartOfHour(time_t t) const { if (offset_is_whole_number_of_hours_everytime) - return (t + DATE_LUT_ADD) / 3600 * 3600 - DATE_LUT_ADD; + return roundDown(t, 3600); Int64 date = find(t).date; return date + (t - date) / 3600 * 3600; From e33fae76256d3c0e01e7e14f5c94a4cdd763caca Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 7 Mar 2021 01:35:49 +0300 Subject: [PATCH 079/333] Improve test --- src/Common/tests/gtest_DateLUTImpl.cpp | 39 ++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/src/Common/tests/gtest_DateLUTImpl.cpp b/src/Common/tests/gtest_DateLUTImpl.cpp index 95ac055e237..80cb73f06c8 100644 --- a/src/Common/tests/gtest_DateLUTImpl.cpp +++ b/src/Common/tests/gtest_DateLUTImpl.cpp @@ -407,22 +407,49 @@ TEST_P(DateLUTWithTimeZoneAndTimeRange, InRange) /// Sun Nov 7 00:00:30 ADT 2010 /// Sat Nov 6 23:01:00 AST 2010 /// Sat Nov 6 23:01:30 AST 2010 + + bool has_transition = false; cctz::time_zone::civil_transition transition{}; if (tz.next_transition(std::chrono::system_clock::from_time_t(expected_time_t), &transition) - && transition.from.day() == tz_time.day() - && (transition.from.second() != 0 || transition.from.minute() % 900 != 0 - || (transition.from.day() != transition.to.day() - && (transition.from.hour() != 0 && transition.from.minute() != 0 && transition.from.second() != 0)))) + && (transition.from.day() == tz_time.day() || transition.to.day() == tz_time.day())) + { + has_transition = true; + } + + if (has_transition && (transition.from.second() != 0 || transition.from.minute() % 900 != 0)) { std::cerr << "Skipping " << timezone_name << " " << tz_time - << " because of unsupported timezone transition from " << transition.from << " to " << transition.to << "\n"; + << " because of unsupported timezone transition from " << transition.from << " to " << transition.to + << " (not divisable by 15 minutes)\n"; + continue; + } + + /// Transition to previous day, but not from midnight. 
+ if (has_transition && cctz::civil_day(transition.from) == cctz::civil_day(transition.to) + 1 + && transition.from != cctz::civil_day(transition.from)) + { + std::cerr << "Skipping " << timezone_name << " " << tz_time + << " because of unsupported timezone transition from " << transition.from << " to " << transition.to + << " (to previous day but not at midnight)\n"; + continue; + } + + /// To large transition. + if (has_transition + && cctz::civil_day(transition.from) != cctz::civil_day(transition.to) + && cctz::civil_day(transition.from) != cctz::civil_day(transition.to) + 1) + { + std::cerr << "Skipping " << timezone_name << " " << tz_time + << " because of unsupported timezone transition from " << transition.from << " to " << transition.to + << " (it is too large)\n"; continue; } EXPECT_EQ(tz_time.year(), lut.toYear(expected_time_t)); EXPECT_EQ(tz_time.month(), lut.toMonth(expected_time_t)); EXPECT_EQ(tz_time.day(), lut.toDayOfMonth(expected_time_t)); - EXPECT_EQ(static_cast(cctz::get_weekday(tz_time)) + 1, lut.toDayOfWeek(expected_time_t)); // tm.tm_wday Sunday is 0, while for DateLUTImpl it is 7 + /// tm.tm_wday Sunday is 0, while for DateLUTImpl it is 7 + EXPECT_EQ(static_cast(cctz::get_weekday(tz_time)) + 1, lut.toDayOfWeek(expected_time_t)); EXPECT_EQ(cctz::get_yearday(tz_time), lut.toDayOfYear(expected_time_t)); EXPECT_EQ(tz_time.hour(), lut.toHour(expected_time_t)); EXPECT_EQ(tz_time.minute(), lut.toMinute(expected_time_t)); From d406999e8ff03b91ed5f4dbd4cf0077fe32fbbd1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 7 Mar 2021 01:36:48 +0300 Subject: [PATCH 080/333] Remove debug output --- base/common/DateLUTImpl.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index a111a21d8fd..9e7a7eab7d3 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -120,8 +120,8 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) 
values.time_at_offset_change_value = (transition.from - cctz::civil_second(date)) / Values::OffsetChangeFactor; values.amount_of_offset_change_value = (transition.to - transition.from) / Values::OffsetChangeFactor; - std::cerr << time_zone << ", " << date << ": change from " << transition.from << " to " << transition.to << "\n"; - std::cerr << time_zone << ", " << date << ": change at " << values.time_at_offset_change() << " with " << values.amount_of_offset_change() << "\n"; +// std::cerr << time_zone << ", " << date << ": change from " << transition.from << " to " << transition.to << "\n"; +// std::cerr << time_zone << ", " << date << ": change at " << values.time_at_offset_change() << " with " << values.amount_of_offset_change() << "\n"; /// We don't support too large changes. if (values.amount_of_offset_change_value > 24 * 4) From 8271cec093a71a319aac3a584f6c1501d9d2bdbe Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 7 Mar 2021 02:36:24 +0300 Subject: [PATCH 081/333] Fix error --- base/common/DateLUTImpl.cpp | 4 +--- src/Common/tests/gtest_DateLUTImpl.cpp | 7 +++---- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index 9e7a7eab7d3..6097763fb49 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -112,9 +112,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) /// Change in time zone without transition is possible, e.g. 
Moscow 1991 Sun, 31 Mar, 02:00 MSK to EEST cctz::time_zone::civil_transition transition{}; if (cctz_time_zone.next_transition(start_of_day_time_point - std::chrono::seconds(1), &transition) - && transition.from.year() == date.year() - && transition.from.month() == date.month() - && transition.from.day() == date.day() + && (cctz::civil_day(transition.from) == date || cctz::civil_day(transition.to) == date) && transition.from != transition.to) { values.time_at_offset_change_value = (transition.from - cctz::civil_second(date)) / Values::OffsetChangeFactor; diff --git a/src/Common/tests/gtest_DateLUTImpl.cpp b/src/Common/tests/gtest_DateLUTImpl.cpp index 80cb73f06c8..7d3d38df645 100644 --- a/src/Common/tests/gtest_DateLUTImpl.cpp +++ b/src/Common/tests/gtest_DateLUTImpl.cpp @@ -416,11 +416,11 @@ TEST_P(DateLUTWithTimeZoneAndTimeRange, InRange) has_transition = true; } - if (has_transition && (transition.from.second() != 0 || transition.from.minute() % 900 != 0)) + if (has_transition && (transition.from.second() != 0 || transition.from.minute() % 15 != 0)) { std::cerr << "Skipping " << timezone_name << " " << tz_time << " because of unsupported timezone transition from " << transition.from << " to " << transition.to - << " (not divisable by 15 minutes)\n"; + << " (not divisible by 15 minutes)\n"; continue; } @@ -436,8 +436,7 @@ TEST_P(DateLUTWithTimeZoneAndTimeRange, InRange) /// To large transition. 
if (has_transition - && cctz::civil_day(transition.from) != cctz::civil_day(transition.to) - && cctz::civil_day(transition.from) != cctz::civil_day(transition.to) + 1) + && std::abs(transition.from - transition.to) > 3600 * 3) { std::cerr << "Skipping " << timezone_name << " " << tz_time << " because of unsupported timezone transition from " << transition.from << " to " << transition.to From 1bb62f578b75541689eec40971b3a10d8630ed1a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 7 Mar 2021 04:10:38 +0300 Subject: [PATCH 082/333] Unit tests passing --- base/common/DateLUTImpl.cpp | 6 ++-- base/common/DateLUTImpl.h | 31 ++++++++-------- src/Common/tests/gtest_DateLUTImpl.cpp | 50 ++++++++++++-------------- 3 files changed, 42 insertions(+), 45 deletions(-) diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp index 6097763fb49..e7faeb63760 100644 --- a/base/common/DateLUTImpl.cpp +++ b/base/common/DateLUTImpl.cpp @@ -59,7 +59,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) offset_at_start_of_epoch = cctz_time_zone.lookup(cctz_time_zone.lookup(epoch).pre).offset; offset_at_start_of_lut = cctz_time_zone.lookup(cctz_time_zone.lookup(lut_start).pre).offset; - offset_is_whole_number_of_hours_everytime = true; + offset_is_whole_number_of_hours_during_epoch = true; cctz::civil_day date = lut_start; @@ -105,8 +105,8 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) values.time_at_offset_change_value = 0; values.amount_of_offset_change_value = 0; - if (offset_is_whole_number_of_hours_everytime && start_of_day > 0 && start_of_day % 3600) - offset_is_whole_number_of_hours_everytime = false; + if (offset_is_whole_number_of_hours_during_epoch && start_of_day > 0 && start_of_day % 3600) + offset_is_whole_number_of_hours_during_epoch = false; /// If UTC offset was changed this day. /// Change in time zone without transition is possible, e.g. 
Moscow 1991 Sun, 31 Mar, 02:00 MSK to EEST diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index e558a3b45ce..16abd3dfb0e 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -185,7 +185,7 @@ private: time_t offset_at_start_of_epoch; /// UTC offset at the beginning of the first supported year. time_t offset_at_start_of_lut; - bool offset_is_whole_number_of_hours_everytime; + bool offset_is_whole_number_of_hours_during_epoch; /// Time zone name. std::string time_zone; @@ -193,15 +193,19 @@ private: inline LUTIndex findIndex(time_t t) const { /// First guess. - const UInt32 guess = ((t / 86400) + daynum_offset_epoch) & date_lut_mask; + UInt32 guess = ((t / 86400) + daynum_offset_epoch) & date_lut_mask; + + /// For negative time_t the integer division was rounded up, so the guess is offset by one. + if (unlikely(t < 0)) + --guess; /// UTC offset is from -12 to +14 in all known time zones. This requires checking only three indices. - if (t >= lut[guess].date && t < lut[UInt32(guess + 1)].date) + if (t >= lut[guess].date && t < lut[guess + 1].date) return LUTIndex(guess); /// Time zones that have offset 0 from UTC do daylight saving time change (if any) /// towards increasing UTC offset (example: British Standard Time). - if (t >= lut[UInt32(guess + 1)].date) + if (t >= lut[guess + 1].date) return LUTIndex(guess + 1); return LUTIndex(guess - 1); @@ -253,7 +257,6 @@ public: // Methods only for unit-testing, it makes very little sense to use it from user code. auto getOffsetAtStartOfEpoch() const { return offset_at_start_of_epoch; } - auto getOffsetIsWholNumberOfHoursEveryWhere() const { return offset_is_whole_number_of_hours_everytime; } auto getTimeOffsetAtStartOfLUT() const { return offset_at_start_of_lut; } /// All functions below are thread-safe; arguments are not checked. 
@@ -456,8 +459,8 @@ public: inline unsigned toMinute(time_t t) const { - if (offset_is_whole_number_of_hours_everytime) - return ((t + DATE_LUT_ADD) / 60) % 60; + if (t >= 0 && offset_is_whole_number_of_hours_during_epoch) + return (t / 60) % 60; /// To consider the DST changing situation within this day /// also make the special timezones with no whole hour offset such as 'Australia/Lord_Howe' been taken into account. @@ -478,8 +481,8 @@ public: inline time_t toStartOfTenMinutes(time_t t) const { - if (offset_is_whole_number_of_hours_everytime) - return roundDown(t, 600); + if (t >= 0 && offset_is_whole_number_of_hours_during_epoch) + return t / 600 * 600; /// More complex logic is for Nepal - it has offset 05:45. Australia/Eucla is also unfortunate. Int64 date = find(t).date; @@ -489,8 +492,8 @@ public: /// NOTE: Assuming timezone transitions are multiple of hours. Lord Howe Island in Australia is a notable exception. inline time_t toStartOfHour(time_t t) const { - if (offset_is_whole_number_of_hours_everytime) - return roundDown(t, 3600); + if (t >= 0 && offset_is_whole_number_of_hours_during_epoch) + return t / 3600 * 3600; Int64 date = find(t).date; return date + (t - date) / 3600 * 3600; @@ -773,8 +776,8 @@ public: /// We count all hour-length intervals, unrelated to offset changes. inline time_t toRelativeHourNum(time_t t) const { - if (offset_is_whole_number_of_hours_everytime) - return (t + DATE_LUT_ADD) / 3600 - (DATE_LUT_ADD / 3600); + if (t >= 0 && offset_is_whole_number_of_hours_during_epoch) + return t / 3600; /// Assume that if offset was fractional, then the fraction is the same as at the beginning of epoch. /// NOTE This assumption is false for "Pacific/Pitcairn" and "Pacific/Kiritimati" time zones. @@ -848,7 +851,7 @@ public: t = roundDown(t, seconds); - if (offset_is_whole_number_of_hours_everytime) + if (t >= 0 && offset_is_whole_number_of_hours_during_epoch) return t; /// TODO check if it's correct. 
diff --git a/src/Common/tests/gtest_DateLUTImpl.cpp b/src/Common/tests/gtest_DateLUTImpl.cpp index 7d3d38df645..e18bd16e731 100644 --- a/src/Common/tests/gtest_DateLUTImpl.cpp +++ b/src/Common/tests/gtest_DateLUTImpl.cpp @@ -297,7 +297,6 @@ TEST_P(DateLUTWithTimeZone, VaidateTimeComponentsAroundEpoch) << "\n\tTimezone: " << timezone_name << "\n\ttimestamp: " << i << "\n\t offset at start of epoch : " << lut.getOffsetAtStartOfEpoch() - << "\n\t offset_is_whole_number_of_hours_everytime : " << lut.getOffsetIsWholNumberOfHoursEveryWhere() << "\n\t offset_at_start_of_lut : " << lut.getTimeOffsetAtStartOfLUT()); EXPECT_GE(24, lut.toHour(i)); @@ -336,7 +335,7 @@ INSTANTIATE_TEST_SUITE_P(ExoticTimezones, }) ); -INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimeZones, +INSTANTIATE_TEST_SUITE_P(AllTimeZones, DateLUTWithTimeZone, ::testing::ValuesIn(allTimezones()) ); @@ -391,11 +390,15 @@ TEST_P(DateLUTWithTimeZoneAndTimeRange, InRange) { SCOPED_TRACE(expected_time_t); - const auto tz_time = cctz::convert(std::chrono::system_clock::from_time_t(expected_time_t), tz); + const cctz::civil_second tz_time = cctz::convert(std::chrono::system_clock::from_time_t(expected_time_t), tz); /// Weird offset, not supported. /// Example: Africa/Monrovia has offset UTC-0:44:30 in year 1970. - if (tz.lookup(std::chrono::system_clock::from_time_t(expected_time_t)).offset % 900) + + auto timestamp_current_day_pre = std::chrono::system_clock::to_time_t(tz.lookup(cctz::civil_day(tz_time)).pre); + auto timestamp_current_day_post = std::chrono::system_clock::to_time_t(tz.lookup(cctz::civil_day(tz_time) + 1).post); + + if (timestamp_current_day_pre % 900 || timestamp_current_day_post % 900) continue; /// Unsupported timezone transitions - not in 15-minute time point or to different day. 
@@ -410,7 +413,7 @@ TEST_P(DateLUTWithTimeZoneAndTimeRange, InRange) bool has_transition = false; cctz::time_zone::civil_transition transition{}; - if (tz.next_transition(std::chrono::system_clock::from_time_t(expected_time_t), &transition) + if (tz.next_transition(std::chrono::system_clock::from_time_t(expected_time_t - 1), &transition) && (transition.from.day() == tz_time.day() || transition.to.day() == tz_time.day())) { has_transition = true; @@ -418,9 +421,9 @@ TEST_P(DateLUTWithTimeZoneAndTimeRange, InRange) if (has_transition && (transition.from.second() != 0 || transition.from.minute() % 15 != 0)) { - std::cerr << "Skipping " << timezone_name << " " << tz_time + /*std::cerr << "Skipping " << timezone_name << " " << tz_time << " because of unsupported timezone transition from " << transition.from << " to " << transition.to - << " (not divisible by 15 minutes)\n"; + << " (not divisible by 15 minutes)\n";*/ continue; } @@ -428,9 +431,9 @@ TEST_P(DateLUTWithTimeZoneAndTimeRange, InRange) if (has_transition && cctz::civil_day(transition.from) == cctz::civil_day(transition.to) + 1 && transition.from != cctz::civil_day(transition.from)) { - std::cerr << "Skipping " << timezone_name << " " << tz_time + /*std::cerr << "Skipping " << timezone_name << " " << tz_time << " because of unsupported timezone transition from " << transition.from << " to " << transition.to - << " (to previous day but not at midnight)\n"; + << " (to previous day but not at midnight)\n";*/ continue; } @@ -438,9 +441,9 @@ TEST_P(DateLUTWithTimeZoneAndTimeRange, InRange) if (has_transition && std::abs(transition.from - transition.to) > 3600 * 3) { - std::cerr << "Skipping " << timezone_name << " " << tz_time + /*std::cerr << "Skipping " << timezone_name << " " << tz_time << " because of unsupported timezone transition from " << transition.from << " to " << transition.to - << " (it is too large)\n"; + << " (it is too large)\n";*/ continue; } @@ -457,23 +460,14 @@ 
TEST_P(DateLUTWithTimeZoneAndTimeRange, InRange) const auto time_string = cctz::format("%E4Y-%m-%d %H:%M:%S", std::chrono::system_clock::from_time_t(expected_time_t), tz); EXPECT_EQ(time_string, lut.timeToString(expected_time_t)); - // it makes sense to let test execute all checks above to simplify debugging, - // but once we've found a bad apple, no need to dig deeper. + /// It makes sense to let test execute all checks above to simplify debugging, + /// but once we've found a bad apple, no need to dig deeper. if (countFailures(*test_info->result()).total >= max_failures_per_case) break; } } -/** Next tests are disabled due to following reasons: - * 1. They are huge and take enormous amount of time to run - * 2. Current implementation of DateLUTImpl is inprecise and some cases fail and it seems impractical to try to fix those. - * 3. Many failures (~300) were fixed while refactoring, about ~40 remain the same and 3 new introduced: - * "Asia/Gaza" - * "Pacific/Enderbury" - * "Pacific/Kiritimati" - * So it would be tricky to skip knonw failures to allow all unit tests to pass. 
- */ -INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010, +INSTANTIATE_TEST_SUITE_P(AllTimezones_Year2010, DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones()), @@ -484,7 +478,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010, })) ); -INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year1970_WHOLE, +INSTANTIATE_TEST_SUITE_P(AllTimezones_Year1970_WHOLE, DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones(false)), @@ -494,7 +488,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year1970_WHOLE, })) ); -INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010_WHOLE, +INSTANTIATE_TEST_SUITE_P(AllTimezones_Year2010_WHOLE, DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones(false)), @@ -504,7 +498,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2010_WHOLE, })) ); -INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2020_WHOLE, +INSTANTIATE_TEST_SUITE_P(AllTimezones_Year2020_WHOLE, DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones()), @@ -514,7 +508,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year2020_WHOLE, })) ); -INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_PreEpoch, +INSTANTIATE_TEST_SUITE_P(AllTimezones_PreEpoch, DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones(false)), @@ -524,7 +518,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_PreEpoch, })) ); -INSTANTIATE_TEST_SUITE_P(DISABLED_AllTimezones_Year1970, +INSTANTIATE_TEST_SUITE_P(AllTimezones_Year1970, DateLUTWithTimeZoneAndTimeRange, ::testing::Combine( ::testing::ValuesIn(allTimezones(false)), From 7052ecd446ef637d03cba1ac618a6a85d642dcb3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 7 Mar 2021 09:30:52 +0300 Subject: [PATCH 083/333] Fix error --- base/common/DateLUTImpl.h | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/base/common/DateLUTImpl.h 
b/base/common/DateLUTImpl.h index 16abd3dfb0e..b75274e8569 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -442,19 +442,12 @@ public: } - /** Only for time zones with/when offset from UTC is multiple of five minutes. - * This is true for all time zones: right now, all time zones have an offset that is multiple of 15 minutes. - * - * "By 1929, most major countries had adopted hourly time zones. Nepal was the last - * country to adopt a standard offset, shifting slightly to UTC+5:45 in 1986." - * - https://en.wikipedia.org/wiki/Time_zone#Offsets_from_UTC - * - * Also please note, that unix timestamp doesn't count "leap seconds": - * each minute, with added or subtracted leap second, spans exactly 60 unix timestamps. - */ inline unsigned toSecond(time_t t) const { - return (t + DATE_LUT_ADD) % 60; + auto res = t % 60; + if (likely(res >= 0)) + return res; + return res + 60; } inline unsigned toMinute(time_t t) const From 5cf42e87be76f0f30771f63a155986077168eb1d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 7 Mar 2021 09:31:01 +0300 Subject: [PATCH 084/333] Update test --- .../01702_toDateTime_from_string_clamping.reference | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference b/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference index 92639948fbc..644de54a6a2 100644 --- a/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference +++ b/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference @@ -2,8 +2,8 @@ SELECT toString(toDateTime('-922337203.6854775808', 1)); 1940-10-09 22:13:17.6 SELECT toString(toDateTime('9922337203.6854775808', 1)); -1925-07-26 00:46:43.6 +1925-07-26 23:46:43.6 SELECT toDateTime64(CAST('10000000000.1' AS Decimal64(1)), 1); -1928-01-11 00:46:40.1 +1928-01-11 23:46:40.1 SELECT toDateTime64(CAST('-10000000000.1' AS Decimal64(1)), 1); -2011-12-22 00:13:20.1 
+2011-12-22 23:38:20.1 From d3b422a3360a8fa768bf910e74c8303486ac4858 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 7 Mar 2021 10:10:15 +0300 Subject: [PATCH 085/333] Update test --- .../0_stateless/01699_timezoneOffset.reference | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/01699_timezoneOffset.reference b/tests/queries/0_stateless/01699_timezoneOffset.reference index 45f30314f5a..a1cc6391e6f 100644 --- a/tests/queries/0_stateless/01699_timezoneOffset.reference +++ b/tests/queries/0_stateless/01699_timezoneOffset.reference @@ -1,8 +1,8 @@ DST boundary test for Europe/Moscow: -0 1981-04-01 22:40:00 10800 355002000 -1 1981-04-01 22:50:00 10800 355002600 -2 1981-04-02 00:00:00 14400 355003200 -3 1981-04-02 00:10:00 14400 355003800 +0 1981-04-01 22:40:00 14400 354998400 +1 1981-04-01 22:50:00 14400 354999000 +2 1981-04-01 23:00:00 14400 354999600 +3 1981-04-01 23:10:00 14400 355000200 0 1981-09-30 23:00:00 14400 370724400 1 1981-09-30 23:10:00 14400 370725000 2 1981-09-30 23:20:00 14400 370725600 @@ -22,10 +22,10 @@ DST boundary test for Europe/Moscow: 16 1981-10-01 00:40:00 10800 370734000 17 1981-10-01 00:50:00 10800 370734600 DST boundary test for Asia/Tehran: -0 2020-03-21 22:40:00 12600 1584817800 -1 2020-03-21 22:50:00 12600 1584818400 -2 2020-03-22 00:00:00 16200 1584819000 -3 2020-03-22 00:10:00 16200 1584819600 +0 2020-03-21 22:40:00 16200 1584814200 +1 2020-03-21 22:50:00 16200 1584814800 +2 2020-03-21 23:00:00 16200 1584815400 +3 2020-03-21 23:10:00 16200 1584816000 0 2020-09-20 23:00:00 16200 1600626600 1 2020-09-20 23:10:00 16200 1600627200 2 2020-09-20 23:20:00 16200 1600627800 From 1200d9d9c5447a900a692f170af894f02d43147d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 7 Mar 2021 10:10:37 +0300 Subject: [PATCH 086/333] Range checks and monotonicity --- base/common/DateLUTImpl.h | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git 
a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index b75274e8569..6e968a0cd50 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -193,22 +193,28 @@ private: inline LUTIndex findIndex(time_t t) const { /// First guess. - UInt32 guess = ((t / 86400) + daynum_offset_epoch) & date_lut_mask; + Int64 guess = (t / 86400) + daynum_offset_epoch; /// For negative time_t the integer division was rounded up, so the guess is offset by one. if (unlikely(t < 0)) --guess; + if (guess < 0) + return LUTIndex(0); + if (guess >= DATE_LUT_SIZE) + return LUTIndex(DATE_LUT_SIZE - 1); + /// UTC offset is from -12 to +14 in all known time zones. This requires checking only three indices. - if (t >= lut[guess].date && t < lut[guess + 1].date) - return LUTIndex(guess); - /// Time zones that have offset 0 from UTC do daylight saving time change (if any) - /// towards increasing UTC offset (example: British Standard Time). - if (t >= lut[guess + 1].date) + if (t >= lut[guess].date) + { + if (guess + 1 >= DATE_LUT_SIZE || t < lut[guess + 1].date) + return LUTIndex(guess); + return LUTIndex(guess + 1); + } - return LUTIndex(guess - 1); + return LUTIndex(guess ? 
guess - 1 : 0); } inline LUTIndex toLUTIndex(DayNum d) const From 019ed517bc33892d107480ee30ddf53de8fd4fee Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 8 Mar 2021 03:05:06 +0300 Subject: [PATCH 087/333] Fix gcc warning --- src/Dictionaries/RangeHashedDictionary.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Dictionaries/RangeHashedDictionary.cpp b/src/Dictionaries/RangeHashedDictionary.cpp index 9fb1a57a381..aec641c58fe 100644 --- a/src/Dictionaries/RangeHashedDictionary.cpp +++ b/src/Dictionaries/RangeHashedDictionary.cpp @@ -491,8 +491,9 @@ void RangeHashedDictionary::getIdsAndDates( start_dates.push_back(value.range.left); end_dates.push_back(value.range.right); - if (is_date && static_cast(end_dates.back()) > DATE_LUT_MAX_DAY_NUM) - end_dates.back() = 0; + if constexpr (std::numeric_limits::max() > DATE_LUT_MAX_DAY_NUM) /// Avoid warning about tautological comparison in next line. + if (is_date && static_cast(end_dates.back()) > DATE_LUT_MAX_DAY_NUM) + end_dates.back() = 0; } } } From 6b309dcc5c7da70bc9896ff3047afa1f7aff6955 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 8 Mar 2021 03:14:39 +0300 Subject: [PATCH 088/333] Update tests --- tests/queries/0_stateless/00189_time_zones.reference | 7 ++++--- tests/queries/0_stateless/00189_time_zones.sql | 5 +++-- .../queries/0_stateless/01691_DateTime64_clamp.reference | 8 ++++---- .../01702_toDateTime_from_string_clamping.reference | 6 +++--- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/tests/queries/0_stateless/00189_time_zones.reference b/tests/queries/0_stateless/00189_time_zones.reference index 664c30056de..e7e5a71782a 100644 --- a/tests/queries/0_stateless/00189_time_zones.reference +++ b/tests/queries/0_stateless/00189_time_zones.reference @@ -179,13 +179,13 @@ toRelativeYearNum 44 44 44 -44 +45 toRelativeMonthNum 536 536 536 537 -536 +537 toRelativeWeekNum 2335 2335 @@ -197,12 +197,13 @@ toRelativeDayNum 16343 16343 16344 -16343 
+16344 toRelativeHourNum 392251 392251 392251 392251 +392252 toRelativeMinuteNum 23535110 23535110 diff --git a/tests/queries/0_stateless/00189_time_zones.sql b/tests/queries/0_stateless/00189_time_zones.sql index a0ef5b59517..36c7dfb402a 100644 --- a/tests/queries/0_stateless/00189_time_zones.sql +++ b/tests/queries/0_stateless/00189_time_zones.sql @@ -277,7 +277,8 @@ SELECT toRelativeDayNum(toDateTime(1412106600), 'Europe/Moscow') - toRelativeDay SELECT toRelativeDayNum(toDateTime(1412106600), 'Europe/Paris') - toRelativeDayNum(toDateTime(0), 'Europe/Paris'); SELECT toRelativeDayNum(toDateTime(1412106600), 'Europe/London') - toRelativeDayNum(toDateTime(0), 'Europe/London'); SELECT toRelativeDayNum(toDateTime(1412106600), 'Asia/Tokyo') - toRelativeDayNum(toDateTime(0), 'Asia/Tokyo'); -SELECT toRelativeDayNum(toDateTime(1412106600), 'Pacific/Pitcairn') - toRelativeDayNum(toDateTime(0), 'Pacific/Pitcairn'); +-- NOTE: toRelativeDayNum(toDateTime(0), 'Pacific/Pitcairn') overflows from -1 to 65535 +SELECT toUInt16(toRelativeDayNum(toDateTime(1412106600), 'Pacific/Pitcairn') - toRelativeDayNum(toDateTime(0), 'Pacific/Pitcairn')); /* toRelativeHourNum */ @@ -286,7 +287,7 @@ SELECT toRelativeHourNum(toDateTime(1412106600), 'Europe/Moscow') - toRelativeHo SELECT toRelativeHourNum(toDateTime(1412106600), 'Europe/Paris') - toRelativeHourNum(toDateTime(0), 'Europe/Paris'); SELECT toRelativeHourNum(toDateTime(1412106600), 'Europe/London') - toRelativeHourNum(toDateTime(0), 'Europe/London'); SELECT toRelativeHourNum(toDateTime(1412106600), 'Asia/Tokyo') - toRelativeHourNum(toDateTime(0), 'Asia/Tokyo'); --- known wrong result: SELECT toRelativeHourNum(toDateTime(1412106600), 'Pacific/Pitcairn') - toRelativeHourNum(toDateTime(0), 'Pacific/Pitcairn'); +SELECT toRelativeHourNum(toDateTime(1412106600), 'Pacific/Pitcairn') - toRelativeHourNum(toDateTime(0), 'Pacific/Pitcairn'); /* toRelativeMinuteNum */ diff --git a/tests/queries/0_stateless/01691_DateTime64_clamp.reference 
b/tests/queries/0_stateless/01691_DateTime64_clamp.reference index f29a9e2d1d5..881ab4feff8 100644 --- a/tests/queries/0_stateless/01691_DateTime64_clamp.reference +++ b/tests/queries/0_stateless/01691_DateTime64_clamp.reference @@ -17,11 +17,11 @@ SELECT toDateTime64(toFloat32(bitShiftLeft(toUInt64(1),33)), 2, 'Europe/Moscow') 2106-02-07 09:28:16.00 SELECT toDateTime64(toFloat64(bitShiftLeft(toUInt64(1),33)), 2, 'Europe/Moscow') FORMAT Null; -- These are outsize of extended range and hence clamped -SELECT toDateTime64(-1 * bitShiftLeft(toUInt64(1),35), 2); +SELECT toDateTime64(-1 * bitShiftLeft(toUInt64(1), 35), 2); 1925-01-01 02:00:00.00 -SELECT CAST(-1 * bitShiftLeft(toUInt64(1),35) AS DateTime64); +SELECT CAST(-1 * bitShiftLeft(toUInt64(1), 35) AS DateTime64); 1925-01-01 02:00:00.000 -SELECT CAST(bitShiftLeft(toUInt64(1),35) AS DateTime64); +SELECT CAST(bitShiftLeft(toUInt64(1), 35) AS DateTime64); 2282-12-31 03:00:00.000 -SELECT toDateTime64(bitShiftLeft(toUInt64(1),35), 2); +SELECT toDateTime64(bitShiftLeft(toUInt64(1), 35), 2); 2282-12-31 03:00:00.00 diff --git a/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference b/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference index 644de54a6a2..77da114be68 100644 --- a/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference +++ b/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference @@ -2,8 +2,8 @@ SELECT toString(toDateTime('-922337203.6854775808', 1)); 1940-10-09 22:13:17.6 SELECT toString(toDateTime('9922337203.6854775808', 1)); -1925-07-26 23:46:43.6 +2283-11-11 23:46:43.6 SELECT toDateTime64(CAST('10000000000.1' AS Decimal64(1)), 1); -1928-01-11 23:46:40.1 +2283-11-11 23:46:40.1 SELECT toDateTime64(CAST('-10000000000.1' AS Decimal64(1)), 1); -2011-12-22 23:38:20.1 +1925-01-01 23:09:20.1 From 460658aeabb8d9276bd3c5f620d09b4ca1fc601c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 8 Mar 2021 03:18:55 +0300 Subject: [PATCH 
089/333] Update tests --- src/Storages/StorageGenerateRandom.cpp | 2 +- .../01087_table_function_generate.reference | 60 +++++++++---------- .../01125_generate_random_qoega.reference | 2 +- .../01128_generate_random_nested.reference | 4 +- 4 files changed, 34 insertions(+), 34 deletions(-) diff --git a/src/Storages/StorageGenerateRandom.cpp b/src/Storages/StorageGenerateRandom.cpp index fd10691ecc4..f06daa3a2bd 100644 --- a/src/Storages/StorageGenerateRandom.cpp +++ b/src/Storages/StorageGenerateRandom.cpp @@ -215,7 +215,7 @@ ColumnPtr fillColumnWithRandomData( column->getData().resize(limit); for (size_t i = 0; i < limit; ++i) - column->getData()[i] = rng() % (DATE_LUT_MAX_DAY_NUM + 1); /// Slow + column->getData()[i] = rng() % (DATE_LUT_MAX_DAY_NUM + 1); return column; } diff --git a/tests/queries/0_stateless/01087_table_function_generate.reference b/tests/queries/0_stateless/01087_table_function_generate.reference index d7cc6b0a933..d8886945caa 100644 --- a/tests/queries/0_stateless/01087_table_function_generate.reference +++ b/tests/queries/0_stateless/01087_table_function_generate.reference @@ -1,14 +1,14 @@ UInt64 Int64 UInt32 Int32 UInt16 Int16 UInt8 Int8 -2804162938822577320 -2776833771540858 3467776823 1163715250 31161 -2916 220 -117 -7885388429666205427 -1363628932535403038 484159052 -308788249 43346 13638 143 -105 -4357435422797280898 1355609803008819271 4126129912 -852056475 34184 9166 49 33 -5935810273536892891 -804738887697332962 3109335413 -80126721 47877 -31421 186 -77 -368066018677693974 -4927165984347126295 1015254922 2026080544 46037 -29626 240 108 -8124171311239967992 -1179703908046100129 1720727300 -138469036 33028 -12819 138 16 -15657812979985370729 -5733276247123822513 3254757884 -500590428 3829 30527 3 -81 -18371568619324220532 -6793779541583578394 1686821450 -455892108 43475 2284 252 -90 -821735343441964030 3148260644406230976 256251035 -885069056 11643 11455 176 90 -9558594037060121162 -2907172753635797124 4276198376 1947296644 45922 26632 
97 43 +2804162938822577320 -2776833771540858 3467776823 1163715250 23903 -2916 220 -117 +7885388429666205427 -1363628932535403038 484159052 -308788249 44305 13638 143 -105 +4357435422797280898 1355609803008819271 4126129912 -852056475 58858 9166 49 33 +5935810273536892891 -804738887697332962 3109335413 -80126721 13655 -31421 186 -77 +368066018677693974 -4927165984347126295 1015254922 2026080544 21973 -29626 240 108 +8124171311239967992 -1179703908046100129 1720727300 -138469036 36175 -12819 138 16 +15657812979985370729 -5733276247123822513 3254757884 -500590428 13193 30527 3 -81 +18371568619324220532 -6793779541583578394 1686821450 -455892108 52282 2284 252 -90 +821735343441964030 3148260644406230976 256251035 -885069056 55255 11455 176 90 +9558594037060121162 -2907172753635797124 4276198376 1947296644 48701 26632 97 43 - Enum8(\'hello\' = 1, \'world\' = 5) hello @@ -47,16 +47,16 @@ h o - Date DateTime DateTime(\'Europe/Moscow\') -2077-09-17 1970-10-09 02:30:14 2074-08-12 11:31:27 -2005-11-19 2106-01-30 21:52:44 2097-05-25 07:54:35 -2007-02-24 2096-12-12 00:40:50 1988-08-10 11:16:31 -2019-06-30 2096-01-15 16:31:33 2063-10-20 08:48:17 -2039-01-16 2103-02-11 16:44:39 2036-10-09 04:29:10 -1994-11-03 1980-01-02 05:18:22 2055-12-23 12:33:52 -2083-08-20 2079-06-11 16:29:02 2000-12-05 17:46:24 -2030-06-25 2100-03-01 18:50:22 1993-03-25 01:19:12 -2087-03-16 2034-08-25 19:46:33 2045-12-10 16:47:40 -2006-04-30 2069-09-30 16:07:48 2084-08-26 03:33:12 +2113-06-12 1970-10-09 02:30:14 2074-08-12 11:31:27 +2103-11-03 2106-01-30 21:52:44 2097-05-25 07:54:35 +2008-03-16 2096-12-12 00:40:50 1988-08-10 11:16:31 +2126-11-26 2096-01-15 16:31:33 2063-10-20 08:48:17 +1991-02-02 2103-02-11 16:44:39 2036-10-09 04:29:10 +2096-11-03 1980-01-02 05:18:22 2055-12-23 12:33:52 +2024-12-16 2079-06-11 16:29:02 2000-12-05 17:46:24 +2085-04-07 2100-03-01 18:50:22 1993-03-25 01:19:12 +2135-05-30 2034-08-25 19:46:33 2045-12-10 16:47:40 +2094-12-18 2069-09-30 16:07:48 2084-08-26 03:33:12 - DateTime64(3) 
DateTime64(6) DateTime64(6, \'Europe/Moscow\') 1978-06-07 23:50:57.320 2013-08-28 10:21:54.010758 1991-08-25 16:23:26.140215 @@ -225,14 +225,14 @@ RL,{Xs\\tw [114] -84125.1554 ('2023-06-06 06:55:06.492','bf9ab359-ef9f-ad11-7e6c-160368b1e5ea') [124] -114719.5228 ('2010-11-11 22:57:23.722','c1046ffb-3415-cc3a-509a-e0005856d7d7') - -[] 1900051923 { -189530.5846 h -5.6279699579452485e47 ('1980-08-29','2090-10-31 19:35:45','2038-07-15 05:22:51.805','63d9a12d-d1cf-1f3a-57c6-9bc6dddd0975') 8502 -[-102,-118] 392272782 Eb -14818.0200 o -2.664492247169164e59 ('2059-02-10','1994-07-16 00:40:02','2034-02-02 05:30:44.960','4fa09948-d32e-8903-63df-43ad759e43f7') DA61 -[-71] 775049089 \N -158115.1178 w 4.1323844687113747e-305 ('1997-02-15','2062-08-12 23:41:53','2074-02-13 10:29:40.749','c4a44dd7-d009-6f65-1494-9daedfa8a124') 83A7 -[-28,100] 3675466147 { -146685.1749 h 3.6676044396877755e142 ('1997-10-26','2002-06-26 03:33:41','2002-12-02 05:46:03.455','98714b2c-65e7-b5cb-a040-421e260c6d8d') 4B94 -[-23] 2514120753 (`u, -119659.6174 w 1.3231258347475906e34 ('2055-11-20','2080-03-28 08:11:25','2073-07-10 12:19:58.146','003b3b6b-088f-f941-aeb9-c26e0ee72b8e') 6B1F -[11,-36] 3308237300 \N 171205.1896 \N 5.634708707075817e195 ('2009-03-18','2041-11-11 13:19:44','2044-03-18 17:34:17.814','9e60f4cb-6e55-1deb-5ac4-d66a86a8886d') 1964 -[39] 1614362420 `4A8P 157144.0630 o -1.1843143253872814e-255 ('1991-04-27','2066-03-02 11:07:49','1997-10-22 20:14:13.755','97685503-2609-d2b9-981c-02fd75d106cb') A35B -[48,-120] 3848918261 1 Date: Mon, 8 Mar 2021 03:28:25 +0300 Subject: [PATCH 090/333] Try to enable long performance test --- tests/performance/date_time_long.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/performance/date_time_long.xml b/tests/performance/date_time_long.xml index 1229631a434..3a61a5992e5 100644 --- a/tests/performance/date_time_long.xml +++ b/tests/performance/date_time_long.xml @@ -1,5 +1,4 @@ - long datetime_transform From 
831135432fa3662e280fccefa52a10769f6e3b17 Mon Sep 17 00:00:00 2001 From: Slach Date: Mon, 22 Feb 2021 20:41:23 +0500 Subject: [PATCH 091/333] add PostgreSQL engine and table function documentation Signed-off-by: Slach --- .../table-engines/integrations/postgresql.md | 102 ++++++++++++++++ .../external-dicts-dict-lifetime.md | 4 +- .../external-dicts-dict-sources.md | 54 ++++++++- .../table-functions/postgresql.md | 100 ++++++++++++++++ .../table-engines/integrations/mysql.md | 4 +- .../table-engines/integrations/postgresql.md | 102 ++++++++++++++++ .../external-dicts-dict-lifetime.md | 17 +-- .../external-dicts-dict-sources.md | 113 +++++++++++++++++- .../table-functions/postgresql.md | 99 +++++++++++++++ 9 files changed, 578 insertions(+), 17 deletions(-) create mode 100644 docs/en/engines/table-engines/integrations/postgresql.md create mode 100644 docs/en/sql-reference/table-functions/postgresql.md create mode 100644 docs/ru/engines/table-engines/integrations/postgresql.md create mode 100644 docs/ru/sql-reference/table-functions/postgresql.md diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md new file mode 100644 index 00000000000..4a81251e60a --- /dev/null +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -0,0 +1,102 @@ +--- +toc_priority: 8 +toc_title: PostgreSQL +--- + +# PosgtreSQL {#postgresql} + +The PostgreSQL engine allows you to perform `SELECT` queries on data that is stored on a remote PostgreSQL server. + +## Creating a Table {#creating-a-table} + +``` sql +CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] +( + name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1], + name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2], + ... 
+) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password'); +``` + +See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query. + +The table structure can differ from the original PostgreSQL table structure: + +- Column names should be the same as in the original PostgreSQL table, but you can use just some of these columns and in any order. +- Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. + +**Engine Parameters** + +- `host:port` — PostgreSQL server address. + +- `database` — Remote database name. + +- `table` — Remote table name. + +- `user` — PostgreSQL user. + +- `password` — User password. + +SELECT Queries on PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside read-only PostgreSQL transaction with commit after each `SELECT` query. + +Simple `WHERE` clauses such as `=, !=, >, >=, <, <=, IN` are executed on the PostgreSQL server. + +All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to PostgreSQL finishes. + +INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement. + +PostgreSQL Array types converts into ClickHouse arrays. 
+ +## Usage Example {#usage-example} + +Table in PostgreSQL: + +``` text +postgres=# CREATE TABLE "public"."test" ( +"int_id" SERIAL, +"int_nullable" INT NULL DEFAULT NULL, +"float" FLOAT NOT NULL, +"str" VARCHAR(100) NOT NULL DEFAULT '', +"float_nullable" FLOAT NULL DEFAULT NULL, +PRIMARY KEY (int_id)); + +CREATE TABLE + +postgres=# insert into test (int_id, str, "float") VALUES (1,'test',2); +INSERT 0 1 + +postgresql> select * from test; + int_id | int_nullable | float | str | float_nullable +--------+--------------+-------+------+---------------- + 1 | | 2 | test | +(1 row) +``` + +Table in ClickHouse, retrieving data from the PostgreSQL table created above: + +``` sql +CREATE TABLE default.postgresql_table +( + `float_nullable` Nullable(Float32), + `str` String, + `int_id` Int32 +) +ENGINE = PostgreSQL('localhost:5432', 'public', 'test', 'postges_user', 'postgres_password'); +``` + +``` sql +SELECT * FROM postgresql_table WHERE str IN ('test') +``` + +``` text +┌─float_nullable─┬─str──┬─int_id─┐ +│ ᴺᵁᴸᴸ │ test │ 1 │ +└────────────────┴──────┴────────┘ +1 rows in set. Elapsed: 0.019 sec. +``` + + +## See Also {#see-also} + +- [The ‘postgresql’ table function](../../../sql-reference/table-functions/postgresql.md) +- [Using PostgreSQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index 20486ebbcc8..32763e27ddd 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -19,6 +19,8 @@ Example of settings: ``` +or + ``` sql CREATE DICTIONARY (...) ... 
@@ -58,7 +60,7 @@ When upgrading the dictionaries, the ClickHouse server applies different logic d - For MySQL source, the time of modification is checked using a `SHOW TABLE STATUS` query (in case of MySQL 8 you need to disable meta-information caching in MySQL by `set global information_schema_stats_expiry=0`. - Dictionaries from other sources are updated every time by default. -For other sources (ODBC, ClickHouse, etc), you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps: +For other sources (ODBC, PostgreSQL, ClickHouse, etc), you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps: - The dictionary table must have a field that always changes when the source data is updated. - The settings of the source must specify a query that retrieves the changing field. The ClickHouse server interprets the query result as a row, and if this row has changed relative to its previous state, the dictionary is updated. Specify the query in the `` field in the settings for the [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md). 
diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 7cd26a9dffb..f8f4745bb16 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -65,6 +65,7 @@ Types of sources (`source_type`): - DBMS - [ODBC](#dicts-external_dicts_dict_sources-odbc) - [MySQL](#dicts-external_dicts_dict_sources-mysql) + - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql) - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) - [Redis](#dicts-external_dicts_dict_sources-redis) @@ -659,7 +660,7 @@ Example of settings: Setting fields: - `host` – The Cassandra host or comma-separated list of hosts. -- `port` – The port on the Cassandra servers. If not specified, default port is used. +- `port` – The port on the Cassandra servers. If not specified, default port 9042 is used. - `user` – Name of the Cassandra user. - `password` – Password of the Cassandra user. - `keyspace` – Name of the keyspace (database). @@ -673,4 +674,55 @@ Default value is 1 (the first key column is a partition key and other key column - `where` – Optional selection criteria. - `max_threads` – The maximum number of threads to use for loading data from multiple partitions in compose key dictionaries. +### PosgreSQL {#dicts-external_dicts_dict_sources-postgresql} + +Example of settings: + +``` xml + + + 5432 + clickhouse + qwerty + db_name + table_name
+ id=10 + SQL_QUERY + + +``` + +or + +``` sql +SOURCE(POSTGRESQL( + port 5432 + host 'postgresql-hostname' + user 'postgres_user' + password 'postgres_password' + db 'db_name' + table 'table_name' + replica(host 'example01-1' port 5432 priority 1) + replica(host 'example01-2' port 5432 priority 2) + where 'id=10' + invalidate_query 'SQL_QUERY' +)) +``` + +Setting fields: + +- `host` – The host on the PostgreSQL server. You can specify it for all replicas, or for each one individually (inside ``). +- `port` – The port on the PostgreSQL server. You can specify it for all replicas, or for each one individually (inside ``). +- `user` – Name of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside ``). +- `password` – Password of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside ``). +- `replica` – Section of replica configurations. There can be multiple sections. + - `replica/host` – The PostgreSQL host. + - `replica/port` – The PostgreSQL port. + - `replica/priority` – The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority. +- `db` – Name of the database. +- `table` – Name of the table. +- `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in PostgreSQL, for example, `id > 10 AND id < 20`. Optional parameter. +- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). 
+ + [Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/) diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md new file mode 100644 index 00000000000..95724694d7a --- /dev/null +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -0,0 +1,100 @@ +--- +toc_priority: 42 +toc_title: postgresql +--- + +# postgresql {#postgresql} + +Allows `SELECT` and `INSERT` queries to be performed on data that is stored on a remote PostgreSQL server. + +**Syntax** + +``` sql +postgresql('host:port', 'database', 'table', 'user', 'password') +``` + +**Arguments** + +- `host:port` — PostgreSQL server address. + +- `database` — Remote database name. + +- `table` — Remote table name. + +- `user` — PostgreSQL user. + +- `password` — User password. + + +SELECT Queries on PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside read-only PostgreSQL transaction with commit after each `SELECT` query. + +Simple `WHERE` clauses such as `=, !=, >, >=, <, <=, IN` are executed on the PostgreSQL server. + +All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to PostgreSQL finishes. + +INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement. + +PostgreSQL Array types converts into ClickHouse arrays. + +**Returned Value** + +A table object with the same columns as the original PostgreSQL table. + +!!! info "Note" + In the `INSERT` query to distinguish table function `postgresql(...)` from table name with column names list you must use keywords `FUNCTION` or `TABLE FUNCTION`. See examples below. 
+
+**Examples**
+
+Table in PostgreSQL:
+
+``` text
+postgres=# CREATE TABLE "public"."test" (
+"int_id" SERIAL,
+"int_nullable" INT NULL DEFAULT NULL,
+"float" FLOAT NOT NULL,
+"str" VARCHAR(100) NOT NULL DEFAULT '',
+"float_nullable" FLOAT NULL DEFAULT NULL,
+PRIMARY KEY (int_id));
+
+CREATE TABLE
+
+postgres=# insert into test (int_id, str, "float") VALUES (1,'test',2);
+INSERT 0 1
+
+postgresql> select * from test;
+ int_id | int_nullable | float | str  | float_nullable
+--------+--------------+-------+------+----------------
+      1 |              |     2 | test |
+(1 row)
+```
+
+Selecting data from ClickHouse:
+
+```sql
+SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password') WHERE str IN ('test');
+```
+
+``` text
+┌─int_id─┬─int_nullable─┬─float─┬─str──┬─float_nullable─┐
+│      1 │         ᴺᵁᴸᴸ │     2 │ test │           ᴺᵁᴸᴸ │
+└────────┴──────────────┴───────┴──────┴────────────────┘
+```
+
+Inserting:
+
+```sql
+INSERT INTO TABLE FUNCTION postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password') (int_id, float) VALUES (2, 3);
+SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password');
+```
+
+``` text
+┌─int_id─┬─int_nullable─┬─float─┬─str──┬─float_nullable─┐
+│      1 │         ᴺᵁᴸᴸ │     2 │ test │           ᴺᵁᴸᴸ │
+│      2 │         ᴺᵁᴸᴸ │     3 │      │           ᴺᵁᴸᴸ │
+└────────┴──────────────┴───────┴──────┴────────────────┘
+```
+
+**See Also**
+
+- [The ‘PostgreSQL’ table engine](../../engines/table-engines/integrations/postgresql.md)
+- [Using PostgreSQL as a source of external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
diff --git a/docs/ru/engines/table-engines/integrations/mysql.md b/docs/ru/engines/table-engines/integrations/mysql.md
index 3370e9b06d0..2254ddcb68c 100644
--- a/docs/ru/engines/table-engines/integrations/mysql.md
+++ b/docs/ru/engines/table-engines/integrations/mysql.md
@@ -18,12 +18,12 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER
 cluster]
 ) ENGINE = MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
 ```
 
-Смотрите подробное описание запроса [CREATE TABLE](../../../engines/table-engines/integrations/mysql.md#create-table-query).
+Смотрите подробное описание запроса [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query).
 
 Структура таблицы может отличаться от исходной структуры таблицы MySQL:
 
 - Имена столбцов должны быть такими же, как в исходной таблице MySQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке.
-- Типы столбцов могут отличаться от типов в исходной таблице MySQL. ClickHouse пытается [приводить](../../../engines/table-engines/integrations/mysql.md#type_conversion_function-cast) значения к типам данных ClickHouse.
+- Типы столбцов могут отличаться от типов в исходной таблице MySQL. ClickHouse пытается [приводить](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse.
 
 **Параметры движка**
 
diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md
new file mode 100644
index 00000000000..4febd23067b
--- /dev/null
+++ b/docs/ru/engines/table-engines/integrations/postgresql.md
@@ -0,0 +1,102 @@
+---
+toc_priority: 8
+toc_title: PostgreSQL
+---
+
+# PostgreSQL {#postgresql}
+
+Движок PostgreSQL позволяет выполнять запросы `SELECT` над данными, хранящимися на удалённом PostgreSQL сервере.
+
+## Создание таблицы {#creating-a-table}
+
+``` sql
+CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
+(
+    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
+    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
+    ...
+) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password'); +``` + +Смотрите подробное описание запроса [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query). + +Структура таблицы может отличаться от исходной структуры таблицы PostgreSQL: + +- Имена столбцов должны быть такими же, как в исходной таблице MySQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке. +- Типы столбцов могут отличаться от типов в исходной таблице PostgreSQL. ClickHouse пытается [приводить](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. + +**Параметры движка** + +- `host:port` — адрес сервера PostgreSQL. + +- `database` — Имя базы данных на сервере PostgreSQL. + +- `table` — Имя таблицы. + +- `user` — Имя пользователя PostgreSQL. + +- `password` — Пароль пользователя PostgreSQL. + +SELECT запросы на стороне PostgreSQL выполняются как `COPY (SELECT ...) TO STDOUT` внутри транзакции PostgreSQL только на чтение с коммитом после каждого `SELECT` запроса. + +Простые условия для `WHERE` такие как `=, !=, >, >=, <, <=, IN` исполняются на стороне PostgreSQL сервера. + +Все операции объединения, аггрегации, сортировки, условия `IN [ array ]` и ограничения `LIMIT` выполняются на стороне ClickHouse только после того как запрос к PostgreSQL закончился. + +INSERT запросы на стороне PostgreSQL выполняются как `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` внутри PostgreSQL транзакции с автоматическим коммитом после каждого `INSERT` запроса. + +PostgreSQL массивы конвертируются в массивы ClickHouse. 
+
+## Пример использования {#usage-example}
+
+Таблица в PostgreSQL:
+
+``` text
+postgres=# CREATE TABLE "public"."test" (
+"int_id" SERIAL,
+"int_nullable" INT NULL DEFAULT NULL,
+"float" FLOAT NOT NULL,
+"str" VARCHAR(100) NOT NULL DEFAULT '',
+"float_nullable" FLOAT NULL DEFAULT NULL,
+PRIMARY KEY (int_id));
+
+CREATE TABLE
+
+postgres=# insert into test (int_id, str, "float") VALUES (1,'test',2);
+INSERT 0 1
+
+postgresql> select * from test;
+ int_id | int_nullable | float | str  | float_nullable
+--------+--------------+-------+------+----------------
+      1 |              |     2 | test |
+(1 row)
+```
+
+Таблица в ClickHouse, получение данных из PostgreSQL таблицы, созданной выше:
+
+``` sql
+CREATE TABLE default.postgresql_table
+(
+    `float_nullable` Nullable(Float32),
+    `str` String,
+    `int_id` Int32
+)
+ENGINE = PostgreSQL('localhost:5432', 'public', 'test', 'postgres_user', 'postgres_password');
+```
+
+``` sql
+SELECT * FROM postgresql_table WHERE str IN ('test')
+```
+
+``` text
+┌─float_nullable─┬─str──┬─int_id─┐
+│           ᴺᵁᴸᴸ │ test │      1 │
+└────────────────┴──────┴────────┘
+1 rows in set. Elapsed: 0.019 sec.
+```
+
+
+## Смотрите также {#see-also}
+
+- [Табличная функция ‘postgresql’](../../../sql-reference/table-functions/postgresql.md)
+- [Использование PostgreSQL в качестве источника для внешнего словаря](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
index ec0fb8e0ee5..f816caa3aa5 100644
--- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
+++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
@@ -28,7 +28,7 @@ LIFETIME(300)
 ...
 ```
 
-Настройка `0` запрещает обновление словарей.
+Настройка `0` (`LIFETIME(0)`) запрещает обновление словарей. Можно задать интервал, внутри которого ClickHouse равномерно-случайно выберет время для обновления. Это необходимо для распределения нагрузки на источник словаря при обновлении на большом количестве серверов. @@ -51,16 +51,19 @@ LIFETIME(300) LIFETIME(MIN 300 MAX 360) ``` +Если `0` и `0`, ClickHouse не перегружает словарь по истечению времени. +В этм случае, ClickHouse может перезагрузить данные словаря если изменился XML файл с конфигурацией словаря или если была выполнена команда `SYSTEM RELOAD DICTIONARY`. + При обновлении словарей сервер ClickHouse применяет различную логику в зависимости от типа [источника](external-dicts-dict-sources.md): -> - У текстового файла проверяется время модификации. Если время изменилось по отношению к запомненному ранее, то словарь обновляется. -> - Для MySQL источника, время модификации проверяется запросом `SHOW TABLE STATUS` (для MySQL 8 необходимо отключить кеширование мета-информации в MySQL `set global information_schema_stats_expiry=0`. -> - Словари из других источников по умолчанию обновляются каждый раз. +- У текстового файла проверяется время модификации. Если время изменилось по отношению к запомненному ранее, то словарь обновляется. +- Для MySQL источника, время модификации проверяется запросом `SHOW TABLE STATUS` (для MySQL 8 необходимо отключить кеширование мета-информации в MySQL `set global information_schema_stats_expiry=0`. +- Словари из других источников по умолчанию обновляются каждый раз. -Для других источников (ODBC, ClickHouse и т.д.) можно настроить запрос, который позволит обновлять словари только в случае их фактического изменения, а не каждый раз. Чтобы это сделать необходимо выполнить следующие условия/действия: +Для других источников (ODBC, PostgreSQL, ClickHouse и т.д.) можно настроить запрос, который позволит обновлять словари только в случае их фактического изменения, а не каждый раз. 
Чтобы это сделать необходимо выполнить следующие условия/действия: -> - В таблице словаря должно быть поле, которое гарантированно изменяется при обновлении данных в источнике. -> - В настройках источника указывается запрос, который получает изменяющееся поле. Результат запроса сервер ClickHouse интерпретирует как строку и если эта строка изменилась по отношению к предыдущему состоянию, то словарь обновляется. Запрос следует указывать в поле `` настроек [источника](external-dicts-dict-sources.md). +- В таблице словаря должно быть поле, которое гарантированно изменяется при обновлении данных в источнике. +- В настройках источника указывается запрос, который получает изменяющееся поле. Результат запроса сервер ClickHouse интерпретирует как строку и если эта строка изменилась по отношению к предыдущему состоянию, то словарь обновляется. Запрос следует указывать в поле `` настроек [источника](external-dicts-dict-sources.md). Пример настройки: diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 77275b65a05..a6142cc210d 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -65,6 +65,7 @@ SETTINGS(format_csv_allow_single_quotes = 0) - СУБД: - [ODBC](#dicts-external_dicts_dict_sources-odbc) - [MySQL](#dicts-external_dicts_dict_sources-mysql) + - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql) - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse) - [MongoDB](#dicts-external_dicts_dict_sources-mongodb) - [Redis](#dicts-external_dicts_dict_sources-redis) @@ -313,6 +314,7 @@ PRIMARY KEY id SOURCE(ODBC(connection_string 'DSN=myconnection' table 'postgresql_table')) LAYOUT(HASHED()) LIFETIME(MIN 300 MAX 360) +``` Может понадобиться в `odbc.ini` указать полный путь до 
библиотеки с драйвером `DRIVER=/usr/local/lib/psqlodbcw.so`. @@ -320,15 +322,15 @@ LIFETIME(MIN 300 MAX 360) ОС Ubuntu. -Установка драйвера: : +Установка драйвера: ```bash $ sudo apt-get install tdsodbc freetds-bin sqsh ``` -Настройка драйвера: : +Настройка драйвера: -``` bash +```bash $ cat /etc/freetds/freetds.conf ... @@ -338,8 +340,11 @@ $ sudo apt-get install tdsodbc freetds-bin sqsh tds version = 7.0 client charset = UTF-8 + # тестирование TDS соединения + $ sqsh -S MSSQL -D database -U user -P password + + $ cat /etc/odbcinst.ini - ... [FreeTDS] Description = FreeTDS @@ -348,8 +353,8 @@ $ sudo apt-get install tdsodbc freetds-bin sqsh FileUsage = 1 UsageCount = 5 - $ cat ~/.odbc.ini - ... + $ cat /etc/odbc.ini + # $ cat ~/.odbc.ini # если вы вошли из под пользователя из под которого запущен ClickHouse [MSSQL] Description = FreeTDS @@ -359,8 +364,15 @@ $ sudo apt-get install tdsodbc freetds-bin sqsh UID = test PWD = test Port = 1433 + + + # (не обязательно) тест ODBC соединения (используйте isql поставляемый вместе с [unixodbc](https://packages.debian.org/sid/unixodbc)-package) + $ isql -v MSSQL "user" "password" ``` +Примечание: +- чтобы определить самую раннюю версию TDS, которая поддерживается определенной версией SQL Server, обратитесь к документации продукта или посмотрите на [MS-TDS Product Behavior](https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-tds/135d0ebe-5c4c-4a94-99bf-1811eccb9f4a) + Настройка словаря в ClickHouse: ``` xml @@ -624,4 +636,93 @@ SOURCE(REDIS( - `storage_type` – способ хранения ключей. Необходимо использовать `simple` для источников с одним столбцом ключей, `hash_map` – для источников с двумя столбцами ключей. Источники с более, чем двумя столбцами ключей, не поддерживаются. Может отсутствовать, значение по умолчанию `simple`. - `db_index` – номер базы данных. Может отсутствовать, значение по умолчанию 0. 
+### Cassandra {#dicts-external_dicts_dict_sources-cassandra} + +Пример настройки: + +``` xml + + + localhost + 9042 + username + qwerty123 + database_name + table_name + 1 + 1 + One + "SomeColumn" = 42 + 8 + + +``` + +Поля настройки: +- `host` – Имя хоста с установленной Cassandra или разделенный через запятую список хостов. +- `port` – Порт на серверах Cassandra. Если не указан, используется значение по умолчанию 9042. +- `user` – Имя пользователя для соединения с Cassandra. +- `password` – Пароль для соединения с Cassandra. +- `keyspace` – Имя keyspace (база данных). +- `column_family` – Имя семейства столбцов (таблица). +- `allow_filering` – Флаг, разрешающий или не разрешающий потенциально дорогостоящие условия на кластеризации ключевых столбцов. Значение по умолчанию 1. +- `partition_key_prefix` – Количество партиций ключевых столбцов в первичном ключе таблицы Cassandra. +Необходимо для составления ключей словаря. Порядок ключевых столбцов в определении словеря должен быть таким же как в Cassandra. +Значение по умолчанию 1 (первый ключевой столбец это ключ партицирования, остальные ключевые столбцы - ключи кластеризации). +- `consistency` – Уровень консистентности. Возмодные значения: `One`, `Two`, `Three`, + `All`, `EachQuorum`, `Quorum`, `LocalQuorum`, `LocalOne`, `Serial`, `LocalSerial`. Значение по умолчанию `One`. +- `where` – Опциональный критерий выборки. +- `max_threads` – Максимальное кол-во тредов для загрузки данных из нескольких партиций в словарь. + +### PosgreSQL {#dicts-external_dicts_dict_sources-postgresql} + +Пример настройки: + +``` xml + + + 5432 + clickhouse + qwerty + db_name + table_name
+ id=10 + SQL_QUERY + + +``` + +или + +``` sql +SOURCE(POSTGRESQL( + port 5432 + host 'postgresql-hostname' + user 'postgres_user' + password 'postgres_password' + db 'db_name' + table 'table_name' + replica(host 'example01-1' port 5432 priority 1) + replica(host 'example01-2' port 5432 priority 2) + where 'id=10' + invalidate_query 'SQL_QUERY' +)) +``` + +Setting fields: + +- `host` – Хост для соединения с PostgreSQL. Вы можете указать его для всех реплик или задать индивидуально для каждой релпики (внутри ``). +- `port` – Порт для соединения с PostgreSQL. Вы можете указать его для всех реплик или задать индивидуально для каждой релпики (внутри ``). +- `user` – Имя пользователя для соединения с PostgreSQL. Вы можете указать его для всех реплик или задать индивидуально для каждой релпики (внутри ``). +- `password` – Пароль для пользователя PostgreSQL. +- `replica` – Section of replica configurations. There can be multiple sections. + - `replica/host` – хост PostgreSQL. + - `replica/port` – порт PostgreSQL . + - `replica/priority` – Приоритет реплики. Во время попытки соединения, ClickHouse будет перебирать реплики в порядке приоритет. Меньшее значение означает более высокий приоритет. +- `db` – Имя базы данных. +- `table` – Имя таблицы. +- `where` – Условие выборки. Синтаксис для условий такой же как для `WHERE` выражения в PostgreSQL, для примера, `id > 10 AND id < 20`. Необязательный параметр. +- `invalidate_query` – Запрос для проверки условия загрузки словаря. Необязательный параметр. Читайте больше в разделе [Обновление словарей](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). 
+
+ [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/external_dicts_dict_sources/)
diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md
new file mode 100644
index 00000000000..5e6bcc0406b
--- /dev/null
+++ b/docs/ru/sql-reference/table-functions/postgresql.md
@@ -0,0 +1,99 @@
+---
+toc_priority: 42
+toc_title: postgresql
+---
+
+# postgresql {#postgresql}
+
+Позволяет выполнять запросы `SELECT` над данными, хранящимися на удалённом PostgreSQL сервере.
+
+**Синтаксис**
+``` sql
+postgresql('host:port', 'database', 'table', 'user', 'password')
+```
+
+**Параметры**
+
+- `host:port` — адрес сервера PostgreSQL.
+
+- `database` — имя базы данных на удалённом сервере.
+
+- `table` — имя таблицы на удалённом сервере.
+
+- `user` — пользователь PostgreSQL.
+
+- `password` — пароль пользователя.
+
+
+SELECT запросы на стороне PostgreSQL выполняются как `COPY (SELECT ...) TO STDOUT` внутри транзакции PostgreSQL только на чтение с коммитом после каждого `SELECT` запроса.
+
+Простые условия для `WHERE` такие как `=, !=, >, >=, <, <=, IN` исполняются на стороне PostgreSQL сервера.
+
+Все операции объединения, агрегации, сортировки, условия `IN [ array ]` и ограничения `LIMIT` выполняются на стороне ClickHouse только после того как запрос к PostgreSQL закончился.
+
+INSERT запросы на стороне PostgreSQL выполняются как `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` внутри PostgreSQL транзакции с автоматическим коммитом после каждого `INSERT` запроса.
+
+PostgreSQL массивы конвертируются в массивы ClickHouse.
+
+**Возвращаемое значение**
+
+Объект таблицы с теми же столбцами, что и в исходной таблице PostgreSQL.
+
+!!! info "Примечание"
+    В запросах `INSERT` для того чтобы отличить табличную функцию `postgresql(...)` от таблицы со списком имен столбцов вы должны указывать ключевые слова `FUNCTION` или `TABLE FUNCTION`. Смотрите примеры ниже.
+ +**Примеры** + +Таблица в PostgreSQL: + +``` text +postgres=# CREATE TABLE "public"."test" ( +"int_id" SERIAL, +"int_nullable" INT NULL DEFAULT NULL, +"float" FLOAT NOT NULL, +"str" VARCHAR(100) NOT NULL DEFAULT '', +"float_nullable" FLOAT NULL DEFAULT NULL, +PRIMARY KEY (int_id)); + +CREATE TABLE + +postgres=# insert into test (int_id, str, "float") VALUES (1,'test',2); +INSERT 0 1 + +postgresql> select * from test; + int_id | int_nullable | float | str | float_nullable +--------+--------------+-------+------+---------------- + 1 | | 2 | test | +(1 row) +``` + +Получение данных в ClickHouse: + +```sql +SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password') WHERE str IN ('test'); +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─str──┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ 2 │ test │ ᴺᵁᴸᴸ │ +└────────┴──────────────┴───────┴──────┴────────────────┘ +``` + +Вставка: + +```sql +INSERT INTO TABLE FUNCTION postgresql('localhost:5432', 'test', 'test', 'postgrsql_user', 'password') (int_id, float) VALUES (2, 3); +SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password'); +``` + +``` text +┌─int_id─┬─int_nullable─┬─float─┬─str──┬─float_nullable─┐ +│ 1 │ ᴺᵁᴸᴸ │ 2 │ test │ ᴺᵁᴸᴸ │ +│ 2 │ ᴺᵁᴸᴸ │ 3 │ │ ᴺᵁᴸᴸ │ +└────────┴──────────────┴───────┴──────┴────────────────┘ +``` + +**Смотрите также** + +- [Движок таблиц ‘PostgreSQL’](../../sql-reference/table-functions/postgresql.md) +- [Использование PostgreSQL как источника данных для внешнего словаря](../../sql-reference/table-functions/postgresql.md#dicts-external_dicts_dict_sources-postgresql) From 5791cf5f2b9ab2eb604d090dfa977702df823b8c Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 23 Feb 2021 08:57:50 +0500 Subject: [PATCH 092/333] fix review comments https://github.com/ClickHouse/ClickHouse/pull/21078#discussion_r580413875 and https://github.com/ClickHouse/ClickHouse/pull/21078#discussion_r580410204 Signed-off-by: Slach --- 
docs/en/engines/table-engines/integrations/mysql.md | 1 + docs/en/engines/table-engines/integrations/odbc.md | 1 + docs/en/engines/table-engines/integrations/postgresql.md | 2 ++ docs/en/sql-reference/table-functions/postgresql.md | 1 + docs/ru/engines/table-engines/integrations/mysql.md | 1 + docs/ru/engines/table-engines/integrations/odbc.md | 1 + docs/ru/engines/table-engines/integrations/postgresql.md | 2 ++ docs/ru/sql-reference/table-functions/postgresql.md | 1 + src/Core/Settings.h | 2 +- 9 files changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index 2cb1facce91..2ea8ea95958 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -24,6 +24,7 @@ The table structure can differ from the original MySQL table structure: - Column names should be the same as in the original MySQL table, but you can use just some of these columns and in any order. - Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. +- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is true, if false - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. 
**Engine Parameters** diff --git a/docs/en/engines/table-engines/integrations/odbc.md b/docs/en/engines/table-engines/integrations/odbc.md index fffc125b0ff..8083d644deb 100644 --- a/docs/en/engines/table-engines/integrations/odbc.md +++ b/docs/en/engines/table-engines/integrations/odbc.md @@ -29,6 +29,7 @@ The table structure can differ from the source table structure: - Column names should be the same as in the source table, but you can use just some of these columns and in any order. - Column types may differ from those in the source table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. +- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is true, if false - table function will not make nullable columns and will insert default values instead of nulls. This is also applicable for null values inside array data types. **Engine Parameters** diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index 4a81251e60a..6153e3a29a2 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -24,6 +24,7 @@ The table structure can differ from the original PostgreSQL table structure: - Column names should be the same as in the original PostgreSQL table, but you can use just some of these columns and in any order. - Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. +- Setting `external_table_functions_use_nulls` defines how to handle Nullable columns. Default is 1, if 0 - table function will not make nullable columns and will insert default values instead of nulls. 
This is also applicable for null values inside array data types. **Engine Parameters** @@ -46,6 +47,7 @@ All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` samp INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement. PostgreSQL Array types converts into ClickHouse arrays. +Be careful in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse only allows multidimensional arrays of the same dimension in all rows. ## Usage Example {#usage-example} diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md index 95724694d7a..88f5b26b99b 100644 --- a/docs/en/sql-reference/table-functions/postgresql.md +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -35,6 +35,7 @@ All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` samp INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement. PostgreSQL Array types converts into ClickHouse arrays. +Be careful in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse only allows multidimensional arrays of the same dimension in all rows. 
**Returned Value** diff --git a/docs/ru/engines/table-engines/integrations/mysql.md b/docs/ru/engines/table-engines/integrations/mysql.md index 2254ddcb68c..459f8844ce8 100644 --- a/docs/ru/engines/table-engines/integrations/mysql.md +++ b/docs/ru/engines/table-engines/integrations/mysql.md @@ -24,6 +24,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - Имена столбцов должны быть такими же, как в исходной таблице MySQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов в исходной таблице MySQL. ClickHouse пытается [приводить](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) значения к типам данных ClickHouse. +- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. По умолчанию 1, если 0 - табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. **Параметры движка** diff --git a/docs/ru/engines/table-engines/integrations/odbc.md b/docs/ru/engines/table-engines/integrations/odbc.md index 97317d647c8..898d569d504 100644 --- a/docs/ru/engines/table-engines/integrations/odbc.md +++ b/docs/ru/engines/table-engines/integrations/odbc.md @@ -29,6 +29,7 @@ ENGINE = ODBC(connection_settings, external_database, external_table) - Имена столбцов должны быть такими же, как в исходной таблице, но вы можете использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов аналогичных столбцов в исходной таблице. ClickHouse пытается [приводить](../../../engines/table-engines/integrations/odbc.md#type_conversion_function-cast) значения к типам данных ClickHouse. +- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. 
По умолчанию 1, если 0 - табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. **Параметры движка** diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md index 4febd23067b..85512e0ea26 100644 --- a/docs/ru/engines/table-engines/integrations/postgresql.md +++ b/docs/ru/engines/table-engines/integrations/postgresql.md @@ -24,6 +24,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - Имена столбцов должны быть такими же, как в исходной таблице MySQL, но вы можете использовать только некоторые из этих столбцов и в любом порядке. - Типы столбцов могут отличаться от типов в исходной таблице PostgreSQL. ClickHouse пытается [приводить](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types. +- Настройка `external_table_functions_use_nulls` определяет как обрабатывать Nullable столбцы. По умолчанию 1, если 0 - табличная функция не будет делать nullable столбцы и будет вместо null выставлять значения по умолчанию для скалярного типа. Это также применимо для null значений внутри массивов. **Параметры движка** @@ -46,6 +47,7 @@ SELECT запросы на стороне PostgreSQL выполняются ка INSERT запросы на стороне PostgreSQL выполняются как `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` внутри PostgreSQL транзакции с автоматическим коммитом после каждого `INSERT` запроса. PostgreSQL массивы конвертируются в массивы ClickHouse. +Будьте осторожны в PostgreSQL многомерные массивы могут содержать в себе другие массивы с разным кол-вом элементов в разных подмассивах, но внутри ClickHouse допустипы только многомерные массивы с одинаковым кол-вом элементов в каждом вложенном массиве. 
## Пример использования {#usage-example} diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md index 5e6bcc0406b..338e7d8e7f2 100644 --- a/docs/ru/sql-reference/table-functions/postgresql.md +++ b/docs/ru/sql-reference/table-functions/postgresql.md @@ -34,6 +34,7 @@ SELECT запросы на стороне PostgreSQL выполняются ка INSERT запросы на стороне PostgreSQL выполняются как `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` внутри PostgreSQL транзакции с автоматическим коммитом после каждого `INSERT` запроса. PostgreSQL массивы конвертируются в массивы ClickHouse. +Будьте осторожны в PostgreSQL многомерные массивы могут содержать в себе другие массивы с разным кол-вом элементов в разных подмассивах, но внутри ClickHouse допустипы только многомерные массивы с одинаковым кол-вом элементов в каждом вложенном массиве. **Возвращаемое значение** diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 8afc08da21a..cf8d34bb56f 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -355,7 +355,7 @@ class IColumn; M(UInt64, read_in_order_two_level_merge_threshold, 100, "Minimal number of parts to read to run preliminary merge step during multithread reading in order of primary key.", 0) \ M(Bool, low_cardinality_allow_in_native_format, true, "Use LowCardinality type in Native format. Otherwise, convert LowCardinality columns to ordinary for select query, and convert ordinary columns to required LowCardinality for insert query.", 0) \ M(Bool, cancel_http_readonly_queries_on_client_close, false, "Cancel HTTP readonly queries when a client closes the connection without waiting for response.", 0) \ - M(Bool, external_table_functions_use_nulls, true, "If it is set to true, external table functions will implicitly use Nullable type if needed. Otherwise NULLs will be substituted with default values. 
Currently supported only by 'mysql' and 'odbc' table functions.", 0) \ + M(Bool, external_table_functions_use_nulls, true, "If it is set to true, external table functions will implicitly use Nullable type if needed. Otherwise NULLs will be substituted with default values. Currently supported only by 'mysql', 'postgresql' and 'odbc' table functions.", 0) \ \ M(Bool, allow_hyperscan, true, "Allow functions that use Hyperscan library. Disable to avoid potentially long compilation times and excessive resource usage.", 0) \ M(Bool, allow_simdjson, true, "Allow using simdjson library in 'JSON*' functions if AVX2 instructions are available. If disabled rapidjson will be used.", 0) \ From 7c91b1d6e60a230288817d3d331e592a8acb7c63 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 24 Feb 2021 10:00:15 +0500 Subject: [PATCH 093/333] fix multi dimensional array warning - https://github.com/ClickHouse/ClickHouse/pull/21078#discussion_r580829045 Signed-off-by: Slach --- docs/en/engines/table-engines/integrations/postgresql.md | 2 +- docs/en/sql-reference/table-functions/postgresql.md | 2 +- docs/ru/engines/table-engines/integrations/postgresql.md | 2 +- docs/ru/sql-reference/table-functions/postgresql.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index 6153e3a29a2..b82b97bfed0 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -47,7 +47,7 @@ All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` samp INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement. PostgreSQL Array types converts into ClickHouse arrays. 
-Be careful in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse only allows multidimensional arrays of the same dimension in all rows. +Be careful in PostgreSQL an array data created like type_name[] may contain multi-dimensional arrays of different dimensions in different rows in same column, but in ClickHouse only allows multi-dimensional arrays of the same count of dimension in all rows in same column in table. ## Usage Example {#usage-example} diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md index 88f5b26b99b..42790e0d870 100644 --- a/docs/en/sql-reference/table-functions/postgresql.md +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -35,7 +35,7 @@ All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` samp INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement. PostgreSQL Array types converts into ClickHouse arrays. -Be careful in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse only allows multidimensional arrays of the same dimension in all rows. +Be careful in PostgreSQL an array data created like type_name[] may contain multi-dimensional arrays of different dimensions in different rows in same column, but in ClickHouse only allows multi-dimensional arrays of the same count of dimension in all rows in same column in table. 
**Returned Value** diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md index 85512e0ea26..3ab98682203 100644 --- a/docs/ru/engines/table-engines/integrations/postgresql.md +++ b/docs/ru/engines/table-engines/integrations/postgresql.md @@ -47,7 +47,7 @@ SELECT запросы на стороне PostgreSQL выполняются ка INSERT запросы на стороне PostgreSQL выполняются как `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` внутри PostgreSQL транзакции с автоматическим коммитом после каждого `INSERT` запроса. PostgreSQL массивы конвертируются в массивы ClickHouse. -Будьте осторожны в PostgreSQL многомерные массивы могут содержать в себе другие массивы с разным кол-вом элементов в разных подмассивах, но внутри ClickHouse допустипы только многомерные массивы с одинаковым кол-вом элементов в каждом вложенном массиве. +Будьте осторожны в PostgreSQL массивы созданные как type_name[], являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы, внутри ClickHouse допустимы только многомерные массивы с одинаковым кол-вом измерений во всех строках таблицы. ## Пример использования {#usage-example} diff --git a/docs/ru/sql-reference/table-functions/postgresql.md b/docs/ru/sql-reference/table-functions/postgresql.md index 338e7d8e7f2..a8ed23db8ed 100644 --- a/docs/ru/sql-reference/table-functions/postgresql.md +++ b/docs/ru/sql-reference/table-functions/postgresql.md @@ -34,7 +34,7 @@ SELECT запросы на стороне PostgreSQL выполняются ка INSERT запросы на стороне PostgreSQL выполняются как `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` внутри PostgreSQL транзакции с автоматическим коммитом после каждого `INSERT` запроса. PostgreSQL массивы конвертируются в массивы ClickHouse.
-Будьте осторожны в PostgreSQL многомерные массивы могут содержать в себе другие массивы с разным кол-вом элементов в разных подмассивах, но внутри ClickHouse допустипы только многомерные массивы с одинаковым кол-вом элементов в каждом вложенном массиве. +Будьте осторожны в PostgreSQL массивы созданные как type_name[], являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы, внутри ClickHouse допустимы только многомерные массивы с одинаковым кол-вом измерений во всех строках таблицы. **Возвращаемое значение** From c2e97c295182fd6701bb8c37b4b7dffc715a5808 Mon Sep 17 00:00:00 2001 From: Eugene Klimov Date: Tue, 23 Feb 2021 13:08:11 +0500 Subject: [PATCH 094/333] Update docs/en/sql-reference/table-functions/postgresql.md Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- docs/en/sql-reference/table-functions/postgresql.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md index 42790e0d870..082931343bf 100644 --- a/docs/en/sql-reference/table-functions/postgresql.md +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -35,14 +35,15 @@ All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` samp INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement. PostgreSQL Array types converts into ClickHouse arrays. -Be careful in PostgreSQL an array data created like type_name[] may contain multi-dimensional arrays of different dimensions in different rows in same column, but in ClickHouse only allows multi-dimensional arrays of the same count of dimension in all rows in same column in table.
+ +Be careful in PostgreSQL an array data type column like Integer[] may contain arrays of different dimensions in different rows, but in ClickHouse it is only allowed to have multidimensional arrays of the same dimension in all rows. **Returned Value** A table object with the same columns as the original PostgreSQL table. !!! info "Note" - In the `INSERT` query to distinguish table function `postgresql(...)` from table name with column names list you must use keywords `FUNCTION` or `TABLE FUNCTION`. See examples below. + In the `INSERT` query to distinguish table function `postgresql(...)` from table name with column names list you must use keywords `FUNCTION` or `TABLE FUNCTION`. See examples below. **Examples** @@ -72,7 +73,7 @@ postgresql> select * from test; Selecting data from ClickHouse: ```sql -SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password') WHERE str IN ('test'); +SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'password') WHERE str IN ('test'); ``` ``` text From 8763ac98fe5094cc9af4c946d688c34f195fd249 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 24 Feb 2021 10:22:32 +0500 Subject: [PATCH 095/333] fix multi dimensional array warning - https://github.com/ClickHouse/ClickHouse/pull/21078#discussion_r580829045 Signed-off-by: Slach --- docs/en/engines/table-engines/integrations/postgresql.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index b82b97bfed0..7272f2e5edf 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -47,7 +47,7 @@ All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` samp INSERT Queries on PostgreSQL side run as `COPY "table_name" (field1, field2, ... 
fieldN) FROM STDIN` inside PostgreSQL transaction with auto-commit after each `INSERT` statement. PostgreSQL Array types converts into ClickHouse arrays. -Be careful in PostgreSQL an array data created like type_name[] may contain multi-dimensional arrays of different dimensions in different rows in same column, but in ClickHouse only allows multi-dimensional arrays of the same count of dimension in all rows in same column in table. +Be careful in PostgreSQL an array data created like a type_name[] may contain multi-dimensional arrays of different dimensions in different table rows in same column, but in ClickHouse it is only allowed to have multidimensional arrays of the same count of dimensions in all table rows in same column. ## Usage Example {#usage-example} From ddd5acf251c272b70aa7dd2e1580946249e53d6b Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 4 Mar 2021 17:08:35 +0500 Subject: [PATCH 096/333] try to fix "fake" nowhere links according to https://github.com/ClickHouse/ClickHouse/pull/21268#issuecomment-787106299 --- .../integrations/embedded-rocksdb.md | 2 +- .../table-engines/integrations/hdfs.md | 7 +- .../table-engines/integrations/index.md | 3 + .../table-engines/integrations/jdbc.md | 2 +- .../table-engines/integrations/kafka.md | 2 +- .../table-engines/integrations/mongodb.md | 2 +- .../table-engines/integrations/mysql.md | 2 +- .../table-engines/integrations/odbc.md | 2 +- .../table-engines/integrations/postgresql.md | 2 + .../table-engines/integrations/rabbitmq.md | 2 + .../engines/table-engines/integrations/s3.md | 26 +-- docs/en/sql-reference/table-functions/file.md | 2 +- docs/en/sql-reference/table-functions/hdfs.md | 2 +- .../en/sql-reference/table-functions/index.md | 25 +-- docs/en/sql-reference/table-functions/odbc.md | 2 +- .../table-functions/postgresql.md | 2 + docs/en/sql-reference/table-functions/s3.md | 2 +- docs/en/sql-reference/table-functions/view.md | 3 +- .../integrations/embedded-rocksdb.md | 2 +- 
.../table-engines/integrations/hdfs.md | 96 ++++++++++- .../table-engines/integrations/index.md | 2 + .../table-engines/integrations/jdbc.md | 2 +- .../table-engines/integrations/kafka.md | 2 +- .../table-engines/integrations/mongodb.md | 2 +- .../table-engines/integrations/mysql.md | 2 +- .../table-engines/integrations/odbc.md | 2 +- .../table-engines/integrations/postgresql.md | 2 + .../table-engines/integrations/rabbitmq.md | 2 + .../engines/table-engines/integrations/s3.md | 156 ++++++++++++++++++ 29 files changed, 310 insertions(+), 50 deletions(-) create mode 100644 docs/ru/engines/table-engines/integrations/s3.md diff --git a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md index 6e864751cc3..e9e069933e5 100644 --- a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md @@ -39,4 +39,4 @@ ENGINE = EmbeddedRocksDB PRIMARY KEY key ``` -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/embedded-rocksdb/) +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/embedded-rocksdb/) diff --git a/docs/en/engines/table-engines/integrations/hdfs.md b/docs/en/engines/table-engines/integrations/hdfs.md index 5c36e3f1c21..0782efe8e72 100644 --- a/docs/en/engines/table-engines/integrations/hdfs.md +++ b/docs/en/engines/table-engines/integrations/hdfs.md @@ -5,7 +5,7 @@ toc_title: HDFS # HDFS {#table_engines-hdfs} -This engine provides integration with [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing to manage data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)via ClickHouse. 
This engine is similar +This engine provides integration with [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing to manage data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) via ClickHouse. This engine is similar to the [File](../../../engines/table-engines/special/file.md#table_engines-file) and [URL](../../../engines/table-engines/special/url.md#table_engines-url) engines, but provides Hadoop-specific features. ## Usage {#usage} @@ -174,7 +174,7 @@ Similar to GraphiteMergeTree, the HDFS engine supports extended configuration us | dfs\_domain\_socket\_path | "" | -[HDFS Configuration Reference ](https://hawq.apache.org/docs/userguide/2.3.0.0-incubating/reference/HDFSConfigurationParameterReference.html) might explain some parameters. +[HDFS Configuration Reference](https://hawq.apache.org/docs/userguide/2.3.0.0-incubating/reference/HDFSConfigurationParameterReference.html) might explain some parameters. 
#### ClickHouse extras {#clickhouse-extras} @@ -185,7 +185,6 @@ Similar to GraphiteMergeTree, the HDFS engine supports extended configuration us |hadoop\_kerberos\_kinit\_command | kinit | #### Limitations {#limitations} - * hadoop\_security\_kerberos\_ticket\_cache\_path can be global only, not user specific ## Kerberos support {#kerberos-support} @@ -207,4 +206,4 @@ If hadoop\_kerberos\_keytab, hadoop\_kerberos\_principal or hadoop\_kerberos\_ki - [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns) -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/) +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/hdfs/) diff --git a/docs/en/engines/table-engines/integrations/index.md b/docs/en/engines/table-engines/integrations/index.md index 288c9c3cd56..28f38375448 100644 --- a/docs/en/engines/table-engines/integrations/index.md +++ b/docs/en/engines/table-engines/integrations/index.md @@ -18,3 +18,6 @@ List of supported integrations: - [Kafka](../../../engines/table-engines/integrations/kafka.md) - [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md) - [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) +- [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) + +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/) diff --git a/docs/en/engines/table-engines/integrations/jdbc.md b/docs/en/engines/table-engines/integrations/jdbc.md index 2144be9f1e3..edbc5d3ed3e 100644 --- a/docs/en/engines/table-engines/integrations/jdbc.md +++ b/docs/en/engines/table-engines/integrations/jdbc.md @@ -85,4 +85,4 @@ FROM jdbc_table - [JDBC table function](../../../sql-reference/table-functions/jdbc.md). 
-[Original article](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/) +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/jdbc/) diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index fb1df62bb15..1b3aaa4b569 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -194,4 +194,4 @@ Example: - [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns) - [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/kafka/) diff --git a/docs/en/engines/table-engines/integrations/mongodb.md b/docs/en/engines/table-engines/integrations/mongodb.md index e648a13b5e0..2fee27ce80d 100644 --- a/docs/en/engines/table-engines/integrations/mongodb.md +++ b/docs/en/engines/table-engines/integrations/mongodb.md @@ -54,4 +54,4 @@ SELECT COUNT() FROM mongo_table; └─────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/integrations/mongodb/) +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/mongodb/) diff --git a/docs/en/engines/table-engines/integrations/mysql.md b/docs/en/engines/table-engines/integrations/mysql.md index 2ea8ea95958..8b7caa12c91 100644 --- a/docs/en/engines/table-engines/integrations/mysql.md +++ b/docs/en/engines/table-engines/integrations/mysql.md @@ -101,4 +101,4 @@ SELECT * FROM mysql_table - [The ‘mysql’ table function](../../../sql-reference/table-functions/mysql.md) - [Using MySQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql) -[Original
article](https://clickhouse.tech/docs/en/operations/table_engines/mysql/) +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/mysql/) diff --git a/docs/en/engines/table-engines/integrations/odbc.md b/docs/en/engines/table-engines/integrations/odbc.md index 8083d644deb..99efd870088 100644 --- a/docs/en/engines/table-engines/integrations/odbc.md +++ b/docs/en/engines/table-engines/integrations/odbc.md @@ -128,4 +128,4 @@ SELECT * FROM odbc_t - [ODBC external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc) - [ODBC table function](../../../sql-reference/table-functions/odbc.md) -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/odbc/) +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/odbc/) diff --git a/docs/en/engines/table-engines/integrations/postgresql.md b/docs/en/engines/table-engines/integrations/postgresql.md index 7272f2e5edf..1a2ccf3e0dc 100644 --- a/docs/en/engines/table-engines/integrations/postgresql.md +++ b/docs/en/engines/table-engines/integrations/postgresql.md @@ -102,3 +102,5 @@ SELECT * FROM postgresql_table WHERE str IN ('test') - [The ‘postgresql’ table function](../../../sql-reference/table-functions/postgresql.md) - [Using PostgreSQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) + +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/postgresql/) diff --git a/docs/en/engines/table-engines/integrations/rabbitmq.md b/docs/en/engines/table-engines/integrations/rabbitmq.md index 4a0550275ca..476192d3969 100644 --- a/docs/en/engines/table-engines/integrations/rabbitmq.md +++ b/docs/en/engines/table-engines/integrations/rabbitmq.md @@ -163,3 +163,5 @@ Example: - `_redelivered` - `redelivered` flag of the 
message. - `_message_id` - messageID of the received message; non-empty if was set, when message was published. - `_timestamp` - timestamp of the received message; non-empty if was set, when message was published. + +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/rabbitmq/) diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index 5858a0803e6..93dcbdbc0f1 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -6,11 +6,11 @@ toc_title: S3 # S3 {#table_engines-s3} This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ecosystem. This engine is similar -to the [HDFS](../../../engines/table-engines/special/file.md#table_engines-hdfs) engine, but provides S3-specific features. +to the [HDFS](../../../engines/table-engines/integrations/hdfs.md#table_engines-hdfs) engine, but provides S3-specific features. ## Usage {#usage} -``` sql +```sql ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression]) ``` @@ -25,23 +25,23 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, **1.** Set up the `s3_engine_table` table: -``` sql +```sql CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip') ``` **2.** Fill file: -``` sql +```sql INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3) ``` **3.** Query the data: -``` sql +```sql SELECT * FROM s3_engine_table LIMIT 2 ``` -``` text +```text ┌─name─┬─value─┐ │ one │ 1 │ │ two │ 2 │ @@ -69,7 +69,7 @@ Constructions with `{}` are similar to the [remote](../../../sql-reference/table **Example** -1. Suppose we have several files in TSV format with the following URIs on HDFS: +1. 
Suppose we have several files in CSV format with the following URIs on S3: - ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv’ - ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv’ @@ -82,19 +82,19 @@ Constructions with `{}` are similar to the [remote](../../../sql-reference/table -``` sql +```sql CREATE TABLE table_with_range (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV') ``` 3. Another way: -``` sql +```sql CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV') ``` 4. Table consists of all the files in both directories (all files should satisfy format and schema described in query): -``` sql +```sql CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV') ``` @@ -105,7 +105,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https: Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`: -``` sql +```sql CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV') ``` @@ -124,7 +124,7 @@ The following settings can be set before query execution or placed into configur - `s3_max_single_part_upload_size` — Default value is `64Mb`. The maximum size of object to upload using singlepart upload to S3. - `s3_min_upload_part_size` — Default value is `512Mb`. The minimum size of part to upload during multipart upload to [S3 Multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). -- `s3_max_redirects` — Default value is `10`. Max number of S3 redirects hops allowed. +- `s3_max_redirects` — Default value is `10`. 
Max number of HTTP redirects S3 hops allowed. Security consideration: if malicious user can specify arbitrary S3 URLs, `s3_max_redirects` must be set to zero to avoid [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery) attacks; or alternatively, `remote_host_filter` must be specified in server configuration. @@ -153,4 +153,4 @@ Example: ``` -[Original article](https://clickhouse.tech/docs/en/operations/table_engines/s3/) +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/s3/) diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index da0999e66eb..e1459b5e254 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -124,6 +124,6 @@ SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, **See Also** -- [Virtual columns](index.md#table_engines-virtual_columns) +- [Virtual columns](../../engines/table-engines/index.md#table_engines-virtual_columns) [Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/file/) diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 512f47a2b46..31e2000b22d 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -97,6 +97,6 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin **See Also** -- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) +- [Virtual columns](../../engines/table-engines/index.md#table_engines-virtual_columns) [Original article](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/) diff --git a/docs/en/sql-reference/table-functions/index.md b/docs/en/sql-reference/table-functions/index.md index 691687dea25..d1368c6a674 100644 --- a/docs/en/sql-reference/table-functions/index.md +++ 
b/docs/en/sql-reference/table-functions/index.md @@ -21,17 +21,18 @@ You can use table functions in: !!! warning "Warning" You can’t use table functions if the [allow_ddl](../../operations/settings/permissions-for-queries.md#settings_allow_ddl) setting is disabled. -| Function | Description | -|-----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------| -| [file](../../sql-reference/table-functions/file.md) | Creates a [File](../../engines/table-engines/special/file.md)-engine table. | -| [merge](../../sql-reference/table-functions/merge.md) | Creates a [Merge](../../engines/table-engines/special/merge.md)-engine table. | -| [numbers](../../sql-reference/table-functions/numbers.md) | Creates a table with a single column filled with integer numbers. | -| [remote](../../sql-reference/table-functions/remote.md) | Allows you to access remote servers without creating a [Distributed](../../engines/table-engines/special/distributed.md)-engine table. | -| [url](../../sql-reference/table-functions/url.md) | Creates a [Url](../../engines/table-engines/special/url.md)-engine table. | -| [mysql](../../sql-reference/table-functions/mysql.md) | Creates a [MySQL](../../engines/table-engines/integrations/mysql.md)-engine table. | -| [jdbc](../../sql-reference/table-functions/jdbc.md) | Creates a [JDBC](../../engines/table-engines/integrations/jdbc.md)-engine table. | -| [odbc](../../sql-reference/table-functions/odbc.md) | Creates a [ODBC](../../engines/table-engines/integrations/odbc.md)-engine table. | -| [hdfs](../../sql-reference/table-functions/hdfs.md) | Creates a [HDFS](../../engines/table-engines/integrations/hdfs.md)-engine table. | -| [s3](../../sql-reference/table-functions/s3.md) | Creates a [S3](../../engines/table-engines/integrations/s3.md)-engine table. 
| +| Function | Description | +|-----------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------| +| [file](../../sql-reference/table-functions/file.md) | Creates a [File](../../engines/table-engines/special/file.md)-engine table. | +| [merge](../../sql-reference/table-functions/merge.md) | Creates a [Merge](../../engines/table-engines/special/merge.md)-engine table. | +| [numbers](../../sql-reference/table-functions/numbers.md) | Creates a table with a single column filled with integer numbers. | +| [remote](../../sql-reference/table-functions/remote.md) | Allows you to access remote servers without creating a [Distributed](../../engines/table-engines/special/distributed.md)-engine table. | +| [url](../../sql-reference/table-functions/url.md) | Creates a [Url](../../engines/table-engines/special/url.md)-engine table. | +| [mysql](../../sql-reference/table-functions/mysql.md) | Creates a [MySQL](../../engines/table-engines/integrations/mysql.md)-engine table. | +| [postgresql](../../sql-reference/table-functions/postgresql.md) | Creates a [PostgreSQL](../../engines/table-engines/integrations/posgresql.md)-engine table. | +| [jdbc](../../sql-reference/table-functions/jdbc.md) | Creates a [JDBC](../../engines/table-engines/integrations/jdbc.md)-engine table. | +| [odbc](../../sql-reference/table-functions/odbc.md) | Creates a [ODBC](../../engines/table-engines/integrations/odbc.md)-engine table. | +| [hdfs](../../sql-reference/table-functions/hdfs.md) | Creates a [HDFS](../../engines/table-engines/integrations/hdfs.md)-engine table. | +| [s3](../../sql-reference/table-functions/s3.md) | Creates a [S3](../../engines/table-engines/integrations/s3.md)-engine table. 
| [Original article](https://clickhouse.tech/docs/en/query_language/table_functions/) diff --git a/docs/en/sql-reference/table-functions/odbc.md b/docs/en/sql-reference/table-functions/odbc.md index ea79cd44a93..38ca4d40d17 100644 --- a/docs/en/sql-reference/table-functions/odbc.md +++ b/docs/en/sql-reference/table-functions/odbc.md @@ -103,4 +103,4 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') - [ODBC external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc) - [ODBC table engine](../../engines/table-engines/integrations/odbc.md). -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) +[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/jdbc/) diff --git a/docs/en/sql-reference/table-functions/postgresql.md b/docs/en/sql-reference/table-functions/postgresql.md index 082931343bf..ad5d8a29904 100644 --- a/docs/en/sql-reference/table-functions/postgresql.md +++ b/docs/en/sql-reference/table-functions/postgresql.md @@ -100,3 +100,5 @@ SELECT * FROM postgresql('localhost:5432', 'test', 'test', 'postgresql_user', 'p - [The ‘PostgreSQL’ table engine](../../engines/table-engines/integrations/postgresql.md) - [Using PostgreSQL as a source of external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) + +[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/postgresql/) diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 76a0e042ea4..ea5dde707b8 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -164,6 +164,6 @@ Security consideration: if malicious user can specify arbitrary S3 URLs, `s3_max **See Also** -- [Virtual 
columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns) +- [Virtual columns](../../engines/table-engines/index.md#table_engines-virtual_columns) [Original article](https://clickhouse.tech/docs/en/query_language/table_functions/s3/) diff --git a/docs/en/sql-reference/table-functions/view.md b/docs/en/sql-reference/table-functions/view.md index 08096c2b019..b627feee4c2 100644 --- a/docs/en/sql-reference/table-functions/view.md +++ b/docs/en/sql-reference/table-functions/view.md @@ -64,4 +64,5 @@ SELECT * FROM cluster(`cluster_name`, view(SELECT a, b, c FROM table_name)) **See Also** - [View Table Engine](https://clickhouse.tech/docs/en/engines/table-engines/special/view/) -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/view/) \ No newline at end of file + +[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/view/) \ No newline at end of file diff --git a/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md b/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md index 9b68bcfc770..7bd1420dfab 100644 --- a/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md @@ -41,4 +41,4 @@ ENGINE = EmbeddedRocksDB PRIMARY KEY key; ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/embedded-rocksdb/) \ No newline at end of file +[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/embedded-rocksdb/) \ No newline at end of file diff --git a/docs/ru/engines/table-engines/integrations/hdfs.md b/docs/ru/engines/table-engines/integrations/hdfs.md index bd8e760fce4..449d7c9a20c 100644 --- a/docs/ru/engines/table-engines/integrations/hdfs.md +++ b/docs/ru/engines/table-engines/integrations/hdfs.md @@ -102,16 +102,104 @@ CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = HDFS('hdfs Создадим таблицу с 
именами `file000`, `file001`, … , `file999`: ``` sql -CREARE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV') +CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV') ``` +## Конфигурация {#configuration} + +Похоже на GraphiteMergeTree, движок HDFS поддерживает расширенную конфигурацию с использованием файла конфигурации ClickHouse. Есть два раздела конфигурации которые вы можете использовать: глобальный (`hdfs`) и на уровне пользователя (`hdfs_*`). Глобальные настройки применяются первыми, и затем применяется конфигурация уровня пользователя (если она указана). + +``` xml + + + /tmp/keytab/clickhouse.keytab + clickuser@TEST.CLICKHOUSE.TECH + kerberos + + + + + root@TEST.CLICKHOUSE.TECH + +``` + +### Список возможных опций конфигурации со значениями по умолчанию +#### Поддерживаемые из libhdfs3 + + +| **параметр** | **по умолчанию** | +| rpc\_client\_connect\_tcpnodelay | true | +| dfs\_client\_read\_shortcircuit | true | +| output\_replace-datanode-on-failure | true | +| input\_notretry-another-node | false | +| input\_localread\_mappedfile | true | +| dfs\_client\_use\_legacy\_blockreader\_local | false | +| rpc\_client\_ping\_interval | 10 * 1000 | +| rpc\_client\_connect\_timeout | 600 * 1000 | +| rpc\_client\_read\_timeout | 3600 * 1000 | +| rpc\_client\_write\_timeout | 3600 * 1000 | +| rpc\_client\_socekt\_linger\_timeout | -1 | +| rpc\_client\_connect\_retry | 10 | +| rpc\_client\_timeout | 3600 * 1000 | +| dfs\_default\_replica | 3 | +| input\_connect\_timeout | 600 * 1000 | +| input\_read\_timeout | 3600 * 1000 | +| input\_write\_timeout | 3600 * 1000 | +| input\_localread\_default\_buffersize | 1 * 1024 * 1024 | +| dfs\_prefetchsize | 10 | +| input\_read\_getblockinfo\_retry | 3 | +| input\_localread\_blockinfo\_cachesize | 1000 | +| input\_read\_max\_retry | 60 | +| output\_default\_chunksize | 512 | +| 
output\_default\_packetsize | 64 * 1024 | +| output\_default\_write\_retry | 10 | +| output\_connect\_timeout | 600 * 1000 | +| output\_read\_timeout | 3600 * 1000 | +| output\_write\_timeout | 3600 * 1000 | +| output\_close\_timeout | 3600 * 1000 | +| output\_packetpool\_size | 1024 | +| output\_heeartbeat\_interval | 10 * 1000 | +| dfs\_client\_failover\_max\_attempts | 15 | +| dfs\_client\_read\_shortcircuit\_streams\_cache\_size | 256 | +| dfs\_client\_socketcache\_expiryMsec | 3000 | +| dfs\_client\_socketcache\_capacity | 16 | +| dfs\_default\_blocksize | 64 * 1024 * 1024 | +| dfs\_default\_uri | "hdfs://localhost:9000" | +| hadoop\_security\_authentication | "simple" | +| hadoop\_security\_kerberos\_ticket\_cache\_path | "" | +| dfs\_client\_log\_severity | "INFO" | +| dfs\_domain\_socket\_path | "" | + + +[Руководство по конфигурации HDFS](https://hawq.apache.org/docs/userguide/2.3.0.0-incubating/reference/HDFSConfigurationParameterReference.html) поможет объяснить назначение некоторых параметров. + + +#### Расширенные параметры для ClickHouse {#clickhouse-extras} + +| **параметр** | **по умолчанию** | +|hadoop\_kerberos\_keytab | "" | +|hadoop\_kerberos\_principal | "" | +|hadoop\_kerberos\_kinit\_command | kinit | + +#### Ограничения {#limitations} + * hadoop\_security\_kerberos\_ticket\_cache\_path может быть определен только на глобальном уровне + +## Поддержка Kerberos {#kerberos-support} + +Если параметр hadoop\_security\_authentication имеет значение 'kerberos', ClickHouse аутентифицируется с помощью Kerberos. +[Расширенные параметры](#clickhouse-extras) и hadoop\_security\_kerberos\_ticket\_cache\_path помогают сделать это. +Обратите внимание, что из-за ограничений libhdfs3 поддерживается только устаревший метод аутентификации, +коммуникация с узлами данных не защищена SASL (HADOOP\_SECURE\_DN\_USER — надежный показатель такого +подхода к безопасности). 
Используйте tests/integration/test\_storage\_kerberized\_hdfs/hdfs_configs/bootstrap.sh для примера настроек. + +Если hadoop\_kerberos\_keytab, hadoop\_kerberos\_principal или hadoop\_kerberos\_kinit\_command указаны в настройках, kinit будет вызван. hadoop\_kerberos\_keytab и hadoop\_kerberos\_principal обязательны в этом случае. Необходимо также будет установить kinit и файлы конфигурации krb5. ## Виртуальные столбцы {#virtualnye-stolbtsy} - `_path` — Путь к файлу. - `_file` — Имя файла. -**Смотрите также** +**См. также** -- [Виртуальные столбцы](index.md#table_engines-virtual_columns) +- [Виртуальные колонки](../../../engines/table-engines/index.md#table_engines-virtual_columns) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/hdfs/) +[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/hdfs/) diff --git a/docs/ru/engines/table-engines/integrations/index.md b/docs/ru/engines/table-engines/integrations/index.md index db7e527442e..7a11a5176cd 100644 --- a/docs/ru/engines/table-engines/integrations/index.md +++ b/docs/ru/engines/table-engines/integrations/index.md @@ -14,8 +14,10 @@ toc_priority: 30 - [MySQL](../../../engines/table-engines/integrations/mysql.md) - [MongoDB](../../../engines/table-engines/integrations/mongodb.md) - [HDFS](../../../engines/table-engines/integrations/hdfs.md) +- [S3](../../../engines/table-engines/integrations/s3.md) - [Kafka](../../../engines/table-engines/integrations/kafka.md) - [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md) - [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) +- [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) [Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/) diff --git a/docs/ru/engines/table-engines/integrations/jdbc.md b/docs/ru/engines/table-engines/integrations/jdbc.md index d7d438e0633..8ead5abb277 100644 --- 
a/docs/ru/engines/table-engines/integrations/jdbc.md +++ b/docs/ru/engines/table-engines/integrations/jdbc.md @@ -89,4 +89,4 @@ FROM jdbc_table - [Табличная функция JDBC](../../../engines/table-engines/integrations/jdbc.md). -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/jdbc/) +[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/jdbc/) diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md index 5a6971b1ae6..06a0d4df180 100644 --- a/docs/ru/engines/table-engines/integrations/kafka.md +++ b/docs/ru/engines/table-engines/integrations/kafka.md @@ -193,4 +193,4 @@ ClickHouse может поддерживать учетные данные Kerbe - [Виртуальные столбцы](index.md#table_engines-virtual_columns) - [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/kafka/) +[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/kafka/) diff --git a/docs/ru/engines/table-engines/integrations/mongodb.md b/docs/ru/engines/table-engines/integrations/mongodb.md index 0765b3909de..5ab63494648 100644 --- a/docs/ru/engines/table-engines/integrations/mongodb.md +++ b/docs/ru/engines/table-engines/integrations/mongodb.md @@ -54,4 +54,4 @@ SELECT COUNT() FROM mongo_table; └─────────┘ ``` -[Original article](https://clickhouse.tech/docs/ru/operations/table_engines/integrations/mongodb/) +[Original article](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/mongodb/) diff --git a/docs/ru/engines/table-engines/integrations/mysql.md b/docs/ru/engines/table-engines/integrations/mysql.md index 459f8844ce8..bc53e0f1fbb 100644 --- a/docs/ru/engines/table-engines/integrations/mysql.md +++ b/docs/ru/engines/table-engines/integrations/mysql.md @@ -101,4 +101,4 @@ SELECT * FROM mysql_table - [Табличная функция 
‘mysql’](../../../engines/table-engines/integrations/mysql.md) - [Использование MySQL в качестве источника для внешнего словаря](../../../engines/table-engines/integrations/mysql.md#dicts-external_dicts_dict_sources-mysql) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/mysql/) +[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/mysql/) diff --git a/docs/ru/engines/table-engines/integrations/odbc.md b/docs/ru/engines/table-engines/integrations/odbc.md index 898d569d504..ee34be302bc 100644 --- a/docs/ru/engines/table-engines/integrations/odbc.md +++ b/docs/ru/engines/table-engines/integrations/odbc.md @@ -128,4 +128,4 @@ SELECT * FROM odbc_t - [Внешние словари ODBC](../../../engines/table-engines/integrations/odbc.md#dicts-external_dicts_dict_sources-odbc) - [Табличная функция odbc](../../../engines/table-engines/integrations/odbc.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/odbc/) +[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/odbc/) diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md index 3ab98682203..bc26899f55b 100644 --- a/docs/ru/engines/table-engines/integrations/postgresql.md +++ b/docs/ru/engines/table-engines/integrations/postgresql.md @@ -102,3 +102,5 @@ SELECT * FROM postgresql_table WHERE str IN ('test') - [Табличная функция ‘postgresql’](../../../sql-reference/table-functions/postgresql.md) - [Использование PostgreSQL в качестве истояника для внешнего словаря](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) + +[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/postgresql/) diff --git a/docs/ru/engines/table-engines/integrations/rabbitmq.md b/docs/ru/engines/table-engines/integrations/rabbitmq.md index 
f55163c1988..1865cb16fcc 100644 --- a/docs/ru/engines/table-engines/integrations/rabbitmq.md +++ b/docs/ru/engines/table-engines/integrations/rabbitmq.md @@ -155,3 +155,5 @@ Example: - `_redelivered` - флаг `redelivered`. (Не равно нулю, если есть возможность, что сообщение было получено более, чем одним каналом.) - `_message_id` - значение поля `messageID` полученного сообщения. Данное поле непусто, если указано в параметрах при отправке сообщения. - `_timestamp` - значение поля `timestamp` полученного сообщения. Данное поле непусто, если указано в параметрах при отправке сообщения. + +[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/rabbitmq/) diff --git a/docs/ru/engines/table-engines/integrations/s3.md b/docs/ru/engines/table-engines/integrations/s3.md new file mode 100644 index 00000000000..f1b2e78b0ba --- /dev/null +++ b/docs/ru/engines/table-engines/integrations/s3.md @@ -0,0 +1,156 @@ +--- +toc_priority: 4 +toc_title: S3 +--- + +# S3 {#table_engines-s3} + +Этот движок обеспечивает интеграцию с экосистемой [Amazon S3](https://aws.amazon.com/s3/). Этот движок похож на +движок [HDFS](../../../engines/table-engines/integrations/hdfs.md#table_engines-hdfs), но предоставляет S3-специфичные функции. + +## Использование {#usage} + +```sql +ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression]) +``` + +**Параметры** + +- `path` — URL, ссылающийся на файл, расположенный в S3. В режиме для чтения можно читать несколько файлов как один, поддерживаются следующие шаблоны для указания маски пути к файлам: *, ?, {abc,def} и {N..M}, где N, M — числа, `’abc’, ‘def’ — строки. +- `format` — [Формат](../../../interfaces/formats.md#formats) файла. +- `structure` — Структура таблицы. Формат `'column1_name column1_type, column2_name column2_type, ...'`. +- `compression` — Алгоритм сжатия, не обязательный параметр. Поддерживаемые значения: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. 
По умолчанию алгоритм сжатия будет автоматически применен в зависимости от расширения в имени файла. + +**Пример:** + +**1.** Создание таблицы `s3_engine_table` : + +```sql +CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip') +``` + +**2.** Заполнение файла: + +```sql +INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3) +``` + +**3.** Запрос данных: + +```sql +SELECT * FROM s3_engine_table LIMIT 2 +``` + +```text +┌─name─┬─value─┐ +│ one │ 1 │ +│ two │ 2 │ +└──────┴───────┘ +``` + +## Детали реализации {#implementation-details} + +- Чтение и запись могут быть одновременными и параллельными +- Не поддерживается: + - Операции `ALTER` и `SELECT...SAMPLE`. + - Индексы. + - Репликация. + +**Поддержка шаблонов в параметре path** + +Множество частей параметра `path` поддерживает шаблоны. Для того чтобы быть обработанным, файл должен присутствовать в S3 и соответствовать шаблону. Списки файлов определяются в момент `SELECT` (но не в момент `CREATE`). + +- `*` — Заменяет любое количество любых символов, кроме `/`, включая пустые строки. +- `?` — Заменяет один символ. +- `{some_string,another_string,yet_another_one}` — Заменяет любую из строк `'some_string', 'another_string', 'yet_another_one'`. +- `{N..M}` — Заменяет любое число в диапазоне от N до M включительно. N и M могут иметь лидирующие нули, например `000..078`. + +Конструкции с `{}` работают так же, как в табличной функции [remote](../../../sql-reference/table-functions/remote.md). + +**Пример** + +1. 
Предположим, у нас есть несколько файлов в формате CSV со следующими URI в S3: + +- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv’ + +2. Есть несколько способов сделать таблицу, состоящую из всех шести файлов: + + + +```sql +CREATE TABLE table_with_range (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV') +``` + +3. Другой способ: + +```sql +CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV') +``` + +4. Таблица, состоящая из всех файлов в обоих каталогах (все файлы должны удовлетворять формату и схеме, описанным в запросе): + +```sql +CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV') +``` + +!!! warning "Предупреждение" + Если список файлов содержит диапазоны номеров с ведущими нулями, используйте конструкции со скобками для каждой цифры или используйте `?`. + +**Пример** + +Создание таблицы с именами файлов `file-000.csv`, `file-001.csv`, … , `file-999.csv`: + +```sql +CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV') +``` + +## Виртуальные колонки {#virtual-columns} + +- `_path` — Путь к файлу. +- `_file` — Имя файла. 
+ +**См. также** + +- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns) + +## S3-специфичные настройки {#settings} + +Следующие настройки могут быть заданы при запуске запроса или установлены в конфигурационном файле для пользовательского профиля. + +- `s3_max_single_part_upload_size` — По умолчанию `64Mb`. Максимальный размер куска данных для загрузки в S3 как singlepart. +- `s3_min_upload_part_size` — По умолчанию `512Mb`. Минимальный размер куска данных для загрузки в S3 с помощью [S3 Multipart загрузки](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +- `s3_max_redirects` — Значение по умолчанию `10`. Максимально допустимое количество HTTP перенаправлений от серверов S3. + +Примечания для безопасности: если злоумышленник может указать произвольные ссылки на S3, то лучше выставить `s3_max_redirects` как ноль для избежания атак типа [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery); или ограничить с помощью `remote_host_filter` список адресов, по которым возможно взаимодействие с S3. + +### Настройки, специфичные для заданной конечной точки {#endpointsettings} + +Следующие настройки могут быть указаны в конфигурационном файле для заданной конечной точки (которой будет сопоставлен точный конечный префикс URL): + +- `endpoint` — Обязательный параметр. Указывает префикс URL для конечной точки. +- `access_key_id` и `secret_access_key` — Не обязательно. Задает параметры авторизации для заданной конечной точки. +- `use_environment_credentials` — Не обязательный параметр, значение по умолчанию `false`. Если установлено как `true`, S3 клиент будет пытаться получить параметры авторизации из переменных окружения и Amazon EC2 метаданных для заданной конечной точки. +- `header` — Не обязательный параметр, может быть указан несколько раз. Добавляет указанный HTTP заголовок к запросу для заданной в `endpoint` URL префикса. +- `server_side_encryption_customer_key_base64` — Не обязательный параметр. 
Если указан, к запросам будут указаны заголовки необходимые для доступа к S3 объектам с SSE-C шифрованием. + +Пример: + +``` + + + https://storage.yandexcloud.net/my-test-bucket-768/ + + + + + + + +``` + +[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/s3/) From 5016b30209e276f04d26213813b92fe319ebe79d Mon Sep 17 00:00:00 2001 From: Slach Date: Thu, 4 Mar 2021 17:18:51 +0500 Subject: [PATCH 097/333] replace `posgresql` to `postgresql` --- .../external-dicts-dict-sources.md | 2 +- .../en/sql-reference/table-functions/index.md | 2 +- .../external-dicts-dict-sources.md | 2 +- .../ru/sql-reference/table-functions/index.md | 20 ++++++++++--------- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index f8f4745bb16..5772992f418 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -688,7 +688,7 @@ Example of settings: table_name
id=10 SQL_QUERY - +
``` diff --git a/docs/en/sql-reference/table-functions/index.md b/docs/en/sql-reference/table-functions/index.md index d1368c6a674..f51e58d6d67 100644 --- a/docs/en/sql-reference/table-functions/index.md +++ b/docs/en/sql-reference/table-functions/index.md @@ -29,7 +29,7 @@ You can use table functions in: | [remote](../../sql-reference/table-functions/remote.md) | Allows you to access remote servers without creating a [Distributed](../../engines/table-engines/special/distributed.md)-engine table. | | [url](../../sql-reference/table-functions/url.md) | Creates a [Url](../../engines/table-engines/special/url.md)-engine table. | | [mysql](../../sql-reference/table-functions/mysql.md) | Creates a [MySQL](../../engines/table-engines/integrations/mysql.md)-engine table. | -| [postgresql](../../sql-reference/table-functions/postgresql.md) | Creates a [PostgreSQL](../../engines/table-engines/integrations/posgresql.md)-engine table. | +| [postgresql](../../sql-reference/table-functions/postgresql.md) | Creates a [PostgreSQL](../../engines/table-engines/integrations/postgresql.md)-engine table. | | [jdbc](../../sql-reference/table-functions/jdbc.md) | Creates a [JDBC](../../engines/table-engines/integrations/jdbc.md)-engine table. | | [odbc](../../sql-reference/table-functions/odbc.md) | Creates a [ODBC](../../engines/table-engines/integrations/odbc.md)-engine table. | | [hdfs](../../sql-reference/table-functions/hdfs.md) | Creates a [HDFS](../../engines/table-engines/integrations/hdfs.md)-engine table. | diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index a6142cc210d..fdc13973d47 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -688,7 +688,7 @@ SOURCE(REDIS( table_name
id=10 SQL_QUERY - +
``` diff --git a/docs/ru/sql-reference/table-functions/index.md b/docs/ru/sql-reference/table-functions/index.md index 83225d54e60..31407dc250d 100644 --- a/docs/ru/sql-reference/table-functions/index.md +++ b/docs/ru/sql-reference/table-functions/index.md @@ -24,14 +24,16 @@ toc_title: "\u0412\u0432\u0435\u0434\u0435\u043D\u0438\u0435" | Функция | Описание | |-----------------------|---------------------------------------------------------------------------------------------------------------------------------------| -| [file](file.md) | Создаёт таблицу с движком [File](../../engines/table-engines/special/file.md). | -| [merge](merge.md) | Создаёт таблицу с движком [Merge](../../engines/table-engines/special/merge.md). | -| [numbers](numbers.md) | Создаёт таблицу с единственным столбцом, заполненным целыми числами. | -| [remote](remote.md) | Предоставляет доступ к удалённым серверам, не создавая таблицу с движком [Distributed](../../engines/table-engines/special/distributed.md). | -| [url](url.md) | Создаёт таблицу с движком [Url](../../engines/table-engines/special/url.md). | -| [mysql](mysql.md) | Создаёт таблицу с движком [MySQL](../../engines/table-engines/integrations/mysql.md). | -| [jdbc](jdbc.md) | Создаёт таблицу с дижком [JDBC](../../engines/table-engines/integrations/jdbc.md). | -| [odbc](odbc.md) | Создаёт таблицу с движком [ODBC](../../engines/table-engines/integrations/odbc.md). | -| [hdfs](hdfs.md) | Создаёт таблицу с движком [HDFS](../../engines/table-engines/integrations/hdfs.md). | +| [file](../../sql-reference/table-functions/file.md) | Создаёт таблицу с движком [File](../../engines/table-engines/special/file.md). | +| [merge](../../sql-reference/table-functions/merge.md) | Создаёт таблицу с движком [Merge](../../engines/table-engines/special/merge.md). | +| [numbers](../../sql-reference/table-functions/numbers.md) | Создаёт таблицу с единственным столбцом, заполненным целыми числами. 
| +| [remote](../../sql-reference/table-functions/remote.md) | Предоставляет доступ к удалённым серверам, не создавая таблицу с движком [Distributed](../../engines/table-engines/special/distributed.md). | +| [url](../../sql-reference/table-functions/url.md) | Создаёт таблицу с движком [Url](../../engines/table-engines/special/url.md). | +| [mysql](../../sql-reference/table-functions/mysql.md) | Создаёт таблицу с движком [MySQL](../../engines/table-engines/integrations/mysql.md). | +| [postgresql](../../sql-reference/table-functions/postgresql.md) | Создаёт таблицу с движком [PostgreSQL](../../engines/table-engines/integrations/postgresql.md). | +| [jdbc](../../sql-reference/table-functions/jdbc.md) | Создаёт таблицу с движком [JDBC](../../engines/table-engines/integrations/jdbc.md). | +| [odbc](../../sql-reference/table-functions/odbc.md) | Создаёт таблицу с движком [ODBC](../../engines/table-engines/integrations/odbc.md). | +| [hdfs](../../sql-reference/table-functions/hdfs.md) | Создаёт таблицу с движком [HDFS](../../engines/table-engines/integrations/hdfs.md). | +| [s3](../../sql-reference/table-functions/s3.md) | Создаёт таблицу с движком [S3](../../engines/table-engines/integrations/s3.md). 
| [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/) From a165089ee38c07a51ac6eb320f7d39c3e7811794 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Fri, 5 Mar 2021 22:00:44 +0300 Subject: [PATCH 098/333] Update odbc.md --- docs/en/sql-reference/table-functions/odbc.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/en/sql-reference/table-functions/odbc.md b/docs/en/sql-reference/table-functions/odbc.md index 38ca4d40d17..a8481fbfd68 100644 --- a/docs/en/sql-reference/table-functions/odbc.md +++ b/docs/en/sql-reference/table-functions/odbc.md @@ -102,5 +102,3 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') - [ODBC external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc) - [ODBC table engine](../../engines/table-engines/integrations/odbc.md). - -[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/jdbc/) From c1a96e977a6aebe9e85c8935668f3358941373d8 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 6 Mar 2021 23:53:21 +0300 Subject: [PATCH 099/333] Update kafka.md --- docs/en/engines/table-engines/integrations/kafka.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index 1b3aaa4b569..0ec50094a27 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -194,4 +194,4 @@ Example: - [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns) - [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) -[Original article](https://clickhouse.tech/docs/enen/engines/table-engines/integrations/kafka/) +[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/kafka/) From 
7876415b8ac0897522aef9a4b5747db6e147dd0c Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 6 Mar 2021 23:55:26 +0300 Subject: [PATCH 100/333] Update external-dicts-dict-sources.md --- .../external-dictionaries/external-dicts-dict-sources.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 5772992f418..b7129725820 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -723,6 +723,3 @@ Setting fields: - `table` – Name of the table. - `where` – The selection criteria. The syntax for conditions is the same as for `WHERE` clause in PostgreSQL, for example, `id > 10 AND id < 20`. Optional parameter. - `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). - - -[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/) From 9922d8e1024c18a13dd7a825defd0be4eef48d05 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 6 Mar 2021 23:56:16 +0300 Subject: [PATCH 101/333] Update index.md --- docs/en/sql-reference/table-functions/index.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/en/sql-reference/table-functions/index.md b/docs/en/sql-reference/table-functions/index.md index f51e58d6d67..16bf23db857 100644 --- a/docs/en/sql-reference/table-functions/index.md +++ b/docs/en/sql-reference/table-functions/index.md @@ -34,5 +34,3 @@ You can use table functions in: | [odbc](../../sql-reference/table-functions/odbc.md) | Creates a [ODBC](../../engines/table-engines/integrations/odbc.md)-engine table. 
| | [hdfs](../../sql-reference/table-functions/hdfs.md) | Creates a [HDFS](../../engines/table-engines/integrations/hdfs.md)-engine table. | | [s3](../../sql-reference/table-functions/s3.md) | Creates a [S3](../../engines/table-engines/integrations/s3.md)-engine table. | - -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/) From 08081fe965fcec94a7e18844f528e1dd12495521 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 7 Mar 2021 00:05:38 +0300 Subject: [PATCH 102/333] Update index.md --- .../en/sql-reference/table-functions/index.md | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/en/sql-reference/table-functions/index.md b/docs/en/sql-reference/table-functions/index.md index 16bf23db857..fef30c04c9d 100644 --- a/docs/en/sql-reference/table-functions/index.md +++ b/docs/en/sql-reference/table-functions/index.md @@ -23,14 +23,14 @@ You can use table functions in: | Function | Description | |-----------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------| -| [file](../../sql-reference/table-functions/file.md) | Creates a [File](../../engines/table-engines/special/file.md)-engine table. | -| [merge](../../sql-reference/table-functions/merge.md) | Creates a [Merge](../../engines/table-engines/special/merge.md)-engine table. | -| [numbers](../../sql-reference/table-functions/numbers.md) | Creates a table with a single column filled with integer numbers. | -| [remote](../../sql-reference/table-functions/remote.md) | Allows you to access remote servers without creating a [Distributed](../../engines/table-engines/special/distributed.md)-engine table. | -| [url](../../sql-reference/table-functions/url.md) | Creates a [Url](../../engines/table-engines/special/url.md)-engine table. 
| -| [mysql](../../sql-reference/table-functions/mysql.md) | Creates a [MySQL](../../engines/table-engines/integrations/mysql.md)-engine table. | -| [postgresql](../../sql-reference/table-functions/postgresql.md) | Creates a [PostgreSQL](../../engines/table-engines/integrations/postgresql.md)-engine table. | -| [jdbc](../../sql-reference/table-functions/jdbc.md) | Creates a [JDBC](../../engines/table-engines/integrations/jdbc.md)-engine table. | -| [odbc](../../sql-reference/table-functions/odbc.md) | Creates a [ODBC](../../engines/table-engines/integrations/odbc.md)-engine table. | -| [hdfs](../../sql-reference/table-functions/hdfs.md) | Creates a [HDFS](../../engines/table-engines/integrations/hdfs.md)-engine table. | -| [s3](../../sql-reference/table-functions/s3.md) | Creates a [S3](../../engines/table-engines/integrations/s3.md)-engine table. | +| [file](../../sql-reference/table-functions/file.md) | Creates a File-engine table. | +| [merge](../../sql-reference/table-functions/merge.md) | Creates a Merge-engine table. | +| [numbers](../../sql-reference/table-functions/numbers.md) | Creates a table with a single column filled with integer numbers. | +| [remote](../../sql-reference/table-functions/remote.md) | Allows you to access remote servers without creating a Distributed-engine table. | +| [url](../../sql-reference/table-functions/url.md) | Creates a URL-engine table. | +| [mysql](../../sql-reference/table-functions/mysql.md) | Creates a MySQL-engine table. | +| [postgresql](../../sql-reference/table-functions/postgresql.md) | Creates a PostgreSQL-engine table. | +| [jdbc](../../sql-reference/table-functions/jdbc.md) | Creates a JDBC-engine table. | +| [odbc](../../sql-reference/table-functions/odbc.md) | Creates a ODBC-engine table. | +| [hdfs](../../sql-reference/table-functions/hdfs.md) | Creates a HDFS-engine table. | +| [s3](../../sql-reference/table-functions/s3.md) | Creates a S3-engine table. 
| From 095ee81a4e8769daf0805418150c61b22ad262e7 Mon Sep 17 00:00:00 2001 From: George Date: Mon, 8 Mar 2021 19:40:05 +0300 Subject: [PATCH 103/333] Updated description --- docs/en/sql-reference/statements/detach.md | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index 0bd4f730364..19ff8f10ad3 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -15,7 +15,7 @@ DETACH TABLE|VIEW [IF EXISTS] [db.]name [PERMANENTLY] [ON CLUSTER cluster] Detaching does not delete the data or metadata for the table or view. If the table or view was not detached `PERMANENTLY`, on the next server launch the server will read the metadata and recall the table/view again. If the table or view was detached `PERMANENTLY`, there will be no automatic recall. -Whether the table was detached permanently or not, in both cases you can reattach it using the [ATTACH](../../sql-reference/statements/attach.md) query (with the exception of system tables, which do not have metadata stored for them). +Whether the table was detached permanently or not, in both cases you can reattach it using the [ATTACH](../../sql-reference/statements/attach.md). System log tables can be also attached back (e.g. `query_log`, `text_log`, etc). Other system tables can't be reattached. On the next server launch the server will recall those tables again. `ATTACH MATERIALIZED VIEW` doesn't work with short syntax (without `SELECT`), but you can attach it using the `ATTACH TABLE` query. 
@@ -23,20 +23,4 @@ Note that you can not detach permanently the table which is already detached (te Also you can not [DROP](../../sql-reference/statements/drop.md#drop-table) the detached table, or [CREATE TABLE](../../sql-reference/statements/create/table.md) with the same name as detached permanently, or replace it with the other table with [RENAME TABLE](../../sql-reference/statements/rename.md) query. -Similarly, a “detached” table can be re-attached using the [ATTACH](../../sql-reference/statements/attach.md) query (with the exception of system tables, which do not have metadata stored for them). - -## DETACH PERMANENTLY {#detach-permanently} - -Deletes information about `name` table or view from the server. Permanently detached tables won't automatically reappear after the server restart. - -Syntax: - -``` sql -DETACH TABLE/VIEW [IF EXISTS] [db.]name PERMAMENTLY [ON CLUSTER cluster] -``` - -This statement does not delete the table’s data or metadata. - -Permanently detached table or view can be reattached with [ATTACH](../../sql-reference/statements/attach.md) query and can be shown with [SHOW CREATE TABLE](../../sql-reference/statements/show.md#show-create-table) query. 
- [Original article](https://clickhouse.tech/docs/en/sql-reference/statements/detach/) From d80c2cef0668f79c0c9d6f421a333233ba0e9da4 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 9 Mar 2021 11:45:41 +0300 Subject: [PATCH 104/333] Slightly better --- src/Storages/StorageReplicatedMergeTree.cpp | 36 +++++++++------------ 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index ddc63793640..bfa4efbd739 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -528,6 +528,7 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas( void StorageReplicatedMergeTree::createNewZooKeeperNodes() { + auto storage_settings = getSettings(); auto zookeeper = getZooKeeper(); /// Working with quorum. @@ -543,6 +544,14 @@ void StorageReplicatedMergeTree::createNewZooKeeperNodes() /// Mutations zookeeper->createIfNotExists(zookeeper_path + "/mutations", String()); zookeeper->createIfNotExists(replica_path + "/mutation_pointer", String()); + + /// Nodes for zero-copy S3 replication + if (storage_settings->allow_s3_zero_copy_replication) + { + zookeeper->createIfNotExists(zookeeper_path + "/zero_copy_s3", String()); + zookeeper->createIfNotExists(zookeeper_path + "/zero_copy_s3/merged", String()); + zookeeper->createIfNotExists(zookeeper_path + "/zero_copy_s3/shared", String()); + } } @@ -1541,27 +1550,12 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) auto zookeeper = getZooKeeper(); String zookeeper_node = zookeeper_path + "/zero_copy_s3/merged/" + entry.new_part_name; - /// In rare case other replica can remove path between createAncestors and tryCreate - /// So we make up to 5 attempts to make a lock - for (int attempts = 5; attempts > 0; --attempts) - { - try - { - zookeeper->createAncestors(zookeeper_node); - auto code = zookeeper->tryCreate(zookeeper_node, "lock", 
zkutil::CreateMode::Ephemeral); - /// Someone else created or started create this merge - if (code == Coordination::Error::ZNODEEXISTS) - return false; - if (code != Coordination::Error::ZNONODE) - break; - } - catch (const zkutil::KeeperException & e) - { - if (e.code == Coordination::Error::ZNONODE) - continue; - throw; - } - } + auto code = zookeeper->tryCreate(zookeeper_node, "lock", zkutil::CreateMode::Ephemeral); + + /// Someone else created or started create this merge, + /// so will try to fetch. + if (code == Coordination::Error::ZNODEEXISTS) + return false; } } From 0188c195b9317e974718ab3f52ad1646b47350a4 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 9 Mar 2021 20:01:49 +0300 Subject: [PATCH 105/333] Update settings.md --- docs/ru/operations/settings/settings.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index deda437e933..bfa679f461d 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1088,9 +1088,9 @@ load_balancing = round_robin Максимальное кол-во реплик для каждого шарда во время исполениня запроса из distributed. В некоторых случаях, это может привести к более быстрому исполнению запроса за счет выполнения на большем кол-ве серверов. Эта настройка полезна только для реплицируемых таблиц созданных с использованием SAMPLING KEY выражения. 
Есть случаи когда производительность не улучшится или даже ухудшится: -- позиция ключа семплирования в ключе партицирования не позволяет делать эффективные сканирования по диапозонам -- добавление семплирующего ключа к таблице, делает фильтрацию других колонок менее эффективной -- выражение используемое для вычисления ключа семплирования требует больших вычислительных затрат +- Позиция ключа семплирования в ключе партицирования не позволяет делать эффективные сканирования по диапозонам +- Добавление семплирующего ключа к таблице, делает фильтрацию других колонок менее эффективной +- Выражение используемое для вычисления ключа семплирования требует больших вычислительных затрат - Распределение сетевых задержек внутри кластера имеет длинный хвост, так что запрос большего количества серверов может увеличить общую задержку запроса Кроме того, эта настройка может привести к некорректным результатам когда используются join или подзапросы и все таблицы не соответсвуют определенным условиям. Подробнее [Распределенные подзапросы и max_parallel_replicas](../../sql-reference/operators/in.md#max_parallel_replica-subqueries) for more details. 
From 265d2939340df643613f86c6325f893f76445b97 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Tue, 9 Mar 2021 17:34:28 +0300 Subject: [PATCH 106/333] Use 'merge on single replica' option instead of zookeeper lock --- src/Storages/MergeTree/MergeTreeSettings.h | 1 + ...ReplicatedMergeTreeMergeStrategyPicker.cpp | 28 ++++++++++++++++--- .../ReplicatedMergeTreeMergeStrategyPicker.h | 5 ++++ src/Storages/StorageReplicatedMergeTree.cpp | 27 ++++++++++-------- 4 files changed, 45 insertions(+), 16 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 89a9af373e6..c9685f68793 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -71,6 +71,7 @@ struct Settings; M(Seconds, prefer_fetch_merged_part_time_threshold, 3600, "If time passed after replication log entry creation exceeds this threshold and sum size of parts is greater than \"prefer_fetch_merged_part_size_threshold\", prefer fetching merged part from replica instead of doing merge locally. To speed up very long merges.", 0) \ M(UInt64, prefer_fetch_merged_part_size_threshold, 10ULL * 1024 * 1024 * 1024, "If sum size of parts exceeds this threshold and time passed after replication log entry creation is greater than \"prefer_fetch_merged_part_time_threshold\", prefer fetching merged part from replica instead of doing merge locally. To speed up very long merges.", 0) \ M(Seconds, execute_merges_on_single_replica_time_threshold, 0, "When greater than zero only a single replica starts the merge immediately, others wait up to that amount of time to download the result instead of doing merges locally. 
If the chosen replica doesn't finish the merge during that amount of time, fallback to standard behavior happens.", 0) \ + M(Seconds, s3_execute_merges_on_single_replica_time_threshold, 3 * 60 * 60, "When greater than zero only a single replica starts the merge immediately when the merged part is on S3 storage and 'allow_s3_zero_copy_replication' is enabled.", 0) \ M(Seconds, try_fetch_recompressed_part_timeout, 7200, "Recompression works slow in most cases, so we don't start merge with recompression until this timeout and trying to fetch recompressed part from replica which assigned this merge with recompression.", 0) \ M(Bool, always_fetch_merged_part, 0, "If true, replica never merge parts and always download merged parts from other replicas.", 0) \ M(UInt64, max_suspicious_broken_parts, 10, "Max broken parts, if more - deny automatic deletion.", 0) \ diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp index d90183abd95..61c9126ef04 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp @@ -56,6 +56,17 @@ bool ReplicatedMergeTreeMergeStrategyPicker::shouldMergeOnSingleReplica(const Re } +bool ReplicatedMergeTreeMergeStrategyPicker::shouldMergeOnSingleReplicaS3Shared(const ReplicatedMergeTreeLogEntryData & entry) const +{ + time_t threshold = s3_execute_merges_on_single_replica_time_threshold; + return ( + threshold > 0 /// feature turned on + && entry.type == ReplicatedMergeTreeLogEntry::MERGE_PARTS /// it is a merge log entry + && entry.create_time + threshold > time(nullptr) /// not too much time waited + ); +} + + /// that will return the same replica name for ReplicatedMergeTreeLogEntry on all the replicas (if the replica set is the same). /// that way each replica knows who is responsible for doing a certain merge. 
@@ -90,18 +101,23 @@ std::optional ReplicatedMergeTreeMergeStrategyPicker::pickReplicaToExecu void ReplicatedMergeTreeMergeStrategyPicker::refreshState() { auto threshold = storage.getSettings()->execute_merges_on_single_replica_time_threshold.totalSeconds(); + auto threshold_s3 = 0; + if (storage.getSettings()->allow_s3_zero_copy_replication) + threshold_s3 = storage.getSettings()->s3_execute_merges_on_single_replica_time_threshold.totalSeconds(); if (threshold == 0) - { /// we can reset the settings w/o lock (it's atomic) execute_merges_on_single_replica_time_threshold = threshold; + if (threshold_s3 == 0) + s3_execute_merges_on_single_replica_time_threshold = threshold_s3; + if (threshold == 0 && threshold_s3 == 0) return; - } auto now = time(nullptr); /// the setting was already enabled, and last state refresh was done recently - if (execute_merges_on_single_replica_time_threshold != 0 + if ((execute_merges_on_single_replica_time_threshold != 0 + || s3_execute_merges_on_single_replica_time_threshold != 0) && now - last_refresh_time < REFRESH_STATE_MINIMUM_INTERVAL_SECONDS) return; @@ -130,11 +146,15 @@ void ReplicatedMergeTreeMergeStrategyPicker::refreshState() LOG_WARNING(storage.log, "Can't find current replica in the active replicas list, or too few active replicas to use execute_merges_on_single_replica_time_threshold!"); /// we can reset the settings w/o lock (it's atomic) execute_merges_on_single_replica_time_threshold = 0; + s3_execute_merges_on_single_replica_time_threshold = 0; return; } std::lock_guard lock(mutex); - execute_merges_on_single_replica_time_threshold = threshold; + if (threshold != 0) /// Zeros already reset + execute_merges_on_single_replica_time_threshold = threshold; + if (threshold_s3 != 0) + s3_execute_merges_on_single_replica_time_threshold = threshold_s3; last_refresh_time = now; current_replica_index = current_replica_index_tmp; active_replicas = active_replicas_tmp; diff --git 
a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.h b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.h index 02a760d1ace..8adf206676a 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.h @@ -52,6 +52,10 @@ public: /// and we may need to do a fetch (or postpone) instead of merge bool shouldMergeOnSingleReplica(const ReplicatedMergeTreeLogEntryData & entry) const; + /// return true if s3_execute_merges_on_single_replica_time_threshold feature is active + /// and we may need to do a fetch (or postpone) instead of merge + bool shouldMergeOnSingleReplicaS3Shared(const ReplicatedMergeTreeLogEntryData & entry) const; + /// returns the replica name /// and it's not current replica should do the merge /// used in shouldExecuteLogEntry and in tryExecuteMerge @@ -68,6 +72,7 @@ private: uint64_t getEntryHash(const ReplicatedMergeTreeLogEntryData & entry) const; std::atomic execute_merges_on_single_replica_time_threshold = 0; + std::atomic s3_execute_merges_on_single_replica_time_threshold = 0; std::atomic last_refresh_time = 0; std::mutex mutex; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index bfa4efbd739..5c0c27ec354 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -528,7 +528,6 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas( void StorageReplicatedMergeTree::createNewZooKeeperNodes() { - auto storage_settings = getSettings(); auto zookeeper = getZooKeeper(); /// Working with quorum. 
@@ -546,10 +545,9 @@ void StorageReplicatedMergeTree::createNewZooKeeperNodes() zookeeper->createIfNotExists(replica_path + "/mutation_pointer", String()); /// Nodes for zero-copy S3 replication - if (storage_settings->allow_s3_zero_copy_replication) + if (storage_settings.get()->allow_s3_zero_copy_replication) { zookeeper->createIfNotExists(zookeeper_path + "/zero_copy_s3", String()); - zookeeper->createIfNotExists(zookeeper_path + "/zero_copy_s3/merged", String()); zookeeper->createIfNotExists(zookeeper_path + "/zero_copy_s3/shared", String()); } } @@ -1459,9 +1457,12 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) /// In some use cases merging can be more expensive than fetching /// and it may be better to spread merges tasks across the replicas /// instead of doing exactly the same merge cluster-wise + std::optional replica_to_execute_merge; + bool replica_to_execute_merge_picked = false; if (merge_strategy_picker.shouldMergeOnSingleReplica(entry)) { - auto replica_to_execute_merge = merge_strategy_picker.pickReplicaToExecuteMerge(entry); + replica_to_execute_merge = merge_strategy_picker.pickReplicaToExecuteMerge(entry); + replica_to_execute_merge_picked = true; if (replica_to_execute_merge) { @@ -1547,15 +1548,17 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) auto disk = reserved_space->getDisk(); if (disk->getType() == DB::DiskType::Type::S3) { - auto zookeeper = getZooKeeper(); - String zookeeper_node = zookeeper_path + "/zero_copy_s3/merged/" + entry.new_part_name; + if (merge_strategy_picker.shouldMergeOnSingleReplicaS3Shared(entry)) + { + if (!replica_to_execute_merge_picked) + replica_to_execute_merge = merge_strategy_picker.pickReplicaToExecuteMerge(entry); - auto code = zookeeper->tryCreate(zookeeper_node, "lock", zkutil::CreateMode::Ephemeral); - - /// Someone else created or started create this merge, - /// so will try to fetch. 
- if (code == Coordination::Error::ZNODEEXISTS) - return false; + if (replica_to_execute_merge) + { + LOG_DEBUG(log, "Prefer fetching part {} from replica {} due s3_execute_merges_on_single_replica_time_threshold", entry.new_part_name, replica_to_execute_merge.value()); + return false; + } + } } } From aff13c0c52b5e39fe104957e26bf85307302bea0 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Tue, 9 Mar 2021 20:49:50 +0300 Subject: [PATCH 107/333] Make method StorageReplicatedMergeTree::fetchExistsPart --- src/Storages/StorageReplicatedMergeTree.cpp | 179 +++++++++++++++----- src/Storages/StorageReplicatedMergeTree.h | 16 +- 2 files changed, 146 insertions(+), 49 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 5c0c27ec354..feea94c3cd6 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1988,8 +1988,7 @@ bool StorageReplicatedMergeTree::executeFetchShared( try { - if (!fetchPart(new_part_name, metadata_snapshot, zookeeper_path + "/replicas/" + source_replica, false, 0, - nullptr, true, disk, path)) + if (!fetchExistsPart(new_part_name, metadata_snapshot, zookeeper_path + "/replicas/" + source_replica, disk, path)) return false; } catch (Exception & e) @@ -3546,8 +3545,7 @@ bool StorageReplicatedMergeTree::partIsLastQuorumPart(const MergeTreePartInfo & } bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const StorageMetadataPtr & metadata_snapshot, - const String & source_replica_path, bool to_detached, size_t quorum, zkutil::ZooKeeper::Ptr zookeeper_, bool replace_exists, - DiskPtr replaced_disk, String replaced_part_path) + const String & source_replica_path, bool to_detached, size_t quorum, zkutil::ZooKeeper::Ptr zookeeper_) { auto zookeeper = zookeeper_ ? 
zookeeper_ : getZooKeeper(); const auto part_info = MergeTreePartInfo::fromPartName(part_name, format_version); @@ -3598,7 +3596,6 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora DataPartPtr part_to_clone; - if (!replace_exists) { /// If the desired part is a result of a part mutation, try to find the source part and compare /// its checksums to the checksums of the desired part. If they match, we can just clone the local part. @@ -3658,8 +3655,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora return fetcher.fetchPart( metadata_snapshot, part_name, source_replica_path, address.host, address.replication_port, - timeouts, user_password.first, user_password.second, interserver_scheme, to_detached, "", true, - replace_exists ? replaced_disk : nullptr); + timeouts, user_password.first, user_password.second, interserver_scheme, to_detached, "", true); }; } @@ -3669,51 +3665,41 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora if (!to_detached) { - if (replace_exists) + Transaction transaction(*this); + renameTempPartAndReplace(part, nullptr, &transaction); + + replaced_parts = checkPartChecksumsAndCommit(transaction, part); + + /** If a quorum is tracked for this part, you must update it. + * If you do not have time, in case of losing the session, when you restart the server - see the `ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart` method. 
+ */ + if (quorum) { - if (part->volume->getDisk()->getName() != replaced_disk->getName()) - throw Exception("Part " + part->name + " fetched on wrong disk " + part->volume->getDisk()->getName(), ErrorCodes::LOGICAL_ERROR); - replaced_disk->removeFileIfExists(replaced_part_path); - replaced_disk->moveDirectory(part->getFullRelativePath(), replaced_part_path); + /// Check if this quorum insert is parallel or not + if (zookeeper->exists(zookeeper_path + "/quorum/parallel/" + part_name)) + updateQuorum(part_name, true); + else if (zookeeper->exists(zookeeper_path + "/quorum/status")) + updateQuorum(part_name, false); } - else + + /// merged parts that are still inserted with quorum. if it only contains one block, it hasn't been merged before + if (part_info.level != 0 || part_info.mutation != 0) { - Transaction transaction(*this); - renameTempPartAndReplace(part, nullptr, &transaction); - - replaced_parts = checkPartChecksumsAndCommit(transaction, part); - - /** If a quorum is tracked for this part, you must update it. - * If you do not have time, in case of losing the session, when you restart the server - see the `ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart` method. - */ - if (quorum) + Strings quorum_parts = zookeeper->getChildren(zookeeper_path + "/quorum/parallel"); + for (const String & quorum_part : quorum_parts) { - /// Check if this quorum insert is parallel or not - if (zookeeper->exists(zookeeper_path + "/quorum/parallel/" + part_name)) - updateQuorum(part_name, true); - else if (zookeeper->exists(zookeeper_path + "/quorum/status")) - updateQuorum(part_name, false); + auto quorum_part_info = MergeTreePartInfo::fromPartName(quorum_part, format_version); + if (part_info.contains(quorum_part_info)) + updateQuorum(quorum_part, true); } + } - /// merged parts that are still inserted with quorum. 
if it only contains one block, it hasn't been merged before - if (part_info.level != 0 || part_info.mutation != 0) - { - Strings quorum_parts = zookeeper->getChildren(zookeeper_path + "/quorum/parallel"); - for (const String & quorum_part : quorum_parts) - { - auto quorum_part_info = MergeTreePartInfo::fromPartName(quorum_part, format_version); - if (part_info.contains(quorum_part_info)) - updateQuorum(quorum_part, true); - } - } + merge_selecting_task->schedule(); - merge_selecting_task->schedule(); - - for (const auto & replaced_part : replaced_parts) - { - LOG_DEBUG(log, "Part {} is rendered obsolete by fetching part {}", replaced_part->name, part_name); - ProfileEvents::increment(ProfileEvents::ObsoleteReplicatedParts); - } + for (const auto & replaced_part : replaced_parts) + { + LOG_DEBUG(log, "Part {} is rendered obsolete by fetching part {}", replaced_part->name, part_name); + ProfileEvents::increment(ProfileEvents::ObsoleteReplicatedParts); } write_part_log({}); @@ -3753,6 +3739,109 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora } +bool StorageReplicatedMergeTree::fetchExistsPart(const String & part_name, const StorageMetadataPtr & metadata_snapshot, + const String & source_replica_path, DiskPtr replaced_disk, String replaced_part_path) +{ + auto zookeeper = getZooKeeper(); + const auto part_info = MergeTreePartInfo::fromPartName(part_name, format_version); + + if (auto part = getPartIfExists(part_info, {IMergeTreeDataPart::State::Outdated, IMergeTreeDataPart::State::Deleting})) + { + LOG_DEBUG(log, "Part {} should be deleted after previous attempt before fetch", part->name); + /// Force immediate parts cleanup to delete the part that was left from the previous fetch attempt. 
+ cleanup_thread.wakeup(); + return false; + } + + { + std::lock_guard lock(currently_fetching_parts_mutex); + if (!currently_fetching_parts.insert(part_name).second) + { + LOG_DEBUG(log, "Part {} is already fetching right now", part_name); + return false; + } + } + + if (part_name != "foo") + return false; + + SCOPE_EXIT + ({ + std::lock_guard lock(currently_fetching_parts_mutex); + currently_fetching_parts.erase(part_name); + }); + + LOG_DEBUG(log, "Fetching part {} from {}", part_name, source_replica_path); + + TableLockHolder table_lock_holder = lockForShare(RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); + + /// Logging + Stopwatch stopwatch; + MutableDataPartPtr part; + DataPartsVector replaced_parts; + + auto write_part_log = [&] (const ExecutionStatus & execution_status) + { + writePartLog( + PartLogElement::DOWNLOAD_PART, execution_status, stopwatch.elapsed(), + part_name, part, replaced_parts, nullptr); + }; + + std::function get_part; + + { + ReplicatedMergeTreeAddress address(zookeeper->get(source_replica_path + "/host")); + auto timeouts = ConnectionTimeouts::getHTTPTimeouts(global_context); + auto user_password = global_context.getInterserverCredentials(); + String interserver_scheme = global_context.getInterserverScheme(); + + get_part = [&, address, timeouts, user_password, interserver_scheme]() + { + if (interserver_scheme != address.scheme) + throw Exception("Interserver schemes are different: '" + interserver_scheme + + "' != '" + address.scheme + "', can't fetch part from " + address.host, + ErrorCodes::INTERSERVER_SCHEME_DOESNT_MATCH); + + return fetcher.fetchPart( + metadata_snapshot, part_name, source_replica_path, + address.host, address.replication_port, + timeouts, user_password.first, user_password.second, interserver_scheme, false, "", true, + replaced_disk); + }; + } + + try + { + part = get_part(); + + if (part->volume->getDisk()->getName() != replaced_disk->getName()) + throw Exception("Part " + 
part->name + " fetched on wrong disk " + part->volume->getDisk()->getName(), ErrorCodes::LOGICAL_ERROR); + replaced_disk->removeFileIfExists(replaced_part_path); + replaced_disk->moveDirectory(part->getFullRelativePath(), replaced_part_path); + } + catch (const Exception & e) + { + /// The same part is being written right now (but probably it's not committed yet). + /// We will check the need for fetch later. + if (e.code() == ErrorCodes::DIRECTORY_ALREADY_EXISTS) + return false; + + throw; + } + catch (...) + { + write_part_log(ExecutionStatus::fromCurrentException()); + throw; + } + + ProfileEvents::increment(ProfileEvents::ReplicatedPartFetches); + + LOG_DEBUG(log, "Fetched part {} from {}", part_name, source_replica_path); + + return true; +} + + void StorageReplicatedMergeTree::startup() { if (is_readonly) diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index e3d7e6b2556..2d1b50ede4c 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -532,10 +532,18 @@ private: const String & replica_path, bool to_detached, size_t quorum, - zkutil::ZooKeeper::Ptr zookeeper_ = nullptr, - bool replace_exists = false, - DiskPtr replaced_disk = nullptr, - String replaced_part_path = ""); + zkutil::ZooKeeper::Ptr zookeeper_ = nullptr); + + /** Download the specified part from the specified replica. + * Used for replace local part on the same s3-shared part in hybrid storage. + * Returns false if part is already fetching right now. 
+ */ + bool fetchExistsPart( + const String & part_name, + const StorageMetadataPtr & metadata_snapshot, + const String & replica_path, + DiskPtr replaced_disk, + String replaced_part_path); /// Required only to avoid races between executeLogEntry and fetchPartition std::unordered_set currently_fetching_parts; From 69f516cd47ae2784e1ae59c44db37ebf9e604730 Mon Sep 17 00:00:00 2001 From: fuwhu Date: Wed, 10 Mar 2021 10:10:25 +0800 Subject: [PATCH 108/333] Refine MergeTreeData::loadDataParts to not parse format version file and detached directory --- src/Storages/MergeTree/MergeTreeData.cpp | 18 +++++++++--------- src/Storages/MergeTree/MergeTreeData.h | 3 +++ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index d9e24581c0c..a8a34523590 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -202,8 +202,8 @@ MergeTreeData::MergeTreeData( for (const auto & [path, disk] : getRelativeDataPathsWithDisks()) { disk->createDirectories(path); - disk->createDirectories(path + "detached"); - auto current_version_file_path = path + "format_version.txt"; + disk->createDirectories(path + MergeTreeData::DETACHED_DIR_NAME); + auto current_version_file_path = path + MergeTreeData::FORMAT_VERSION_FILE_NAME; if (disk->exists(current_version_file_path)) { if (!version_file.first.empty()) @@ -217,7 +217,7 @@ MergeTreeData::MergeTreeData( /// If not choose any if (version_file.first.empty()) - version_file = {relative_data_path + "format_version.txt", getStoragePolicy()->getAnyDisk()}; + version_file = {relative_data_path + MergeTreeData::FORMAT_VERSION_FILE_NAME, getStoragePolicy()->getAnyDisk()}; bool version_file_exists = version_file.second->exists(version_file.first); @@ -725,8 +725,8 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks) auto disk_ptr = *disk_it; for (auto it = disk_ptr->iterateDirectory(relative_data_path); 
it->isValid(); it->next()) { - /// Skip temporary directories. - if (startsWith(it->name(), "tmp")) + /// Skip temporary directories, file 'format_version.txt' and directory 'detached'. + if (startsWith(it->name(), "tmp") || it->name() == MergeTreeData::FORMAT_VERSION_FILE_NAME || it->name() == MergeTreeData::DETACHED_DIR_NAME) continue; if (!startsWith(it->name(), MergeTreeWriteAheadLog::WAL_FILE_NAME)) @@ -1318,8 +1318,8 @@ void MergeTreeData::dropIfEmpty() for (const auto & [path, disk] : getRelativeDataPathsWithDisks()) { /// Non recursive, exception is thrown if there are more files. - disk->removeFile(path + "format_version.txt"); - disk->removeDirectory(path + "detached"); + disk->removeFile(path + MergeTreeData::FORMAT_VERSION_FILE_NAME); + disk->removeDirectory(path + MergeTreeData::DETACHED_DIR_NAME); disk->removeDirectory(path); } } @@ -1793,7 +1793,7 @@ void MergeTreeData::changeSettings( { auto disk = new_storage_policy->getDiskByName(disk_name); disk->createDirectories(relative_data_path); - disk->createDirectories(relative_data_path + "detached"); + disk->createDirectories(relative_data_path + MergeTreeData::DETACHED_DIR_NAME); } /// FIXME how would that be done while reloading configuration??? 
@@ -3064,7 +3064,7 @@ MergeTreeData::getDetachedParts() const for (const auto & [path, disk] : getRelativeDataPathsWithDisks()) { - for (auto it = disk->iterateDirectory(path + "detached"); it->isValid(); it->next()) + for (auto it = disk->iterateDirectory(path + MergeTreeData::DETACHED_DIR_NAME); it->isValid(); it->next()) { res.emplace_back(); auto & part = res.back(); diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index f03f3f1dd8c..8f0b223957a 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -116,6 +116,9 @@ public: using DataPartStates = std::initializer_list; using DataPartStateVector = std::vector; + constexpr static auto FORMAT_VERSION_FILE_NAME = "format_version.txt"; + constexpr static auto DETACHED_DIR_NAME = "detached"; + /// Auxiliary structure for index comparison. Keep in mind lifetime of MergeTreePartInfo. struct DataPartStateAndInfo { From 61d40c3600ba6a1c6d6c0cf4919a3cdaebb3a31f Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 9 Mar 2021 22:00:38 +0300 Subject: [PATCH 109/333] Fix optimize_skip_unused_shards for zero shards case v2: move check to the beginning of the StorageDistributed::read() --- src/Storages/StorageDistributed.cpp | 19 +++++++++++++++++++ ...e_skip_unused_shards_zero_shards.reference | 0 ...ptimize_skip_unused_shards_zero_shards.sql | 2 ++ .../queries/0_stateless/arcadia_skip_list.txt | 1 + 4 files changed, 22 insertions(+) create mode 100644 tests/queries/0_stateless/01759_optimize_skip_unused_shards_zero_shards.reference create mode 100644 tests/queries/0_stateless/01759_optimize_skip_unused_shards_zero_shards.sql diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 039cf63eca2..3a33e881611 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -48,6 +48,9 @@ #include #include +#include +#include + #include #include @@ -83,6 +86,7 @@ namespace DB 
namespace ErrorCodes { + extern const int LOGICAL_ERROR; extern const int NOT_IMPLEMENTED; extern const int STORAGE_REQUIRES_PARAMETER; extern const int BAD_ARGUMENTS; @@ -532,6 +536,17 @@ void StorageDistributed::read( Block header = InterpreterSelectQuery(query_info.query, context, SelectQueryOptions(processed_stage)).getSampleBlock(); + /// Return directly (with correct header) if no shard to query. + if (query_info.cluster->getShardsInfo().empty()) + { + Pipe pipe(std::make_shared(header)); + auto read_from_pipe = std::make_unique(std::move(pipe)); + read_from_pipe->setStepDescription("Read from NullSource (Distributed)"); + query_plan.addStep(std::move(read_from_pipe)); + + return; + } + const Scalars & scalars = context.hasQueryContext() ? context.getQueryContext().getScalars() : Scalars{}; bool has_virtual_shard_num_column = std::find(column_names.begin(), column_names.end(), "_shard_num") != column_names.end(); @@ -546,6 +561,10 @@ void StorageDistributed::read( ClusterProxy::executeQuery(query_plan, select_stream_factory, log, modified_query_ast, context, query_info); + + /// This is a bug, it is possible only when there is no shards to query, and this is handled earlier. 
+ if (!query_plan.isInitialized()) + throw Exception("Pipeline is not initialized", ErrorCodes::LOGICAL_ERROR); } diff --git a/tests/queries/0_stateless/01759_optimize_skip_unused_shards_zero_shards.reference b/tests/queries/0_stateless/01759_optimize_skip_unused_shards_zero_shards.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01759_optimize_skip_unused_shards_zero_shards.sql b/tests/queries/0_stateless/01759_optimize_skip_unused_shards_zero_shards.sql new file mode 100644 index 00000000000..b95d640ca1a --- /dev/null +++ b/tests/queries/0_stateless/01759_optimize_skip_unused_shards_zero_shards.sql @@ -0,0 +1,2 @@ +create table dist_01756 (dummy UInt8) ENGINE = Distributed('test_cluster_two_shards', 'system', 'one', dummy); +select ignore(1), * from dist_01756 where 0 settings optimize_skip_unused_shards=1, force_optimize_skip_unused_shards=1 diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt index 6926f16e027..a85ddba4752 100644 --- a/tests/queries/0_stateless/arcadia_skip_list.txt +++ b/tests/queries/0_stateless/arcadia_skip_list.txt @@ -218,3 +218,4 @@ 01682_cache_dictionary_complex_key 01684_ssd_cache_dictionary_simple_key 01685_ssd_cache_dictionary_complex_key +01759_optimize_skip_unused_shards_zero_shards From 592dbb8a512622136aa90049e7d1543c80fb76bc Mon Sep 17 00:00:00 2001 From: George Date: Wed, 10 Mar 2021 12:56:13 +0300 Subject: [PATCH 110/333] Added example --- docs/en/sql-reference/statements/detach.md | 23 ++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index 19ff8f10ad3..f3e2f88ffc3 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -23,4 +23,27 @@ Note that you can not detach permanently the table which is already detached (te Also you can not 
[DROP](../../sql-reference/statements/drop.md#drop-table) the detached table, or [CREATE TABLE](../../sql-reference/statements/create/table.md) with the same name as detached permanently, or replace it with the other table with [RENAME TABLE](../../sql-reference/statements/rename.md) query. +**Example** + +Query: + +``` sql +CREATE TABLE test ENGINE = Log AS SELECT * FROM numbers(10); + +DETACH TABLE test; + +SELECT * FROM TEST; +``` + +Result: + +``` text +Ok. + +Ok. + +Received exception from server (version 21.3.1): +Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.TEST doesn't exist. +``` + [Original article](https://clickhouse.tech/docs/en/sql-reference/statements/detach/) From a8d78f2916cfe6f8ffa7ddaac0df677e4b3c817a Mon Sep 17 00:00:00 2001 From: George Date: Wed, 10 Mar 2021 13:53:42 +0300 Subject: [PATCH 111/333] translated column.md --- .../en/sql-reference/statements/alter/column.md | 2 +- .../ru/sql-reference/statements/alter/column.md | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index 16aa266ebf9..030f1dd92dd 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -191,7 +191,7 @@ Renames an existing column. 
Syntax: ```sql -ALTER TABLE table_name RENAME COLUMN column_name TO new_column_name; +ALTER TABLE table_name RENAME COLUMN column_name TO new_column_name ``` **Example** diff --git a/docs/ru/sql-reference/statements/alter/column.md b/docs/ru/sql-reference/statements/alter/column.md index 7a394e2f684..850b415e268 100644 --- a/docs/ru/sql-reference/statements/alter/column.md +++ b/docs/ru/sql-reference/statements/alter/column.md @@ -13,6 +13,7 @@ toc_title: "\u041c\u0430\u043d\u0438\u043f\u0443\u043b\u044f\u0446\u0438\u0438\u - [COMMENT COLUMN](#alter_comment-column) — добавляет комментарий к столбцу; - [MODIFY COLUMN](#alter_modify-column) — изменяет тип столбца, выражение для значения по умолчанию и TTL. - [MODIFY COLUMN REMOVE](#modify-remove) — удаляет какое-либо из свойств столбца. +- [RENAME COLUMN](#alter_rename-column) — переименовывает существующий столбец. Подробное описание для каждого действия приведено ниже. @@ -158,6 +159,22 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL; - [REMOVE TTL](ttl.md). +## RENAME COLUMN {#alter_rename-column} + +Переименовывает существующий столбец. + +Синтаксис: + +```sql +ALTER TABLE table_name RENAME COLUMN column_name TO new_column_name +``` + +**Пример** + +```sql +ALTER TABLE table_with_ttl RENAME COLUMN column_ttl TO column_ttl_new; +``` + ## Ограничения запроса ALTER {#ogranicheniia-zaprosa-alter} Запрос `ALTER` позволяет создавать и удалять отдельные элементы (столбцы) вложенных структур данных, но не вложенные структуры данных целиком. Для добавления вложенной структуры данных, вы можете добавить столбцы с именем вида `name.nested_name` и типом `Array(T)` - вложенная структура данных полностью эквивалентна нескольким столбцам-массивам с именем, имеющим одинаковый префикс до точки. 
From deb0b83a12ea4d636931325c7dbb4a13fca96ea0 Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Wed, 10 Mar 2021 14:08:49 +0300 Subject: [PATCH 112/333] Fix refreshState logic --- .../MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp index 61c9126ef04..65da6080e86 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp @@ -116,8 +116,8 @@ void ReplicatedMergeTreeMergeStrategyPicker::refreshState() auto now = time(nullptr); /// the setting was already enabled, and last state refresh was done recently - if ((execute_merges_on_single_replica_time_threshold != 0 - || s3_execute_merges_on_single_replica_time_threshold != 0) + if (((threshold != 0 && execute_merges_on_single_replica_time_threshold != 0) + || (threshold_s3 != 0 && s3_execute_merges_on_single_replica_time_threshold != 0)) && now - last_refresh_time < REFRESH_STATE_MINIMUM_INTERVAL_SECONDS) return; From 2da04f872cfe9e08761137199d0ee2feceb1fcc3 Mon Sep 17 00:00:00 2001 From: George Date: Wed, 10 Mar 2021 14:27:40 +0300 Subject: [PATCH 113/333] Updated and translated Index.md --- .../operations/external-authenticators/index.md | 2 ++ .../operations/external-authenticators/index.md | 15 +++++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 docs/ru/operations/external-authenticators/index.md diff --git a/docs/en/operations/external-authenticators/index.md b/docs/en/operations/external-authenticators/index.md index 95f80f192f5..fe4e6a42974 100644 --- a/docs/en/operations/external-authenticators/index.md +++ b/docs/en/operations/external-authenticators/index.md @@ -11,3 +11,5 @@ ClickHouse supports authenticating and managing users using external services. 
The following external authenticators and directories are supported: - [LDAP](./ldap.md#external-authenticators-ldap) [Authenticator](./ldap.md#ldap-external-authenticator) and [Directory](./ldap.md#ldap-external-user-directory) + +[Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/index.md) diff --git a/docs/ru/operations/external-authenticators/index.md b/docs/ru/operations/external-authenticators/index.md new file mode 100644 index 00000000000..db5c89a3d66 --- /dev/null +++ b/docs/ru/operations/external-authenticators/index.md @@ -0,0 +1,15 @@ +--- +toc_folder_title: \u0412\u043d\u0435\u0448\u043d\u0438\u0435\u0020\u0430\u0443\u0442\u0435\u043d\u0442\u0438\u0444\u0438\u043a\u0430\u0442\u043e\u0440\u044b\u0020\u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u0435\u0439\u0020\u0438\u0020\u043a\u0430\u0442\u0430\u043b\u043e\u0433\u0438 +toc_priority: 48 +toc_title: \u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435 +--- + +# Внешние аутентификаторы пользователей и каталоги {#external-authenticators} + +ClickHouse поддерживает аунтетификацию и управление пользователями внешними сервисами. 
+ +Поддерживаются следующие внешние аутентификаторы и каталоги: + +- [LDAP](./ldap.md#external-authenticators-ldap) [Authenticator](./ldap.md#ldap-external-authenticator) и [Directory](./ldap.md#ldap-external-user-directory) + +[Original article](https://clickhouse.tech/docs/ru/operations/external-authenticators/index.md) From 6f7800ecb4546e74a380553cee3438c0b316184d Mon Sep 17 00:00:00 2001 From: Anton Ivashkin Date: Wed, 10 Mar 2021 16:27:08 +0300 Subject: [PATCH 114/333] Fix PVS detected errors --- src/Storages/StorageReplicatedMergeTree.cpp | 55 +++++++++++---------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index feea94c3cd6..a4f6ddd47c7 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3466,6 +3466,7 @@ void StorageReplicatedMergeTree::updateQuorum(const String & part_name, bool is_ } } + void StorageReplicatedMergeTree::cleanLastPartNode(const String & partition_id) { auto zookeeper = getZooKeeper(); @@ -3517,12 +3518,14 @@ void StorageReplicatedMergeTree::cleanLastPartNode(const String & partition_id) } } + bool StorageReplicatedMergeTree::partIsInsertingWithParallelQuorum(const MergeTreePartInfo & part_info) const { auto zookeeper = getZooKeeper(); return zookeeper->exists(zookeeper_path + "/quorum/parallel/" + part_info.getPartName()); } + bool StorageReplicatedMergeTree::partIsLastQuorumPart(const MergeTreePartInfo & part_info) const { auto zookeeper = getZooKeeper(); @@ -3544,6 +3547,7 @@ bool StorageReplicatedMergeTree::partIsLastQuorumPart(const MergeTreePartInfo & return partition_it->second == part_info.getPartName(); } + bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const StorageMetadataPtr & metadata_snapshot, const String & source_replica_path, bool to_detached, size_t quorum, zkutil::ZooKeeper::Ptr zookeeper_) { @@ -3595,7 +3599,6 @@ bool 
StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora }; DataPartPtr part_to_clone; - { /// If the desired part is a result of a part mutation, try to find the source part and compare /// its checksums to the checksums of the desired part. If they match, we can just clone the local part. @@ -3630,6 +3633,10 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora } + ReplicatedMergeTreeAddress address; + ConnectionTimeouts timeouts; + std::pair user_password; + String interserver_scheme; std::function get_part; if (part_to_clone) { @@ -3640,10 +3647,10 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora } else { - ReplicatedMergeTreeAddress address(zookeeper->get(source_replica_path + "/host")); - auto timeouts = ConnectionTimeouts::getHTTPTimeouts(global_context); - auto user_password = global_context.getInterserverCredentials(); - String interserver_scheme = global_context.getInterserverScheme(); + address.fromString(zookeeper->get(source_replica_path + "/host")); + timeouts = ConnectionTimeouts::getHTTPTimeouts(global_context); + user_password = global_context.getInterserverCredentials(); + interserver_scheme = global_context.getInterserverScheme(); get_part = [&, address, timeouts, user_password, interserver_scheme]() { @@ -3671,8 +3678,8 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora replaced_parts = checkPartChecksumsAndCommit(transaction, part); /** If a quorum is tracked for this part, you must update it. - * If you do not have time, in case of losing the session, when you restart the server - see the `ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart` method. - */ + * If you do not have time, in case of losing the session, when you restart the server - see the `ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart` method. 
+ */ if (quorum) { /// Check if this quorum insert is parallel or not @@ -3789,26 +3796,24 @@ bool StorageReplicatedMergeTree::fetchExistsPart(const String & part_name, const std::function get_part; + ReplicatedMergeTreeAddress address(zookeeper->get(source_replica_path + "/host")); + auto timeouts = ConnectionTimeouts::getHTTPTimeouts(global_context); + auto user_password = global_context.getInterserverCredentials(); + String interserver_scheme = global_context.getInterserverScheme(); + + get_part = [&, address, timeouts, user_password, interserver_scheme]() { - ReplicatedMergeTreeAddress address(zookeeper->get(source_replica_path + "/host")); - auto timeouts = ConnectionTimeouts::getHTTPTimeouts(global_context); - auto user_password = global_context.getInterserverCredentials(); - String interserver_scheme = global_context.getInterserverScheme(); + if (interserver_scheme != address.scheme) + throw Exception("Interserver schemes are different: '" + interserver_scheme + + "' != '" + address.scheme + "', can't fetch part from " + address.host, + ErrorCodes::INTERSERVER_SCHEME_DOESNT_MATCH); - get_part = [&, address, timeouts, user_password, interserver_scheme]() - { - if (interserver_scheme != address.scheme) - throw Exception("Interserver schemes are different: '" + interserver_scheme - + "' != '" + address.scheme + "', can't fetch part from " + address.host, - ErrorCodes::INTERSERVER_SCHEME_DOESNT_MATCH); - - return fetcher.fetchPart( - metadata_snapshot, part_name, source_replica_path, - address.host, address.replication_port, - timeouts, user_password.first, user_password.second, interserver_scheme, false, "", true, - replaced_disk); - }; - } + return fetcher.fetchPart( + metadata_snapshot, part_name, source_replica_path, + address.host, address.replication_port, + timeouts, user_password.first, user_password.second, interserver_scheme, false, "", true, + replaced_disk); + }; try { From 109cb634e150fbca3376aba4f31e7e5968de581b Mon Sep 17 00:00:00 2001 From: Anton 
Ivashkin Date: Thu, 11 Mar 2021 12:08:19 +0300 Subject: [PATCH 115/333] Remove debug lines --- src/Storages/StorageReplicatedMergeTree.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index a4f6ddd47c7..04c23ae8574 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3769,9 +3769,6 @@ bool StorageReplicatedMergeTree::fetchExistsPart(const String & part_name, const } } - if (part_name != "foo") - return false; - SCOPE_EXIT ({ std::lock_guard lock(currently_fetching_parts_mutex); From 1194d50e48b4d57956967b16c6bb37274c0ffb56 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 11 Mar 2021 15:16:52 +0300 Subject: [PATCH 116/333] Try to fix test_storage_s3: crash in WriteBufferFromS3 --- src/IO/WriteBufferFromS3.cpp | 9 ++++++++- src/Storages/StorageS3.cpp | 5 +++++ tests/integration/test_storage_s3/test.py | 4 ++-- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index a6ec60b295f..5edf01a940e 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -107,7 +107,14 @@ void WriteBufferFromS3::finalizeImpl() WriteBufferFromS3::~WriteBufferFromS3() { - finalizeImpl(); + try + { + finalizeImpl(); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + } } void WriteBufferFromS3::createMultipartUpload() diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index a31a7fa0944..e50eb1b4249 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -173,6 +173,11 @@ namespace writer->writePrefix(); } + void flush() override + { + writer->flush(); + } + void writeSuffix() override { writer->writeSuffix(); diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 3b4c56b524b..8baa1cd64b0 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -281,9 +281,9 @@ def test_put_get_with_globs(cluster): # Test multipart put. @pytest.mark.parametrize("maybe_auth,positive", [ - ("", True) + ("", True), # ("'minio','minio123',",True), Redirect with credentials not working with nginx. - # ("'wrongid','wrongkey',", False) ClickHouse crashes in some time after this test, local integration tests run fails. 
+ ("'wrongid','wrongkey',", False), ]) def test_multipart_put(cluster, maybe_auth, positive): # type: (ClickHouseCluster) -> None From 38e1cb41cced59dcf8e0a457817ff7c9fd2480e2 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 11 Mar 2021 17:04:59 +0300 Subject: [PATCH 117/333] done --- src/IO/Progress.h | 8 ++++---- src/Interpreters/executeQuery.cpp | 4 ++-- .../Formats/Impl/ParallelFormattingOutputFormat.h | 3 +++ 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/IO/Progress.h b/src/IO/Progress.h index a3efb96db98..64bf3a404af 100644 --- a/src/IO/Progress.h +++ b/src/IO/Progress.h @@ -64,12 +64,12 @@ struct Progress std::atomic written_rows {0}; std::atomic written_bytes {0}; - Progress() {} + Progress() = default; Progress(size_t read_rows_, size_t read_bytes_, size_t total_rows_to_read_ = 0) : read_rows(read_rows_), read_bytes(read_bytes_), total_rows_to_read(total_rows_to_read_) {} - Progress(ReadProgress read_progress) + explicit Progress(ReadProgress read_progress) : read_rows(read_progress.read_rows), read_bytes(read_progress.read_bytes), total_rows_to_read(read_progress.total_rows_to_read) {} - Progress(WriteProgress write_progress) + explicit Progress(WriteProgress write_progress) : written_rows(write_progress.written_rows), written_bytes(write_progress.written_bytes) {} void read(ReadBuffer & in, UInt64 server_revision); @@ -86,7 +86,7 @@ struct Progress written_rows += rhs.written_rows; written_bytes += rhs.written_bytes; - return rhs.read_rows || rhs.written_rows ? true : false; + return rhs.read_rows || rhs.written_rows; } void reset() diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 1a0aa031d6f..a5c21405ff1 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -1014,7 +1014,7 @@ void executeQuery( ? 
getIdentifierName(ast_query_with_output->format) : context.getDefaultFormat(); - auto out = context.getOutputStream(format_name, *out_buf, streams.in->getHeader()); + auto out = context.getOutputStreamParallelIfPossible(format_name, *out_buf, streams.in->getHeader()); /// Save previous progress callback if any. TODO Do it more conveniently. auto previous_progress_callback = context.getProgressCallback(); @@ -1059,7 +1059,7 @@ void executeQuery( return std::make_shared(header); }); - auto out = context.getOutputFormat(format_name, *out_buf, pipeline.getHeader()); + auto out = context.getOutputFormatParallelIfPossible(format_name, *out_buf, pipeline.getHeader()); out->setAutoFlush(); /// Save previous progress callback if any. TODO Do it more conveniently. diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h index 7e7c44a8aae..2efc369e178 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h @@ -101,6 +101,9 @@ public: finishAndWait(); } + /// There are no formats which support parallel formatting and progress writing at the same time + void onProgress(const Progress &) override {} + protected: void consume(Chunk chunk) override final { From 138dedf2df59a1728bd76874d78cd2d3875146e9 Mon Sep 17 00:00:00 2001 From: Nikita Mikhailov Date: Wed, 24 Feb 2021 20:04:37 +0300 Subject: [PATCH 118/333] done --- src/Processors/Formats/IRowInputFormat.h | 1 + .../Formats/Impl/CSVRowInputFormat.cpp | 5 ++- .../Formats/Impl/ParallelParsingInputFormat.h | 3 ++ .../Impl/TabSeparatedRowInputFormat.cpp | 5 +-- ...0161_parallel_parsing_with_names.reference | 8 +++++ .../00161_parallel_parsing_with_names.sh | 31 +++++++++++++++++++ 6 files changed, 50 insertions(+), 3 deletions(-) create mode 100644 tests/queries/1_stateful/00161_parallel_parsing_with_names.reference create mode 100755 
tests/queries/1_stateful/00161_parallel_parsing_with_names.sh diff --git a/src/Processors/Formats/IRowInputFormat.h b/src/Processors/Formats/IRowInputFormat.h index b7863704062..b99c3789383 100644 --- a/src/Processors/Formats/IRowInputFormat.h +++ b/src/Processors/Formats/IRowInputFormat.h @@ -53,6 +53,7 @@ public: void resetParser() override; protected: + friend class ParallelParsingInputFormat; /** Read next row and append it to the columns. * If no more rows - return false. */ diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index f7f08411dfa..ba3d449de97 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -8,6 +8,8 @@ #include +#include + namespace DB { @@ -155,7 +157,7 @@ void CSVRowInputFormat::readPrefix() size_t num_columns = data_types.size(); const auto & header = getPort().getHeader(); - if (with_names) + if (with_names && getCurrentUnitNumber() == 0) { /// This CSV file has a header row with column names. Depending on the /// settings, use it or skip it. 
@@ -492,6 +494,7 @@ static std::pair fileSegmentationEngineCSVImpl(ReadBuffer & in, DB void registerFileSegmentationEngineCSV(FormatFactory & factory) { factory.registerFileSegmentationEngine("CSV", &fileSegmentationEngineCSVImpl); + factory.registerFileSegmentationEngine("CSVWithNames", &fileSegmentationEngineCSVImpl); } } diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h index 9dda2dfe55d..606842242bc 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h @@ -10,6 +10,7 @@ #include #include #include +#include namespace DB { @@ -97,6 +98,8 @@ public: segmentator_thread = ThreadFromGlobalPool( &ParallelParsingInputFormat::segmentatorThreadFunction, this, CurrentThread::getGroup()); + + LOG_DEBUG(&Poco::Logger::get("ParallelParsingInputFormat"), "Parallel parsing is used"); } ~ParallelParsingInputFormat() override diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp index 96b01a5bd9b..5ce612e6e73 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp @@ -136,7 +136,7 @@ void TabSeparatedRowInputFormat::readPrefix() skipBOMIfExists(in); } - if (with_names) + if (with_names && getCurrentUnitNumber() == 0) { if (format_settings.with_names_use_header) { @@ -463,9 +463,10 @@ static std::pair fileSegmentationEngineTabSeparatedImpl(ReadBuffer void registerFileSegmentationEngineTabSeparated(FormatFactory & factory) { // We can use the same segmentation engine for TSKV. 
- for (const auto * name : {"TabSeparated", "TSV", "TSKV"}) + for (const std::string & name : {"TabSeparated", "TSV", "TSKV"}) { factory.registerFileSegmentationEngine(name, &fileSegmentationEngineTabSeparatedImpl); + factory.registerFileSegmentationEngine(name + "WithNames", &fileSegmentationEngineTabSeparatedImpl); } } diff --git a/tests/queries/1_stateful/00161_parallel_parsing_with_names.reference b/tests/queries/1_stateful/00161_parallel_parsing_with_names.reference new file mode 100644 index 00000000000..4e0017f2d78 --- /dev/null +++ b/tests/queries/1_stateful/00161_parallel_parsing_with_names.reference @@ -0,0 +1,8 @@ +TSVWithNames, false +50000 +TSVWithNames, true +50000 +CSVWithNames, false +50000 +CSVWithNames, true +50000 diff --git a/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh new file mode 100755 index 00000000000..79a2d5c22c5 --- /dev/null +++ b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +FORMATS=('TSVWithNames' 'CSVWithNames') +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" + +for format in "${FORMATS[@]}" +do + $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(a DateTime, b String, c FixedString(16)) ENGINE=Memory()" + + echo "$format, false"; + $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ + "SELECT ClientEventTime as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \ + $CLICKHOUSE_CLIENT --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names FORMAT $format" + + $CLICKHOUSE_CLIENT -q "SELECT count() FROM parsing_with_names;" + $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" + + + $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(a DateTime, b String, c FixedString(16)) ENGINE=Memory()" + echo "$format, true"; + $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ + "SELECT ClientEventTime as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \ + $CLICKHOUSE_CLIENT --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names FORMAT $format" + + $CLICKHOUSE_CLIENT -q "SELECT count() FROM parsing_with_names;" + $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" +done \ No newline at end of file From 61fdeb58972c0b166710e9f862d327f2e10213d1 Mon Sep 17 00:00:00 2001 From: Nikita Mikhailov Date: Wed, 24 Feb 2021 20:12:22 +0300 Subject: [PATCH 119/333] better --- src/Processors/Formats/IRowInputFormat.h | 1 - src/Processors/Formats/Impl/CSVRowInputFormat.cpp | 4 ++-- src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp | 2 ++ 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Processors/Formats/IRowInputFormat.h b/src/Processors/Formats/IRowInputFormat.h index b99c3789383..b7863704062 100644 --- a/src/Processors/Formats/IRowInputFormat.h +++ b/src/Processors/Formats/IRowInputFormat.h @@ -53,7 +53,6 @@ public: void 
resetParser() override; protected: - friend class ParallelParsingInputFormat; /** Read next row and append it to the columns. * If no more rows - return false. */ diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index ba3d449de97..0f7ca7b9e79 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -8,8 +8,6 @@ #include -#include - namespace DB { @@ -157,6 +155,8 @@ void CSVRowInputFormat::readPrefix() size_t num_columns = data_types.size(); const auto & header = getPort().getHeader(); + /// This is a bit of abstraction leakage, but we have almost the same code in other places. + /// Thus, we check if this InputFormat is working with the "real" beggining of the data in case of parallel parsing. if (with_names && getCurrentUnitNumber() == 0) { /// This CSV file has a header row with column names. Depending on the diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp index 5ce612e6e73..00926b6e99b 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp @@ -136,6 +136,8 @@ void TabSeparatedRowInputFormat::readPrefix() skipBOMIfExists(in); } + /// This is a bit of abstraction leakage, but we have almost the same code in other places. + /// Thus, we check if this InputFormat is working with the "real" beggining of the data in case of parallel parsing. 
if (with_names && getCurrentUnitNumber() == 0) { if (format_settings.with_names_use_header) From 8b30bb0fb540c1c8db386c0d1f3ca0f6e3a60f1c Mon Sep 17 00:00:00 2001 From: Nikita Mikhailov Date: Wed, 24 Feb 2021 21:03:02 +0300 Subject: [PATCH 120/333] style --- src/Processors/Formats/Impl/CSVRowInputFormat.cpp | 2 +- src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 0f7ca7b9e79..6610a7a0d82 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -156,7 +156,7 @@ void CSVRowInputFormat::readPrefix() const auto & header = getPort().getHeader(); /// This is a bit of abstraction leakage, but we have almost the same code in other places. - /// Thus, we check if this InputFormat is working with the "real" beggining of the data in case of parallel parsing. + /// Thus, we check if this InputFormat is working with the "real" beginning of the data in case of parallel parsing. if (with_names && getCurrentUnitNumber() == 0) { /// This CSV file has a header row with column names. Depending on the diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp index 00926b6e99b..7dcfd4e930d 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp @@ -137,7 +137,7 @@ void TabSeparatedRowInputFormat::readPrefix() } /// This is a bit of abstraction leakage, but we have almost the same code in other places. - /// Thus, we check if this InputFormat is working with the "real" beggining of the data in case of parallel parsing. + /// Thus, we check if this InputFormat is working with the "real" beginning of the data in case of parallel parsing. 
if (with_names && getCurrentUnitNumber() == 0) { if (format_settings.with_names_use_header) From 91a418b3a9682a4817948264d6cc57a08a117de4 Mon Sep 17 00:00:00 2001 From: Nikita Mikhailov Date: Wed, 24 Feb 2021 21:09:31 +0300 Subject: [PATCH 121/333] better --- src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp index 7dcfd4e930d..510cf67065f 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp @@ -465,10 +465,9 @@ static std::pair fileSegmentationEngineTabSeparatedImpl(ReadBuffer void registerFileSegmentationEngineTabSeparated(FormatFactory & factory) { // We can use the same segmentation engine for TSKV. - for (const std::string & name : {"TabSeparated", "TSV", "TSKV"}) + for (const std::string & name : {"TabSeparated", "TSV", "TSKV", "TabSeparatedWithNames", "TSVWithNames"}) { factory.registerFileSegmentationEngine(name, &fileSegmentationEngineTabSeparatedImpl); - factory.registerFileSegmentationEngine(name + "WithNames", &fileSegmentationEngineTabSeparatedImpl); } } From eab35bfc7f6c18c322c291904ae0514b35a3a61a Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 28 Feb 2021 02:37:28 +0300 Subject: [PATCH 122/333] Update ParallelParsingInputFormat.h --- src/Processors/Formats/Impl/ParallelParsingInputFormat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h index 606842242bc..f1a290619fa 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h @@ -99,7 +99,7 @@ public: segmentator_thread = ThreadFromGlobalPool( &ParallelParsingInputFormat::segmentatorThreadFunction, this, 
CurrentThread::getGroup()); - LOG_DEBUG(&Poco::Logger::get("ParallelParsingInputFormat"), "Parallel parsing is used"); + LOG_TRACE(&Poco::Logger::get("ParallelParsingInputFormat"), "Parallel parsing is used"); } ~ParallelParsingInputFormat() override From 369c9da1611a97bc0c0ebf37a8fdba29dd47dc08 Mon Sep 17 00:00:00 2001 From: Nikita Mikhailov Date: Mon, 1 Mar 2021 22:58:55 +0300 Subject: [PATCH 123/333] better --- src/Processors/Formats/IInputFormat.cpp | 1 + src/Processors/Formats/IInputFormat.h | 25 ++++++++ .../Formats/Impl/CSVRowInputFormat.cpp | 50 ++++++++-------- .../Formats/Impl/CSVRowInputFormat.h | 10 ---- .../Impl/ParallelParsingInputFormat.cpp | 9 +++ .../Formats/Impl/ParallelParsingInputFormat.h | 3 + .../Impl/TabSeparatedRowInputFormat.cpp | 58 +++++++++---------- .../Formats/Impl/TabSeparatedRowInputFormat.h | 4 -- src/Processors/ISource.h | 2 +- .../00161_parallel_parsing_with_names.sh | 3 +- 10 files changed, 95 insertions(+), 70 deletions(-) diff --git a/src/Processors/Formats/IInputFormat.cpp b/src/Processors/Formats/IInputFormat.cpp index 0fbc78ea8c0..069d25564b1 100644 --- a/src/Processors/Formats/IInputFormat.cpp +++ b/src/Processors/Formats/IInputFormat.cpp @@ -13,6 +13,7 @@ namespace ErrorCodes IInputFormat::IInputFormat(Block header, ReadBuffer & in_) : ISource(std::move(header)), in(in_) { + column_mapping = std::make_shared(); } void IInputFormat::resetParser() diff --git a/src/Processors/Formats/IInputFormat.h b/src/Processors/Formats/IInputFormat.h index e1537aff6c5..b8ee4d438df 100644 --- a/src/Processors/Formats/IInputFormat.h +++ b/src/Processors/Formats/IInputFormat.h @@ -2,9 +2,26 @@ #include +#include + namespace DB { +/// Used to pass info from header between different InputFormats in ParallelParsing +struct ColumnMapping +{ + /// Maps indexes of columns in the input file to indexes of table columns + using OptionalIndexes = std::vector>; + OptionalIndexes column_indexes_for_input_fields; + + /// Tracks which columns we have read 
in a single read() call. + /// For columns that are never read, it is initialized to false when we + /// read the file header, and never changed afterwards. + /// For other columns, it is updated on each read() call. + std::vector read_columns; +}; + +using ColumnMappingPtr = std::shared_ptr; class ReadBuffer; @@ -39,9 +56,17 @@ public: return none; } + /// Must be called from ParallelParsingInputFormat after readSuffix + ColumnMappingPtr getColumnMapping() const { return column_mapping; } + /// Must be called from ParallelParsingInputFormat before readPrefix + void setColumnMapping(ColumnMappingPtr column_mapping_ ) { column_mapping = column_mapping_; } + size_t getCurrentUnitNumber() const { return current_unit_number; } void setCurrentUnitNumber(size_t current_unit_number_) { current_unit_number = current_unit_number_; } +protected: + ColumnMappingPtr column_mapping{}; + private: /// Number of currently parsed chunk (if parallel parsing is enabled) size_t current_unit_number = 0; diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 6610a7a0d82..efb8ad93d72 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -55,13 +55,13 @@ void CSVRowInputFormat::addInputColumn(const String & column_name) { if (format_settings.skip_unknown_fields) { - column_indexes_for_input_fields.push_back(std::nullopt); + column_mapping->column_indexes_for_input_fields.push_back(std::nullopt); return; } throw Exception( "Unknown field found in CSV header: '" + column_name + "' " + - "at position " + std::to_string(column_indexes_for_input_fields.size()) + + "at position " + std::to_string(column_mapping->column_indexes_for_input_fields.size()) + "\nSet the 'input_format_skip_unknown_fields' parameter explicitly to ignore and proceed", ErrorCodes::INCORRECT_DATA ); @@ -69,11 +69,11 @@ void CSVRowInputFormat::addInputColumn(const String & column_name) const 
auto column_index = column_it->second; - if (read_columns[column_index]) + if (column_mapping->read_columns[column_index]) throw Exception("Duplicate field found while parsing CSV header: " + column_name, ErrorCodes::INCORRECT_DATA); - read_columns[column_index] = true; - column_indexes_for_input_fields.emplace_back(column_index); + column_mapping->read_columns[column_index] = true; + column_mapping->column_indexes_for_input_fields.emplace_back(column_index); } static void skipEndOfLine(ReadBuffer & in) @@ -165,7 +165,7 @@ void CSVRowInputFormat::readPrefix() { /// Look at the file header to see which columns we have there. /// The missing columns are filled with defaults. - read_columns.assign(header.columns(), false); + column_mapping->read_columns.assign(header.columns(), false); do { String column_name; @@ -179,7 +179,7 @@ void CSVRowInputFormat::readPrefix() skipDelimiter(in, format_settings.csv.delimiter, true); - for (auto read_column : read_columns) + for (auto read_column : column_mapping->read_columns) { if (!read_column) { @@ -196,12 +196,12 @@ void CSVRowInputFormat::readPrefix() /// The default: map each column of the file to the column of the table with /// the same index. - read_columns.assign(header.columns(), true); - column_indexes_for_input_fields.resize(header.columns()); + column_mapping->read_columns.assign(header.columns(), true); + column_mapping->column_indexes_for_input_fields.resize(header.columns()); - for (size_t i = 0; i < column_indexes_for_input_fields.size(); ++i) + for (size_t i = 0; i < column_mapping->column_indexes_for_input_fields.size(); ++i) { - column_indexes_for_input_fields[i] = i; + column_mapping->column_indexes_for_input_fields[i] = i; } } @@ -218,12 +218,12 @@ bool CSVRowInputFormat::readRow(MutableColumns & columns, RowReadExtension & ext /// it doesn't have to check it. 
bool have_default_columns = have_always_default_columns; - ext.read_columns.assign(read_columns.size(), true); + ext.read_columns.assign(column_mapping->read_columns.size(), true); const auto delimiter = format_settings.csv.delimiter; - for (size_t file_column = 0; file_column < column_indexes_for_input_fields.size(); ++file_column) + for (size_t file_column = 0; file_column < column_mapping->column_indexes_for_input_fields.size(); ++file_column) { - const auto & table_column = column_indexes_for_input_fields[file_column]; - const bool is_last_file_column = file_column + 1 == column_indexes_for_input_fields.size(); + const auto & table_column = column_mapping->column_indexes_for_input_fields[file_column]; + const bool is_last_file_column = file_column + 1 == column_mapping->column_indexes_for_input_fields.size(); if (table_column) { @@ -245,9 +245,9 @@ bool CSVRowInputFormat::readRow(MutableColumns & columns, RowReadExtension & ext if (have_default_columns) { - for (size_t i = 0; i < read_columns.size(); i++) + for (size_t i = 0; i < column_mapping->read_columns.size(); i++) { - if (!read_columns[i]) + if (!column_mapping->read_columns[i]) { /// The column value for this row is going to be overwritten /// with default by the caller, but the general assumption is @@ -268,7 +268,7 @@ bool CSVRowInputFormat::parseRowAndPrintDiagnosticInfo(MutableColumns & columns, { const char delimiter = format_settings.csv.delimiter; - for (size_t file_column = 0; file_column < column_indexes_for_input_fields.size(); ++file_column) + for (size_t file_column = 0; file_column < column_mapping->column_indexes_for_input_fields.size(); ++file_column) { if (file_column == 0 && in.eof()) { @@ -277,10 +277,10 @@ bool CSVRowInputFormat::parseRowAndPrintDiagnosticInfo(MutableColumns & columns, } skipWhitespacesAndTabs(in); - if (column_indexes_for_input_fields[file_column].has_value()) + if (column_mapping->column_indexes_for_input_fields[file_column].has_value()) { const auto & header = 
getPort().getHeader(); - size_t col_idx = column_indexes_for_input_fields[file_column].value(); + size_t col_idx = column_mapping->column_indexes_for_input_fields[file_column].value(); if (!deserializeFieldAndPrintDiagnosticInfo(header.getByPosition(col_idx).name, data_types[col_idx], *columns[col_idx], out, file_column)) return false; @@ -296,7 +296,7 @@ bool CSVRowInputFormat::parseRowAndPrintDiagnosticInfo(MutableColumns & columns, skipWhitespacesAndTabs(in); /// Delimiters - if (file_column + 1 == column_indexes_for_input_fields.size()) + if (file_column + 1 == column_mapping->column_indexes_for_input_fields.size()) { if (in.eof()) return false; @@ -358,9 +358,9 @@ void CSVRowInputFormat::syncAfterError() void CSVRowInputFormat::tryDeserializeField(const DataTypePtr & type, IColumn & column, size_t file_column) { - if (column_indexes_for_input_fields[file_column]) + if (column_mapping->column_indexes_for_input_fields[file_column]) { - const bool is_last_file_column = file_column + 1 == column_indexes_for_input_fields.size(); + const bool is_last_file_column = file_column + 1 == column_mapping->column_indexes_for_input_fields.size(); readField(column, type, is_last_file_column); } else @@ -406,8 +406,8 @@ bool CSVRowInputFormat::readField(IColumn & column, const DataTypePtr & type, bo void CSVRowInputFormat::resetParser() { RowInputFormatWithDiagnosticInfo::resetParser(); - column_indexes_for_input_fields.clear(); - read_columns.clear(); + column_mapping->column_indexes_for_input_fields.clear(); + column_mapping->read_columns.clear(); have_always_default_columns = false; } diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.h b/src/Processors/Formats/Impl/CSVRowInputFormat.h index c884eb6c3db..3cdafd8ec8d 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.h +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.h @@ -38,16 +38,6 @@ private: using IndexesMap = std::unordered_map; IndexesMap column_indexes_by_names; - /// Maps indexes of columns in the 
input file to indexes of table columns - using OptionalIndexes = std::vector>; - OptionalIndexes column_indexes_for_input_fields; - - /// Tracks which columns we have read in a single read() call. - /// For columns that are never read, it is initialized to false when we - /// read the file header, and never changed afterwards. - /// For other columns, it is updated on each read() call. - std::vector read_columns; - /// Whether we have any columns that are not read from file at all, /// and must be always initialized with defaults. bool have_always_default_columns = false; diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index d1660b53019..ce041f6636b 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -89,6 +89,11 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa unit.chunk_ext.chunk.clear(); unit.chunk_ext.block_missing_values.clear(); + /// Propagate column_mapping to other parsers. + /// Note: column_mapping is used only for *WithNames types + if (current_ticket_number != 0) + input_format->setColumnMapping(column_mapping); + // We don't know how many blocks will be. So we have to read them all // until an empty block occurred. Chunk chunk; @@ -100,6 +105,10 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa unit.chunk_ext.block_missing_values.emplace_back(parser.getMissingValues()); } + /// Extract column_mapping from first parser to propage it to others + if (current_ticket_number == 0) + column_mapping = input_format->getColumnMapping(); + // We suppose we will get at least some blocks for a non-empty buffer, // except at the end of file. Also see a matching assert in readImpl(). 
assert(unit.is_last || !unit.chunk_ext.chunk.empty() || parsing_finished); diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h index f1a290619fa..49bb69faeac 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h @@ -253,6 +253,9 @@ private: { parserThreadFunction(group, ticket_number); }); + /// We have to wait here to possibly extract ColumnMappingPtr from the first parser. + if (ticket_number == 0) + pool.wait(); } void finishAndWait() diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp index 510cf67065f..04c6b4c3ee0 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp @@ -62,19 +62,19 @@ TabSeparatedRowInputFormat::TabSeparatedRowInputFormat(const Block & header_, Re column_indexes_by_names.emplace(column_info.name, i); } - column_indexes_for_input_fields.reserve(num_columns); - read_columns.assign(num_columns, false); + column_mapping->column_indexes_for_input_fields.reserve(num_columns); + column_mapping->read_columns.assign(num_columns, false); } void TabSeparatedRowInputFormat::setupAllColumnsByTableSchema() { const auto & header = getPort().getHeader(); - read_columns.assign(header.columns(), true); - column_indexes_for_input_fields.resize(header.columns()); + column_mapping->read_columns.assign(header.columns(), true); + column_mapping->column_indexes_for_input_fields.resize(header.columns()); - for (size_t i = 0; i < column_indexes_for_input_fields.size(); ++i) - column_indexes_for_input_fields[i] = i; + for (size_t i = 0; i < column_mapping->column_indexes_for_input_fields.size(); ++i) + column_mapping->column_indexes_for_input_fields[i] = i; } @@ -85,13 +85,13 @@ void TabSeparatedRowInputFormat::addInputColumn(const String & column_name) 
{ if (format_settings.skip_unknown_fields) { - column_indexes_for_input_fields.push_back(std::nullopt); + column_mapping->column_indexes_for_input_fields.push_back(std::nullopt); return; } throw Exception( "Unknown field found in TSV header: '" + column_name + "' " + - "at position " + std::to_string(column_indexes_for_input_fields.size()) + + "at position " + std::to_string(column_mapping->column_indexes_for_input_fields.size()) + "\nSet the 'input_format_skip_unknown_fields' parameter explicitly to ignore and proceed", ErrorCodes::INCORRECT_DATA ); @@ -99,11 +99,11 @@ void TabSeparatedRowInputFormat::addInputColumn(const String & column_name) const auto column_index = column_it->second; - if (read_columns[column_index]) + if (column_mapping->read_columns[column_index]) throw Exception("Duplicate field found while parsing TSV header: " + column_name, ErrorCodes::INCORRECT_DATA); - read_columns[column_index] = true; - column_indexes_for_input_fields.emplace_back(column_index); + column_mapping->read_columns[column_index] = true; + column_mapping->column_indexes_for_input_fields.emplace_back(column_index); } @@ -113,8 +113,8 @@ void TabSeparatedRowInputFormat::fillUnreadColumnsWithDefaults(MutableColumns & if (unlikely(row_num == 1)) { columns_to_fill_with_default_values.clear(); - for (size_t index = 0; index < read_columns.size(); ++index) - if (read_columns[index] == 0) + for (size_t index = 0; index < column_mapping->read_columns.size(); ++index) + if (column_mapping->read_columns[index] == 0) columns_to_fill_with_default_values.push_back(index); } @@ -167,7 +167,7 @@ void TabSeparatedRowInputFormat::readPrefix() else { setupAllColumnsByTableSchema(); - skipTSVRow(in, column_indexes_for_input_fields.size()); + skipTSVRow(in, column_mapping->column_indexes_for_input_fields.size()); } } else @@ -175,7 +175,7 @@ void TabSeparatedRowInputFormat::readPrefix() if (with_types) { - skipTSVRow(in, column_indexes_for_input_fields.size()); + skipTSVRow(in, 
column_mapping->column_indexes_for_input_fields.size()); } } @@ -187,11 +187,11 @@ bool TabSeparatedRowInputFormat::readRow(MutableColumns & columns, RowReadExtens updateDiagnosticInfo(); - ext.read_columns.assign(read_columns.size(), true); - for (size_t file_column = 0; file_column < column_indexes_for_input_fields.size(); ++file_column) + ext.read_columns.assign(column_mapping->read_columns.size(), true); + for (size_t file_column = 0; file_column < column_mapping->column_indexes_for_input_fields.size(); ++file_column) { - const auto & column_index = column_indexes_for_input_fields[file_column]; - const bool is_last_file_column = file_column + 1 == column_indexes_for_input_fields.size(); + const auto & column_index = column_mapping->column_indexes_for_input_fields[file_column]; + const bool is_last_file_column = file_column + 1 == column_mapping->column_indexes_for_input_fields.size(); if (column_index) { const auto & type = data_types[*column_index]; @@ -204,7 +204,7 @@ bool TabSeparatedRowInputFormat::readRow(MutableColumns & columns, RowReadExtens } /// skip separators - if (file_column + 1 < column_indexes_for_input_fields.size()) + if (file_column + 1 < column_mapping->column_indexes_for_input_fields.size()) { assertChar('\t', in); } @@ -240,7 +240,7 @@ bool TabSeparatedRowInputFormat::readField(IColumn & column, const DataTypePtr & bool TabSeparatedRowInputFormat::parseRowAndPrintDiagnosticInfo(MutableColumns & columns, WriteBuffer & out) { - for (size_t file_column = 0; file_column < column_indexes_for_input_fields.size(); ++file_column) + for (size_t file_column = 0; file_column < column_mapping->column_indexes_for_input_fields.size(); ++file_column) { if (file_column == 0 && in.eof()) { @@ -248,10 +248,10 @@ bool TabSeparatedRowInputFormat::parseRowAndPrintDiagnosticInfo(MutableColumns & return false; } - if (column_indexes_for_input_fields[file_column].has_value()) + if (column_mapping->column_indexes_for_input_fields[file_column].has_value()) { const 
auto & header = getPort().getHeader(); - size_t col_idx = column_indexes_for_input_fields[file_column].value(); + size_t col_idx = column_mapping->column_indexes_for_input_fields[file_column].value(); if (!deserializeFieldAndPrintDiagnosticInfo(header.getByPosition(col_idx).name, data_types[col_idx], *columns[col_idx], out, file_column)) return false; @@ -266,7 +266,7 @@ bool TabSeparatedRowInputFormat::parseRowAndPrintDiagnosticInfo(MutableColumns & } /// Delimiters - if (file_column + 1 == column_indexes_for_input_fields.size()) + if (file_column + 1 == column_mapping->column_indexes_for_input_fields.size()) { if (!in.eof()) { @@ -332,7 +332,7 @@ bool TabSeparatedRowInputFormat::parseRowAndPrintDiagnosticInfo(MutableColumns & void TabSeparatedRowInputFormat::tryDeserializeField(const DataTypePtr & type, IColumn & column, size_t file_column) { - if (column_indexes_for_input_fields[file_column]) + if (column_mapping->column_indexes_for_input_fields[file_column]) { // check null value for type is not nullable. 
don't cross buffer bound for simplicity, so maybe missing some case if (!type->isNullable() && !in.eof()) @@ -351,7 +351,7 @@ void TabSeparatedRowInputFormat::tryDeserializeField(const DataTypePtr & type, I } } } - const bool is_last_file_column = file_column + 1 == column_indexes_for_input_fields.size(); + const bool is_last_file_column = file_column + 1 == column_mapping->column_indexes_for_input_fields.size(); readField(column, type, is_last_file_column); } else @@ -370,8 +370,8 @@ void TabSeparatedRowInputFormat::resetParser() { RowInputFormatWithDiagnosticInfo::resetParser(); const auto & sample = getPort().getHeader(); - read_columns.assign(sample.columns(), false); - column_indexes_for_input_fields.clear(); + column_mapping->read_columns.assign(sample.columns(), false); + column_mapping->column_indexes_for_input_fields.clear(); columns_to_fill_with_default_values.clear(); } @@ -465,7 +465,7 @@ static std::pair fileSegmentationEngineTabSeparatedImpl(ReadBuffer void registerFileSegmentationEngineTabSeparated(FormatFactory & factory) { // We can use the same segmentation engine for TSKV. 
- for (const std::string & name : {"TabSeparated", "TSV", "TSKV", "TabSeparatedWithNames", "TSVWithNames"}) + for (const auto & name : {"TabSeparated", "TSV", "TSKV", "TabSeparatedWithNames", "TSVWithNames"}) { factory.registerFileSegmentationEngine(name, &fileSegmentationEngineTabSeparatedImpl); } diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h index 0141d87403a..db70b4d3fea 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h @@ -41,10 +41,6 @@ private: using IndexesMap = std::unordered_map; IndexesMap column_indexes_by_names; - using OptionalIndexes = std::vector>; - OptionalIndexes column_indexes_for_input_fields; - - std::vector read_columns; std::vector columns_to_fill_with_default_values; void addInputColumn(const String & column_name); diff --git a/src/Processors/ISource.h b/src/Processors/ISource.h index b7e2b5dce8e..db91c0c5bce 100644 --- a/src/Processors/ISource.h +++ b/src/Processors/ISource.h @@ -19,7 +19,7 @@ protected: virtual std::optional tryGenerate(); public: - ISource(Block header); + explicit ISource(Block header); Status prepare() override; void work() override; diff --git a/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh index 79a2d5c22c5..ab7706e100f 100755 --- a/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh +++ b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh @@ -9,7 +9,8 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" for format in "${FORMATS[@]}" do - $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(a DateTime, b String, c FixedString(16)) ENGINE=Memory()" + # Columns are permuted + $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime, b String) ENGINE=Memory()" echo "$format, false"; $CLICKHOUSE_CLIENT 
--output_format_parallel_formatting=false -q \ From 3a7d48f3a4c8c1a708ace4b0d9800e5ba3d1b8e6 Mon Sep 17 00:00:00 2001 From: Nikita Mikhailov Date: Tue, 2 Mar 2021 01:32:11 +0300 Subject: [PATCH 124/333] style and tsan fix --- src/Processors/Formats/IInputFormat.h | 4 +++- src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp | 6 +++++- src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp | 2 +- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/Processors/Formats/IInputFormat.h b/src/Processors/Formats/IInputFormat.h index b8ee4d438df..a297f487318 100644 --- a/src/Processors/Formats/IInputFormat.h +++ b/src/Processors/Formats/IInputFormat.h @@ -10,6 +10,8 @@ namespace DB /// Used to pass info from header between different InputFormats in ParallelParsing struct ColumnMapping { + /// Non-atomic because only read access in possible + bool is_set; /// Maps indexes of columns in the input file to indexes of table columns using OptionalIndexes = std::vector>; OptionalIndexes column_indexes_for_input_fields; @@ -59,7 +61,7 @@ public: /// Must be called from ParallelParsingInputFormat after readSuffix ColumnMappingPtr getColumnMapping() const { return column_mapping; } /// Must be called from ParallelParsingInputFormat before readPrefix - void setColumnMapping(ColumnMappingPtr column_mapping_ ) { column_mapping = column_mapping_; } + void setColumnMapping(ColumnMappingPtr column_mapping_) { column_mapping = column_mapping_; } size_t getCurrentUnitNumber() const { return current_unit_number; } void setCurrentUnitNumber(size_t current_unit_number_) { current_unit_number = current_unit_number_; } diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index ce041f6636b..4c2b9df304b 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -92,7 +92,11 @@ void 
ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa /// Propagate column_mapping to other parsers. /// Note: column_mapping is used only for *WithNames types if (current_ticket_number != 0) + { + column_mapping->is_set = true; input_format->setColumnMapping(column_mapping); + } + // We don't know how many blocks will be. So we have to read them all // until an empty block occurred. @@ -105,7 +109,7 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa unit.chunk_ext.block_missing_values.emplace_back(parser.getMissingValues()); } - /// Extract column_mapping from first parser to propage it to others + /// Extract column_mapping from first parser to propagate it to others if (current_ticket_number == 0) column_mapping = input_format->getColumnMapping(); diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp index 04c6b4c3ee0..ffb1b96f70e 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp @@ -170,7 +170,7 @@ void TabSeparatedRowInputFormat::readPrefix() skipTSVRow(in, column_mapping->column_indexes_for_input_fields.size()); } } - else + else if (!column_mapping->is_set) setupAllColumnsByTableSchema(); if (with_types) From 3372dd7b6a69d67abfa658c8518dfe9a9e56d03d Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 2 Mar 2021 16:31:19 +0300 Subject: [PATCH 125/333] fix stupid bug --- src/Processors/Formats/IInputFormat.h | 3 ++- .../Formats/Impl/CSVRowInputFormat.cpp | 22 ++++++++++--------- .../Formats/Impl/CSVRowInputFormat.h | 1 + .../Impl/ParallelParsingInputFormat.cpp | 7 +++--- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/src/Processors/Formats/IInputFormat.h b/src/Processors/Formats/IInputFormat.h index a297f487318..95910bf51e5 100644 --- a/src/Processors/Formats/IInputFormat.h +++ b/src/Processors/Formats/IInputFormat.h @@ 
-10,7 +10,8 @@ namespace DB /// Used to pass info from header between different InputFormats in ParallelParsing struct ColumnMapping { - /// Non-atomic because only read access in possible + /// Non-atomic because there is strict `happens-before` between read and write access + /// See InputFormatParallelParsing bool is_set; /// Maps indexes of columns in the input file to indexes of table columns using OptionalIndexes = std::vector>; diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index efb8ad93d72..4cec07f38dc 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -145,6 +145,16 @@ static void skipRow(ReadBuffer & in, const FormatSettings::CSV & settings, size_ } } +void CSVRowInputFormat::setupAllColumnsByTableSchema() +{ + const auto & header = getPort().getHeader(); + column_mapping->read_columns.assign(header.columns(), true); + column_mapping->column_indexes_for_input_fields.resize(header.columns()); + + for (size_t i = 0; i < column_mapping->column_indexes_for_input_fields.size(); ++i) + column_mapping->column_indexes_for_input_fields[i] = i; +} + void CSVRowInputFormat::readPrefix() { @@ -193,16 +203,8 @@ void CSVRowInputFormat::readPrefix() else skipRow(in, format_settings.csv, num_columns); } - - /// The default: map each column of the file to the column of the table with - /// the same index. 
- column_mapping->read_columns.assign(header.columns(), true); - column_mapping->column_indexes_for_input_fields.resize(header.columns()); - - for (size_t i = 0; i < column_mapping->column_indexes_for_input_fields.size(); ++i) - { - column_mapping->column_indexes_for_input_fields[i] = i; - } + else if (!column_mapping->is_set) + setupAllColumnsByTableSchema(); } diff --git a/src/Processors/Formats/Impl/CSVRowInputFormat.h b/src/Processors/Formats/Impl/CSVRowInputFormat.h index 3cdafd8ec8d..86e41cf0a43 100644 --- a/src/Processors/Formats/Impl/CSVRowInputFormat.h +++ b/src/Processors/Formats/Impl/CSVRowInputFormat.h @@ -44,6 +44,7 @@ private: void addInputColumn(const String & column_name); + void setupAllColumnsByTableSchema(); bool parseRowAndPrintDiagnosticInfo(MutableColumns & columns, WriteBuffer & out) override; void tryDeserializeField(const DataTypePtr & type, IColumn & column, size_t file_column) override; bool isGarbageAfterField(size_t, ReadBuffer::Position pos) override diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index 4c2b9df304b..a1d115c734c 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -92,11 +92,7 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa /// Propagate column_mapping to other parsers. /// Note: column_mapping is used only for *WithNames types if (current_ticket_number != 0) - { - column_mapping->is_set = true; input_format->setColumnMapping(column_mapping); - } - // We don't know how many blocks will be. So we have to read them all // until an empty block occurred. 
@@ -111,7 +107,10 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa /// Extract column_mapping from first parser to propagate it to others if (current_ticket_number == 0) + { column_mapping = input_format->getColumnMapping(); + column_mapping->is_set = true; + } // We suppose we will get at least some blocks for a non-empty buffer, // except at the end of file. Also see a matching assert in readImpl(). From e7a1398def3dfc553a932af908a4bbe6defba8ba Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 9 Mar 2021 22:02:56 +0300 Subject: [PATCH 126/333] try fix tests --- .../Formats/Impl/ParallelParsingInputFormat.cpp | 1 + .../Formats/Impl/ParallelParsingInputFormat.h | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index a1d115c734c..1ead36b99eb 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -110,6 +110,7 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa { column_mapping = input_format->getColumnMapping(); column_mapping->is_set = true; + first_parser_finished.Notify(); } // We suppose we will get at least some blocks for a non-empty buffer, diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h index 49bb69faeac..f42cd3eadd0 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h @@ -12,6 +12,18 @@ #include #include +/// I don't know why, but clang warns about static annotations +/// error: macro name is a reserved identifier [-Werror,-Wreserved-id-macro] +/// #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored 
"-Wreserved-id-macro" +#endif +#include +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + namespace DB { @@ -202,6 +214,8 @@ private: std::condition_variable reader_condvar; std::condition_variable segmentator_condvar; + absl::Notification first_parser_finished; + std::atomic parsing_finished{false}; /// There are multiple "parsers", that's why we use thread pool. @@ -255,7 +269,7 @@ private: }); /// We have to wait here to possibly extract ColumnMappingPtr from the first parser. if (ticket_number == 0) - pool.wait(); + first_parser_finished.WaitForNotification(); } void finishAndWait() From f3ee1290745d8a5fdf00ae0ab5fa4e9572f9295c Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Wed, 10 Mar 2021 00:22:04 +0300 Subject: [PATCH 127/333] fix grpc --- src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index 1ead36b99eb..0029a7e1084 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -123,6 +123,7 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa } catch (...) 
{ + first_parser_finished.Notify(); onBackgroundException(unit.offset); } } From 761b369fa2212dbe4f58fd67622dad0e03c12630 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Wed, 10 Mar 2021 01:20:38 +0300 Subject: [PATCH 128/333] move to poco::event --- .../Formats/Impl/ParallelParsingInputFormat.cpp | 4 ++-- .../Formats/Impl/ParallelParsingInputFormat.h | 17 +++-------------- 2 files changed, 5 insertions(+), 16 deletions(-) diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index 0029a7e1084..1055342cbea 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -110,7 +110,7 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa { column_mapping = input_format->getColumnMapping(); column_mapping->is_set = true; - first_parser_finished.Notify(); + first_parser_finished.set(); } // We suppose we will get at least some blocks for a non-empty buffer, @@ -123,7 +123,7 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa } catch (...) 
{ - first_parser_finished.Notify(); + first_parser_finished.set(); onBackgroundException(unit.offset); } } diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h index f42cd3eadd0..559507055b9 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h @@ -11,18 +11,7 @@ #include #include #include - -/// I don't know why, but clang warns about static annotations -/// error: macro name is a reserved identifier [-Werror,-Wreserved-id-macro] -/// #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) -#ifdef __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wreserved-id-macro" -#endif -#include -#ifdef __clang__ -#pragma clang diagnostic pop -#endif +#include namespace DB { @@ -214,7 +203,7 @@ private: std::condition_variable reader_condvar; std::condition_variable segmentator_condvar; - absl::Notification first_parser_finished; + Poco::Event first_parser_finished; std::atomic parsing_finished{false}; @@ -269,7 +258,7 @@ private: }); /// We have to wait here to possibly extract ColumnMappingPtr from the first parser. 
if (ticket_number == 0) - first_parser_finished.WaitForNotification(); + first_parser_finished.wait(); } void finishAndWait() From b434a2f0bdff85f679bd53b7b9ecbe12a83eabfb Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 11 Mar 2021 02:15:16 +0300 Subject: [PATCH 129/333] disable parallel parsing for grpc tests --- .../Formats/Impl/ParallelParsingInputFormat.cpp | 4 +--- tests/integration/test_grpc_protocol/configs/users.xml | 8 ++++++++ tests/integration/test_grpc_protocol/test.py | 2 +- 3 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 tests/integration/test_grpc_protocol/configs/users.xml diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index 1055342cbea..1ad913a1a59 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -123,7 +123,6 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa } catch (...) 
{ - first_parser_finished.set(); onBackgroundException(unit.offset); } } @@ -131,8 +130,6 @@ void ParallelParsingInputFormat::parserThreadFunction(ThreadGroupStatusPtr threa void ParallelParsingInputFormat::onBackgroundException(size_t offset) { - tryLogCurrentException(__PRETTY_FUNCTION__); - std::unique_lock lock(mutex); if (!background_exception) { @@ -143,6 +140,7 @@ void ParallelParsingInputFormat::onBackgroundException(size_t offset) } tryLogCurrentException(__PRETTY_FUNCTION__); parsing_finished = true; + first_parser_finished.set(); reader_condvar.notify_all(); segmentator_condvar.notify_all(); } diff --git a/tests/integration/test_grpc_protocol/configs/users.xml b/tests/integration/test_grpc_protocol/configs/users.xml new file mode 100644 index 00000000000..2ae1a397fe5 --- /dev/null +++ b/tests/integration/test_grpc_protocol/configs/users.xml @@ -0,0 +1,8 @@ + + + + + 0 + + + diff --git a/tests/integration/test_grpc_protocol/test.py b/tests/integration/test_grpc_protocol/test.py index d8604276281..594879427ca 100644 --- a/tests/integration/test_grpc_protocol/test.py +++ b/tests/integration/test_grpc_protocol/test.py @@ -27,7 +27,7 @@ import clickhouse_grpc_pb2_grpc config_dir = os.path.join(SCRIPT_DIR, './configs') cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/grpc_config.xml']) +node = cluster.add_instance('node', main_configs=['configs/grpc_config.xml'], user_configs=["configs/users.xml"]) grpc_port = 9100 main_channel = None From 87eb2592deb9dca4eb39fa424493db4bf9d9e403 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 11 Mar 2021 18:10:06 +0300 Subject: [PATCH 130/333] add input_format_skip_unknown_fields --- .../1_stateful/00161_parallel_parsing_with_names.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh index ab7706e100f..ad7b83c55a4 
100755 --- a/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh +++ b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh @@ -14,8 +14,8 @@ do echo "$format, false"; $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ - "SELECT ClientEventTime as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \ - $CLICKHOUSE_CLIENT --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names FORMAT $format" + "SELECT URLRegions as d, ClientEventTime as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \ + $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names FORMAT $format" $CLICKHOUSE_CLIENT -q "SELECT count() FROM parsing_with_names;" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" @@ -24,8 +24,8 @@ do $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(a DateTime, b String, c FixedString(16)) ENGINE=Memory()" echo "$format, true"; $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ - "SELECT ClientEventTime as a, MobilePhoneModel as b, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \ - $CLICKHOUSE_CLIENT --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names FORMAT $format" + "SELECT URLRegions as d, ClientEventTime as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \ + $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names FORMAT $format" $CLICKHOUSE_CLIENT -q "SELECT count() FROM parsing_with_names;" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" From 8e68edebea30779f0e8856dc2e5f000d3bec6387 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 11 Mar 2021 18:21:58 +0300 Subject: [PATCH 131/333] better test --- .../00161_parallel_parsing_with_names.reference 
| 8 ++++---- .../1_stateful/00161_parallel_parsing_with_names.sh | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/queries/1_stateful/00161_parallel_parsing_with_names.reference b/tests/queries/1_stateful/00161_parallel_parsing_with_names.reference index 4e0017f2d78..fb0ba75c148 100644 --- a/tests/queries/1_stateful/00161_parallel_parsing_with_names.reference +++ b/tests/queries/1_stateful/00161_parallel_parsing_with_names.reference @@ -1,8 +1,8 @@ TSVWithNames, false -50000 +29caf86494f169d6339f6c5610b20731 - TSVWithNames, true -50000 +29caf86494f169d6339f6c5610b20731 - CSVWithNames, false -50000 +29caf86494f169d6339f6c5610b20731 - CSVWithNames, true -50000 +29caf86494f169d6339f6c5610b20731 - diff --git a/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh index ad7b83c55a4..ca9984900e1 100755 --- a/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh +++ b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh @@ -17,16 +17,16 @@ do "SELECT URLRegions as d, ClientEventTime as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \ $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names FORMAT $format" - $CLICKHOUSE_CLIENT -q "SELECT count() FROM parsing_with_names;" + $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" - $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(a DateTime, b String, c FixedString(16)) ENGINE=Memory()" + $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime, b String) ENGINE=Memory()" echo "$format, true"; $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ "SELECT URLRegions as d, ClientEventTime as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits 
LIMIT 50000 Format $format" | \ $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names FORMAT $format" - $CLICKHOUSE_CLIENT -q "SELECT count() FROM parsing_with_names;" + $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" done \ No newline at end of file From 65f90f2ce9ea9e9d4076f06c58ddd981c82cc098 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 11 Mar 2021 21:52:10 +0300 Subject: [PATCH 132/333] Fix distributed requests cancellation with async_socket_for_remote=1 Before this patch for distributed queries, that requires cancellation (simple select from multiple shards with limit, i.e. `select * from remote('127.{2,3}', system.numbers) limit 100`) it is very easy to trigger the situation when remote shard is in the middle of sending Data block while the initiator already send Cancel and expecting some new packet, but it will receive not new packet, but part of the Data block that was in the middle of sending before cancellation, and this will lead to some various errors, like: - Unknown packet X from server Y - Unexpected packet from server Y - and a lot more... Fix this, by correctly waiting for the pending packet before cancellation. It is not very easy to write a test, since localhost is too fast. Also note, that it is not possible to get these errors with hedged requests (use_hedged_requests=1) since handle fibers correctly. But it had been disabled by default for 21.3 in #21534, while async_socket_for_remote is enabled by default. 
--- .../RemoteQueryExecutorReadContext.cpp | 15 +++++++++++---- src/DataStreams/RemoteQueryExecutorReadContext.h | 4 ++-- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/DataStreams/RemoteQueryExecutorReadContext.cpp b/src/DataStreams/RemoteQueryExecutorReadContext.cpp index c2a65f02d08..11cc2dcd8e4 100644 --- a/src/DataStreams/RemoteQueryExecutorReadContext.cpp +++ b/src/DataStreams/RemoteQueryExecutorReadContext.cpp @@ -104,11 +104,11 @@ void RemoteQueryExecutorReadContext::setConnectionFD(int fd, const Poco::Timespa connection_fd_description = fd_description; } -bool RemoteQueryExecutorReadContext::checkTimeout() const +bool RemoteQueryExecutorReadContext::checkTimeout(bool blocking) const { try { - return checkTimeoutImpl(); + return checkTimeoutImpl(blocking); } catch (DB::Exception & e) { @@ -118,13 +118,13 @@ bool RemoteQueryExecutorReadContext::checkTimeout() const } } -bool RemoteQueryExecutorReadContext::checkTimeoutImpl() const +bool RemoteQueryExecutorReadContext::checkTimeoutImpl(bool blocking) const { /// Wait for epoll will not block if it was polled externally. epoll_event events[3]; events[0].data.fd = events[1].data.fd = events[2].data.fd = -1; - int num_events = epoll.getManyReady(3, events,/* blocking = */ false); + int num_events = epoll.getManyReady(3, events, blocking); bool is_socket_ready = false; bool is_pipe_alarmed = false; @@ -184,9 +184,16 @@ bool RemoteQueryExecutorReadContext::resumeRoutine() void RemoteQueryExecutorReadContext::cancel() { std::lock_guard guard(fiber_lock); + /// It is safe to just destroy fiber - we are not in the process of reading from socket. boost::context::fiber to_destroy = std::move(fiber); + while (is_read_in_progress.load(std::memory_order_relaxed)) + { + checkTimeout(/* blocking= */ true); + to_destroy = std::move(to_destroy).resume(); + } + /// Send something to pipe to cancel executor waiting. 
uint64_t buf = 0; while (-1 == write(pipe_fd[1], &buf, sizeof(buf))) diff --git a/src/DataStreams/RemoteQueryExecutorReadContext.h b/src/DataStreams/RemoteQueryExecutorReadContext.h index cb6421f78d0..5fbe52469cd 100644 --- a/src/DataStreams/RemoteQueryExecutorReadContext.h +++ b/src/DataStreams/RemoteQueryExecutorReadContext.h @@ -54,8 +54,8 @@ public: explicit RemoteQueryExecutorReadContext(IConnections & connections_); ~RemoteQueryExecutorReadContext(); - bool checkTimeout() const; - bool checkTimeoutImpl() const; + bool checkTimeout(bool blocking = false) const; + bool checkTimeoutImpl(bool blocking) const; void setConnectionFD(int fd, const Poco::Timespan & timeout = 0, const std::string & fd_description = ""); void setTimer() const; From e4b4665ff556f7da153dd0bfb8afe210e1fc7d24 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 12 Mar 2021 13:58:03 +0300 Subject: [PATCH 133/333] better --- programs/client/Client.cpp | 8 ++++---- .../Formats/Impl/ParallelFormattingOutputFormat.h | 9 +++++++++ src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp | 2 ++ .../00416_pocopatch_progress_in_http_headers.sh | 2 +- 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 3c27908741c..c878a3071c4 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -2096,10 +2096,10 @@ private: current_format = "Vertical"; /// It is not clear how to write progress with parallel formatting. It may increase code complexity significantly. 
- if (!need_render_progress) - block_out_stream = context.getOutputStreamParallelIfPossible(current_format, *out_buf, block); - else - block_out_stream = context.getOutputStream(current_format, *out_buf, block); + // if (!need_render_progress) + // block_out_stream = context.getOutputStreamParallelIfPossible(current_format, *out_buf, block); + // else + block_out_stream = context.getOutputStream(current_format, *out_buf, block); block_out_stream->writePrefix(); } diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h index 2efc369e178..3fcd1f0aadf 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h @@ -6,6 +6,9 @@ #include #include #include +#include "IO/ReadBuffer.h" +#include "IO/ReadBufferFromString.h" +#include "IO/WriteBufferFromString.h" #include #include #include @@ -104,6 +107,12 @@ public: /// There are no formats which support parallel formatting and progress writing at the same time void onProgress(const Progress &) override {} + String getContentType() const override + { + WriteBufferFromOwnString buffer; + return internal_formatter_creator(buffer)->getContentType(); + } + protected: void consume(Chunk chunk) override final { diff --git a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp index 355af038da9..7ded716b34e 100644 --- a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp +++ b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp @@ -149,6 +149,8 @@ void WriteBufferFromHTTPServerResponse::onProgress(const Progress & progress) { std::lock_guard lock(mutex); + std::cout << StackTrace().toString() << std::endl; + /// Cannot add new headers if body was started to send. 
if (headers_finished_sending) return; diff --git a/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh index 5d9cd12e4bf..6e9814cbca8 100755 --- a/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh +++ b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=5&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d 'SELECT max(number) FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]' # This test will fail with external poco (progress not supported) -${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=1&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d 'SELECT number FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]' +${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=1&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0&output_format_parallel_formatting=0" -d 'SELECT number FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]' ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&max_block_size=1&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0&enable_http_compression=1" -H 'Accept-Encoding: gzip' -d 'SELECT number FROM system.numbers LIMIT 10' | gzip -d # 'send_progress_in_http_headers' is false by default From b83564bdadf0b092e1aa517d4e1fb3f1633ced8a Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Fri, 12 Mar 2021 15:08:00 +0300 Subject: [PATCH 134/333] Add test for path as a query parameter in system.zookeeper --- .../01753_system_zookeeper_query_param_path.reference | 1 + .../01753_system_zookeeper_query_param_path.sh | 9 +++++++++ 2 files changed, 10 insertions(+) create mode 100644 
tests/queries/0_stateless/01753_system_zookeeper_query_param_path.reference create mode 100755 tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh diff --git a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.reference b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.reference new file mode 100644 index 00000000000..938ba95c63a --- /dev/null +++ b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.reference @@ -0,0 +1 @@ +clickhouse diff --git a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh new file mode 100755 index 00000000000..9e98e6d125e --- /dev/null +++ b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +${CLICKHOUSE_CLIENT} --query="SELECT name FROM system.zookeeper WHERE path = {path:String}" --param_path "/" + From 7f2d03b12802c392f114b00ca667e278d9016761 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Fri, 12 Mar 2021 18:24:59 +0300 Subject: [PATCH 135/333] Change test --- .../01753_system_zookeeper_query_param_path.reference | 2 +- .../0_stateless/01753_system_zookeeper_query_param_path.sh | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.reference b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.reference index 938ba95c63a..9daeafb9864 100644 --- a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.reference +++ b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.reference @@ -1 +1 @@ -clickhouse +test diff --git a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh index 
9e98e6d125e..75a8e1b95ac 100755 --- a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh +++ b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh @@ -5,5 +5,10 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --query="SELECT name FROM system.zookeeper WHERE path = {path:String}" --param_path "/" +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test_01753"; +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_01753 (n Int8) ENGINE=ReplicatedMergeTree('/test_01753/test', 'r') ORDER BY n" +${CLICKHOUSE_CLIENT} --query="SELECT name FROM system.zookeeper WHERE path = {path:String}" --param_path "/test_01753" + + +${CLICKHOUSE_CLIENT} --query="DROP TABLE test_01753 SYNC"; From 8b27da2b8168ba5369b8ae530019282b2c06d223 Mon Sep 17 00:00:00 2001 From: George Date: Fri, 12 Mar 2021 19:00:41 +0300 Subject: [PATCH 136/333] first draft --- .../external-authenticators/ldap.md | 71 ++++---- .../external-authenticators/ldap.md | 154 ++++++++++++++++++ 2 files changed, 184 insertions(+), 41 deletions(-) create mode 100644 docs/ru/operations/external-authenticators/ldap.md diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md index 36a13227852..523a4ff2993 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -35,30 +35,27 @@ To define LDAP server you must add `ldap_servers` section to the `config.xml`. F Note, that you can define multiple LDAP servers inside the `ldap_servers` section using distinct names. -Parameters: +**Parameters** -- `host` - LDAP server hostname or IP, this parameter is mandatory and cannot be empty. -- `port` - LDAP server port, default is `636` if `enable_tls` is set to `true`, `389` otherwise. -- `bind_dn` - template used to construct the DN to bind to. 
- - The resulting DN will be constructed by replacing all `{user_name}` substrings of the - template with the actual user name during each authentication attempt. -- `verification_cooldown` - a period of time, in seconds, after a successful bind attempt, - during which the user will be assumed to be successfully authenticated for all consecutive - requests without contacting the LDAP server. +- `host` — LDAP server hostname or IP, this parameter is mandatory and cannot be empty. +- `port` — LDAP server port, default is `636` if `enable_tls` is set to `true`, `389` otherwise. +- `bind_dn` — template used to construct the DN to bind to. + - The resulting DN will be constructed by replacing all `{user_name}` substrings of the template with the actual user name during each authentication attempt. +- `verification_cooldown` — a period of time, in seconds, after a successful bind attempt, during which the user will be assumed to be successfully authenticated for all consecutive requests without contacting the LDAP server. - Specify `0` (the default) to disable caching and force contacting the LDAP server for each authentication request. -- `enable_tls` - flag to trigger use of secure connection to the LDAP server. +- `enable_tls` — flag to trigger use of secure connection to the LDAP server. - Specify `no` for plain text `ldap://` protocol (not recommended). - Specify `yes` for LDAP over SSL/TLS `ldaps://` protocol (recommended, the default). - Specify `starttls` for legacy StartTLS protocol (plain text `ldap://` protocol, upgraded to TLS). -- `tls_minimum_protocol_version` - the minimum protocol version of SSL/TLS. +- `tls_minimum_protocol_version` — the minimum protocol version of SSL/TLS. - Accepted values are: `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, `tls1.2` (the default). -- `tls_require_cert` - SSL/TLS peer certificate verification behavior. +- `tls_require_cert` — SSL/TLS peer certificate verification behavior. 
- Accepted values are: `never`, `allow`, `try`, `demand` (the default). -- `tls_cert_file` - path to certificate file. -- `tls_key_file` - path to certificate key file. -- `tls_ca_cert_file` - path to CA certificate file. -- `tls_ca_cert_dir` - path to the directory containing CA certificates. -- `tls_cipher_suite` - allowed cipher suite (in OpenSSL notation). +- `tls_cert_file` — path to certificate file. +- `tls_key_file` — path to certificate key file. +- `tls_ca_cert_file` — path to CA certificate file. +- `tls_ca_cert_dir` — path to the directory containing CA certificates. +- `tls_cipher_suite` — allowed cipher suite (in OpenSSL notation). ## LDAP External Authenticator {#ldap-external-authenticator} @@ -87,9 +84,10 @@ Note, that user `my_user` refers to `my_ldap_server`. This LDAP server must be c When SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled in ClickHouse, users that are authenticated by LDAP servers can also be created using the [CRATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement. +Query: ```sql -CREATE USER my_user IDENTIFIED WITH ldap_server BY 'my_ldap_server' +CREATE USER my_user IDENTIFIED WITH ldap_server BY 'my_ldap_server'; ``` ## LDAP Exernal User Directory {#ldap-external-user-directory} @@ -123,34 +121,25 @@ Example (goes into `config.xml`): ``` -Note that `my_ldap_server` referred in the `ldap` section inside the `user_directories` section must be a previously -defined LDAP server that is configured in the `config.xml` (see [LDAP Server Definition](#ldap-server-definition)). +Note that `my_ldap_server` referred in the `ldap` section inside the `user_directories` section must be a previously defined LDAP server that is configured in the `config.xml` (see [LDAP Server Definition](#ldap-server-definition)). Parameters: -- `server` - one of LDAP server names defined in the `ldap_servers` config section above. 
+- `server` — one of LDAP server names defined in the `ldap_servers` config section above. This parameter is mandatory and cannot be empty. -- `roles` - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. - - If no roles are specified here or assigned during role mapping (below), user will not be able - to perform any actions after authentication. -- `role_mapping` - section with LDAP search parameters and mapping rules. - - When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` - and the name of the logged in user. For each entry found during that search, the value of the specified - attribute is extracted. For each attribute value that has the specified prefix, the prefix is removed, - and the rest of the value becomes the name of a local role defined in ClickHouse, - which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. +- `roles` — section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. + - If no roles are specified here or assigned during role mapping (below), user will not be able to perform any actions after authentication. +- `role_mapping` — section with LDAP search parameters and mapping rules. + - When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` and the name of the logged in user. For each entry found during that search, the value of the specified attribute is extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. 
- There can be multiple `role_mapping` sections defined inside the same `ldap` section. All of them will be applied. - - `base_dn` - template used to construct the base DN for the LDAP search. - - The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}` - substrings of the template with the actual user name and bind DN during each LDAP search. - - `scope` - scope of the LDAP search. + - `base_dn` — template used to construct the base DN for the LDAP search. + - The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}` substrings of the template with the actual user name and bind DN during each LDAP search. + - `scope` — scope of the LDAP search. - Accepted values are: `base`, `one_level`, `children`, `subtree` (the default). - - `search_filter` - template used to construct the search filter for the LDAP search. - - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{base_dn}` - substrings of the template with the actual user name, bind DN, and base DN during each LDAP search. + - `search_filter` — template used to construct the search filter for the LDAP search. + - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{base_dn}` substrings of the template with the actual user name, bind DN, and base DN during each LDAP search. - Note, that the special characters must be escaped properly in XML. - - `attribute` - attribute name whose values will be returned by the LDAP search. - - `prefix` - prefix, that will be expected to be in front of each string in the original - list of strings returned by the LDAP search. Prefix will be removed from the original - strings and resulting strings will be treated as local role names. Empty, by default. + - `attribute` — attribute name whose values will be returned by the LDAP search. 
+ - `prefix` — prefix, that will be expected to be in front of each string in the original list of strings returned by the LDAP search. Prefix will be removed from the original strings and resulting strings will be treated as local role names. Empty, by default. +[Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap.md) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md new file mode 100644 index 00000000000..fe364c69f05 --- /dev/null +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -0,0 +1,154 @@ +# LDAP {#external-authenticators-ldap} + +Для аутентификации пользователей ClickHouse можно использовать сервер LDAP. Для этого есть два разных подхода: + +- использовать LDAP как внешний аутентификатор для существующих пользователей, которые определены в `users.xml` или в локальных путях управления контролем +- использовать LDAP как внешний пользовательский каталог и разрешить аутентификацию локально неопределенных пользователей, если они есть на LDAP сервере + +Для этих обоих подходов необходимо определить в ClickHouse конфиге внутренне названный LDAP сервер, чтобы другие части конфига могли ссылаться на него. + +## Определение LDAP сервера {#ldap-server-definition} + +Чтобы определить LDAP сервер, необходимо добавить секцию `ldap_servers` в `config.xml`. Например: + +```xml + + + + + localhost + 636 + uid={user_name},ou=users,dc=example,dc=com + 300 + yes + tls1.2 + demand + /path/to/tls_cert_file + /path/to/tls_key_file + /path/to/tls_ca_cert_file + /path/to/tls_ca_cert_dir + ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384 + + + +``` + +Обратите внимание, что можно определить несколько LDAP серверов внутри секции `ldap_servers` используя различные имена. + +**Параметры** + +- `host` — LDAP server hostname or IP, this parameter is mandatory and cannot be empty. имя хоста сервера LDAP или его IP. 
Этот параметр обязательный и не может быть пустым. +- `port` — порт сервера LDAP. По-умолчанию: при значение `true` настройки `enable_tls` — `636`, иначе `389`. +- `bind_dn` — шаблон для создания DN для привязки. + - конечный DN будет создан заменой всех подстрок `{user_name}` шаблона на настоящее имя пользователя при каждой попытке аутентификации. +- `verification_cooldown` — a period of time, in seconds, after a successful bind attempt, during which the user will be assumed to be successfully authenticated for all consecutive requests without contacting the LDAP server. + - Specify `0` (the default) to disable caching and force contacting the LDAP server for each authentication request. +- `enable_tls` — flag to trigger use of secure connection to the LDAP server. + - Specify `no` for plain text `ldap://` protocol (not recommended). + - Specify `yes` for LDAP over SSL/TLS `ldaps://` protocol (recommended, the default). + - Specify `starttls` for legacy StartTLS protocol (plain text `ldap://` protocol, upgraded to TLS). +- `tls_minimum_protocol_version` — the minimum protocol version of SSL/TLS. + - Accepted values are: `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, `tls1.2` (the default). +- `tls_require_cert` — SSL/TLS peer certificate verification behavior. + - Accepted values are: `never`, `allow`, `try`, `demand` (the default). +- `tls_cert_file` — path to certificate file. +- `tls_key_file` — path to certificate key file. +- `tls_ca_cert_file` — path to CA certificate file. +- `tls_ca_cert_dir` — path to the directory containing CA certificates. +- `tls_cipher_suite` — allowed cipher suite (in OpenSSL notation). + +## LDAP External Authenticator {#ldap-external-authenticator} + +A remote LDAP server can be used as a method for verifying passwords for locally defined users (users defined in `users.xml` or in local access control paths). In order to achieve this, specify previously defined LDAP server name instead of `password` or similar sections in the user definition. 
+ +At each login attempt, ClickHouse will try to "bind" to the specified DN defined by the `bind_dn` parameter in the [LDAP server definition](#ldap-server-definition) using the provided credentials, and if successful, the user will be considered authenticated. This is often called a "simple bind" method. + +For example, + +```xml + + + + + + + + my_ldap_server + + + + +``` + +Note, that user `my_user` refers to `my_ldap_server`. This LDAP server must be configured in the main `config.xml` file as described previously. + +When SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled in ClickHouse, users that are authenticated by LDAP servers can also be created using the [CRATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement. + + +```sql +CREATE USER my_user IDENTIFIED WITH ldap_server BY 'my_ldap_server' +``` + +## LDAP Exernal User Directory {#ldap-external-user-directory} + +In addition to the locally defined users, a remote LDAP server can be used as a source of user definitions. In order to achieve this, specify previously defined LDAP server name (see [LDAP Server Definition](#ldap-server-definition)) in the `ldap` section inside the `users_directories` section of the `config.xml` file. + +At each login attempt, ClickHouse will try to find the user definition locally and authenticate it as usual, but if the user is not defined, ClickHouse will assume it exists in the external LDAP directory, and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. 
All this implies that the SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled and roles are created using the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. + +Example (goes into `config.xml`): + +```xml + + + + + + my_ldap_server + + + + + + ou=groups,dc=example,dc=com + subtree + (&(objectClass=groupOfNames)(member={bind_dn})) + cn + clickhouse_ + + + + +``` + +Note that `my_ldap_server` referred in the `ldap` section inside the `user_directories` section must be a previously +defined LDAP server that is configured in the `config.xml` (see [LDAP Server Definition](#ldap-server-definition)). + +Parameters: + +- `server` - one of LDAP server names defined in the `ldap_servers` config section above. + This parameter is mandatory and cannot be empty. +- `roles` - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. + - If no roles are specified here or assigned during role mapping (below), user will not be able + to perform any actions after authentication. +- `role_mapping` - section with LDAP search parameters and mapping rules. + - When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` + and the name of the logged in user. For each entry found during that search, the value of the specified + attribute is extracted. For each attribute value that has the specified prefix, the prefix is removed, + and the rest of the value becomes the name of a local role defined in ClickHouse, + which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. + - There can be multiple `role_mapping` sections defined inside the same `ldap` section. All of them will be applied. + - `base_dn` - template used to construct the base DN for the LDAP search. 
+ - The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}` + substrings of the template with the actual user name and bind DN during each LDAP search. + - `scope` - scope of the LDAP search. + - Accepted values are: `base`, `one_level`, `children`, `subtree` (the default). + - `search_filter` - template used to construct the search filter for the LDAP search. + - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{base_dn}` + substrings of the template with the actual user name, bind DN, and base DN during each LDAP search. + - Note, that the special characters must be escaped properly in XML. + - `attribute` - attribute name whose values will be returned by the LDAP search. + - `prefix` - prefix, that will be expected to be in front of each string in the original + list of strings returned by the LDAP search. Prefix will be removed from the original + strings and resulting strings will be treated as local role names. Empty, by default. + +[Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap.md) From b3f8ac796c24dfebb0eacfbb0c51d0c0ca094750 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Fri, 12 Mar 2021 19:09:55 +0300 Subject: [PATCH 137/333] Update docs/en/sql-reference/statements/detach.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/statements/detach.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index f3e2f88ffc3..6da49c4d3d4 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -5,7 +5,7 @@ toc_title: DETACH # DETACH Statement {#detach} -Deletes information about the table or view from the server. The server stops knowing about their existence. 
+Deletes information about the table or materialized view from the server. The server stops knowing about their existence. Syntax: From ed87521fcfc3c6a8bc81eb105d0a2e1e9167a7f2 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Fri, 12 Mar 2021 19:10:01 +0300 Subject: [PATCH 138/333] Update docs/en/sql-reference/statements/detach.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/statements/detach.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index 6da49c4d3d4..47490f3e0f9 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -13,7 +13,7 @@ Syntax: DETACH TABLE|VIEW [IF EXISTS] [db.]name [PERMANENTLY] [ON CLUSTER cluster] ``` -Detaching does not delete the data or metadata for the table or view. If the table or view was not detached `PERMANENTLY`, on the next server launch the server will read the metadata and recall the table/view again. If the table or view was detached `PERMANENTLY`, there will be no automatic recall. +Detaching does not delete the data or metadata for the table or materialized view. If the table or view was not detached `PERMANENTLY`, on the next server launch the server will read the metadata and recall the table/view again. If the table or view was detached `PERMANENTLY`, there will be no automatic recall. Whether the table was detached permanently or not, in both cases you can reattach it using the [ATTACH](../../sql-reference/statements/attach.md). System log tables can be also attached back (e.g. `query_log`, `text_log`, etc). Other system tables can't be reattached. On the next server launch the server will recall those tables again. 
From ca5e20a57dc576cb1a03d5a952b76732ed6c6309 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Fri, 12 Mar 2021 19:11:27 +0300 Subject: [PATCH 139/333] Update docs/en/sql-reference/statements/detach.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/en/sql-reference/statements/detach.md | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index 47490f3e0f9..ae97d716a5e 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -25,25 +25,10 @@ Also you can not [DROP](../../sql-reference/statements/drop.md#drop-table) the d **Example** -Query: +Creating a table: ``` sql CREATE TABLE test ENGINE = Log AS SELECT * FROM numbers(10); - -DETACH TABLE test; - -SELECT * FROM TEST; -``` - -Result: - -``` text -Ok. - -Ok. - -Received exception from server (version 21.3.1): -Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.TEST doesn't exist. 
-``` +SELECT * FROM test; [Original article](https://clickhouse.tech/docs/en/sql-reference/statements/detach/) From 53fd7cb8a840719af7a4316b216d931a5a1806b3 Mon Sep 17 00:00:00 2001 From: George Date: Fri, 12 Mar 2021 19:14:40 +0300 Subject: [PATCH 140/333] Updated example --- docs/en/sql-reference/statements/detach.md | 36 ++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index ae97d716a5e..cb0d7cf7b66 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -27,8 +27,44 @@ Also you can not [DROP](../../sql-reference/statements/drop.md#drop-table) the d Creating a table: +Query: + ``` sql CREATE TABLE test ENGINE = Log AS SELECT * FROM numbers(10); SELECT * FROM test; +``` + +Result: + +``` text +┌─number─┐ +│ 0 │ +│ 1 │ +│ 2 │ +│ 3 │ +│ 4 │ +│ 5 │ +│ 6 │ +│ 7 │ +│ 8 │ +│ 9 │ +└────────┘ +``` + +Detaching the table: + +Query: + +``` sql +DETACH TABLE test; +SELECT * FROM test; +``` + +Result: + +``` text +Received exception from server (version 21.4.1): +Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.test doesn't exist. 
+``` [Original article](https://clickhouse.tech/docs/en/sql-reference/statements/detach/) From f89247d7bd6d790d5d59679add762b082e905329 Mon Sep 17 00:00:00 2001 From: George Date: Fri, 12 Mar 2021 19:28:04 +0300 Subject: [PATCH 141/333] updated attach.md --- docs/en/sql-reference/statements/attach.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/attach.md b/docs/en/sql-reference/statements/attach.md index 035441ef5f1..ffb577a8839 100644 --- a/docs/en/sql-reference/statements/attach.md +++ b/docs/en/sql-reference/statements/attach.md @@ -17,4 +17,8 @@ If the table was previously detached ([DETACH](../../sql-reference/statements/de ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] ``` -This query is used when starting the server. The server stores table metadata as files with `ATTACH` queries, which it simply runs at launch (with the exception of system tables, which are explicitly created on the server). +This query is used when starting the server. The server stores table metadata as files with `ATTACH` queries, which it simply runs at launch (with the exception of some system tables, which are explicitly created on the server). + +If the table was detached permanently, it won't be reattached at the server start, so you need to use `ATTACH` query explicitly. 
+ +[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/attach/) From 3269e1d3316bf2be3a1f3f6ea74111004d8db703 Mon Sep 17 00:00:00 2001 From: Vitaliy Zakaznikov Date: Fri, 12 Mar 2021 12:32:32 -0500 Subject: [PATCH 142/333] Updating TestFlows to 1.6.74 --- docker/test/testflows/runner/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/testflows/runner/Dockerfile b/docker/test/testflows/runner/Dockerfile index 10014851a82..bd7eee4c166 100644 --- a/docker/test/testflows/runner/Dockerfile +++ b/docker/test/testflows/runner/Dockerfile @@ -35,7 +35,7 @@ RUN apt-get update \ ENV TZ=Europe/Moscow RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone -RUN pip3 install urllib3 testflows==1.6.72 docker-compose docker dicttoxml kazoo tzlocal +RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal ENV DOCKER_CHANNEL stable ENV DOCKER_VERSION 17.09.1-ce From 5af5c90b8baa2a701f3c688023f7d78fa61c4551 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 12 Mar 2021 21:10:16 +0300 Subject: [PATCH 143/333] Fix test_replace_partition flakiness MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit test_replace_partition/test.py::test_replace_after_replace_failover statistics: ```sql ┌──────ymd─┬─success─┬─failure─┐ │ 20200613 │ 40 │ 2 │ │ 20200614 │ 98 │ 2 │ │ 20200615 │ 126 │ 9 │ │ 20200616 │ 141 │ 10 │ │ 20200617 │ 119 │ 7 │ │ 20200618 │ 87 │ 12 │ │ 20200619 │ 81 │ 9 │ ... 
│ 20210305 │ 60 │ 28 │ │ 20210306 │ 69 │ 7 │ │ 20210307 │ 42 │ 0 │ │ 20210308 │ 53 │ 8 │ │ 20210309 │ 137 │ 23 │ │ 20210310 │ 84 │ 14 │ │ 20210311 │ 81 │ 20 │ └──────────┴─────────┴─────────┘ ``` --- .../test_replace_partition/test.py | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/integration/test_replace_partition/test.py b/tests/integration/test_replace_partition/test.py index 06e7f4be82b..c87a650fce7 100644 --- a/tests/integration/test_replace_partition/test.py +++ b/tests/integration/test_replace_partition/test.py @@ -1,3 +1,4 @@ +import time import pytest from helpers.cluster import ClickHouseCluster @@ -97,12 +98,13 @@ def test_drop_failover(drop_failover): # Drop partition on source node node3.query("ALTER TABLE test_table DROP PARTITION 201706") - # connection restored + # Wait few seconds for connection to zookeeper to be restored + time.sleep(5) - node4.query_with_retry("select last_exception from system.replication_queue where type = 'REPLACE_RANGE'", - check_callback=lambda x: 'Not found part' not in x, sleep_time=1) - assert 'Not found part' not in node4.query( - "select last_exception from system.replication_queue where type = 'REPLACE_RANGE'") + msg = node4.query_with_retry( + "select last_exception from system.replication_queue where type = 'REPLACE_RANGE'", + check_callback=lambda x: 'Not found part' not in x, sleep_time=1) + assert 'Not found part' not in msg assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '') @@ -151,8 +153,11 @@ def test_replace_after_replace_failover(replace_after_replace_failover): assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", '333') - node6.query_with_retry("select last_exception from system.replication_queue where type = 'REPLACE_RANGE'", - check_callback=lambda x: 'Not found part' not in x, sleep_time=1) - assert 'Not found part' not in node6.query( - "select last_exception from system.replication_queue where type = 'REPLACE_RANGE'") + 
# Wait few seconds for connection to zookeeper to be restored + time.sleep(5) + + msg = node6.query_with_retry( + "select last_exception from system.replication_queue where type = 'REPLACE_RANGE'", + check_callback=lambda x: 'Not found part' not in x, sleep_time=1) + assert 'Not found part' not in msg assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", '333') From 2e99dad56213413a72969d47650050b42cbcc073 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 12 Mar 2021 21:10:16 +0300 Subject: [PATCH 144/333] Tiny "machine" cleanup of test_replace_partition --- tests/integration/test_replace_partition/test.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_replace_partition/test.py b/tests/integration/test_replace_partition/test.py index c87a650fce7..d30a038825f 100644 --- a/tests/integration/test_replace_partition/test.py +++ b/tests/integration/test_replace_partition/test.py @@ -1,3 +1,7 @@ +# pylint: disable=line-too-long +# pylint: disable=unused-argument +# pylint: disable=redefined-outer-name: + import time import pytest @@ -14,13 +18,13 @@ def _fill_nodes(nodes, shard): node.query( ''' CREATE DATABASE test; - + CREATE TABLE real_table(date Date, id UInt32, dummy UInt32) ENGINE = MergeTree(date, id, 8192); - + CREATE TABLE other_table(date Date, id UInt32, dummy UInt32) ENGINE = MergeTree(date, id, 8192); - + CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}', date, id, 8192); '''.format(shard=shard, replica=node.name)) From 34915d031145cfab1ca6f5f365c59b4e3acc77f3 Mon Sep 17 00:00:00 2001 From: Michael Monashev Date: Fri, 12 Mar 2021 21:13:20 +0300 Subject: [PATCH 145/333] Fix ORDER BY syntax --- docs/ru/engines/table-engines/mergetree-family/mergetree.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md 
b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index bc74b2592b9..0615613533b 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -517,7 +517,7 @@ CREATE TABLE table_for_aggregation y Int ) ENGINE = MergeTree -ORDER BY k1, k2 +ORDER BY (k1, k2) TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y); ``` From ff1cb65f0b3f6c8a0dc12818a92d8884aaa4d8d1 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 12 Mar 2021 21:44:59 +0300 Subject: [PATCH 146/333] Start accepting connections after DDLWorker and dictionaries initialization Found by integration tests [1]: Code: 139. DB::Exception: Received from 172.18.0.6:9000. DB::Exception: DDL background thread is not initialized. Stack trace: [1]: https://clickhouse-test-reports.s3.yandex.net/21643/65f90f2ce9ea9e9d4076f06c58ddd981c82cc098/integration_tests_(thread).html#fail1 --- programs/server/Server.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 9889b08828b..57d9257df5d 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1280,9 +1280,6 @@ int Server::main(const std::vector & /*args*/) async_metrics.start(); global_context->enableNamedSessions(); - for (auto & server : *servers) - server.start(); - { String level_str = config().getString("text_log.level", ""); int level = level_str.empty() ? 
INT_MAX : Poco::Logger::parseLevel(level_str); @@ -1334,6 +1331,8 @@ int Server::main(const std::vector & /*args*/) "distributed_ddl", "DDLWorker", &CurrentMetrics::MaxDDLEntryID)); } + for (auto & server : *servers) + server.start(); LOG_INFO(log, "Ready for connections."); SCOPE_EXIT({ From 508953ca006f29e82d8428edd540492f165b3c96 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 12 Mar 2021 23:51:53 +0300 Subject: [PATCH 147/333] more debug info --- programs/client/Client.cpp | 47 +++++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index c5b579f2046..da99541a3e7 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1255,6 +1255,29 @@ private: return true; } + // Prints changed settings to stderr. Useful for debugging fuzzing failures. + void printChangedSettings() const + { + const auto & changes = context.getSettingsRef().changes(); + if (!changes.empty()) + { + fmt::print(stderr, "Changed settings: "); + for (size_t i = 0; i < changes.size(); ++i) + { + if (i) + { + fmt::print(stderr, ", "); + } + fmt::print(stderr, "{} = '{}'", changes[i].name, + toString(changes[i].value)); + } + fmt::print(stderr, "\n"); + } + else + { + fmt::print(stderr, "No changed settings.\n"); + } + } /// Returns false when server is not available. bool processWithFuzzing(const String & text) @@ -1323,6 +1346,8 @@ private: // child elements. if (base_before_fuzz != base_after_fuzz) { + printChangedSettings(); + fmt::print(stderr, "Base before fuzz: {}\n" "Base after fuzz: {}\n", @@ -1388,6 +1413,8 @@ private: if (formatted_twice != fuzzed_text) { + printChangedSettings(); + fmt::print(stderr, "The query formatting is broken. 
Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", formatted_twice, fuzzed_text); fmt::print(stderr, "AST parsed back:\n'{}'\nSource AST:\n'{}'\n", @@ -1433,25 +1460,7 @@ private: // Print the changed settings because they might be needed to // reproduce the error. - const auto & changes = context.getSettingsRef().changes(); - if (!changes.empty()) - { - fmt::print(stderr, "Changed settings: "); - for (size_t i = 0; i < changes.size(); ++i) - { - if (i) - { - fmt::print(stderr, ", "); - } - fmt::print(stderr, "{} = '{}'", changes[i].name, - toString(changes[i].value)); - } - fmt::print(stderr, "\n"); - } - else - { - fmt::print(stderr, "No changed settings.\n"); - } + printChangedSettings(); return false; } From 58e415c7d42066ff478c1409e9bd7b84c34d044e Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Sat, 13 Mar 2021 00:17:19 +0300 Subject: [PATCH 148/333] Update clusters only if their configs were updated --- .../AbstractConfigurationComparison.cpp | 4 + .../Config/AbstractConfigurationComparison.h | 5 + src/Interpreters/Cluster.cpp | 38 ++- src/Interpreters/Cluster.h | 2 +- src/Interpreters/Context.cpp | 9 +- .../configs/remote_servers.xml | 30 +++ .../test_reload_clusters_config/test.py | 235 ++++++++++++++++++ 7 files changed, 315 insertions(+), 8 deletions(-) create mode 100644 tests/integration/test_reload_clusters_config/configs/remote_servers.xml create mode 100644 tests/integration/test_reload_clusters_config/test.py diff --git a/src/Common/Config/AbstractConfigurationComparison.cpp b/src/Common/Config/AbstractConfigurationComparison.cpp index 0e603cb1056..59c0c895a89 100644 --- a/src/Common/Config/AbstractConfigurationComparison.cpp +++ b/src/Common/Config/AbstractConfigurationComparison.cpp @@ -26,6 +26,10 @@ bool isSameConfiguration(const Poco::Util::AbstractConfiguration & left, const P return isSameConfiguration(left, String(), right, String()); } +bool isSameConfiguration(const 
Poco::Util::AbstractConfiguration & left, const Poco::Util::AbstractConfiguration & right, const String & key) +{ + return isSameConfiguration(left, key, right, key); +} bool isSameConfiguration(const Poco::Util::AbstractConfiguration & left, const String & left_key, const Poco::Util::AbstractConfiguration & right, const String & right_key) diff --git a/src/Common/Config/AbstractConfigurationComparison.h b/src/Common/Config/AbstractConfigurationComparison.h index f825ad4e53d..795fca2af8e 100644 --- a/src/Common/Config/AbstractConfigurationComparison.h +++ b/src/Common/Config/AbstractConfigurationComparison.h @@ -13,6 +13,11 @@ namespace DB bool isSameConfiguration(const Poco::Util::AbstractConfiguration & left, const Poco::Util::AbstractConfiguration & right); + /// Returns true if the specified subview of the two configurations contains the same keys and values. + bool isSameConfiguration(const Poco::Util::AbstractConfiguration & left, + const Poco::Util::AbstractConfiguration & right, + const String & key); + /// Returns true if specified subviews of the two configurations contains the same keys and values. 
bool isSameConfiguration(const Poco::Util::AbstractConfiguration & left, const String & left_key, const Poco::Util::AbstractConfiguration & right, const String & right_key); diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index fb9788e84c4..b77d5019d48 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -265,20 +266,45 @@ void Clusters::setCluster(const String & cluster_name, const std::shared_ptrkeys(config_prefix, old_config_keys); + std::sort(old_config_keys.begin(), old_config_keys.end()); + + std::set_difference( + old_config_keys.begin(), old_config_keys.end(), new_config_keys.begin(), new_config_keys.end(), std::back_inserter(deleted_keys)); + } std::lock_guard lock(mutex); - impl.clear(); - for (const auto & key : config_keys) + /// If old congig is set, remove deleted clusters from impl, otherwise just clear it. + if (old_config) + { + for (const auto & key : deleted_keys) + impl.erase(key); + } + else + impl.clear(); + + for (const auto & key : new_config_keys) { if (key.find('.') != String::npos) throw Exception("Cluster names with dots are not supported: '" + key + "'", ErrorCodes::SYNTAX_ERROR); - impl.emplace(key, std::make_shared(config, settings, config_prefix, key)); + /// If old config is set and cluster config wasn't changed, don't update this cluster. + if (!old_config || !isSameConfiguration(new_config, *old_config, config_prefix + "." 
+ key)) + impl[key] = std::make_shared(new_config, settings, config_prefix, key); } } diff --git a/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h index c64d52724e5..a047f199204 100644 --- a/src/Interpreters/Cluster.h +++ b/src/Interpreters/Cluster.h @@ -276,7 +276,7 @@ public: ClusterPtr getCluster(const std::string & cluster_name) const; void setCluster(const String & cluster_name, const ClusterPtr & cluster); - void updateClusters(const Poco::Util::AbstractConfiguration & config, const Settings & settings, const String & config_prefix); + void updateClusters(const Poco::Util::AbstractConfiguration & new_config, const Settings & settings, const String & config_prefix, Poco::Util::AbstractConfiguration * old_config = nullptr); public: using Impl = std::map; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 8615cf70343..bb2d553b8e8 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -60,6 +60,7 @@ #include #include #include +#include #include #include #include @@ -1833,12 +1834,18 @@ void Context::setClustersConfig(const ConfigurationPtr & config, const String & { std::lock_guard lock(shared->clusters_mutex); + /// Do not update clusters if this part of config wasn't changed. 
+ if (shared->clusters && isSameConfiguration(*config, *shared->clusters_config, config_name)) { + return; + } + + auto old_clusters_config = shared->clusters_config; shared->clusters_config = config; if (!shared->clusters) shared->clusters = std::make_unique(*shared->clusters_config, settings, config_name); else - shared->clusters->updateClusters(*shared->clusters_config, settings, config_name); + shared->clusters->updateClusters(*shared->clusters_config, settings, config_name, old_clusters_config); } diff --git a/tests/integration/test_reload_clusters_config/configs/remote_servers.xml b/tests/integration/test_reload_clusters_config/configs/remote_servers.xml new file mode 100644 index 00000000000..b827fce02be --- /dev/null +++ b/tests/integration/test_reload_clusters_config/configs/remote_servers.xml @@ -0,0 +1,30 @@ + + + + + true + + node_1 + 9000 + + + node_2 + 9000 + + + + + + true + + node_1 + 9000 + + + node_2 + 9000 + + + + + diff --git a/tests/integration/test_reload_clusters_config/test.py b/tests/integration/test_reload_clusters_config/test.py new file mode 100644 index 00000000000..f1fb0d820d4 --- /dev/null +++ b/tests/integration/test_reload_clusters_config/test.py @@ -0,0 +1,235 @@ +import os +import sys +import time + +import pytest + +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance('node', with_zookeeper=True, main_configs=['configs/remote_servers.xml']) +node_1 = cluster.add_instance('node_1', with_zookeeper=True) +node_2 = cluster.add_instance('node_2', with_zookeeper=True) + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + + node.query('''CREATE TABLE distributed (id UInt32) ENGINE = + Distributed('test_cluster', 'default', 'replicated')''') + + node.query('''CREATE TABLE 
distributed2 (id UInt32) ENGINE = + Distributed('test_cluster2', 'default', 'replicated')''') + + cluster.pause_container('node_1') + cluster.pause_container('node_2') + + yield cluster + + finally: + cluster.shutdown() + + +base_config = ''' + + + + + true + + node_1 + 9000 + + + node_2 + 9000 + + + + + + true + + node_1 + 9000 + + + node_2 + 9000 + + + + + +''' + +test_config1 = ''' + + + + + true + + node_1 + 9000 + + + + + + true + + node_1 + 9000 + + + node_2 + 9000 + + + + + +''' + +test_config2 = ''' + + + + + true + + node_1 + 9000 + + + node_2 + 9000 + + + + + +''' + +test_config3 = ''' + + + + + true + + node_1 + 9000 + + + node_2 + 9000 + + + + + + true + + node_1 + 9000 + + + node_2 + 9000 + + + + + + true + + node_1 + 9000 + + + + + +''' + + +def send_repeated_query(table, count=5): + for i in range(count): + node.query_and_get_error("SELECT count() FROM {} SETTINGS receive_timeout=1".format(table)) + + +def get_errors_count(cluster, host_name="node_1"): + return int(node.query("SELECT errors_count FROM system.clusters WHERE cluster='{}' and host_name='{}'".format(cluster, host_name))) + + +def set_config(config): + node.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config) + node.query("SYSTEM RELOAD CONFIG") + + +def test_simple_reload(started_cluster): + send_repeated_query("distributed") + + assert get_errors_count("test_cluster") > 0 + + node.query("SYSTEM RELOAD CONFIG") + + assert get_errors_count("test_cluster") > 0 + + +def test_update_one_cluster(started_cluster): + send_repeated_query("distributed") + send_repeated_query("distributed2") + + assert get_errors_count("test_cluster") > 0 + assert get_errors_count("test_cluster2") > 0 + + set_config(test_config1) + + assert get_errors_count("test_cluster") == 0 + assert get_errors_count("test_cluster2") > 0 + + set_config(base_config) + + +def test_delete_cluster(started_cluster): + send_repeated_query("distributed") + send_repeated_query("distributed2") + + assert 
get_errors_count("test_cluster") > 0 + assert get_errors_count("test_cluster2") > 0 + + set_config(test_config2) + + assert get_errors_count("test_cluster") > 0 + + result = node.query("SELECT * FROM system.clusters WHERE cluster='test_cluster2'") + assert result == '' + + set_config(base_config) + + +def test_add_cluster(started_cluster): + send_repeated_query("distributed") + send_repeated_query("distributed2") + + assert get_errors_count("test_cluster") > 0 + assert get_errors_count("test_cluster2") > 0 + + set_config(test_config3) + + assert get_errors_count("test_cluster") > 0 + assert get_errors_count("test_cluster2") > 0 + + result = node.query("SELECT * FROM system.clusters WHERE cluster='test_cluster3'") + assert result != '' + + set_config(base_config) + From 8312553d24fd59d2b134f9e60a9efd2e27ae46a7 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 13 Mar 2021 00:30:59 +0300 Subject: [PATCH 149/333] LibraryDictionarySource fix possible leak --- src/Dictionaries/LibraryDictionarySource.cpp | 25 ++++++++++++++------ 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/src/Dictionaries/LibraryDictionarySource.cpp b/src/Dictionaries/LibraryDictionarySource.cpp index 6d763444b54..0e692a5893a 100644 --- a/src/Dictionaries/LibraryDictionarySource.cpp +++ b/src/Dictionaries/LibraryDictionarySource.cpp @@ -84,9 +84,7 @@ namespace + (columns_received->error_string ? 
columns_received->error_string : ""), ErrorCodes::EXTERNAL_LIBRARY_ERROR); - MutableColumns columns(sample_block.columns()); - for (const auto i : ext::range(0, columns.size())) - columns[i] = sample_block.getByPosition(i).column->cloneEmpty(); + MutableColumns columns = sample_block.cloneEmptyColumns(); for (size_t col_n = 0; col_n < columns_received->size; ++col_n) { @@ -151,6 +149,7 @@ LibraryDictionarySource::LibraryDictionarySource( #endif ); settings = std::make_shared(getLibSettings(config, config_prefix + lib_config_settings)); + if (auto lib_new = library->tryGetstrings), decltype(&ClickHouseLibrary::log))>( "ClickHouseDictionary_v3_libNew")) lib_data = lib_new(&settings->strings, ClickHouseLibrary::log); @@ -193,15 +192,20 @@ BlockInputStreamPtr LibraryDictionarySource::loadAll() columns.data[i] = a.name.c_str(); ++i; } + void * data_ptr = nullptr; /// Get function pointer before dataNew call because library->get may throw. auto func_load_all = library->getstrings), decltype(&columns))>("ClickHouseDictionary_v3_loadAll"); + data_ptr = library->get("ClickHouseDictionary_v3_dataNew")(lib_data); - auto * data = func_load_all(data_ptr, &settings->strings, &columns); - auto block = dataToBlock(description.sample_block, data); SCOPE_EXIT(library->get("ClickHouseDictionary_v3_dataDelete")(lib_data, data_ptr)); + + auto * data = func_load_all(data_ptr, &settings->strings, &columns); + + auto block = dataToBlock(description.sample_block, data); + return std::make_shared(block); } @@ -219,16 +223,20 @@ BlockInputStreamPtr LibraryDictionarySource::loadIds(const std::vector & columns_pass.data[i] = a.name.c_str(); ++i; } + void * data_ptr = nullptr; /// Get function pointer before dataNew call because library->get may throw. 
auto func_load_ids = library->getstrings), decltype(&columns_pass), decltype(&ids_data))>( "ClickHouseDictionary_v3_loadIds"); + data_ptr = library->get("ClickHouseDictionary_v3_dataNew")(lib_data); + SCOPE_EXIT(library->get("ClickHouseDictionary_v3_dataDelete")(lib_data, data_ptr)); + auto * data = func_load_ids(data_ptr, &settings->strings, &columns_pass, &ids_data); auto block = dataToBlock(description.sample_block, data); - SCOPE_EXIT(library->get("ClickHouseDictionary_v3_dataDelete")(lib_data, data_ptr)); + return std::make_shared(block); } @@ -258,10 +266,13 @@ BlockInputStreamPtr LibraryDictionarySource::loadKeys(const Columns & key_column /// Get function pointer before dataNew call because library->get may throw. auto func_load_keys = library->getstrings), decltype(&request_cols))>( "ClickHouseDictionary_v3_loadKeys"); + data_ptr = library->get("ClickHouseDictionary_v3_dataNew")(lib_data); + SCOPE_EXIT(library->get("ClickHouseDictionary_v3_dataDelete")(lib_data, data_ptr)); + auto * data = func_load_keys(data_ptr, &settings->strings, &request_cols); auto block = dataToBlock(description.sample_block, data); - SCOPE_EXIT(library->get("ClickHouseDictionary_v3_dataDelete")(lib_data, data_ptr)); + return std::make_shared(block); } From 092b0b49f02bda79fe21b8ffd1b65b76ab2e51b3 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 13 Mar 2021 08:56:54 +0300 Subject: [PATCH 150/333] Set SOCK_CLOEXEC for sockets (hardcoded via poco update) Found this in [1]: [Detaching after fork from child process 184152] ... Cannot get server pid with {}, got {}: {} lsof -i tcp:9000 -s tcp:LISTEN -Fp | awk '/^p[0-9]+$/{print substr($0, 2)}' b'301\n184152\n' invalid literal for int() with base 10: b'301\n184152\n' But the major idea is that parent process should not export any file descriptors to childrens, otherwise this may create security and other breaches (like extra pipe endpoints, ...) 
[1]: https://clickhouse-test-reports.s3.yandex.net/21511/d7d9638ceb6bd702f34a88ee54f8f83197e90af5/stress_test_(address).html#fail1 Refs: https://github.com/ClickHouse-Extras/poco/pull/35 --- contrib/poco | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/poco b/contrib/poco index c55b91f394e..83beecccb09 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit c55b91f394efa9c238c33957682501681ef9b716 +Subproject commit 83beecccb09eec0c9fd2669cacea03ede1d9f138 From 6be5c271ae0aea1797ef04ba08ca0d7b76a7dad4 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sat, 13 Mar 2021 14:09:56 +0300 Subject: [PATCH 151/333] Added explicit function interface --- src/Dictionaries/LibraryDictionarySource.cpp | 66 ++++++++----------- .../LibraryDictionarySourceExternal.cpp | 21 +++++- .../LibraryDictionarySourceExternal.h | 46 +++++++++++++ 3 files changed, 94 insertions(+), 39 deletions(-) diff --git a/src/Dictionaries/LibraryDictionarySource.cpp b/src/Dictionaries/LibraryDictionarySource.cpp index 0e692a5893a..0632dd3e30f 100644 --- a/src/Dictionaries/LibraryDictionarySource.cpp +++ b/src/Dictionaries/LibraryDictionarySource.cpp @@ -72,7 +72,7 @@ namespace } - Block dataToBlock(const Block & sample_block, const void * data) + Block dataToBlock(const Block & sample_block, const ClickHouseLibrary::RawClickHouseLibraryTable data) { if (!data) throw Exception("LibraryDictionarySource: No data returned", ErrorCodes::EXTERNAL_LIBRARY_ERROR); @@ -150,8 +150,7 @@ LibraryDictionarySource::LibraryDictionarySource( ); settings = std::make_shared(getLibSettings(config, config_prefix + lib_config_settings)); - if (auto lib_new = library->tryGetstrings), decltype(&ClickHouseLibrary::log))>( - "ClickHouseDictionary_v3_libNew")) + if (auto lib_new = library->tryGet(ClickHouseLibrary::LIBRARY_CREATE_NEW_FUNC_NAME)) lib_data = lib_new(&settings->strings, ClickHouseLibrary::log); } @@ -165,17 +164,15 @@ LibraryDictionarySource::LibraryDictionarySource(const 
LibraryDictionarySource & , description{other.description} , settings{other.settings} { - if (auto lib_clone = library->tryGet("ClickHouseDictionary_v3_libClone")) + if (auto lib_clone = library->tryGet(ClickHouseLibrary::LIBRARY_CLONE_FUNC_NAME)) lib_data = lib_clone(other.lib_data); - else if ( - auto lib_new = library->tryGetstrings), decltype(&ClickHouseLibrary::log))>( - "ClickHouseDictionary_v3_libNew")) + else if (auto lib_new = library->tryGet(ClickHouseLibrary::LIBRARY_CREATE_NEW_FUNC_NAME)) lib_data = lib_new(&settings->strings, ClickHouseLibrary::log); } LibraryDictionarySource::~LibraryDictionarySource() { - if (auto lib_delete = library->tryGet("ClickHouseDictionary_v3_libDelete")) + if (auto lib_delete = library->tryGet(ClickHouseLibrary::LIBRARY_DELETE_FUNC_NAME)) lib_delete(lib_data); } @@ -193,17 +190,14 @@ BlockInputStreamPtr LibraryDictionarySource::loadAll() ++i; } - void * data_ptr = nullptr; + auto load_all_func = library->get(ClickHouseLibrary::LIBRARY_LOAD_ALL_FUNC_NAME); + auto data_new_func = library->get(ClickHouseLibrary::LIBRARY_DATA_NEW_FUNC_NAME); + auto data_delete_func = library->get(ClickHouseLibrary::LIBRARY_DATA_DELETE_FUNC_NAME); - /// Get function pointer before dataNew call because library->get may throw. 
- auto func_load_all - = library->getstrings), decltype(&columns))>("ClickHouseDictionary_v3_loadAll"); - - data_ptr = library->get("ClickHouseDictionary_v3_dataNew")(lib_data); - SCOPE_EXIT(library->get("ClickHouseDictionary_v3_dataDelete")(lib_data, data_ptr)); - - auto * data = func_load_all(data_ptr, &settings->strings, &columns); + ClickHouseLibrary::LibraryData data_ptr = data_new_func(lib_data); + SCOPE_EXIT(data_delete_func(lib_data, data_ptr)); + ClickHouseLibrary::RawClickHouseLibraryTable data = load_all_func(data_ptr, &settings->strings, &columns); auto block = dataToBlock(description.sample_block, data); return std::make_shared(block); @@ -224,17 +218,14 @@ BlockInputStreamPtr LibraryDictionarySource::loadIds(const std::vector & ++i; } - void * data_ptr = nullptr; + auto load_ids_func = library->get(ClickHouseLibrary::LIBRARY_LOAD_IDS_FUNC_NAME); + auto data_new_func = library->get(ClickHouseLibrary::LIBRARY_DATA_NEW_FUNC_NAME); + auto data_delete_func = library->get(ClickHouseLibrary::LIBRARY_DATA_DELETE_FUNC_NAME); - /// Get function pointer before dataNew call because library->get may throw. 
- auto func_load_ids - = library->getstrings), decltype(&columns_pass), decltype(&ids_data))>( - "ClickHouseDictionary_v3_loadIds"); + ClickHouseLibrary::LibraryData data_ptr = data_new_func(lib_data); + SCOPE_EXIT(data_delete_func(lib_data, data_ptr)); - data_ptr = library->get("ClickHouseDictionary_v3_dataNew")(lib_data); - SCOPE_EXIT(library->get("ClickHouseDictionary_v3_dataDelete")(lib_data, data_ptr)); - - auto * data = func_load_ids(data_ptr, &settings->strings, &columns_pass, &ids_data); + ClickHouseLibrary::RawClickHouseLibraryTable data = load_ids_func(data_ptr, &settings->strings, &columns_pass, &ids_data); auto block = dataToBlock(description.sample_block, data); return std::make_shared(block); @@ -262,15 +253,14 @@ BlockInputStreamPtr LibraryDictionarySource::loadKeys(const Columns & key_column ClickHouseLibrary::Table request_cols{.data = static_cast(holder.get()), .size = key_columns.size()}; - void * data_ptr = nullptr; - /// Get function pointer before dataNew call because library->get may throw. 
- auto func_load_keys = library->getstrings), decltype(&request_cols))>( - "ClickHouseDictionary_v3_loadKeys"); + auto load_keys_func = library->get(ClickHouseLibrary::LIBRARY_LOAD_KEYS_FUNC_NAME); + auto data_new_func = library->get(ClickHouseLibrary::LIBRARY_DATA_NEW_FUNC_NAME); + auto data_delete_func = library->get(ClickHouseLibrary::LIBRARY_DATA_DELETE_FUNC_NAME); - data_ptr = library->get("ClickHouseDictionary_v3_dataNew")(lib_data); - SCOPE_EXIT(library->get("ClickHouseDictionary_v3_dataDelete")(lib_data, data_ptr)); + ClickHouseLibrary::LibraryData data_ptr = data_new_func(lib_data); + SCOPE_EXIT(data_delete_func(lib_data, data_ptr)); - auto * data = func_load_keys(data_ptr, &settings->strings, &request_cols); + ClickHouseLibrary::RawClickHouseLibraryTable data = load_keys_func(data_ptr, &settings->strings, &request_cols); auto block = dataToBlock(description.sample_block, data); return std::make_shared(block); @@ -278,17 +268,19 @@ BlockInputStreamPtr LibraryDictionarySource::loadKeys(const Columns & key_column bool LibraryDictionarySource::isModified() const { - if (auto func_is_modified - = library->tryGetstrings))>("ClickHouseDictionary_v3_isModified")) + if (auto func_is_modified = library->tryGet( + ClickHouseLibrary::LIBRARY_IS_MODIFIED_FUNC_NAME)) return func_is_modified(lib_data, &settings->strings); + return true; } bool LibraryDictionarySource::supportsSelectiveLoad() const { - if (auto func_supports_selective_load - = library->tryGetstrings))>("ClickHouseDictionary_v3_supportsSelectiveLoad")) + if (auto func_supports_selective_load = library->tryGet( + ClickHouseLibrary::LIBRARY_SUPPORTS_SELECTIVE_LOAD_FUNC_NAME)) return func_supports_selective_load(lib_data, &settings->strings); + return true; } diff --git a/src/Dictionaries/LibraryDictionarySourceExternal.cpp b/src/Dictionaries/LibraryDictionarySourceExternal.cpp index 2e944056283..eba088c2c55 100644 --- a/src/Dictionaries/LibraryDictionarySourceExternal.cpp +++ 
b/src/Dictionaries/LibraryDictionarySourceExternal.cpp @@ -6,10 +6,25 @@ namespace const char DICT_LOGGER_NAME[] = "LibraryDictionarySourceExternal"; } -void ClickHouseLibrary::log(ClickHouseLibrary::LogLevel level, ClickHouseLibrary::CString msg) +namespace ClickHouseLibrary { - using ClickHouseLibrary::LogLevel; +std::string LIBRARY_CREATE_NEW_FUNC_NAME = "ClickHouseDictionary_v3_libNew"; +std::string LIBRARY_CLONE_FUNC_NAME = "ClickHouseDictionary_v3_libClone"; +std::string LIBRARY_DELETE_FUNC_NAME = "ClickHouseDictionary_v3_libDelete"; + +std::string LIBRARY_DATA_NEW_FUNC_NAME = "ClickHouseDictionary_v3_dataNew"; +std::string LIBRARY_DATA_DELETE_FUNC_NAME = "ClickHouseDictionary_v3_dataDelete"; + +std::string LIBRARY_LOAD_ALL_FUNC_NAME = "ClickHouseDictionary_v3_loadAll"; +std::string LIBRARY_LOAD_IDS_FUNC_NAME = "ClickHouseDictionary_v3_loadIds"; +std::string LIBRARY_LOAD_KEYS_FUNC_NAME = "ClickHouseDictionary_v3_loadKeys"; + +std::string LIBRARY_IS_MODIFIED_FUNC_NAME = "ClickHouseDictionary_v3_isModified"; +std::string LIBRARY_SUPPORTS_SELECTIVE_LOAD_FUNC_NAME = "ClickHouseDictionary_v3_supportsSelectiveLoad"; + +void log(LogLevel level, CString msg) +{ auto & logger = Poco::Logger::get(DICT_LOGGER_NAME); switch (level) { @@ -47,3 +62,5 @@ void ClickHouseLibrary::log(ClickHouseLibrary::LogLevel level, ClickHouseLibrary break; } } + +} diff --git a/src/Dictionaries/LibraryDictionarySourceExternal.h b/src/Dictionaries/LibraryDictionarySourceExternal.h index 7a031cdb315..64a5f678578 100644 --- a/src/Dictionaries/LibraryDictionarySourceExternal.h +++ b/src/Dictionaries/LibraryDictionarySourceExternal.h @@ -1,6 +1,7 @@ #pragma once #include +#include #define CLICKHOUSE_DICTIONARY_LIBRARY_API 1 @@ -61,4 +62,49 @@ enum LogLevel }; void log(LogLevel level, CString msg); + +extern std::string LIBRARY_CREATE_NEW_FUNC_NAME; +extern std::string LIBRARY_CLONE_FUNC_NAME; +extern std::string LIBRARY_DELETE_FUNC_NAME; + +extern std::string LIBRARY_DATA_NEW_FUNC_NAME; +extern 
std::string LIBRARY_DATA_DELETE_FUNC_NAME; + +extern std::string LIBRARY_LOAD_ALL_FUNC_NAME; +extern std::string LIBRARY_LOAD_IDS_FUNC_NAME; +extern std::string LIBRARY_LOAD_KEYS_FUNC_NAME; + +extern std::string LIBRARY_IS_MODIFIED_FUNC_NAME; +extern std::string LIBRARY_SUPPORTS_SELECTIVE_LOAD_FUNC_NAME; + +using LibraryContext = void *; + +using LibraryLoggerFunc = void (*)(LogLevel, CString /* message */); + +using LibrarySettings = CStrings *; + +using LibraryNewFunc = LibraryContext (*)(LibrarySettings, LibraryLoggerFunc); +using LibraryCloneFunc = LibraryContext (*)(LibraryContext); +using LibraryDeleteFunc = void (*)(LibraryContext); + +using LibraryData = void *; +using LibraryDataNewFunc = LibraryData (*)(LibraryContext); +using LibraryDataDeleteFunc = void (*)(LibraryContext, LibraryData); + +/// Can be safely casted into const Table * with static_cast +using RawClickHouseLibraryTable = void *; +using RequestedColumnsNames = CStrings *; + +using LibraryLoadAllFunc = RawClickHouseLibraryTable (*)(LibraryData, LibrarySettings, RequestedColumnsNames); + +using RequestedIds = const VectorUInt64 *; +using LibraryLoadIdsFunc = RawClickHouseLibraryTable (*)(LibraryData, LibrarySettings, RequestedColumnsNames, RequestedIds); + +using RequestedKeys = Table *; +/// There is no requested columns names for load keys func +using LibraryLoadKeysFunc = RawClickHouseLibraryTable (*)(LibraryData, LibrarySettings, RequestedKeys); + +using LibraryIsModifiedFunc = bool (*)(LibraryContext, LibrarySettings); +using LibrarySupportsSelectiveLoadFunc = bool (*)(LibraryContext, LibrarySettings); + } From aad98b368e5f6a738e5f42abb462443f63669cdd Mon Sep 17 00:00:00 2001 From: Dmitriy Date: Sat, 13 Mar 2021 21:18:45 +0300 Subject: [PATCH 152/333] Edit and translate to Russian MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Поправил шаблоны в английской и русской версиях. 
--- .../parametric-functions.md | 10 +- .../aggregate-functions/reference/count.md | 5 +- .../reference/grouparrayinsertat.md | 6 +- .../reference/groupbitmapor.md | 2 +- .../reference/groupbitmapxor.md | 2 +- .../reference/groupbitor.md | 2 +- .../reference/initializeAggregation.md | 2 +- .../aggregate-functions/reference/kurtpop.md | 2 +- .../aggregate-functions/reference/kurtsamp.md | 2 +- .../reference/mannwhitneyutest.md | 2 +- .../aggregate-functions/reference/skewpop.md | 2 +- .../aggregate-functions/reference/skewsamp.md | 2 +- .../reference/studentttest.md | 4 +- .../aggregate-functions/reference/topk.md | 4 +- .../reference/topkweighted.md | 2 +- .../reference/welchttest.md | 4 +- .../functions/array-functions.md | 39 ++--- .../sql-reference/functions/bit-functions.md | 26 ++-- .../functions/bitmap-functions.md | 90 ++++++----- .../functions/conditional-functions.md | 8 +- .../functions/date-time-functions.md | 2 +- .../functions/encoding-functions.md | 2 +- .../functions/functions-for-nulls.md | 24 +-- .../en/sql-reference/functions/geo/geohash.md | 7 +- docs/en/sql-reference/functions/geo/h3.md | 17 +- .../sql-reference/functions/hash-functions.md | 28 ++-- .../sql-reference/functions/introspection.md | 19 +-- .../functions/ip-address-functions.md | 12 +- .../sql-reference/functions/json-functions.md | 12 +- .../functions/machine-learning-functions.md | 14 +- .../sql-reference/functions/math-functions.md | 2 +- .../functions/other-functions.md | 1 - .../functions/rounding-functions.md | 2 +- .../functions/string-functions.md | 29 ++-- .../functions/string-search-functions.md | 56 +++---- .../functions/tuple-functions.md | 2 +- .../functions/tuple-map-functions.md | 1 - .../functions/type-conversion-functions.md | 2 +- .../sql-reference/functions/url-functions.md | 4 +- .../functions/ym-dict-functions.md | 2 +- .../sql-reference/table-functions/generate.md | 2 +- docs/en/sql-reference/table-functions/view.md | 9 +- docs/ru/getting-started/tutorial.md | 2 
+- .../aggregate-functions/combinators.md | 14 +- .../parametric-functions.md | 38 +++-- .../aggregate-functions/reference/argmax.md | 2 +- .../aggregate-functions/reference/argmin.md | 2 +- .../reference/avgweighted.md | 6 +- .../aggregate-functions/reference/count.md | 6 +- .../reference/grouparrayinsertat.md | 18 +-- .../reference/grouparraymovingavg.md | 8 +- .../reference/grouparraymovingsum.md | 2 +- .../reference/grouparraysample.md | 2 +- .../reference/groupbitand.md | 2 +- .../reference/groupbitmap.md | 2 +- .../reference/groupbitor.md | 2 +- .../reference/groupbitxor.md | 2 +- .../reference/initializeAggregation.md | 4 +- .../aggregate-functions/reference/kurtpop.md | 6 +- .../aggregate-functions/reference/kurtsamp.md | 6 +- .../reference/mannwhitneyutest.md | 10 +- .../aggregate-functions/reference/quantile.md | 6 +- .../reference/quantiledeterministic.md | 8 +- .../reference/quantileexact.md | 18 +-- .../reference/quantileexactweighted.md | 8 +- .../reference/quantiletdigest.md | 6 +- .../reference/quantiletdigestweighted.md | 8 +- .../reference/quantiletiming.md | 6 +- .../reference/quantiletimingweighted.md | 8 +- .../aggregate-functions/reference/rankCorr.md | 6 +- .../aggregate-functions/reference/skewpop.md | 6 +- .../aggregate-functions/reference/skewsamp.md | 6 +- .../reference/studentttest.md | 2 +- .../aggregate-functions/reference/topk.md | 4 +- .../reference/topkweighted.md | 6 +- .../aggregate-functions/reference/uniq.md | 2 +- .../reference/uniqcombined.md | 2 +- .../reference/uniqexact.md | 2 +- .../reference/uniqhll12.md | 2 +- .../reference/welchttest.md | 2 +- .../functions/array-functions.md | 147 +++++++++--------- .../sql-reference/functions/bit-functions.md | 38 ++--- .../functions/bitmap-functions.md | 59 ++++--- .../functions/conditional-functions.md | 24 +-- .../functions/date-time-functions.md | 24 +-- .../functions/encoding-functions.md | 14 +- .../functions/encryption-functions.md | 10 +- .../functions/ext-dict-functions.md | 
12 +- .../functions/functions-for-nulls.md | 38 ++--- .../ru/sql-reference/functions/geo/geohash.md | 17 +- docs/ru/sql-reference/functions/geo/h3.md | 29 ++-- .../sql-reference/functions/hash-functions.md | 72 ++++----- .../sql-reference/functions/introspection.md | 35 +++-- .../functions/ip-address-functions.md | 14 +- .../sql-reference/functions/json-functions.md | 29 ++-- .../functions/machine-learning-functions.md | 16 +- .../sql-reference/functions/math-functions.md | 18 +-- .../functions/other-functions.md | 135 ++++++++-------- .../functions/random-functions.md | 6 +- .../functions/rounding-functions.md | 18 +-- .../functions/splitting-merging-functions.md | 14 +- .../functions/string-functions.md | 72 ++++----- .../functions/string-search-functions.md | 84 +++++----- .../functions/tuple-functions.md | 4 +- .../functions/tuple-map-functions.md | 21 +-- .../functions/type-conversion-functions.md | 48 +++--- .../sql-reference/functions/url-functions.md | 16 +- .../functions/ym-dict-functions.md | 8 +- .../sql-reference/table-functions/generate.md | 5 +- .../ru/sql-reference/table-functions/mysql.md | 4 +- docs/ru/sql-reference/table-functions/view.md | 11 +- 111 files changed, 897 insertions(+), 852 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/parametric-functions.md b/docs/en/sql-reference/aggregate-functions/parametric-functions.md index c6c97b5428b..f7db3c7fb50 100644 --- a/docs/en/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/en/sql-reference/aggregate-functions/parametric-functions.md @@ -254,8 +254,8 @@ windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) **Parameters** - `window` — Length of the sliding window. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond2 <= timestamp of cond1 + window`. -- `mode` - It is an optional argument. 
- - `'strict'` - When the `'strict'` is set, the windowFunnel() applies conditions only for the unique values. +- `mode` — It is an optional argument. + - `'strict'` — When the `'strict'` is set, the windowFunnel() applies conditions only for the unique values. **Returned value** @@ -336,14 +336,14 @@ retention(cond1, cond2, ..., cond32); **Arguments** -- `cond` — an expression that returns a `UInt8` result (1 or 0). +- `cond` — An expression that returns a `UInt8` result (1 or 0). **Returned value** The array of 1 or 0. -- 1 — condition was met for the event. -- 0 — condition wasn’t met for the event. +- 1 — Condition was met for the event. +- 0 — Condition wasn’t met for the event. Type: `UInt8`. diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md index 0a5aef2fe97..48c6f3f8c05 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/count.md +++ b/docs/en/sql-reference/aggregate-functions/reference/count.md @@ -7,8 +7,9 @@ toc_priority: 1 Counts the number of rows or not-NULL values. ClickHouse supports the following syntaxes for `count`: -- `count(expr)` or `COUNT(DISTINCT expr)`. -- `count()` or `COUNT(*)`. The `count()` syntax is ClickHouse-specific. + +- `count(expr)` or `COUNT(DISTINCT expr)`. +- `count()` or `COUNT(*)`. The `count()` syntax is ClickHouse-specific. **Arguments** diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md b/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md index 68456bf7844..d29550b007e 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md +++ b/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md @@ -9,7 +9,7 @@ Inserts a value into the array at the specified position. 
**Syntax** ``` sql -groupArrayInsertAt(default_x, size)(x, pos); +groupArrayInsertAt(default_x, size)(x, pos) ``` If in one query several values are inserted into the same position, the function behaves in the following ways: @@ -21,8 +21,8 @@ If in one query several values are inserted into the same position, the function - `x` — Value to be inserted. [Expression](../../../sql-reference/syntax.md#syntax-expressions) resulting in one of the [supported data types](../../../sql-reference/data-types/index.md). - `pos` — Position at which the specified element `x` is to be inserted. Index numbering in the array starts from zero. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges). -- `default_x`— Default value for substituting in empty positions. Optional parameter. [Expression](../../../sql-reference/syntax.md#syntax-expressions) resulting in the data type configured for the `x` parameter. If `default_x` is not defined, the [default values](../../../sql-reference/statements/create/table.md#create-default-values) are used. -- `size`— Length of the resulting array. Optional parameter. When using this parameter, the default value `default_x` must be specified. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges). +- `default_x` — Default value for substituting in empty positions. Optional parameter. [Expression](../../../sql-reference/syntax.md#syntax-expressions) resulting in the data type configured for the `x` parameter. If `default_x` is not defined, the [default values](../../../sql-reference/statements/create/table.md#create-default-values) are used. +- `size` — Length of the resulting array. Optional parameter. When using this parameter, the default value `default_x` must be specified. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges). 
**Returned value** diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md index a4d99fd29e3..d3f40f63f65 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md @@ -14,7 +14,7 @@ groupBitmapOr(expr) `expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` type. -**Return value** +**Returned value** Value of the `UInt64` type. diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md index 834f088d02f..cbe01e08145 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md @@ -14,7 +14,7 @@ groupBitmapOr(expr) `expr` – An expression that results in `AggregateFunction(groupBitmap, UInt*)` type. -**Return value** +**Returned value** Value of the `UInt64` type. diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md index e427a9ad970..24077de0adc 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md @@ -14,7 +14,7 @@ groupBitOr(expr) `expr` – An expression that results in `UInt*` type. -**Return value** +**Returned value** Value of the `UInt*` type. 
diff --git a/docs/en/sql-reference/aggregate-functions/reference/initializeAggregation.md b/docs/en/sql-reference/aggregate-functions/reference/initializeAggregation.md index 313d6bf81f5..c8fb535089b 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/initializeAggregation.md +++ b/docs/en/sql-reference/aggregate-functions/reference/initializeAggregation.md @@ -10,7 +10,7 @@ Use it for tests or to process columns of types `AggregateFunction` and `Aggrega **Syntax** ``` sql -initializeAggregation (aggregate_function, column_1, column_2); +initializeAggregation (aggregate_function, column_1, column_2) ``` **Arguments** diff --git a/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md b/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md index db402c99663..c51c4b92e74 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md @@ -21,5 +21,5 @@ The kurtosis of the given distribution. Type — [Float64](../../../sql-referenc **Example** ``` sql -SELECT kurtPop(value) FROM series_with_value_column +SELECT kurtPop(value) FROM series_with_value_column; ``` diff --git a/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md b/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md index 4bb9f76763b..0ee40138adc 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md @@ -23,5 +23,5 @@ The kurtosis of the given distribution. 
Type — [Float64](../../../sql-referenc **Example** ``` sql -SELECT kurtSamp(value) FROM series_with_value_column +SELECT kurtSamp(value) FROM series_with_value_column; ``` diff --git a/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md b/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md index dc5fc45b878..34e8188299c 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md @@ -27,7 +27,7 @@ The null hypothesis is that two populations are stochastically equal. Also one-s - `'two-sided'`; - `'greater'`; - `'less'`. -- `continuity_correction` - if not 0 then continuity correction in the normal approximation for the p-value is applied. (Optional, default: 1.) [UInt64](../../../sql-reference/data-types/int-uint.md). +- `continuity_correction` — if not 0 then continuity correction in the normal approximation for the p-value is applied. (Optional, default: 1.) [UInt64](../../../sql-reference/data-types/int-uint.md). **Returned values** diff --git a/docs/en/sql-reference/aggregate-functions/reference/skewpop.md b/docs/en/sql-reference/aggregate-functions/reference/skewpop.md index b9dfc390f9d..f84f8897a35 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/skewpop.md +++ b/docs/en/sql-reference/aggregate-functions/reference/skewpop.md @@ -21,5 +21,5 @@ The skewness of the given distribution. 
Type — [Float64](../../../sql-referenc **Example** ``` sql -SELECT skewPop(value) FROM series_with_value_column +SELECT skewPop(value) FROM series_with_value_column; ``` diff --git a/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md b/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md index f7a6df8f507..48a049ca69d 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md +++ b/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md @@ -23,5 +23,5 @@ The skewness of the given distribution. Type — [Float64](../../../sql-referenc **Example** ``` sql -SELECT skewSamp(value) FROM series_with_value_column +SELECT skewSamp(value) FROM series_with_value_column; ``` diff --git a/docs/en/sql-reference/aggregate-functions/reference/studentttest.md b/docs/en/sql-reference/aggregate-functions/reference/studentttest.md index a1d7ae33fe1..3398fc1ca8c 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/studentttest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/studentttest.md @@ -18,8 +18,8 @@ The null hypothesis is that means of populations are equal. Normal distribution **Arguments** -- `sample_data` — sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). -- `sample_index` — sample index. [Integer](../../../sql-reference/data-types/int-uint.md). +- `sample_data` — Sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). +- `sample_index` — Sample index. [Integer](../../../sql-reference/data-types/int-uint.md). 
**Returned values** diff --git a/docs/en/sql-reference/aggregate-functions/reference/topk.md b/docs/en/sql-reference/aggregate-functions/reference/topk.md index b3e79803ba1..b9bea013ea8 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/topk.md +++ b/docs/en/sql-reference/aggregate-functions/reference/topk.md @@ -18,13 +18,13 @@ We recommend using the `N < 10` value; performance is reduced with large `N` val **Arguments** -- ‘N’ is the number of elements to return. +- `N` – The number of elements to return. If the parameter is omitted, default value 10 is used. **Arguments** -- ’ x ’ – The value to calculate frequency. +- `x` – The value to calculate frequency. **Example** diff --git a/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md b/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md index 02b9f77ea6f..8562336c829 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md @@ -18,7 +18,7 @@ topKWeighted(N)(x, weight) **Arguments** -- `x` – The value. +- `x` — The value. - `weight` — The weight. [UInt8](../../../sql-reference/data-types/int-uint.md). **Returned value** diff --git a/docs/en/sql-reference/aggregate-functions/reference/welchttest.md b/docs/en/sql-reference/aggregate-functions/reference/welchttest.md index b391fb1d979..02238de42ef 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/welchttest.md +++ b/docs/en/sql-reference/aggregate-functions/reference/welchttest.md @@ -18,8 +18,8 @@ The null hypothesis is that means of populations are equal. Normal distribution **Arguments** -- `sample_data` — sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). -- `sample_index` — sample index. [Integer](../../../sql-reference/data-types/int-uint.md). +- `sample_data` — Sample data. 
[Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). +- `sample_index` — Sample index. [Integer](../../../sql-reference/data-types/int-uint.md). **Returned values** diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md index c9c418d57a4..69124827255 100644 --- a/docs/en/sql-reference/functions/array-functions.md +++ b/docs/en/sql-reference/functions/array-functions.md @@ -376,7 +376,7 @@ arrayPopBack(array) **Example** ``` sql -SELECT arrayPopBack([1, 2, 3]) AS res +SELECT arrayPopBack([1, 2, 3]) AS res; ``` ``` text @@ -400,7 +400,7 @@ arrayPopFront(array) **Example** ``` sql -SELECT arrayPopFront([1, 2, 3]) AS res +SELECT arrayPopFront([1, 2, 3]) AS res; ``` ``` text @@ -425,7 +425,7 @@ arrayPushBack(array, single_value) **Example** ``` sql -SELECT arrayPushBack(['a'], 'b') AS res +SELECT arrayPushBack(['a'], 'b') AS res; ``` ``` text @@ -450,7 +450,7 @@ arrayPushFront(array, single_value) **Example** ``` sql -SELECT arrayPushFront(['b'], 'a') AS res +SELECT arrayPushFront(['b'], 'a') AS res; ``` ``` text @@ -482,7 +482,7 @@ An array of length `size`. **Examples of calls** ``` sql -SELECT arrayResize([1], 3) +SELECT arrayResize([1], 3); ``` ``` text @@ -492,7 +492,7 @@ SELECT arrayResize([1], 3) ``` ``` sql -SELECT arrayResize([1], 3, NULL) +SELECT arrayResize([1], 3, NULL); ``` ``` text @@ -513,12 +513,12 @@ arraySlice(array, offset[, length]) - `array` – Array of data. - `offset` – Indent from the edge of the array. A positive value indicates an offset on the left, and a negative value is an indent on the right. Numbering of the array items begins with 1. -- `length` - The length of the required slice. If you specify a negative value, the function returns an open slice `[offset, array_length - length)`. 
If you omit the value, the function returns the slice `[offset, the_end_of_array]`. +- `length` – The length of the required slice. If you specify a negative value, the function returns an open slice `[offset, array_length - length)`. If you omit the value, the function returns the slice `[offset, the_end_of_array]`. **Example** ``` sql -SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res +SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res; ``` ``` text @@ -766,7 +766,7 @@ Type: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges) Query: ``` sql -SELECT arrayDifference([1, 2, 3, 4]) +SELECT arrayDifference([1, 2, 3, 4]); ``` Result: @@ -782,7 +782,7 @@ Example of the overflow due to result type Int64: Query: ``` sql -SELECT arrayDifference([0, 10000000000000000000]) +SELECT arrayDifference([0, 10000000000000000000]); ``` Result: @@ -816,7 +816,7 @@ Returns an array containing the distinct elements. Query: ``` sql -SELECT arrayDistinct([1, 2, 2, 3, 1]) +SELECT arrayDistinct([1, 2, 2, 3, 1]); ``` Result: @@ -883,7 +883,7 @@ arrayReduce(agg_func, arr1, arr2, ..., arrN) Query: ``` sql -SELECT arrayReduce('max', [1, 2, 3]) +SELECT arrayReduce('max', [1, 2, 3]); ``` Result: @@ -899,7 +899,7 @@ If an aggregate function takes multiple arguments, then this function must be ap Query: ``` sql -SELECT arrayReduce('maxIf', [3, 5], [1, 0]) +SELECT arrayReduce('maxIf', [3, 5], [1, 0]); ``` Result: @@ -915,7 +915,7 @@ Example with a parametric aggregate function: Query: ``` sql -SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) +SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); ``` Result: @@ -1014,7 +1014,7 @@ Alias: `flatten`. **Examples** ``` sql -SELECT flatten([[[1]], [[2], [3]]]) +SELECT flatten([[[1]], [[2], [3]]]); ``` ``` text @@ -1048,7 +1048,7 @@ Type: `Array`. 
Query: ``` sql -SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3]) +SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3]); ``` Result: @@ -1086,7 +1086,7 @@ Type: [Array](../../sql-reference/data-types/array.md). Query: ``` sql -SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1]) +SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1]); ``` Result: @@ -1108,17 +1108,20 @@ arrayAUC(arr_scores, arr_labels) ``` **Arguments** + - `arr_scores` — scores prediction model gives. - `arr_labels` — labels of samples, usually 1 for positive sample and 0 for negtive sample. **Returned value** + Returns AUC value with type Float64. **Example** + Query: ``` sql -select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]) +select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]); ``` Result: diff --git a/docs/en/sql-reference/functions/bit-functions.md b/docs/en/sql-reference/functions/bit-functions.md index a3d0c82d8ab..ce08c569bab 100644 --- a/docs/en/sql-reference/functions/bit-functions.md +++ b/docs/en/sql-reference/functions/bit-functions.md @@ -37,8 +37,8 @@ SELECT bitTest(number, index) **Arguments** -- `number` – integer number. -- `index` – position of bit. +- `number` – Integer number. +- `index` – Position of bit. **Returned values** @@ -53,7 +53,7 @@ For example, the number 43 in base-2 (binary) numeral system is 101011. Query: ``` sql -SELECT bitTest(43, 1) +SELECT bitTest(43, 1); ``` Result: @@ -69,7 +69,7 @@ Another example: Query: ``` sql -SELECT bitTest(43, 2) +SELECT bitTest(43, 2); ``` Result: @@ -102,8 +102,8 @@ SELECT bitTestAll(number, index1, index2, index3, index4, ...) **Arguments** -- `number` – integer number. -- `index1`, `index2`, `index3`, `index4` – positions of bit. For example, for set of positions (`index1`, `index2`, `index3`, `index4`) is true if and only if all of its positions are true (`index1` ⋀ `index2`, ⋀ `index3` ⋀ `index4`). +- `number` – Integer number. +- `index1`, `index2`, `index3`, `index4` – Positions of bit. 
For example, for set of positions (`index1`, `index2`, `index3`, `index4`) is true if and only if all of its positions are true (`index1` ⋀ `index2`, ⋀ `index3` ⋀ `index4`). **Returned values** @@ -118,7 +118,7 @@ For example, the number 43 in base-2 (binary) numeral system is 101011. Query: ``` sql -SELECT bitTestAll(43, 0, 1, 3, 5) +SELECT bitTestAll(43, 0, 1, 3, 5); ``` Result: @@ -134,7 +134,7 @@ Another example: Query: ``` sql -SELECT bitTestAll(43, 0, 1, 3, 5, 2) +SELECT bitTestAll(43, 0, 1, 3, 5, 2); ``` Result: @@ -167,8 +167,8 @@ SELECT bitTestAny(number, index1, index2, index3, index4, ...) **Arguments** -- `number` – integer number. -- `index1`, `index2`, `index3`, `index4` – positions of bit. +- `number` – Integer number. +- `index1`, `index2`, `index3`, `index4` – Positions of bit. **Returned values** @@ -183,7 +183,7 @@ For example, the number 43 in base-2 (binary) numeral system is 101011. Query: ``` sql -SELECT bitTestAny(43, 0, 2) +SELECT bitTestAny(43, 0, 2); ``` Result: @@ -199,7 +199,7 @@ Another example: Query: ``` sql -SELECT bitTestAny(43, 4, 2) +SELECT bitTestAny(43, 4, 2); ``` Result: @@ -239,7 +239,7 @@ Take for example the number 333. Its binary representation: 0000000101001101. Query: ``` sql -SELECT bitCount(333) +SELECT bitCount(333); ``` Result: diff --git a/docs/en/sql-reference/functions/bitmap-functions.md b/docs/en/sql-reference/functions/bitmap-functions.md index bfff70576f2..c809aee85fe 100644 --- a/docs/en/sql-reference/functions/bitmap-functions.md +++ b/docs/en/sql-reference/functions/bitmap-functions.md @@ -23,12 +23,12 @@ bitmapBuild(array) **Arguments** -- `array` – unsigned integer array. +- `array` – Unsigned integer array. **Example** ``` sql -SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res) +SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res); ``` ``` text @@ -47,12 +47,12 @@ bitmapToArray(bitmap) **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. 
**Example** ``` sql -SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res +SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res; ``` ``` text @@ -72,13 +72,13 @@ bitmapSubsetInRange(bitmap, range_start, range_end) **Arguments** - `bitmap` – [Bitmap object](#bitmap_functions-bitmapbuild). -- `range_start` – range start point. Type: [UInt32](../../sql-reference/data-types/int-uint.md). -- `range_end` – range end point(excluded). Type: [UInt32](../../sql-reference/data-types/int-uint.md). +- `range_start` – Range start point. Type: [UInt32](../../sql-reference/data-types/int-uint.md). +- `range_end` – Range end point (excluded). Type: [UInt32](../../sql-reference/data-types/int-uint.md). **Example** ``` sql -SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res; ``` ``` text @@ -114,7 +114,7 @@ Type: `Bitmap object`. Query: ``` sql -SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res; ``` Result: @@ -148,7 +148,7 @@ Type: `UInt8`. **Example** ``` sql -SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res +SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res; ``` ``` text @@ -169,7 +169,7 @@ If you are sure that `bitmap2` contains strictly one element, consider using the **Arguments** -- `bitmap*` – bitmap object. +- `bitmap*` – Bitmap object.
**Return values** @@ -179,7 +179,7 @@ If you are sure that `bitmap2` contains strictly one element, consider using the **Example** ``` sql -SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; ``` ``` text @@ -199,12 +199,12 @@ bitmapHasAll(bitmap,bitmap) **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. **Example** ``` sql -SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; ``` ``` text @@ -223,12 +223,12 @@ bitmapCardinality(bitmap) **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. **Example** ``` sql -SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res +SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res; ``` ``` text @@ -245,17 +245,19 @@ Retrun the smallest value of type UInt64 in the set, UINT32_MAX if the set is em **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. **Example** ``` sql -SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res +SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res; ``` - ┌─res─┐ - │ 1 │ - └─────┘ +``` text + ┌─res─┐ + │ 1 │ + └─────┘ +``` ## bitmapMax {#bitmapmax} @@ -265,17 +267,19 @@ Retrun the greatest value of type UInt64 in the set, 0 if the set is empty. **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. **Example** ``` sql -SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res +SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res; ``` - ┌─res─┐ - │ 5 │ - └─────┘ +``` text + ┌─res─┐ + │ 5 │ + └─────┘ +``` ## bitmapTransform {#bitmaptransform} @@ -285,19 +289,21 @@ Transform an array of values in a bitmap to another array of values, the result **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. - `from_array` – UInt32 array. 
For idx in range \[0, from_array.size()), if bitmap contains from_array\[idx\], then replace it with to_array\[idx\]. Note that the result depends on array ordering if there are common elements between from_array and to_array. - `to_array` – UInt32 array, its size shall be the same to from_array. **Example** ``` sql -SELECT bitmapToArray(bitmapTransform(bitmapBuild([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), cast([5,999,2] as Array(UInt32)), cast([2,888,20] as Array(UInt32)))) AS res +SELECT bitmapToArray(bitmapTransform(bitmapBuild([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), cast([5,999,2] as Array(UInt32)), cast([2,888,20] as Array(UInt32)))) AS res; ``` - ┌─res───────────────────┐ - │ [1,3,4,6,7,8,9,10,20] │ - └───────────────────────┘ +``` text + ┌─res───────────────────┐ + │ [1,3,4,6,7,8,9,10,20] │ + └───────────────────────┘ +``` ## bitmapAnd {#bitmapand} @@ -309,12 +315,12 @@ bitmapAnd(bitmap,bitmap) **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. **Example** ``` sql -SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res; ``` ``` text @@ -333,12 +339,12 @@ bitmapOr(bitmap,bitmap) **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. **Example** ``` sql -SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res; ``` ``` text @@ -357,12 +363,12 @@ bitmapXor(bitmap,bitmap) **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. **Example** ``` sql -SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res; ``` ``` text @@ -381,12 +387,12 @@ bitmapAndnot(bitmap,bitmap) **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. 
**Example** ``` sql -SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res; ``` ``` text @@ -405,7 +411,7 @@ bitmapAndCardinality(bitmap,bitmap) **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. **Example** @@ -429,7 +435,7 @@ bitmapOrCardinality(bitmap,bitmap) **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. **Example** @@ -453,7 +459,7 @@ bitmapXorCardinality(bitmap,bitmap) **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. **Example** @@ -477,7 +483,7 @@ bitmapAndnotCardinality(bitmap,bitmap) **Arguments** -- `bitmap` – bitmap object. +- `bitmap` – Bitmap object. **Example** diff --git a/docs/en/sql-reference/functions/conditional-functions.md b/docs/en/sql-reference/functions/conditional-functions.md index 2d57cbb3bd5..462307012b5 100644 --- a/docs/en/sql-reference/functions/conditional-functions.md +++ b/docs/en/sql-reference/functions/conditional-functions.md @@ -20,8 +20,8 @@ If the condition `cond` evaluates to a non-zero value, returns the result of the **Arguments** - `cond` – The condition for evaluation that can be zero or not. The type is UInt8, Nullable(UInt8) or NULL. -- `then` - The expression to return if condition is met. -- `else` - The expression to return if condition is not met. +- `then` – The expression to return if condition is met. +- `else` – The expression to return if condition is not met. 
**Returned values** @@ -32,7 +32,7 @@ The function executes `then` and `else` expressions and returns its result, depe Query: ``` sql -SELECT if(1, plus(2, 2), plus(2, 6)) +SELECT if(1, plus(2, 2), plus(2, 6)); ``` Result: @@ -46,7 +46,7 @@ Result: Query: ``` sql -SELECT if(0, plus(2, 2), plus(2, 6)) +SELECT if(0, plus(2, 2), plus(2, 6)); ``` Result: diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 01c8ae59e02..77a64eee41f 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -470,7 +470,7 @@ Aliases: `dateAdd`, `DATE_ADD`. - `unit` — The type of interval to add. [String](../../sql-reference/data-types/string.md). Supported values: second, minute, hour, day, week, month, quarter, year. -- `value` - Value in specified unit - [Int](../../sql-reference/data-types/int-uint.md) +- `value` — Value in specified unit - [Int](../../sql-reference/data-types/int-uint.md) - `date` — [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index c1013ebb0e1..3779e5accc5 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -30,7 +30,7 @@ Type: `String`. 
Query: ``` sql -SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello +SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello; ``` Result: diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md index f57f0f7e27d..43e38f459d0 100644 --- a/docs/en/sql-reference/functions/functions-for-nulls.md +++ b/docs/en/sql-reference/functions/functions-for-nulls.md @@ -38,7 +38,7 @@ Input table Query ``` sql -SELECT x FROM t_null WHERE isNull(y) +SELECT x FROM t_null WHERE isNull(y); ``` ``` text @@ -78,7 +78,7 @@ Input table Query ``` sql -SELECT x FROM t_null WHERE isNotNull(y) +SELECT x FROM t_null WHERE isNotNull(y); ``` ``` text @@ -120,7 +120,7 @@ The `mail` and `phone` fields are of type String, but the `icq` field is `UInt32 Get the first available contact method for the customer from the contact list: ``` sql -SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook +SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook; ``` ``` text @@ -151,7 +151,7 @@ ifNull(x,alt) **Example** ``` sql -SELECT ifNull('a', 'b') +SELECT ifNull('a', 'b'); ``` ``` text @@ -161,7 +161,7 @@ SELECT ifNull('a', 'b') ``` ``` sql -SELECT ifNull(NULL, 'b') +SELECT ifNull(NULL, 'b'); ``` ``` text @@ -190,7 +190,7 @@ nullIf(x, y) **Example** ``` sql -SELECT nullIf(1, 1) +SELECT nullIf(1, 1); ``` ``` text @@ -200,7 +200,7 @@ SELECT nullIf(1, 1) ``` ``` sql -SELECT nullIf(1, 2) +SELECT nullIf(1, 2); ``` ``` text @@ -231,7 +231,7 @@ assumeNotNull(x) Consider the `t_null` table. ``` sql -SHOW CREATE TABLE t_null +SHOW CREATE TABLE t_null; ``` ``` text @@ -250,7 +250,7 @@ SHOW CREATE TABLE t_null Apply the `assumeNotNull` function to the `y` column. 
``` sql -SELECT assumeNotNull(y) FROM t_null +SELECT assumeNotNull(y) FROM t_null; ``` ``` text @@ -261,7 +261,7 @@ SELECT assumeNotNull(y) FROM t_null ``` ``` sql -SELECT toTypeName(assumeNotNull(y)) FROM t_null +SELECT toTypeName(assumeNotNull(y)) FROM t_null; ``` ``` text @@ -290,7 +290,7 @@ toNullable(x) **Example** ``` sql -SELECT toTypeName(10) +SELECT toTypeName(10); ``` ``` text @@ -300,7 +300,7 @@ SELECT toTypeName(10) ``` ``` sql -SELECT toTypeName(toNullable(10)) +SELECT toTypeName(toNullable(10)); ``` ``` text diff --git a/docs/en/sql-reference/functions/geo/geohash.md b/docs/en/sql-reference/functions/geo/geohash.md index c27eab0b421..cfe35746809 100644 --- a/docs/en/sql-reference/functions/geo/geohash.md +++ b/docs/en/sql-reference/functions/geo/geohash.md @@ -29,7 +29,7 @@ geohashEncode(longitude, latitude, [precision]) **Example** ``` sql -SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res +SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res; ``` ``` text @@ -53,7 +53,7 @@ Decodes any [geohash](#geohash)-encoded string into longitude and latitude. **Example** ``` sql -SELECT geohashDecode('ezs42') AS res +SELECT geohashDecode('ezs42') AS res; ``` ``` text @@ -98,8 +98,9 @@ Type: [Array](../../../sql-reference/data-types/array.md)([String](../../../sql- Query: ``` sql -SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos +SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos; ``` + Result: ``` text diff --git a/docs/en/sql-reference/functions/geo/h3.md b/docs/en/sql-reference/functions/geo/h3.md index 9dda947b3a7..20dc7b29902 100644 --- a/docs/en/sql-reference/functions/geo/h3.md +++ b/docs/en/sql-reference/functions/geo/h3.md @@ -40,8 +40,9 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md). 
Query: ``` sql -SELECT h3IsValid(630814730351855103) as h3IsValid +SELECT h3IsValid(630814730351855103) as h3IsValid; ``` + Result: ``` text @@ -76,8 +77,9 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md). Query: ``` sql -SELECT h3GetResolution(639821929606596015) as resolution +SELECT h3GetResolution(639821929606596015) as resolution; ``` + Result: ``` text @@ -109,8 +111,9 @@ h3EdgeAngle(resolution) Query: ``` sql -SELECT h3EdgeAngle(10) as edgeAngle +SELECT h3EdgeAngle(10) as edgeAngle; ``` + Result: ``` text @@ -142,8 +145,9 @@ h3EdgeLengthM(resolution) Query: ``` sql -SELECT h3EdgeLengthM(15) as edgeLengthM +SELECT h3EdgeLengthM(15) as edgeLengthM; ``` + Result: ``` text @@ -180,7 +184,7 @@ Type: [UInt64](../../../sql-reference/data-types/int-uint.md). Query: ``` sql -SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index +SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index; ``` Result: @@ -217,8 +221,9 @@ Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql- Query: ``` sql -SELECT arrayJoin(h3kRing(644325529233966508, 1)) AS h3index +SELECT arrayJoin(h3kRing(644325529233966508, 1)) AS h3index; ``` + Result: ``` text diff --git a/docs/en/sql-reference/functions/hash-functions.md b/docs/en/sql-reference/functions/hash-functions.md index 465ad01527f..cb850101311 100644 --- a/docs/en/sql-reference/functions/hash-functions.md +++ b/docs/en/sql-reference/functions/hash-functions.md @@ -29,7 +29,7 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value. **Example** ``` sql -SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type +SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type; ``` ``` text @@ -72,7 +72,7 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value. 
**Example** ``` sql -SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type +SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type; ``` ``` text @@ -110,7 +110,7 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value. Call example: ``` sql -SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type +SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type; ``` ``` text @@ -177,7 +177,7 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value. **Example** ``` sql -SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type +SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type; ``` ``` text @@ -193,7 +193,7 @@ Calculates [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add97 **Syntax** ``` sql -SELECT javaHash(''); +SELECT javaHash('') ``` **Returned value** @@ -241,7 +241,7 @@ Correct query with UTF-16LE encoded string. Query: ``` sql -SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')) +SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')); ``` Result: @@ -257,7 +257,7 @@ Result: Calculates `HiveHash` from a string. ``` sql -SELECT hiveHash(''); +SELECT hiveHash('') ``` This is just [JavaHash](#hash_functions-javahash) with zeroed out sign bit. This function is used in [Apache Hive](https://en.wikipedia.org/wiki/Apache_Hive) for versions before 3.0. This hash function is neither fast nor having a good quality. The only reason to use it is when this algorithm is already used in another system and you have to calculate exactly the same result. 
@@ -303,7 +303,7 @@ A [UInt64](../../sql-reference/data-types/int-uint.md) data type hash value. **Example** ``` sql -SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type +SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type; ``` ``` text @@ -339,7 +339,7 @@ Both functions take a variable number of input parameters. Arguments can be any **Example** ``` sql -SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type +SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type; ``` ``` text @@ -355,7 +355,7 @@ Calculates a 64-bit [MurmurHash2](https://github.com/aappleby/smhasher) hash val **Syntax** ``` sql -gccMurmurHash(par1, ...); +gccMurmurHash(par1, ...) ``` **Arguments** @@ -407,7 +407,7 @@ Both functions take a variable number of input parameters. Arguments can be any **Example** ``` sql -SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type +SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` ``` text @@ -435,7 +435,7 @@ A [FixedString(16)](../../sql-reference/data-types/fixedstring.md) data type has **Example** ``` sql -SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type +SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` ``` text @@ -449,11 +449,11 @@ SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) Calculates `xxHash` from a string. It is proposed in two flavors, 32 and 64 bits. 
``` sql -SELECT xxHash32(''); +SELECT xxHash32('') OR -SELECT xxHash64(''); +SELECT xxHash64('') ``` **Returned value** diff --git a/docs/en/sql-reference/functions/introspection.md b/docs/en/sql-reference/functions/introspection.md index 964265a461b..823fd4dfebe 100644 --- a/docs/en/sql-reference/functions/introspection.md +++ b/docs/en/sql-reference/functions/introspection.md @@ -53,13 +53,13 @@ Type: [String](../../sql-reference/data-types/string.md). Enabling introspection functions: ``` sql -SET allow_introspection_functions=1 +SET allow_introspection_functions=1; ``` Selecting the first string from the `trace_log` system table: ``` sql -SELECT * FROM system.trace_log LIMIT 1 \G +SELECT * FROM system.trace_log LIMIT 1 \G; ``` ``` text @@ -79,7 +79,7 @@ The `trace` field contains the stack trace at the moment of sampling. Getting the source code filename and the line number for a single address: ``` sql -SELECT addressToLine(94784076370703) \G +SELECT addressToLine(94784076370703) \G; ``` ``` text @@ -139,13 +139,13 @@ Type: [String](../../sql-reference/data-types/string.md). Enabling introspection functions: ``` sql -SET allow_introspection_functions=1 +SET allow_introspection_functions=1; ``` Selecting the first string from the `trace_log` system table: ``` sql -SELECT * FROM system.trace_log LIMIT 1 \G +SELECT * FROM system.trace_log LIMIT 1 \G; ``` ``` text @@ -165,7 +165,7 @@ The `trace` field contains the stack trace at the moment of sampling. Getting a symbol for a single address: ``` sql -SELECT addressToSymbol(94138803686098) \G +SELECT addressToSymbol(94138803686098) \G; ``` ``` text @@ -236,13 +236,13 @@ Type: [String](../../sql-reference/data-types/string.md). 
Enabling introspection functions: ``` sql -SET allow_introspection_functions=1 +SET allow_introspection_functions=1; ``` Selecting the first string from the `trace_log` system table: ``` sql -SELECT * FROM system.trace_log LIMIT 1 \G +SELECT * FROM system.trace_log LIMIT 1 \G; ``` ``` text @@ -262,7 +262,7 @@ The `trace` field contains the stack trace at the moment of sampling. Getting a function name for a single address: ``` sql -SELECT demangle(addressToSymbol(94138803686098)) \G +SELECT demangle(addressToSymbol(94138803686098)) \G; ``` ``` text @@ -335,6 +335,7 @@ Result: │ 3878 │ └───────┘ ``` + ## logTrace {#logtrace} Emits trace log message to server log for each [Block](https://clickhouse.tech/docs/en/development/architecture/#block). diff --git a/docs/en/sql-reference/functions/ip-address-functions.md b/docs/en/sql-reference/functions/ip-address-functions.md index 64457627cce..9217838cd35 100644 --- a/docs/en/sql-reference/functions/ip-address-functions.md +++ b/docs/en/sql-reference/functions/ip-address-functions.md @@ -60,7 +60,7 @@ Alias: `INET6_NTOA`. Examples: ``` sql -SELECT IPv6NumToString(toFixedString(unhex('2A0206B8000000000000000000000011'), 16)) AS addr +SELECT IPv6NumToString(toFixedString(unhex('2A0206B8000000000000000000000011'), 16)) AS addr; ``` ``` text @@ -164,7 +164,7 @@ Result: └────────────┴──────────────────────────────────────┘ ``` -**See also** +**See Also** - [cutIPv6](#cutipv6x-bytestocutforipv6-bytestocutforipv4). @@ -173,7 +173,7 @@ Result: Takes a `UInt32` number. Interprets it as an IPv4 address in [big endian](https://en.wikipedia.org/wiki/Endianness). Returns a `FixedString(16)` value containing the IPv6 address in binary format. 
Examples: ``` sql -SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr +SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr; ``` ``` text @@ -206,7 +206,7 @@ SELECT Accepts an IPv4 and an UInt8 value containing the [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). Return a tuple with two IPv4 containing the lower range and the higher range of the subnet. ``` sql -SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16) +SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16); ``` ``` text @@ -342,7 +342,7 @@ Type: [UInt8](../../sql-reference/data-types/int-uint.md). Query: ```sql -SELECT addr, isIPv4String(addr) FROM ( SELECT ['0.0.0.0', '127.0.0.1', '::ffff:127.0.0.1'] AS addr ) ARRAY JOIN addr +SELECT addr, isIPv4String(addr) FROM ( SELECT ['0.0.0.0', '127.0.0.1', '::ffff:127.0.0.1'] AS addr ) ARRAY JOIN addr; ``` Result: @@ -380,7 +380,7 @@ Type: [UInt8](../../sql-reference/data-types/int-uint.md). Query: ``` sql -SELECT addr, isIPv6String(addr) FROM ( SELECT ['::', '1111::ffff', '::ffff:127.0.0.1', '127.0.0.1'] AS addr ) ARRAY JOIN addr +SELECT addr, isIPv6String(addr) FROM ( SELECT ['::', '1111::ffff', '::ffff:127.0.0.1', '127.0.0.1'] AS addr ) ARRAY JOIN addr; ``` Result: diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md index edee048eb77..6c2a807492d 100644 --- a/docs/en/sql-reference/functions/json-functions.md +++ b/docs/en/sql-reference/functions/json-functions.md @@ -199,7 +199,7 @@ Parses key-value pairs from a JSON where the values are of the given ClickHouse Example: ``` sql -SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)] +SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') = [('a',5),('b',7),('c',11)]; ``` ## JSONExtractRaw(json\[, indices_or_keys\]…) {#jsonextractrawjson-indices-or-keys} @@ -211,7 +211,7 @@ If the part does not exist or has a 
wrong type, an empty string will be returned Example: ``` sql -SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]' +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]'; ``` ## JSONExtractArrayRaw(json\[, indices_or_keys…\]) {#jsonextractarrayrawjson-indices-or-keys} @@ -223,7 +223,7 @@ If the part does not exist or isn’t array, an empty array will be returned. Example: ``` sql -SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']' +SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']; ``` ## JSONExtractKeysAndValuesRaw {#json-extract-keys-and-values-raw} @@ -253,7 +253,7 @@ Type: [Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-referen Query: ``` sql -SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}') +SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}'); ``` Result: @@ -267,7 +267,7 @@ Result: Query: ``` sql -SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', 'b') +SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', 'b'); ``` Result: @@ -281,7 +281,7 @@ Result: Query: ``` sql -SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', -1, 'c') +SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', -1, 'c'); ``` Result: diff --git a/docs/en/sql-reference/functions/machine-learning-functions.md b/docs/en/sql-reference/functions/machine-learning-functions.md index f103a4ea421..d1bb66993b2 100644 --- a/docs/en/sql-reference/functions/machine-learning-functions.md +++ b/docs/en/sql-reference/functions/machine-learning-functions.md @@ -36,14 +36,14 @@ bayesAB(distribution_name, 
higher_is_better, variant_names, x, y) - `higher_is_better` — Boolean flag. [Boolean](../../sql-reference/data-types/boolean.md). Possible values: - - `0` - lower values are considered to be better than higher - - `1` - higher values are considered to be better than lower + - `0` — lower values are considered to be better than higher + - `1` — higher values are considered to be better than lower -- `variant_names` - Variant names. [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). +- `variant_names` — Variant names. [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). -- `x` - Numbers of tests for the corresponding variants. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)). +- `x` — Numbers of tests for the corresponding variants. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)). -- `y` - Numbers of successful tests for the corresponding variants. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)). +- `y` — Numbers of successful tests for the corresponding variants. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)). !!! note "Note" All three arrays must have the same size. All `x` and `y` values must be non-negative constant numbers. `y` cannot be larger than `x`. @@ -51,8 +51,8 @@ bayesAB(distribution_name, higher_is_better, variant_names, x, y) **Returned values** For each variant the function calculates: -- `beats_control` - long-term probability to out-perform the first (control) variant -- `to_be_best` - long-term probability to out-perform all other variants +- `beats_control` — long-term probability to out-perform the first (control) variant +- `to_be_best` — long-term probability to out-perform all other variants Type: JSON. 
diff --git a/docs/en/sql-reference/functions/math-functions.md b/docs/en/sql-reference/functions/math-functions.md index f56a721c0c0..843c1e52454 100644 --- a/docs/en/sql-reference/functions/math-functions.md +++ b/docs/en/sql-reference/functions/math-functions.md @@ -54,7 +54,7 @@ If ‘x’ is non-negative, then `erf(x / σ√2)` is the probability that a ran Example (three sigma rule): ``` sql -SELECT erf(3 / sqrt(2)) +SELECT erf(3 / sqrt(2)); ``` ``` text diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 2c7f8da881e..d99d671418f 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -1762,7 +1762,6 @@ Result: ``` - ## randomStringUTF8 {#randomstringutf8} Generates a random string of a specified length. Result string contains valid UTF-8 code points. The value of code points may be outside of the range of assigned Unicode. diff --git a/docs/en/sql-reference/functions/rounding-functions.md b/docs/en/sql-reference/functions/rounding-functions.md index 83db1975366..102a1fff5a0 100644 --- a/docs/en/sql-reference/functions/rounding-functions.md +++ b/docs/en/sql-reference/functions/rounding-functions.md @@ -35,7 +35,7 @@ The function returns the nearest number of the specified order. In case when giv round(expression [, decimal_places]) ``` -**Arguments:** +**Arguments** - `expression` — A number to be rounded. Can be any [expression](../../sql-reference/syntax.md#syntax-expressions) returning the numeric [data type](../../sql-reference/data-types/index.md#data_types). - `decimal-places` — An integer value. 
diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index 2c08fa3acb7..40481975886 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -73,19 +73,19 @@ Returns 1, if the set of bytes is valid UTF-8 encoded, otherwise 0. Replaces invalid UTF-8 characters by the `�` (U+FFFD) character. All running in a row invalid characters are collapsed into the one replacement character. ``` sql -toValidUTF8( input_string ) +toValidUTF8(input_string) ``` **Arguments** -- input_string — Any set of bytes represented as the [String](../../sql-reference/data-types/string.md) data type object. +- `input_string` — Any set of bytes represented as the [String](../../sql-reference/data-types/string.md) data type object. Returned value: Valid UTF-8 string. **Example** ``` sql -SELECT toValidUTF8('\x61\xF0\x80\x80\x80b') +SELECT toValidUTF8('\x61\xF0\x80\x80\x80b'); ``` ``` text @@ -122,7 +122,7 @@ Type: `String`. Query: ``` sql -SELECT repeat('abc', 10) +SELECT repeat('abc', 10); ``` Result: @@ -190,7 +190,7 @@ If any of argument values is `NULL`, `concat` returns `NULL`. Query: ``` sql -SELECT concat('Hello, ', 'World!') +SELECT concat('Hello, ', 'World!'); ``` Result: @@ -245,7 +245,7 @@ SELECT * from key_val; Query: ``` sql -SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY concatAssumeInjective(key1, key2) +SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY concatAssumeInjective(key1, key2); ``` Result: @@ -336,8 +336,8 @@ trim([[LEADING|TRAILING|BOTH] trim_character FROM] input_string) **Arguments** -- `trim_character` — specified characters for trim. [String](../../sql-reference/data-types/string.md). -- `input_string` — string for trim. [String](../../sql-reference/data-types/string.md). +- `trim_character` — Specified characters for trim. [String](../../sql-reference/data-types/string.md). +- `input_string` — String for trim. 
[String](../../sql-reference/data-types/string.md). **Returned value** @@ -350,7 +350,7 @@ Type: `String`. Query: ``` sql -SELECT trim(BOTH ' ()' FROM '( Hello, world! )') +SELECT trim(BOTH ' ()' FROM '( Hello, world! )'); ``` Result: @@ -388,7 +388,7 @@ Type: `String`. Query: ``` sql -SELECT trimLeft(' Hello, world! ') +SELECT trimLeft(' Hello, world! '); ``` Result: @@ -426,7 +426,7 @@ Type: `String`. Query: ``` sql -SELECT trimRight(' Hello, world! ') +SELECT trimRight(' Hello, world! '); ``` Result: @@ -464,7 +464,7 @@ Type: `String`. Query: ``` sql -SELECT trimBoth(' Hello, world! ') +SELECT trimBoth(' Hello, world! '); ``` Result: @@ -497,7 +497,8 @@ The result type is UInt64. Replaces literals, sequences of literals and complex aliases with placeholders. -**Syntax** +**Syntax** + ``` sql normalizeQuery(x) ``` @@ -617,7 +618,7 @@ This function also replaces numeric character references with Unicode characters decodeXMLComponent(x) ``` -**Parameters** +**Arguments** - `x` — A sequence of characters. [String](../../sql-reference/data-types/string.md). diff --git a/docs/en/sql-reference/functions/string-search-functions.md b/docs/en/sql-reference/functions/string-search-functions.md index 83b0edea438..6ca7473cdc2 100644 --- a/docs/en/sql-reference/functions/string-search-functions.md +++ b/docs/en/sql-reference/functions/string-search-functions.md @@ -26,9 +26,9 @@ Alias: `locate(haystack, needle[, start_pos])`. **Arguments** -- `haystack` — string, in which substring will to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `needle` — substring to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `start_pos` – Optional parameter, position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md) +- `haystack` — String, in which substring will to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). 
+- `needle` — Substring to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). +- `start_pos` — Optional parameter, position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md). **Returned values** @@ -44,7 +44,7 @@ The phrase “Hello, world!” contains a set of bytes representing a single-byt Query: ``` sql -SELECT position('Hello, world!', '!') +SELECT position('Hello, world!', '!'); ``` Result: @@ -72,7 +72,7 @@ The same phrase in Russian contains characters which can’t be represented usin Query: ``` sql -SELECT position('Привет, мир!', '!') +SELECT position('Привет, мир!', '!'); ``` Result: @@ -97,9 +97,9 @@ positionCaseInsensitive(haystack, needle[, start_pos]) **Arguments** -- `haystack` — string, in which substring will to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `needle` — substring to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `start_pos` – Optional parameter, position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md) +- `haystack` — String, in which substring will to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). +- `needle` — Substring to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). +- `start_pos` — Optional parameter, position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md). **Returned values** @@ -113,7 +113,7 @@ Type: `Integer`. Query: ``` sql -SELECT positionCaseInsensitive('Hello, world!', 'hello') +SELECT positionCaseInsensitive('Hello, world!', 'hello'); ``` Result: @@ -140,9 +140,9 @@ positionUTF8(haystack, needle[, start_pos]) **Arguments** -- `haystack` — string, in which substring will to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `needle` — substring to be searched. 
[String](../../sql-reference/syntax.md#syntax-string-literal). -- `start_pos` – Optional parameter, position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md) +- `haystack` — String, in which substring will to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). +- `needle` — Substring to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). +- `start_pos` — Optional parameter, position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md) **Returned values** @@ -158,7 +158,7 @@ The phrase “Hello, world!” in Russian contains a set of Unicode points repre Query: ``` sql -SELECT positionUTF8('Привет, мир!', '!') +SELECT positionUTF8('Привет, мир!', '!'); ``` Result: @@ -174,7 +174,7 @@ The phrase “Salut, étudiante!”, where character `é` can be represented usi Query for the letter `é`, which is represented one Unicode point `U+00E9`: ``` sql -SELECT positionUTF8('Salut, étudiante!', '!') +SELECT positionUTF8('Salut, étudiante!', '!'); ``` Result: @@ -188,7 +188,7 @@ Result: Query for the letter `é`, which is represented two Unicode points `U+0065U+0301`: ``` sql -SELECT positionUTF8('Salut, étudiante!', '!') +SELECT positionUTF8('Salut, étudiante!', '!'); ``` Result: @@ -213,9 +213,9 @@ positionCaseInsensitiveUTF8(haystack, needle[, start_pos]) **Arguments** -- `haystack` — string, in which substring will to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `needle` — substring to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `start_pos` – Optional parameter, position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md) +- `haystack` — String, in which substring will to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). +- `needle` — Substring to be searched. 
[String](../../sql-reference/syntax.md#syntax-string-literal). +- `start_pos` — Optional parameter, position of the first character in the string to start search. [UInt](../../sql-reference/data-types/int-uint.md) **Returned value** @@ -229,7 +229,7 @@ Type: `Integer`. Query: ``` sql -SELECT positionCaseInsensitiveUTF8('Привет, мир!', 'Мир') +SELECT positionCaseInsensitiveUTF8('Привет, мир!', 'Мир'); ``` Result: @@ -258,8 +258,8 @@ multiSearchAllPositions(haystack, [needle1, needle2, ..., needlen]) **Arguments** -- `haystack` — string, in which substring will to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `needle` — substring to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). +- `haystack` — String, in which substring will to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). +- `needle` — Substring to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal). **Returned values** @@ -270,7 +270,7 @@ multiSearchAllPositions(haystack, [needle1, needle2, ..., needlen]) Query: ``` sql -SELECT multiSearchAllPositions('Hello, World!', ['hello', '!', 'world']) +SELECT multiSearchAllPositions('Hello, World!', ['hello', '!', 'world']); ``` Result: @@ -387,7 +387,7 @@ If `haystack` doesn’t match the `pattern` regex, an array of empty arrays is r Query: ``` sql -SELECT extractAllGroupsHorizontal('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[^"]+"|\\w+)') +SELECT extractAllGroupsHorizontal('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[^"]+"|\\w+)'); ``` Result: @@ -428,7 +428,7 @@ If `haystack` doesn’t match the `pattern` regex, an empty array is returned. 
Query: ``` sql -SELECT extractAllGroupsVertical('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[^"]+"|\\w+)') +SELECT extractAllGroupsVertical('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[^"]+"|\\w+)'); ``` Result: @@ -506,7 +506,7 @@ Input table: Query: ``` sql -SELECT * FROM Months WHERE ilike(name, '%j%') +SELECT * FROM Months WHERE ilike(name, '%j%'); ``` Result: @@ -618,7 +618,7 @@ countSubstringsCaseInsensitive(haystack, needle[, start_pos]) - `haystack` — The string to search in. [String](../../sql-reference/syntax.md#syntax-string-literal). - `needle` — The substring to search for. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `start_pos` – Position of the first character in the string to start search. Optional. [UInt](../../sql-reference/data-types/int-uint.md). +- `start_pos` — Position of the first character in the string to start search. Optional. [UInt](../../sql-reference/data-types/int-uint.md). **Returned values** @@ -631,7 +631,7 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md). Query: ``` sql -select countSubstringsCaseInsensitive('aba', 'B'); +SELECT countSubstringsCaseInsensitive('aba', 'B'); ``` Result: @@ -684,7 +684,7 @@ SELECT countSubstringsCaseInsensitiveUTF8(haystack, needle[, start_pos]) - `haystack` — The string to search in. [String](../../sql-reference/syntax.md#syntax-string-literal). - `needle` — The substring to search for. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `start_pos` – Position of the first character in the string to start search. Optional. [UInt](../../sql-reference/data-types/int-uint.md). +- `start_pos` — Position of the first character in the string to start search. Optional. [UInt](../../sql-reference/data-types/int-uint.md). 
**Returned values** diff --git a/docs/en/sql-reference/functions/tuple-functions.md b/docs/en/sql-reference/functions/tuple-functions.md index 1006b68b8ee..884e1ef754f 100644 --- a/docs/en/sql-reference/functions/tuple-functions.md +++ b/docs/en/sql-reference/functions/tuple-functions.md @@ -47,7 +47,7 @@ You can use the `EXCEPT` expression to skip columns as a result of the query. **Arguments** -- `x` - A `tuple` function, column, or tuple of elements. [Tuple](../../sql-reference/data-types/tuple.md). +- `x` — A `tuple` function, column, or tuple of elements. [Tuple](../../sql-reference/data-types/tuple.md). **Returned value** diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md b/docs/en/sql-reference/functions/tuple-map-functions.md index 1d4839cbbf9..8b0710c0182 100644 --- a/docs/en/sql-reference/functions/tuple-map-functions.md +++ b/docs/en/sql-reference/functions/tuple-map-functions.md @@ -66,7 +66,6 @@ Result: - [Map(key, value)](../../sql-reference/data-types/map.md) data type - ## mapAdd {#function-mapadd} Collect all the keys and sum corresponding values. diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 8a793b99ac9..8d2d253046b 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -381,7 +381,7 @@ This function accepts 16 bytes string, and returns UUID containing bytes represe reinterpretAsUUID(fixed_string) ``` -**Parameters** +**Arguments** - `fixed_string` — Big-endian byte string. [FixedString](../../sql-reference/data-types/fixedstring.md#fixedstring). diff --git a/docs/en/sql-reference/functions/url-functions.md b/docs/en/sql-reference/functions/url-functions.md index 9e79ef2d0cb..54ec463ae66 100644 --- a/docs/en/sql-reference/functions/url-functions.md +++ b/docs/en/sql-reference/functions/url-functions.md @@ -55,7 +55,7 @@ Type: `String`. 
**Example** ``` sql -SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk') +SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk'); ``` ``` text @@ -98,7 +98,7 @@ Type: `String`. **Example** ``` sql -SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk') +SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk'); ``` ``` text diff --git a/docs/en/sql-reference/functions/ym-dict-functions.md b/docs/en/sql-reference/functions/ym-dict-functions.md index 56530b5e83b..d42d44f8336 100644 --- a/docs/en/sql-reference/functions/ym-dict-functions.md +++ b/docs/en/sql-reference/functions/ym-dict-functions.md @@ -112,7 +112,7 @@ Finds the highest continent in the hierarchy for the region. **Syntax** ``` sql -regionToTopContinent(id[, geobase]); +regionToTopContinent(id[, geobase]) ``` **Arguments** diff --git a/docs/en/sql-reference/table-functions/generate.md b/docs/en/sql-reference/table-functions/generate.md index be6ba2b8bc4..6bd82d4e88b 100644 --- a/docs/en/sql-reference/table-functions/generate.md +++ b/docs/en/sql-reference/table-functions/generate.md @@ -10,7 +10,7 @@ Allows to populate test tables with data. Supports all data types that can be stored in table except `LowCardinality` and `AggregateFunction`. 
``` sql -generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]); +generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]) ``` **Arguments** diff --git a/docs/en/sql-reference/table-functions/view.md b/docs/en/sql-reference/table-functions/view.md index 08096c2b019..73618dd2c96 100644 --- a/docs/en/sql-reference/table-functions/view.md +++ b/docs/en/sql-reference/table-functions/view.md @@ -37,7 +37,7 @@ Input table: Query: ``` sql -SELECT * FROM view(SELECT name FROM months) +SELECT * FROM view(SELECT name FROM months); ``` Result: @@ -54,14 +54,15 @@ Result: You can use the `view` function as a parameter of the [remote](https://clickhouse.tech/docs/en/sql-reference/table-functions/remote/#remote-remotesecure) and [cluster](https://clickhouse.tech/docs/en/sql-reference/table-functions/cluster/#cluster-clusterallreplicas) table functions: ``` sql -SELECT * FROM remote(`127.0.0.1`, view(SELECT a, b, c FROM table_name)) +SELECT * FROM remote(`127.0.0.1`, view(SELECT a, b, c FROM table_name)); ``` ``` sql -SELECT * FROM cluster(`cluster_name`, view(SELECT a, b, c FROM table_name)) +SELECT * FROM cluster(`cluster_name`, view(SELECT a, b, c FROM table_name)); ``` **See Also** - [View Table Engine](https://clickhouse.tech/docs/en/engines/table-engines/special/view/) -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/view/) \ No newline at end of file + +[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/view/) diff --git a/docs/ru/getting-started/tutorial.md b/docs/ru/getting-started/tutorial.md index f5455ba2b9a..68b3e4dbae7 100644 --- a/docs/ru/getting-started/tutorial.md +++ b/docs/ru/getting-started/tutorial.md @@ -644,7 +644,7 @@ If there are no replicas at the moment on replicated table creation, a new first ``` sql CREATE TABLE tutorial.hits_replica (...) 
-ENGINE = ReplcatedMergeTree( +ENGINE = ReplicatedMergeTree( '/clickhouse_perftest/tables/{shard}/hits', '{replica}' ) diff --git a/docs/ru/sql-reference/aggregate-functions/combinators.md b/docs/ru/sql-reference/aggregate-functions/combinators.md index 592c61f87ff..5270a269111 100644 --- a/docs/ru/sql-reference/aggregate-functions/combinators.md +++ b/docs/ru/sql-reference/aggregate-functions/combinators.md @@ -70,9 +70,9 @@ toc_title: "\u041a\u043e\u043c\u0431\u0438\u043d\u0430\u0442\u043e\u0440\u044b\u OrDefault(x) ``` -**Параметры** +**Аргументы** -- `x` — Параметры агрегатной функции. +- `x` — параметры агрегатной функции. **Возращаемые зачения** @@ -131,14 +131,14 @@ FROM OrNull(x) ``` -**Параметры** +**Аргументы** -- `x` — Параметры агрегатной функции. +- `x` — параметры агрегатной функции. **Возвращаемые значения** -- Результат агрегатной функции, преобразованный в тип данных `Nullable`. -- `NULL`, если у агрегатной функции нет входных данных. +- Результат агрегатной функции, преобразованный в тип данных `Nullable`. +- `NULL`, если у агрегатной функции нет входных данных. Тип: `Nullable(aggregate function return type)`. @@ -188,7 +188,7 @@ FROM Resample(start, end, step)(, resampling_key) ``` -**Параметры** +**Аргументы** - `start` — начальное значение для интервала значений `resampling_key`. - `stop` — конечное значение для интервала значений `resampling_key`. Интервал не включает значение `stop` (`[start, stop)`). diff --git a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md index d96f7a13bcc..83742fb3dc4 100644 --- a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md @@ -11,14 +11,19 @@ toc_title: "\u041f\u0430\u0440\u0430\u043c\u0435\u0442\u0440\u0438\u0447\u0435\u Рассчитывает адаптивную гистограмму. Не гарантирует точного результата. 
- histogram(number_of_bins)(values) +``` sql +histogram(number_of_bins)(values) +``` Функция использует [A Streaming Parallel Decision Tree Algorithm](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). Границы столбцов устанавливаются по мере поступления новых данных в функцию. В общем случае столбцы имею разную ширину. +**Аргументы** + +`values` — [выражение](../syntax.md#syntax-expressions), предоставляющее входные значения. + **Параметры** `number_of_bins` — максимальное количество корзин в гистограмме. Функция автоматически вычисляет количество корзин. Она пытается получить указанное количество корзин, но если не получилось, то в результате корзин будет меньше. -`values` — [выражение](../syntax.md#syntax-expressions), предоставляющее входные значения. **Возвращаемые значения** @@ -87,14 +92,16 @@ sequenceMatch(pattern)(timestamp, cond1, cond2, ...) !!! warning "Предупреждение" События, произошедшие в одну и ту же секунду, располагаются в последовательности в неопределенном порядке, что может повлиять на результат работы функции. -**Параметры** - -- `pattern` — строка с шаблоном. Смотрите [Синтаксис шаблонов](#sequence-function-pattern-syntax). +**Аргументы** - `timestamp` — столбец, содержащий метки времени. Типичный тип данных столбца — `Date` или `DateTime`. Также можно использовать любой из поддержанных типов данных [UInt](../../sql-reference/aggregate-functions/parametric-functions.md). - `cond1`, `cond2` — условия, описывающие цепочку событий. Тип данных — `UInt8`. Можно использовать до 32 условий. Функция учитывает только те события, которые указаны в условиях. Функция пропускает данные из последовательности, если они не описаны ни в одном из условий. +**Параметры** + +- `pattern` — строка с шаблоном. Смотрите [Синтаксис шаблонов](#sequence-function-pattern-syntax). + **Возвращаемые значения** - 1, если цепочка событий, соответствующая шаблону найдена. 
@@ -174,14 +181,16 @@ SELECT sequenceMatch('(?1)(?2)')(time, number = 1, number = 2, number = 4) FROM sequenceCount(pattern)(timestamp, cond1, cond2, ...) ``` -**Параметры** - -- `pattern` — строка с шаблоном. Смотрите [Синтаксис шаблонов](#sequence-function-pattern-syntax). +**Аргументы** - `timestamp` — столбец, содержащий метки времени. Типичный тип данных столбца — `Date` или `DateTime`. Также можно использовать любой из поддержанных типов данных [UInt](../../sql-reference/aggregate-functions/parametric-functions.md). - `cond1`, `cond2` — условия, описывающие цепочку событий. Тип данных — `UInt8`. Можно использовать до 32 условий. Функция учитывает только те события, которые указаны в условиях. Функция пропускает данные из последовательности, если они не описаны ни в одном из условий. +**Параметры** + +- `pattern` — строка с шаблоном. Смотрите [Синтаксис шаблонов](#sequence-function-pattern-syntax). + **Возвращаемое значение** - Число непересекающихся цепочек событий, соответствущих шаблону. @@ -237,12 +246,15 @@ SELECT sequenceCount('(?1).*(?2)')(time, number = 1, number = 2) FROM t windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) ``` +**Аргументы** + +- `timestamp` — имя столбца, содержащего временные отметки. [Date](../../sql-reference/aggregate-functions/parametric-functions.md), [DateTime](../../sql-reference/aggregate-functions/parametric-functions.md#data_type-datetime) и другие параметры с типом `Integer`. В случае хранения меток времени в столбцах с типом `UInt64`, максимально допустимое значение соответствует ограничению для типа `Int64`, т.е. равно `2^63-1`. +- `cond` — условия или данные, описывающие цепочку событий. [UInt8](../../sql-reference/aggregate-functions/parametric-functions.md). + **Параметры** - `window` — ширина скользящего окна по времени. Единица измерения зависит от `timestamp` и может варьироваться. Должно соблюдаться условие `timestamp события cond2 <= timestamp события cond1 + window`. 
-- `mode` - необязательный параметр. Если установлено значение `'strict'`, то функция `windowFunnel()` применяет условия только для уникальных значений. -- `timestamp` — имя столбца, содержащего временные отметки. [Date](../../sql-reference/aggregate-functions/parametric-functions.md), [DateTime](../../sql-reference/aggregate-functions/parametric-functions.md#data_type-datetime) и другие параметры с типом `Integer`. В случае хранения меток времени в столбцах с типом `UInt64`, максимально допустимое значение соответствует ограничению для типа `Int64`, т.е. равно `2^63-1`. -- `cond` — условия или данные, описывающие цепочку событий. [UInt8](../../sql-reference/aggregate-functions/parametric-functions.md). +- `mode` — необязательный параметр. Если установлено значение `'strict'`, то функция `windowFunnel()` применяет условия только для уникальных значений. **Возвращаемое значение** @@ -306,7 +318,7 @@ ORDER BY level ASC Функция принимает набор (от 1 до 32) логических условий, как в [WHERE](../../sql-reference/statements/select/where.md#select-where), и применяет их к заданному набору данных. -Условия, кроме первого, применяются попарно: результат второго будет истинным, если истинно первое и второе, третьего - если истинно первое и третье и т. д. +Условия, кроме первого, применяются попарно: результат второго будет истинным, если истинно первое и второе, третьего - если истинно первое и третье и т.д. **Синтаксис** @@ -314,7 +326,7 @@ ORDER BY level ASC retention(cond1, cond2, ..., cond32) ``` -**Параметры** +**Аргументы** - `cond` — вычисляемое условие или выражение, которое возвращает `UInt8` результат (1/0). 
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/argmax.md b/docs/ru/sql-reference/aggregate-functions/reference/argmax.md index f44e65831a9..c5585d58485 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/argmax.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/argmax.md @@ -20,7 +20,7 @@ argMax(arg, val) argMax(tuple(arg, val)) ``` -**Параметры** +**Аргументы** - `arg` — аргумент. - `val` — значение. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/argmin.md b/docs/ru/sql-reference/aggregate-functions/reference/argmin.md index 8c25b79f92a..d508042d474 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/argmin.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/argmin.md @@ -20,7 +20,7 @@ argMin(arg, val) argMin(tuple(arg, val)) ``` -**Параметры** +**Аргументы** - `arg` — аргумент. - `val` — значение. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md index 72e6ca5c88c..0275fb77bbb 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md @@ -12,10 +12,10 @@ toc_priority: 107 avgWeighted(x, weight) ``` -**Параметры** +**Аргументы** -- `x` — Значения. [Целые числа](../../../sql-reference/data-types/int-uint.md) или [числа с плавающей запятой](../../../sql-reference/data-types/float.md). -- `weight` — Веса отдельных значений. [Целые числа](../../../sql-reference/data-types/int-uint.md) или [числа с плавающей запятой](../../../sql-reference/data-types/float.md). +- `x` — значения. [Целые числа](../../../sql-reference/data-types/int-uint.md) или [числа с плавающей запятой](../../../sql-reference/data-types/float.md). +- `weight` — веса отдельных значений. [Целые числа](../../../sql-reference/data-types/int-uint.md) или [числа с плавающей запятой](../../../sql-reference/data-types/float.md). 
Типы параметров должны совпадать. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/count.md b/docs/ru/sql-reference/aggregate-functions/reference/count.md index d99c3b2aeb2..a9135a35f20 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/count.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/count.md @@ -4,14 +4,14 @@ toc_priority: 1 # count {#agg_function-count} -Вычисляет количество строк или не NULL значений . +Вычисляет количество строк или не NULL значений. ClickHouse поддерживает следующие виды синтаксиса для `count`: - `count(expr)` или `COUNT(DISTINCT expr)`. - `count()` или `COUNT(*)`. Синтаксис `count()` специфичен для ClickHouse. -**Параметры** +**Аргументы** Функция может принимать: @@ -21,7 +21,7 @@ ClickHouse поддерживает следующие виды синтакси **Возвращаемое значение** - Если функция вызывается без параметров, она вычисляет количество строк. -- Если передаётся [выражение](../../syntax.md#syntax-expressions) , то функция вычисляет количество раз, когда выражение возвращает не NULL. Если выражение возвращает значение типа [Nullable](../../../sql-reference/data-types/nullable.md), то результат `count` не становится `Nullable`. Функция возвращает 0, если выражение возвращает `NULL` для всех строк. +- Если передаётся [выражение](../../syntax.md#syntax-expressions), то функция вычисляет количество раз, когда выражение возвращает не NULL. Если выражение возвращает значение типа [Nullable](../../../sql-reference/data-types/nullable.md), то результат `count` не становится `Nullable`. Функция возвращает 0, если выражение возвращает `NULL` для всех строк. В обоих случаях тип возвращаемого значения [UInt64](../../../sql-reference/data-types/int-uint.md). 
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md index 5c73bccc2bb..37447189155 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md @@ -9,24 +9,24 @@ toc_priority: 112 **Синтаксис** ```sql -groupArrayInsertAt(default_x, size)(x, pos); +groupArrayInsertAt(default_x, size)(x, pos) ``` Если запрос вставляет вставляется несколько значений в одну и ту же позицию, то функция ведет себя следующим образом: -- Если запрос выполняется в одном потоке, то используется первое из вставляемых значений. -- Если запрос выполняется в нескольких потоках, то в результирующем массиве может оказаться любое из вставляемых значений. +- Если запрос выполняется в одном потоке, то используется первое из вставляемых значений. +- Если запрос выполняется в нескольких потоках, то в результирующем массиве может оказаться любое из вставляемых значений. -**Параметры** +**Аргументы** -- `x` — Значение, которое будет вставлено. [Выражение](../../syntax.md#syntax-expressions), возвращающее значение одного из [поддерживаемых типов данных](../../../sql-reference/data-types/index.md#data_types). -- `pos` — Позиция, в которую вставляется заданный элемент `x`. Нумерация индексов в массиве начинается с нуля. [UInt32](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64). -- `default_x` — Значение по умолчанию для подстановки на пустые позиции. Опциональный параметр. [Выражение](../../syntax.md#syntax-expressions), возвращающее значение с типом параметра `x`. Если `default_x` не определен, используются [значения по умолчанию](../../../sql-reference/statements/create/table.md#create-default-values). -- `size`— Длина результирующего массива. Опциональный параметр. 
При использовании этого параметра должно быть указано значение по умолчанию `default_x`. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges). +- `x` — значение, которое будет вставлено. [Выражение](../../syntax.md#syntax-expressions), возвращающее значение одного из [поддерживаемых типов данных](../../../sql-reference/data-types/index.md#data_types). +- `pos` — позиция, в которую вставляется заданный элемент `x`. Нумерация индексов в массиве начинается с нуля. [UInt32](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64). +- `default_x` — значение по умолчанию для подстановки на пустые позиции. Опциональный параметр. [Выражение](../../syntax.md#syntax-expressions), возвращающее значение с типом параметра `x`. Если `default_x` не определен, используются [значения по умолчанию](../../../sql-reference/statements/create/table.md#create-default-values). +- `size` — длина результирующего массива. Опциональный параметр. При использовании этого параметра должно быть указано значение по умолчанию `default_x`. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges). **Возвращаемое значение** -- Массив со вставленными значениями. +- Массив со вставленными значениями. Тип: [Array](../../../sql-reference/data-types/array.md#data-type-array). diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md index 6307189c440..a66dacd64f4 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md @@ -6,12 +6,14 @@ toc_priority: 114 Вычисляет скользящее среднее для входных значений. 
- groupArrayMovingAvg(numbers_for_summing) - groupArrayMovingAvg(window_size)(numbers_for_summing) +``` sql +groupArrayMovingAvg(numbers_for_summing) +groupArrayMovingAvg(window_size)(numbers_for_summing) +``` Функция может принимать размер окна в качестве параметра. Если окно не указано, то функция использует размер окна, равный количеству строк в столбце. -**Параметры** +**Аргументы** - `numbers_for_summing` — [выражение](../../syntax.md#syntax-expressions), возвращающее значение числового типа. - `window_size` — размер окна. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md index c95f1b0b0eb..fb825703c9d 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md @@ -13,7 +13,7 @@ groupArrayMovingSum(window_size)(numbers_for_summing) Функция может принимать размер окна в качестве параметра. Если окно не указано, то функция использует размер окна, равный количеству строк в столбце. -**Параметры** +**Аргументы** - `numbers_for_summing` — [выражение](../../syntax.md#syntax-expressions), возвращающее значение числового типа. - `window_size` — размер окна. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparraysample.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparraysample.md index 4c2dafe1a3c..1d58b3397ab 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparraysample.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparraysample.md @@ -12,7 +12,7 @@ toc_priority: 114 groupArraySample(max_size[, seed])(x) ``` -**Параметры** +**Аргументы** - `max_size` — максимальное количество элементов в возвращаемом массиве. [UInt64](../../data-types/int-uint.md). - `seed` — состояние генератора случайных чисел. Необязательный параметр. [UInt64](../../data-types/int-uint.md). 
Значение по умолчанию: `123456`. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md index 03aff64fecf..7f381ca2906 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md @@ -10,7 +10,7 @@ toc_priority: 125 groupBitAnd(expr) ``` -**Параметры** +**Аргументы** `expr` – выражение, результат которого имеет тип данных `UInt*`. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md index a4be18b75ec..6aeb492add0 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md @@ -10,7 +10,7 @@ Bitmap или агрегатные вычисления для столбца с groupBitmap(expr) ``` -**Параметры** +**Аргументы** `expr` – выражение, результат которого имеет тип данных `UInt*`. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md index e1afced014f..9f7165286ed 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md @@ -10,7 +10,7 @@ toc_priority: 126 groupBitOr(expr) ``` -**Параметры** +**Аргументы** `expr` – выражение, результат которого имеет тип данных `UInt*`. 
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md index a80f86b2a5f..7d43d4a06f4 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md @@ -10,7 +10,7 @@ toc_priority: 127 groupBitXor(expr) ``` -**Параметры** +**Аргументы** `expr` – выражение, результат которого имеет тип данных `UInt*`. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/initializeAggregation.md b/docs/ru/sql-reference/aggregate-functions/reference/initializeAggregation.md index a2e3764193e..3565115d8de 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/initializeAggregation.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/initializeAggregation.md @@ -10,10 +10,10 @@ toc_priority: 150 **Синтаксис** ``` sql -initializeAggregation (aggregate_function, column_1, column_2); +initializeAggregation (aggregate_function, column_1, column_2) ``` -**Параметры** +**Аргументы** - `aggregate_function` — название функции агрегации, состояние которой нужно создать. [String](../../../sql-reference/data-types/string.md#string). - `column_n` — столбец, который передается в функцию агрегации как аргумент. [String](../../../sql-reference/data-types/string.md#string). diff --git a/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md b/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md index a00dae51ed6..4c10ecb5abc 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md @@ -10,9 +10,9 @@ toc_priority: 153 kurtPop(expr) ``` -**Параметры** +**Аргументы** -`expr` — [Выражение](../../syntax.md#syntax-expressions), возвращающее число. +`expr` — [выражение](../../syntax.md#syntax-expressions), возвращающее число. 
**Возвращаемое значение** @@ -21,7 +21,7 @@ kurtPop(expr) **Пример** ``` sql -SELECT kurtPop(value) FROM series_with_value_column +SELECT kurtPop(value) FROM series_with_value_column; ``` [Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/kurtpop/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md index 379d74ec0c3..4562a193b3e 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md @@ -12,9 +12,9 @@ toc_priority: 154 kurtSamp(expr) ``` -**Параметры** +**Аргументы** -`expr` — [Выражение](../../syntax.md#syntax-expressions), возвращающее число. +`expr` — [выражение](../../syntax.md#syntax-expressions), возвращающее число. **Возвращаемое значение** @@ -23,7 +23,7 @@ kurtSamp(expr) **Пример** ``` sql -SELECT kurtSamp(value) FROM series_with_value_column +SELECT kurtSamp(value) FROM series_with_value_column; ``` [Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/kurtsamp/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md b/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md index a4647ecfb34..068ff9990b4 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md @@ -17,16 +17,18 @@ mannWhitneyUTest[(alternative[, continuity_correction])](sample_data, sample_ind Проверяется нулевая гипотеза, что генеральные совокупности стохастически равны. Наряду с двусторонней гипотезой могут быть проверены и односторонние. Для применения U-критерия Манна — Уитни закон распределения генеральных совокупностей не обязан быть нормальным. +**Аргументы** + +- `sample_data` — данные выборок. 
[Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) или [Decimal](../../../sql-reference/data-types/decimal.md). +- `sample_index` — индексы выборок. [Integer](../../../sql-reference/data-types/int-uint.md). + **Параметры** - `alternative` — альтернативная гипотеза. (Необязательный параметр, по умолчанию: `'two-sided'`.) [String](../../../sql-reference/data-types/string.md). - `'two-sided'`; - `'greater'`; - `'less'`. -- `continuity_correction` - если не 0, то при вычислении p-значения применяется коррекция непрерывности. (Необязательный параметр, по умолчанию: 1.) [UInt64](../../../sql-reference/data-types/int-uint.md). -- `sample_data` — данные выборок. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). -- `sample_index` — индексы выборок. [Integer](../../../sql-reference/data-types/int-uint.md). - +- `continuity_correction` — если не 0, то при вычислении p-значения применяется коррекция непрерывности. (Необязательный параметр, по умолчанию: 1.) [UInt64](../../../sql-reference/data-types/int-uint.md). **Возвращаемые значения** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantile.md b/docs/ru/sql-reference/aggregate-functions/reference/quantile.md index 10fec16ab94..7cc4f8c7aef 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantile.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantile.md @@ -18,10 +18,10 @@ quantile(level)(expr) Алиас: `median`. -**Параметры** +**Аргументы** -- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). 
-- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). +- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md index fdbcda821f6..3c03c356ab9 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md @@ -18,11 +18,11 @@ quantileDeterministic(level)(expr, determinator) Алиас: `medianDeterministic`. -**Параметры** +**Аргументы** -- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). 
-- `determinator` — Число, хэш которого используется при сэмплировании в алгоритме reservoir sampling, чтобы сделать результат детерминированным. В качестве детерминатора можно использовать любое определённое положительное число, например, идентификатор пользователя или события. Если одно и то же значение детерминатора попадается в выборке слишком часто, то функция выдаёт некорректный результат. +- `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). +- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `determinator` — число, хэш которого используется при сэмплировании в алгоритме reservoir sampling, чтобы сделать результат детерминированным. В качестве детерминатора можно использовать любое определённое положительное число, например, идентификатор пользователя или события. Если одно и то же значение детерминатора попадается в выборке слишком часто, то функция выдаёт некорректный результат. **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md index 4ee815a94fb..452b810cf03 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md @@ -18,10 +18,10 @@ quantileExact(level)(expr) Алиас: `medianExact`. -**Параметры** +**Аргументы** -- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. 
Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). +- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). **Возвращаемое значение** @@ -77,10 +77,10 @@ quantileExact(level)(expr) Алиас: `medianExactLow`. -**Параметры** +**Аргументы** -- `level` — Уровень квантили. Опциональный параметр. Константное занчение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://en.wikipedia.org/wiki/Median). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). +- `level` — уровень квантили. Опциональный параметр. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://en.wikipedia.org/wiki/Median).
+- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). **Возвращаемое значение** @@ -127,10 +127,10 @@ quantileExactHigh(level)(expr) Алиас: `medianExactHigh`. -**Параметры** +**Аргументы** -- `level` — Уровень квантили. Опциональный параметр. Константное занчение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://en.wikipedia.org/wiki/Median). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). +- `level` — уровень квантили. Опциональный параметр. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://en.wikipedia.org/wiki/Median). +- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md index f6982d4566f..ee55aaec121 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md @@ -18,11 +18,11 @@ quantileExactWeighted(level)(expr, weight) Алиас: `medianExactWeighted`.
-**Параметры** +**Аргументы** -- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). -- `weight` — Столбец с весам элементов последовательности. Вес — это количество повторений элемента в последовательности. +- `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). +- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `weight` — столбец с весами элементов последовательности. Вес — это количество повторений элемента в последовательности. **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md index f372e308e73..a119ca940b6 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md @@ -20,10 +20,10 @@ quantileTDigest(level)(expr) Алиас: `medianTDigest`. -**Параметры** +**Аргументы** -- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1.
Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). +- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md index b6dd846967b..a00c1f8af58 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md @@ -20,11 +20,11 @@ quantileTDigestWeighted(level)(expr, weight) Алиас: `medianTDigest`. -**Параметры** +**Аргументы** -- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). 
-- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). -- `weight` — Столбец с весам элементов последовательности. Вес — это количество повторений элемента в последовательности. +- `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). +- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `weight` — столбец с весами элементов последовательности. Вес — это количество повторений элемента в последовательности. **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md index 32e5e6ce31b..6131f2035cb 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md @@ -18,11 +18,11 @@ quantileTiming(level)(expr) Алиас: `medianTiming`. -**Параметры** +**Аргументы** -- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). +- `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1.
Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — [Выражение](../../syntax.md#syntax-expressions) над значения столбца, которые возвращают данные типа [Float\*](../../../sql-reference/data-types/float.md). +- `expr` — [выражение](../../syntax.md#syntax-expressions) над значениями столбца, которые возвращают данные типа [Float\*](../../../sql-reference/data-types/float.md). - Если в функцию передать отрицательные значения, то её поведение не определено. - Если значение больше, чем 30 000 (например, время загрузки страницы превышает 30 секунд), то оно приравнивается к 30 000. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md index 4a7fcc666d5..58a0a4599f9 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md @@ -18,16 +18,16 @@ quantileTimingWeighted(level)(expr, weight) Алиас: `medianTimingWeighted`. -**Параметры** +**Аргументы** -- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). +- `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — [Выражение](../../syntax.md#syntax-expressions) над значения столбца, которые возвращают данные типа [Float\*](../../../sql-reference/data-types/float.md).
+- `expr` — [выражение](../../syntax.md#syntax-expressions) над значениями столбца, которые возвращают данные типа [Float\*](../../../sql-reference/data-types/float.md). - Если в функцию передать отрицательные значения, то её поведение не определено. - Если значение больше, чем 30 000 (например, время загрузки страницы превышает 30 секунд), то оно приравнивается к 30 000. -- `weight` — Столбец с весам элементов последовательности. Вес — это количество повторений элемента в последовательности. +- `weight` — столбец с весами элементов последовательности. Вес — это количество повторений элемента в последовательности. **Точность** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md b/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md index 48a19e87c52..c98e7b88bcf 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/rankCorr.md @@ -8,10 +8,10 @@ rankCorr(x, y) ``` -**Параметры** +**Аргументы** -- `x` — Произвольное значение. [Float32](../../../sql-reference/data-types/float.md#float32-float64) или [Float64](../../../sql-reference/data-types/float.md#float32-float64). -- `y` — Произвольное значение. [Float32](../../../sql-reference/data-types/float.md#float32-float64) или [Float64](../../../sql-reference/data-types/float.md#float32-float64). +- `x` — произвольное значение. [Float32](../../../sql-reference/data-types/float.md#float32-float64) или [Float64](../../../sql-reference/data-types/float.md#float32-float64). +- `y` — произвольное значение. [Float32](../../../sql-reference/data-types/float.md#float32-float64) или [Float64](../../../sql-reference/data-types/float.md#float32-float64).
**Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md b/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md index a6dee5dc5ef..03fec8bd895 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md @@ -10,9 +10,9 @@ toc_priority: 150 skewPop(expr) ``` -**Параметры** +**Аргументы** -`expr` — [Выражение](../../syntax.md#syntax-expressions), возвращающее число. +`expr` — [выражение](../../syntax.md#syntax-expressions), возвращающее число. **Возвращаемое значение** @@ -21,7 +21,7 @@ skewPop(expr) **Пример** ``` sql -SELECT skewPop(value) FROM series_with_value_column +SELECT skewPop(value) FROM series_with_value_column; ``` [Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/skewpop/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md index 171eb5e304a..2be9e338d00 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md @@ -12,9 +12,9 @@ toc_priority: 151 skewSamp(expr) ``` -**Параметры** +**Аргументы** -`expr` — [Выражение](../../syntax.md#syntax-expressions), возвращающее число. +`expr` — [выражение](../../syntax.md#syntax-expressions), возвращающее число. 
**Возвращаемое значение** @@ -23,7 +23,7 @@ skewSamp(expr) **Пример** ``` sql -SELECT skewSamp(value) FROM series_with_value_column +SELECT skewSamp(value) FROM series_with_value_column; ``` [Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/skewsamp/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md b/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md index 77378de95d1..d2e680e5ed9 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md @@ -16,7 +16,7 @@ studentTTest(sample_data, sample_index) Значения выборок берутся из столбца `sample_data`. Если `sample_index` равно 0, то значение из этой строки принадлежит первой выборке. Во всех остальных случаях значение принадлежит второй выборке. Проверяется нулевая гипотеза, что средние значения генеральных совокупностей совпадают. Для применения t-критерия Стьюдента распределение в генеральных совокупностях должно быть нормальным и дисперсии должны совпадать. -**Параметры** +**Аргументы** - `sample_data` — данные выборок. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). - `sample_index` — индексы выборок. [Integer](../../../sql-reference/data-types/int-uint.md). diff --git a/docs/ru/sql-reference/aggregate-functions/reference/topk.md b/docs/ru/sql-reference/aggregate-functions/reference/topk.md index 6aefd38bf34..86b9f5fe5b3 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/topk.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/topk.md @@ -18,8 +18,8 @@ topK(N)(column) **Аргументы** -- ‘N’ - Количество значений. -- ‘x’ – Столбец. +- `N` – количество значений. +- `x` – столбец. 
**Пример** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md index 20bd3ee85ff..cc0e433cfb3 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md @@ -12,13 +12,13 @@ toc_priority: 109 topKWeighted(N)(x, weight) ``` -**Параметры** +**Аргументы** -- `N` — Количество элементов для выдачи. +- `N` — количество элементов для выдачи. **Аргументы** -- `x` – значение. +- `x` — значение. - `weight` — вес. [UInt8](../../../sql-reference/data-types/int-uint.md). **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniq.md b/docs/ru/sql-reference/aggregate-functions/reference/uniq.md index f5f3f198139..f9b4fad7d59 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniq.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniq.md @@ -10,7 +10,7 @@ toc_priority: 190 uniq(x[, ...]) ``` -**Параметры** +**Аргументы** Функция принимает переменное число входных параметров. Параметры могут быть числовых типов, а также `Tuple`, `Array`, `Date`, `DateTime`, `String`. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md index 751dc1a8c98..323454f7778 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md @@ -12,7 +12,7 @@ uniqCombined(HLL_precision)(x[, ...]) Функция `uniqCombined` — это хороший выбор для вычисления количества различных значений. -**Параметры** +**Аргументы** Функция принимает переменное число входных параметров. Параметры могут быть числовых типов, а также `Tuple`, `Array`, `Date`, `DateTime`, `String`. 
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md index 3dd22b2b4bc..20c05ab20f9 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md @@ -14,7 +14,7 @@ uniqExact(x[, ...]) Функция `uniqExact` расходует больше оперативной памяти, чем функция `uniq`, так как размер состояния неограниченно растёт по мере роста количества различных значений. -**Параметры** +**Аргументы** Функция принимает переменное число входных параметров. Параметры могут быть числовых типов, а также `Tuple`, `Array`, `Date`, `DateTime`, `String`. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md index 09e52ac6833..290dd65ad8b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md @@ -10,7 +10,7 @@ toc_priority: 194 uniqHLL12(x[, ...]) ``` -**Параметры** +**Аргументы** Функция принимает переменное число входных параметров. Параметры могут быть числовых типов, а также `Tuple`, `Array`, `Date`, `DateTime`, `String`. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md b/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md index 16c122d1b49..0af5d865dae 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md @@ -16,7 +16,7 @@ welchTTest(sample_data, sample_index) Значения выборок берутся из столбца `sample_data`. Если `sample_index` равно 0, то значение из этой строки принадлежит первой выборке. Во всех остальных случаях значение принадлежит второй выборке. Проверяется нулевая гипотеза, что средние значения генеральных совокупностей совпадают. 
Для применения t-критерия Уэлча распределение в генеральных совокупностях должно быть нормальным. Дисперсии могут не совпадать. -**Параметры** +**Аргументы** - `sample_data` — данные выборок. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). - `sample_index` — индексы выборок. [Integer](../../../sql-reference/data-types/int-uint.md). diff --git a/docs/ru/sql-reference/functions/array-functions.md b/docs/ru/sql-reference/functions/array-functions.md index fe216b1aed1..3c7e2ec619a 100644 --- a/docs/ru/sql-reference/functions/array-functions.md +++ b/docs/ru/sql-reference/functions/array-functions.md @@ -58,7 +58,7 @@ toc_title: "\u041c\u0430\u0441\u0441\u0438\u0432\u044b" arrayConcat(arrays) ``` -**Параметры** +**Аргументы** - `arrays` – произвольное количество элементов типа [Array](../../sql-reference/functions/array-functions.md) **Пример** @@ -108,7 +108,7 @@ SELECT has([1, 2, NULL], NULL) hasAll(set, subset) ``` -**Параметры** +**Аргументы** - `set` – массив любого типа с набором элементов. - `subset` – массив любого типа со значениями, которые проверяются на вхождение в `set`. @@ -146,7 +146,7 @@ hasAll(set, subset) hasAny(array1, array2) ``` -**Параметры** +**Аргументы** - `array1` – массив любого типа с набором элементов. - `array2` – массив любого типа с набором элементов. @@ -320,21 +320,21 @@ SELECT arrayEnumerateUniq([1, 1, 1, 2, 2, 2], [1, 1, 2, 1, 1, 2]) AS res arrayPopBack(array) ``` -**Параметры** +**Аргументы** -- `array` - Массив. +- `array` – массив. **Пример** ``` sql -SELECT arrayPopBack([1, 2, 3]) AS res +SELECT arrayPopBack([1, 2, 3]) AS res; ``` -text - - ┌─res───┐ - │ [1,2] │ - └───────┘ +``` text +┌─res───┐ +│ [1,2] │ +└───────┘ +``` ## arrayPopFront {#arraypopfront} @@ -344,14 +344,14 @@ text arrayPopFront(array) ``` -**Параметры** +**Аргументы** -- `array` - Массив. +- `array` – массив. 
**Пример** ``` sql -SELECT arrayPopFront([1, 2, 3]) AS res +SELECT arrayPopFront([1, 2, 3]) AS res; ``` ``` text @@ -368,15 +368,15 @@ SELECT arrayPopFront([1, 2, 3]) AS res arrayPushBack(array, single_value) ``` -**Параметры** +**Аргументы** -- `array` - Массив. -- `single_value` - Одиночное значение. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`. Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. +- `array` – массив. +- `single_value` – одиночное значение. В массив с числами можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`. Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. **Пример** ``` sql -SELECT arrayPushBack(['a'], 'b') AS res +SELECT arrayPushBack(['a'], 'b') AS res; ``` ``` text @@ -393,15 +393,15 @@ SELECT arrayPushBack(['a'], 'b') AS res arrayPushFront(array, single_value) ``` -**Параметры** +**Аргументы** -- `array` - Массив. -- `single_value` - Одиночное значение. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`. Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. +- `array` – массив.
+- `single_value` – одиночное значение. В массив с числами можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`. Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. **Пример** ``` sql -SELECT arrayPushFront(['b'], 'a') AS res +SELECT arrayPushFront(['b'], 'a') AS res; ``` ``` text @@ -418,7 +418,7 @@ SELECT arrayPushFront(['b'], 'a') AS res arrayResize(array, size[, extender]) ``` -**Параметры** +**Аргументы** - `array` — массив. - `size` — необходимая длина массива. @@ -433,7 +433,7 @@ arrayResize(array, size[, extender]) **Примеры вызовов** ``` sql -SELECT arrayResize([1], 3) +SELECT arrayResize([1], 3); ``` ``` text @@ -443,7 +443,7 @@ SELECT arrayResize([1], 3) ``` ``` sql -SELECT arrayResize([1], 3, NULL) +SELECT arrayResize([1], 3, NULL); ``` ``` text @@ -460,16 +460,16 @@ SELECT arrayResize([1], 3, NULL) arraySlice(array, offset[, length]) ``` -**Параметры** +**Аргументы** -- `array` - Массив данных. -- `offset` - Отступ от края массива. Положительное значение - отступ слева, отрицательное значение - отступ справа. Отсчет элементов массива начинается с 1. -- `length` - Длина необходимого среза. Если указать отрицательное значение, то функция вернёт открытый срез `[offset, array_length - length)`. Если не указать значение, то функция вернёт срез `[offset, the_end_of_array]`. +- `array` – массив данных. +- `offset` – отступ от края массива. Положительное значение - отступ слева, отрицательное значение - отступ справа. Отсчет элементов массива начинается с 1. +- `length` – длина необходимого среза. Если указать отрицательное значение, то функция вернёт открытый срез `[offset, array_length - length)`.
Если не указать значение, то функция вернёт срез `[offset, the_end_of_array]`. **Пример** ``` sql -SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res +SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res; ``` ``` text @@ -702,9 +702,9 @@ SELECT arrayReverseSort((x, y) -> -y, [4, 3, 5], [1, 2, 3]) AS res; arrayDifference(array) ``` -**Параметры** +**Аргументы** -- `array` – [Массив](https://clickhouse.tech/docs/ru/data_types/array/). +- `array` – [массив](https://clickhouse.tech/docs/ru/data_types/array/). **Возвращаемое значение** @@ -715,10 +715,10 @@ arrayDifference(array) Запрос: ``` sql -SELECT arrayDifference([1, 2, 3, 4]) +SELECT arrayDifference([1, 2, 3, 4]); ``` -Ответ: +Результат: ``` text ┌─arrayDifference([1, 2, 3, 4])─┐ @@ -731,10 +731,10 @@ SELECT arrayDifference([1, 2, 3, 4]) Запрос: ``` sql -SELECT arrayDifference([0, 10000000000000000000]) +SELECT arrayDifference([0, 10000000000000000000]); ``` -Ответ: +Результат: ``` text ┌─arrayDifference([0, 10000000000000000000])─┐ @@ -752,9 +752,9 @@ SELECT arrayDifference([0, 10000000000000000000]) arrayDistinct(array) ``` -**Параметры** +**Аргументы** -- `array` – [Массив](https://clickhouse.tech/docs/ru/data_types/array/). +- `array` – [массив](https://clickhouse.tech/docs/ru/data_types/array/). **Возвращаемое значение** @@ -765,7 +765,7 @@ arrayDistinct(array) Запрос: ``` sql -SELECT arrayDistinct([1, 2, 2, 3, 1]) +SELECT arrayDistinct([1, 2, 2, 3, 1]); ``` Ответ: @@ -820,7 +820,7 @@ SELECT arrayReduce(agg_func, arr1, arr2, ..., arrN) ``` -**Параметры** +**Аргументы** - `agg_func` — Имя агрегатной функции, которая должна быть константой [string](../../sql-reference/data-types/string.md). - `arr` — Любое количество столбцов типа [array](../../sql-reference/data-types/array.md) в качестве параметров агрегатной функции. 
@@ -832,10 +832,10 @@ arrayReduce(agg_func, arr1, arr2, ..., arrN) Запрос: ```sql -SELECT arrayReduce('max', [1, 2, 3]) +SELECT arrayReduce('max', [1, 2, 3]); ``` -Ответ: +Результат: ```text ┌─arrayReduce('max', [1, 2, 3])─┐ @@ -850,10 +850,10 @@ SELECT arrayReduce('max', [1, 2, 3]) Запрос: ```sql -SELECT arrayReduce('maxIf', [3, 5], [1, 0]) +SELECT arrayReduce('maxIf', [3, 5], [1, 0]); ``` -Ответ: +Результат: ```text ┌─arrayReduce('maxIf', [3, 5], [1, 0])─┐ @@ -866,10 +866,10 @@ SELECT arrayReduce('maxIf', [3, 5], [1, 0]) Запрос: ```sql -SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) +SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); ``` -Ответ: +Результат: ```text ┌─arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])─┐ @@ -887,15 +887,15 @@ SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) arrayReduceInRanges(agg_func, ranges, arr1, arr2, ..., arrN) ``` -**Параметры** +**Аргументы** -- `agg_func` — Имя агрегатной функции, которая должна быть [строковой](../../sql-reference/data-types/string.md) константой. -- `ranges` — Диапазоны для агрегирования, которые должны быть [массивом](../../sql-reference/data-types/array.md) of [кортежей](../../sql-reference/data-types/tuple.md) который содержит индекс и длину каждого диапазона. -- `arr` — Любое количество столбцов типа [Array](../../sql-reference/data-types/array.md) в качестве параметров агрегатной функции. +- `agg_func` — имя агрегатной функции, которая должна быть [строковой](../../sql-reference/data-types/string.md) константой. +- `ranges` — диапазоны для агрегирования, которые должны быть [массивом](../../sql-reference/data-types/array.md) из [кортежей](../../sql-reference/data-types/tuple.md), которые содержат индекс и длину каждого диапазона. +- `arr` — любое количество столбцов типа [Array](../../sql-reference/data-types/array.md) в качестве параметров агрегатной функции.
**Возвращаемое значение** -- Массив, содержащий результаты агрегатной функции для указанных диапазонов. +- Массив, содержащий результаты агрегатной функции для указанных диапазонов. Тип: [Array](../../sql-reference/data-types/array.md). @@ -911,7 +911,7 @@ SELECT arrayReduceInRanges( ) AS res ``` -Ответ: +Результат: ```text ┌─res─────────────────────────┐ @@ -958,14 +958,14 @@ flatten(array_of_arrays) Синоним: `flatten`. -**Параметры** +**Аргументы** -- `array_of_arrays` — [Массив](../../sql-reference/functions/array-functions.md) массивов. Например, `[[1,2,3], [4,5]]`. +- `array_of_arrays` — [массив](../../sql-reference/functions/array-functions.md) массивов. Например, `[[1,2,3], [4,5]]`. **Примеры** ``` sql -SELECT flatten([[[1]], [[2], [3]]]) +SELECT flatten([[[1]], [[2], [3]]]); ``` ``` text @@ -984,9 +984,9 @@ SELECT flatten([[[1]], [[2], [3]]]) arrayCompact(arr) ``` -**Параметры** +**Аргументы** -`arr` — [Массив](../../sql-reference/functions/array-functions.md) для обхода. +`arr` — [массив](../../sql-reference/functions/array-functions.md) для обхода. **Возвращаемое значение** @@ -999,10 +999,10 @@ arrayCompact(arr) Запрос: ``` sql -SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3]) +SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3]); ``` -Ответ: +Результат: ``` text ┌─arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])─┐ @@ -1020,9 +1020,9 @@ SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3]) arrayZip(arr1, arr2, ..., arrN) ``` -**Параметры** +**Аргументы** -- `arrN` — [Массив](../data-types/array.md). +- `arrN` — [массив](../data-types/array.md). Функция принимает любое количество массивов, которые могут быть различных типов. Все массивы должны иметь одинаковую длину. 
@@ -1037,10 +1037,10 @@ arrayZip(arr1, arr2, ..., arrN) Запрос: ``` sql -SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1]) +SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1]); ``` -Ответ: +Результат: ``` text ┌─arrayZip(['a', 'b', 'c'], [5, 2, 1])─┐ @@ -1067,7 +1067,7 @@ SELECT arrayMap(x -> (x + 2), [1, 2, 3]) as res; Следующий пример показывает, как создать кортежи из элементов разных массивов: ``` sql -SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) AS res +SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) AS res; ``` ``` text @@ -1149,7 +1149,7 @@ SELECT arrayMin([func,] arr) ``` -**Параметры** +**Аргументы** - `func` — функция. [Expression](../../sql-reference/data-types/special-data-types/expression.md). - `arr` — массив. [Array](../../sql-reference/data-types/array.md). @@ -1204,7 +1204,7 @@ SELECT arrayMin(x -> (-x), [1, 2, 4]) AS res; arrayMax([func,] arr) ``` -**Параметры** +**Аргументы** - `func` — функция. [Expression](../../sql-reference/data-types/special-data-types/expression.md). - `arr` — массив. [Array](../../sql-reference/data-types/array.md). @@ -1259,7 +1259,7 @@ SELECT arrayMax(x -> (-x), [1, 2, 4]) AS res; arraySum([func,] arr) ``` -**Параметры** +**Аргументы** - `func` — функция. [Expression](../../sql-reference/data-types/special-data-types/expression.md). - `arr` — массив. [Array](../../sql-reference/data-types/array.md). @@ -1314,7 +1314,7 @@ SELECT arraySum(x -> x*x, [2, 3]) AS res; arrayAvg([func,] arr) ``` -**Параметры** +**Аргументы** - `func` — функция. [Expression](../../sql-reference/data-types/special-data-types/expression.md). - `arr` — массив. [Array](../../sql-reference/data-types/array.md). @@ -1367,9 +1367,9 @@ arraySum(arr) Тип: [Int](../../sql-reference/data-types/int-uint.md) или [Float](../../sql-reference/data-types/float.md). -**Параметры** +**Аргументы** -- `arr` — [Массив](../../sql-reference/data-types/array.md). +- `arr` — [массив](../../sql-reference/data-types/array.md). 
**Примеры** @@ -1429,7 +1429,8 @@ SELECT arrayCumSum([1, 1, 1, 1]) AS res arrayAUC(arr_scores, arr_labels) ``` -**Параметры** +**Аргументы** + - `arr_scores` — оценка, которую дает модель предсказания. - `arr_labels` — ярлыки выборок, обычно 1 для содержательных выборок и 0 для бессодержательных выборок. @@ -1444,10 +1445,10 @@ arrayAUC(arr_scores, arr_labels) Запрос: ``` sql -select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]) +SELECT arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]); ``` -Ответ: +Результат: ``` text ┌─arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1])─┐ diff --git a/docs/ru/sql-reference/functions/bit-functions.md b/docs/ru/sql-reference/functions/bit-functions.md index 8c7808437a5..9f9808bf0d2 100644 --- a/docs/ru/sql-reference/functions/bit-functions.md +++ b/docs/ru/sql-reference/functions/bit-functions.md @@ -31,10 +31,10 @@ toc_title: "\u0411\u0438\u0442\u043e\u0432\u044b\u0435\u0020\u0444\u0443\u043d\u SELECT bitTest(number, index) ``` -**Параметры** +**Аргументы** - `number` – целое число. -- `index` – position of bit. +- `index` – позиция бита. **Возвращаемое значение** @@ -49,10 +49,10 @@ SELECT bitTest(number, index) Запрос: ``` sql -SELECT bitTest(43, 1) +SELECT bitTest(43, 1); ``` -Ответ: +Результат: ``` text ┌─bitTest(43, 1)─┐ @@ -65,10 +65,10 @@ SELECT bitTest(43, 1) Запрос: ``` sql -SELECT bitTest(43, 2) +SELECT bitTest(43, 2); ``` -Ответ: +Результат: ``` text ┌─bitTest(43, 2)─┐ @@ -93,7 +93,7 @@ SELECT bitTest(43, 2) SELECT bitTestAll(number, index1, index2, index3, index4, ...) ``` -**Параметры** +**Аргументы** - `number` – целое число. - `index1`, `index2`, `index3`, `index4` – позиция бита. Например, конъюнкция для набора позиций `index1`, `index2`, `index3`, `index4` является истинной, если все его позиции истинны `index1` ⋀ `index2` ⋀ `index3` ⋀ `index4`. @@ -111,10 +111,10 @@ SELECT bitTestAll(number, index1, index2, index3, index4, ...) 
Запрос: ``` sql -SELECT bitTestAll(43, 0, 1, 3, 5) +SELECT bitTestAll(43, 0, 1, 3, 5); ``` -Ответ: +Результат: ``` text ┌─bitTestAll(43, 0, 1, 3, 5)─┐ @@ -127,10 +127,10 @@ SELECT bitTestAll(43, 0, 1, 3, 5) Запрос: ``` sql -SELECT bitTestAll(43, 0, 1, 3, 5, 2) +SELECT bitTestAll(43, 0, 1, 3, 5, 2); ``` -Ответ: +Результат: ``` text ┌─bitTestAll(43, 0, 1, 3, 5, 2)─┐ @@ -155,7 +155,7 @@ SELECT bitTestAll(43, 0, 1, 3, 5, 2) SELECT bitTestAny(number, index1, index2, index3, index4, ...) ``` -**Параметры** +**Аргументы** - `number` – целое число. - `index1`, `index2`, `index3`, `index4` – позиции бита. @@ -173,10 +173,10 @@ SELECT bitTestAny(number, index1, index2, index3, index4, ...) Запрос: ``` sql -SELECT bitTestAny(43, 0, 2) +SELECT bitTestAny(43, 0, 2); ``` -Ответ: +Результат: ``` text ┌─bitTestAny(43, 0, 2)─┐ @@ -189,10 +189,10 @@ SELECT bitTestAny(43, 0, 2) Запрос: ``` sql -SELECT bitTestAny(43, 4, 2) +SELECT bitTestAny(43, 4, 2); ``` -Ответ: +Результат: ``` text ┌─bitTestAny(43, 4, 2)─┐ @@ -210,9 +210,9 @@ SELECT bitTestAny(43, 4, 2) bitCount(x) ``` -**Параметры** +**Аргументы** -- `x` — [Целое число](../../sql-reference/functions/bit-functions.md) или [число с плавающей запятой](../../sql-reference/functions/bit-functions.md). Функция использует представление числа в памяти, что позволяет поддержать числа с плавающей запятой. +- `x` — [целое число](../../sql-reference/functions/bit-functions.md) или [число с плавающей запятой](../../sql-reference/functions/bit-functions.md). Функция использует представление числа в памяти, что позволяет поддержать числа с плавающей запятой. 
**Возвращаемое значение** @@ -229,7 +229,7 @@ bitCount(x) Запрос: ``` sql -SELECT bitCount(333) +SELECT bitCount(333); ``` Результат: diff --git a/docs/ru/sql-reference/functions/bitmap-functions.md b/docs/ru/sql-reference/functions/bitmap-functions.md index b21ddea94e4..949fa29acb3 100644 --- a/docs/ru/sql-reference/functions/bitmap-functions.md +++ b/docs/ru/sql-reference/functions/bitmap-functions.md @@ -13,14 +13,14 @@ toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u bitmapBuild(array) ``` -**Параметры** +**Аргументы** - `array` – массив типа `UInt*`. **Пример** ``` sql -SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res) +SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res); ``` ``` text @@ -37,14 +37,14 @@ SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res) bitmapToArray(bitmap) ``` -**Параметры** +**Аргументы** - `bitmap` – битовый массив. **Пример** ``` sql -SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res +SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res; ``` ``` text @@ -63,11 +63,11 @@ SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res bitmapSubsetLimit(bitmap, range_start, cardinality_limit) ``` -**Параметры** +**Аргументы** -- `bitmap` – Битмап. [Bitmap object](#bitmap_functions-bitmapbuild). +- `bitmap` – битмап. [Bitmap object](#bitmap_functions-bitmapbuild). -- `range_start` – Начальная точка подмножества. [UInt32](../../sql-reference/functions/bitmap-functions.md#bitmap-functions). +- `range_start` – начальная точка подмножества. [UInt32](../../sql-reference/functions/bitmap-functions.md#bitmap-functions). - `cardinality_limit` – Верхний предел подмножества. [UInt32](../../sql-reference/functions/bitmap-functions.md#bitmap-functions). 
**Возвращаемое значение** @@ -81,10 +81,10 @@ bitmapSubsetLimit(bitmap, range_start, cardinality_limit) Запрос: ``` sql -SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res; ``` -Ответ: +Результат: ``` text ┌─res───────────────────────┐ @@ -100,12 +100,11 @@ SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12 bitmapContains(haystack, needle) ``` -**Параметры** +**Аргументы** - `haystack` – [объект Bitmap](#bitmap_functions-bitmapbuild), в котором функция ищет значение. - `needle` – значение, которое функция ищет. Тип — [UInt32](../../sql-reference/data-types/int-uint.md). - **Возвращаемые значения** - 0 — если в `haystack` нет `needle`. @@ -116,7 +115,7 @@ bitmapContains(haystack, needle) **Пример** ``` sql -SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res +SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res; ``` ``` text @@ -135,7 +134,7 @@ bitmapHasAny(bitmap1, bitmap2) Если вы уверены, что `bitmap2` содержит строго один элемент, используйте функцию [bitmapContains](#bitmap_functions-bitmapcontains). Она работает эффективнее. -**Параметры** +**Аргументы** - `bitmap*` – массив любого типа с набором элементов. @@ -147,7 +146,7 @@ bitmapHasAny(bitmap1, bitmap2) **Пример** ``` sql -SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; ``` ``` text @@ -165,14 +164,14 @@ SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res bitmapHasAll(bitmap,bitmap) ``` -**Параметры** +**Аргументы** - `bitmap` – битовый массив. 
**Пример** ``` sql -SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res +SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; ``` ``` text @@ -189,14 +188,14 @@ SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res bitmapAnd(bitmap,bitmap) ``` -**Параметры** +**Аргументы** - `bitmap` – битовый массив. **Пример** ``` sql -SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res; ``` ``` text @@ -213,14 +212,14 @@ SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS re bitmapOr(bitmap,bitmap) ``` -**Параметры** +**Аргументы** - `bitmap` – битовый массив. **Пример** ``` sql -SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res; ``` ``` text @@ -237,14 +236,14 @@ SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res bitmapXor(bitmap,bitmap) ``` -**Параметры** +**Аргументы** - `bitmap` – битовый массив. **Пример** ``` sql -SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res; ``` ``` text @@ -261,14 +260,14 @@ SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS re bitmapAndnot(bitmap,bitmap) ``` -**Параметры** +**Аргументы** - `bitmap` – битовый массив. **Пример** ``` sql -SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res +SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res; ``` ``` text @@ -285,14 +284,14 @@ SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS bitmapCardinality(bitmap) ``` -**Параметры** +**Аргументы** - `bitmap` – битовый массив. 
**Пример** ``` sql -SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res +SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res; ``` ``` text @@ -309,7 +308,7 @@ SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res bitmapAndCardinality(bitmap,bitmap) ``` -**Параметры** +**Аргументы** - `bitmap` – битовый массив. @@ -333,7 +332,7 @@ SELECT bitmapAndCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; bitmapOrCardinality(bitmap,bitmap) ``` -**Параметры** +**Аргументы** - `bitmap` – битовый массив. @@ -357,7 +356,7 @@ SELECT bitmapOrCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; bitmapXorCardinality(bitmap,bitmap) ``` -**Параметры** +**Аргументы** - `bitmap` – битовый массив. @@ -381,7 +380,7 @@ SELECT bitmapXorCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res; bitmapAndnotCardinality(bitmap,bitmap) ``` -**Параметры** +**Аргументы** - `bitmap` – битовый массив. diff --git a/docs/ru/sql-reference/functions/conditional-functions.md b/docs/ru/sql-reference/functions/conditional-functions.md index 83268b68959..823c0489de2 100644 --- a/docs/ru/sql-reference/functions/conditional-functions.md +++ b/docs/ru/sql-reference/functions/conditional-functions.md @@ -17,11 +17,11 @@ SELECT if(cond, then, else) Если условие `cond` не равно нулю, то возвращается результат выражения `then`. Если условие `cond` равно нулю или является NULL, то результат выражения `then` пропускается и возвращается результат выражения `else`. -**Параметры** +**Аргументы** -- `cond` – Условие, которое может быть равно 0 или нет. Может быть [UInt8](../../sql-reference/functions/conditional-functions.md) или `NULL`. -- `then` - Возвращается результат выражения, если условие `cond` истинно. -- `else` - Возвращается результат выражения, если условие `cond` ложно. +- `cond` – условие, которое может быть равно 0 или нет. Может быть [UInt8](../../sql-reference/functions/conditional-functions.md) или `NULL`. 
+- `then` – возвращается результат выражения, если условие `cond` истинно. +- `else` – возвращается результат выражения, если условие `cond` ложно. **Возвращаемые значения** @@ -32,10 +32,10 @@ SELECT if(cond, then, else) Запрос: ``` sql -SELECT if(1, plus(2, 2), plus(2, 6)) +SELECT if(1, plus(2, 2), plus(2, 6)); ``` -Ответ: +Результат: ``` text ┌─plus(2, 2)─┐ @@ -46,10 +46,10 @@ SELECT if(1, plus(2, 2), plus(2, 6)) Запрос: ``` sql -SELECT if(0, plus(2, 2), plus(2, 6)) +SELECT if(0, plus(2, 2), plus(2, 6)); ``` -Ответ: +Результат: ``` text ┌─plus(2, 6)─┐ @@ -79,11 +79,11 @@ SELECT if(0, plus(2, 2), plus(2, 6)) multiIf(cond_1, then_1, cond_2, then_2...else) -**Параметры** +**Аргументы** -- `cond_N` — Условие, при выполнении которого функция вернёт `then_N`. -- `then_N` — Результат функции при выполнении. -- `else` — Результат функции, если ни одно из условий не выполнено. +- `cond_N` — условие, при выполнении которого функция вернёт `then_N`. +- `then_N` — результат функции при выполнении. +- `else` — результат функции, если ни одно из условий не выполнено. Функция принимает `2N+1` параметров. 
diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index 1cd5ec74540..3e78a5d294a 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -136,7 +136,7 @@ toUnixTimestamp(str, [timezone]) Запрос: ``` sql -SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp +SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp; ``` Результат: @@ -162,6 +162,7 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp ```sql SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101; ``` + ```text ┌─ISOYear20170101─┐ │ 2016-01-04 │ @@ -215,14 +216,14 @@ SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101; toStartOfSecond(value[, timezone]) ``` -**Параметры** +**Аргументы** -- `value` — Дата и время. [DateTime64](../data-types/datetime64.md). -- `timezone` — [Часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) для возвращаемого значения (необязательно). Если параметр не задан, используется часовой пояс параметра `value`. [String](../data-types/string.md). +- `value` — дата и время. [DateTime64](../data-types/datetime64.md). +- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) для возвращаемого значения (необязательно). Если параметр не задан, используется часовой пояс параметра `value`. [String](../data-types/string.md). **Возвращаемое значение** -- Входное значение с отсеченными долями секунды. +- Входное значение с отсеченными долями секунды. Тип: [DateTime64](../data-types/datetime64.md). @@ -256,9 +257,9 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d └────────────────────────────────────────┘ ``` -**См. 
также** +**Смотрите также** -- Часовая зона сервера, конфигурационный параметр [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). +- Часовая зона сервера, конфигурационный параметр [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). ## toStartOfFiveMinute {#tostartoffiveminute} @@ -497,7 +498,7 @@ SELECT now(), date_trunc('hour', now(), 'Europe/Moscow'); └─────────────────────┴────────────────────────────────────────────┘ ``` -**См. также** +**Смотрите также** - [toStartOfInterval](#tostartofintervaltime-or-data-interval-x-unit-time-zone) @@ -511,7 +512,7 @@ SELECT now(), date_trunc('hour', now(), 'Europe/Moscow'); now([timezone]) ``` -**Параметры** +**Аргументы** - `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) для возвращаемого значения (необязательно). [String](../../sql-reference/data-types/string.md) @@ -571,9 +572,9 @@ SELECT now('Europe/Moscow'); dateDiff('unit', startdate, enddate, [timezone]) ``` -**Параметры** +**Аргументы** -- `unit` — Единица измерения времени, в которой будет вычислена разница между `startdate` и `enddate`. [String](../syntax.md#syntax-string-literal). +- `unit` — единица измерения времени, в которой будет вычислена разница между `startdate` и `enddate`. [String](../syntax.md#syntax-string-literal). Поддерживаемые значения: @@ -706,6 +707,7 @@ formatDateTime(Time, Format\[, Timezone\]) Возвращает значение времени и даты в определенном вами формате. **Поля подстановки** + Используйте поля подстановки для того, чтобы определить шаблон для выводимой строки. В колонке «Пример» результат работы функции для времени `2018-01-02 22:33:44`. 
| Поле | Описание | Пример | diff --git a/docs/ru/sql-reference/functions/encoding-functions.md b/docs/ru/sql-reference/functions/encoding-functions.md index 8c3065e5a77..53a6e126f29 100644 --- a/docs/ru/sql-reference/functions/encoding-functions.md +++ b/docs/ru/sql-reference/functions/encoding-functions.md @@ -15,13 +15,13 @@ toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u043a\u043e\u0434\u char(number_1, [number_2, ..., number_n]); ``` -**Параметры** +**Аргументы** -- `number_1, number_2, ..., number_n` — Числовые аргументы, которые интерпретируются как целые числа. Типы: [Int](../../sql-reference/functions/encoding-functions.md), [Float](../../sql-reference/functions/encoding-functions.md). +- `number_1, number_2, ..., number_n` — числовые аргументы, которые интерпретируются как целые числа. Типы: [Int](../../sql-reference/functions/encoding-functions.md), [Float](../../sql-reference/functions/encoding-functions.md). **Возвращаемое значение** -- строка из соответствующих байт. +- Строка из соответствующих байт. Тип: `String`. 
@@ -30,10 +30,10 @@ char(number_1, [number_2, ..., number_n]); Запрос: ``` sql -SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello +SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello; ``` -Ответ: +Результат: ``` text ┌─hello─┐ @@ -49,7 +49,7 @@ SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello SELECT char(0xD0, 0xBF, 0xD1, 0x80, 0xD0, 0xB8, 0xD0, 0xB2, 0xD0, 0xB5, 0xD1, 0x82) AS hello; ``` -Ответ: +Результат: ``` text ┌─hello──┐ @@ -63,7 +63,7 @@ SELECT char(0xD0, 0xBF, 0xD1, 0x80, 0xD0, 0xB8, 0xD0, 0xB2, 0xD0, 0xB5, 0xD1, 0x SELECT char(0xE4, 0xBD, 0xA0, 0xE5, 0xA5, 0xBD) AS hello; ``` -Ответ: +Результат: ``` text ┌─hello─┐ diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index 0216a6b2356..2309aa85478 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -31,7 +31,7 @@ toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438 \u0434\u043b\u044f \u0448 encrypt('mode', 'plaintext', 'key' [, iv, aad]) ``` -**Параметры** +**Аргументы** - `mode` — режим шифрования. [String](../../sql-reference/data-types/string.md#string). - `plaintext` — текст, который будет зашифрован. [String](../../sql-reference/data-types/string.md#string). @@ -127,7 +127,7 @@ SELECT comment, hex(secret) FROM encryption_test WHERE comment LIKE '%gcm%'; aes_encrypt_mysql('mode', 'plaintext', 'key' [, iv]) ``` -**Параметры** +**Аргументы** - `mode` — режим шифрования. [String](../../sql-reference/data-types/string.md#string). - `plaintext` — текст, который будет зашифрован. [String](../../sql-reference/data-types/string.md#string). @@ -236,13 +236,13 @@ mysql> SELECT aes_encrypt('Secret', '123456789101213141516171819202122', 'iviviv decrypt('mode', 'ciphertext', 'key' [, iv, aad]) ``` -**Параметры** +**Аргументы** - `mode` — режим шифрования. [String](../../sql-reference/data-types/string.md#string). 
- `ciphertext` — зашифрованный текст, который будет расшифрован. [String](../../sql-reference/data-types/string.md#string). - `key` — ключ шифрования. [String](../../sql-reference/data-types/string.md#string). - `iv` — инициализирующий вектор. Обязателен для `-gcm` режимов, для остальных режимов опциональный. [String](../../sql-reference/data-types/string.md#string). -- `aad` — дополнительные аутентифицированные данные. Текст не будет расшифрован, если это значение неверно. Работает только с `-gcm` режимами. Для остальных вызовет исключение. [String](../../sql-reference/data-types/string.md#string). +- `aad` — дополнительные аутентифицированные данные. Текст не будет расшифрован, если это значение неверно. Работает только с `-gcm` режимами. Для остальных вызовет исключение. [String](../../sql-reference/data-types/string.md#string). **Возвращаемое значение** @@ -316,7 +316,7 @@ SELECT comment, decrypt('aes-256-cfb128', secret, '12345678910121314151617181920 aes_decrypt_mysql('mode', 'ciphertext', 'key' [, iv]) ``` -**Параметры** +**Аргументы** - `mode` — режим шифрования. [String](../../sql-reference/data-types/string.md#string). - `ciphertext` — зашифрованный текст, который будет расшифрован. [String](../../sql-reference/data-types/string.md#string). diff --git a/docs/ru/sql-reference/functions/ext-dict-functions.md b/docs/ru/sql-reference/functions/ext-dict-functions.md index 6054ed141d4..edc6282108b 100644 --- a/docs/ru/sql-reference/functions/ext-dict-functions.md +++ b/docs/ru/sql-reference/functions/ext-dict-functions.md @@ -16,7 +16,7 @@ dictGet('dict_name', 'attr_name', id_expr) dictGetOrDefault('dict_name', 'attr_name', id_expr, default_value_expr) ``` -**Параметры** +**Аргументы** - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). - `attr_name` — имя столбца словаря. [Строковый литерал](../syntax.md#syntax-string-literal). 
@@ -105,7 +105,7 @@ LIMIT 3 dictHas('dict_name', id) ``` -**Параметры** +**Аргументы** - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). - `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md) или [Tuple](../../sql-reference/functions/ext-dict-functions.md) в зависимости от конфигурации словаря. @@ -127,7 +127,7 @@ dictHas('dict_name', id) dictGetHierarchy('dict_name', key) ``` -**Параметры** +**Аргументы** - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). - `key` — значение ключа. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md). @@ -144,7 +144,7 @@ Type: [Array(UInt64)](../../sql-reference/functions/ext-dict-functions.md). `dictIsIn ('dict_name', child_id_expr, ancestor_id_expr)` -**Параметры** +**Аргументы** - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). - `child_id_expr` — ключ для проверки. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md). @@ -180,12 +180,12 @@ dictGet[Type]('dict_name', 'attr_name', id_expr) dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) ``` -**Параметры** +**Аргументы** - `dict_name` — имя словаря. [Строковый литерал](../syntax.md#syntax-string-literal). - `attr_name` — имя столбца словаря. [Строковый литерал](../syntax.md#syntax-string-literal). - `id_expr` — значение ключа словаря. [Выражение](../syntax.md#syntax-expressions), возвращающее значение типа [UInt64](../../sql-reference/functions/ext-dict-functions.md) или [Tuple](../../sql-reference/functions/ext-dict-functions.md) в зависимости от конфигурации словаря. 
-- `default_value_expr` — значение, возвращаемое в том случае, когда словарь не содержит строки с заданным ключом `id_expr`. [Выражение](../syntax.md#syntax-expressions) возвращающее значение с типом данных, сконфигурированным для атрибута `attr_name`. +- `default_value_expr` — значение, возвращаемое в том случае, когда словарь не содержит строки с заданным ключом `id_expr`. [Выражение](../syntax.md#syntax-expressions), возвращающее значение с типом данных, сконфигурированным для атрибута `attr_name`. **Возвращаемое значение** diff --git a/docs/ru/sql-reference/functions/functions-for-nulls.md b/docs/ru/sql-reference/functions/functions-for-nulls.md index 0db55847631..ed91af85473 100644 --- a/docs/ru/sql-reference/functions/functions-for-nulls.md +++ b/docs/ru/sql-reference/functions/functions-for-nulls.md @@ -15,7 +15,7 @@ isNull(x) Синоним: `ISNULL`. -**Параметры** +**Аргументы** - `x` — значение с не составным типом данных. @@ -38,7 +38,7 @@ isNull(x) Запрос ``` sql -SELECT x FROM t_null WHERE isNull(y) +SELECT x FROM t_null WHERE isNull(y); ``` ``` text @@ -55,7 +55,7 @@ SELECT x FROM t_null WHERE isNull(y) isNotNull(x) ``` -**Параметры** +**Аргументы** - `x` — значение с не составным типом данных. @@ -78,7 +78,7 @@ isNotNull(x) Запрос ``` sql -SELECT x FROM t_null WHERE isNotNull(y) +SELECT x FROM t_null WHERE isNotNull(y); ``` ``` text @@ -95,7 +95,7 @@ SELECT x FROM t_null WHERE isNotNull(y) coalesce(x,...) ``` -**Параметры** +**Аргументы** - Произвольное количество параметров не составного типа. Все параметры должны быть совместимы по типу данных. @@ -120,7 +120,7 @@ coalesce(x,...) 
Получим из адресной книги первый доступный способ связаться с клиентом: ``` sql -SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook +SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook; ``` ``` text @@ -138,7 +138,7 @@ SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook ifNull(x,alt) ``` -**Параметры** +**Аргументы** - `x` — значение для проверки на `NULL`, - `alt` — значение, которое функция вернёт, если `x` — `NULL`. @@ -151,7 +151,7 @@ ifNull(x,alt) **Пример** ``` sql -SELECT ifNull('a', 'b') +SELECT ifNull('a', 'b'); ``` ``` text @@ -161,7 +161,7 @@ SELECT ifNull('a', 'b') ``` ``` sql -SELECT ifNull(NULL, 'b') +SELECT ifNull(NULL, 'b'); ``` ``` text @@ -178,7 +178,7 @@ SELECT ifNull(NULL, 'b') nullIf(x, y) ``` -**Параметры** +**Аргументы** `x`, `y` — значения для сравнивания. Они должны быть совместимых типов, иначе ClickHouse сгенерирует исключение. @@ -190,7 +190,7 @@ nullIf(x, y) **Пример** ``` sql -SELECT nullIf(1, 1) +SELECT nullIf(1, 1); ``` ``` text @@ -200,7 +200,7 @@ SELECT nullIf(1, 1) ``` ``` sql -SELECT nullIf(1, 2) +SELECT nullIf(1, 2); ``` ``` text @@ -217,7 +217,7 @@ SELECT nullIf(1, 2) assumeNotNull(x) ``` -**Параметры** +**Аргументы** - `x` — исходное значение. @@ -231,7 +231,7 @@ assumeNotNull(x) Рассмотрим таблицу `t_null`. ``` sql -SHOW CREATE TABLE t_null +SHOW CREATE TABLE t_null; ``` ``` text @@ -250,7 +250,7 @@ SHOW CREATE TABLE t_null Применим функцию `assumeNotNull` к столбцу `y`. ``` sql -SELECT assumeNotNull(y) FROM t_null +SELECT assumeNotNull(y) FROM t_null; ``` ``` text @@ -261,7 +261,7 @@ SELECT assumeNotNull(y) FROM t_null ``` ``` sql -SELECT toTypeName(assumeNotNull(y)) FROM t_null +SELECT toTypeName(assumeNotNull(y)) FROM t_null; ``` ``` text @@ -279,7 +279,7 @@ SELECT toTypeName(assumeNotNull(y)) FROM t_null toNullable(x) ``` -**Параметры** +**Аргументы** - `x` — значение произвольного не составного типа. 
@@ -290,7 +290,7 @@ toNullable(x) **Пример** ``` sql -SELECT toTypeName(10) +SELECT toTypeName(10); ``` ``` text @@ -300,7 +300,7 @@ SELECT toTypeName(10) ``` ``` sql -SELECT toTypeName(toNullable(10)) +SELECT toTypeName(toNullable(10)); ``` ``` text diff --git a/docs/ru/sql-reference/functions/geo/geohash.md b/docs/ru/sql-reference/functions/geo/geohash.md index 38c64f11b10..2559e26fcd2 100644 --- a/docs/ru/sql-reference/functions/geo/geohash.md +++ b/docs/ru/sql-reference/functions/geo/geohash.md @@ -29,7 +29,7 @@ geohashEncode(longitude, latitude, [precision]) **Пример** ``` sql -SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res +SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res; ``` ``` text @@ -57,7 +57,7 @@ geohashDecode(geohash_string) **Пример** ``` sql -SELECT geohashDecode('ezs42') AS res +SELECT geohashDecode('ezs42') AS res; ``` ``` text @@ -76,13 +76,13 @@ SELECT geohashDecode('ezs42') AS res geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision) ``` -**Параметры** +**Аргументы** - `longitude_min` — минимальная долгота. Диапазон возможных значений: `[-180°, 180°]`. Тип данных: [Float](../../../sql-reference/data-types/float.md)). -- `latitude_min` - минимальная широта. Диапазон возможных значений: `[-90°, 90°]`. Тип данных: [Float](../../../sql-reference/data-types/float.md). -- `longitude_max` - максимальная долгота. Диапазон возможных значений: `[-180°, 180°]`. Тип данных: [Float](../../../sql-reference/data-types/float.md). -- `latitude_max` - максимальная широта. Диапазон возможных значений: `[-90°, 90°]`. Тип данных: [Float](../../../sql-reference/data-types/float.md). -- `precision` - точность geohash. Диапазон возможных значений: `[1, 12]`. Тип данных: [UInt8](../../../sql-reference/data-types/int-uint.md). +- `latitude_min` — минимальная широта. Диапазон возможных значений: `[-90°, 90°]`. Тип данных: [Float](../../../sql-reference/data-types/float.md). 
+- `longitude_max` — максимальная долгота. Диапазон возможных значений: `[-180°, 180°]`. Тип данных: [Float](../../../sql-reference/data-types/float.md). +- `latitude_max` — максимальная широта. Диапазон возможных значений: `[-90°, 90°]`. Тип данных: [Float](../../../sql-reference/data-types/float.md). +- `precision` — точность geohash. Диапазон возможных значений: `[1, 12]`. Тип данных: [UInt8](../../../sql-reference/data-types/int-uint.md). !!! info "Замечание" Все передаваемые координаты должны быть одного и того же типа: либо `Float32`, либо `Float64`. @@ -102,8 +102,9 @@ geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precisi Запрос: ``` sql -SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos +SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos; ``` + Результат: ``` text diff --git a/docs/ru/sql-reference/functions/geo/h3.md b/docs/ru/sql-reference/functions/geo/h3.md index 69d06b5dfa6..55da8864cf2 100644 --- a/docs/ru/sql-reference/functions/geo/h3.md +++ b/docs/ru/sql-reference/functions/geo/h3.md @@ -38,8 +38,9 @@ h3IsValid(h3index) Запрос: ``` sql -SELECT h3IsValid(630814730351855103) as h3IsValid +SELECT h3IsValid(630814730351855103) as h3IsValid; ``` + Результат: ``` text @@ -74,8 +75,9 @@ h3GetResolution(h3index) Запрос: ``` sql -SELECT h3GetResolution(639821929606596015) as resolution +SELECT h3GetResolution(639821929606596015) as resolution; ``` + Результат: ``` text @@ -107,8 +109,9 @@ h3EdgeAngle(resolution) Запрос: ``` sql -SELECT h3EdgeAngle(10) as edgeAngle +SELECT h3EdgeAngle(10) as edgeAngle; ``` + Результат: ``` text @@ -140,8 +143,9 @@ h3EdgeLengthM(resolution) Запрос: ``` sql -SELECT h3EdgeLengthM(15) as edgeLengthM +SELECT h3EdgeLengthM(15) as edgeLengthM; ``` + Результат: ``` text @@ -160,7 +164,7 @@ SELECT h3EdgeLengthM(15) as edgeLengthM geoToH3(lon, lat, resolution) ``` -**Параметры** +**Аргументы** - `lon` — географическая долгота. 
Тип данных — [Float64](../../../sql-reference/data-types/float.md). - `lat` — географическая широта. Тип данных — [Float64](../../../sql-reference/data-types/float.md). @@ -178,10 +182,10 @@ geoToH3(lon, lat, resolution) Запрос: ``` sql -SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index +SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index; ``` -Ответ: +Результат: ``` text ┌────────────h3Index─┐ @@ -199,7 +203,7 @@ SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index h3kRing(h3index, k) ``` -**Параметры** +**Аргументы** - `h3index` — идентификатор шестигранника. Тип данных: [UInt64](../../../sql-reference/data-types/int-uint.md). - `k` — радиус. Тип данных: [целое число](../../../sql-reference/data-types/int-uint.md) @@ -215,8 +219,9 @@ h3kRing(h3index, k) Запрос: ``` sql -SELECT arrayJoin(h3kRing(644325529233966508, 1)) AS h3index +SELECT arrayJoin(h3kRing(644325529233966508, 1)) AS h3index; ``` + Результат: ``` text @@ -311,7 +316,7 @@ SELECT h3HexAreaM2(13) as area; h3IndexesAreNeighbors(index1, index2) ``` -**Параметры** +**Аргументы** - `index1` — индекс шестиугольной ячейки. Тип: [UInt64](../../../sql-reference/data-types/int-uint.md). - `index2` — индекс шестиугольной ячейки. Тип: [UInt64](../../../sql-reference/data-types/int-uint.md). @@ -349,7 +354,7 @@ SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359) AS n; h3ToChildren(index, resolution) ``` -**Параметры** +**Аргументы** - `index` — индекс шестиугольной ячейки. Тип: [UInt64](../../../sql-reference/data-types/int-uint.md). - `resolution` — разрешение. Диапазон: `[0, 15]`. Тип: [UInt8](../../../sql-reference/data-types/int-uint.md). @@ -386,7 +391,7 @@ SELECT h3ToChildren(599405990164561919, 6) AS children; h3ToParent(index, resolution) ``` -**Параметры** +**Аргументы** - `index` — индекс шестиугольной ячейки. Тип: [UInt64](../../../sql-reference/data-types/int-uint.md). - `resolution` — разрешение. Диапазон: `[0, 15]`. 
Тип: [UInt8](../../../sql-reference/data-types/int-uint.md). diff --git a/docs/ru/sql-reference/functions/hash-functions.md b/docs/ru/sql-reference/functions/hash-functions.md index f7820889ea9..31229f49889 100644 --- a/docs/ru/sql-reference/functions/hash-functions.md +++ b/docs/ru/sql-reference/functions/hash-functions.md @@ -18,9 +18,9 @@ halfMD5(par1, ...) Функция относительно медленная (5 миллионов коротких строк в секунду на ядро процессора). По возможности, используйте функцию [sipHash64](#hash_functions-siphash64) вместо неё. -**Параметры** +**Аргументы** -Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). +Функция принимает переменное число входных параметров. Аргументы могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). **Возвращаемое значение** @@ -29,7 +29,7 @@ halfMD5(par1, ...) **Пример** ``` sql -SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type +SELECT halfMD5(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS halfMD5hash, toTypeName(halfMD5hash) AS type; ``` ``` text @@ -61,9 +61,9 @@ sipHash64(par1,...) 3. Затем функция принимает хэш-значение, вычисленное на предыдущем шаге, и третий элемент исходного хэш-массива, и вычисляет хэш для массива из них. 4. Предыдущий шаг повторяется для всех остальных элементов исходного хэш-массива. -**Параметры** +**Аргументы** -Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). +Функция принимает переменное число входных параметров. Аргументы могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). **Возвращаемое значение** @@ -72,7 +72,7 @@ sipHash64(par1,...) 
**Пример** ``` sql -SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type +SELECT sipHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS SipHash, toTypeName(SipHash) AS type; ``` ``` text @@ -97,9 +97,9 @@ cityHash64(par1,...) Это не криптографическая хэш-функция. Она использует CityHash алгоритм для строковых параметров и зависящую от реализации быструю некриптографическую хэш-функцию для параметров с другими типами данных. Функция использует комбинатор CityHash для получения конечных результатов. -**Параметры** +**Аргументы** -Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). +Функция принимает переменное число входных параметров. Аргументы могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). **Возвращаемое значение** @@ -110,7 +110,7 @@ cityHash64(par1,...) Пример вызова: ``` sql -SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type +SELECT cityHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS CityHash, toTypeName(CityHash) AS type; ``` ``` text @@ -166,9 +166,9 @@ farmHash64(par1, ...) Эти функции используют методы `Fingerprint64` и `Hash64` из всех [доступных методов](https://github.com/google/farmhash/blob/master/src/farmhash.h). -**Параметры** +**Аргументы** -Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). +Функция принимает переменное число входных параметров. Аргументы могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). **Возвращаемое значение** @@ -177,7 +177,7 @@ farmHash64(par1, ...) 
**Пример** ``` sql -SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type +SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS FarmHash, toTypeName(FarmHash) AS type; ``` ``` text @@ -191,7 +191,7 @@ SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:0 Вычисляет [JavaHash](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452) от строки. `JavaHash` не отличается ни скоростью, ни качеством, поэтому эту функцию следует считать устаревшей. Используйте эту функцию, если вам необходимо получить значение хэша по такому же алгоритму. ``` sql -SELECT javaHash(''); +SELECT javaHash('') ``` **Возвращаемое значение** @@ -208,7 +208,7 @@ SELECT javaHash(''); SELECT javaHash('Hello, world!'); ``` -Ответ: +Результат: ``` text ┌─javaHash('Hello, world!')─┐ @@ -226,7 +226,7 @@ SELECT javaHash('Hello, world!'); javaHashUTF16LE(stringUtf16le) ``` -**Параметры** +**Аргументы** - `stringUtf16le` — строка в `UTF-16LE`. @@ -243,10 +243,10 @@ javaHashUTF16LE(stringUtf16le) Запрос: ``` sql -SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')) +SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')); ``` -Ответ: +Результат: ``` text ┌─javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le'))─┐ @@ -259,7 +259,7 @@ SELECT javaHashUTF16LE(convertCharset('test', 'utf-8', 'utf-16le')) Вычисляет `HiveHash` от строки. ``` sql -SELECT hiveHash(''); +SELECT hiveHash('') ``` `HiveHash` — это результат [JavaHash](#hash_functions-javahash) с обнулённым битом знака числа. Функция используется в [Apache Hive](https://en.wikipedia.org/wiki/Apache_Hive) вплоть до версии 3.0. @@ -278,7 +278,7 @@ SELECT hiveHash(''); SELECT hiveHash('Hello, world!'); ``` -Ответ: +Результат: ``` text ┌─hiveHash('Hello, world!')─┐ @@ -294,9 +294,9 @@ SELECT hiveHash('Hello, world!'); metroHash64(par1, ...) 
``` -**Параметры** +**Аргументы** -Функция принимает переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). +Функция принимает переменное число входных параметров. Аргументы могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). **Возвращаемое значение** @@ -305,7 +305,7 @@ metroHash64(par1, ...) **Пример** ``` sql -SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type +SELECT metroHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MetroHash, toTypeName(MetroHash) AS type; ``` ``` text @@ -329,9 +329,9 @@ murmurHash2_32(par1, ...) murmurHash2_64(par1, ...) ``` -**Параметры** +**Аргументы** -Обе функции принимают переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). +Обе функции принимают переменное число входных параметров. Аргументы могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). **Возвращаемое значение** @@ -341,7 +341,7 @@ murmurHash2_64(par1, ...) **Пример** ``` sql -SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type +SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash2, toTypeName(MurmurHash2) AS type; ``` ``` text @@ -360,9 +360,9 @@ SELECT murmurHash2_64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23: gccMurmurHash(par1, ...); ``` -**Параметры** +**Аргументы** -- `par1, ...` — Переменное число параметров. Каждый параметр может быть любого из [поддерживаемых типов данных](../../sql-reference/data-types/index.md). +- `par1, ...` — переменное число параметров. 
Каждый параметр может быть любого из [поддерживаемых типов данных](../../sql-reference/data-types/index.md). **Возвращаемое значение** @@ -397,9 +397,9 @@ murmurHash3_32(par1, ...) murmurHash3_64(par1, ...) ``` -**Параметры** +**Аргументы** -Обе функции принимают переменное число входных параметров. Параметры могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). +Обе функции принимают переменное число входных параметров. Аргументы могут быть любого [поддерживаемого типа данных](../../sql-reference/functions/hash-functions.md). **Возвращаемое значение** @@ -409,7 +409,7 @@ murmurHash3_64(par1, ...) **Пример** ``` sql -SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type +SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` ``` text @@ -426,9 +426,9 @@ SELECT murmurHash3_32(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23: murmurHash3_128( expr ) ``` -**Параметры** +**Аргументы** -- `expr` — [выражение](../syntax.md#syntax-expressions) возвращающее значение типа[String](../../sql-reference/functions/hash-functions.md). +- `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа[String](../../sql-reference/functions/hash-functions.md). **Возвращаемое значение** @@ -437,7 +437,7 @@ murmurHash3_128( expr ) **Пример** ``` sql -SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type +SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) AS type; ``` ``` text @@ -451,11 +451,11 @@ SELECT murmurHash3_128('example_string') AS MurmurHash3, toTypeName(MurmurHash3) Вычисляет `xxHash` от строки. Предлагается в двух вариантах: 32 и 64 бита. 
``` sql -SELECT xxHash32(''); +SELECT xxHash32('') OR -SELECT xxHash64(''); +SELECT xxHash64('') ``` **Возвращаемое значение** @@ -472,7 +472,7 @@ SELECT xxHash64(''); SELECT xxHash32('Hello, world!'); ``` -Ответ: +Результат: ``` text ┌─xxHash32('Hello, world!')─┐ diff --git a/docs/ru/sql-reference/functions/introspection.md b/docs/ru/sql-reference/functions/introspection.md index 00dd660bc16..7177f947f8a 100644 --- a/docs/ru/sql-reference/functions/introspection.md +++ b/docs/ru/sql-reference/functions/introspection.md @@ -32,7 +32,7 @@ ClickHouse сохраняет отчеты профилировщика в [жу addressToLine(address_of_binary_instruction) ``` -**Параметры** +**Аргументы** - `address_of_binary_instruction` ([Тип UInt64](../../sql-reference/functions/introspection.md))- Адрес инструкции в запущенном процессе. @@ -53,13 +53,13 @@ addressToLine(address_of_binary_instruction) Включение функций самоанализа: ``` sql -SET allow_introspection_functions=1 +SET allow_introspection_functions=1; ``` Выбор первой строки из списка `trace_log` системная таблица: ``` sql -SELECT * FROM system.trace_log LIMIT 1 \G +SELECT * FROM system.trace_log LIMIT 1 \G; ``` ``` text @@ -79,7 +79,7 @@ trace: [140658411141617,94784174532828,94784076370703,94784076 Получение имени файла исходного кода и номера строки для одного адреса: ``` sql -SELECT addressToLine(94784076370703) \G +SELECT addressToLine(94784076370703) \G; ``` ``` text @@ -123,9 +123,9 @@ trace_source_code_lines: /lib/x86_64-linux-gnu/libpthread-2.27.so addressToSymbol(address_of_binary_instruction) ``` -**Параметры** +**Аргументы** -- `address_of_binary_instruction` ([Тип uint64](../../sql-reference/functions/introspection.md)) — Адрес инструкции в запущенном процессе. +- `address_of_binary_instruction` ([Тип uint64](../../sql-reference/functions/introspection.md)) — адрес инструкции в запущенном процессе. 
**Возвращаемое значение** @@ -139,13 +139,13 @@ addressToSymbol(address_of_binary_instruction) Включение функций самоанализа: ``` sql -SET allow_introspection_functions=1 +SET allow_introspection_functions=1; ``` Выбор первой строки из списка `trace_log` системная таблица: ``` sql -SELECT * FROM system.trace_log LIMIT 1 \G +SELECT * FROM system.trace_log LIMIT 1 \G; ``` ``` text @@ -165,7 +165,7 @@ trace: [94138803686098,94138815010911,94138815096522,94138815101224,9413 Получение символа для одного адреса: ``` sql -SELECT addressToSymbol(94138803686098) \G +SELECT addressToSymbol(94138803686098) \G; ``` ``` text @@ -220,9 +220,9 @@ clone demangle(symbol) ``` -**Параметры** +**Аргументы** -- `symbol` ([Строка](../../sql-reference/functions/introspection.md)) - Символ из объектного файла. +- `symbol` ([Строка](../../sql-reference/functions/introspection.md)) - символ из объектного файла. **Возвращаемое значение** @@ -236,13 +236,13 @@ demangle(symbol) Включение функций самоанализа: ``` sql -SET allow_introspection_functions=1 +SET allow_introspection_functions=1; ``` Выбор первой строки из списка `trace_log` системная таблица: ``` sql -SELECT * FROM system.trace_log LIMIT 1 \G +SELECT * FROM system.trace_log LIMIT 1 \G; ``` ``` text @@ -262,7 +262,7 @@ trace: [94138803686098,94138815010911,94138815096522,94138815101224,9413 Получение имени функции для одного адреса: ``` sql -SELECT demangle(addressToSymbol(94138803686098)) \G +SELECT demangle(addressToSymbol(94138803686098)) \G; ``` ``` text @@ -336,6 +336,7 @@ SELECT tid(); │ 3878 │ └───────┘ ``` + ## logTrace {#logtrace} Выводит сообщение в лог сервера для каждого [Block](https://clickhouse.tech/docs/ru/development/architecture/#block). @@ -346,7 +347,7 @@ SELECT tid(); logTrace('message') ``` -**Параметры** +**Аргументы** - `message` — сообщение, которое отправляется в серверный лог. [String](../../sql-reference/data-types/string.md#string). @@ -354,7 +355,7 @@ logTrace('message') - Всегда возвращает 0. 
-**Example** +**Пример** Запрос: @@ -370,4 +371,4 @@ SELECT logTrace('logTrace message'); └──────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/introspection/) \ No newline at end of file +[Original article](https://clickhouse.tech/docs/en/query_language/functions/introspection/) diff --git a/docs/ru/sql-reference/functions/ip-address-functions.md b/docs/ru/sql-reference/functions/ip-address-functions.md index bc48419473d..aa0ff54069c 100644 --- a/docs/ru/sql-reference/functions/ip-address-functions.md +++ b/docs/ru/sql-reference/functions/ip-address-functions.md @@ -174,7 +174,7 @@ SELECT addr, cutIPv6(IPv6StringToNum(addr), 0, 0) FROM (SELECT ['notaddress', '1 Принимает число типа `UInt32`. Интерпретирует его, как IPv4-адрес в [big endian](https://en.wikipedia.org/wiki/Endianness). Возвращает значение `FixedString(16)`, содержащее адрес IPv6 в двоичном формате. Примеры: ``` sql -SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr +SELECT IPv6NumToString(IPv4ToIPv6(IPv4StringToNum('192.168.0.1'))) AS addr; ``` ``` text @@ -207,7 +207,7 @@ SELECT Принимает на вход IPv4 и значение `UInt8`, содержащее [CIDR](https://ru.wikipedia.org/wiki/Бесклассовая_адресация). Возвращает кортеж с двумя IPv4, содержащими нижний и более высокий диапазон подсети. ``` sql -SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16) +SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16); ``` ``` text @@ -221,7 +221,7 @@ SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 16) Принимает на вход IPv6 и значение `UInt8`, содержащее CIDR. Возвращает кортеж с двумя IPv6, содержащими нижний и более высокий диапазон подсети. ``` sql -SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32) +SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); ``` ``` text @@ -328,7 +328,7 @@ SELECT toIPv6('127.0.0.1'); isIPv4String(string) ``` -**Параметры** +**Аргументы** - `string` — IP адрес. 
[String](../../sql-reference/data-types/string.md). @@ -343,7 +343,7 @@ isIPv4String(string) Запрос: ```sql -SELECT addr, isIPv4String(addr) FROM ( SELECT ['0.0.0.0', '127.0.0.1', '::ffff:127.0.0.1'] AS addr ) ARRAY JOIN addr +SELECT addr, isIPv4String(addr) FROM ( SELECT ['0.0.0.0', '127.0.0.1', '::ffff:127.0.0.1'] AS addr ) ARRAY JOIN addr; ``` Результат: @@ -366,7 +366,7 @@ SELECT addr, isIPv4String(addr) FROM ( SELECT ['0.0.0.0', '127.0.0.1', '::ffff:1 isIPv6String(string) ``` -**Параметры** +**Аргументы** - `string` — IP адрес. [String](../../sql-reference/data-types/string.md). @@ -381,7 +381,7 @@ isIPv6String(string) Запрос: ``` sql -SELECT addr, isIPv6String(addr) FROM ( SELECT ['::', '1111::ffff', '::ffff:127.0.0.1', '127.0.0.1'] AS addr ) ARRAY JOIN addr +SELECT addr, isIPv6String(addr) FROM ( SELECT ['::', '1111::ffff', '::ffff:127.0.0.1', '127.0.0.1'] AS addr ) ARRAY JOIN addr; ``` Результат: diff --git a/docs/ru/sql-reference/functions/json-functions.md b/docs/ru/sql-reference/functions/json-functions.md index 69b8f8f98f5..770b8dec37c 100644 --- a/docs/ru/sql-reference/functions/json-functions.md +++ b/docs/ru/sql-reference/functions/json-functions.md @@ -211,7 +211,7 @@ SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8') Пример: ``` sql -SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]' +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, 200.0, 300]'; ``` ## JSONExtractArrayRaw(json\[, indices_or_keys\]…) {#jsonextractarrayrawjson-indices-or-keys} @@ -223,7 +223,7 @@ SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b') = '[-100, Пример: ``` sql -SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']' +SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') = ['-100', '200.0', '"hello"']'; ``` ## JSONExtractKeysAndValuesRaw {#json-extract-keys-and-values-raw} 
@@ -236,29 +236,28 @@ SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, "hello"]}', 'b') =
JSONExtractKeysAndValuesRaw(json[, p, a, t, h])
```

-**Параметры**
+**Аргументы**

-- `json` — [Строка](../data-types/string.md), содержащая валидный JSON.
-- `p, a, t, h` — Индексы или ключи, разделенные запятыми, которые указывают путь к внутреннему полю во вложенном объекте JSON. Каждый аргумент может быть либо [строкой](../data-types/string.md) для получения поля по ключу, либо [целым числом](../data-types/int-uint.md) для получения N-го поля (индексирование начинается с 1, отрицательные числа используются для отсчета с конца). Если параметр не задан, весь JSON парсится как объект верхнего уровня. Необязательный параметр.
+- `json` — [строка](../data-types/string.md), содержащая валидный JSON.
+- `p, a, t, h` — индексы или ключи, разделенные запятыми, которые указывают путь к внутреннему полю во вложенном объекте JSON. Каждый аргумент может быть либо [строкой](../data-types/string.md) для получения поля по ключу, либо [целым числом](../data-types/int-uint.md) для получения N-го поля (индексирование начинается с 1, отрицательные числа используются для отсчета с конца). Если параметр не задан, весь JSON парсится как объект верхнего уровня. Необязательный параметр.

**Возвращаемые значения**

-- Массив с кортежами `('key', 'value')`. Члены кортежа — строки.
+- Массив с кортежами `('key', 'value')`. Члены кортежа — строки.

-- Пустой массив, если заданный объект не существует или входные данные не валидный JSON.
+- Пустой массив, если заданный объект не существует или входные данные не валидный JSON.

-Тип: Type: [Array](../data-types/array.md)([Tuple](../data-types/tuple.md)([String](../data-types/string.md), [String](../data-types/string.md)).
-.
+Тип: [Array](../data-types/array.md)([Tuple](../data-types/tuple.md)([String](../data-types/string.md), [String](../data-types/string.md))).
**Примеры** Запрос: ``` sql -SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}') +SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}'); ``` -Ответ: +Результат: ``` text ┌─JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}')─┐ @@ -269,10 +268,10 @@ SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello" Запрос: ``` sql -SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', 'b') +SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', 'b'); ``` -Ответ: +Результат: ``` text ┌─JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', 'b')─┐ @@ -283,10 +282,10 @@ SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello" Запрос: ``` sql -SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', -1, 'c') +SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', -1, 'c'); ``` -Ответ: +Результат: ``` text ┌─JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello", "f": "world"}}}', -1, 'c')─┐ diff --git a/docs/ru/sql-reference/functions/machine-learning-functions.md b/docs/ru/sql-reference/functions/machine-learning-functions.md index 2ffdfd05613..7cba6ffccc2 100644 --- a/docs/ru/sql-reference/functions/machine-learning-functions.md +++ b/docs/ru/sql-reference/functions/machine-learning-functions.md @@ -27,7 +27,7 @@ toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u043c\u0430\u0448\u bayesAB(distribution_name, higher_is_better, variant_names, x, y) ``` -**Параметры** +**Аргументы** - `distribution_name` — вероятностное распределение. [String](../../sql-reference/data-types/string.md). 
Возможные значения: @@ -36,14 +36,14 @@ bayesAB(distribution_name, higher_is_better, variant_names, x, y) - `higher_is_better` — способ определения предпочтений. [Boolean](../../sql-reference/data-types/boolean.md). Возможные значения: - - `0` - чем меньше значение, тем лучше - - `1` - чем больше значение, тем лучше + - `0` — чем меньше значение, тем лучше + - `1` — чем больше значение, тем лучше -- `variant_names` - массив, содержащий названия вариантов. [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). +- `variant_names` — массив, содержащий названия вариантов. [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). -- `x` - массив, содержащий число проведенных тестов (испытаний) для каждого варианта. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)). +- `x` — массив, содержащий число проведенных тестов (испытаний) для каждого варианта. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)). -- `y` - массив, содержащий число успешных тестов (испытаний) для каждого варианта. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)). +- `y` — массив, содержащий число успешных тестов (испытаний) для каждого варианта. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)). !!! note "Замечание" Все три массива должны иметь одинаковый размер. Все значения `x` и `y` должны быть неотрицательными числами (константами). Значение `y` не может превышать соответствующее значение `x`. 
@@ -51,8 +51,8 @@ bayesAB(distribution_name, higher_is_better, variant_names, x, y) **Возвращаемые значения** Для каждого варианта рассчитываются: -- `beats_control` - вероятность, что данный вариант превосходит контрольный в долгосрочной перспективе -- `to_be_best` - вероятность, что данный вариант является лучшим в долгосрочной перспективе +- `beats_control` — вероятность, что данный вариант превосходит контрольный в долгосрочной перспективе +- `to_be_best` — вероятность, что данный вариант является лучшим в долгосрочной перспективе Тип: JSON. diff --git a/docs/ru/sql-reference/functions/math-functions.md b/docs/ru/sql-reference/functions/math-functions.md index 2e57aca6a0a..b78fb7223a9 100644 --- a/docs/ru/sql-reference/functions/math-functions.md +++ b/docs/ru/sql-reference/functions/math-functions.md @@ -54,7 +54,7 @@ toc_title: "\u041c\u0430\u0442\u0435\u043c\u0430\u0442\u0438\u0447\u0435\u0441\u Пример (правило трёх сигм): ``` sql -SELECT erf(3 / sqrt(2)) +SELECT erf(3 / sqrt(2)); ``` ``` text @@ -113,7 +113,7 @@ SELECT erf(3 / sqrt(2)) cosh(x) ``` -**Параметры** +**Аргументы** - `x` — угол в радианах. Значения из интервала: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64). @@ -149,7 +149,7 @@ SELECT cosh(0); acosh(x) ``` -**Параметры** +**Аргументы** - `x` — гиперболический косинус угла. Значения из интервала: `1 <= x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64). @@ -189,7 +189,7 @@ SELECT acosh(1); sinh(x) ``` -**Параметры** +**Аргументы** - `x` — угол в радианах. Значения из интервала: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64). @@ -225,7 +225,7 @@ SELECT sinh(0); asinh(x) ``` -**Параметры** +**Аргументы** - `x` — гиперболический синус угла. Значения из интервала: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64). 
@@ -265,7 +265,7 @@ SELECT asinh(0); atanh(x) ``` -**Параметры** +**Аргументы** - `x` — гиперболический тангенс угла. Значения из интервала: `–1 < x < 1`. [Float64](../../sql-reference/data-types/float.md#float32-float64). @@ -301,7 +301,7 @@ SELECT atanh(0); atan2(y, x) ``` -**Параметры** +**Аргументы** - `y` — координата y точки, в которую проведена линия. [Float64](../../sql-reference/data-types/float.md#float32-float64). - `x` — координата х точки, в которую проведена линия. [Float64](../../sql-reference/data-types/float.md#float32-float64). @@ -338,7 +338,7 @@ SELECT atan2(1, 1); hypot(x, y) ``` -**Параметры** +**Аргументы** - `x` — первый катет прямоугольного треугольника. [Float64](../../sql-reference/data-types/float.md#float32-float64). - `y` — второй катет прямоугольного треугольника. [Float64](../../sql-reference/data-types/float.md#float32-float64). @@ -375,7 +375,7 @@ SELECT hypot(1, 1); log1p(x) ``` -**Параметры** +**Аргументы** - `x` — значения из интервала: `-1 < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64). diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index 19494c0aa10..061824498dc 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -16,16 +16,16 @@ toc_title: "\u041f\u0440\u043e\u0447\u0438\u0435\u0020\u0444\u0443\u043d\u043a\u **Синтаксис** ```sql -getMacro(name); +getMacro(name) ``` -**Параметры** +**Аргументы** -- `name` — Имя, которое необходимо получить из секции `macros`. [String](../../sql-reference/data-types/string.md#string). +- `name` — имя, которое необходимо получить из секции `macros`. [String](../../sql-reference/data-types/string.md#string). **Возвращаемое значение** -- Значение по указанному имени. +- Значение по указанному имени. Тип: [String](../../sql-reference/data-types/string.md). 
@@ -66,7 +66,6 @@ WHERE macro = 'test' └───────┴──────────────┘ ``` - ## FQDN {#fqdn} Возвращает полное имя домена. @@ -74,7 +73,7 @@ WHERE macro = 'test' **Синтаксис** ``` sql -fqdn(); +fqdn() ``` Эта функция регистронезависимая. @@ -93,7 +92,7 @@ fqdn(); SELECT FQDN(); ``` -Ответ: +Результат: ``` text ┌─FQDN()──────────────────────────┐ @@ -109,9 +108,9 @@ SELECT FQDN(); basename( expr ) ``` -**Параметры** +**Аргументы** -- `expr` — Выражение, возвращающее значение типа [String](../../sql-reference/functions/other-functions.md). В результирующем значении все бэкслэши должны быть экранированы. +- `expr` — выражение, возвращающее значение типа [String](../../sql-reference/functions/other-functions.md). В результирующем значении все бэкслэши должны быть экранированы. **Возвращаемое значение** @@ -126,7 +125,7 @@ basename( expr ) **Пример** ``` sql -SELECT 'some/long/path/to/file' AS a, basename(a) +SELECT 'some/long/path/to/file' AS a, basename(a); ``` ``` text @@ -136,7 +135,7 @@ SELECT 'some/long/path/to/file' AS a, basename(a) ``` ``` sql -SELECT 'some\\long\\path\\to\\file' AS a, basename(a) +SELECT 'some\\long\\path\\to\\file' AS a, basename(a); ``` ``` text @@ -146,7 +145,7 @@ SELECT 'some\\long\\path\\to\\file' AS a, basename(a) ``` ``` sql -SELECT 'some-file-name' AS a, basename(a) +SELECT 'some-file-name' AS a, basename(a); ``` ``` text @@ -193,7 +192,7 @@ SELECT visibleWidth(NULL) byteSize(argument [, ...]) ``` -**Параметры** +**Аргументы** - `argument` — значение. 
@@ -246,7 +245,7 @@ INSERT INTO test VALUES(1, 8, 16, 32, 64, -8, -16, -32, -64, 32.32, 64.64); SELECT key, byteSize(u8) AS `byteSize(UInt8)`, byteSize(u16) AS `byteSize(UInt16)`, byteSize(u32) AS `byteSize(UInt32)`, byteSize(u64) AS `byteSize(UInt64)`, byteSize(i8) AS `byteSize(Int8)`, byteSize(i16) AS `byteSize(Int16)`, byteSize(i32) AS `byteSize(Int32)`, byteSize(i64) AS `byteSize(Int64)`, byteSize(f32) AS `byteSize(Float32)`, byteSize(f64) AS `byteSize(Float64)` FROM test ORDER BY key ASC FORMAT Vertical; ``` -Result: +Результат: ``` text Row 1: @@ -324,7 +323,7 @@ SELECT currentUser(); SELECT currentUser(); ``` -Ответ: +Результат: ``` text ┌─currentUser()─┐ @@ -346,14 +345,14 @@ SELECT currentUser(); isConstant(x) ``` -**Параметры** +**Аргументы** -- `x` — Выражение для проверки. +- `x` — выражение для проверки. **Возвращаемые значения** -- `1` — Выражение `x` является константным. -- `0` — Выражение `x` не является константным. +- `1` — выражение `x` является константным. +- `0` — выражение `x` не является константным. Тип: [UInt8](../data-types/int-uint.md). @@ -362,7 +361,7 @@ isConstant(x) Запрос: ```sql -SELECT isConstant(x + 1) FROM (SELECT 43 AS x) +SELECT isConstant(x + 1) FROM (SELECT 43 AS x); ``` Результат: @@ -376,7 +375,7 @@ SELECT isConstant(x + 1) FROM (SELECT 43 AS x) Запрос: ```sql -WITH 3.14 AS pi SELECT isConstant(cos(pi)) +WITH 3.14 AS pi SELECT isConstant(cos(pi)); ``` Результат: @@ -413,10 +412,10 @@ SELECT isConstant(number) FROM numbers(1) ifNotFinite(x,y) -**Параметры** +**Аргументы** -- `x` — Значение, которое нужно проверить на бесконечность. Тип: [Float\*](../../sql-reference/functions/other-functions.md). -- `y` — Запасное значение. Тип: [Float\*](../../sql-reference/functions/other-functions.md). +- `x` — значение, которое нужно проверить на бесконечность. Тип: [Float\*](../../sql-reference/functions/other-functions.md). +- `y` — запасное значение. Тип: [Float\*](../../sql-reference/functions/other-functions.md). 
**Возвращаемые значения** @@ -458,7 +457,7 @@ SELECT isConstant(number) FROM numbers(1) `bar(x, min, max, width)` рисует полосу ширины пропорциональной `(x - min)` и равной `width` символов при `x = max`. -Параметры: +Аргументы: - `x` — Величина для отображения. - `min, max` — Целочисленные константы, значение должно помещаться в `Int64`. @@ -675,11 +674,11 @@ neighbor(column, offset[, default_value]) Порядок строк, используемый при вычислении функции `neighbor`, может отличаться от порядка строк, возвращаемых пользователю. Чтобы этого не случилось, вы можете сделать подзапрос с [ORDER BY](../../sql-reference/statements/select/order-by.md) и вызвать функцию изне подзапроса. -**Параметры** +**Аргументы** -- `column` — Имя столбца или скалярное выражение. -- `offset` - Смещение от текущей строки `column`. [Int64](../../sql-reference/functions/other-functions.md). -- `default_value` - Опциональный параметр. Значение, которое будет возвращено, если смещение выходит за пределы блока данных. +- `column` — имя столбца или скалярное выражение. +- `offset` — смещение от текущей строки `column`. [Int64](../../sql-reference/functions/other-functions.md). +- `default_value` — опциональный параметр. Значение, которое будет возвращено, если смещение выходит за пределы блока данных. **Возвращаемое значение** @@ -696,7 +695,7 @@ neighbor(column, offset[, default_value]) SELECT number, neighbor(number, 2) FROM system.numbers LIMIT 10; ``` -Ответ: +Результат: ``` text ┌─number─┬─neighbor(number, 2)─┐ @@ -719,7 +718,7 @@ SELECT number, neighbor(number, 2) FROM system.numbers LIMIT 10; SELECT number, neighbor(number, 2, 999) FROM system.numbers LIMIT 10; ``` -Ответ: +Результат: ``` text ┌─number─┬─neighbor(number, 2, 999)─┐ @@ -750,7 +749,7 @@ SELECT FROM numbers(16) ``` -Ответ: +Результат: ``` text ┌──────month─┬─money─┬─prev_year─┬─year_over_year─┐ @@ -872,9 +871,9 @@ WHERE diff != 1 getSizeOfEnumType(value) ``` -**Параметры** +**Аргументы** -- `value` — Значение типа `Enum`. 
+- `value` — значение типа `Enum`. **Возвращаемые значения** @@ -901,9 +900,9 @@ SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x blockSerializedSize(value[, value[, ...]]) ``` -**Параметры** +**Аргументы** -- `value` — Значение произвольного типа. +- `value` — значение произвольного типа. **Возвращаемые значения** @@ -933,9 +932,9 @@ SELECT blockSerializedSize(maxState(1)) as x toColumnTypeName(value) ``` -**Параметры** +**Аргументы** -- `value` — Значение произвольного типа. +- `value` — значение произвольного типа. **Возвращаемые значения** @@ -973,9 +972,9 @@ SELECT toColumnTypeName(CAST('2018-01-01 01:02:03' AS DateTime)) dumpColumnStructure(value) ``` -**Параметры** +**Аргументы** -- `value` — Значение произвольного типа. +- `value` — значение произвольного типа. **Возвращаемые значения** @@ -1003,9 +1002,9 @@ SELECT dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime')) defaultValueOfArgumentType(expression) ``` -**Параметры** +**Аргументы** -- `expression` — Значение произвольного типа или выражение, результатом которого является значение произвольного типа. +- `expression` — значение произвольного типа или выражение, результатом которого является значение произвольного типа. **Возвращаемые значения** @@ -1045,7 +1044,7 @@ SELECT defaultValueOfArgumentType( CAST(1 AS Nullable(Int8) ) ) defaultValueOfTypeName(type) ``` -**Параметры:** +**Аргументы** - `type` — тип данных. @@ -1087,10 +1086,10 @@ SELECT defaultValueOfTypeName('Nullable(Int8)') SELECT replicate(x, arr); ``` -**Параметры** +**Аргументы** -- `arr` — Исходный массив. ClickHouse создаёт новый массив такой же длины как исходный и заполняет его значением `x`. -- `x` — Значение, которым будет заполнен результирующий массив. +- `arr` — исходный массив. ClickHouse создаёт новый массив такой же длины как исходный и заполняет его значением `x`. +- `x` — значение, которым будет заполнен результирующий массив. 
**Возвращаемое значение** @@ -1170,7 +1169,7 @@ filesystemFree() SELECT formatReadableSize(filesystemFree()) AS "Free space", toTypeName(filesystemFree()) AS "Type"; ``` -Ответ: +Результат: ``` text ┌─Free space─┬─Type───┐ @@ -1202,7 +1201,7 @@ filesystemCapacity() SELECT formatReadableSize(filesystemCapacity()) AS "Capacity", toTypeName(filesystemCapacity()) AS "Type" ``` -Ответ: +Результат: ``` text ┌─Capacity──┬─Type───┐ @@ -1220,7 +1219,7 @@ SELECT formatReadableSize(filesystemCapacity()) AS "Capacity", toTypeName(filesy finalizeAggregation(state) ``` -**Параметры** +**Аргументы** - `state` — состояние агрегатной функции. [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction). @@ -1321,17 +1320,17 @@ FROM numbers(10); **Синтаксис** ```sql -runningAccumulate(agg_state[, grouping]); +runningAccumulate(agg_state[, grouping]) ``` -**Параметры** +**Аргументы** -- `agg_state` — Состояние агрегатной функции. [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction). -- `grouping` — Ключ группировки. Опциональный параметр. Состояние функции обнуляется, если значение `grouping` меняется. Параметр может быть любого [поддерживаемого типа данных](../../sql-reference/data-types/index.md), для которого определен оператор равенства. +- `agg_state` — состояние агрегатной функции. [AggregateFunction](../../sql-reference/data-types/aggregatefunction.md#data-type-aggregatefunction). +- `grouping` — ключ группировки. Опциональный параметр. Состояние функции обнуляется, если значение `grouping` меняется. Параметр может быть любого [поддерживаемого типа данных](../../sql-reference/data-types/index.md), для которого определен оператор равенства. **Возвращаемое значение** -- Каждая результирующая строка содержит результат агрегатной функции, накопленный для всех входных строк от 0 до текущей позиции. 
`runningAccumulate` обнуляет состояния для каждого нового блока данных или при изменении значения `grouping`. +- Каждая результирующая строка содержит результат агрегатной функции, накопленный для всех входных строк от 0 до текущей позиции. `runningAccumulate` обнуляет состояния для каждого нового блока данных или при изменении значения `grouping`. Тип зависит от используемой агрегатной функции. @@ -1430,7 +1429,7 @@ FROM joinGet(join_storage_table_name, `value_column`, join_keys) ``` -**Параметры** +**Аргументы** - `join_storage_table_name` — [идентификатор](../syntax.md#syntax-identifiers), который указывает, откуда производится выборка данных. Поиск по идентификатору осуществляется в базе данных по умолчанию (см. конфигурацию `default_database`). Чтобы переопределить базу данных по умолчанию, используйте команду `USE db_name`, или укажите базу данных и таблицу через разделитель `db_name.db_table`, см. пример. - `value_column` — столбец, из которого нужно произвести выборку данных. @@ -1535,9 +1534,9 @@ SELECT identity(42) randomPrintableASCII(length) ``` -**Параметры** +**Аргументы** -- `length` — Длина результирующей строки. Положительное целое число. +- `length` — длина результирующей строки. Положительное целое число. Если передать `length < 0`, то поведение функции не определено. @@ -1571,7 +1570,7 @@ SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers randomString(length) ``` -**Параметры** +**Аргументы** - `length` — длина строки. Положительное целое число. @@ -1619,11 +1618,11 @@ len: 30 randomFixedString(length); ``` -**Параметры** +**Аргументы** -- `length` — Длина строки в байтах. [UInt64](../../sql-reference/data-types/int-uint.md). +- `length` — длина строки в байтах. [UInt64](../../sql-reference/data-types/int-uint.md). -**Returned value(s)** +**Возвращаемое значение** - Строка, заполненная случайными байтами. 
@@ -1653,12 +1652,12 @@ SELECT randomFixedString(13) as rnd, toTypeName(rnd) **Синтаксис** ``` sql -randomStringUTF8(length); +randomStringUTF8(length) ``` -**Параметры** +**Аргументы** -- `length` — Длина итоговой строки в кодовых точках. [UInt64](../../sql-reference/data-types/int-uint.md). +- `length` — длина итоговой строки в кодовых точках. [UInt64](../../sql-reference/data-types/int-uint.md). **Возвращаемое значение** @@ -1690,7 +1689,7 @@ SELECT randomStringUTF8(13) **Синтаксис** ```sql -getSetting('custom_setting'); +getSetting('custom_setting') ``` **Параметр** @@ -1728,7 +1727,7 @@ SELECT getSetting('custom_a'); isDecimalOverflow(d, [p]) ``` -**Параметры** +**Аргументы** - `d` — число. [Decimal](../../sql-reference/data-types/decimal.md). - `p` — точность. Необязательный параметр. Если опущен, используется исходная точность первого аргумента. Использование этого параметра может быть полезно для извлечения данных в другую СУБД или файл. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges). @@ -1765,7 +1764,7 @@ SELECT isDecimalOverflow(toDecimal32(1000000000, 0), 9), countDigits(x) ``` -**Параметры** +**Аргументы** - `x` — [целое](../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64) или [дробное](../../sql-reference/data-types/decimal.md) число. @@ -1824,7 +1823,7 @@ UNSUPPORTED_METHOD tcpPort() ``` -**Параметры** +**Аргументы** - Нет. diff --git a/docs/ru/sql-reference/functions/random-functions.md b/docs/ru/sql-reference/functions/random-functions.md index f3889504fa6..0bbd46c6018 100644 --- a/docs/ru/sql-reference/functions/random-functions.md +++ b/docs/ru/sql-reference/functions/random-functions.md @@ -31,9 +31,9 @@ toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0433\u0435\u043d\u randConstant([x]) ``` -**Параметры** +**Аргументы** -- `x` — [Выражение](../syntax.md#syntax-expressions), возвращающее значение одного из [поддерживаемых типов данных](../data-types/index.md#data_types). 
Значение используется, чтобы избежать [склейки одинаковых выражений](index.md#common-subexpression-elimination), если функция вызывается несколько раз в одном запросе. Необязательный параметр. +- `x` — [выражение](../syntax.md#syntax-expressions), возвращающее значение одного из [поддерживаемых типов данных](../data-types/index.md#data_types). Значение используется, чтобы избежать [склейки одинаковых выражений](index.md#common-subexpression-elimination), если функция вызывается несколько раз в одном запросе. Необязательный параметр. **Возвращаемое значение** @@ -79,7 +79,7 @@ fuzzBits([s], [prob]) ``` Инвертирует каждый бит `s` с вероятностью `prob`. -**Параметры** +**Аргументы** - `s` — `String` or `FixedString` - `prob` — constant `Float32/64` diff --git a/docs/ru/sql-reference/functions/rounding-functions.md b/docs/ru/sql-reference/functions/rounding-functions.md index 78033160396..cfbc8b46960 100644 --- a/docs/ru/sql-reference/functions/rounding-functions.md +++ b/docs/ru/sql-reference/functions/rounding-functions.md @@ -33,10 +33,10 @@ N может быть отрицательным. round(expression [, decimal_places]) ``` -**Параметры:** +**Аргументы** -- `expression` — Число для округления. Может быть любым [выражением](../syntax.md#syntax-expressions), возвращающим числовой [тип данных](../../sql-reference/functions/rounding-functions.md#data_types). -- `decimal-places` — Целое значение. +- `expression` — число для округления. Может быть любым [выражением](../syntax.md#syntax-expressions), возвращающим числовой [тип данных](../../sql-reference/functions/rounding-functions.md#data_types). +- `decimal-places` — целое значение. - Если `decimal-places > 0`, то функция округляет значение справа от запятой. - Если `decimal-places < 0` то функция округляет значение слева от запятой. - Если `decimal-places = 0`, то функция округляет значение до целого. В этом случае аргумент можно опустить. 
@@ -112,13 +112,13 @@ round(3.65, 1) = 3.6 roundBankers(expression [, decimal_places]) ``` -**Параметры** +**Аргументы** -- `expression` — Число для округления. Может быть любым [выражением](../syntax.md#syntax-expressions), возвращающим числовой [тип данных](../../sql-reference/functions/rounding-functions.md#data_types). -- `decimal-places` — Десятичный разряд. Целое число. - - `decimal-places > 0` — Функция округляет значение выражения до ближайшего чётного числа на соответствующей позиции справа от запятой. Например, `roundBankers(3.55, 1) = 3.6`. - - `decimal-places < 0` — Функция округляет значение выражения до ближайшего чётного числа на соответствующей позиции слева от запятой. Например, `roundBankers(24.55, -1) = 20`. - - `decimal-places = 0` — Функция округляет значение до целого. В этом случае аргумент можно не передавать. Например, `roundBankers(2.5) = 2`. +- `expression` — число для округления. Может быть любым [выражением](../syntax.md#syntax-expressions), возвращающим числовой [тип данных](../../sql-reference/functions/rounding-functions.md#data_types). +- `decimal-places` — десятичный разряд. Целое число. + - `decimal-places > 0` — функция округляет значение выражения до ближайшего чётного числа на соответствующей позиции справа от запятой. Например, `roundBankers(3.55, 1) = 3.6`. + - `decimal-places < 0` — функция округляет значение выражения до ближайшего чётного числа на соответствующей позиции слева от запятой. Например, `roundBankers(24.55, -1) = 20`. + - `decimal-places = 0` — функция округляет значение до целого. В этом случае аргумент можно не передавать. Например, `roundBankers(2.5) = 2`. 
**Возвращаемое значение**

diff --git a/docs/ru/sql-reference/functions/splitting-merging-functions.md b/docs/ru/sql-reference/functions/splitting-merging-functions.md
index d451eabc407..8f8fd9925b2 100644
--- a/docs/ru/sql-reference/functions/splitting-merging-functions.md
+++ b/docs/ru/sql-reference/functions/splitting-merging-functions.md
@@ -17,10 +17,10 @@ separator должен быть константной строкой из ро
splitByChar(, )
```

-**Параметры**
+**Аргументы**

-- `separator` — Разделитель, состоящий из одного символа. [String](../../sql-reference/data-types/string.md).
-- `s` — Разбиваемая строка. [String](../../sql-reference/data-types/string.md).
+- `separator` — разделитель, состоящий из одного символа. [String](../../sql-reference/data-types/string.md).
+- `s` — разбиваемая строка. [String](../../sql-reference/data-types/string.md).

**Возвращаемые значения**

@@ -54,10 +54,10 @@ SELECT splitByChar(',', '1,2,3,abcde')
splitByString(separator, s)
```

-**Параметры**
+**Аргументы**

-- `separator` — Разделитель. [String](../../sql-reference/data-types/string.md).
-- `s` — Разбиваемая строка. [String](../../sql-reference/data-types/string.md).
+- `separator` — разделитель. [String](../../sql-reference/data-types/string.md).
+- `s` — разбиваемая строка. [String](../../sql-reference/data-types/string.md).

**Возвращаемые значения**

@@ -67,7 +67,7 @@ splitByString(separator, s)
- Задано несколько последовательных разделителей;
- Исходная строка `s` пуста.

-Type: [Array](../../sql-reference/data-types/array.md) of [String](../../sql-reference/data-types/string.md).
+Тип: [Array](../../sql-reference/data-types/array.md) из [String](../../sql-reference/data-types/string.md).
**Примеры** diff --git a/docs/ru/sql-reference/functions/string-functions.md b/docs/ru/sql-reference/functions/string-functions.md index 1159a1f5823..8c794a920e3 100644 --- a/docs/ru/sql-reference/functions/string-functions.md +++ b/docs/ru/sql-reference/functions/string-functions.md @@ -70,19 +70,19 @@ toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u Заменяет некорректные символы UTF-8 на символ `�` (U+FFFD). Все идущие подряд некорректные символы схлопываются в один заменяющий символ. ``` sql -toValidUTF8( input_string ) +toValidUTF8(input_string) ``` -Параметры: +**Аргументы** -- input_string — произвольный набор байтов, представленный как объект типа [String](../../sql-reference/functions/string-functions.md). +- `input_string` — произвольный набор байтов, представленный как объект типа [String](../../sql-reference/functions/string-functions.md). Возвращаемое значение: Корректная строка UTF-8. **Пример** ``` sql -SELECT toValidUTF8('\x61\xF0\x80\x80\x80b') +SELECT toValidUTF8('\x61\xF0\x80\x80\x80b'); ``` ``` text @@ -103,10 +103,10 @@ SELECT toValidUTF8('\x61\xF0\x80\x80\x80b') repeat(s, n) ``` -**Параметры** +**Аргументы** -- `s` — Строка для повторения. [String](../../sql-reference/functions/string-functions.md). -- `n` — Количество повторов. [UInt](../../sql-reference/functions/string-functions.md). +- `s` — строка для повторения. [String](../../sql-reference/functions/string-functions.md). +- `n` — количество повторов. [UInt](../../sql-reference/functions/string-functions.md). **Возвращаемое значение** @@ -119,10 +119,10 @@ repeat(s, n) Запрос: ``` sql -SELECT repeat('abc', 10) +SELECT repeat('abc', 10); ``` -Ответ: +Результат: ``` text ┌─repeat('abc', 10)──────────────┐ @@ -172,7 +172,7 @@ SELECT format('{} {}', 'Hello', 'World') concat(s1, s2, ...) ``` -**Параметры** +**Аргументы** Значения типа String или FixedString. @@ -187,10 +187,10 @@ concat(s1, s2, ...) 
Запрос: ``` sql -SELECT concat('Hello, ', 'World!') +SELECT concat('Hello, ', 'World!'); ``` -Ответ: +Результат: ``` text ┌─concat('Hello, ', 'World!')─┐ @@ -210,7 +210,7 @@ SELECT concat('Hello, ', 'World!') concatAssumeInjective(s1, s2, ...) ``` -**Параметры** +**Аргументы** Значения типа String или FixedString. @@ -242,10 +242,10 @@ SELECT * from key_val Запрос: ``` sql -SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY (key1, key2) +SELECT concat(key1, key2), sum(value) FROM key_val GROUP BY (key1, key2); ``` -Ответ: +Результат: ``` text ┌─concat(key1, key2)─┬─sum(value)─┐ @@ -312,7 +312,7 @@ SELECT startsWith('Spider-Man', 'Spi'); SELECT startsWith('Hello, world!', 'He'); ``` -Ответ: +Результат: ``` text ┌─startsWith('Hello, world!', 'He')─┐ @@ -331,7 +331,7 @@ SELECT startsWith('Hello, world!', 'He'); trim([[LEADING|TRAILING|BOTH] trim_character FROM] input_string) ``` -**Параметры** +**Аргументы** - `trim_character` — один или несколько символов, подлежащие удалению. [String](../../sql-reference/functions/string-functions.md). - `input_string` — строка для обрезки. [String](../../sql-reference/functions/string-functions.md). @@ -347,10 +347,10 @@ trim([[LEADING|TRAILING|BOTH] trim_character FROM] input_string) Запрос: ``` sql -SELECT trim(BOTH ' ()' FROM '( Hello, world! )') +SELECT trim(BOTH ' ()' FROM '( Hello, world! )'); ``` -Ответ: +Результат: ``` text ┌─trim(BOTH ' ()' FROM '( Hello, world! )')─┐ @@ -370,7 +370,7 @@ trimLeft(input_string) Алиас: `ltrim(input_string)`. -**Параметры** +**Аргументы** - `input_string` — строка для обрезки. [String](../../sql-reference/functions/string-functions.md). @@ -385,10 +385,10 @@ trimLeft(input_string) Запрос: ``` sql -SELECT trimLeft(' Hello, world! ') +SELECT trimLeft(' Hello, world! '); ``` -Ответ: +Результат: ``` text ┌─trimLeft(' Hello, world! ')─┐ @@ -408,7 +408,7 @@ trimRight(input_string) Алиас: `rtrim(input_string)`. -**Параметры** +**Аргументы** - `input_string` — строка для обрезки. 
[String](../../sql-reference/functions/string-functions.md). @@ -423,10 +423,10 @@ trimRight(input_string) Запрос: ``` sql -SELECT trimRight(' Hello, world! ') +SELECT trimRight(' Hello, world! '); ``` -Ответ: +Результат: ``` text ┌─trimRight(' Hello, world! ')─┐ @@ -446,7 +446,7 @@ trimBoth(input_string) Алиас: `trim(input_string)`. -**Параметры** +**Аргументы** - `input_string` — строка для обрезки. [String](../../sql-reference/functions/string-functions.md). @@ -461,10 +461,10 @@ trimBoth(input_string) Запрос: ``` sql -SELECT trimBoth(' Hello, world! ') +SELECT trimBoth(' Hello, world! '); ``` -Ответ: +Результат: ``` text ┌─trimBoth(' Hello, world! ')─┐ @@ -494,14 +494,15 @@ SELECT trimBoth(' Hello, world! ') Заменяет литералы, последовательности литералов и сложные псевдонимы заполнителями. -**Синтаксис** +**Синтаксис** + ``` sql normalizeQuery(x) ``` -**Параметры** +**Аргументы** -- `x` — Последовательность символов. [String](../../sql-reference/data-types/string.md). +- `x` — последовательность символов. [String](../../sql-reference/data-types/string.md). **Возвращаемое значение** @@ -535,9 +536,9 @@ SELECT normalizeQuery('[1, 2, 3, x]') AS query; normalizedQueryHash(x) ``` -**Параметры** +**Аргументы** -- `x` — Последовательность символов. [String](../../sql-reference/data-types/string.md). +- `x` — последовательность символов. [String](../../sql-reference/data-types/string.md). **Возвращаемое значение** @@ -573,7 +574,7 @@ SELECT normalizedQueryHash('SELECT 1 AS `xyz`') != normalizedQueryHash('SELECT 1 encodeXMLComponent(x) ``` -**Параметры** +**Аргументы** - `x` — последовательность символов. [String](../../sql-reference/data-types/string.md). @@ -603,7 +604,6 @@ Hello, "world"! 'foo' ``` - ## decodeXMLComponent {#decode-xml-component} Заменяет символами предопределенные мнемоники XML: `"` `&` `'` `>` `<` @@ -615,7 +615,7 @@ Hello, "world"! decodeXMLComponent(x) ``` -**Параметры** +**Аргументы** - `x` — последовательность символов. 
[String](../../sql-reference/data-types/string.md). diff --git a/docs/ru/sql-reference/functions/string-search-functions.md b/docs/ru/sql-reference/functions/string-search-functions.md index b7193da6f33..509e28a009e 100644 --- a/docs/ru/sql-reference/functions/string-search-functions.md +++ b/docs/ru/sql-reference/functions/string-search-functions.md @@ -23,11 +23,11 @@ position(haystack, needle[, start_pos]) Алиас: `locate(haystack, needle[, start_pos])`. -**Параметры** +**Аргументы** - `haystack` — строка, по которой выполняется поиск. [Строка](../syntax.md#syntax-string-literal). - `needle` — подстрока, которую необходимо найти. [Строка](../syntax.md#syntax-string-literal). -- `start_pos` – Опциональный параметр, позиция символа в строке, с которого начинается поиск. [UInt](../../sql-reference/data-types/int-uint.md) +- `start_pos` — опциональный параметр, позиция символа в строке, с которого начинается поиск. [UInt](../../sql-reference/data-types/int-uint.md). **Возвращаемые значения** @@ -43,10 +43,10 @@ position(haystack, needle[, start_pos]) Запрос: ``` sql -SELECT position('Hello, world!', '!') +SELECT position('Hello, world!', '!'); ``` -Ответ: +Результат: ``` text ┌─position('Hello, world!', '!')─┐ @@ -59,10 +59,10 @@ SELECT position('Hello, world!', '!') Запрос: ``` sql -SELECT position('Привет, мир!', '!') +SELECT position('Привет, мир!', '!'); ``` -Ответ: +Результат: ``` text ┌─position('Привет, мир!', '!')─┐ @@ -82,11 +82,11 @@ SELECT position('Привет, мир!', '!') positionCaseInsensitive(haystack, needle[, start_pos]) ``` -**Параметры** +**Аргументы** - `haystack` — строка, по которой выполняется поиск. [Строка](../syntax.md#syntax-string-literal). - `needle` — подстрока, которую необходимо найти. [Строка](../syntax.md#syntax-string-literal). -- `start_pos` – Опциональный параметр, позиция символа в строке, с которого начинается поиск. 
[UInt](../../sql-reference/data-types/int-uint.md) +- `start_pos` — опциональный параметр, позиция символа в строке, с которого начинается поиск. [UInt](../../sql-reference/data-types/int-uint.md). **Возвращаемые значения** @@ -100,10 +100,10 @@ positionCaseInsensitive(haystack, needle[, start_pos]) Запрос: ``` sql -SELECT positionCaseInsensitive('Hello, world!', 'hello') +SELECT positionCaseInsensitive('Hello, world!', 'hello'); ``` -Ответ: +Результат: ``` text ┌─positionCaseInsensitive('Hello, world!', 'hello')─┐ @@ -125,11 +125,11 @@ SELECT positionCaseInsensitive('Hello, world!', 'hello') positionUTF8(haystack, needle[, start_pos]) ``` -**Параметры** +**Аргументы** - `haystack` — строка, по которой выполняется поиск. [Строка](../syntax.md#syntax-string-literal). - `needle` — подстрока, которую необходимо найти. [Строка](../syntax.md#syntax-string-literal). -- `start_pos` – Опциональный параметр, позиция символа в строке, с которого начинается поиск. [UInt](../../sql-reference/data-types/int-uint.md) +- `start_pos` — опциональный параметр, позиция символа в строке, с которого начинается поиск. [UInt](../../sql-reference/data-types/int-uint.md). 
**Возвращаемые значения** @@ -145,10 +145,10 @@ positionUTF8(haystack, needle[, start_pos]) Запрос: ``` sql -SELECT positionUTF8('Привет, мир!', '!') +SELECT positionUTF8('Привет, мир!', '!'); ``` -Ответ: +Результат: ``` text ┌─positionUTF8('Привет, мир!', '!')─┐ @@ -161,7 +161,7 @@ SELECT positionUTF8('Привет, мир!', '!') Запрос для символа `é`, который представлен одной кодовой точкой `U+00E9`: ``` sql -SELECT positionUTF8('Salut, étudiante!', '!') +SELECT positionUTF8('Salut, étudiante!', '!'); ``` Result: @@ -175,10 +175,10 @@ Result: Запрос для символа `é`, который представлен двумя кодовыми точками `U+0065U+0301`: ``` sql -SELECT positionUTF8('Salut, étudiante!', '!') +SELECT positionUTF8('Salut, étudiante!', '!'); ``` -Ответ: +Результат: ``` text ┌─positionUTF8('Salut, étudiante!', '!')─┐ @@ -198,11 +198,11 @@ SELECT positionUTF8('Salut, étudiante!', '!') positionCaseInsensitiveUTF8(haystack, needle[, start_pos]) ``` -**Параметры** +**Аргументы** - `haystack` — строка, по которой выполняется поиск. [Строка](../syntax.md#syntax-string-literal). - `needle` — подстрока, которую необходимо найти. [Строка](../syntax.md#syntax-string-literal). -- `start_pos` – Опциональный параметр, позиция символа в строке, с которого начинается поиск. [UInt](../../sql-reference/data-types/int-uint.md) +- `start_pos` — опциональный параметр, позиция символа в строке, с которого начинается поиск. [UInt](../../sql-reference/data-types/int-uint.md). 
**Возвращаемые значения** @@ -216,10 +216,10 @@ positionCaseInsensitiveUTF8(haystack, needle[, start_pos]) Запрос: ``` sql -SELECT positionCaseInsensitiveUTF8('Привет, мир!', 'Мир') +SELECT positionCaseInsensitiveUTF8('Привет, мир!', 'Мир'); ``` -Ответ: +Результат: ``` text ┌─positionCaseInsensitiveUTF8('Привет, мир!', 'Мир')─┐ @@ -257,7 +257,7 @@ multiSearchAllPositions(haystack, [needle1, needle2, ..., needlen]) Query: ``` sql -SELECT multiSearchAllPositions('Hello, World!', ['hello', '!', 'world']) +SELECT multiSearchAllPositions('Hello, World!', ['hello', '!', 'world']); ``` Result: @@ -357,7 +357,7 @@ Result: extractAllGroupsHorizontal(haystack, pattern) ``` -**Параметры** +**Аргументы** - `haystack` — строка для разбора. Тип: [String](../../sql-reference/data-types/string.md). - `pattern` — регулярное выражение, построенное по синтаксическим правилам [re2](https://github.com/google/re2/wiki/Syntax). Выражение должно содержать группы, заключенные в круглые скобки. Если выражение не содержит групп, генерируется исключение. Тип: [String](../../sql-reference/data-types/string.md). @@ -373,7 +373,7 @@ extractAllGroupsHorizontal(haystack, pattern) Запрос: ``` sql -SELECT extractAllGroupsHorizontal('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[^"]+"|\\w+)') +SELECT extractAllGroupsHorizontal('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[^"]+"|\\w+)'); ``` Результат: @@ -384,8 +384,9 @@ SELECT extractAllGroupsHorizontal('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=( └──────────────────────────────────────────────────────────────────────────────────────────┘ ``` -**См. 
также** -- функция [extractAllGroupsVertical](#extractallgroups-vertical) +**Смотрите также** + +- Функция [extractAllGroupsVertical](#extractallgroups-vertical) ## extractAllGroupsVertical {#extractallgroups-vertical} @@ -397,7 +398,7 @@ SELECT extractAllGroupsHorizontal('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=( extractAllGroupsVertical(haystack, pattern) ``` -**Параметры** +**Аргументы** - `haystack` — строка для разбора. Тип: [String](../../sql-reference/data-types/string.md). - `pattern` — регулярное выражение, построенное по синтаксическим правилам [re2](https://github.com/google/re2/wiki/Syntax). Выражение должно содержать группы, заключенные в круглые скобки. Если выражение не содержит групп, генерируется исключение. Тип: [String](../../sql-reference/data-types/string.md). @@ -413,7 +414,7 @@ extractAllGroupsVertical(haystack, pattern) Запрос: ``` sql -SELECT extractAllGroupsVertical('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[^"]+"|\\w+)') +SELECT extractAllGroupsVertical('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[^"]+"|\\w+)'); ``` Результат: @@ -424,8 +425,9 @@ SELECT extractAllGroupsVertical('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[ └────────────────────────────────────────────────────────────────────────────────────────┘ ``` -**См. также** -- функция [extractAllGroupsHorizontal](#extractallgroups-horizontal) +**Смотрите также** + +- Функция [extractAllGroupsHorizontal](#extractallgroups-horizontal) ## like(haystack, pattern), оператор haystack LIKE pattern {#function-like} @@ -455,10 +457,10 @@ SELECT extractAllGroupsVertical('abc=111, def=222, ghi=333', '("[^"]+"|\\w+)=("[ ilike(haystack, pattern) ``` -**Параметры** +**Аргументы** -- `haystack` — Входная строка. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `pattern` — Если `pattern` не содержит процента или нижнего подчеркивания, тогда `pattern` представляет саму строку. Нижнее подчеркивание (`_`) в `pattern` обозначает любой отдельный символ. 
Знак процента (`%`) соответствует последовательности из любого количества символов: от нуля и более. +- `haystack` — входная строка. [String](../../sql-reference/syntax.md#syntax-string-literal). +- `pattern` — если `pattern` не содержит процента или нижнего подчеркивания, тогда `pattern` представляет саму строку. Нижнее подчеркивание (`_`) в `pattern` обозначает любой отдельный символ. Знак процента (`%`) соответствует последовательности из любого количества символов: от нуля и более. Некоторые примеры `pattern`: @@ -490,7 +492,7 @@ ilike(haystack, pattern) Запрос: ``` sql -SELECT * FROM Months WHERE ilike(name, '%j%') +SELECT * FROM Months WHERE ilike(name, '%j%'); ``` Результат: @@ -530,7 +532,7 @@ SELECT * FROM Months WHERE ilike(name, '%j%') countMatches(haystack, pattern) ``` -**Параметры** +**Аргументы** - `haystack` — строка, по которой выполняется поиск. [String](../../sql-reference/syntax.md#syntax-string-literal). - `pattern` — регулярное выражение, построенное по синтаксическим правилам [re2](https://github.com/google/re2/wiki/Syntax). [String](../../sql-reference/data-types/string.md). @@ -583,11 +585,11 @@ SELECT countMatches('aaaa', 'aa'); countSubstrings(haystack, needle[, start_pos]) ``` -**Параметры** +**Аргументы** - `haystack` — строка, в которой ведется поиск. [String](../../sql-reference/syntax.md#syntax-string-literal). - `needle` — искомая подстрока. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `start_pos` – позиция первого символа в строке, с которого начнется поиск. Необязательный параметр. [UInt](../../sql-reference/data-types/int-uint.md). +- `start_pos` — позиция первого символа в строке, с которого начнется поиск. Необязательный параметр. [UInt](../../sql-reference/data-types/int-uint.md). 
**Возвращаемые значения** @@ -649,11 +651,11 @@ SELECT countSubstrings('abc___abc', 'abc', 4); countSubstringsCaseInsensitive(haystack, needle[, start_pos]) ``` -**Параметры** +**Аргументы** - `haystack` — строка, в которой ведется поиск. [String](../../sql-reference/syntax.md#syntax-string-literal). - `needle` — искомая подстрока. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `start_pos` – позиция первого символа в строке, с которого начнется поиск. Необязательный параметр. [UInt](../../sql-reference/data-types/int-uint.md). +- `start_pos` — позиция первого символа в строке, с которого начнется поиск. Необязательный параметр. [UInt](../../sql-reference/data-types/int-uint.md). **Возвращаемые значения** @@ -715,11 +717,11 @@ SELECT countSubstringsCaseInsensitive('abC___abC', 'aBc', 2); SELECT countSubstringsCaseInsensitiveUTF8(haystack, needle[, start_pos]) ``` -**Параметры** +**Аргументы** - `haystack` — строка, в которой ведется поиск. [String](../../sql-reference/syntax.md#syntax-string-literal). - `needle` — искомая подстрока. [String](../../sql-reference/syntax.md#syntax-string-literal). -- `start_pos` – позиция первого символа в строке, с которого начнется поиск. Необязательный параметр. [UInt](../../sql-reference/data-types/int-uint.md). +- `start_pos` — позиция первого символа в строке, с которого начнется поиск. Необязательный параметр. [UInt](../../sql-reference/data-types/int-uint.md). **Возвращаемые значения** diff --git a/docs/ru/sql-reference/functions/tuple-functions.md b/docs/ru/sql-reference/functions/tuple-functions.md index f88886ec6f1..244998b15d0 100644 --- a/docs/ru/sql-reference/functions/tuple-functions.md +++ b/docs/ru/sql-reference/functions/tuple-functions.md @@ -45,9 +45,9 @@ untuple(x) Чтобы пропустить некоторые столбцы в результате запроса, вы можете использовать выражение `EXCEPT`. -**Параметры** +**Аргументы** -- `x` - функция `tuple`, столбец или кортеж элементов. [Tuple](../../sql-reference/data-types/tuple.md). 
+- `x` — функция `tuple`, столбец или кортеж элементов. [Tuple](../../sql-reference/data-types/tuple.md). **Возвращаемое значение** diff --git a/docs/ru/sql-reference/functions/tuple-map-functions.md b/docs/ru/sql-reference/functions/tuple-map-functions.md index 696fdb9e5ae..4d36f2ef18b 100644 --- a/docs/ru/sql-reference/functions/tuple-map-functions.md +++ b/docs/ru/sql-reference/functions/tuple-map-functions.md @@ -15,7 +15,7 @@ toc_title: Работа с контейнерами map map(key1, value1[, key2, value2, ...]) ``` -**Параметры** +**Аргументы** - `key` — ключ. [String](../../sql-reference/data-types/string.md) или [Integer](../../sql-reference/data-types/int-uint.md). - `value` — значение. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md) или [Array](../../sql-reference/data-types/array.md). @@ -62,9 +62,10 @@ SELECT a['key2'] FROM table_map; └─────────────────────────┘ ``` -**См. также** +**Смотрите также** - тип данных [Map(key, value)](../../sql-reference/data-types/map.md) + ## mapAdd {#function-mapadd} Собирает все ключи и суммирует соответствующие значения. @@ -75,7 +76,7 @@ SELECT a['key2'] FROM table_map; mapAdd(Tuple(Array, Array), Tuple(Array, Array) [, ...]) ``` -**Параметры** +**Аргументы** Аргументами являются [кортежи](../../sql-reference/data-types/tuple.md#tuplet1-t2) из двух [массивов](../../sql-reference/data-types/array.md#data-type-array), где элементы в первом массиве представляют ключи, а второй массив содержит значения для каждого ключа. Все массивы ключей должны иметь один и тот же тип, а все массивы значений должны содержать элементы, которые можно приводить к одному типу ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) или [Float64](../../sql-reference/data-types/float.md#float32-float64)). 
@@ -111,7 +112,7 @@ SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTy mapSubtract(Tuple(Array, Array), Tuple(Array, Array) [, ...]) ``` -**Параметры** +**Аргументы** Аргументами являются [кортежи](../../sql-reference/data-types/tuple.md#tuplet1-t2) из двух [массивов](../../sql-reference/data-types/array.md#data-type-array), где элементы в первом массиве представляют ключи, а второй массив содержит значения для каждого ключа. Все массивы ключей должны иметь один и тот же тип, а все массивы значений должны содержать элементы, которые можно приводить к одному типу ([Int64](../../sql-reference/data-types/int-uint.md#int-ranges), [UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges) или [Float64](../../sql-reference/data-types/float.md#float32-float64)). @@ -151,10 +152,10 @@ mapPopulateSeries(keys, values[, max]) Количество элементов в `keys` и `values` должно быть одинаковым для каждой строки. -**Параметры** +**Аргументы** -- `keys` — Массив ключей [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#int-ranges)). -- `values` — Массив значений. [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#int-ranges)). +- `keys` — массив ключей [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#int-ranges)). +- `values` — массив значений. [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#int-ranges)). **Возвращаемое значение** @@ -186,7 +187,7 @@ select mapPopulateSeries([1,2,4], [11,22,44], 5) as res, toTypeName(res) as type mapContains(map, key) ``` -**Параметры** +**Аргументы** - `map` — контейнер Map. [Map](../../sql-reference/data-types/map.md). - `key` — ключ. Тип соответстует типу ключей параметра `map`. 
@@ -229,7 +230,7 @@ SELECT mapContains(a, 'name') FROM test; mapKeys(map) ``` -**Параметры** +**Аргументы** - `map` — контейнер Map. [Map](../../sql-reference/data-types/map.md). @@ -270,7 +271,7 @@ SELECT mapKeys(a) FROM test; mapKeys(map) ``` -**Параметры** +**Аргументы** - `map` — контейнер Map. [Map](../../sql-reference/data-types/map.md). diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 53e7bc1300e..7fea524a233 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -22,7 +22,7 @@ toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u043f\u0440\u0435\u - `toInt128(expr)` — возвращает значение типа `Int128`. - `toInt256(expr)` — возвращает значение типа `Int256`. -**Параметры** +**Аргументы** - `expr` — [выражение](../syntax.md#syntax-expressions) возвращающее число или строку с десятичным представление числа. Бинарное, восьмеричное и шестнадцатеричное представление числа не поддержаны. Ведущие нули обрезаются. @@ -100,7 +100,7 @@ SELECT toInt64OrNull('123123'), toInt8OrNull('123qwe123'); - `toUInt64(expr)` — возвращает значение типа `UInt64`. - `toUInt256(expr)` — возвращает значение типа `UInt256`. -**Параметры** +**Аргументы** - `expr` — [выражение](../syntax.md#syntax-expressions) возвращающее число или строку с десятичным представление числа. Бинарное, восьмеричное и шестнадцатеричное представление числа не поддержаны. Ведущие нули обрезаются. @@ -172,7 +172,7 @@ Cиноним: `DATE`. Эти функции следует использовать вместо функций `toDecimal*()`, если при ошибке обработки входного значения вы хотите получать `NULL` вместо исключения. -**Параметры** +**Аргументы** - `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа [String](../../sql-reference/functions/type-conversion-functions.md). 
ClickHouse ожидает текстовое представление десятичного числа. Например, `'1.111'`. - `S` — количество десятичных знаков в результирующем значении. @@ -225,7 +225,7 @@ SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val); Эти функции следует использовать вместо функций `toDecimal*()`, если при ошибке обработки входного значения вы хотите получать `0` вместо исключения. -**Параметры** +**Аргументы** - `expr` — [выражение](../syntax.md#syntax-expressions), возвращающее значение типа [String](../../sql-reference/functions/type-conversion-functions.md). ClickHouse ожидает текстовое представление десятичного числа. Например, `'1.111'`. - `S` — количество десятичных знаков в результирующем значении. @@ -377,7 +377,7 @@ SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut; reinterpretAsUUID(fixed_string) ``` -**Параметры** +**Аргументы** - `fixed_string` — cтрока с big-endian порядком байтов. [FixedString](../../sql-reference/data-types/fixedstring.md#fixedstring). @@ -488,7 +488,7 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null; └─────────────────────────────────────────┘ ``` -**См. также** +**Смотрите также** - Настройка [cast_keep_nullable](../../operations/settings/settings.md#cast_keep_nullable) @@ -537,7 +537,7 @@ Code: 70. DB::Exception: Received from localhost:9000. DB::Exception: Value in c accurateCastOrNull(x, T) ``` -**Параметры** +**Аргументы** - `x` — входное значение. - `T` — имя возвращаемого типа данных. @@ -596,7 +596,7 @@ toIntervalQuarter(number) toIntervalYear(number) ``` -**Параметры** +**Аргументы** - `number` — длительность интервала. Положительное целое число. @@ -638,7 +638,7 @@ SELECT parseDateTimeBestEffort(time_string[, time_zone]) ``` -**Параметры** +**Аргументы** - `time_string` — строка, содержащая дату и время для преобразования. [String](../../sql-reference/functions/type-conversion-functions.md). - `time_zone` — часовой пояс. 
Функция анализирует `time_string` в соответствии с заданным часовым поясом. [String](../../sql-reference/functions/type-conversion-functions.md). @@ -733,7 +733,7 @@ SELECT parseDateTimeBestEffort('10 20:19'); └─────────────────────────────────────┘ ``` -**См. также** +**Смотрите также** - [Информация о формате ISO 8601 от @xkcd](https://xkcd.com/1179/) - [RFC 1123](https://tools.ietf.org/html/rfc1123) @@ -750,7 +750,7 @@ SELECT parseDateTimeBestEffort('10 20:19'); parseDateTimeBestEffortUS(time_string [, time_zone]) ``` -**Параметры** +**Аргументы** - `time_string` — строка, содержащая дату и время для преобразования. [String](../../sql-reference/data-types/string.md). - `time_zone` — часовой пояс. Функция анализирует `time_string` в соответствии с часовым поясом. [String](../../sql-reference/data-types/string.md). @@ -824,7 +824,7 @@ AS parseDateTimeBestEffortUS; parseDateTimeBestEffortUSOrNull(time_string[, time_zone]) ``` -**Параметры** +**Аргументы** - `time_string` — строка, содержащая дату или дату со временем для преобразования. Дата должна быть в американском формате (`MM/DD/YYYY` и т.д.). [String](../../sql-reference/data-types/string.md). - `time_zone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). Функция анализирует `time_string` в соответствии с заданным часовым поясом. Опциональный параметр. [String](../../sql-reference/data-types/string.md). @@ -910,7 +910,7 @@ SELECT parseDateTimeBestEffortUSOrNull('10.2021') AS parseDateTimeBestEffortUSOr parseDateTimeBestEffortUSOrZero(time_string[, time_zone]) ``` -**Параметры** +**Аргументы** - `time_string` — строка, содержащая дату или дату со временем для преобразования. Дата должна быть в американском формате (`MM/DD/YYYY` и т.д.). [String](../../sql-reference/data-types/string.md). - `time_zone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). 
Функция анализирует `time_string` в соответствии с заданным часовым поясом. Опциональный параметр. [String](../../sql-reference/data-types/string.md). @@ -999,7 +999,7 @@ SELECT parseDateTimeBestEffortUSOrZero('02.2021') AS parseDateTimeBestEffortUSOr toUnixTimestamp64Milli(value) ``` -**Параметры** +**Аргументы** - `value` — значение `DateTime64` с любой точностью. @@ -1051,7 +1051,7 @@ SELECT toUnixTimestamp64Nano(dt64); fromUnixTimestamp64Milli(value [, ti]) ``` -**Параметры** +**Аргументы** - `value` — значение типы `Int64` с любой точностью. - `timezone` — (не обязательный параметр) часовой пояс в формате `String` для возвращаемого результата. @@ -1089,14 +1089,14 @@ SELECT fromUnixTimestamp64Milli(i64, 'UTC'); toLowCardinality(expr) ``` -**Параметры** +**Аргументы** -- `expr` — [Выражение](../syntax.md#syntax-expressions), которое в результате преобразуется в один из [поддерживаемых типов данных](../data-types/index.md#data_types). +- `expr` — [выражение](../syntax.md#syntax-expressions), которое в результате преобразуется в один из [поддерживаемых типов данных](../data-types/index.md#data_types). **Возвращаемое значение** -- Результат преобразования `expr`. +- Результат преобразования `expr`. Тип: `LowCardinality(expr_result_type)` @@ -1126,10 +1126,10 @@ SELECT toLowCardinality('1'); formatRow(format, x, y, ...) ``` -**Параметры** +**Аргументы** -- `format` — Текстовый формат. Например, [CSV](../../interfaces/formats.md#csv), [TSV](../../interfaces/formats.md#tabseparated). -- `x`,`y`, ... — Выражения. +- `format` — текстовый формат. Например, [CSV](../../interfaces/formats.md#csv), [TSV](../../interfaces/formats.md#tabseparated). +- `x`,`y`, ... — выражения. **Возвращаемое значение** @@ -1167,10 +1167,10 @@ FROM numbers(3); formatRowNoNewline(format, x, y, ...) ``` -**Параметры** +**Аргументы** -- `format` — Текстовый формат. Например, [CSV](../../interfaces/formats.md#csv), [TSV](../../interfaces/formats.md#tabseparated). -- `x`,`y`, ... — Выражения. 
+- `format` — текстовый формат. Например, [CSV](../../interfaces/formats.md#csv), [TSV](../../interfaces/formats.md#tabseparated). +- `x`,`y`, ... — выражения. **Возвращаемое значение** diff --git a/docs/ru/sql-reference/functions/url-functions.md b/docs/ru/sql-reference/functions/url-functions.md index 7541e16bed4..bd1f9987291 100644 --- a/docs/ru/sql-reference/functions/url-functions.md +++ b/docs/ru/sql-reference/functions/url-functions.md @@ -23,7 +23,7 @@ toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u domain(url) ``` -**Параметры** +**Аргументы** - `url` — URL. Тип — [String](../../sql-reference/functions/url-functions.md). @@ -53,7 +53,7 @@ yandex.com **Пример** ``` sql -SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk') +SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk'); ``` ``` text @@ -74,7 +74,7 @@ SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk') topLevelDomain(url) ``` -**Параметры** +**Аргументы** - `url` — URL. Тип — [String](../../sql-reference/functions/url-functions.md). @@ -96,7 +96,7 @@ https://yandex.com/time/ **Пример** ``` sql -SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk') +SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk'); ``` ``` text @@ -138,7 +138,7 @@ SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk') cutToFirstSignificantSubdomain(URL, TLD) ``` -**Parameters** +**Аргументы** - `URL` — URL. [String](../../sql-reference/data-types/string.md). - `TLD` — имя пользовательского списка доменов верхнего уровня. [String](../../sql-reference/data-types/string.md). @@ -192,7 +192,7 @@ SELECT cutToFirstSignificantSubdomainCustom('bar.foo.there-is-no-such-domain', ' cutToFirstSignificantSubdomainCustomWithWWW(URL, TLD) ``` -**Параметры** +**Аргументы** - `URL` — URL. [String](../../sql-reference/data-types/string.md). - `TLD` — имя пользовательского списка доменов верхнего уровня. 
[String](../../sql-reference/data-types/string.md). @@ -246,7 +246,7 @@ SELECT cutToFirstSignificantSubdomainCustomWithWWW('www.foo', 'public_suffix_lis firstSignificantSubdomainCustom(URL, TLD) ``` -**Параметры** +**Аргументы** - `URL` — URL. [String](../../sql-reference/data-types/string.md). - `TLD` — имя пользовательского списка доменов верхнего уровня. [String](../../sql-reference/data-types/string.md). @@ -355,7 +355,7 @@ SELECT decodeURLComponent('http://127.0.0.1:8123/?query=SELECT%201%3B') AS Decod netloc(URL) ``` -**Параметры** +**Аргументы** - `url` — URL. Тип — [String](../../sql-reference/data-types/string.md). diff --git a/docs/ru/sql-reference/functions/ym-dict-functions.md b/docs/ru/sql-reference/functions/ym-dict-functions.md index c3b04e4ab66..63ea76907f9 100644 --- a/docs/ru/sql-reference/functions/ym-dict-functions.md +++ b/docs/ru/sql-reference/functions/ym-dict-functions.md @@ -113,13 +113,13 @@ LIMIT 15 **Синтаксис** ``` sql -regionToTopContinent(id[, geobase]); +regionToTopContinent(id[, geobase]) ``` -**Параметры** +**Аргументы** -- `id` — Идентификатор региона из геобазы Яндекса. [UInt32](../../sql-reference/functions/ym-dict-functions.md). -- `geobase` — Ключ словаря. Смотрите [Множественные геобазы](#multiple-geobases). [String](../../sql-reference/functions/ym-dict-functions.md). Опциональный параметр. +- `id` — идентификатор региона из геобазы Яндекса. [UInt32](../../sql-reference/functions/ym-dict-functions.md). +- `geobase` — ключ словаря. Смотрите [Множественные геобазы](#multiple-geobases). [String](../../sql-reference/functions/ym-dict-functions.md). Опциональный параметр. 
**Возвращаемое значение** diff --git a/docs/ru/sql-reference/table-functions/generate.md b/docs/ru/sql-reference/table-functions/generate.md index 47b7e43bc86..cb1bcda01b3 100644 --- a/docs/ru/sql-reference/table-functions/generate.md +++ b/docs/ru/sql-reference/table-functions/generate.md @@ -10,10 +10,11 @@ toc_title: generateRandom Поддерживает все типы данных, которые могут храниться в таблице, за исключением `LowCardinality` и `AggregateFunction`. ``` sql -generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]); +generateRandom('name TypeName[, name TypeName]...', [, 'random_seed'[, 'max_string_length'[, 'max_array_length']]]) ``` -**Входные параметры** +**Аргументы** + - `name` — название соответствующего столбца. - `TypeName` — тип соответствующего столбца. - `max_array_length` — максимальная длина массива для всех сгенерированных массивов. По умолчанию `10`. diff --git a/docs/ru/sql-reference/table-functions/mysql.md b/docs/ru/sql-reference/table-functions/mysql.md index 18b34d0bf6c..9af46d61e45 100644 --- a/docs/ru/sql-reference/table-functions/mysql.md +++ b/docs/ru/sql-reference/table-functions/mysql.md @@ -10,10 +10,10 @@ toc_title: mysql **Синтаксис** ``` sql -mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']); +mysql('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']) ``` -**Параметры** +**Аргументы** - `host:port` — адрес сервера MySQL. diff --git a/docs/ru/sql-reference/table-functions/view.md b/docs/ru/sql-reference/table-functions/view.md index 8a97253d048..91b68b31653 100644 --- a/docs/ru/sql-reference/table-functions/view.md +++ b/docs/ru/sql-reference/table-functions/view.md @@ -8,7 +8,7 @@ view(subquery) ``` -**Входные параметры** +**Аргументы** - `subquery` — запрос `SELECT`. 
@@ -32,7 +32,7 @@ view(subquery) Запрос: ``` sql -SELECT * FROM view(SELECT name FROM months) +SELECT * FROM view(SELECT name FROM months); ``` Результат: @@ -49,14 +49,15 @@ SELECT * FROM view(SELECT name FROM months) Вы можете использовать функцию `view` как параметр табличных функций [remote](https://clickhouse.tech/docs/ru/sql-reference/table-functions/remote/#remote-remotesecure) и [cluster](https://clickhouse.tech/docs/ru/sql-reference/table-functions/cluster/#cluster-clusterallreplicas): ``` sql -SELECT * FROM remote(`127.0.0.1`, view(SELECT a, b, c FROM table_name)) +SELECT * FROM remote(`127.0.0.1`, view(SELECT a, b, c FROM table_name)); ``` ``` sql -SELECT * FROM cluster(`cluster_name`, view(SELECT a, b, c FROM table_name)) +SELECT * FROM cluster(`cluster_name`, view(SELECT a, b, c FROM table_name)); ``` **Смотрите также** - [view](https://clickhouse.tech/docs/ru/engines/table-engines/special/view/#table_engines-view) -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/view/) \ No newline at end of file + +[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/view/) From ee483d89e5887d6e8494c37f8896082d9af048d0 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Sat, 13 Mar 2021 23:16:24 +0300 Subject: [PATCH 153/333] Fix style --- src/Interpreters/Context.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index bb2d553b8e8..1f2da5c3946 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1835,9 +1835,8 @@ void Context::setClustersConfig(const ConfigurationPtr & config, const String & std::lock_guard lock(shared->clusters_mutex); /// Do not update clusters if this part of config wasn't changed. 
- if (shared->clusters && isSameConfiguration(*config, *shared->clusters_config, config_name)) { + if (shared->clusters && isSameConfiguration(*config, *shared->clusters_config, config_name)) return; - } auto old_clusters_config = shared->clusters_config; shared->clusters_config = config; From d83b4d3ab6c342bf369e441e3e442386f2309daf Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Sat, 13 Mar 2021 23:43:33 +0300 Subject: [PATCH 154/333] Try fix flacky test --- .../0_stateless/01753_system_zookeeper_query_param_path.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh index 75a8e1b95ac..33d979d4bb7 100755 --- a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh +++ b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh @@ -6,9 +6,9 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test_01753"; -${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_01753 (n Int8) ENGINE=ReplicatedMergeTree('/test_01753/test', 'r') ORDER BY n" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_01753 (n Int8) ENGINE=ReplicatedMergeTree('/"$CLICKHOUSE_DATABASE"/test_01753/test', '1') ORDER BY n" -${CLICKHOUSE_CLIENT} --query="SELECT name FROM system.zookeeper WHERE path = {path:String}" --param_path "/test_01753" +${CLICKHOUSE_CLIENT} --query="SELECT name FROM system.zookeeper WHERE path = {path:String}" --param_path $CLICKHOUSE_DATABASE"/test_01753" ${CLICKHOUSE_CLIENT} --query="DROP TABLE test_01753 SYNC"; From 57d1dc7a6cdc6f87c9f64f91debf47355c6a0e32 Mon Sep 17 00:00:00 2001 From: George Date: Sun, 14 Mar 2021 00:33:55 +0300 Subject: [PATCH 155/333] Work-in-progress --- .../external-authenticators/ldap.md | 55 ++++----- .../external-authenticators/ldap.md | 108 +++++++++--------- 2 files changed, 79 insertions(+), 84 deletions(-) diff --git 
a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md index 523a4ff2993..cb8aa07dc41 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -2,10 +2,10 @@ LDAP server can be used to authenticate ClickHouse users. There are two different approaches for doing this: -- use LDAP as an external authenticator for existing users, which are defined in `users.xml` or in local access control paths -- use LDAP as an external user directory and allow locally undefined users to be authenticated if they exist on the LDAP server +- Use LDAP as an external authenticator for existing users, which are defined in `users.xml` or in local access control paths. +- Use LDAP as an external user directory and allow locally undefined users to be authenticated if they exist on the LDAP server. -For both of these approaches, an internally named LDAP server must be defined in the ClickHouse config so that other parts of config are able to refer to it. +For both of these approaches, an internally named LDAP server must be defined in the ClickHouse config so that other parts of the config can refer to it. ## LDAP Server Definition {#ldap-server-definition} @@ -43,27 +43,27 @@ Note, that you can define multiple LDAP servers inside the `ldap_servers` sectio - The resulting DN will be constructed by replacing all `{user_name}` substrings of the template with the actual user name during each authentication attempt. - `verification_cooldown` — a period of time, in seconds, after a successful bind attempt, during which the user will be assumed to be successfully authenticated for all consecutive requests without contacting the LDAP server. - Specify `0` (the default) to disable caching and force contacting the LDAP server for each authentication request. -- `enable_tls` — flag to trigger use of secure connection to the LDAP server. 
+- `enable_tls` — a flag to trigger the use of the secure connection to the LDAP server. - Specify `no` for plain text `ldap://` protocol (not recommended). - Specify `yes` for LDAP over SSL/TLS `ldaps://` protocol (recommended, the default). - Specify `starttls` for legacy StartTLS protocol (plain text `ldap://` protocol, upgraded to TLS). -- `tls_minimum_protocol_version` — the minimum protocol version of SSL/TLS. +- `tls_minimum_protocol_version` — The minimum protocol version of SSL/TLS. - Accepted values are: `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, `tls1.2` (the default). - `tls_require_cert` — SSL/TLS peer certificate verification behavior. - Accepted values are: `never`, `allow`, `try`, `demand` (the default). -- `tls_cert_file` — path to certificate file. -- `tls_key_file` — path to certificate key file. -- `tls_ca_cert_file` — path to CA certificate file. -- `tls_ca_cert_dir` — path to the directory containing CA certificates. -- `tls_cipher_suite` — allowed cipher suite (in OpenSSL notation). +- `tls_cert_file` — Path to certificate file. +- `tls_key_file` — Path to certificate key file. +- `tls_ca_cert_file` — Path to CA certificate file. +- `tls_ca_cert_dir` — Path to the directory containing CA certificates. +- `tls_cipher_suite` — Allowed cipher suite (in OpenSSL notation). ## LDAP External Authenticator {#ldap-external-authenticator} -A remote LDAP server can be used as a method for verifying passwords for locally defined users (users defined in `users.xml` or in local access control paths). In order to achieve this, specify previously defined LDAP server name instead of `password` or similar sections in the user definition. +A remote LDAP server can be used as a method for verifying passwords for locally defined users (users defined in `users.xml` or in local access control paths). To achieve this, specify previously defined LDAP server name instead of `password` or similar sections in the user definition. 
-At each login attempt, ClickHouse will try to "bind" to the specified DN defined by the `bind_dn` parameter in the [LDAP server definition](#ldap-server-definition) using the provided credentials, and if successful, the user will be considered authenticated. This is often called a "simple bind" method. +At each login attempt, ClickHouse tries to "bind" to the specified DN defined by the `bind_dn` parameter in the [LDAP server definition](#ldap-server-definition) using the provided credentials, and if successful, the user is considered authenticated. This is often called a "simple bind" method. -For example, +**Example** ```xml @@ -82,7 +82,7 @@ For example, Note, that user `my_user` refers to `my_ldap_server`. This LDAP server must be configured in the main `config.xml` file as described previously. -When SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled in ClickHouse, users that are authenticated by LDAP servers can also be created using the [CRATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement. +When SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled, users that are authenticated by LDAP servers can also be created using the [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement. Query: @@ -92,11 +92,13 @@ CREATE USER my_user IDENTIFIED WITH ldap_server BY 'my_ldap_server'; ## LDAP Exernal User Directory {#ldap-external-user-directory} -In addition to the locally defined users, a remote LDAP server can be used as a source of user definitions. In order to achieve this, specify previously defined LDAP server name (see [LDAP Server Definition](#ldap-server-definition)) in the `ldap` section inside the `users_directories` section of the `config.xml` file. +In addition to the locally defined users, a remote LDAP server can be used as a source of user definitions. 
To achieve this, specify previously defined LDAP server name (see [LDAP Server Definition](#ldap-server-definition)) in the `ldap` section inside the `users_directories` section of the `config.xml` file. -At each login attempt, ClickHouse will try to find the user definition locally and authenticate it as usual, but if the user is not defined, ClickHouse will assume it exists in the external LDAP directory, and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. All this implies that the SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled and roles are created using the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. +At each login attempt, ClickHouse tries to find the user definition locally and authenticate it as usual. If the user is not defined, ClickHouse will assume the definition exists in the external LDAP directory and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. All this implies that the SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled and roles are created using the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. 
-Example (goes into `config.xml`): +**Example** + +Goes into `config.xml`. ```xml @@ -123,23 +125,22 @@ Example (goes into `config.xml`): Note that `my_ldap_server` referred in the `ldap` section inside the `user_directories` section must be a previously defined LDAP server that is configured in the `config.xml` (see [LDAP Server Definition](#ldap-server-definition)). -Parameters: +**Parameters** -- `server` — one of LDAP server names defined in the `ldap_servers` config section above. - This parameter is mandatory and cannot be empty. -- `roles` — section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. +- `server` — One of LDAP server names defined in the `ldap_servers` config section above. This parameter is mandatory and cannot be empty. Одно из имен +- `roles` — Section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. - If no roles are specified here or assigned during role mapping (below), user will not be able to perform any actions after authentication. -- `role_mapping` — section with LDAP search parameters and mapping rules. +- `role_mapping` — Section with LDAP search parameters and mapping rules. - When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` and the name of the logged in user. For each entry found during that search, the value of the specified attribute is extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. - There can be multiple `role_mapping` sections defined inside the same `ldap` section. All of them will be applied. - - `base_dn` — template used to construct the base DN for the LDAP search. 
+ - `base_dn` — Template used to construct the base DN for the LDAP search. - The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}` substrings of the template with the actual user name and bind DN during each LDAP search. - - `scope` — scope of the LDAP search. + - `scope` — Scope of the LDAP search. - Accepted values are: `base`, `one_level`, `children`, `subtree` (the default). - - `search_filter` — template used to construct the search filter for the LDAP search. + - `search_filter` — Template used to construct the search filter for the LDAP search. - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{base_dn}` substrings of the template with the actual user name, bind DN, and base DN during each LDAP search. - Note, that the special characters must be escaped properly in XML. - - `attribute` — attribute name whose values will be returned by the LDAP search. - - `prefix` — prefix, that will be expected to be in front of each string in the original list of strings returned by the LDAP search. Prefix will be removed from the original strings and resulting strings will be treated as local role names. Empty, by default. + - `attribute` — Attribute name whose values will be returned by the LDAP search. + - `prefix` — Prefix, that will be expected to be in front of each string in the original list of strings returned by the LDAP search. Prefix will be removed from the original strings and resulting strings will be treated as local role names. Empty, by default. 
[Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap.md) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index fe364c69f05..3d71ec1eba3 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -1,11 +1,11 @@ # LDAP {#external-authenticators-ldap} -Для аутентификации пользователей ClickHouse можно использовать сервер LDAP. Для этого есть два разных подхода: +Для аутентификации пользователей ClickHouse можно использовать сервер LDAP. Можно использовать два подхода: -- использовать LDAP как внешний аутентификатор для существующих пользователей, которые определены в `users.xml` или в локальных путях управления контролем -- использовать LDAP как внешний пользовательский каталог и разрешить аутентификацию локально неопределенных пользователей, если они есть на LDAP сервере +- Использовать LDAP как внешний аутентификатор для существующих пользователей, которые определены в `users.xml` или в локальных путях управления контролем. +- Использовать LDAP как внешний пользовательский каталог и разрешить аутентификацию локально неопределенных пользователей, если они есть на LDAP сервере. -Для этих обоих подходов необходимо определить в ClickHouse конфиге внутренне названный LDAP сервер, чтобы другие части конфига могли ссылаться на него. +Для обоих подходов необходимо определить в конфиге ClickHouse внутренне названный LDAP сервер, чтобы другие части конфига могли ссылаться на него. ## Определение LDAP сервера {#ldap-server-definition} @@ -37,33 +37,33 @@ **Параметры** -- `host` — LDAP server hostname or IP, this parameter is mandatory and cannot be empty. имя хоста сервера LDAP или его IP. Этот параметр обязательный и не может быть пустым. -- `port` — порт сервера LDAP. По-умолчанию: при значение `true` настройки `enable_tls` — `636`, иначе `389`. +- `host` — имя хоста сервера LDAP или его IP. 
Этот параметр обязательный и не может быть пустым. +- `port` — порт сервера LDAP. По-умолчанию: при значении `true` настройки `enable_tls` — `636`, иначе `389`. - `bind_dn` — шаблон для создания DN для привязки. - - конечный DN будет создан заменой всех подстрок `{user_name}` шаблона на настоящее имя пользователя при каждой попытке аутентификации. -- `verification_cooldown` — a period of time, in seconds, after a successful bind attempt, during which the user will be assumed to be successfully authenticated for all consecutive requests without contacting the LDAP server. - - Specify `0` (the default) to disable caching and force contacting the LDAP server for each authentication request. -- `enable_tls` — flag to trigger use of secure connection to the LDAP server. - - Specify `no` for plain text `ldap://` protocol (not recommended). - - Specify `yes` for LDAP over SSL/TLS `ldaps://` protocol (recommended, the default). - - Specify `starttls` for legacy StartTLS protocol (plain text `ldap://` protocol, upgraded to TLS). -- `tls_minimum_protocol_version` — the minimum protocol version of SSL/TLS. - - Accepted values are: `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, `tls1.2` (the default). -- `tls_require_cert` — SSL/TLS peer certificate verification behavior. - - Accepted values are: `never`, `allow`, `try`, `demand` (the default). -- `tls_cert_file` — path to certificate file. -- `tls_key_file` — path to certificate key file. -- `tls_ca_cert_file` — path to CA certificate file. -- `tls_ca_cert_dir` — path to the directory containing CA certificates. -- `tls_cipher_suite` — allowed cipher suite (in OpenSSL notation). + - конечный DN будет создан заменой всех подстрок `{user_name}` шаблона на фактическое имя пользователя при каждой попытке аутентификации. +- `verification_cooldown` — промежуток времени (в секундах) после успешной попытки привязки, в течение которого пользователь будет считаться успешно аутентифицированным без с сервером LDAP для всех последующих запросов. 
+ - Укажите `0` (по-умолчанию), чтобы отключить кеширования и заставить связываться с сервером LDAP для каждого запроса аутетификации. +- `enable_tls` — флаг, включающий использование защищенного соединения с сервером LDAP. + - Укажите `no` для текстового `ldap://` протокола (не рекомендовано). + - Укажите `yes` для LDAP через SSL/TLS `ldaps://` протокола (рекомендовано, используется по-умолчанию). + - Укажите `starttls` для устаревшего StartTLS протокола (текстовый `ldap://` протокол, модернизированный до TLS). +- `tls_minimum_protocol_version` — минимальная версия протокола SSL/TLS. + - Принимаемые значения: `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, `tls1.2` (по-умолчанию). +- `tls_require_cert` — поведение при проверке сертификата SSL/TLS. + - Принимаемые значения: `never`, `allow`, `try`, `demand` (по-умолчанию). +- `tls_cert_file` — путь до файла сертификата. +- `tls_key_file` — путь к файлу ключа сертификата. +- `tls_ca_cert_file` — путь к файлу ЦС сертификата. +- `tls_ca_cert_dir` — путь к каталогу, содержащая сертификаты ЦС. +- `tls_cipher_suite` — разрешить набор шифров (в нотации OpenSSL). -## LDAP External Authenticator {#ldap-external-authenticator} +## LDAP внешний аутентификатор {#ldap-external-authenticator} -A remote LDAP server can be used as a method for verifying passwords for locally defined users (users defined in `users.xml` or in local access control paths). In order to achieve this, specify previously defined LDAP server name instead of `password` or similar sections in the user definition. +Удаленный сервер LDAP можно использовать как метод верификации паролей локально определенных пользователей (пользователей, которые определены в `users.xml` или в локальных путях управления контролем). Для этого укажите имя определенного до этого сервера LDAP вместо `password` или другой похожей секции в определении пользователя. 
-At each login attempt, ClickHouse will try to "bind" to the specified DN defined by the `bind_dn` parameter in the [LDAP server definition](#ldap-server-definition) using the provided credentials, and if successful, the user will be considered authenticated. This is often called a "simple bind" method. +При каждой попытке авторизации, ClickHouse пытается "привязаться" к DN, указанному в [определение LDAP сервера](#ldap-server-definition) параметром `bind_dn`, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается аутентифицированным. Обычно это называют методом "простой привязки". -For example, +**Например** ```xml @@ -80,22 +80,25 @@ For example, ``` -Note, that user `my_user` refers to `my_ldap_server`. This LDAP server must be configured in the main `config.xml` file as described previously. +Обратите внимание, что пользователь `my_user` ссылается на `my_ldap_server`. Этот LDAP сервер должен быть настроен в основном файле `config.xml`, как это было описано ранее. -When SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled in ClickHouse, users that are authenticated by LDAP servers can also be created using the [CRATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement. +При включенном SQL-ориентированным [Управлением доступом](../access-rights.md#access-control) пользователи, аутентифицированные LDAP серверами, могут также быть созданы выражением [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement). +Запрос: ```sql -CREATE USER my_user IDENTIFIED WITH ldap_server BY 'my_ldap_server' +CREATE USER my_user IDENTIFIED WITH ldap_server BY 'my_ldap_server'; ``` -## LDAP Exernal User Directory {#ldap-external-user-directory} +## Внешний пользовательский каталог LDAP {#ldap-external-user-directory} -In addition to the locally defined users, a remote LDAP server can be used as a source of user definitions. 
In order to achieve this, specify previously defined LDAP server name (see [LDAP Server Definition](#ldap-server-definition)) in the `ldap` section inside the `users_directories` section of the `config.xml` file. +В добавок к локально определенным пользователям, удаленный LDAP сервер может быть использован как источник определения пользователей. Для этого укажите имя определенного до этого сервера LDAP (см. [Определение LDAP сервера](#ldap-server-definition)) в секции `ldap` внутри секции `users_directories` файла `config.xml`. -At each login attempt, ClickHouse will try to find the user definition locally and authenticate it as usual, but if the user is not defined, ClickHouse will assume it exists in the external LDAP directory, and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. All this implies that the SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled and roles are created using the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. +При каждой попытке авторизации, ClicHouse пытается локально найти определение пользователя и авторизовать его как обычно. Если определение не будет найдено, ClickHouse предполагает, что оно находится во внешнем LDAP каталоге, и попытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю будут присвоены роли из списка, указанного в секции `roles`. 
Кроме того, может быть выполнен LDAP поиск, а его результаты могут быть преобразованы в имена ролей и присвоены пользователям, если была настроена секция `role_mapping`. Все это работает при условии, что SQL-ориентированное [Управлением доступом](../access-rights.md#access-control) включено, а роли созданы выражением[CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). -Example (goes into `config.xml`): +**Пример** + +В `config.xml`. ```xml @@ -120,35 +123,26 @@ Example (goes into `config.xml`): ``` -Note that `my_ldap_server` referred in the `ldap` section inside the `user_directories` section must be a previously -defined LDAP server that is configured in the `config.xml` (see [LDAP Server Definition](#ldap-server-definition)). +Обратите внимание, что `my_ldap_server`, указанный в секции `ldap` внутри секции `user_directories`, должен быть настроен в файле `config.xml`, как это было описано ранее. (см. [Определение LDAP сервера](#ldap-server-definition)). -Parameters: +**Параметры** -- `server` - one of LDAP server names defined in the `ldap_servers` config section above. +- `server` — One of LDAP server names defined in the `ldap_servers` config section above. This parameter is mandatory and cannot be empty. -- `roles` - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. - - If no roles are specified here or assigned during role mapping (below), user will not be able - to perform any actions after authentication. -- `role_mapping` - section with LDAP search parameters and mapping rules. - - When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` - and the name of the logged in user. For each entry found during that search, the value of the specified - attribute is extracted. 
For each attribute value that has the specified prefix, the prefix is removed, - and the rest of the value becomes the name of a local role defined in ClickHouse, - which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. +- `roles` — Section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. + - If no roles are specified here or assigned during role mapping (below), user will not be able to perform any actions after authentication. +- `role_mapping` — Section with LDAP search parameters and mapping rules. + - When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` and the name of the logged in user. For each entry found during that search, the value of the specified attribute is extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. - There can be multiple `role_mapping` sections defined inside the same `ldap` section. All of them will be applied. - - `base_dn` - template used to construct the base DN for the LDAP search. - - The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}` - substrings of the template with the actual user name and bind DN during each LDAP search. - - `scope` - scope of the LDAP search. + - `base_dn` — Template used to construct the base DN for the LDAP search. + - The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}` substrings of the template with the actual user name and bind DN during each LDAP search. + - `scope` — Scope of the LDAP search. - Accepted values are: `base`, `one_level`, `children`, `subtree` (the default). 
- - `search_filter` - template used to construct the search filter for the LDAP search. - - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{base_dn}` - substrings of the template with the actual user name, bind DN, and base DN during each LDAP search. + - `search_filter` — Template used to construct the search filter for the LDAP search. + - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{base_dn}` substrings of the template with the actual user name, bind DN, and base DN during each LDAP search. - Note, that the special characters must be escaped properly in XML. - - `attribute` - attribute name whose values will be returned by the LDAP search. - - `prefix` - prefix, that will be expected to be in front of each string in the original - list of strings returned by the LDAP search. Prefix will be removed from the original - strings and resulting strings will be treated as local role names. Empty, by default. + - `attribute` — Attribute name whose values will be returned by the LDAP search. + - `prefix` — Prefix, that will be expected to be in front of each string in the original list of strings returned by the LDAP search. Prefix will be removed from the original strings and resulting strings will be treated as local role names. Empty, by default. 
+ [Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap.md) From 3e3b5c64bf33c6777d4df0113fda0d9beeaac6f4 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Sun, 14 Mar 2021 02:34:32 +0300 Subject: [PATCH 156/333] add init file --- tests/integration/test_reload_clusters_config/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/integration/test_reload_clusters_config/__init__.py diff --git a/tests/integration/test_reload_clusters_config/__init__.py b/tests/integration/test_reload_clusters_config/__init__.py new file mode 100644 index 00000000000..e69de29bb2d From e116e75d2cb52c09ca31b5851e22bf100e36c07b Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Sun, 14 Mar 2021 02:36:40 +0300 Subject: [PATCH 157/333] Fix style --- .../0_stateless/01753_system_zookeeper_query_param_path.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh index 33d979d4bb7..1f4ba412a19 100755 --- a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh +++ b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh @@ -6,9 +6,9 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test_01753"; -${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_01753 (n Int8) ENGINE=ReplicatedMergeTree('/"$CLICKHOUSE_DATABASE"/test_01753/test', '1') ORDER BY n" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_01753 (n Int8) ENGINE=ReplicatedMergeTree('/$CLICKHOUSE_DATABASE/test_01753/test', '1') ORDER BY n" -${CLICKHOUSE_CLIENT} --query="SELECT name FROM system.zookeeper WHERE path = {path:String}" --param_path $CLICKHOUSE_DATABASE"/test_01753" +${CLICKHOUSE_CLIENT} --query="SELECT name FROM system.zookeeper WHERE path = {path:String}" --param_path "$CLICKHOUSE_DATABASE/test_01753" ${CLICKHOUSE_CLIENT} 
--query="DROP TABLE test_01753 SYNC"; From 8eea6a87e991eec1a196ea5291b64ec8293ab583 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 14 Mar 2021 08:35:07 +0000 Subject: [PATCH 158/333] Support replicas priority for postgres dictionary source --- .../compose/docker_compose_postgres.yml | 7 ++ src/Common/ErrorCodes.cpp | 1 + .../PostgreSQL/DatabasePostgreSQL.cpp | 2 +- .../PostgreSQLDictionarySource.cpp | 31 +++---- src/Dictionaries/PostgreSQLDictionarySource.h | 6 +- .../PostgreSQL/PostgreSQLConnection.cpp | 44 +++++++++- .../PostgreSQL/PostgreSQLConnection.h | 18 ++-- .../PostgreSQLReplicaConnection.cpp | 77 +++++++++++++++++ .../PostgreSQL/PostgreSQLReplicaConnection.h | 35 ++++++++ .../configs/dictionaries/postgres_dict.xml | 83 +++++++++++++++++++ .../configs/postgres_dict.xml | 37 --------- .../test_dictionaries_postgresql/test.py | 76 +++++++++++++---- 12 files changed, 332 insertions(+), 85 deletions(-) create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaConnection.cpp create mode 100644 src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h create mode 100644 tests/integration/test_dictionaries_postgresql/configs/dictionaries/postgres_dict.xml delete mode 100644 tests/integration/test_dictionaries_postgresql/configs/postgres_dict.xml diff --git a/docker/test/integration/runner/compose/docker_compose_postgres.yml b/docker/test/integration/runner/compose/docker_compose_postgres.yml index 5657352e1b3..58ed97251fb 100644 --- a/docker/test/integration/runner/compose/docker_compose_postgres.yml +++ b/docker/test/integration/runner/compose/docker_compose_postgres.yml @@ -11,3 +11,10 @@ services: default: aliases: - postgre-sql.local + postgres2: + image: postgres + restart: always + environment: + POSTGRES_PASSWORD: mysecretpassword + ports: + - 5441:5432 diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index f44cfd938d6..3a0fccc5358 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -546,6 +546,7 @@ M(577, 
INVALID_SHARD_ID) \ M(578, INVALID_FORMAT_INSERT_QUERY_WITH_DATA) \ \ + M(998, POSTGRESQL_CONNECTION_FAILURE) \ M(999, KEEPER_EXCEPTION) \ M(1000, POCO_EXCEPTION) \ M(1001, STD_EXCEPTION) \ diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index 722b9c64edb..511cf8d847c 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -168,7 +168,7 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, const Conte return StoragePtr{}; auto storage = StoragePostgreSQL::create( - StorageID(database_name, table_name), table_name, std::make_shared(connection->conn_str()), + StorageID(database_name, table_name), table_name, std::make_shared(*connection), ColumnsDescription{*columns}, ConstraintsDescription{}, context); if (cache_tables) diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index aa852404750..5b71ad61120 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -8,7 +8,6 @@ #include #include #include -#include #include "readInvalidateQuery.h" #endif @@ -29,11 +28,10 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config_, const std::string & config_prefix, - PostgreSQLConnectionPtr connection_, const Block & sample_block_) : dict_struct{dict_struct_} , sample_block(sample_block_) - , connection(std::move(connection_)) + , connection(std::make_shared(config_, config_prefix)) , log(&Poco::Logger::get("PostgreSQLDictionarySource")) , db(config_.getString(fmt::format("{}.db", config_prefix), "")) , table(config_.getString(fmt::format("{}.table", config_prefix), "")) @@ -50,7 +48,7 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource( PostgreSQLDictionarySource::PostgreSQLDictionarySource(const 
PostgreSQLDictionarySource & other) : dict_struct(other.dict_struct) , sample_block(other.sample_block) - , connection(std::make_shared(other.connection->conn_str())) + , connection(other.connection) , log(&Poco::Logger::get("PostgreSQLDictionarySource")) , db(other.db) , table(other.table) @@ -68,8 +66,7 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource(const PostgreSQLDictionar BlockInputStreamPtr PostgreSQLDictionarySource::loadAll() { LOG_TRACE(log, load_all_query); - return std::make_shared( - connection->conn(), load_all_query, sample_block, max_block_size); + return loadBase(load_all_query); } @@ -77,23 +74,28 @@ BlockInputStreamPtr PostgreSQLDictionarySource::loadUpdatedAll() { auto load_update_query = getUpdateFieldAndDate(); LOG_TRACE(log, load_update_query); - return std::make_shared(connection->conn(), load_update_query, sample_block, max_block_size); + return loadBase(load_update_query); } BlockInputStreamPtr PostgreSQLDictionarySource::loadIds(const std::vector & ids) { const auto query = query_builder.composeLoadIdsQuery(ids); - return std::make_shared(connection->conn(), query, sample_block, max_block_size); + return loadBase(query); } BlockInputStreamPtr PostgreSQLDictionarySource::loadKeys(const Columns & key_columns, const std::vector & requested_rows) { const auto query = query_builder.composeLoadKeysQuery(key_columns, requested_rows, ExternalQueryBuilder::AND_OR_CHAIN); - return std::make_shared(connection->conn(), query, sample_block, max_block_size); + return loadBase(query); } +BlockInputStreamPtr PostgreSQLDictionarySource::loadBase(const String & query) +{ + return std::make_shared(connection->get(), query, sample_block, max_block_size); +} + bool PostgreSQLDictionarySource::isModified() const { if (!invalidate_query.empty()) @@ -112,7 +114,7 @@ std::string PostgreSQLDictionarySource::doInvalidateQuery(const std::string & re Block invalidate_sample_block; ColumnPtr column(ColumnString::create()); 
invalidate_sample_block.insert(ColumnWithTypeAndName(column, std::make_shared(), "Sample Block")); - PostgreSQLBlockInputStream block_input_stream(connection->conn(), request, invalidate_sample_block, 1); + PostgreSQLBlockInputStream block_input_stream(connection->get(), request, invalidate_sample_block, 1); return readInvalidateQuery(block_input_stream); } @@ -172,15 +174,8 @@ void registerDictionarySourcePostgreSQL(DictionarySourceFactory & factory) { #if USE_LIBPQXX const auto config_prefix = root_config_prefix + ".postgresql"; - auto connection = std::make_shared( - config.getString(fmt::format("{}.db", config_prefix), ""), - config.getString(fmt::format("{}.host", config_prefix), ""), - config.getUInt(fmt::format("{}.port", config_prefix), 0), - config.getString(fmt::format("{}.user", config_prefix), ""), - config.getString(fmt::format("{}.password", config_prefix), "")); - return std::make_unique( - dict_struct, config, config_prefix, connection, sample_block); + dict_struct, config, config_prefix, sample_block); #else (void)dict_struct; (void)config; diff --git a/src/Dictionaries/PostgreSQLDictionarySource.h b/src/Dictionaries/PostgreSQLDictionarySource.h index a826ff15f4f..dd2d35db83a 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.h +++ b/src/Dictionaries/PostgreSQLDictionarySource.h @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include @@ -26,7 +26,6 @@ public: const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config_, const std::string & config_prefix, - PostgreSQLConnectionPtr connection_, const Block & sample_block_); /// copy-constructor is provided in order to support cloneability @@ -48,10 +47,11 @@ public: private: std::string getUpdateFieldAndDate(); std::string doInvalidateQuery(const std::string & request) const; + BlockInputStreamPtr loadBase(const String & query); const DictionaryStructure dict_struct; Block sample_block; - PostgreSQLConnectionPtr connection; + 
PostgreSQLReplicaConnectionPtr connection; Poco::Logger * log; const std::string db; diff --git a/src/Storages/PostgreSQL/PostgreSQLConnection.cpp b/src/Storages/PostgreSQL/PostgreSQLConnection.cpp index 668550ec721..58eb7192eb9 100644 --- a/src/Storages/PostgreSQL/PostgreSQLConnection.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLConnection.cpp @@ -6,23 +6,63 @@ #include #include #include +#include namespace DB { +PostgreSQLConnection::PostgreSQLConnection(std::string dbname, std::string host, UInt16 port, std::string user, std::string password) +{ + address = host + ':' + std::to_string(port); + connection_str = formatConnectionString(std::move(dbname), std::move(host), port, std::move(user), std::move(password)); +} + + +PostgreSQLConnection::PostgreSQLConnection(const PostgreSQLConnection & other) + : connection_str(other.connection_str) + , address(other.address) +{ +} + + PostgreSQLConnection::ConnectionPtr PostgreSQLConnection::conn() { - checkUpdateConnection(); + connect(); return connection; } -void PostgreSQLConnection::checkUpdateConnection() + +void PostgreSQLConnection::connect() { if (!connection || !connection->is_open()) connection = std::make_unique(connection_str); } + +bool PostgreSQLConnection::tryConnect() +{ + try + { + connect(); + } + catch (const pqxx::broken_connection & pqxx_error) + { + LOG_ERROR( + &Poco::Logger::get("PostgreSQLConnection"), + "Unable to setup connection to {}, reason: {}", + getAddress(), pqxx_error.what()); + return false; + } + catch (...) 
+ { + throw; + } + + return true; +} + + std::string PostgreSQLConnection::formatConnectionString( std::string dbname, std::string host, UInt16 port, std::string user, std::string password) { diff --git a/src/Storages/PostgreSQL/PostgreSQLConnection.h b/src/Storages/PostgreSQL/PostgreSQLConnection.h index ae79a3436e0..f23308ddef9 100644 --- a/src/Storages/PostgreSQL/PostgreSQLConnection.h +++ b/src/Storages/PostgreSQL/PostgreSQLConnection.h @@ -16,29 +16,31 @@ namespace DB /// Connection is not made until actually used. class PostgreSQLConnection { +public: using ConnectionPtr = std::shared_ptr; -public: - PostgreSQLConnection(std::string dbname, std::string host, UInt16 port, std::string user, std::string password) - : connection_str(formatConnectionString(std::move(dbname), std::move(host), port, std::move(user), std::move(password))) {} + PostgreSQLConnection(std::string dbname, std::string host, UInt16 port, std::string user, std::string password); - PostgreSQLConnection(const std::string & connection_str_) : connection_str(connection_str_) {} + PostgreSQLConnection(const PostgreSQLConnection & other); - PostgreSQLConnection(const PostgreSQLConnection &) = delete; PostgreSQLConnection operator =(const PostgreSQLConnection &) = delete; + bool tryConnect(); + ConnectionPtr conn(); + const std::string & getAddress() { return address; } + std::string & conn_str() { return connection_str; } private: - ConnectionPtr connection; - std::string connection_str; + void connect(); static std::string formatConnectionString( std::string dbname, std::string host, UInt16 port, std::string user, std::string password); - void checkUpdateConnection(); + ConnectionPtr connection; + std::string connection_str, address; }; using PostgreSQLConnectionPtr = std::shared_ptr; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.cpp new file mode 100644 index 00000000000..0c1efc16e05 --- /dev/null +++ 
b/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.cpp @@ -0,0 +1,77 @@ +#include "PostgreSQLReplicaConnection.h" +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int POSTGRESQL_CONNECTION_FAILURE; +} + + +PostgreSQLReplicaConnection::PostgreSQLReplicaConnection( + const Poco::Util::AbstractConfiguration & config, + const String & config_prefix, + const size_t num_retries_) + : log(&Poco::Logger::get("PostgreSQLConnection")) + , num_retries(num_retries_) +{ + auto db = config.getString(config_prefix + ".db", ""); + auto host = config.getString(config_prefix + ".host", ""); + auto port = config.getUInt(config_prefix + ".port", 0); + auto user = config.getString(config_prefix + ".user", ""); + auto password = config.getString(config_prefix + ".password", ""); + + if (config.has(config_prefix + ".replica")) + { + Poco::Util::AbstractConfiguration::Keys config_keys; + config.keys(config_prefix, config_keys); + + for (const auto & config_key : config_keys) + { + if (config_key.starts_with("replica")) + { + std::string replica_name = config_prefix + "." 
+ config_key; + size_t priority = config.getInt(replica_name + ".priority", 0); + + auto replica_host = config.getString(replica_name + ".host", host); + auto replica_port = config.getUInt(replica_name + ".port", port); + auto replica_user = config.getString(replica_name + ".user", user); + auto replica_password = config.getString(replica_name + ".password", password); + + replicas[priority] = std::make_shared(db, replica_host, replica_port, replica_user, replica_password); + } + } + } + else + { + replicas[0] = std::make_shared(db, host, port, user, password); + } +} + + +PostgreSQLReplicaConnection::PostgreSQLReplicaConnection(const PostgreSQLReplicaConnection & other) + : log(&Poco::Logger::get("PostgreSQLConnection")) + , replicas(other.replicas) + , num_retries(other.num_retries) +{ +} + + +PostgreSQLConnection::ConnectionPtr PostgreSQLReplicaConnection::get() +{ + for (size_t i = 0; i < num_retries; ++i) + { + for (auto & replica : replicas) + { + if (replica.second->tryConnect()) + return replica.second->conn(); + } + } + + throw Exception(ErrorCodes::POSTGRESQL_CONNECTION_FAILURE, "Unable to connect to any of the replicas"); +} + +} diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h new file mode 100644 index 00000000000..9b5dec5e9a5 --- /dev/null +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h @@ -0,0 +1,35 @@ +#include "PostgreSQLConnection.h" +#include +#include +#include + +namespace DB +{ + +class PostgreSQLReplicaConnection +{ + +public: + static constexpr inline auto POSTGRESQL_CONNECTION_DEFAULT_RETRIES_NUM = 5; + + PostgreSQLReplicaConnection( + const Poco::Util::AbstractConfiguration & config, + const String & config_name, + const size_t num_retries = POSTGRESQL_CONNECTION_DEFAULT_RETRIES_NUM); + + PostgreSQLReplicaConnection(const PostgreSQLReplicaConnection & other); + + PostgreSQLConnection::ConnectionPtr get(); + + +private: + using ReplicasByPriority = 
std::map; + + Poco::Logger * log; + ReplicasByPriority replicas; + size_t num_retries; +}; + +using PostgreSQLReplicaConnectionPtr = std::shared_ptr; + +} diff --git a/tests/integration/test_dictionaries_postgresql/configs/dictionaries/postgres_dict.xml b/tests/integration/test_dictionaries_postgresql/configs/dictionaries/postgres_dict.xml new file mode 100644 index 00000000000..4ee07d0972a --- /dev/null +++ b/tests/integration/test_dictionaries_postgresql/configs/dictionaries/postgres_dict.xml @@ -0,0 +1,83 @@ + + + + dict0 + + + clickhouse + postgres1 + 5432 + postgres + mysecretpassword + test0
+ SELECT value FROM test0 WHERE id = 0 +
+ + + + + + + id + UInt32 + + + id + UInt32 + + + + value + UInt32 + + + + 1 +
+ + dict1 + + + clickhouse + postgres + mysecretpassword + test1
+ + postgres1 + 3 + 5432 + + + postgres2 + 5433 + 1 + + + postgres2 + 5432 + 2 + +
+ + + + + + + id + UInt32 + + + id + UInt32 + + + + value + UInt32 + + + + 1 +
+
diff --git a/tests/integration/test_dictionaries_postgresql/configs/postgres_dict.xml b/tests/integration/test_dictionaries_postgresql/configs/postgres_dict.xml deleted file mode 100644 index 2572930a798..00000000000 --- a/tests/integration/test_dictionaries_postgresql/configs/postgres_dict.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - dict0 - - - clickhouse - postgres1 - 5432 - postgres - mysecretpassword - test0
- SELECT value FROM test0 WHERE id = 0 -
- - - - - - - id - UInt32 - - - id - UInt32 - - - - value - UInt32 - - - - 1 -
-
diff --git a/tests/integration/test_dictionaries_postgresql/test.py b/tests/integration/test_dictionaries_postgresql/test.py index b83c00409af..1ca3c89a5ee 100644 --- a/tests/integration/test_dictionaries_postgresql/test.py +++ b/tests/integration/test_dictionaries_postgresql/test.py @@ -6,7 +6,10 @@ from helpers.cluster import ClickHouseCluster from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/config.xml', 'configs/postgres_dict.xml', 'configs/log_conf.xml'], with_postgres=True) +node1 = cluster.add_instance('node1', main_configs=[ + 'configs/config.xml', + 'configs/dictionaries/postgres_dict.xml', + 'configs/log_conf.xml'], with_postgres=True) postgres_dict_table_template = """ CREATE TABLE IF NOT EXISTS {} ( @@ -18,11 +21,12 @@ click_dict_table_template = """ ) ENGINE = Dictionary({}) """ -def get_postgres_conn(database=False): +def get_postgres_conn(port=5432, database=False): if database == True: - conn_string = "host='localhost' dbname='clickhouse' user='postgres' password='mysecretpassword'" + conn_string = "host='localhost' port={} dbname='clickhouse' user='postgres' password='mysecretpassword'".format(port) else: - conn_string = "host='localhost' user='postgres' password='mysecretpassword'" + conn_string = "host='localhost' port={} user='postgres' password='mysecretpassword'".format(port) + conn = psycopg2.connect(conn_string) conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) conn.autocommit = True @@ -32,15 +36,13 @@ def create_postgres_db(conn, name): cursor = conn.cursor() cursor.execute("CREATE DATABASE {}".format(name)) -def create_postgres_table(conn, table_name): - cursor = conn.cursor() +def create_postgres_table(cursor, table_name): cursor.execute(postgres_dict_table_template.format(table_name)) -def create_and_fill_postgres_table(table_name): - conn = get_postgres_conn(True) - create_postgres_table(conn, table_name) +def 
create_and_fill_postgres_table(cursor, table_name, host='postgres1', port=5432): + create_postgres_table(cursor, table_name) # Fill postgres table using clickhouse postgres table function and check - table_func = '''postgresql('postgres1:5432', 'clickhouse', '{}', 'postgres', 'mysecretpassword')'''.format(table_name) + table_func = '''postgresql('{}:{}', 'clickhouse', '{}', 'postgres', 'mysecretpassword')'''.format(host, port, table_name) node1.query('''INSERT INTO TABLE FUNCTION {} SELECT number, number from numbers(10000) '''.format(table_func, table_name)) result = node1.query("SELECT count() FROM {}".format(table_func)) @@ -54,10 +56,16 @@ def create_dict(table_name, index=0): def started_cluster(): try: cluster.start() - postgres_conn = get_postgres_conn() node1.query("CREATE DATABASE IF NOT EXISTS test") - print("postgres connected") + + postgres_conn = get_postgres_conn(port=5432) + print("postgres1 connected") create_postgres_db(postgres_conn, 'clickhouse') + + postgres_conn = get_postgres_conn(port=5441) + print("postgres2 connected") + create_postgres_db(postgres_conn, 'clickhouse') + yield cluster finally: @@ -65,10 +73,10 @@ def started_cluster(): def test_load_dictionaries(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(database=True) cursor = conn.cursor() table_name = 'test0' - create_and_fill_postgres_table(table_name) + create_and_fill_postgres_table(cursor, table_name) create_dict(table_name) dict_name = 'dict0' @@ -76,14 +84,17 @@ def test_load_dictionaries(started_cluster): assert node1.query("SELECT count() FROM `test`.`dict_table_{}`".format(table_name)).rstrip() == '10000' assert node1.query("SELECT dictGetUInt32('{}', 'id', toUInt64(0))".format(dict_name)) == '0\n' assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(9999))".format(dict_name)) == '9999\n' + cursor.execute("DROP TABLE IF EXISTS {}".format(table_name)) + node1.query("DROP TABLE IF EXISTS {}".format(table_name)) + node1.query("DROP 
DICTIONARY IF EXISTS {}".format(dict_name)) def test_invalidate_query(started_cluster): - conn = get_postgres_conn(True) + conn = get_postgres_conn(database=True) cursor = conn.cursor() table_name = 'test0' - create_and_fill_postgres_table(table_name) + create_and_fill_postgres_table(cursor, table_name) # invalidate query: SELECT value FROM test0 WHERE id = 0 dict_name = 'dict0' @@ -112,6 +123,39 @@ def test_invalidate_query(started_cluster): assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(0))".format(dict_name)) == '2\n' assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(1))".format(dict_name)) == '2\n' + node1.query("DROP TABLE IF EXISTS {}".format(table_name)) + node1.query("DROP DICTIONARY IF EXISTS {}".format(dict_name)) + + +def test_dictionary_with_replicas(started_cluster): + conn1 = get_postgres_conn(port=5432, database=True) + cursor1 = conn1.cursor() + conn2 = get_postgres_conn(port=5441, database=True) + cursor2 = conn2.cursor() + + create_postgres_table(cursor1, 'test1') + create_postgres_table(cursor2, 'test1') + + cursor1.execute('INSERT INTO test1 select i, i from generate_series(0, 99) as t(i);'); + cursor2.execute('INSERT INTO test1 select i, i from generate_series(100, 199) as t(i);'); + + create_dict('test1', 1) + result = node1.query("SELECT * FROM `test`.`dict_table_test1` ORDER BY id") + + # priority 0 - non running port + assert node1.contains_in_log('Unable to setup connection to postgres2:5433*') + + # priority 1 - postgres2, table contains rows with values 100-200 + # priority 2 - postgres1, table contains rows with values 0-100 + expected = node1.query("SELECT number, number FROM numbers(100, 100)") + assert(result == expected) + + cursor1.execute("DROP TABLE IF EXISTS test1") + cursor2.execute("DROP TABLE IF EXISTS test1") + + node1.query("DROP TABLE IF EXISTS test1") + node1.query("DROP DICTIONARY IF EXISTS dict1") + if __name__ == '__main__': cluster.start() From 797063ae0fae95c3a79e8ccab1d3b19bee8e843d 
Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 14 Mar 2021 10:35:10 +0000 Subject: [PATCH 159/333] Support non-default table schema for postgres storage/table-function --- src/Storages/StoragePostgreSQL.cpp | 20 ++++++++++++------ src/Storages/StoragePostgreSQL.h | 4 +++- .../TableFunctionPostgreSQL.cpp | 16 +++++++++----- src/TableFunctions/TableFunctionPostgreSQL.h | 2 +- .../test_storage_postgresql/test.py | 21 +++++++++++++++++++ 5 files changed, 50 insertions(+), 13 deletions(-) diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp index 78ec8c34e41..3e122ed2fc7 100644 --- a/src/Storages/StoragePostgreSQL.cpp +++ b/src/Storages/StoragePostgreSQL.cpp @@ -45,9 +45,11 @@ StoragePostgreSQL::StoragePostgreSQL( PostgreSQLConnectionPtr connection_, const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, - const Context & context_) + const Context & context_, + const String & remote_table_schema_) : IStorage(table_id_) , remote_table_name(remote_table_name_) + , remote_table_schema(remote_table_schema_) , global_context(context_) , connection(std::move(connection_)) { @@ -69,9 +71,11 @@ Pipe StoragePostgreSQL::read( { metadata_snapshot->check(column_names_, getVirtuals(), getStorageID()); + /// Connection is already made to the needed database, so it should not be present in the query; + /// remote_table_schema is empty if it is not specified, will access only table_name. 
String query = transformQueryForExternalDatabase( query_info_, metadata_snapshot->getColumns().getOrdinary(), - IdentifierQuotingStyle::DoubleQuotes, "", remote_table_name, context_); + IdentifierQuotingStyle::DoubleQuotes, remote_table_schema, remote_table_name, context_); Block sample_block; for (const String & column_name : column_names_) @@ -293,9 +297,9 @@ void registerStoragePostgreSQL(StorageFactory & factory) { ASTs & engine_args = args.engine_args; - if (engine_args.size() != 5) - throw Exception("Storage PostgreSQL requires 5 parameters: " - "PostgreSQL('host:port', 'database', 'table', 'username', 'password'.", + if (engine_args.size() < 5 || engine_args.size() > 6) + throw Exception("Storage PostgreSQL requires 5-6 parameters: " + "PostgreSQL('host:port', 'database', 'table', 'username', 'password' [, 'schema']", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); for (auto & engine_arg : engine_args) @@ -304,6 +308,10 @@ void registerStoragePostgreSQL(StorageFactory & factory) auto parsed_host_port = parseAddress(engine_args[0]->as().value.safeGet(), 5432); const String & remote_table = engine_args[2]->as().value.safeGet(); + String remote_table_schema; + if (engine_args.size() == 6) + remote_table_schema = engine_args[5]->as().value.safeGet(); + auto connection = std::make_shared( engine_args[1]->as().value.safeGet(), parsed_host_port.first, @@ -312,7 +320,7 @@ void registerStoragePostgreSQL(StorageFactory & factory) engine_args[4]->as().value.safeGet()); return StoragePostgreSQL::create( - args.table_id, remote_table, connection, args.columns, args.constraints, args.context); + args.table_id, remote_table, connection, args.columns, args.constraints, args.context, remote_table_schema); }, { .source_access_type = AccessType::POSTGRES, diff --git a/src/Storages/StoragePostgreSQL.h b/src/Storages/StoragePostgreSQL.h index 8aebae5896b..0d574c9e98e 100644 --- a/src/Storages/StoragePostgreSQL.h +++ b/src/Storages/StoragePostgreSQL.h @@ -28,7 +28,8 @@ public: 
PostgreSQLConnectionPtr connection_, const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, - const Context & context_); + const Context & context_, + const std::string & remote_table_schema_ = ""); String getName() const override { return "PostgreSQL"; } @@ -47,6 +48,7 @@ private: friend class PostgreSQLBlockOutputStream; String remote_table_name; + String remote_table_schema; Context global_context; PostgreSQLConnectionPtr connection; }; diff --git a/src/TableFunctions/TableFunctionPostgreSQL.cpp b/src/TableFunctions/TableFunctionPostgreSQL.cpp index eefdff1fa87..0e3f1c5da24 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.cpp +++ b/src/TableFunctions/TableFunctionPostgreSQL.cpp @@ -29,7 +29,7 @@ StoragePtr TableFunctionPostgreSQL::executeImpl(const ASTPtr & /*ast_function*/, auto columns = getActualTableStructure(context); auto result = std::make_shared( StorageID(getDatabaseName(), table_name), remote_table_name, - connection, columns, ConstraintsDescription{}, context); + connection, columns, ConstraintsDescription{}, context, remote_table_schema); result->startup(); return result; @@ -39,7 +39,10 @@ StoragePtr TableFunctionPostgreSQL::executeImpl(const ASTPtr & /*ast_function*/, ColumnsDescription TableFunctionPostgreSQL::getActualTableStructure(const Context & context) const { const bool use_nulls = context.getSettingsRef().external_table_functions_use_nulls; - auto columns = fetchPostgreSQLTableStructure(connection->conn(), remote_table_name, use_nulls); + auto columns = fetchPostgreSQLTableStructure( + connection->conn(), + remote_table_schema.empty() ? remote_table_name : remote_table_schema + '.' 
+ remote_table_name, + use_nulls); return ColumnsDescription{*columns}; } @@ -54,9 +57,9 @@ void TableFunctionPostgreSQL::parseArguments(const ASTPtr & ast_function, const ASTs & args = func_args.arguments->children; - if (args.size() != 5) - throw Exception("Table function 'PostgreSQL' requires 5 parameters: " - "PostgreSQL('host:port', 'database', 'table', 'user', 'password').", + if (args.size() < 5 || args.size() > 6) + throw Exception("Table function 'PostgreSQL' requires 5-6 parameters: " + "PostgreSQL('host:port', 'database', 'table', 'user', 'password', [, 'schema']).", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); for (auto & arg : args) @@ -65,6 +68,9 @@ void TableFunctionPostgreSQL::parseArguments(const ASTPtr & ast_function, const auto parsed_host_port = parseAddress(args[0]->as().value.safeGet(), 5432); remote_table_name = args[2]->as().value.safeGet(); + if (args.size() == 6) + remote_table_schema = args[5]->as().value.safeGet(); + connection = std::make_shared( args[1]->as().value.safeGet(), parsed_host_port.first, diff --git a/src/TableFunctions/TableFunctionPostgreSQL.h b/src/TableFunctions/TableFunctionPostgreSQL.h index e625cbd9bf6..92e061e18ca 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.h +++ b/src/TableFunctions/TableFunctionPostgreSQL.h @@ -30,7 +30,7 @@ private: void parseArguments(const ASTPtr & ast_function, const Context & context) override; String connection_str; - String remote_table_name; + String remote_table_name, remote_table_schema; PostgreSQLConnectionPtr connection; }; diff --git a/tests/integration/test_storage_postgresql/test.py b/tests/integration/test_storage_postgresql/test.py index cee495438a2..58f3233bacc 100644 --- a/tests/integration/test_storage_postgresql/test.py +++ b/tests/integration/test_storage_postgresql/test.py @@ -132,6 +132,27 @@ def test_postgres_conversions(started_cluster): assert(result == expected) +def test_non_default_scema(started_cluster): + conn = get_postgres_conn(True) + cursor = 
conn.cursor() + cursor.execute('CREATE SCHEMA test_schema') + cursor.execute('CREATE TABLE test_schema.test_table (a integer)') + cursor.execute('INSERT INTO test_schema.test_table SELECT i FROM generate_series(0, 99) as t(i)') + + node1.query(''' + CREATE TABLE test_pg_table_schema (a UInt32) + ENGINE PostgreSQL('postgres1:5432', 'clickhouse', 'test_table', 'postgres', 'mysecretpassword', 'test_schema'); + ''') + + result = node1.query('SELECT * FROM test_pg_table_schema') + expected = node1.query('SELECT number FROM numbers(100)') + assert(result == expected) + + table_function = '''postgresql('postgres1:5432', 'clickhouse', 'test_table', 'postgres', 'mysecretpassword', 'test_schema')''' + result = node1.query('SELECT * FROM {}'.format(table_function)) + assert(result == expected) + + if __name__ == '__main__': cluster.start() input("Cluster created, press any key to destroy...") From 6e69299191e10588500b32510dc0f1e9ac4d1471 Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 14 Mar 2021 10:43:23 +0000 Subject: [PATCH 160/333] Fix style --- src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h index 9b5dec5e9a5..289183d8451 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h @@ -1,3 +1,5 @@ +#pragma once + #include "PostgreSQLConnection.h" #include #include From 583c78143de1367afea36ebb8aafd4f78b4a8e92 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 14:08:08 +0300 Subject: [PATCH 161/333] Remove useless CMake option --- CMakeLists.txt | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7c3571f1118..d310f7c298c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -240,9 +240,7 @@ else() message(STATUS "Disabling compiler -pipe option (have only 
${AVAILABLE_PHYSICAL_MEMORY} mb of memory)") endif() -if(NOT DISABLE_CPU_OPTIMIZE) - include(cmake/cpu_features.cmake) -endif() +include(cmake/cpu_features.cmake) option(ARCH_NATIVE "Add -march=native compiler flag") From d273fa241a750d264c8964ead464e5f575977a7b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 14:24:03 +0300 Subject: [PATCH 162/333] Remove trash --- website/locale/fa/LC_MESSAGES/messages.mo | Bin 424 -> 0 bytes website/locale/fa/LC_MESSAGES/messages.po | 325 --------------------- website/locale/tr/LC_MESSAGES/messages.mo | Bin 431 -> 0 bytes website/locale/tr/LC_MESSAGES/messages.po | 326 ---------------------- 4 files changed, 651 deletions(-) delete mode 100644 website/locale/fa/LC_MESSAGES/messages.mo delete mode 100644 website/locale/fa/LC_MESSAGES/messages.po delete mode 100644 website/locale/tr/LC_MESSAGES/messages.mo delete mode 100644 website/locale/tr/LC_MESSAGES/messages.po diff --git a/website/locale/fa/LC_MESSAGES/messages.mo b/website/locale/fa/LC_MESSAGES/messages.mo deleted file mode 100644 index 89c73f3fea4a8629ec01407c3ee36a9301bbd7fd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 424 zcmaJ+%TB{E5G)cWA31aIfg5abf&`VTO7sy>P(!0M>fKGYA;`5O+d+i?;P?0z91xTf zE3LG%JDQpOI5~bF_&V}^>iet*f9k@|Z~h4VE;afY3kPut8#K1oiqkY(Jw)@IZlY`* zuO3N;jW!MvTh%4Z+R8$%IgOHOyu6(*7Fo1jlXR8C+@Nq?0T#j`ry&a&uxkizXb|#{ zowF;(JP|T{se7k?c}rm(lZ@Gj#jA9c$<pLlFHTy33uA;tzR`?&`yFe{T?TbBykcQ{tt$X%(Zf;e1fj=$_`)MWg|tc#_SuTQ!|1Cc6pS*wcE(YAKv{wF8QI!@=DZQzXRVyk7wE9*#x>4I!G9e`f# z<)#s}96<3Tv>_X|c7^?&uM4~)jgc%=S<8w;e_31a{wFSuwj4%cNeu)SeMjhh0%yQ| AGynhq diff --git a/website/locale/tr/LC_MESSAGES/messages.po b/website/locale/tr/LC_MESSAGES/messages.po deleted file mode 100644 index 710ebbdf120..00000000000 --- a/website/locale/tr/LC_MESSAGES/messages.po +++ /dev/null @@ -1,326 +0,0 @@ -# Translations template for PROJECT. 
-# Copyright (C) 2020 ORGANIZATION -# This file is distributed under the same license as the PROJECT project. -# Automatically generated, 2020. -# -msgid "" -msgstr "" -"Project-Id-Version: PROJECT VERSION\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-06-17 12:20+0300\n" -"PO-Revision-Date: 2020-06-17 12:20+0300\n" -"Last-Translator: Automatically generated\n" -"Language-Team: none\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.8.0\n" -"Language: tr\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" - -#: templates/common_meta.html:1 -msgid "" -"ClickHouse is a fast open-source column-oriented database management system " -"that allows generating analytical data reports in real-time using SQL queries" -msgstr "" - -#: templates/common_meta.html:6 -msgid "ClickHouse - fast open-source OLAP DBMS" -msgstr "" - -#: templates/common_meta.html:10 -msgid "ClickHouse DBMS" -msgstr "" - -#: templates/common_meta.html:32 -msgid "open-source" -msgstr "" - -#: templates/common_meta.html:32 -msgid "relational" -msgstr "" - -#: templates/common_meta.html:32 -msgid "analytics" -msgstr "" - -#: templates/common_meta.html:32 -msgid "analytical" -msgstr "" - -#: templates/common_meta.html:32 -msgid "Big Data" -msgstr "" - -#: templates/common_meta.html:32 -msgid "web-analytics" -msgstr "" - -#: templates/footer.html:8 -msgid "ClickHouse source code is published under the Apache 2.0 License." -msgstr "" - -#: templates/footer.html:8 -msgid "" -"Software is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR " -"CONDITIONS OF ANY KIND, either express or implied." 
-msgstr "" - -#: templates/footer.html:11 -msgid "Yandex LLC" -msgstr "" - -#: templates/blog/content.html:20 templates/blog/content.html:25 -#: templates/blog/content.html:30 -msgid "Share on" -msgstr "" - -#: templates/blog/content.html:37 -msgid "Published date" -msgstr "" - -#: templates/blog/nav.html:20 -msgid "New post" -msgstr "" - -#: templates/blog/nav.html:25 -msgid "Documentation" -msgstr "" - -#: templates/docs/footer.html:3 -msgid "Rating" -msgstr "" - -#: templates/docs/footer.html:3 -msgid "votes" -msgstr "" - -#: templates/docs/footer.html:4 -msgid "Article Rating" -msgstr "" - -#: templates/docs/footer.html:4 -msgid "Was this content helpful?" -msgstr "" - -#: templates/docs/footer.html:7 -msgid "Unusable" -msgstr "" - -#: templates/docs/footer.html:7 -msgid "Poor" -msgstr "" - -#: templates/docs/footer.html:7 -msgid "Good" -msgstr "" - -#: templates/docs/footer.html:7 -msgid "Excellent" -msgstr "" - -#: templates/docs/footer.html:8 -msgid "documentation" -msgstr "" - -#: templates/docs/footer.html:15 -msgid "Built from" -msgstr "" - -#: templates/docs/footer.html:15 -msgid "published on" -msgstr "" - -#: templates/docs/footer.html:15 -msgid "modified on" -msgstr "" - -#: templates/docs/machine-translated.html:3 -msgid "Help wanted!" -msgstr "" - -#: templates/docs/machine-translated.html:4 -msgid "" -"The following content of this documentation page has been machine-" -"translated. But unlike other websites, it is not done on the fly. This " -"translated text lives on GitHub repository alongside main ClickHouse " -"codebase and waits for fellow native speakers to make it more human-readable." -msgstr "" - -#: templates/docs/machine-translated.html:4 -msgid "You can also use the original English version as a reference." 
-msgstr "" - -#: templates/docs/machine-translated.html:7 -msgid "Help ClickHouse documentation by editing this page" -msgstr "" - -#: templates/docs/sidebar.html:3 -msgid "Multi-page or single-page" -msgstr "" - -#: templates/docs/sidebar.html:5 -msgid "Multi-page version" -msgstr "" - -#: templates/docs/sidebar.html:8 -msgid "Single-page version" -msgstr "" - -#: templates/docs/sidebar.html:13 -msgid "Version" -msgstr "" - -#: templates/docs/sidebar.html:13 templates/docs/sidebar.html:19 -msgid "latest" -msgstr "" - -#: templates/docs/sidebar.html:36 -msgid "PDF version" -msgstr "" - -#: templates/docs/toc.html:8 -msgid "Table of Contents" -msgstr "" - -#: templates/index/community.html:4 -msgid "ClickHouse community" -msgstr "" - -#: templates/index/community.html:13 templates/index/community.html:14 -msgid "ClickHouse YouTube Channel" -msgstr "" - -#: templates/index/community.html:25 templates/index/community.html:26 -msgid "ClickHouse Official Twitter Account" -msgstr "" - -#: templates/index/community.html:36 templates/index/community.html:37 -msgid "ClickHouse at Telegram" -msgstr "" - -#: templates/index/community.html:41 -msgid "Chat with real users in " -msgstr "" - -#: templates/index/community.html:44 templates/index/community.html:116 -msgid "English" -msgstr "" - -#: templates/index/community.html:45 -msgid "or in" -msgstr "" - -#: templates/index/community.html:47 templates/index/community.html:117 -msgid "Russian" -msgstr "" - -#: templates/index/community.html:65 -msgid "Open GitHub issue to ask for help or to file a feature request" -msgstr "" - -#: templates/index/community.html:76 templates/index/community.html:77 -msgid "ClickHouse Slack Workspace" -msgstr "" - -#: templates/index/community.html:82 -msgid "Multipurpose public hangout" -msgstr "" - -#: templates/index/community.html:101 -msgid "Ask any questions" -msgstr "" - -#: templates/index/community.html:115 -msgid "ClickHouse Blog" -msgstr "" - -#: templates/index/community.html:116 
-msgid "in" -msgstr "" - -#: templates/index/community.html:128 templates/index/community.html:129 -msgid "ClickHouse at Google Groups" -msgstr "" - -#: templates/index/community.html:133 -msgid "Email discussions" -msgstr "" - -#: templates/index/community.html:142 -msgid "Like ClickHouse?" -msgstr "" - -#: templates/index/community.html:143 -msgid "Help to spread the word about it via" -msgstr "" - -#: templates/index/community.html:144 -msgid "and" -msgstr "" - -#: templates/index/community.html:153 -msgid "Hosting ClickHouse Meetups" -msgstr "" - -#: templates/index/community.html:157 -msgid "" -"ClickHouse meetups are essential for strengthening community worldwide, but " -"they couldn't be possible without the help of local organizers. Please, fill " -"this form if you want to become one or want to meet ClickHouse core team for " -"any other reason." -msgstr "" - -#: templates/index/community.html:159 -msgid "ClickHouse Meetup" -msgstr "" - -#: templates/index/community.html:165 -msgid "Name" -msgstr "" - -#: templates/index/community.html:168 -msgid "Email" -msgstr "" - -#: templates/index/community.html:171 -msgid "Company" -msgstr "" - -#: templates/index/community.html:174 -msgid "City" -msgstr "" - -#: templates/index/community.html:179 -msgid "We'd like to host a public ClickHouse Meetup" -msgstr "" - -#: templates/index/community.html:185 -msgid "We'd like to invite Yandex ClickHouse team to our office" -msgstr "" - -#: templates/index/community.html:191 -msgid "We'd like to invite Yandex ClickHouse team to another event we organize" -msgstr "" - -#: templates/index/community.html:197 -msgid "We're interested in commercial consulting, support or managed service" -msgstr "" - -#: templates/index/community.html:201 -msgid "Additional comments" -msgstr "" - -#: templates/index/community.html:203 -msgid "Send" -msgstr "" - -#: templates/index/community.html:212 -msgid "" -"If you have any more thoughts or questions, feel free to contact Yandex " 
-"ClickHouse team directly at" -msgstr "" - -#: templates/index/community.html:213 -msgid "turn on JavaScript to see email address" -msgstr "" From 1ac62970c40d50f54f404855950ebfac5587c3f4 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 14 Mar 2021 14:29:02 +0300 Subject: [PATCH 163/333] Modified SharedLibrary interface --- src/Common/SharedLibrary.cpp | 13 ++++++------ src/Common/SharedLibrary.h | 8 ++++---- .../LibraryDictionarySourceExternal.cpp | 20 +++++++++---------- .../LibraryDictionarySourceExternal.h | 20 +++++++++---------- 4 files changed, 31 insertions(+), 30 deletions(-) diff --git a/src/Common/SharedLibrary.cpp b/src/Common/SharedLibrary.cpp index 689179be7d8..9b81f74eb23 100644 --- a/src/Common/SharedLibrary.cpp +++ b/src/Common/SharedLibrary.cpp @@ -13,11 +13,11 @@ namespace ErrorCodes extern const int CANNOT_DLSYM; } -SharedLibrary::SharedLibrary(const std::string & path, int flags) +SharedLibrary::SharedLibrary(const std::string_view & path, int flags) { - handle = dlopen(path.c_str(), flags); + handle = dlopen(path.data(), flags); if (!handle) - throw Exception(std::string("Cannot dlopen: ") + dlerror(), ErrorCodes::CANNOT_DLOPEN); + throw Exception(ErrorCodes::CANNOT_DLOPEN, "Cannot dlopen: ({})", dlerror()); updatePHDRCache(); @@ -31,17 +31,18 @@ SharedLibrary::~SharedLibrary() std::terminate(); } -void * SharedLibrary::getImpl(const std::string & name, bool no_throw) +void * SharedLibrary::getImpl(const std::string_view & name, bool no_throw) { dlerror(); - auto * res = dlsym(handle, name.c_str()); + auto * res = dlsym(handle, name.data()); if (char * error = dlerror()) { if (no_throw) return nullptr; - throw Exception(std::string("Cannot dlsym: ") + error, ErrorCodes::CANNOT_DLSYM); + + throw Exception(ErrorCodes::CANNOT_DLSYM, "Cannot dlsym: ({})", error); } return res; diff --git a/src/Common/SharedLibrary.h b/src/Common/SharedLibrary.h index 9d2b9bc7843..e665c335c6f 100644 --- a/src/Common/SharedLibrary.h +++ 
b/src/Common/SharedLibrary.h @@ -14,23 +14,23 @@ namespace DB class SharedLibrary : private boost::noncopyable { public: - explicit SharedLibrary(const std::string & path, int flags = RTLD_LAZY); + explicit SharedLibrary(const std::string_view & path, int flags = RTLD_LAZY); ~SharedLibrary(); template - Func get(const std::string & name) + Func get(const std::string_view & name) { return reinterpret_cast(getImpl(name)); } template - Func tryGet(const std::string & name) + Func tryGet(const std::string_view & name) { return reinterpret_cast(getImpl(name, true)); } private: - void * getImpl(const std::string & name, bool no_throw = false); + void * getImpl(const std::string_view & name, bool no_throw = false); void * handle = nullptr; }; diff --git a/src/Dictionaries/LibraryDictionarySourceExternal.cpp b/src/Dictionaries/LibraryDictionarySourceExternal.cpp index eba088c2c55..259d0a2846a 100644 --- a/src/Dictionaries/LibraryDictionarySourceExternal.cpp +++ b/src/Dictionaries/LibraryDictionarySourceExternal.cpp @@ -9,19 +9,19 @@ const char DICT_LOGGER_NAME[] = "LibraryDictionarySourceExternal"; namespace ClickHouseLibrary { -std::string LIBRARY_CREATE_NEW_FUNC_NAME = "ClickHouseDictionary_v3_libNew"; -std::string LIBRARY_CLONE_FUNC_NAME = "ClickHouseDictionary_v3_libClone"; -std::string LIBRARY_DELETE_FUNC_NAME = "ClickHouseDictionary_v3_libDelete"; +std::string_view LIBRARY_CREATE_NEW_FUNC_NAME = "ClickHouseDictionary_v3_libNew"; +std::string_view LIBRARY_CLONE_FUNC_NAME = "ClickHouseDictionary_v3_libClone"; +std::string_view LIBRARY_DELETE_FUNC_NAME = "ClickHouseDictionary_v3_libDelete"; -std::string LIBRARY_DATA_NEW_FUNC_NAME = "ClickHouseDictionary_v3_dataNew"; -std::string LIBRARY_DATA_DELETE_FUNC_NAME = "ClickHouseDictionary_v3_dataDelete"; +std::string_view LIBRARY_DATA_NEW_FUNC_NAME = "ClickHouseDictionary_v3_dataNew"; +std::string_view LIBRARY_DATA_DELETE_FUNC_NAME = "ClickHouseDictionary_v3_dataDelete"; -std::string LIBRARY_LOAD_ALL_FUNC_NAME = 
"ClickHouseDictionary_v3_loadAll"; -std::string LIBRARY_LOAD_IDS_FUNC_NAME = "ClickHouseDictionary_v3_loadIds"; -std::string LIBRARY_LOAD_KEYS_FUNC_NAME = "ClickHouseDictionary_v3_loadKeys"; +std::string_view LIBRARY_LOAD_ALL_FUNC_NAME = "ClickHouseDictionary_v3_loadAll"; +std::string_view LIBRARY_LOAD_IDS_FUNC_NAME = "ClickHouseDictionary_v3_loadIds"; +std::string_view LIBRARY_LOAD_KEYS_FUNC_NAME = "ClickHouseDictionary_v3_loadKeys"; -std::string LIBRARY_IS_MODIFIED_FUNC_NAME = "ClickHouseDictionary_v3_isModified"; -std::string LIBRARY_SUPPORTS_SELECTIVE_LOAD_FUNC_NAME = "ClickHouseDictionary_v3_supportsSelectiveLoad"; +std::string_view LIBRARY_IS_MODIFIED_FUNC_NAME = "ClickHouseDictionary_v3_isModified"; +std::string_view LIBRARY_SUPPORTS_SELECTIVE_LOAD_FUNC_NAME = "ClickHouseDictionary_v3_supportsSelectiveLoad"; void log(LogLevel level, CString msg) { diff --git a/src/Dictionaries/LibraryDictionarySourceExternal.h b/src/Dictionaries/LibraryDictionarySourceExternal.h index 64a5f678578..3b92707d091 100644 --- a/src/Dictionaries/LibraryDictionarySourceExternal.h +++ b/src/Dictionaries/LibraryDictionarySourceExternal.h @@ -63,19 +63,19 @@ enum LogLevel void log(LogLevel level, CString msg); -extern std::string LIBRARY_CREATE_NEW_FUNC_NAME; -extern std::string LIBRARY_CLONE_FUNC_NAME; -extern std::string LIBRARY_DELETE_FUNC_NAME; +extern std::string_view LIBRARY_CREATE_NEW_FUNC_NAME; +extern std::string_view LIBRARY_CLONE_FUNC_NAME; +extern std::string_view LIBRARY_DELETE_FUNC_NAME; -extern std::string LIBRARY_DATA_NEW_FUNC_NAME; -extern std::string LIBRARY_DATA_DELETE_FUNC_NAME; +extern std::string_view LIBRARY_DATA_NEW_FUNC_NAME; +extern std::string_view LIBRARY_DATA_DELETE_FUNC_NAME; -extern std::string LIBRARY_LOAD_ALL_FUNC_NAME; -extern std::string LIBRARY_LOAD_IDS_FUNC_NAME; -extern std::string LIBRARY_LOAD_KEYS_FUNC_NAME; +extern std::string_view LIBRARY_LOAD_ALL_FUNC_NAME; +extern std::string_view LIBRARY_LOAD_IDS_FUNC_NAME; +extern std::string_view 
LIBRARY_LOAD_KEYS_FUNC_NAME; -extern std::string LIBRARY_IS_MODIFIED_FUNC_NAME; -extern std::string LIBRARY_SUPPORTS_SELECTIVE_LOAD_FUNC_NAME; +extern std::string_view LIBRARY_IS_MODIFIED_FUNC_NAME; +extern std::string_view LIBRARY_SUPPORTS_SELECTIVE_LOAD_FUNC_NAME; using LibraryContext = void *; From 0dcf8b4d55d0b3e91ada48c920f75d6b04cea69d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 14:39:14 +0300 Subject: [PATCH 164/333] Add .gitignore --- docs/tools/.gitignore | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 docs/tools/.gitignore diff --git a/docs/tools/.gitignore b/docs/tools/.gitignore new file mode 100644 index 00000000000..7d75298b357 --- /dev/null +++ b/docs/tools/.gitignore @@ -0,0 +1,2 @@ +build +__pycache__ From ef4913d184d07bab697016fb6dc49657cf7084e6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 14:41:04 +0300 Subject: [PATCH 165/333] Removed harmful code that was added due to misconduct --- docs/tools/translate/add_meta_flag.py | 12 -- docs/tools/translate/babel-mapping.ini | 3 - docs/tools/translate/filter.py | 199 ------------------ docs/tools/translate/normalize-markdown.sh | 13 -- .../remove_machine_translated_meta.py | 21 -- .../translate/replace-with-translation.sh | 17 -- docs/tools/translate/requirements.txt | 12 -- docs/tools/translate/split_meta.py | 35 --- docs/tools/translate/translate.py | 80 ------- docs/tools/translate/translate.sh | 29 --- docs/tools/translate/typograph_ru.py | 45 ---- .../update-all-machine-translated.sh | 26 --- docs/tools/translate/update-po.sh | 22 -- docs/tools/translate/util.py | 1 - 14 files changed, 515 deletions(-) delete mode 100755 docs/tools/translate/add_meta_flag.py delete mode 100644 docs/tools/translate/babel-mapping.ini delete mode 100755 docs/tools/translate/filter.py delete mode 100755 docs/tools/translate/normalize-markdown.sh delete mode 100755 docs/tools/translate/remove_machine_translated_meta.py delete mode 100755 
docs/tools/translate/replace-with-translation.sh delete mode 100644 docs/tools/translate/requirements.txt delete mode 100755 docs/tools/translate/split_meta.py delete mode 100755 docs/tools/translate/translate.py delete mode 100755 docs/tools/translate/translate.sh delete mode 100644 docs/tools/translate/typograph_ru.py delete mode 100755 docs/tools/translate/update-all-machine-translated.sh delete mode 100755 docs/tools/translate/update-po.sh delete mode 120000 docs/tools/translate/util.py diff --git a/docs/tools/translate/add_meta_flag.py b/docs/tools/translate/add_meta_flag.py deleted file mode 100755 index d87aa044faf..00000000000 --- a/docs/tools/translate/add_meta_flag.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python3 - -import sys - -import util - -if __name__ == '__main__': - flag_name = sys.argv[1] - path = sys.argv[2] - meta, content = util.read_md_file(path) - meta[flag_name] = True - util.write_md_file(path, meta, content) diff --git a/docs/tools/translate/babel-mapping.ini b/docs/tools/translate/babel-mapping.ini deleted file mode 100644 index 6a9a3e5c073..00000000000 --- a/docs/tools/translate/babel-mapping.ini +++ /dev/null @@ -1,3 +0,0 @@ -[python: **.py] -[jinja2: **/templates/**.html] -extensions=jinja2.ext.i18n,jinja2.ext.autoescape,jinja2.ext.with_ diff --git a/docs/tools/translate/filter.py b/docs/tools/translate/filter.py deleted file mode 100755 index 61e1104d345..00000000000 --- a/docs/tools/translate/filter.py +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -import json.decoder - -import pandocfilters -import slugify - -import translate -import util - - -is_debug = os.environ.get('DEBUG') is not None - -filename = os.getenv('INPUT') - - -def debug(*args): - if is_debug: - print(*args, file=sys.stderr) - - -def process_buffer(buffer, new_value, item=None, is_header=False): - if buffer: - text = ''.join(buffer) - - try: - translated_text = translate.translate(text) - except TypeError: - translated_text 
= text - except json.decoder.JSONDecodeError as e: - print('Failed to translate', str(e), file=sys.stderr) - sys.exit(1) - - debug(f'Translate: "{text}" -> "{translated_text}"') - - if text and text[0].isupper() and not translated_text[0].isupper(): - translated_text = translated_text[0].upper() + translated_text[1:] - - if text.startswith(' ') and not translated_text.startswith(' '): - translated_text = ' ' + translated_text - - if text.endswith(' ') and not translated_text.endswith(' '): - translated_text = translated_text + ' ' - - if is_header and translated_text.endswith('.'): - translated_text = translated_text.rstrip('.') - - title_case = is_header and translate.default_target_language == 'en' and text[0].isupper() - title_case_whitelist = { - 'a', 'an', 'the', 'and', 'or', 'that', - 'of', 'on', 'for', 'from', 'with', 'to', 'in' - } - is_first_iteration = True - for token in translated_text.split(' '): - if title_case and token.isascii() and not token.isupper(): - if len(token) > 1 and token.lower() not in title_case_whitelist: - token = token[0].upper() + token[1:] - elif not is_first_iteration: - token = token.lower() - is_first_iteration = False - - new_value.append(pandocfilters.Str(token)) - new_value.append(pandocfilters.Space()) - - if item is None and len(new_value): - new_value.pop(len(new_value) - 1) - else: - new_value[-1] = item - elif item: - new_value.append(item) - - -def process_sentence(value, is_header=False): - new_value = [] - buffer = [] - for item in value: - if isinstance(item, list): - new_value.append([process_sentence(subitem, is_header) for subitem in item]) - continue - elif isinstance(item, dict): - t = item.get('t') - c = item.get('c') - if t == 'Str': - buffer.append(c) - elif t == 'Space': - buffer.append(' ') - elif t == 'DoubleQuote': - buffer.append('"') - else: - process_buffer(buffer, new_value, item, is_header) - buffer = [] - else: - new_value.append(item) - process_buffer(buffer, new_value, is_header=is_header) - 
return new_value - - -def translate_filter(key, value, _format, _): - if key not in ['Space', 'Str']: - debug(key, value) - try: - cls = getattr(pandocfilters, key) - except AttributeError: - return - - if key == 'Para' and value: - marker = value[0].get('c') - if isinstance(marker, str) and marker.startswith('!!!') and len(value) > 2: - # Admonition case - if marker != '!!!': - # Lost space after !!! case - value.insert(1, pandocfilters.Str(marker[3:])) - value.insert(1, pandocfilters.Space()) - value[0]['c'] = '!!!' - admonition_value = [] - remaining_para_value = [] - in_admonition = True - break_value = [pandocfilters.LineBreak(), pandocfilters.Str(' ' * 4)] - for item in value: - if in_admonition: - if item.get('t') == 'SoftBreak': - in_admonition = False - else: - admonition_value.append(item) - else: - if item.get('t') == 'SoftBreak': - remaining_para_value += break_value - else: - remaining_para_value.append(item) - - if admonition_value[-1].get('t') == 'Quoted': - text = process_sentence(admonition_value[-1]['c'][-1]) - text[0]['c'] = '"' + text[0]['c'] - text[-1]['c'] = text[-1]['c'] + '"' - admonition_value.pop(-1) - admonition_value += text - else: - text = admonition_value[-1].get('c') - if text: - text = translate.translate(text[0].upper() + text[1:]) - admonition_value.append(pandocfilters.Space()) - admonition_value.append(pandocfilters.Str(f'"{text}"')) - - return cls(admonition_value + break_value + process_sentence(remaining_para_value)) - else: - return cls(process_sentence(value)) - elif key == 'Plain' or key == 'Strong' or key == 'Emph': - return cls(process_sentence(value)) - elif key == 'Link': - try: - # Plain links case - if value[2][0] == value[1][0].get('c'): - return pandocfilters.Str(value[2][0]) - except IndexError: - pass - - value[1] = process_sentence(value[1]) - href = value[2][0] - if not (href.startswith('http') or href.startswith('#')): - anchor = None - attempts = 10 - if '#' in href: - href, anchor = href.split('#', 1) - if 
href.endswith('.md') and not href.startswith('/'): - parts = [part for part in os.environ['INPUT'].split('/') if len(part) == 2] - lang = parts[-1] - script_path = os.path.dirname(__file__) - base_path = os.path.abspath(f'{script_path}/../../{lang}') - href = os.path.join( - os.path.relpath(base_path, os.path.dirname(os.environ['INPUT'])), - os.path.relpath(href, base_path) - ) - if anchor: - href = f'{href}#{anchor}' - value[2][0] = href - return cls(*value) - elif key == 'Header': - if value[1][0].islower() and '_' not in value[1][0]: # Preserve some manually specified anchors - value[1][0] = slugify.slugify(value[1][0], separator='-', word_boundary=True, save_order=True) - - # TODO: title case header in en - value[2] = process_sentence(value[2], is_header=True) - return cls(*value) - elif key == 'SoftBreak': - return pandocfilters.LineBreak() - - return - - -if __name__ == "__main__": - os.environ['INPUT'] = os.path.abspath(os.environ['INPUT']) - pwd = os.path.dirname(filename or '.') - if pwd: - with util.cd(pwd): - pandocfilters.toJSONFilter(translate_filter) - else: - pandocfilters.toJSONFilter(translate_filter) diff --git a/docs/tools/translate/normalize-markdown.sh b/docs/tools/translate/normalize-markdown.sh deleted file mode 100755 index 7850fa34b1d..00000000000 --- a/docs/tools/translate/normalize-markdown.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash -# Usage: normalize-en-markdown.sh -set -e -BASE_DIR=$(dirname $(readlink -f $0)) -TEMP_FILE=$(mktemp) -trap 'rm -f -- "${TEMP_FILE}"' INT TERM HUP EXIT -INPUT="$1" -if [[ ! 
-L "${INPUT}" ]] -then - export INPUT - cat "${INPUT}" > "${TEMP_FILE}" - "${BASE_DIR}/translate.sh" "en" "${TEMP_FILE}" "${INPUT}" -fi diff --git a/docs/tools/translate/remove_machine_translated_meta.py b/docs/tools/translate/remove_machine_translated_meta.py deleted file mode 100755 index 26cfde97f1e..00000000000 --- a/docs/tools/translate/remove_machine_translated_meta.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python3 -import os -import sys -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) -import convert_toc -import util - - -if __name__ == '__main__': - path = sys.argv[1][2:] - convert_toc.init_redirects() - try: - path = convert_toc.redirects[path] - except KeyError: - pass - meta, content = util.read_md_file(path) - if 'machine_translated' in meta: - del meta['machine_translated'] - if 'machine_translated_rev' in meta: - del meta['machine_translated_rev'] - util.write_md_file(path, meta, content) diff --git a/docs/tools/translate/replace-with-translation.sh b/docs/tools/translate/replace-with-translation.sh deleted file mode 100755 index 922ac65a921..00000000000 --- a/docs/tools/translate/replace-with-translation.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# Usage: replace-with-translation.sh -set -e -BASE_DIR=$(dirname $(readlink -f $0)) -TEMP_FILE=$(mktemp) -trap 'rm -f -- "${TEMP_FILE}"' INT TERM HUP EXIT -TARGET_LANGUAGE="$1" -export INPUT="$2" -cat "${INPUT}" > "${TEMP_FILE}" -if [[ ! 
-z $SLEEP ]] -then - sleep $[ ( $RANDOM % 20 ) + 1 ]s -fi -rm -f "${INPUT}" -mkdir -p $(dirname "${INPUT}") || true -YANDEX=1 "${BASE_DIR}/translate.sh" "${TARGET_LANGUAGE}" "${TEMP_FILE}" "${INPUT}" -git add "${INPUT}" diff --git a/docs/tools/translate/requirements.txt b/docs/tools/translate/requirements.txt deleted file mode 100644 index 1bbd119b823..00000000000 --- a/docs/tools/translate/requirements.txt +++ /dev/null @@ -1,12 +0,0 @@ -Babel==2.8.0 -certifi==2020.6.20 -chardet==3.0.4 -googletrans==3.0.0 -idna==2.10 -Jinja2==2.11.2 -pandocfilters==1.4.2 -python-slugify==4.0.1 -PyYAML==5.3.1 -requests==2.24.0 -text-unidecode==1.3 -urllib3==1.25.10 diff --git a/docs/tools/translate/split_meta.py b/docs/tools/translate/split_meta.py deleted file mode 100755 index b38b93e10b4..00000000000 --- a/docs/tools/translate/split_meta.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -import os -import subprocess -import sys - -import translate -import util - - -if __name__ == '__main__': - path = sys.argv[1] - content_path = f'{path}.content' - meta_path = f'{path}.meta' - meta, content = util.read_md_file(path) - - target_language = os.getenv('TARGET_LANGUAGE') - if target_language is not None and target_language != 'en': - rev = subprocess.check_output( - 'git rev-parse HEAD', shell=True - ).decode('utf-8').strip() - meta['machine_translated'] = True - meta['machine_translated_rev'] = rev - title = meta.get('toc_title') - if title: - meta['toc_title'] = translate.translate(title, target_language) - folder_title = meta.get('toc_folder_title') - if folder_title: - meta['toc_folder_title'] = translate.translate(folder_title, target_language) - if 'en_copy' in meta: - del meta['en_copy'] - - with open(content_path, 'w') as f: - print(content, file=f) - - util.write_md_file(meta_path, meta, '') diff --git a/docs/tools/translate/translate.py b/docs/tools/translate/translate.py deleted file mode 100755 index 605ff78f424..00000000000 --- a/docs/tools/translate/translate.py 
+++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env python3 - -import os -import random -import re -import sys -import time -import urllib.parse - -import googletrans -import requests -import yaml - - -translator = googletrans.Translator() -default_target_language = os.environ.get('TARGET_LANGUAGE', 'ru') -curly_braces_re = re.compile('({[^}]+})') - -is_yandex = os.environ.get('YANDEX') is not None - - -def translate_impl(text, target_language=None): - target_language = target_language or default_target_language - if target_language == 'en': - return text - elif is_yandex: - text = text.replace('‘', '\'') - text = text.replace('’', '\'') - has_alpha = any([char.isalpha() for char in text]) - if text.isascii() and has_alpha and not text.isupper(): - text = urllib.parse.quote(text) - url = f'http://translate.yandex.net/api/v1/tr.json/translate?srv=docs&lang=en-{target_language}&text={text}' - result = requests.get(url).json() - if result.get('code') == 200: - return result['text'][0] - else: - result = str(result) - print(f'Failed to translate "{text}": {result}', file=sys.stderr) - sys.exit(1) - else: - return text - else: - time.sleep(random.random()) - return translator.translate(text, target_language).text - - -def translate(text, target_language=None): - return "".join( - [ - part - if part.startswith("{") and part.endswith("}") - else translate_impl(part, target_language=target_language) - for part in re.split(curly_braces_re, text) - ] - ) - - -def translate_po(): - import babel.messages.pofile - base_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'website', 'locale') - for lang in ['en', 'zh', 'es', 'fr', 'ru', 'ja']: - po_path = os.path.join(base_dir, lang, 'LC_MESSAGES', 'messages.po') - with open(po_path, 'r') as f: - po_file = babel.messages.pofile.read_po(f, locale=lang, domain='messages') - for item in po_file: - if not item.string: - global is_yandex - is_yandex = True - item.string = translate(item.id, lang) - with open(po_path, 'wb') as 
f: - babel.messages.pofile.write_po(f, po_file) - - -if __name__ == '__main__': - target_language = sys.argv[1] - if target_language == 'po': - translate_po() - else: - result = translate_toc(yaml.full_load(sys.stdin.read())['nav'], sys.argv[1]) - print(yaml.dump({'nav': result})) diff --git a/docs/tools/translate/translate.sh b/docs/tools/translate/translate.sh deleted file mode 100755 index 1acf645eb81..00000000000 --- a/docs/tools/translate/translate.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash -# Usage: translate.sh -set -e -BASE_DIR=$(dirname $(readlink -f $0)) -OUTPUT=${3:-/dev/stdout} -export TARGET_LANGUAGE="$1" -export DEBUG -TEMP_FILE=$(mktemp) -export INPUT_PATH="$2" -INPUT_META="${INPUT_PATH}.meta" -INPUT_CONTENT="${INPUT_PATH}.content" - -trap 'rm -f -- "${TEMP_FILE}" "${INPUT_META}" "${INPUT_CONTENT}"' INT TERM HUP EXIT -source "${BASE_DIR}/venv/bin/activate" - -${BASE_DIR}/split_meta.py "${INPUT_PATH}" - -pandoc "${INPUT_CONTENT}" --filter "${BASE_DIR}/filter.py" -o "${TEMP_FILE}" \ - -f "markdown-space_in_atx_header" -t "markdown_strict+pipe_tables+markdown_attribute+all_symbols_escapable+backtick_code_blocks+autolink_bare_uris-link_attributes+markdown_attribute+mmd_link_attributes-raw_attribute+header_attributes-grid_tables+definition_lists" \ - --atx-headers --wrap=none --columns=99999 --tab-stop=4 -perl -pi -e 's/{\\#\\#/{##/g' "${TEMP_FILE}" -perl -pi -e 's/\\#\\#}/##}/g' "${TEMP_FILE}" -perl -pi -e 's/ *$//gg' "${TEMP_FILE}" -if [[ "${TARGET_LANGUAGE}" == "ru" ]] -then - perl -pi -e 's/“/«/gg' "${TEMP_FILE}" - perl -pi -e 's/”/»/gg' "${TEMP_FILE}" -fi -cat "${INPUT_META}" "${TEMP_FILE}" > "${OUTPUT}" diff --git a/docs/tools/translate/typograph_ru.py b/docs/tools/translate/typograph_ru.py deleted file mode 100644 index 2d970cf2a2e..00000000000 --- a/docs/tools/translate/typograph_ru.py +++ /dev/null @@ -1,45 +0,0 @@ -import requests - -class TypographError(Exception): - pass - - -def typograph(text): - text = text.replace('&', '&') - 
text = text.replace('<', '<') - text = text.replace('>', '>') - template = f''' - - - - {text} - 3 - 0 - 0 - 0 - - - - ''' - result = requests.post( - url='http://typograf.artlebedev.ru/webservices/typograf.asmx', - data=template.encode('utf-8'), - headers={ - 'Content-Type': 'text/xml', - 'SOAPAction': 'http://typograf.artlebedev.ru/webservices/ProcessText' - } - ) - if result.ok and 'ProcessTextResult' in result.text: - result_text = result.text.split('')[1].split('')[0].rstrip() - result_text = result_text.replace('&', '&') - result_text = result_text.replace('<', '<') - result_text = result_text.replace('>', '>') - return result_text - else: - raise TypographError(result.text) - - -if __name__ == '__main__': - import sys - print((typograph(sys.stdin.read()))) diff --git a/docs/tools/translate/update-all-machine-translated.sh b/docs/tools/translate/update-all-machine-translated.sh deleted file mode 100755 index fae2aae787f..00000000000 --- a/docs/tools/translate/update-all-machine-translated.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -BASE_DIR=$(dirname $(readlink -f $0)) - -function translate() { - set -x - LANGUAGE=$1 - DOCS_ROOT="${BASE_DIR}/../../" - REV="$(git rev-parse HEAD)" - for FILENAME in $(find "${DOCS_ROOT}${LANGUAGE}" -name "*.md" -type f) - do - HAS_MT_TAG=$(grep -c "machine_translated: true" "${FILENAME}") - IS_UP_TO_DATE=$(grep -c "machine_translated_rev: \"${REV}\"" "${FILENAME}") - if [ "${HAS_MT_TAG}" -eq "1" ] && [ "${IS_UP_TO_DATE}" -eq "0" ] - then - set -e - EN_FILENAME=${FILENAME/\/${LANGUAGE}\///en/} - rm "${FILENAME}" || true - cp "${EN_FILENAME}" "${FILENAME}" - DEBUG=1 SLEEP=1 ${BASE_DIR}/replace-with-translation.sh ${LANGUAGE} "${FILENAME}" - set +e - fi - done -} -export BASE_DIR -export -f translate -parallel translate ::: es fr zh ja fa tr diff --git a/docs/tools/translate/update-po.sh b/docs/tools/translate/update-po.sh deleted file mode 100755 index f2f4039bcb8..00000000000 --- a/docs/tools/translate/update-po.sh 
+++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -# Usage: update-po.sh -set -ex -BASE_DIR=$(dirname $(readlink -f $0)) -WEBSITE_DIR="${BASE_DIR}/../../../website" -LOCALE_DIR="${WEBSITE_DIR}/locale" -MESSAGES_POT="${LOCALE_DIR}/messages.pot" -BABEL_INI="${BASE_DIR}/babel-mapping.ini" -LANGS="en zh es fr ru ja tr fa" -source "${BASE_DIR}/venv/bin/activate" -cd "${WEBSITE_DIR}" -pybabel extract "." -o "${MESSAGES_POT}" -F "${BABEL_INI}" -for L in ${LANGS} -do - pybabel update -d locale -l "${L}" -i "${MESSAGES_POT}" || \ - pybabel init -d locale -l "${L}" -i "${MESSAGES_POT}" -done -python3 "${BASE_DIR}/translate.py" po -for L in ${LANGS} -do - pybabel compile -d locale -l "${L}" -done diff --git a/docs/tools/translate/util.py b/docs/tools/translate/util.py deleted file mode 120000 index 7f16d68497e..00000000000 --- a/docs/tools/translate/util.py +++ /dev/null @@ -1 +0,0 @@ -../util.py \ No newline at end of file From b65cc841e41dd034c61f1be30b1f1ec557ce16bf Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 14:45:47 +0300 Subject: [PATCH 166/333] Remove a test that cannot be fixed - less garbage in CI check output --- docs/tools/test.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/docs/tools/test.py b/docs/tools/test.py index 7d11157c986..00d1d47137f 100755 --- a/docs/tools/test.py +++ b/docs/tools/test.py @@ -68,17 +68,17 @@ def test_single_page(input_path, lang): f, features='html.parser' ) + anchor_points = set() + duplicate_anchor_points = 0 links_to_nowhere = 0 + for tag in soup.find_all(): for anchor_point in [tag.attrs.get('name'), tag.attrs.get('id')]: if anchor_point: - if anchor_point in anchor_points: - duplicate_anchor_points += 1 - logging.info('Duplicate anchor point: %s' % anchor_point) - else: - anchor_points.add(anchor_point) + anchor_points.add(anchor_point) + for tag in soup.find_all(): href = tag.attrs.get('href') if href and href.startswith('#') and href != '#': @@ -87,11 +87,8 @@ def 
test_single_page(input_path, lang): logging.info("Tag %s", tag) logging.info('Link to nowhere: %s' % href) - if duplicate_anchor_points: - logging.warning('Found %d duplicate anchor points' % duplicate_anchor_points) - if links_to_nowhere: - if lang == 'en' or lang == 'ru': # TODO: check all languages again + if lang == 'en' or lang == 'ru': logging.error(f'Found {links_to_nowhere} links to nowhere in {lang}') sys.exit(1) else: From 78fba611e0880eca9dee8709df2f435889069359 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 14:48:36 +0300 Subject: [PATCH 167/333] Remove trash --- docs/tools/make_links.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tools/make_links.sh b/docs/tools/make_links.sh index 743d4eebf16..c1194901f8f 100755 --- a/docs/tools/make_links.sh +++ b/docs/tools/make_links.sh @@ -8,7 +8,7 @@ BASE_DIR=$(dirname $(readlink -f $0)) function do_make_links() { set -x - langs=(en es zh fr ru ja tr fa) + langs=(en es zh fr ru ja) src_file="$1" for lang in "${langs[@]}" do From cb865ebe604c0c3c930988825f75ea4cdbdd56ec Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 14:49:53 +0300 Subject: [PATCH 168/333] Removed strange file --- docs/tools/output.md | 204 ------------------------------------------- 1 file changed, 204 deletions(-) delete mode 100644 docs/tools/output.md diff --git a/docs/tools/output.md b/docs/tools/output.md deleted file mode 100644 index 91ec6e75999..00000000000 --- a/docs/tools/output.md +++ /dev/null @@ -1,204 +0,0 @@ -# What is ClickHouse? {#what-is-clickhouse} - -ClickHouse is a column-oriented database management system (DBMS) for -online analytical processing of queries (OLAP). 
- -In a “normal” row-oriented DBMS, data is stored in this order: - - Row WatchID JavaEnable Title GoodEvent EventTime - ----- ------------- ------------ -------------------- ----------- --------------------- - #0 89354350662 1 Investor Relations 1 2016-05-18 05:19:20 - #1 90329509958 0 Contact us 1 2016-05-18 08:10:20 - #2 89953706054 1 Mission 1 2016-05-18 07:38:00 - #N ... ... ... ... ... - -In other words, all the values related to a row are physically stored -next to each other. - -Examples of a row-oriented DBMS are MySQL, Postgres, and MS SQL Server. -{: .grey } - -In a column-oriented DBMS, data is stored like this: - - Row: #0 #1 #2 #N - ------------- --------------------- --------------------- --------------------- ----- - WatchID: 89354350662 90329509958 89953706054 ... - JavaEnable: 1 0 1 ... - Title: Investor Relations Contact us Mission ... - GoodEvent: 1 1 1 ... - EventTime: 2016-05-18 05:19:20 2016-05-18 08:10:20 2016-05-18 07:38:00 ... - -These examples only show the order that data is arranged in. The values -from different columns are stored separately, and data from the same -column is stored together. - -Examples of a column-oriented DBMS: Vertica, Paraccel (Actian Matrix and -Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB -(VectorWise and Actian Vector), LucidDB, SAP HANA, Google Dremel, Google -PowerDrill, Druid, and kdb+. {: .grey } - -Different orders for storing data are better suited to different -scenarios. The data access scenario refers to what queries are made, how -often, and in what proportion; how much data is read for each type of -query – rows, columns, and bytes; the relationship between reading and -updating data; the working size of the data and how locally it is used; -whether transactions are used, and how isolated they are; requirements -for data replication and logical integrity; requirements for latency and -throughput for each type of query, and so on. 
- -The higher the load on the system, the more important it is to customize -the system set up to match the requirements of the usage scenario, and -the more fine grained this customization becomes. There is no system -that is equally well-suited to significantly different scenarios. If a -system is adaptable to a wide set of scenarios, under a high load, the -system will handle all the scenarios equally poorly, or will work well -for just one or few of possible scenarios. - -## Key Properties of the OLAP scenario {#key-properties-of-the-olap-scenario} - -- The vast majority of requests are for read access. -- Data is updated in fairly large batches (\> 1000 rows), not by - single rows; or it is not updated at all. -- Data is added to the DB but is not modified. -- For reads, quite a large number of rows are extracted from the DB, - but only a small subset of columns. -- Tables are “wide,” meaning they contain a large number of columns. -- Queries are relatively rare (usually hundreds of queries per server - or less per second). -- For simple queries, latencies around 50 ms are allowed. -- Column values are fairly small: numbers and short strings (for - example, 60 bytes per URL). -- Requires high throughput when processing a single query (up to - billions of rows per second per server). -- Transactions are not necessary. -- Low requirements for data consistency. -- There is one large table per query. All tables are small, except for - one. -- A query result is significantly smaller than the source data. In - other words, data is filtered or aggregated, so the result fits in a - single server’s RAM. - -It is easy to see that the OLAP scenario is very different from other -popular scenarios (such as OLTP or Key-Value access). So it doesn’t make -sense to try to use OLTP or a Key-Value DB for processing analytical -queries if you want to get decent performance. 
For example, if you try -to use MongoDB or Redis for analytics, you will get very poor -performance compared to OLAP databases. - -## Why Column-Oriented Databases Work Better in the OLAP Scenario {#why-column-oriented-databases-work-better-in-the-olap-scenario} - -Column-oriented databases are better suited to OLAP scenarios: they are -at least 100 times faster in processing most queries. The reasons are -explained in detail below, but the fact is easier to demonstrate -visually: - -**Row-oriented DBMS** - -![Row-oriented](images/row_oriented.gif#) - -**Column-oriented DBMS** - -![Column-oriented](images/column_oriented.gif#) - -See the difference? - -### Input/output {#inputoutput} - -1. For an analytical query, only a small number of table columns need - to be read. In a column-oriented database, you can read just the - data you need. For example, if you need 5 columns out of 100, you - can expect a 20-fold reduction in I/O. -2. Since data is read in packets, it is easier to compress. Data in - columns is also easier to compress. This further reduces the I/O - volume. -3. Due to the reduced I/O, more data fits in the system cache. - -For example, the query “count the number of records for each advertising -platform” requires reading one “advertising platform ID” column, which -takes up 1 byte uncompressed. If most of the traffic was not from -advertising platforms, you can expect at least 10-fold compression of -this column. When using a quick compression algorithm, data -decompression is possible at a speed of at least several gigabytes of -uncompressed data per second. In other words, this query can be -processed at a speed of approximately several billion rows per second on -a single server. This speed is actually achieved in practice. - -
- -Example - - $ clickhouse-client - ClickHouse client version 0.0.52053. - Connecting to localhost:9000. - Connected to ClickHouse server version 0.0.52053. - - :) SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20 - - SELECT - CounterID, - count() - FROM hits - GROUP BY CounterID - ORDER BY count() DESC - LIMIT 20 - - ┌─CounterID─┬──count()─┐ - │ 114208 │ 56057344 │ - │ 115080 │ 51619590 │ - │ 3228 │ 44658301 │ - │ 38230 │ 42045932 │ - │ 145263 │ 42042158 │ - │ 91244 │ 38297270 │ - │ 154139 │ 26647572 │ - │ 150748 │ 24112755 │ - │ 242232 │ 21302571 │ - │ 338158 │ 13507087 │ - │ 62180 │ 12229491 │ - │ 82264 │ 12187441 │ - │ 232261 │ 12148031 │ - │ 146272 │ 11438516 │ - │ 168777 │ 11403636 │ - │ 4120072 │ 11227824 │ - │ 10938808 │ 10519739 │ - │ 74088 │ 9047015 │ - │ 115079 │ 8837972 │ - │ 337234 │ 8205961 │ - └───────────┴──────────┘ - - 20 rows in set. Elapsed: 0.153 sec. Processed 1.00 billion rows, 4.00 GB (6.53 billion rows/s., 26.10 GB/s.) - - :) - -
- -### CPU {#cpu} - -Since executing a query requires processing a large number of rows, it -helps to dispatch all operations for entire vectors instead of for -separate rows, or to implement the query engine so that there is almost -no dispatching cost. If you don’t do this, with any half-decent disk -subsystem, the query interpreter inevitably stalls the CPU. It makes -sense to both store data in columns and process it, when possible, by -columns. - -There are two ways to do this: - -1. A vector engine. All operations are written for vectors, instead of - for separate values. This means you don’t need to call operations - very often, and dispatching costs are negligible. Operation code - contains an optimized internal cycle. - -2. Code generation. The code generated for the query has all the - indirect calls in it. - -This is not done in “normal” databases, because it doesn’t make sense -when running simple queries. However, there are exceptions. For example, -MemSQL uses code generation to reduce latency when processing SQL -queries. (For comparison, analytical DBMSs require optimization of -throughput, not latency.) - -Note that for CPU efficiency, the query language must be declarative -(SQL or MDX), or at least a vector (J, K). The query should only contain -implicit loops, allowing for optimization. 
- -[Original article](https://clickhouse.tech/docs/en/) From 4820498231d7597be25fb04279ba5988c4712104 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 14:53:27 +0300 Subject: [PATCH 169/333] Update gitignore --- docs/tools/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/tools/.gitignore b/docs/tools/.gitignore index 7d75298b357..443cee8638c 100644 --- a/docs/tools/.gitignore +++ b/docs/tools/.gitignore @@ -1,2 +1,3 @@ build __pycache__ +*.pyc From 3f35e686edc730220f31a5310d2327b59ae2585b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 15:01:03 +0300 Subject: [PATCH 170/333] Add .gitignore --- docs/.gitignore | 1 + website/README.md | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 docs/.gitignore diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 00000000000..378eac25d31 --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1 @@ +build diff --git a/website/README.md b/website/README.md index c4383bea24c..a09a00379d1 100644 --- a/website/README.md +++ b/website/README.md @@ -22,3 +22,9 @@ virtualenv build ``` ./build.py --skip-multi-page --skip-single-page --skip-amp --skip-pdf --skip-git-log --skip-docs --skip-test-templates --livereload 8080 ``` + +# How to quickly test the ugly annoying broken links in docs + +``` +./build.py --skip-multi-page --skip-amp --skip-pdf --skip-blog --skip-git-log --skip-test-templates --lang en --livereload 8080 +``` From bab7e9be9afed99bc1659a456427fab957a1ac57 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 15:18:10 +0300 Subject: [PATCH 171/333] Fix some broken links --- docs/en/getting-started/playground.md | 8 ++++---- docs/en/interfaces/formats.md | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/en/getting-started/playground.md b/docs/en/getting-started/playground.md index 7838dad14ea..9adf0423cf3 100644 --- a/docs/en/getting-started/playground.md +++ 
b/docs/en/getting-started/playground.md @@ -38,10 +38,10 @@ The queries are executed as a read-only user. It implies some limitations: The following settings are also enforced: -- [max_result_bytes=10485760](../operations/settings/query_complexity/#max-result-bytes) -- [max_result_rows=2000](../operations/settings/query_complexity/#setting-max_result_rows) -- [result_overflow_mode=break](../operations/settings/query_complexity/#result-overflow-mode) -- [max_execution_time=60000](../operations/settings/query_complexity/#max-execution-time) +- [max_result_bytes=10485760](../operations/settings/query-complexity/#max-result-bytes) +- [max_result_rows=2000](../operations/settings/query-complexity/#setting-max_result_rows) +- [result_overflow_mode=break](../operations/settings/query-complexity/#result-overflow-mode) +- [max_execution_time=60000](../operations/settings/query-complexity/#max-execution-time) ## Examples {#examples} diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 33bf90a8b52..ee2235b7861 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -1254,7 +1254,7 @@ ClickHouse supports configurable precision of `Decimal` type. The `INSERT` query Unsupported Parquet data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. -Data types of ClickHouse table columns can differ from the corresponding fields of the Parquet data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [cast](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to that data type which is set for the ClickHouse table column. +Data types of ClickHouse table columns can differ from the corresponding fields of the Parquet data inserted. 
When inserting data, ClickHouse interprets data types according to the table above and then [cast](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) the data to that data type which is set for the ClickHouse table column. ### Inserting and Selecting Data {#inserting-and-selecting-data} @@ -1359,15 +1359,15 @@ When working with the `Regexp` format, you can use the following settings: - Escaped (similarly to [TSV](#tabseparated)) - Quoted (similarly to [Values](#data-format-values)) - Raw (extracts subpatterns as a whole, no escaping rules) -- `format_regexp_skip_unmatched` — [UInt8](../sql-reference/data-types/int-uint.md). Defines the need to throw an exeption in case the `format_regexp` expression does not match the imported data. Can be set to `0` or `1`. +- `format_regexp_skip_unmatched` — [UInt8](../sql-reference/data-types/int-uint.md). Defines the need to throw an exeption in case the `format_regexp` expression does not match the imported data. Can be set to `0` or `1`. -**Usage** +**Usage** -The regular expression from `format_regexp` setting is applied to every line of imported data. The number of subpatterns in the regular expression must be equal to the number of columns in imported dataset. +The regular expression from `format_regexp` setting is applied to every line of imported data. The number of subpatterns in the regular expression must be equal to the number of columns in imported dataset. -Lines of the imported data must be separated by newline character `'\n'` or DOS-style newline `"\r\n"`. +Lines of the imported data must be separated by newline character `'\n'` or DOS-style newline `"\r\n"`. -The content of every matched subpattern is parsed with the method of corresponding data type, according to `format_regexp_escaping_rule` setting. +The content of every matched subpattern is parsed with the method of corresponding data type, according to `format_regexp_escaping_rule` setting. 
If the regular expression does not match the line and `format_regexp_skip_unmatched` is set to 1, the line is silently skipped. If `format_regexp_skip_unmatched` is set to 0, exception is thrown. From 8c9bd09e6c8ba8033fb08a6b3bb72a392f4e62df Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 15:19:04 +0300 Subject: [PATCH 172/333] Remove broken "Original article" --- docs/en/sql-reference/aggregate-functions/combinators.md | 1 - docs/en/sql-reference/aggregate-functions/index.md | 1 - .../en/sql-reference/aggregate-functions/parametric-functions.md | 1 - .../external-dictionaries/external-dicts-dict-hierarchical.md | 1 - .../external-dictionaries/external-dicts-dict-layout.md | 1 - .../external-dictionaries/external-dicts-dict-lifetime.md | 1 - .../external-dictionaries/external-dicts-dict-structure.md | 1 - .../dictionaries/external-dictionaries/external-dicts-dict.md | 1 - .../dictionaries/external-dictionaries/external-dicts.md | 1 - docs/en/sql-reference/dictionaries/index.md | 1 - docs/en/sql-reference/dictionaries/internal-dicts.md | 1 - docs/en/sql-reference/functions/arithmetic-functions.md | 1 - docs/en/sql-reference/functions/array-functions.md | 1 - docs/en/sql-reference/functions/array-join.md | 1 - docs/en/sql-reference/functions/bit-functions.md | 1 - docs/en/sql-reference/functions/bitmap-functions.md | 1 - docs/en/sql-reference/functions/comparison-functions.md | 1 - docs/en/sql-reference/functions/conditional-functions.md | 1 - docs/en/sql-reference/functions/date-time-functions.md | 1 - docs/en/sql-reference/functions/encoding-functions.md | 1 - docs/en/sql-reference/functions/ext-dict-functions.md | 1 - docs/en/sql-reference/functions/functions-for-nulls.md | 1 - docs/en/sql-reference/functions/hash-functions.md | 1 - docs/en/sql-reference/functions/in-functions.md | 1 - docs/en/sql-reference/functions/index.md | 1 - docs/en/sql-reference/functions/introspection.md | 1 - docs/en/sql-reference/functions/ip-address-functions.md 
| 1 - docs/en/sql-reference/functions/json-functions.md | 1 - docs/en/sql-reference/functions/logical-functions.md | 1 - docs/en/sql-reference/functions/machine-learning-functions.md | 1 - docs/en/sql-reference/functions/math-functions.md | 1 - docs/en/sql-reference/functions/other-functions.md | 1 - docs/en/sql-reference/functions/random-functions.md | 1 - docs/en/sql-reference/functions/rounding-functions.md | 1 - docs/en/sql-reference/functions/splitting-merging-functions.md | 1 - docs/en/sql-reference/functions/string-functions.md | 1 - docs/en/sql-reference/functions/string-replace-functions.md | 1 - docs/en/sql-reference/functions/string-search-functions.md | 1 - docs/en/sql-reference/functions/type-conversion-functions.md | 1 - docs/en/sql-reference/functions/url-functions.md | 1 - docs/en/sql-reference/functions/uuid-functions.md | 1 - docs/en/sql-reference/functions/ym-dict-functions.md | 1 - docs/en/sql-reference/operators/index.md | 1 - docs/en/sql-reference/statements/alter/index.md | 1 - docs/en/sql-reference/statements/grant.md | 1 - docs/en/sql-reference/statements/insert-into.md | 1 - docs/en/sql-reference/statements/system.md | 1 - docs/en/sql-reference/table-functions/generate.md | 1 - docs/en/sql-reference/table-functions/hdfs.md | 1 - docs/en/sql-reference/table-functions/input.md | 1 - docs/en/sql-reference/table-functions/jdbc.md | 1 - docs/en/sql-reference/table-functions/merge.md | 1 - docs/en/sql-reference/table-functions/numbers.md | 1 - docs/en/sql-reference/table-functions/s3.md | 1 - 54 files changed, 54 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/combinators.md b/docs/en/sql-reference/aggregate-functions/combinators.md index 015c90e90c7..cddef68d49c 100644 --- a/docs/en/sql-reference/aggregate-functions/combinators.md +++ b/docs/en/sql-reference/aggregate-functions/combinators.md @@ -250,4 +250,3 @@ FROM people ``` -[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/) 
diff --git a/docs/en/sql-reference/aggregate-functions/index.md b/docs/en/sql-reference/aggregate-functions/index.md index 543a5d3fed8..d2b46f6de53 100644 --- a/docs/en/sql-reference/aggregate-functions/index.md +++ b/docs/en/sql-reference/aggregate-functions/index.md @@ -59,4 +59,3 @@ SELECT groupArray(y) FROM t_null_big `groupArray` does not include `NULL` in the resulting array. -[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/) diff --git a/docs/en/sql-reference/aggregate-functions/parametric-functions.md b/docs/en/sql-reference/aggregate-functions/parametric-functions.md index c6c97b5428b..d059e0fc744 100644 --- a/docs/en/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/en/sql-reference/aggregate-functions/parametric-functions.md @@ -500,7 +500,6 @@ Problem: Generate a report that shows only keywords that produced at least 5 uni Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5 ``` -[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) ## sumMapFiltered(keys_to_keep)(keys, values) {#summapfilteredkeys-to-keepkeys-values} diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md index a5e105d2e13..08d3b8d8ad0 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md @@ -65,4 +65,3 @@ For our example, the structure of dictionary can be the following: ``` -[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md 
index efef91b4b09..337586a2e10 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -445,4 +445,3 @@ Other types are not supported yet. The function returns the attribute for the pr Data must completely fit into RAM. -[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index 32763e27ddd..081cc5b0b69 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -86,4 +86,3 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher ... ``` -[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index e25b3ab78c3..dbf2fa67ac5 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -170,4 +170,3 @@ Configuration fields: - [Functions for working with external dictionaries](../../../sql-reference/functions/ext-dict-functions.md). 
-[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md index 17ad110aa19..e15d944130e 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md @@ -48,4 +48,3 @@ LIFETIME(...) -- Lifetime of dictionary in memory - [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md) — Structure of the dictionary . A key and attributes that can be retrieved by this key. - [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) — Frequency of dictionary updates. -[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict/) diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md index 99a62002822..8217fb8da3a 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts.md @@ -57,4 +57,3 @@ You can [configure](../../../sql-reference/dictionaries/external-dictionaries/ex - [Dictionary Key and Fields](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md) - [Functions for Working with External Dictionaries](../../../sql-reference/functions/ext-dict-functions.md) -[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/) diff --git a/docs/en/sql-reference/dictionaries/index.md b/docs/en/sql-reference/dictionaries/index.md index 420182642bb..fa127dab103 100644 --- a/docs/en/sql-reference/dictionaries/index.md +++ 
b/docs/en/sql-reference/dictionaries/index.md @@ -17,4 +17,3 @@ ClickHouse supports: - [Built-in dictionaries](../../sql-reference/dictionaries/internal-dicts.md#internal_dicts) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md). - [Plug-in (external) dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md#dicts-external-dicts) with a [set of functions](../../sql-reference/functions/ext-dict-functions.md). -[Original article](https://clickhouse.tech/docs/en/query_language/dicts/) diff --git a/docs/en/sql-reference/dictionaries/internal-dicts.md b/docs/en/sql-reference/dictionaries/internal-dicts.md index 7d657d4177f..472351a19a4 100644 --- a/docs/en/sql-reference/dictionaries/internal-dicts.md +++ b/docs/en/sql-reference/dictionaries/internal-dicts.md @@ -50,4 +50,3 @@ We recommend periodically updating the dictionaries with the geobase. During an There are also functions for working with OS identifiers and Yandex.Metrica search engines, but they shouldn’t be used. -[Original article](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/) diff --git a/docs/en/sql-reference/functions/arithmetic-functions.md b/docs/en/sql-reference/functions/arithmetic-functions.md index c4b151f59ce..faa03dfc9d3 100644 --- a/docs/en/sql-reference/functions/arithmetic-functions.md +++ b/docs/en/sql-reference/functions/arithmetic-functions.md @@ -82,4 +82,3 @@ An exception is thrown when dividing by zero or when dividing a minimal negative Returns the least common multiple of the numbers. An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one. 
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/arithmetic_functions/) diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md index c9c418d57a4..9c2d37a0abb 100644 --- a/docs/en/sql-reference/functions/array-functions.md +++ b/docs/en/sql-reference/functions/array-functions.md @@ -1541,4 +1541,3 @@ SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res ``` Note that the `arraySumNonNegative` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument. -[Original article](https://clickhouse.tech/docs/en/query_language/functions/array_functions/) diff --git a/docs/en/sql-reference/functions/array-join.md b/docs/en/sql-reference/functions/array-join.md index f1f9a545366..f35e0d10117 100644 --- a/docs/en/sql-reference/functions/array-join.md +++ b/docs/en/sql-reference/functions/array-join.md @@ -32,4 +32,3 @@ SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src └─────┴───────────┴─────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/array_join/) diff --git a/docs/en/sql-reference/functions/bit-functions.md b/docs/en/sql-reference/functions/bit-functions.md index a3d0c82d8ab..0a1ef737f2a 100644 --- a/docs/en/sql-reference/functions/bit-functions.md +++ b/docs/en/sql-reference/functions/bit-functions.md @@ -250,4 +250,3 @@ Result: └───────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/bit_functions/) diff --git a/docs/en/sql-reference/functions/bitmap-functions.md b/docs/en/sql-reference/functions/bitmap-functions.md index bfff70576f2..16ae053f715 100644 --- a/docs/en/sql-reference/functions/bitmap-functions.md +++ b/docs/en/sql-reference/functions/bitmap-functions.md @@ -491,4 +491,3 @@ SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res └─────┘ ``` -[Original 
article](https://clickhouse.tech/docs/en/query_language/functions/bitmap_functions/) diff --git a/docs/en/sql-reference/functions/comparison-functions.md b/docs/en/sql-reference/functions/comparison-functions.md index 0b6d8b6e36e..edaf0a01c73 100644 --- a/docs/en/sql-reference/functions/comparison-functions.md +++ b/docs/en/sql-reference/functions/comparison-functions.md @@ -32,4 +32,3 @@ Strings are compared by bytes. A shorter string is smaller than all strings that ## greaterOrEquals, \>= operator {#function-greaterorequals} -[Original article](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/) diff --git a/docs/en/sql-reference/functions/conditional-functions.md b/docs/en/sql-reference/functions/conditional-functions.md index 2d57cbb3bd5..70eba4156c8 100644 --- a/docs/en/sql-reference/functions/conditional-functions.md +++ b/docs/en/sql-reference/functions/conditional-functions.md @@ -202,4 +202,3 @@ FROM LEFT_RIGHT └──────┴───────┴──────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/conditional_functions/) diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 304371f44eb..1ef116be617 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -1070,4 +1070,3 @@ Result: └────────────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index c1013ebb0e1..29286750240 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -172,4 +172,3 @@ Accepts an integer. Returns a string containing the list of powers of two that t Accepts an integer. 
Returns an array of UInt64 numbers containing the list of powers of two that total the source number when summed. Numbers in the array are in ascending order. -[Original article](https://clickhouse.tech/docs/en/query_language/functions/encoding_functions/) diff --git a/docs/en/sql-reference/functions/ext-dict-functions.md b/docs/en/sql-reference/functions/ext-dict-functions.md index 834fcdf8282..5fc146f603f 100644 --- a/docs/en/sql-reference/functions/ext-dict-functions.md +++ b/docs/en/sql-reference/functions/ext-dict-functions.md @@ -203,4 +203,3 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) ClickHouse throws an exception if it cannot parse the value of the attribute or the value doesn’t match the attribute data type. -[Original article](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/) diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md index f57f0f7e27d..dde0ef8ba93 100644 --- a/docs/en/sql-reference/functions/functions-for-nulls.md +++ b/docs/en/sql-reference/functions/functions-for-nulls.md @@ -309,4 +309,3 @@ SELECT toTypeName(toNullable(10)) └────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/functions_for_nulls/) diff --git a/docs/en/sql-reference/functions/hash-functions.md b/docs/en/sql-reference/functions/hash-functions.md index 465ad01527f..2e78d5f4105 100644 --- a/docs/en/sql-reference/functions/hash-functions.md +++ b/docs/en/sql-reference/functions/hash-functions.md @@ -482,4 +482,3 @@ Result: - [xxHash](http://cyan4973.github.io/xxHash/). 
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/hash_functions/) diff --git a/docs/en/sql-reference/functions/in-functions.md b/docs/en/sql-reference/functions/in-functions.md index dd3c1900fdc..c8936e74954 100644 --- a/docs/en/sql-reference/functions/in-functions.md +++ b/docs/en/sql-reference/functions/in-functions.md @@ -9,4 +9,3 @@ toc_title: IN Operator See the section [IN operators](../../sql-reference/operators/in.md#select-in-operators). -[Original article](https://clickhouse.tech/docs/en/query_language/functions/in_functions/) diff --git a/docs/en/sql-reference/functions/index.md b/docs/en/sql-reference/functions/index.md index 1a0b9d83b5f..32408759b98 100644 --- a/docs/en/sql-reference/functions/index.md +++ b/docs/en/sql-reference/functions/index.md @@ -84,4 +84,3 @@ Another example is the `hostName` function, which returns the name of the server If a function in a query is performed on the requestor server, but you need to perform it on remote servers, you can wrap it in an ‘any’ aggregate function or add it to a key in `GROUP BY`. 
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/) diff --git a/docs/en/sql-reference/functions/introspection.md b/docs/en/sql-reference/functions/introspection.md index 964265a461b..29752ae00bf 100644 --- a/docs/en/sql-reference/functions/introspection.md +++ b/docs/en/sql-reference/functions/introspection.md @@ -369,4 +369,3 @@ Result: └──────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/introspection/) diff --git a/docs/en/sql-reference/functions/ip-address-functions.md b/docs/en/sql-reference/functions/ip-address-functions.md index 64457627cce..ef17654295c 100644 --- a/docs/en/sql-reference/functions/ip-address-functions.md +++ b/docs/en/sql-reference/functions/ip-address-functions.md @@ -394,4 +394,3 @@ Result: └──────────────────┴────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/ip_address_functions/) diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md index edee048eb77..2b274ee912e 100644 --- a/docs/en/sql-reference/functions/json-functions.md +++ b/docs/en/sql-reference/functions/json-functions.md @@ -292,4 +292,3 @@ Result: └───────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/json_functions/) diff --git a/docs/en/sql-reference/functions/logical-functions.md b/docs/en/sql-reference/functions/logical-functions.md index 13452f88a85..6cce0e4fff5 100644 --- a/docs/en/sql-reference/functions/logical-functions.md +++ b/docs/en/sql-reference/functions/logical-functions.md @@ -17,4 +17,3 @@ Zero as an argument is considered “false,” while any non-zero value is consi ## xor {#xor} -[Original article](https://clickhouse.tech/docs/en/query_language/functions/logical_functions/) diff --git 
a/docs/en/sql-reference/functions/machine-learning-functions.md b/docs/en/sql-reference/functions/machine-learning-functions.md index f103a4ea421..630a4465c82 100644 --- a/docs/en/sql-reference/functions/machine-learning-functions.md +++ b/docs/en/sql-reference/functions/machine-learning-functions.md @@ -94,4 +94,3 @@ Result: } ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/machine-learning-functions/) diff --git a/docs/en/sql-reference/functions/math-functions.md b/docs/en/sql-reference/functions/math-functions.md index bfe973e3d96..54fbc03f7ae 100644 --- a/docs/en/sql-reference/functions/math-functions.md +++ b/docs/en/sql-reference/functions/math-functions.md @@ -477,4 +477,3 @@ Result: └──────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/math_functions/) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 2c7f8da881e..ce9d3a13221 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -1971,4 +1971,3 @@ Result: - [tcp_port](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port) -[Original article](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) diff --git a/docs/en/sql-reference/functions/random-functions.md b/docs/en/sql-reference/functions/random-functions.md index 2b9846344e4..aab9483de45 100644 --- a/docs/en/sql-reference/functions/random-functions.md +++ b/docs/en/sql-reference/functions/random-functions.md @@ -102,4 +102,3 @@ FROM numbers(3) │ aeca2A │ └───────────────────────────────────────┘ -[Original article](https://clickhouse.tech/docs/en/query_language/functions/random_functions/) diff --git a/docs/en/sql-reference/functions/rounding-functions.md b/docs/en/sql-reference/functions/rounding-functions.md index 83db1975366..4fb077f0be3 100644 --- 
a/docs/en/sql-reference/functions/rounding-functions.md +++ b/docs/en/sql-reference/functions/rounding-functions.md @@ -185,4 +185,3 @@ Accepts a number. If the number is less than 18, it returns 0. Otherwise, it rou Accepts a number and rounds it down to an element in the specified array. If the value is less than the lowest bound, the lowest bound is returned. -[Original article](https://clickhouse.tech/docs/en/query_language/functions/rounding_functions/) diff --git a/docs/en/sql-reference/functions/splitting-merging-functions.md b/docs/en/sql-reference/functions/splitting-merging-functions.md index c70ee20f076..bd7e209549c 100644 --- a/docs/en/sql-reference/functions/splitting-merging-functions.md +++ b/docs/en/sql-reference/functions/splitting-merging-functions.md @@ -150,4 +150,3 @@ Result: └───────────────────────────────────────────────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/splitting_merging_functions/) diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index 2c08fa3acb7..91020ae795e 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -648,4 +648,3 @@ Result: - [List of XML and HTML character entity references](https://en.wikipedia.org/wiki/List_of_XML_and_HTML_character_entity_references) -[Original article](https://clickhouse.tech/docs/en/query_language/functions/string_functions/) diff --git a/docs/en/sql-reference/functions/string-replace-functions.md b/docs/en/sql-reference/functions/string-replace-functions.md index 8905500995c..144b4fbc1da 100644 --- a/docs/en/sql-reference/functions/string-replace-functions.md +++ b/docs/en/sql-reference/functions/string-replace-functions.md @@ -92,4 +92,3 @@ Predefined characters: `\0`, `\\`, `|`, `(`, `)`, `^`, `$`, `.`, `[`, `]`, `?`, This implementation slightly differs from re2::RE2::QuoteMeta. 
It escapes zero byte as `\0` instead of `\x00` and it escapes only required characters. For more information, see the link: [RE2](https://github.com/google/re2/blob/master/re2/re2.cc#L473) -[Original article](https://clickhouse.tech/docs/en/query_language/functions/string_replace_functions/) diff --git a/docs/en/sql-reference/functions/string-search-functions.md b/docs/en/sql-reference/functions/string-search-functions.md index 83b0edea438..050234be19b 100644 --- a/docs/en/sql-reference/functions/string-search-functions.md +++ b/docs/en/sql-reference/functions/string-search-functions.md @@ -773,4 +773,3 @@ Result: └───────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 8a793b99ac9..ff4e8da6697 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -1210,4 +1210,3 @@ Result: └───────────────────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/functions/type_conversion_functions/) diff --git a/docs/en/sql-reference/functions/url-functions.md b/docs/en/sql-reference/functions/url-functions.md index 9e79ef2d0cb..f352fb4f74b 100644 --- a/docs/en/sql-reference/functions/url-functions.md +++ b/docs/en/sql-reference/functions/url-functions.md @@ -420,4 +420,3 @@ Removes the query string and fragment identifier. The question mark and number s Removes the ‘name’ URL parameter, if present. This function works under the assumption that the parameter name is encoded in the URL exactly the same way as in the passed argument. 
-[Original article](https://clickhouse.tech/docs/en/query_language/functions/url_functions/) diff --git a/docs/en/sql-reference/functions/uuid-functions.md b/docs/en/sql-reference/functions/uuid-functions.md index 01a61c65b67..e7e55c699cd 100644 --- a/docs/en/sql-reference/functions/uuid-functions.md +++ b/docs/en/sql-reference/functions/uuid-functions.md @@ -165,4 +165,3 @@ SELECT - [dictGetUUID](../../sql-reference/functions/ext-dict-functions.md#ext_dict_functions-other) -[Original article](https://clickhouse.tech/docs/en/query_language/functions/uuid_function/) diff --git a/docs/en/sql-reference/functions/ym-dict-functions.md b/docs/en/sql-reference/functions/ym-dict-functions.md index 56530b5e83b..9dff9a8cba8 100644 --- a/docs/en/sql-reference/functions/ym-dict-functions.md +++ b/docs/en/sql-reference/functions/ym-dict-functions.md @@ -150,4 +150,3 @@ Accepts a UInt32 number – the region ID from the Yandex geobase. A string with `ua` and `uk` both mean Ukrainian. -[Original article](https://clickhouse.tech/docs/en/query_language/functions/ym_dict_functions/) diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md index 274f7269bc8..e073d5f23f0 100644 --- a/docs/en/sql-reference/operators/index.md +++ b/docs/en/sql-reference/operators/index.md @@ -296,4 +296,3 @@ SELECT * FROM t_null WHERE y IS NOT NULL └───┴───┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/operators/) diff --git a/docs/en/sql-reference/statements/alter/index.md b/docs/en/sql-reference/statements/alter/index.md index 30603122096..71333e6fcce 100644 --- a/docs/en/sql-reference/statements/alter/index.md +++ b/docs/en/sql-reference/statements/alter/index.md @@ -47,4 +47,3 @@ For `ALTER ... ATTACH|DETACH|DROP` queries, you can use the `replication_alter_p For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting. 
-[Original article](https://clickhouse.tech/docs/en/query_language/alter/) diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index f3829de2fbb..0afc9b5b95f 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -473,4 +473,3 @@ Doesn’t grant any privileges. The `ADMIN OPTION` privilege allows a user to grant their role to another user. -[Original article](https://clickhouse.tech/docs/en/query_language/grant/) diff --git a/docs/en/sql-reference/statements/insert-into.md b/docs/en/sql-reference/statements/insert-into.md index c517a515ab7..66effcccc3f 100644 --- a/docs/en/sql-reference/statements/insert-into.md +++ b/docs/en/sql-reference/statements/insert-into.md @@ -117,4 +117,3 @@ Performance will not decrease if: - Data is added in real time. - You upload data that is usually sorted by time. -[Original article](https://clickhouse.tech/docs/en/query_language/insert_into/) diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index bb279703cc2..725024efe0c 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -277,4 +277,3 @@ SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name Provides possibility to reinitialize Zookeeper sessions state for all `ReplicatedMergeTree` tables, will compare current state with Zookeeper as source of true and add tasks to Zookeeper queue if needed -[Original article](https://clickhouse.tech/docs/en/query_language/system/) diff --git a/docs/en/sql-reference/table-functions/generate.md b/docs/en/sql-reference/table-functions/generate.md index be6ba2b8bc4..fee2c80cc8f 100644 --- a/docs/en/sql-reference/table-functions/generate.md +++ b/docs/en/sql-reference/table-functions/generate.md @@ -39,4 +39,3 @@ SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64( 
└──────────┴──────────────┴────────────────────────────────────────────────────────────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/generate/) diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 31e2000b22d..a7c3baca299 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -99,4 +99,3 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin - [Virtual columns](../../engines/table-engines/index.md#table_engines-virtual_columns) -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/hdfs/) diff --git a/docs/en/sql-reference/table-functions/input.md b/docs/en/sql-reference/table-functions/input.md index 40f9f4f7f6f..17707b798d6 100644 --- a/docs/en/sql-reference/table-functions/input.md +++ b/docs/en/sql-reference/table-functions/input.md @@ -42,4 +42,3 @@ $ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV" $ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV" ``` -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/input/) diff --git a/docs/en/sql-reference/table-functions/jdbc.md b/docs/en/sql-reference/table-functions/jdbc.md index 6fd53b0e794..c6df022c342 100644 --- a/docs/en/sql-reference/table-functions/jdbc.md +++ b/docs/en/sql-reference/table-functions/jdbc.md @@ -24,4 +24,3 @@ SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table') ``` -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/jdbc/) diff --git a/docs/en/sql-reference/table-functions/merge.md b/docs/en/sql-reference/table-functions/merge.md index 7b3d88f6266..a5c74b71069 100644 --- a/docs/en/sql-reference/table-functions/merge.md +++ 
b/docs/en/sql-reference/table-functions/merge.md @@ -9,4 +9,3 @@ toc_title: merge The table structure is taken from the first table encountered that matches the regular expression. -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/merge/) diff --git a/docs/en/sql-reference/table-functions/numbers.md b/docs/en/sql-reference/table-functions/numbers.md index 53e4e42a2f8..f9735056b05 100644 --- a/docs/en/sql-reference/table-functions/numbers.md +++ b/docs/en/sql-reference/table-functions/numbers.md @@ -25,4 +25,3 @@ Examples: select toDate('2010-01-01') + number as d FROM numbers(365); ``` -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/numbers/) diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index ea5dde707b8..2427f0f863c 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -166,4 +166,3 @@ Security consideration: if malicious user can specify arbitrary S3 URLs, `s3_max - [Virtual columns](../../engines/table-engines/index.md#table_engines-virtual_columns) -[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/s3/) From ecb686b0be84da52979d7574d4fdb70d4452a9c4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 15:29:23 +0300 Subject: [PATCH 173/333] Fix some broken links --- .../system-tables/replication_queue.md | 4 ++-- docs/en/sql-reference/statements/alter/ttl.md | 6 +++--- .../sql-reference/statements/create/table.md | 18 ++++++++---------- docs/en/sql-reference/table-functions/url.md | 2 +- 4 files changed, 14 insertions(+), 16 deletions(-) diff --git a/docs/en/operations/system-tables/replication_queue.md b/docs/en/operations/system-tables/replication_queue.md index aa379caa46c..d1c74a771c6 100644 --- a/docs/en/operations/system-tables/replication_queue.md +++ b/docs/en/operations/system-tables/replication_queue.md @@ -70,12 +70,12 @@ 
num_tries: 36 last_exception: Code: 226, e.displayText() = DB::Exception: Marks file '/opt/clickhouse/data/merge/visits_v2/tmp_fetch_20201130_121373_121384_2/CounterID.mrk' doesn't exist (version 20.8.7.15 (official build)) last_attempt_time: 2020-12-08 17:35:54 num_postponed: 0 -postpone_reason: +postpone_reason: last_postpone_time: 1970-01-01 03:00:00 ``` **See Also** -- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md/#query-language-system-replicated) +- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md#query-language-system-replicated) [Original article](https://clickhouse.tech/docs/en/operations/system_tables/replication_queue) diff --git a/docs/en/sql-reference/statements/alter/ttl.md b/docs/en/sql-reference/statements/alter/ttl.md index e8bfb78ec68..e740bfe173e 100644 --- a/docs/en/sql-reference/statements/alter/ttl.md +++ b/docs/en/sql-reference/statements/alter/ttl.md @@ -18,7 +18,7 @@ ALTER TABLE table_name MODIFY TTL ttl_expression; TTL-property can be removed from table with the following query: ```sql -ALTER TABLE table_name REMOVE TTL +ALTER TABLE table_name REMOVE TTL ``` **Example** @@ -81,5 +81,5 @@ The `TTL` is no longer there, so the second row is not deleted: ### See Also -- More about the [TTL-expression](../../../../sql-reference/statements/create/table#ttl-expression). -- Modify column [with TTL](../../../../sql-reference/statements/alter/column#alter_modify-column). +- More about the [TTL-expression](../../../../sql-reference/statements/create/table.md#ttl-expression). +- Modify column [with TTL](../../../../sql-reference/statements/alter/column.md#alter_modify-column). 
diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index 0090eec14b7..60ec40a60ba 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -59,7 +59,7 @@ There can be other clauses after the `ENGINE` clause in the query. See detailed ## NULL Or NOT NULL Modifiers {#null-modifiers} -`NULL` and `NOT NULL` modifiers after data type in column definition allow or do not allow it to be [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable). +`NULL` and `NOT NULL` modifiers after data type in column definition allow or do not allow it to be [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable). If the type is not `Nullable` and if `NULL` is specified, it will be treated as `Nullable`; if `NOT NULL` is specified, then no. For example, `INT NULL` is the same as `Nullable(INT)`. If the type is `Nullable` and `NULL` or `NOT NULL` modifiers are specified, the exception will be thrown. @@ -109,16 +109,16 @@ It is not possible to set default values for elements in nested data structures. ## Primary Key {#primary-key} -You can define a [primary key](../../../engines/table-engines/mergetree-family/mergetree.md#primary-keys-and-indexes-in-queries) when creating a table. Primary key can be specified in two ways: +You can define a [primary key](../../../engines/table-engines/mergetree-family/mergetree.md#primary-keys-and-indexes-in-queries) when creating a table. Primary key can be specified in two ways: - Inside the column list ``` sql -CREATE TABLE db.table_name -( - name1 type1, name2 type2, ..., +CREATE TABLE db.table_name +( + name1 type1, name2 type2, ..., PRIMARY KEY(expr1[, expr2,...])] -) +) ENGINE = engine; ``` @@ -126,9 +126,9 @@ ENGINE = engine; ``` sql CREATE TABLE db.table_name -( +( name1 type1, name2 type2, ... 
-) +) ENGINE = engine PRIMARY KEY(expr1[, expr2,...]); ``` @@ -333,5 +333,3 @@ SELECT * FROM base.t1; │ 3 │ └───┘ ``` - - [Original article](https://clickhouse.tech/docs/en/sql-reference/statements/create/table) diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md index 63b0ff0e152..2192b69d006 100644 --- a/docs/en/sql-reference/table-functions/url.md +++ b/docs/en/sql-reference/table-functions/url.md @@ -27,7 +27,7 @@ A table with the specified format and structure and with data from the defined ` **Examples** -Getting the first 3 lines of a table that contains columns of `String` and [UInt32](../../sql-reference/data-types/int-uint.md) type from HTTP-server which answers in [CSV](../../interfaces/formats.md/#csv) format. +Getting the first 3 lines of a table that contains columns of `String` and [UInt32](../../sql-reference/data-types/int-uint.md) type from HTTP-server which answers in [CSV](../../interfaces/formats.md#csv) format. ``` sql SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3; From 037cc92433ba200eb2772cc6af83636a62f5e972 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 14 Mar 2021 15:45:39 +0300 Subject: [PATCH 174/333] Reverted changes InterpreterSelectWithUnionQuery --- src/Interpreters/InterpreterSelectWithUnionQuery.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index 1d5b05ddd6b..b894db79c7b 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -43,7 +43,7 @@ struct CustomizeASTSelectWithUnionQueryNormalize return; } - selects.push_back(ast_select); + selects.push_back(std::move(ast_select)); } void visit(ASTSelectWithUnionQuery & ast, ASTPtr &) const @@ -76,10 +76,10 @@ struct CustomizeASTSelectWithUnionQueryNormalize for (auto child 
= inner_union->list_of_selects->children.rbegin(); child != inner_union->list_of_selects->children.rend(); ++child) - selects.push_back(*child); + selects.push_back(std::move(*child)); } else - selects.push_back(select_list[i + 1]); + selects.push_back(std::move(select_list[i + 1])); } /// flatten all left nodes and current node to a UNION DISTINCT list else if (union_modes[i] == ASTSelectWithUnionQuery::Mode::DISTINCT) @@ -108,10 +108,10 @@ struct CustomizeASTSelectWithUnionQueryNormalize /// Inner_union is an UNION ALL list, just lift it up for (auto child = inner_union->list_of_selects->children.rbegin(); child != inner_union->list_of_selects->children.rend(); ++child) - selects.push_back(*child); + selects.push_back(std::move(*child)); } else - selects.push_back(select_list[0]); + selects.push_back(std::move(select_list[0])); } // reverse children list From f617976571e9bb20b1f0821dcd64b520606d0395 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 15:55:11 +0300 Subject: [PATCH 175/333] Fix broken links --- docs/en/operations/system-tables/data_type_families.md | 2 +- docs/en/sql-reference/statements/alter/ttl.md | 4 ++-- docs/en/sql-reference/statements/watch.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/operations/system-tables/data_type_families.md b/docs/en/operations/system-tables/data_type_families.md index ddda91ed151..4e439f13aa5 100644 --- a/docs/en/operations/system-tables/data_type_families.md +++ b/docs/en/operations/system-tables/data_type_families.md @@ -1,6 +1,6 @@ # system.data_type_families {#system_tables-data_type_families} -Contains information about supported [data types](../../sql-reference/data-types/). +Contains information about supported [data types](../../sql-reference/data-types/index.md). 
Columns: diff --git a/docs/en/sql-reference/statements/alter/ttl.md b/docs/en/sql-reference/statements/alter/ttl.md index e740bfe173e..aa7ee838e10 100644 --- a/docs/en/sql-reference/statements/alter/ttl.md +++ b/docs/en/sql-reference/statements/alter/ttl.md @@ -81,5 +81,5 @@ The `TTL` is no longer there, so the second row is not deleted: ### See Also -- More about the [TTL-expression](../../../../sql-reference/statements/create/table.md#ttl-expression). -- Modify column [with TTL](../../../../sql-reference/statements/alter/column.md#alter_modify-column). +- More about the [TTL-expression](../../../sql-reference/statements/create/table.md#ttl-expression). +- Modify column [with TTL](../../../sql-reference/statements/alter/column.md#alter_modify-column). diff --git a/docs/en/sql-reference/statements/watch.md b/docs/en/sql-reference/statements/watch.md index 761bc8a041e..4da586259d2 100644 --- a/docs/en/sql-reference/statements/watch.md +++ b/docs/en/sql-reference/statements/watch.md @@ -102,5 +102,5 @@ WATCH lv EVENTS LIMIT 1 The `FORMAT` clause works the same way as for the [SELECT](../../sql-reference/statements/select/format.md#format-clause). !!! info "Note" - The [JSONEachRowWithProgress](../../../interfaces/formats/#jsoneachrowwithprogress) format should be used when watching [live view](./create/view.md#live-view) tables over the HTTP interface. The progress messages will be added to the output to keep the long-lived HTTP connection alive until the query result changes. The interval between progress messages is controlled using the [live_view_heartbeat_interval](./create/view.md#live-view-settings) setting. + The [JSONEachRowWithProgress](../../interfaces/formats/#jsoneachrowwithprogress) format should be used when watching [live view](./create/view.md#live-view) tables over the HTTP interface. The progress messages will be added to the output to keep the long-lived HTTP connection alive until the query result changes. 
The interval between progress messages is controlled using the [live_view_heartbeat_interval](./create/view.md#live-view-settings) setting. From fd766a351917ec7207faa2471367e0371adb8ea2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 16:31:16 +0300 Subject: [PATCH 176/333] Better than nothing --- docs/tools/single_page.py | 80 ++++++++++++++++++++++++--------------- docs/tools/util.py | 9 ----- 2 files changed, 50 insertions(+), 39 deletions(-) diff --git a/docs/tools/single_page.py b/docs/tools/single_page.py index 05d50e768e2..27628962271 100644 --- a/docs/tools/single_page.py +++ b/docs/tools/single_page.py @@ -24,55 +24,71 @@ def recursive_values(item): yield item +anchor_not_allowed_chars = re.compile(r'[^\w\-]') +def generate_anchor_from_path(path): + return re.sub(anchor_not_allowed_chars, '-', path) + + +def replace_link(match, path): + link = match.group(1) + if link.endswith('/'): + link = link[0:-1] + '.md' + + return '(#{})'.format(generate_anchor_from_path(os.path.normpath(os.path.join(os.path.dirname(path), link)))) + + +# Concatenates Markdown files to a single file. 
def concatenate(lang, docs_path, single_page_file, nav): lang_path = os.path.join(docs_path, lang) - az_re = re.compile(r'[a-z]') proj_config = f'{docs_path}/toc_{lang}.yml' if os.path.exists(proj_config): with open(proj_config) as cfg_file: nav = yaml.full_load(cfg_file.read())['nav'] + files_to_concatenate = list(recursive_values(nav)) files_count = len(files_to_concatenate) logging.info(f'{files_count} files will be concatenated into single md-file for {lang}.') logging.debug('Concatenating: ' + ', '.join(files_to_concatenate)) assert files_count > 0, f'Empty single-page for {lang}' + # (../anything) or (../anything#anchor) or (xyz-abc.md) or (xyz-abc.md#anchor) + relative_link_regexp = re.compile(r'\((\.\./[^)#]+|[\w\-]+\.md)(?:#[^\)]*)?\)') + for path in files_to_concatenate: - if path.endswith('introduction/info.md'): - continue try: with open(os.path.join(lang_path, path)) as f: - anchors = set() - tmp_path = path.replace('/index.md', '/').replace('.md', '/') - prefixes = ['', '../', '../../', '../../../'] - parts = tmp_path.split('/') - anchors.add(parts[-2] + '/') - anchors.add('/'.join(parts[1:])) - - for part in parts[0:-2] if len(parts) > 2 else parts: - for prefix in prefixes: - anchor = prefix + tmp_path - if anchor: - anchors.add(anchor) - anchors.add('../' + anchor) - anchors.add('../../' + anchor) - tmp_path = tmp_path.replace(part, '..') - - for anchor in anchors: - if re.search(az_re, anchor): - single_page_file.write('' % anchor) - - single_page_file.write('\n') + # Insert a horizontal ruler. Then insert an anchor that we will link to. Its name will be a path to the .md file. + single_page_file.write('\n______\n\n' % generate_anchor_from_path(path)) in_metadata = False - for l in f: - if l.startswith('---'): + for line in f: + # Skip YAML metadata. + if line == '---\n': in_metadata = not in_metadata - if l.startswith('#'): - l = '#' + l + continue + if not in_metadata: - single_page_file.write(l) + # Increase the level of headers. 
+ if line.startswith('#'): + line = '#' + line + + # Replace links within the docs. + + if re.search(relative_link_regexp, line): + line = re.sub( + relative_link_regexp, + lambda match: replace_link(match, path), + line) + + # If failed to replace the relative link, print to log + if '../' in line: + logging.info('Failed to resolve relative link:') + logging.info(path) + logging.info(line) + + single_page_file.write(line) + except IOError as e: logging.warning(str(e)) @@ -86,7 +102,7 @@ def build_single_page_version(lang, args, nav, cfg): extra['single_page'] = True extra['is_amp'] = False - with util.autoremoved_file(os.path.join(args.docs_dir, lang, 'single.md')) as single_md: + with open(os.path.join(args.docs_dir, lang, 'single.md'), 'w') as single_md: concatenate(lang, args.docs_dir, single_md, nav) with util.temp_dir() as site_temp: @@ -123,11 +139,14 @@ def build_single_page_version(lang, args, nav, cfg): single_page_index_html = os.path.join(single_page_output_path, 'index.html') single_page_content_js = os.path.join(single_page_output_path, 'content.js') + with open(single_page_index_html, 'r') as f: sp_prefix, sp_js, sp_suffix = f.read().split('') + with open(single_page_index_html, 'w') as f: f.write(sp_prefix) f.write(sp_suffix) + with open(single_page_content_js, 'w') as f: if args.minify: import jsmin @@ -151,6 +170,7 @@ def build_single_page_version(lang, args, nav, cfg): js_in = ' '.join(website.get_js_in(args)) subprocess.check_call(f'cat {css_in} > {test_dir}/css/base.css', shell=True) subprocess.check_call(f'cat {js_in} > {test_dir}/js/base.js', shell=True) + if args.save_raw_single_page: shutil.copytree(test_dir, args.save_raw_single_page) diff --git a/docs/tools/util.py b/docs/tools/util.py index b840dc1168a..25961561f99 100644 --- a/docs/tools/util.py +++ b/docs/tools/util.py @@ -22,15 +22,6 @@ def temp_dir(): shutil.rmtree(path) -@contextlib.contextmanager -def autoremoved_file(path): - try: - with open(path, 'w') as handle: - yield handle - 
finally: - os.unlink(path) - - @contextlib.contextmanager def cd(new_cwd): old_cwd = os.getcwd() From dfd2068350b7cc6b58d14556e7ee9938565ceb1e Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 14 Mar 2021 17:19:48 +0300 Subject: [PATCH 177/333] Updated SharedLibrary string_view interface --- src/Common/SharedLibrary.cpp | 4 ++-- src/Common/SharedLibrary.h | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/Common/SharedLibrary.cpp b/src/Common/SharedLibrary.cpp index 9b81f74eb23..37da308d5af 100644 --- a/src/Common/SharedLibrary.cpp +++ b/src/Common/SharedLibrary.cpp @@ -13,7 +13,7 @@ namespace ErrorCodes extern const int CANNOT_DLSYM; } -SharedLibrary::SharedLibrary(const std::string_view & path, int flags) +SharedLibrary::SharedLibrary(std::string_view path, int flags) { handle = dlopen(path.data(), flags); if (!handle) @@ -31,7 +31,7 @@ SharedLibrary::~SharedLibrary() std::terminate(); } -void * SharedLibrary::getImpl(const std::string_view & name, bool no_throw) +void * SharedLibrary::getImpl(std::string_view name, bool no_throw) { dlerror(); diff --git a/src/Common/SharedLibrary.h b/src/Common/SharedLibrary.h index e665c335c6f..866e60fbd33 100644 --- a/src/Common/SharedLibrary.h +++ b/src/Common/SharedLibrary.h @@ -14,23 +14,24 @@ namespace DB class SharedLibrary : private boost::noncopyable { public: - explicit SharedLibrary(const std::string_view & path, int flags = RTLD_LAZY); + explicit SharedLibrary(std::string_view path, int flags = RTLD_LAZY); ~SharedLibrary(); template - Func get(const std::string_view & name) + Func get(std::string_view name) { return reinterpret_cast(getImpl(name)); } + template - Func tryGet(const std::string_view & name) + Func tryGet(std::string_view name) { return reinterpret_cast(getImpl(name, true)); } private: - void * getImpl(const std::string_view & name, bool no_throw = false); + void * getImpl(std::string_view name, bool no_throw = false); void * handle = nullptr; }; From 
b3d29480815d31900692521d6c00ea877230b906 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 17:31:07 +0300 Subject: [PATCH 178/333] Fix UBSan report in modulo by constant --- src/Functions/modulo.cpp | 8 ++++++++ tests/queries/0_stateless/01760_modulo_negative.reference | 0 tests/queries/0_stateless/01760_modulo_negative.sql | 1 + 3 files changed, 9 insertions(+) create mode 100644 tests/queries/0_stateless/01760_modulo_negative.reference create mode 100644 tests/queries/0_stateless/01760_modulo_negative.sql diff --git a/src/Functions/modulo.cpp b/src/Functions/modulo.cpp index d9bf74ccaf5..fe215851bb6 100644 --- a/src/Functions/modulo.cpp +++ b/src/Functions/modulo.cpp @@ -70,6 +70,14 @@ struct ModuloByConstantImpl if (unlikely(static_cast(b) == 0)) throw Exception("Division by zero", ErrorCodes::ILLEGAL_DIVISION); + /// Division by min negative value. + if (std::is_signed_v && b == std::numeric_limits::lowest()) + throw Exception("Division by the most negative number", ErrorCodes::ILLEGAL_DIVISION); + + /// Modulo of division by negative number is the same as the positive number. + if (b < 0) + b = -b; + libdivide::divider divider(b); /// Here we failed to make the SSE variant from libdivide give an advantage. 
diff --git a/tests/queries/0_stateless/01760_modulo_negative.reference b/tests/queries/0_stateless/01760_modulo_negative.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01760_modulo_negative.sql b/tests/queries/0_stateless/01760_modulo_negative.sql new file mode 100644 index 00000000000..dbea06cc100 --- /dev/null +++ b/tests/queries/0_stateless/01760_modulo_negative.sql @@ -0,0 +1 @@ +SELECT -number % -9223372036854775808 FROM system.numbers; -- { serverError 153 } From c64892cdc83ff56e676652f8d4ade1be3a8a5fdc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 17:36:45 +0300 Subject: [PATCH 179/333] Fix broken links --- docs/ru/sql-reference/table-functions/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/table-functions/index.md b/docs/ru/sql-reference/table-functions/index.md index 52fb1bd2737..ca90306bbd5 100644 --- a/docs/ru/sql-reference/table-functions/index.md +++ b/docs/ru/sql-reference/table-functions/index.md @@ -33,6 +33,6 @@ toc_title: "Введение" | [jdbc](../../sql-reference/table-functions/jdbc.md) | Создаёт таблицу с движком [JDBC](../../engines/table-engines/integrations/jdbc.md). | | [odbc](../../sql-reference/table-functions/odbc.md) | Создаёт таблицу с движком [ODBC](../../engines/table-engines/integrations/odbc.md). | | [hdfs](../../sql-reference/table-functions/hdfs.md) | Создаёт таблицу с движком [HDFS](../../engines/table-engines/integrations/hdfs.md). | -| [s3](../../sql-reference/table-functions/s3.md) | Создаёт таблицу с движком [S3](../../engines/table-engines/integrations/s3.md). | +| [s3](../../sql-reference/table-functions/s3.md) | Создаёт таблицу с движком [S3](../../engines/table-engines/integrations/s3.md). 
| [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/) From d6e0342c3041024bd0386b297ca5bff47cc11207 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sat, 27 Feb 2021 01:37:00 +0300 Subject: [PATCH 180/333] Improvements in implementations of the classes AccessRights and GrantedRoles. --- src/Access/AccessControlManager.cpp | 8 +- src/Access/AccessControlManager.h | 6 +- src/Access/AccessRights.cpp | 103 +++++-- src/Access/AccessRights.h | 15 +- src/Access/AccessRightsElement.cpp | 233 +++++++-------- src/Access/AccessRightsElement.h | 142 +++------ src/Access/ContextAccess.cpp | 188 ++++++------ src/Access/ContextAccess.h | 47 +-- src/Access/GrantedRoles.cpp | 144 +++++++-- src/Access/GrantedRoles.h | 46 ++- src/Access/LDAPAccessStorage.cpp | 13 +- src/Access/RoleCache.cpp | 10 +- src/Access/RoleCache.h | 3 +- src/Access/RolesOrUsersSet.cpp | 64 +--- src/Access/RolesOrUsersSet.h | 5 +- src/Interpreters/Context.cpp | 2 +- src/Interpreters/Context.h | 4 +- .../InterpreterCreateQuotaQuery.cpp | 2 +- .../InterpreterCreateRowPolicyQuery.cpp | 4 +- .../InterpreterCreateSettingsProfileQuery.cpp | 2 +- .../InterpreterDropAccessEntityQuery.cpp | 2 +- src/Interpreters/InterpreterGrantQuery.cpp | 278 ++++++++++-------- src/Interpreters/InterpreterSetRoleQuery.cpp | 12 +- .../InterpreterShowAccessEntitiesQuery.cpp | 2 +- ...InterpreterShowCreateAccessEntityQuery.cpp | 2 +- .../InterpreterShowGrantsQuery.cpp | 44 ++- src/Interpreters/executeDDLQueryOnCluster.cpp | 11 +- src/Interpreters/executeDDLQueryOnCluster.h | 4 +- src/Parsers/ASTCreateQuotaQuery.cpp | 4 +- src/Parsers/ASTCreateQuotaQuery.h | 2 +- src/Parsers/ASTCreateRowPolicyQuery.cpp | 8 +- src/Parsers/ASTCreateRowPolicyQuery.h | 4 +- src/Parsers/ASTCreateSettingsProfileQuery.cpp | 4 +- src/Parsers/ASTCreateSettingsProfileQuery.h | 2 +- src/Parsers/ASTCreateUserQuery.h | 11 +- src/Parsers/ASTDropAccessEntityQuery.cpp | 4 +- src/Parsers/ASTDropAccessEntityQuery.h | 2 +- 
src/Parsers/ASTGrantQuery.cpp | 75 +++-- src/Parsers/ASTGrantQuery.h | 12 +- src/Parsers/ASTRolesOrUsersSet.cpp | 13 +- src/Parsers/ASTRolesOrUsersSet.h | 13 +- src/Parsers/ASTRowPolicyName.cpp | 4 +- src/Parsers/ASTRowPolicyName.h | 4 +- src/Parsers/ASTShowAccessEntitiesQuery.cpp | 2 +- src/Parsers/ASTShowAccessEntitiesQuery.h | 2 +- .../ASTShowCreateAccessEntityQuery.cpp | 4 +- src/Parsers/ASTShowCreateAccessEntityQuery.h | 2 +- src/Parsers/ParserCreateQuotaQuery.cpp | 2 +- src/Parsers/ParserCreateRowPolicyQuery.cpp | 2 +- .../ParserCreateSettingsProfileQuery.cpp | 2 +- src/Parsers/ParserCreateUserQuery.cpp | 4 +- src/Parsers/ParserCreateUserQuery.h | 2 + src/Parsers/ParserGrantQuery.cpp | 91 +++--- src/Parsers/ParserRolesOrUsersSet.cpp | 29 +- src/Parsers/ParserRolesOrUsersSet.h | 11 +- src/Parsers/ParserSetRoleQuery.cpp | 8 +- src/Parsers/ParserShowGrantsQuery.cpp | 2 +- src/Storages/System/StorageSystemGrants.cpp | 11 +- .../System/StorageSystemRoleGrants.cpp | 16 +- .../rbac/tests/syntax/revoke_role.py | 7 +- 60 files changed, 944 insertions(+), 811 deletions(-) diff --git a/src/Access/AccessControlManager.cpp b/src/Access/AccessControlManager.cpp index 0e7bf1e56f4..66023c1c0ea 100644 --- a/src/Access/AccessControlManager.cpp +++ b/src/Access/AccessControlManager.cpp @@ -403,7 +403,7 @@ void AccessControlManager::checkSettingNameIsAllowed(const std::string_view & se std::shared_ptr AccessControlManager::getContextAccess( const UUID & user_id, - const boost::container::flat_set & current_roles, + const std::vector & current_roles, bool use_default_roles, const Settings & settings, const String & current_database, @@ -411,7 +411,7 @@ std::shared_ptr AccessControlManager::getContextAccess( { ContextAccessParams params; params.user_id = user_id; - params.current_roles = current_roles; + params.current_roles.insert(current_roles.begin(), current_roles.end()); params.use_default_roles = use_default_roles; params.current_database = current_database; params.readonly 
= settings.readonly; @@ -444,8 +444,8 @@ std::shared_ptr AccessControlManager::getContextAccess(cons std::shared_ptr AccessControlManager::getEnabledRoles( - const boost::container::flat_set & current_roles, - const boost::container::flat_set & current_roles_with_admin_option) const + const std::vector & current_roles, + const std::vector & current_roles_with_admin_option) const { return role_cache->getEnabledRoles(current_roles, current_roles_with_admin_option); } diff --git a/src/Access/AccessControlManager.h b/src/Access/AccessControlManager.h index b4d90a4198e..789c33af1c1 100644 --- a/src/Access/AccessControlManager.h +++ b/src/Access/AccessControlManager.h @@ -114,7 +114,7 @@ public: std::shared_ptr getContextAccess( const UUID & user_id, - const boost::container::flat_set & current_roles, + const std::vector & current_roles, bool use_default_roles, const Settings & settings, const String & current_database, @@ -123,8 +123,8 @@ public: std::shared_ptr getContextAccess(const ContextAccessParams & params) const; std::shared_ptr getEnabledRoles( - const boost::container::flat_set & current_roles, - const boost::container::flat_set & current_roles_with_admin_option) const; + const std::vector & current_roles, + const std::vector & current_roles_with_admin_option) const; std::shared_ptr getEnabledRowPolicies( const UUID & user_id, diff --git a/src/Access/AccessRights.cpp b/src/Access/AccessRights.cpp index 8ce71dd8da8..f9c1d23350d 100644 --- a/src/Access/AccessRights.cpp +++ b/src/Access/AccessRights.cpp @@ -7,16 +7,19 @@ namespace DB { +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + namespace { - using Kind = AccessRightsElementWithOptions::Kind; - struct ProtoElement { AccessFlags access_flags; boost::container::small_vector full_name; bool grant_option = false; - Kind kind = Kind::GRANT; + bool is_partial_revoke = false; friend bool operator<(const ProtoElement & left, const ProtoElement & right) { @@ -43,8 +46,8 @@ namespace if (int cmp = 
compare_name(left.full_name, right.full_name, 1)) return cmp < 0; - if (left.kind != right.kind) - return (left.kind == Kind::GRANT); + if (left.is_partial_revoke != right.is_partial_revoke) + return right.is_partial_revoke; if (left.grant_option != right.grant_option) return right.grant_option; @@ -55,12 +58,12 @@ namespace return (left.access_flags < right.access_flags); } - AccessRightsElementWithOptions getResult() const + AccessRightsElement getResult() const { - AccessRightsElementWithOptions res; + AccessRightsElement res; res.access_flags = access_flags; res.grant_option = grant_option; - res.kind = kind; + res.is_partial_revoke = is_partial_revoke; switch (full_name.size()) { case 0: @@ -105,11 +108,11 @@ namespace class ProtoElements : public std::vector { public: - AccessRightsElementsWithOptions getResult() const + AccessRightsElements getResult() const { ProtoElements sorted = *this; boost::range::sort(sorted); - AccessRightsElementsWithOptions res; + AccessRightsElements res; res.reserve(sorted.size()); for (size_t i = 0; i != sorted.size();) @@ -144,7 +147,7 @@ namespace { return (element.full_name.size() != 3) || (element.full_name[0] != start_element.full_name[0]) || (element.full_name[1] != start_element.full_name[1]) || (element.grant_option != start_element.grant_option) - || (element.kind != start_element.kind); + || (element.is_partial_revoke != start_element.is_partial_revoke); }); return it - (begin() + start); @@ -153,7 +156,7 @@ namespace /// Collects columns together to write multiple columns into one AccessRightsElement. /// That procedure allows to output access rights in more compact way, /// e.g. "SELECT(x, y)" instead of "SELECT(x), SELECT(y)". 
- void appendResultWithElementsWithDifferenceInColumnOnly(size_t start, size_t count, AccessRightsElementsWithOptions & res) const + void appendResultWithElementsWithDifferenceInColumnOnly(size_t start, size_t count, AccessRightsElements & res) const { const auto * pbegin = data() + start; const auto * pend = pbegin + count; @@ -180,7 +183,7 @@ namespace res.emplace_back(); auto & back = res.back(); back.grant_option = pbegin->grant_option; - back.kind = pbegin->kind; + back.is_partial_revoke = pbegin->is_partial_revoke; back.any_database = false; back.database = pbegin->full_name[0]; back.any_table = false; @@ -515,10 +518,10 @@ private: auto grants = flags - parent_fl; if (revokes) - res.push_back(ProtoElement{revokes, full_name, false, Kind::REVOKE}); + res.push_back(ProtoElement{revokes, full_name, false, true}); if (grants) - res.push_back(ProtoElement{grants, full_name, false, Kind::GRANT}); + res.push_back(ProtoElement{grants, full_name, false, false}); if (node.children) { @@ -550,16 +553,16 @@ private: auto grants = flags - parent_fl - grants_go; if (revokes) - res.push_back(ProtoElement{revokes, full_name, false, Kind::REVOKE}); + res.push_back(ProtoElement{revokes, full_name, false, true}); if (revokes_go) - res.push_back(ProtoElement{revokes_go, full_name, true, Kind::REVOKE}); + res.push_back(ProtoElement{revokes_go, full_name, true, true}); if (grants) - res.push_back(ProtoElement{grants, full_name, false, Kind::GRANT}); + res.push_back(ProtoElement{grants, full_name, false, false}); if (grants_go) - res.push_back(ProtoElement{grants_go, full_name, true, Kind::GRANT}); + res.push_back(ProtoElement{grants_go, full_name, true, false}); if (node && node->children) { @@ -774,8 +777,10 @@ void AccessRights::grantImpl(const AccessFlags & flags, const Args &... 
args) } template -void AccessRights::grantImpl(const AccessRightsElement & element) +void AccessRights::grantImplHelper(const AccessRightsElement & element) { + assert(!element.is_partial_revoke); + assert(!element.grant_option || with_grant_option); if (element.any_database) grantImpl(element.access_flags); else if (element.any_table) @@ -786,6 +791,24 @@ void AccessRights::grantImpl(const AccessRightsElement & element) grantImpl(element.access_flags, element.database, element.table, element.columns); } +template +void AccessRights::grantImpl(const AccessRightsElement & element) +{ + if (element.is_partial_revoke) + throw Exception("A partial revoke should be revoked, not granted", ErrorCodes::BAD_ARGUMENTS); + if constexpr (with_grant_option) + { + grantImplHelper(element); + } + else + { + if (element.grant_option) + grantImplHelper(element); + else + grantImplHelper(element); + } +} + template void AccessRights::grantImpl(const AccessRightsElements & elements) { @@ -830,8 +853,9 @@ void AccessRights::revokeImpl(const AccessFlags & flags, const Args &... 
args) } template -void AccessRights::revokeImpl(const AccessRightsElement & element) +void AccessRights::revokeImplHelper(const AccessRightsElement & element) { + assert(!element.grant_option || grant_option); if (element.any_database) revokeImpl(element.access_flags); else if (element.any_table) @@ -842,6 +866,22 @@ void AccessRights::revokeImpl(const AccessRightsElement & element) revokeImpl(element.access_flags, element.database, element.table, element.columns); } +template +void AccessRights::revokeImpl(const AccessRightsElement & element) +{ + if constexpr (grant_option) + { + revokeImplHelper(element); + } + else + { + if (element.grant_option) + revokeImplHelper(element); + else + revokeImplHelper(element); + } +} + template void AccessRights::revokeImpl(const AccessRightsElements & elements) { @@ -868,7 +908,7 @@ void AccessRights::revokeGrantOption(const AccessRightsElement & element) { revo void AccessRights::revokeGrantOption(const AccessRightsElements & elements) { revokeImpl(elements); } -AccessRightsElementsWithOptions AccessRights::getElements() const +AccessRightsElements AccessRights::getElements() const { #if 0 logTree(); @@ -903,8 +943,9 @@ bool AccessRights::isGrantedImpl(const AccessFlags & flags, const Args &... 
args } template -bool AccessRights::isGrantedImpl(const AccessRightsElement & element) const +bool AccessRights::isGrantedImplHelper(const AccessRightsElement & element) const { + assert(!element.grant_option || grant_option); if (element.any_database) return isGrantedImpl(element.access_flags); else if (element.any_table) @@ -915,6 +956,22 @@ bool AccessRights::isGrantedImpl(const AccessRightsElement & element) const return isGrantedImpl(element.access_flags, element.database, element.table, element.columns); } +template +bool AccessRights::isGrantedImpl(const AccessRightsElement & element) const +{ + if constexpr (grant_option) + { + return isGrantedImplHelper(element); + } + else + { + if (element.grant_option) + return isGrantedImplHelper(element); + else + return isGrantedImplHelper(element); + } +} + template bool AccessRights::isGrantedImpl(const AccessRightsElements & elements) const { diff --git a/src/Access/AccessRights.h b/src/Access/AccessRights.h index c610795ab45..a90616ea27f 100644 --- a/src/Access/AccessRights.h +++ b/src/Access/AccessRights.h @@ -30,7 +30,7 @@ public: String toString() const; /// Returns the information about all the access granted. - AccessRightsElementsWithOptions getElements() const; + AccessRightsElements getElements() const; /// Grants access on a specified database/table/column. /// Does nothing if the specified access has been already granted. @@ -119,12 +119,15 @@ private: template void grantImpl(const AccessFlags & flags, const Args &... args); - template + template void grantImpl(const AccessRightsElement & element); - template + template void grantImpl(const AccessRightsElements & elements); + template + void grantImplHelper(const AccessRightsElement & element); + template void revokeImpl(const AccessFlags & flags, const Args &... 
args); @@ -134,6 +137,9 @@ private: template void revokeImpl(const AccessRightsElements & elements); + template + void revokeImplHelper(const AccessRightsElement & element); + template bool isGrantedImpl(const AccessFlags & flags, const Args &... args) const; @@ -143,6 +149,9 @@ private: template bool isGrantedImpl(const AccessRightsElements & elements) const; + template + bool isGrantedImplHelper(const AccessRightsElement & element) const; + void logTree() const; struct Node; diff --git a/src/Access/AccessRightsElement.cpp b/src/Access/AccessRightsElement.cpp index e69fb6d3b74..823019ffebd 100644 --- a/src/Access/AccessRightsElement.cpp +++ b/src/Access/AccessRightsElement.cpp @@ -1,169 +1,162 @@ #include -#include #include -#include -#include -#include #include -#include namespace DB { namespace { - using Kind = AccessRightsElementWithOptions::Kind; - - String formatOptions(bool grant_option, Kind kind, const String & inner_part) + void formatColumnNames(const Strings & columns, String & result) { - if (kind == Kind::REVOKE) + result += "("; + bool need_comma = false; + for (const auto & column : columns) { - if (grant_option) - return "REVOKE GRANT OPTION " + inner_part; - else - return "REVOKE " + inner_part; - } - else - { - if (grant_option) - return "GRANT " + inner_part + " WITH GRANT OPTION"; - else - return "GRANT " + inner_part; + if (need_comma) + result += ", "; + need_comma = true; + result += backQuoteIfNeed(column); } + result += ")"; } - - String formatONClause(const String & database, bool any_database, const String & table, bool any_table) + void formatONClause(const String & database, bool any_database, const String & table, bool any_table, String & result) { - String msg = "ON "; - + result += "ON "; if (any_database) - msg += "*."; - else if (!database.empty()) - msg += backQuoteIfNeed(database) + "."; - - if (any_table) - msg += "*"; + { + result += "*.*"; + } else - msg += backQuoteIfNeed(table); - return msg; + { + if (!database.empty()) + 
{ + result += backQuoteIfNeed(database); + result += "."; + } + if (any_table) + result += "*"; + else + result += backQuoteIfNeed(table); + } } - - String formatAccessFlagsWithColumns(const AccessFlags & access_flags, const Strings & columns, bool any_column) + void formatOptions(bool grant_option, bool is_partial_revoke, String & result) { - String columns_in_parentheses; + if (is_partial_revoke) + { + if (grant_option) + result.insert(0, "REVOKE GRANT OPTION "); + else + result.insert(0, "REVOKE "); + } + else + { + if (grant_option) + result.insert(0, "GRANT ").append(" WITH GRANT OPTION"); + else + result.insert(0, "GRANT "); + } + } + + void formatAccessFlagsWithColumns(const AccessFlags & access_flags, const Strings & columns, bool any_column, String & result) + { + String columns_as_str; if (!any_column) { if (columns.empty()) - return "USAGE"; - for (const auto & column : columns) { - columns_in_parentheses += columns_in_parentheses.empty() ? "(" : ", "; - columns_in_parentheses += backQuoteIfNeed(column); + result += "USAGE"; + return; } - columns_in_parentheses += ")"; + formatColumnNames(columns, columns_as_str); } auto keywords = access_flags.toKeywords(); if (keywords.empty()) - return "USAGE"; + { + result += "USAGE"; + return; + } - String msg; + bool need_comma = false; for (const std::string_view & keyword : keywords) { - if (!msg.empty()) - msg += ", "; - msg += String{keyword} + columns_in_parentheses; + if (need_comma) + result.append(", "); + need_comma = true; + result += keyword; + result += columns_as_str; } - return msg; } -} - -String AccessRightsElement::toString() const -{ - return formatAccessFlagsWithColumns(access_flags, columns, any_column) + " " + formatONClause(database, any_database, table, any_table); -} - -String AccessRightsElementWithOptions::toString() const -{ - return formatOptions(grant_option, kind, AccessRightsElement::toString()); -} - -String AccessRightsElements::toString() const -{ - if (empty()) - return "USAGE ON 
*.*"; - - String res; - String inner_part; - - for (size_t i = 0; i != size(); ++i) + String toStringImpl(const AccessRightsElement & element, bool with_options) { - const auto & element = (*this)[i]; - - if (!inner_part.empty()) - inner_part += ", "; - inner_part += formatAccessFlagsWithColumns(element.access_flags, element.columns, element.any_column); - - bool next_element_uses_same_table = false; - if (i != size() - 1) - { - const auto & next_element = (*this)[i + 1]; - if (element.sameDatabaseAndTable(next_element)) - next_element_uses_same_table = true; - } - - if (!next_element_uses_same_table) - { - if (!res.empty()) - res += ", "; - res += inner_part + " " + formatONClause(element.database, element.any_database, element.table, element.any_table); - inner_part.clear(); - } + String result; + formatAccessFlagsWithColumns(element.access_flags, element.columns, element.any_column, result); + result += " "; + formatONClause(element.database, element.any_database, element.table, element.any_table, result); + if (with_options) + formatOptions(element.grant_option, element.is_partial_revoke, result); + return result; } - return res; -} - -String AccessRightsElementsWithOptions::toString() const -{ - if (empty()) - return "GRANT USAGE ON *.*"; - - String res; - String inner_part; - - for (size_t i = 0; i != size(); ++i) + String toStringImpl(const AccessRightsElements & elements, bool with_options) { - const auto & element = (*this)[i]; + if (elements.empty()) + return with_options ? 
"GRANT USAGE ON *.*" : "USAGE ON *.*"; - if (!inner_part.empty()) - inner_part += ", "; - inner_part += formatAccessFlagsWithColumns(element.access_flags, element.columns, element.any_column); + String result; + String part; - bool next_element_uses_same_mode_and_table = false; - if (i != size() - 1) + for (size_t i = 0; i != elements.size(); ++i) { - const auto & next_element = (*this)[i + 1]; - if (element.sameDatabaseAndTable(next_element) && element.sameOptions(next_element)) - next_element_uses_same_mode_and_table = true; + const auto & element = elements[i]; + + if (!part.empty()) + part += ", "; + formatAccessFlagsWithColumns(element.access_flags, element.columns, element.any_column, part); + + bool next_element_uses_same_table_and_options = false; + if (i != elements.size() - 1) + { + const auto & next_element = elements[i + 1]; + if (element.sameDatabaseAndTable(next_element) && element.sameOptions(next_element)) + next_element_uses_same_table_and_options = true; + } + + if (!next_element_uses_same_table_and_options) + { + part += " "; + formatONClause(element.database, element.any_database, element.table, element.any_table, part); + if (with_options) + formatOptions(element.grant_option, element.is_partial_revoke, part); + if (result.empty()) + result = std::move(part); + else + result.append(", ").append(part); + part.clear(); + } } - if (!next_element_uses_same_mode_and_table) - { - if (!res.empty()) - res += ", "; - res += formatOptions( - element.grant_option, - element.kind, - inner_part + " " + formatONClause(element.database, element.any_database, element.table, element.any_table)); - inner_part.clear(); - } + return result; } +} - return res; + +String AccessRightsElement::toString() const { return toStringImpl(*this, true); } +String AccessRightsElement::toStringWithoutOptions() const { return toStringImpl(*this, false); } +String AccessRightsElements::toString() const { return toStringImpl(*this, true); } +String 
AccessRightsElements::toStringWithoutOptions() const { return toStringImpl(*this, false); } + +void AccessRightsElements::eraseNonGrantable() +{ + boost::range::remove_erase_if(*this, [](AccessRightsElement & element) + { + element.eraseNonGrantable(); + return element.empty(); + }); } } diff --git a/src/Access/AccessRightsElement.h b/src/Access/AccessRightsElement.h index 36cb64e6eba..c76f019bc61 100644 --- a/src/Access/AccessRightsElement.h +++ b/src/Access/AccessRightsElement.h @@ -16,6 +16,8 @@ struct AccessRightsElement bool any_database = true; bool any_table = true; bool any_column = true; + bool grant_option = false; + bool is_partial_revoke = false; AccessRightsElement() = default; AccessRightsElement(const AccessRightsElement &) = default; @@ -73,7 +75,7 @@ struct AccessRightsElement bool empty() const { return !access_flags || (!any_column && columns.empty()); } - auto toTuple() const { return std::tie(access_flags, any_database, database, any_table, table, any_column, columns); } + auto toTuple() const { return std::tie(access_flags, any_database, database, any_table, table, any_column, columns, grant_option, is_partial_revoke); } friend bool operator==(const AccessRightsElement & left, const AccessRightsElement & right) { return left.toTuple() == right.toTuple(); } friend bool operator!=(const AccessRightsElement & left, const AccessRightsElement & right) { return !(left == right); } @@ -83,44 +85,36 @@ struct AccessRightsElement && (any_table == other.any_table); } - bool isEmptyDatabase() const { return !any_database && database.empty(); } - - /// If the database is empty, replaces it with `new_database`. Otherwise does nothing. - void replaceEmptyDatabase(const String & new_database); - - /// Resets flags which cannot be granted. - void removeNonGrantableFlags(); - - /// Returns a human-readable representation like "SELECT, UPDATE(x, y) ON db.table". 
- String toString() const; -}; - - -struct AccessRightsElementWithOptions : public AccessRightsElement -{ - bool grant_option = false; - - enum class Kind + bool sameOptions(const AccessRightsElement & other) const { - GRANT, - REVOKE, - }; - Kind kind = Kind::GRANT; - - bool sameOptions(const AccessRightsElementWithOptions & other) const - { - return (grant_option == other.grant_option) && (kind == other.kind); + return (grant_option == other.grant_option) && (is_partial_revoke == other.is_partial_revoke); } - auto toTuple() const { return std::tie(access_flags, any_database, database, any_table, table, any_column, columns, grant_option, kind); } - friend bool operator==(const AccessRightsElementWithOptions & left, const AccessRightsElementWithOptions & right) { return left.toTuple() == right.toTuple(); } - friend bool operator!=(const AccessRightsElementWithOptions & left, const AccessRightsElementWithOptions & right) { return !(left == right); } - /// Resets flags which cannot be granted. - void removeNonGrantableFlags(); + void eraseNonGrantable() + { + if (!any_column) + access_flags &= AccessFlags::allFlagsGrantableOnColumnLevel(); + else if (!any_table) + access_flags &= AccessFlags::allFlagsGrantableOnTableLevel(); + else if (!any_database) + access_flags &= AccessFlags::allFlagsGrantableOnDatabaseLevel(); + else + access_flags &= AccessFlags::allFlagsGrantableOnGlobalLevel(); + } + + bool isEmptyDatabase() const { return !any_database && database.empty(); } + + /// If the database is empty, replaces it with `current_database`. Otherwise does nothing. + void replaceEmptyDatabase(const String & current_database) + { + if (isEmptyDatabase()) + database = current_database; + } /// Returns a human-readable representation like "GRANT SELECT, UPDATE(x, y) ON db.table". 
String toString() const; + String toStringWithoutOptions() const; }; @@ -130,77 +124,29 @@ class AccessRightsElements : public std::vector public: bool empty() const { return std::all_of(begin(), end(), [](const AccessRightsElement & e) { return e.empty(); }); } - /// Replaces the empty database with `new_database`. - void replaceEmptyDatabase(const String & new_database); + bool sameDatabaseAndTable() const + { + return (size() < 2) || std::all_of(std::next(begin()), end(), [this](const AccessRightsElement & e) { return e.sameDatabaseAndTable(front()); }); + } + + bool sameOptions() const + { + return (size() < 2) || std::all_of(std::next(begin()), end(), [this](const AccessRightsElement & e) { return e.sameOptions(front()); }); + } /// Resets flags which cannot be granted. - void removeNonGrantableFlags(); + void eraseNonGrantable(); + + /// If the database is empty, replaces it with `current_database`. Otherwise does nothing. + void replaceEmptyDatabase(const String & current_database) + { + for (auto & element : *this) + element.replaceEmptyDatabase(current_database); + } /// Returns a human-readable representation like "GRANT SELECT, UPDATE(x, y) ON db.table". String toString() const; + String toStringWithoutOptions() const; }; - -class AccessRightsElementsWithOptions : public std::vector -{ -public: - /// Replaces the empty database with `new_database`. - void replaceEmptyDatabase(const String & new_database); - - /// Resets flags which cannot be granted. - void removeNonGrantableFlags(); - - /// Returns a human-readable representation like "GRANT SELECT, UPDATE(x, y) ON db.table". 
- String toString() const; -}; - - -inline void AccessRightsElement::replaceEmptyDatabase(const String & new_database) -{ - if (isEmptyDatabase()) - database = new_database; -} - -inline void AccessRightsElements::replaceEmptyDatabase(const String & new_database) -{ - for (auto & element : *this) - element.replaceEmptyDatabase(new_database); -} - -inline void AccessRightsElementsWithOptions::replaceEmptyDatabase(const String & new_database) -{ - for (auto & element : *this) - element.replaceEmptyDatabase(new_database); -} - -inline void AccessRightsElement::removeNonGrantableFlags() -{ - if (!any_column) - access_flags &= AccessFlags::allFlagsGrantableOnColumnLevel(); - else if (!any_table) - access_flags &= AccessFlags::allFlagsGrantableOnTableLevel(); - else if (!any_database) - access_flags &= AccessFlags::allFlagsGrantableOnDatabaseLevel(); - else - access_flags &= AccessFlags::allFlagsGrantableOnGlobalLevel(); -} - -inline void AccessRightsElementWithOptions::removeNonGrantableFlags() -{ - if (kind == Kind::GRANT) - AccessRightsElement::removeNonGrantableFlags(); -} - -inline void AccessRightsElements::removeNonGrantableFlags() -{ - for (auto & element : *this) - element.removeNonGrantableFlags(); -} - -inline void AccessRightsElementsWithOptions::removeNonGrantableFlags() -{ - for (auto & element : *this) - element.removeNonGrantableFlags(); -} - } diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 494da4eaeae..0bcaef1e441 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -177,28 +177,18 @@ void ContextAccess::setUser(const UserPtr & user_) const user_name = user->getName(); trace_log = &Poco::Logger::get("ContextAccess (" + user_name + ")"); - boost::container::flat_set current_roles, current_roles_with_admin_option; + std::vector current_roles, current_roles_with_admin_option; if (params.use_default_roles) { - for (const UUID & id : user->granted_roles.roles) - { - if (user->default_roles.match(id)) 
- current_roles.emplace(id); - } + current_roles = user->granted_roles.findGranted(user->default_roles); + current_roles_with_admin_option = user->granted_roles.findGrantedWithAdminOption(user->default_roles); } else { - boost::range::set_intersection( - params.current_roles, - user->granted_roles.roles, - std::inserter(current_roles, current_roles.end())); + current_roles = user->granted_roles.findGranted(params.current_roles); + current_roles_with_admin_option = user->granted_roles.findGrantedWithAdminOption(params.current_roles); } - boost::range::set_intersection( - current_roles, - user->granted_roles.roles_with_admin_option, - std::inserter(current_roles_with_admin_option, current_roles_with_admin_option.end())); - subscription_for_roles_changes = {}; enabled_roles = manager->getEnabledRoles(current_roles, current_roles_with_admin_option); subscription_for_roles_changes = enabled_roles->subscribeForChanges([this](const std::shared_ptr & roles_info_) @@ -331,47 +321,13 @@ std::shared_ptr ContextAccess::getAccessRightsWithImplicit() } -template -bool ContextAccess::checkAccessImpl(const AccessFlags & flags) const -{ - return checkAccessImpl2(flags); -} - template -bool ContextAccess::checkAccessImpl(const AccessFlags & flags, const std::string_view & database, const Args &... args) const -{ - return checkAccessImpl2(flags, database.empty() ? 
params.current_database : database, args...); -} - -template -bool ContextAccess::checkAccessImpl(const AccessRightsElement & element) const -{ - if (element.any_database) - return checkAccessImpl(element.access_flags); - else if (element.any_table) - return checkAccessImpl(element.access_flags, element.database); - else if (element.any_column) - return checkAccessImpl(element.access_flags, element.database, element.table); - else - return checkAccessImpl(element.access_flags, element.database, element.table, element.columns); -} - -template -bool ContextAccess::checkAccessImpl(const AccessRightsElements & elements) const -{ - for (const auto & element : elements) - if (!checkAccessImpl(element)) - return false; - return true; -} - -template -bool ContextAccess::checkAccessImpl2(const AccessFlags & flags, const Args &... args) const +bool ContextAccess::checkAccessImplHelper(const AccessFlags & flags, const Args &... args) const { auto access_granted = [&] { if (trace_log) - LOG_TRACE(trace_log, "Access granted: {}{}", (AccessRightsElement{flags, args...}.toString()), + LOG_TRACE(trace_log, "Access granted: {}{}", (AccessRightsElement{flags, args...}.toStringWithoutOptions()), (grant_option ? " WITH GRANT OPTION" : "")); return true; }; @@ -379,7 +335,7 @@ bool ContextAccess::checkAccessImpl2(const AccessFlags & flags, const Args &... auto access_denied = [&](const String & error_msg, int error_code [[maybe_unused]]) { if (trace_log) - LOG_TRACE(trace_log, "Access denied: {}{}", (AccessRightsElement{flags, args...}.toString()), + LOG_TRACE(trace_log, "Access denied: {}{}", (AccessRightsElement{flags, args...}.toStringWithoutOptions()), (grant_option ? " WITH GRANT OPTION" : "")); if constexpr (throw_if_denied) throw Exception(getUserName() + ": " + error_msg, error_code); @@ -415,13 +371,13 @@ bool ContextAccess::checkAccessImpl2(const AccessFlags & flags, const Args &... "Not enough privileges. 
" "The required privileges have been granted, but without grant option. " "To execute this query it's necessary to have grant " - + AccessRightsElement{flags, args...}.toString() + " WITH GRANT OPTION", + + AccessRightsElement{flags, args...}.toStringWithoutOptions() + " WITH GRANT OPTION", ErrorCodes::ACCESS_DENIED); } return access_denied( "Not enough privileges. To execute this query it's necessary to have grant " - + AccessRightsElement{flags, args...}.toString() + (grant_option ? " WITH GRANT OPTION" : ""), + + AccessRightsElement{flags, args...}.toStringWithoutOptions() + (grant_option ? " WITH GRANT OPTION" : ""), ErrorCodes::ACCESS_DENIED); } @@ -478,6 +434,56 @@ bool ContextAccess::checkAccessImpl2(const AccessFlags & flags, const Args &... return access_granted(); } +template +bool ContextAccess::checkAccessImpl(const AccessFlags & flags) const +{ + return checkAccessImplHelper(flags); +} + +template +bool ContextAccess::checkAccessImpl(const AccessFlags & flags, const std::string_view & database, const Args &... args) const +{ + return checkAccessImplHelper(flags, database.empty() ? 
params.current_database : database, args...); +} + +template +bool ContextAccess::checkAccessImplHelper(const AccessRightsElement & element) const +{ + assert(!element.grant_option || grant_option); + if (element.any_database) + return checkAccessImpl(element.access_flags); + else if (element.any_table) + return checkAccessImpl(element.access_flags, element.database); + else if (element.any_column) + return checkAccessImpl(element.access_flags, element.database, element.table); + else + return checkAccessImpl(element.access_flags, element.database, element.table, element.columns); +} + +template +bool ContextAccess::checkAccessImpl(const AccessRightsElement & element) const +{ + if constexpr (grant_option) + { + return checkAccessImplHelper(element); + } + else + { + if (element.grant_option) + return checkAccessImplHelper(element); + else + return checkAccessImplHelper(element); + } +} + +template +bool ContextAccess::checkAccessImpl(const AccessRightsElements & elements) const +{ + for (const auto & element : elements) + if (!checkAccessImpl(element)) + return false; + return true; +} bool ContextAccess::isGranted(const AccessFlags & flags) const { return checkAccessImpl(flags); } bool ContextAccess::isGranted(const AccessFlags & flags, const std::string_view & database) const { return checkAccessImpl(flags, database); } @@ -516,44 +522,8 @@ void ContextAccess::checkGrantOption(const AccessRightsElement & element) const void ContextAccess::checkGrantOption(const AccessRightsElements & elements) const { checkAccessImpl(elements); } -template -bool ContextAccess::checkAdminOptionImpl(const UUID & role_id) const -{ - return checkAdminOptionImpl2(to_array(role_id), [this](const UUID & id, size_t) { return manager->tryReadName(id); }); -} - -template -bool ContextAccess::checkAdminOptionImpl(const UUID & role_id, const String & role_name) const -{ - return checkAdminOptionImpl2(to_array(role_id), [&role_name](const UUID &, size_t) { return std::optional{role_name}; 
}); -} - -template -bool ContextAccess::checkAdminOptionImpl(const UUID & role_id, const std::unordered_map & names_of_roles) const -{ - return checkAdminOptionImpl2(to_array(role_id), [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? it->second : std::optional{}; }); -} - -template -bool ContextAccess::checkAdminOptionImpl(const std::vector & role_ids) const -{ - return checkAdminOptionImpl2(role_ids, [this](const UUID & id, size_t) { return manager->tryReadName(id); }); -} - -template -bool ContextAccess::checkAdminOptionImpl(const std::vector & role_ids, const Strings & names_of_roles) const -{ - return checkAdminOptionImpl2(role_ids, [&names_of_roles](const UUID &, size_t i) { return std::optional{names_of_roles[i]}; }); -} - -template -bool ContextAccess::checkAdminOptionImpl(const std::vector & role_ids, const std::unordered_map & names_of_roles) const -{ - return checkAdminOptionImpl2(role_ids, [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? 
it->second : std::optional{}; }); -} - template -bool ContextAccess::checkAdminOptionImpl2(const Container & role_ids, const GetNameFunction & get_name_function) const +bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const GetNameFunction & get_name_function) const { if (!std::size(role_ids) || is_full_access) return true; @@ -605,6 +575,42 @@ bool ContextAccess::checkAdminOptionImpl2(const Container & role_ids, const GetN return true; } +template +bool ContextAccess::checkAdminOptionImpl(const UUID & role_id) const +{ + return checkAdminOptionImplHelper(to_array(role_id), [this](const UUID & id, size_t) { return manager->tryReadName(id); }); +} + +template +bool ContextAccess::checkAdminOptionImpl(const UUID & role_id, const String & role_name) const +{ + return checkAdminOptionImplHelper(to_array(role_id), [&role_name](const UUID &, size_t) { return std::optional{role_name}; }); +} + +template +bool ContextAccess::checkAdminOptionImpl(const UUID & role_id, const std::unordered_map & names_of_roles) const +{ + return checkAdminOptionImplHelper(to_array(role_id), [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? 
it->second : std::optional{}; }); +} + +template +bool ContextAccess::checkAdminOptionImpl(const std::vector & role_ids) const +{ + return checkAdminOptionImplHelper(role_ids, [this](const UUID & id, size_t) { return manager->tryReadName(id); }); +} + +template +bool ContextAccess::checkAdminOptionImpl(const std::vector & role_ids, const Strings & names_of_roles) const +{ + return checkAdminOptionImplHelper(role_ids, [&names_of_roles](const UUID &, size_t i) { return std::optional{names_of_roles[i]}; }); +} + +template +bool ContextAccess::checkAdminOptionImpl(const std::vector & role_ids, const std::unordered_map & names_of_roles) const +{ + return checkAdminOptionImplHelper(role_ids, [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? it->second : std::optional{}; }); +} + bool ContextAccess::hasAdminOption(const UUID & role_id) const { return checkAdminOptionImpl(role_id); } bool ContextAccess::hasAdminOption(const UUID & role_id, const String & role_name) const { return checkAdminOptionImpl(role_id, role_name); } bool ContextAccess::hasAdminOption(const UUID & role_id, const std::unordered_map & names_of_roles) const { return checkAdminOptionImpl(role_id, names_of_roles); } diff --git a/src/Access/ContextAccess.h b/src/Access/ContextAccess.h index 43e9f60a4c6..320c2566769 100644 --- a/src/Access/ContextAccess.h +++ b/src/Access/ContextAccess.h @@ -99,25 +99,6 @@ public: std::shared_ptr getAccessRights() const; std::shared_ptr getAccessRightsWithImplicit() const; - /// Checks if a specified access is granted. 
- bool isGranted(const AccessFlags & flags) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const; - bool isGranted(const AccessRightsElement & element) const; - bool isGranted(const AccessRightsElements & elements) const; - - bool hasGrantOption(const AccessFlags & flags) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const; - bool hasGrantOption(const AccessRightsElement & element) const; - bool hasGrantOption(const AccessRightsElements & elements) const; - /// Checks if a specified access is granted, and throws an exception if not. /// Empty database means the current database. 
void checkAccess(const AccessFlags & flags) const; @@ -138,6 +119,26 @@ public: void checkGrantOption(const AccessRightsElement & element) const; void checkGrantOption(const AccessRightsElements & elements) const; + /// Checks if a specified access is granted, and returns false if not. + /// Empty database means the current database. + bool isGranted(const AccessFlags & flags) const; + bool isGranted(const AccessFlags & flags, const std::string_view & database) const; + bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const; + bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; + bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; + bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const; + bool isGranted(const AccessRightsElement & element) const; + bool isGranted(const AccessRightsElements & elements) const; + + bool hasGrantOption(const AccessFlags & flags) const; + bool hasGrantOption(const AccessFlags & flags, const std::string_view & database) const; + bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const; + bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; + bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; + bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const; + bool hasGrantOption(const AccessRightsElement & element) const; + bool hasGrantOption(const AccessRightsElements 
& elements) const; + /// Checks if a specified role is granted with admin option, and throws an exception if not. void checkAdminOption(const UUID & role_id) const; void checkAdminOption(const UUID & role_id, const String & role_name) const; @@ -146,6 +147,7 @@ public: void checkAdminOption(const std::vector & role_ids, const Strings & names_of_roles) const; void checkAdminOption(const std::vector & role_ids, const std::unordered_map & names_of_roles) const; + /// Checks if a specified role is granted with admin option, and returns false if not. bool hasAdminOption(const UUID & role_id) const; bool hasAdminOption(const UUID & role_id, const String & role_name) const; bool hasAdminOption(const UUID & role_id, const std::unordered_map & names_of_roles) const; @@ -180,7 +182,10 @@ private: bool checkAccessImpl(const AccessRightsElements & elements) const; template - bool checkAccessImpl2(const AccessFlags & flags, const Args &... args) const; + bool checkAccessImplHelper(const AccessFlags & flags, const Args &... 
args) const; + + template + bool checkAccessImplHelper(const AccessRightsElement & element) const; template bool checkAdminOptionImpl(const UUID & role_id) const; @@ -201,7 +206,7 @@ private: bool checkAdminOptionImpl(const std::vector & role_ids, const std::unordered_map & names_of_roles) const; template - bool checkAdminOptionImpl2(const Container & role_ids, const GetNameFunction & get_name_function) const; + bool checkAdminOptionImplHelper(const Container & role_ids, const GetNameFunction & get_name_function) const; const AccessControlManager * manager = nullptr; const Params params; diff --git a/src/Access/GrantedRoles.cpp b/src/Access/GrantedRoles.cpp index 4d7007c4db6..7930b56e44d 100644 --- a/src/Access/GrantedRoles.cpp +++ b/src/Access/GrantedRoles.cpp @@ -1,37 +1,38 @@ #include +#include #include +#include namespace DB { -void GrantedRoles::grant(const UUID & role) +void GrantedRoles::grant(const UUID & role_) { - roles.insert(role); + roles.insert(role_); } void GrantedRoles::grant(const std::vector & roles_) { - for (const UUID & role : roles_) - grant(role); + roles.insert(roles_.begin(), roles_.end()); } -void GrantedRoles::grantWithAdminOption(const UUID & role) +void GrantedRoles::grantWithAdminOption(const UUID & role_) { - roles.insert(role); - roles_with_admin_option.insert(role); + roles.insert(role_); + roles_with_admin_option.insert(role_); } void GrantedRoles::grantWithAdminOption(const std::vector & roles_) { - for (const UUID & role : roles_) - grantWithAdminOption(role); + roles.insert(roles_.begin(), roles_.end()); + roles_with_admin_option.insert(roles_.begin(), roles_.end()); } -void GrantedRoles::revoke(const UUID & role) +void GrantedRoles::revoke(const UUID & role_) { - roles.erase(role); - roles_with_admin_option.erase(role); + roles.erase(role_); + roles_with_admin_option.erase(role_); } void GrantedRoles::revoke(const std::vector & roles_) @@ -40,9 +41,9 @@ void GrantedRoles::revoke(const std::vector & roles_) revoke(role); } 
-void GrantedRoles::revokeAdminOption(const UUID & role) +void GrantedRoles::revokeAdminOption(const UUID & role_) { - roles_with_admin_option.erase(role); + roles_with_admin_option.erase(role_); } void GrantedRoles::revokeAdminOption(const std::vector & roles_) @@ -52,13 +53,118 @@ void GrantedRoles::revokeAdminOption(const std::vector & roles_) } -GrantedRoles::Grants GrantedRoles::getGrants() const +bool GrantedRoles::isGranted(const UUID & role_) const { - Grants res; - res.grants_with_admin_option.insert(res.grants_with_admin_option.end(), roles_with_admin_option.begin(), roles_with_admin_option.end()); - res.grants.reserve(roles.size() - roles_with_admin_option.size()); - boost::range::set_difference(roles, roles_with_admin_option, std::back_inserter(res.grants)); + return roles.count(role_); +} + +bool GrantedRoles::isGrantedWithAdminOption(const UUID & role_) const +{ + return roles_with_admin_option.count(role_); +} + + +std::vector GrantedRoles::findGranted(const std::vector & ids) const +{ + std::vector res; + res.reserve(ids.size()); + for (const UUID & id : ids) + { + if (isGranted(id)) + res.push_back(id); + } return res; } +std::vector GrantedRoles::findGranted(const boost::container::flat_set & ids) const +{ + std::vector res; + res.reserve(ids.size()); + boost::range::set_difference(ids, roles, std::back_inserter(res)); + return res; +} + +std::vector GrantedRoles::findGranted(const RolesOrUsersSet & ids) const +{ + std::vector res; + for (const UUID & id : roles) + { + if (ids.match(id)) + res.emplace_back(id); + } + return res; +} + +std::vector GrantedRoles::findGrantedWithAdminOption(const std::vector & ids) const +{ + std::vector res; + res.reserve(ids.size()); + for (const UUID & id : ids) + { + if (isGrantedWithAdminOption(id)) + res.push_back(id); + } + return res; +} + +std::vector GrantedRoles::findGrantedWithAdminOption(const boost::container::flat_set & ids) const +{ + std::vector res; + res.reserve(ids.size()); + 
boost::range::set_difference(ids, roles_with_admin_option, std::back_inserter(res)); + return res; +} + +std::vector GrantedRoles::findGrantedWithAdminOption(const RolesOrUsersSet & ids) const +{ + std::vector res; + for (const UUID & id : roles_with_admin_option) + { + if (ids.match(id)) + res.emplace_back(id); + } + return res; +} + + +GrantedRoles::Elements GrantedRoles::getElements() const +{ + Elements elements; + + Element element; + element.ids.reserve(roles.size()); + boost::range::set_difference(roles, roles_with_admin_option, std::back_inserter(element.ids)); + if (!element.empty()) + { + element.admin_option = false; + elements.emplace_back(std::move(element)); + } + + if (!roles_with_admin_option.empty()) + { + element = {}; + element.ids.insert(element.ids.end(), roles_with_admin_option.begin(), roles_with_admin_option.end()); + element.admin_option = true; + elements.emplace_back(std::move(element)); + } + + return elements; +} + + +void GrantedRoles::makeUnion(const GrantedRoles & other) +{ + roles.insert(other.roles.begin(), other.roles.end()); + roles_with_admin_option.insert(other.roles_with_admin_option.begin(), other.roles_with_admin_option.end()); +} + +void GrantedRoles::makeIntersection(const GrantedRoles & other) +{ + boost::range::remove_erase_if(roles, [&other](const UUID & id) { return other.roles.find(id) == other.roles.end(); }); + + boost::range::remove_erase_if(roles_with_admin_option, [&other](const UUID & id) + { + return other.roles_with_admin_option.find(id) == other.roles_with_admin_option.end(); + }); +} } diff --git a/src/Access/GrantedRoles.h b/src/Access/GrantedRoles.h index fd091755a80..75ea56aba96 100644 --- a/src/Access/GrantedRoles.h +++ b/src/Access/GrantedRoles.h @@ -7,33 +7,55 @@ namespace DB { +struct RolesOrUsersSet; + /// Roles when they are granted to a role or user. /// Stores both the roles themselves and the roles with admin option. 
-struct GrantedRoles +class GrantedRoles { - boost::container::flat_set roles; - boost::container::flat_set roles_with_admin_option; - - void grant(const UUID & role); +public: + void grant(const UUID & role_); void grant(const std::vector & roles_); - void grantWithAdminOption(const UUID & role); + void grantWithAdminOption(const UUID & role_); void grantWithAdminOption(const std::vector & roles_); - void revoke(const UUID & role); + void revoke(const UUID & role_); void revoke(const std::vector & roles_); - void revokeAdminOption(const UUID & role); + void revokeAdminOption(const UUID & role_); void revokeAdminOption(const std::vector & roles_); - struct Grants + bool isGranted(const UUID & role_) const; + bool isGrantedWithAdminOption(const UUID & role_) const; + + const boost::container::flat_set & getGranted() const { return roles; } + const boost::container::flat_set & getGrantedWithAdminOption() const { return roles_with_admin_option; } + + std::vector findGranted(const std::vector & ids) const; + std::vector findGranted(const boost::container::flat_set & ids) const; + std::vector findGranted(const RolesOrUsersSet & ids) const; + std::vector findGrantedWithAdminOption(const std::vector & ids) const; + std::vector findGrantedWithAdminOption(const boost::container::flat_set & ids) const; + std::vector findGrantedWithAdminOption(const RolesOrUsersSet & ids) const; + + struct Element { - std::vector grants; - std::vector grants_with_admin_option; + std::vector ids; + bool admin_option = false; + bool empty() const { return ids.empty(); } }; + using Elements = std::vector; /// Retrieves the information about grants. 
- Grants getGrants() const; + Elements getElements() const; + + void makeUnion(const GrantedRoles & other); + void makeIntersection(const GrantedRoles & other); friend bool operator ==(const GrantedRoles & left, const GrantedRoles & right) { return (left.roles == right.roles) && (left.roles_with_admin_option == right.roles_with_admin_option); } friend bool operator !=(const GrantedRoles & left, const GrantedRoles & right) { return !(left == right); } + +private: + boost::container::flat_set roles; + boost::container::flat_set roles_with_admin_option; }; } diff --git a/src/Access/LDAPAccessStorage.cpp b/src/Access/LDAPAccessStorage.cpp index b69f489ca6b..b47a9b3e041 100644 --- a/src/Access/LDAPAccessStorage.cpp +++ b/src/Access/LDAPAccessStorage.cpp @@ -187,13 +187,10 @@ void LDAPAccessStorage::applyRoleChangeNoLock(bool grant, const UUID & role_id, if (auto user = typeid_cast>(entity_)) { auto changed_user = typeid_cast>(user->clone()); - auto & granted_roles = changed_user->granted_roles.roles; - if (grant) - granted_roles.insert(role_id); + changed_user->granted_roles.grant(role_id); else - granted_roles.erase(role_id); - + changed_user->granted_roles.revoke(role_id); return changed_user; } return entity_; @@ -229,7 +226,7 @@ void LDAPAccessStorage::assignRolesNoLock(User & user, const LDAPClient::SearchR void LDAPAccessStorage::assignRolesNoLock(User & user, const LDAPClient::SearchResultsList & external_roles, const std::size_t external_roles_hash) const { const auto & user_name = user.getName(); - auto & granted_roles = user.granted_roles.roles; + auto & granted_roles = user.granted_roles; const auto local_role_names = mapExternalRolesNoLock(external_roles); auto grant_role = [this, &user_name, &granted_roles] (const String & role_name, const bool common) @@ -247,7 +244,7 @@ void LDAPAccessStorage::assignRolesNoLock(User & user, const LDAPClient::SearchR if (it != granted_role_ids.end()) { const auto & role_id = it->second; - granted_roles.insert(role_id); + 
granted_roles.grant(role_id); } else { @@ -256,7 +253,7 @@ void LDAPAccessStorage::assignRolesNoLock(User & user, const LDAPClient::SearchR }; external_role_hashes.erase(user_name); - granted_roles.clear(); + granted_roles = {}; const auto old_role_names = std::move(roles_per_users[user_name]); // Grant the common roles first. diff --git a/src/Access/RoleCache.cpp b/src/Access/RoleCache.cpp index f386044bbf7..8fa3d51f867 100644 --- a/src/Access/RoleCache.cpp +++ b/src/Access/RoleCache.cpp @@ -46,10 +46,10 @@ namespace roles_info.access.makeUnion(role->access); roles_info.settings_from_enabled_roles.merge(role->settings); - for (const auto & granted_role : role->granted_roles.roles) + for (const auto & granted_role : role->granted_roles.getGranted()) collectRoles(roles_info, skip_ids, get_role_function, granted_role, false, false); - for (const auto & granted_role : role->granted_roles.roles_with_admin_option) + for (const auto & granted_role : role->granted_roles.getGrantedWithAdminOption()) collectRoles(roles_info, skip_ids, get_role_function, granted_role, false, true); } } @@ -63,15 +63,15 @@ RoleCache::~RoleCache() = default; std::shared_ptr -RoleCache::getEnabledRoles(const boost::container::flat_set & roles, const boost::container::flat_set & roles_with_admin_option) +RoleCache::getEnabledRoles(const std::vector & roles, const std::vector & roles_with_admin_option) { /// Declared before `lock` to send notifications after the mutex will be unlocked. 
ext::scope_guard notifications; std::lock_guard lock{mutex}; EnabledRoles::Params params; - params.current_roles = roles; - params.current_roles_with_admin_option = roles_with_admin_option; + params.current_roles.insert(roles.begin(), roles.end()); + params.current_roles_with_admin_option.insert(roles_with_admin_option.begin(), roles_with_admin_option.end()); auto it = enabled_roles.find(params); if (it != enabled_roles.end()) { diff --git a/src/Access/RoleCache.h b/src/Access/RoleCache.h index cc6c8599f27..b3f426debcb 100644 --- a/src/Access/RoleCache.h +++ b/src/Access/RoleCache.h @@ -20,7 +20,8 @@ public: ~RoleCache(); std::shared_ptr getEnabledRoles( - const boost::container::flat_set & current_roles, const boost::container::flat_set & current_roles_with_admin_option); + const std::vector & current_roles, + const std::vector & current_roles_with_admin_option); private: void collectEnabledRoles(ext::scope_guard & notifications); diff --git a/src/Access/RolesOrUsersSet.cpp b/src/Access/RolesOrUsersSet.cpp index cb0beb42700..ebd4f0f7a40 100644 --- a/src/Access/RolesOrUsersSet.cpp +++ b/src/Access/RolesOrUsersSet.cpp @@ -72,20 +72,20 @@ void RolesOrUsersSet::init(const ASTRolesOrUsersSet & ast, const AccessControlMa if (ast.id_mode) return parse(name); assert(manager); - if (ast.allow_user_names && ast.allow_role_names) + if (ast.allow_users && ast.allow_roles) { auto id = manager->find(name); if (id) return *id; return manager->getID(name); } - else if (ast.allow_user_names) + else if (ast.allow_users) { return manager->getID(name); } else { - assert(ast.allow_role_names); + assert(ast.allow_roles); return manager->getID(name); } }; @@ -106,8 +106,8 @@ void RolesOrUsersSet::init(const ASTRolesOrUsersSet & ast, const AccessControlMa if (!ast.except_names.empty()) { except_ids.reserve(ast.except_names.size()); - for (const String & except_name : ast.except_names) - except_ids.insert(name_to_id(except_name)); + for (const String & name : ast.except_names) + 
except_ids.insert(name_to_id(name)); } if (ast.except_current_user) @@ -116,8 +116,8 @@ void RolesOrUsersSet::init(const ASTRolesOrUsersSet & ast, const AccessControlMa except_ids.insert(*current_user_id); } - for (const UUID & except_id : except_ids) - ids.erase(except_id); + for (const UUID & id : except_ids) + ids.erase(id); } @@ -127,7 +127,7 @@ std::shared_ptr RolesOrUsersSet::toAST() const ast->id_mode = true; ast->all = all; - if (!ids.empty()) + if (!ids.empty() && !all) { ast->names.reserve(ids.size()); for (const UUID & id : ids) @@ -152,7 +152,7 @@ std::shared_ptr RolesOrUsersSet::toASTWithNames(const Access auto ast = std::make_shared(); ast->all = all; - if (!ids.empty()) + if (!ids.empty() && !all) { ast->names.reserve(ids.size()); for (const UUID & id : ids) @@ -194,44 +194,6 @@ String RolesOrUsersSet::toStringWithNames(const AccessControlManager & manager) } -Strings RolesOrUsersSet::toStringsWithNames(const AccessControlManager & manager) const -{ - if (!all && ids.empty()) - return {}; - - Strings res; - res.reserve(ids.size() + except_ids.size()); - - if (all) - res.emplace_back("ALL"); - else - { - for (const UUID & id : ids) - { - auto name = manager.tryReadName(id); - if (name) - res.emplace_back(std::move(*name)); - } - std::sort(res.begin(), res.end()); - } - - if (!except_ids.empty()) - { - res.emplace_back("EXCEPT"); - size_t old_size = res.size(); - for (const UUID & id : except_ids) - { - auto name = manager.tryReadName(id); - if (name) - res.emplace_back(std::move(*name)); - } - std::sort(res.begin() + old_size, res.end()); - } - - return res; -} - - bool RolesOrUsersSet::empty() const { return ids.empty() && !all; @@ -248,14 +210,18 @@ void RolesOrUsersSet::clear() void RolesOrUsersSet::add(const UUID & id) { - ids.insert(id); + if (!all) + ids.insert(id); + except_ids.erase(id); } void RolesOrUsersSet::add(const std::vector & ids_) { + if (!all) + ids.insert(ids_.begin(), ids_.end()); for (const auto & id : ids_) - add(id); + 
except_ids.erase(id); } diff --git a/src/Access/RolesOrUsersSet.h b/src/Access/RolesOrUsersSet.h index bae7f52a574..0d8983c2ec3 100644 --- a/src/Access/RolesOrUsersSet.h +++ b/src/Access/RolesOrUsersSet.h @@ -13,7 +13,8 @@ class AccessControlManager; /// Represents a set of users/roles like -/// {user_name | role_name | CURRENT_USER} [,...] | NONE | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...] +/// {user_name | role_name | CURRENT_USER | ALL | NONE} [,...] +/// [EXCEPT {user_name | role_name | CURRENT_USER | ALL | NONE} [,...]] /// Similar to ASTRolesOrUsersSet, but with IDs instead of names. struct RolesOrUsersSet { @@ -60,8 +61,8 @@ struct RolesOrUsersSet friend bool operator ==(const RolesOrUsersSet & lhs, const RolesOrUsersSet & rhs); friend bool operator !=(const RolesOrUsersSet & lhs, const RolesOrUsersSet & rhs) { return !(lhs == rhs); } - boost::container::flat_set ids; bool all = false; + boost::container::flat_set ids; boost::container::flat_set except_ids; private: diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 8615cf70343..62a3d8a279d 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -753,7 +753,7 @@ std::optional Context::getUserID() const } -void Context::setCurrentRoles(const boost::container::flat_set & current_roles_) +void Context::setCurrentRoles(const std::vector & current_roles_) { auto lock = getLock(); if (current_roles == current_roles_ && !use_default_roles) diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 563239cd88d..d89799e9d97 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -181,7 +181,7 @@ private: InputBlocksReader input_blocks_reader; std::optional user_id; - boost::container::flat_set current_roles; + std::vector current_roles; bool use_default_roles = false; std::shared_ptr access; std::shared_ptr initial_row_policy; @@ -354,7 +354,7 @@ public: String getUserName() const; std::optional getUserID() 
const; - void setCurrentRoles(const boost::container::flat_set & current_roles_); + void setCurrentRoles(const std::vector & current_roles_); void setCurrentRolesDefault(); boost::container::flat_set getCurrentRoles() const; boost::container::flat_set getEnabledRoles() const; diff --git a/src/Interpreters/InterpreterCreateQuotaQuery.cpp b/src/Interpreters/InterpreterCreateQuotaQuery.cpp index ff30a2fff47..4c35aaf573b 100644 --- a/src/Interpreters/InterpreterCreateQuotaQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuotaQuery.cpp @@ -78,7 +78,7 @@ BlockIO InterpreterCreateQuotaQuery::execute() if (!query.cluster.empty()) { - query.replaceCurrentUserTagWithName(context.getUserName()); + query.replaceCurrentUserTag(context.getUserName()); return executeDDLQueryOnCluster(query_ptr, context); } diff --git a/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp b/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp index 8f1c5b061e0..0932f74cdc5 100644 --- a/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp +++ b/src/Interpreters/InterpreterCreateRowPolicyQuery.cpp @@ -49,7 +49,7 @@ BlockIO InterpreterCreateRowPolicyQuery::execute() if (!query.cluster.empty()) { - query.replaceCurrentUserTagWithName(context.getUserName()); + query.replaceCurrentUserTag(context.getUserName()); return executeDDLQueryOnCluster(query_ptr, context); } @@ -58,7 +58,7 @@ BlockIO InterpreterCreateRowPolicyQuery::execute() if (query.roles) roles_from_query = RolesOrUsersSet{*query.roles, access_control, context.getUserID()}; - query.replaceEmptyDatabaseWithCurrent(context.getCurrentDatabase()); + query.replaceEmptyDatabase(context.getCurrentDatabase()); if (query.alter) { diff --git a/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp b/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp index b65225db16c..0931b48e723 100644 --- a/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp +++ b/src/Interpreters/InterpreterCreateSettingsProfileQuery.cpp @@ -50,7 +50,7 @@ 
BlockIO InterpreterCreateSettingsProfileQuery::execute() if (!query.cluster.empty()) { - query.replaceCurrentUserTagWithName(context.getUserName()); + query.replaceCurrentUserTag(context.getUserName()); return executeDDLQueryOnCluster(query_ptr, context); } diff --git a/src/Interpreters/InterpreterDropAccessEntityQuery.cpp b/src/Interpreters/InterpreterDropAccessEntityQuery.cpp index e86f8361100..e02c047e75d 100644 --- a/src/Interpreters/InterpreterDropAccessEntityQuery.cpp +++ b/src/Interpreters/InterpreterDropAccessEntityQuery.cpp @@ -31,7 +31,7 @@ BlockIO InterpreterDropAccessEntityQuery::execute() if (!query.cluster.empty()) return executeDDLQueryOnCluster(query_ptr, context); - query.replaceEmptyDatabaseWithCurrent(context.getCurrentDatabase()); + query.replaceEmptyDatabase(context.getCurrentDatabase()); auto do_drop = [&](const Strings & names) { diff --git a/src/Interpreters/InterpreterGrantQuery.cpp b/src/Interpreters/InterpreterGrantQuery.cpp index 034ebcec050..ac37266bcfb 100644 --- a/src/Interpreters/InterpreterGrantQuery.cpp +++ b/src/Interpreters/InterpreterGrantQuery.cpp @@ -12,13 +12,15 @@ #include #include - namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + namespace { - using Kind = ASTGrantQuery::Kind; - template void updateFromQueryTemplate( T & grantee, @@ -27,38 +29,28 @@ namespace { if (!query.access_rights_elements.empty()) { - if (query.kind == Kind::GRANT) - { - if (query.grant_option) - grantee.access.grantWithGrantOption(query.access_rights_elements); - else - grantee.access.grant(query.access_rights_elements); - } + if (query.is_revoke) + grantee.access.revoke(query.access_rights_elements); else - { - if (query.grant_option) - grantee.access.revokeGrantOption(query.access_rights_elements); - else - grantee.access.revoke(query.access_rights_elements); - } + grantee.access.grant(query.access_rights_elements); } if (!roles_to_grant_or_revoke.empty()) { - if (query.kind == Kind::GRANT) - { - if 
(query.admin_option) - grantee.granted_roles.grantWithAdminOption(roles_to_grant_or_revoke); - else - grantee.granted_roles.grant(roles_to_grant_or_revoke); - } - else + if (query.is_revoke) { if (query.admin_option) grantee.granted_roles.revokeAdminOption(roles_to_grant_or_revoke); else grantee.granted_roles.revoke(roles_to_grant_or_revoke); } + else + { + if (query.admin_option) + grantee.granted_roles.grantWithAdminOption(roles_to_grant_or_revoke); + else + grantee.granted_roles.grant(roles_to_grant_or_revoke); + } } } @@ -72,122 +64,166 @@ namespace else if (auto * role = typeid_cast(&grantee)) updateFromQueryTemplate(*role, query, roles_to_grant_or_revoke); } + + void checkGrantOption( + const AccessControlManager & access_control, + const ContextAccess & access, + const ASTGrantQuery & query, + const std::vector & grantees_from_query) + { + const auto & elements = query.access_rights_elements; + if (elements.empty()) + return; + + /// To execute the command GRANT the current user needs to have the access granted + /// with GRANT OPTION. + if (!query.is_revoke) + { + access.checkGrantOption(elements); + return; + } + + if (access.hasGrantOption(elements)) + return; + + /// Special case for the command REVOKE: it's possible that the current user doesn't have + /// the access granted with GRANT OPTION but it's still ok because the roles or users + /// from whom the access rights will be revoked don't have the specified access granted either. + /// + /// For example, to execute + /// GRANT ALL ON mydb.* TO role1 + /// REVOKE ALL ON *.* FROM role1 + /// the current user needs to have grants only on the 'mydb' database. 
+ AccessRights all_granted_access; + for (const auto & id : grantees_from_query) + { + auto entity = access_control.tryRead(id); + if (auto role = typeid_cast(entity)) + all_granted_access.makeUnion(role->access); + else if (auto user = typeid_cast(entity)) + all_granted_access.makeUnion(user->access); + } + + AccessRights required_access; + if (elements[0].is_partial_revoke) + { + AccessRightsElements non_revoke_elements = elements; + std::for_each(non_revoke_elements.begin(), non_revoke_elements.end(), [&](AccessRightsElement & element) { element.is_partial_revoke = false; }); + required_access.grant(non_revoke_elements); + } + else + { + required_access.grant(elements); + } + required_access.makeIntersection(all_granted_access); + + for (auto & required_access_element : required_access.getElements()) + { + if (!required_access_element.is_partial_revoke && (required_access_element.grant_option || !elements[0].grant_option)) + access.checkGrantOption(required_access_element); + } + } + + + std::vector getRoleIDsAndCheckAdminOption( + const AccessControlManager & access_control, + const ContextAccess & access, + const ASTGrantQuery & query, + const RolesOrUsersSet & roles_from_query, + const std::vector & grantees_from_query) + { + std::vector matching_ids; + + if (!query.is_revoke) + { + matching_ids = roles_from_query.getMatchingIDs(access_control); + access.checkAdminOption(matching_ids); + return matching_ids; + } + + if (!roles_from_query.all) + { + matching_ids = roles_from_query.getMatchingIDs(); + if (access.hasAdminOption(matching_ids)) + return matching_ids; + } + + /// Special case for the command REVOKE: it's possible that the current user doesn't have the admin option + /// for some of the specified roles but it's still ok because the roles or users from whom the roles will be + /// revoked from don't have the specified roles granted either. 
+ /// + /// For example, to execute + /// GRANT role2 TO role1 + /// REVOKE ALL FROM role1 + /// the current user needs to have only 'role2' to be granted with admin option (not all the roles). + GrantedRoles all_granted_roles; + for (const auto & id : grantees_from_query) + { + auto entity = access_control.tryRead(id); + if (auto role = typeid_cast(entity)) + all_granted_roles.makeUnion(role->granted_roles); + else if (auto user = typeid_cast(entity)) + all_granted_roles.makeUnion(user->granted_roles); + } + + const auto & all_granted_roles_set = query.admin_option ? all_granted_roles.getGrantedWithAdminOption() : all_granted_roles.getGranted(); + if (roles_from_query.all) + boost::range::set_difference(all_granted_roles_set, roles_from_query.except_ids, std::back_inserter(matching_ids)); + else + boost::range::remove_erase_if(matching_ids, [&](const UUID & id) { return !all_granted_roles_set.count(id); }); + access.checkAdminOption(matching_ids); + return matching_ids; + } } BlockIO InterpreterGrantQuery::execute() { auto & query = query_ptr->as(); - query.replaceCurrentUserTagWithName(context.getUserName()); - if (!query.cluster.empty()) - return executeDDLQueryOnCluster(query_ptr, context, query.access_rights_elements, true); + query.replaceCurrentUserTag(context.getUserName()); + query.access_rights_elements.eraseNonGrantable(); + + if (!query.access_rights_elements.sameOptions()) + throw Exception("Elements of an ASTGrantQuery are expected to have the same options", ErrorCodes::LOGICAL_ERROR); + if (!query.access_rights_elements.empty() && query.access_rights_elements[0].is_partial_revoke && !query.is_revoke) + throw Exception("A partial revoke should be revoked, not granted", ErrorCodes::LOGICAL_ERROR); - auto access = context.getAccess(); auto & access_control = context.getAccessControlManager(); - query.replaceEmptyDatabaseWithCurrent(context.getCurrentDatabase()); - - RolesOrUsersSet roles_set; + std::optional roles_set; if (query.roles) roles_set = 
RolesOrUsersSet{*query.roles, access_control}; - std::vector to_roles = RolesOrUsersSet{*query.to_roles, access_control, context.getUserID()}.getMatchingIDs(access_control); + std::vector grantees = RolesOrUsersSet{*query.grantees, access_control, context.getUserID()}.getMatchingIDs(access_control); + + /// Check if the current user has corresponding roles granted with admin option. + std::vector roles; + if (roles_set) + roles = getRoleIDsAndCheckAdminOption(access_control, *context.getAccess(), query, *roles_set, grantees); + + if (!query.cluster.empty()) + { + /// To execute the command GRANT the current user needs to have the access granted with GRANT OPTION. + auto required_access = query.access_rights_elements; + std::for_each(required_access.begin(), required_access.end(), [&](AccessRightsElement & element) { element.grant_option = true; }); + return executeDDLQueryOnCluster(query_ptr, context, std::move(required_access)); + } + + query.replaceEmptyDatabase(context.getCurrentDatabase()); /// Check if the current user has corresponding access rights with grant option. if (!query.access_rights_elements.empty()) - { - query.access_rights_elements.removeNonGrantableFlags(); + checkGrantOption(access_control, *context.getAccess(), query, grantees); - /// Special case for REVOKE: it's possible that the current user doesn't have the grant option for all - /// the specified access rights and that's ok because the roles or users which the access rights - /// will be revoked from don't have the specified access rights either. - /// - /// For example, to execute - /// GRANT ALL ON mydb.* TO role1 - /// REVOKE ALL ON *.* FROM role1 - /// the current user needs to have access rights only for the 'mydb' database. 
- if ((query.kind == Kind::REVOKE) && !access->hasGrantOption(query.access_rights_elements)) - { - AccessRights max_access; - for (const auto & id : to_roles) - { - auto entity = access_control.tryRead(id); - if (auto role = typeid_cast(entity)) - max_access.makeUnion(role->access); - else if (auto user = typeid_cast(entity)) - max_access.makeUnion(user->access); - } - AccessRights access_to_revoke; - if (query.grant_option) - access_to_revoke.grantWithGrantOption(query.access_rights_elements); - else - access_to_revoke.grant(query.access_rights_elements); - access_to_revoke.makeIntersection(max_access); - AccessRightsElements filtered_access_to_revoke; - for (auto & element : access_to_revoke.getElements()) - { - if ((element.kind == Kind::GRANT) && (element.grant_option || !query.grant_option)) - filtered_access_to_revoke.emplace_back(std::move(element)); - } - query.access_rights_elements = std::move(filtered_access_to_revoke); - } - - access->checkGrantOption(query.access_rights_elements); - } - - /// Check if the current user has corresponding roles granted with admin option. - std::vector roles_to_grant_or_revoke; - if (!roles_set.empty()) - { - bool all = roles_set.all; - if (!all) - roles_to_grant_or_revoke = roles_set.getMatchingIDs(); - - /// Special case for REVOKE: it's possible that the current user doesn't have the admin option for all - /// the specified roles and that's ok because the roles or users which the roles will be revoked from - /// don't have the specified roles granted either. - /// - /// For example, to execute - /// GRANT role2 TO role1 - /// REVOKE ALL FROM role1 - /// the current user needs to have only 'role2' to be granted with admin option (not all the roles). 
- if ((query.kind == Kind::REVOKE) && (roles_set.all || !access->hasAdminOption(roles_to_grant_or_revoke))) - { - auto & roles_to_revoke = roles_to_grant_or_revoke; - boost::container::flat_set max_roles; - for (const auto & id : to_roles) - { - auto entity = access_control.tryRead(id); - auto add_to_max_roles = [&](const GrantedRoles & granted_roles) - { - if (query.admin_option) - max_roles.insert(granted_roles.roles_with_admin_option.begin(), granted_roles.roles_with_admin_option.end()); - else - max_roles.insert(granted_roles.roles.begin(), granted_roles.roles.end()); - }; - if (auto role = typeid_cast(entity)) - add_to_max_roles(role->granted_roles); - else if (auto user = typeid_cast(entity)) - add_to_max_roles(user->granted_roles); - } - if (roles_set.all) - boost::range::set_difference(max_roles, roles_set.except_ids, std::back_inserter(roles_to_revoke)); - else - boost::range::remove_erase_if(roles_to_revoke, [&](const UUID & id) { return !max_roles.count(id); }); - } - - access->checkAdminOption(roles_to_grant_or_revoke); - } - - /// Update roles and users listed in `to_roles`. + /// Update roles and users listed in `grantees`. 
auto update_func = [&](const AccessEntityPtr & entity) -> AccessEntityPtr { auto clone = entity->clone(); - updateFromQueryImpl(*clone, query, roles_to_grant_or_revoke); + updateFromQueryImpl(*clone, query, roles); return clone; }; - access_control.update(to_roles, update_func); + access_control.update(grantees, update_func); return {}; } @@ -213,10 +249,10 @@ void InterpreterGrantQuery::updateRoleFromQuery(Role & role, const ASTGrantQuery void InterpreterGrantQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & /*ast*/, const Context &) const { auto & query = query_ptr->as(); - if (query.kind == Kind::GRANT) - elem.query_kind = "Grant"; - else if (query.kind == Kind::REVOKE) + if (query.is_revoke) elem.query_kind = "Revoke"; + else + elem.query_kind = "Grant"; } } diff --git a/src/Interpreters/InterpreterSetRoleQuery.cpp b/src/Interpreters/InterpreterSetRoleQuery.cpp index f955c881b2e..8d314606329 100644 --- a/src/Interpreters/InterpreterSetRoleQuery.cpp +++ b/src/Interpreters/InterpreterSetRoleQuery.cpp @@ -39,20 +39,18 @@ void InterpreterSetRoleQuery::setRole(const ASTSetRoleQuery & query) else { RolesOrUsersSet roles_from_query{*query.roles, access_control}; - boost::container::flat_set new_current_roles; + std::vector new_current_roles; if (roles_from_query.all) { - for (const auto & id : user->granted_roles.roles) - if (roles_from_query.match(id)) - new_current_roles.emplace(id); + new_current_roles = user->granted_roles.findGranted(roles_from_query); } else { for (const auto & id : roles_from_query.getMatchingIDs()) { - if (!user->granted_roles.roles.count(id)) + if (!user->granted_roles.isGranted(id)) throw Exception("Role should be granted to set current", ErrorCodes::SET_NON_GRANTED_ROLE); - new_current_roles.emplace(id); + new_current_roles.emplace_back(id); } } session_context.setCurrentRoles(new_current_roles); @@ -85,7 +83,7 @@ void InterpreterSetRoleQuery::updateUserSetDefaultRoles(User & user, const Roles { for (const auto & id : 
roles_from_query.getMatchingIDs()) { - if (!user.granted_roles.roles.count(id)) + if (!user.granted_roles.isGranted(id)) throw Exception("Role should be granted to set default", ErrorCodes::SET_NON_GRANTED_ROLE); } } diff --git a/src/Interpreters/InterpreterShowAccessEntitiesQuery.cpp b/src/Interpreters/InterpreterShowAccessEntitiesQuery.cpp index 009b9c580d3..31c1ef874df 100644 --- a/src/Interpreters/InterpreterShowAccessEntitiesQuery.cpp +++ b/src/Interpreters/InterpreterShowAccessEntitiesQuery.cpp @@ -32,7 +32,7 @@ BlockIO InterpreterShowAccessEntitiesQuery::execute() String InterpreterShowAccessEntitiesQuery::getRewrittenQuery() const { auto & query = query_ptr->as(); - query.replaceEmptyDatabaseWithCurrent(context.getCurrentDatabase()); + query.replaceEmptyDatabase(context.getCurrentDatabase()); String origin; String expr = "*"; String filter, order; diff --git a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp index 3135b0cfdf2..4e391035d5d 100644 --- a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp @@ -263,7 +263,7 @@ std::vector InterpreterShowCreateAccessEntityQuery::getEntities auto & show_query = query_ptr->as(); const auto & access_control = context.getAccessControlManager(); context.checkAccess(getRequiredAccess()); - show_query.replaceEmptyDatabaseWithCurrent(context.getCurrentDatabase()); + show_query.replaceEmptyDatabase(context.getCurrentDatabase()); std::vector entities; if (show_query.all) diff --git a/src/Interpreters/InterpreterShowGrantsQuery.cpp b/src/Interpreters/InterpreterShowGrantsQuery.cpp index a2ddc5eec27..bda5ef0f8b1 100644 --- a/src/Interpreters/InterpreterShowGrantsQuery.cpp +++ b/src/Interpreters/InterpreterShowGrantsQuery.cpp @@ -32,56 +32,50 @@ namespace { ASTs res; - std::shared_ptr to_roles = std::make_shared(); - to_roles->names.push_back(grantee.getName()); + std::shared_ptr 
grantees = std::make_shared(); + grantees->names.push_back(grantee.getName()); std::shared_ptr current_query = nullptr; - auto elements = grantee.access.getElements(); - for (const auto & element : elements) + for (const auto & element : grantee.access.getElements()) { + if (element.empty()) + continue; + if (current_query) { const auto & prev_element = current_query->access_rights_elements.back(); - bool continue_using_current_query = (element.database == prev_element.database) - && (element.any_database == prev_element.any_database) && (element.table == prev_element.table) - && (element.any_table == prev_element.any_table) && (element.grant_option == current_query->grant_option) - && (element.kind == current_query->kind); - if (!continue_using_current_query) + bool continue_with_current_query = element.sameDatabaseAndTable(prev_element) && element.sameOptions(prev_element); + if (!continue_with_current_query) current_query = nullptr; } if (!current_query) { current_query = std::make_shared(); - current_query->kind = element.kind; - current_query->attach = attach_mode; - current_query->grant_option = element.grant_option; - current_query->to_roles = to_roles; + current_query->grantees = grantees; + current_query->attach_mode = attach_mode; + if (element.is_partial_revoke) + current_query->is_revoke = true; res.push_back(current_query); } current_query->access_rights_elements.emplace_back(std::move(element)); } - auto grants_roles = grantee.granted_roles.getGrants(); - - for (bool admin_option : {false, true}) + for (const auto & element : grantee.granted_roles.getElements()) { - const auto & roles = admin_option ? 
grants_roles.grants_with_admin_option : grants_roles.grants; - if (roles.empty()) + if (element.empty()) continue; auto grant_query = std::make_shared(); - using Kind = ASTGrantQuery::Kind; - grant_query->kind = Kind::GRANT; - grant_query->attach = attach_mode; - grant_query->admin_option = admin_option; - grant_query->to_roles = to_roles; + grant_query->grantees = grantees; + grant_query->admin_option = element.admin_option; + grant_query->attach_mode = attach_mode; if (attach_mode) - grant_query->roles = RolesOrUsersSet{roles}.toAST(); + grant_query->roles = RolesOrUsersSet{element.ids}.toAST(); else - grant_query->roles = RolesOrUsersSet{roles}.toASTWithNames(*manager); + grant_query->roles = RolesOrUsersSet{element.ids}.toASTWithNames(*manager); res.push_back(std::move(grant_query)); } diff --git a/src/Interpreters/executeDDLQueryOnCluster.cpp b/src/Interpreters/executeDDLQueryOnCluster.cpp index 1937fbaf905..c498eb21379 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.cpp +++ b/src/Interpreters/executeDDLQueryOnCluster.cpp @@ -50,12 +50,12 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & cont return executeDDLQueryOnCluster(query_ptr_, context, {}); } -BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context, const AccessRightsElements & query_requires_access, bool query_requires_grant_option) +BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context, const AccessRightsElements & query_requires_access) { - return executeDDLQueryOnCluster(query_ptr, context, AccessRightsElements{query_requires_access}, query_requires_grant_option); + return executeDDLQueryOnCluster(query_ptr, context, AccessRightsElements{query_requires_access}); } -BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & context, AccessRightsElements && query_requires_access, bool query_requires_grant_option) +BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & 
context, AccessRightsElements && query_requires_access) { /// Remove FORMAT and INTO OUTFILE if exists ASTPtr query_ptr = query_ptr_->clone(); @@ -154,10 +154,7 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & cont visitor.visitDDL(query_ptr); /// Check access rights, assume that all servers have the same users config - if (query_requires_grant_option) - context.getAccess()->checkGrantOption(query_requires_access); - else - context.checkAccess(query_requires_access); + context.checkAccess(query_requires_access); DDLLogEntry entry; entry.hosts = std::move(hosts); diff --git a/src/Interpreters/executeDDLQueryOnCluster.h b/src/Interpreters/executeDDLQueryOnCluster.h index 2b272d3b0da..1bcbff36178 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.h +++ b/src/Interpreters/executeDDLQueryOnCluster.h @@ -21,8 +21,8 @@ bool isSupportedAlterType(int type); /// Pushes distributed DDL query to the queue. /// Returns DDLQueryStatusInputStream, which reads results of query execution on each host in the cluster. 
BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context); -BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context, const AccessRightsElements & query_requires_access, bool query_requires_grant_option = false); -BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context, AccessRightsElements && query_requires_access, bool query_requires_grant_option = false); +BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context, const AccessRightsElements & query_requires_access); +BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context, AccessRightsElements && query_requires_access); class DDLQueryStatusInputStream final : public IBlockInputStream diff --git a/src/Parsers/ASTCreateQuotaQuery.cpp b/src/Parsers/ASTCreateQuotaQuery.cpp index 7e570b889e3..18f72d61319 100644 --- a/src/Parsers/ASTCreateQuotaQuery.cpp +++ b/src/Parsers/ASTCreateQuotaQuery.cpp @@ -185,10 +185,10 @@ void ASTCreateQuotaQuery::formatImpl(const FormatSettings & settings, FormatStat } -void ASTCreateQuotaQuery::replaceCurrentUserTagWithName(const String & current_user_name) const +void ASTCreateQuotaQuery::replaceCurrentUserTag(const String & current_user_name) const { if (roles) - roles->replaceCurrentUserTagWithName(current_user_name); + roles->replaceCurrentUserTag(current_user_name); } } diff --git a/src/Parsers/ASTCreateQuotaQuery.h b/src/Parsers/ASTCreateQuotaQuery.h index a1269afafa6..00984d4b4c9 100644 --- a/src/Parsers/ASTCreateQuotaQuery.h +++ b/src/Parsers/ASTCreateQuotaQuery.h @@ -56,7 +56,7 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; - void replaceCurrentUserTagWithName(const String & current_user_name) const; + void replaceCurrentUserTag(const String & current_user_name) const; ASTPtr getRewrittenASTWithoutOnCluster(const 
std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/src/Parsers/ASTCreateRowPolicyQuery.cpp b/src/Parsers/ASTCreateRowPolicyQuery.cpp index 30b001feeca..3b4c2484acf 100644 --- a/src/Parsers/ASTCreateRowPolicyQuery.cpp +++ b/src/Parsers/ASTCreateRowPolicyQuery.cpp @@ -169,15 +169,15 @@ void ASTCreateRowPolicyQuery::formatImpl(const FormatSettings & settings, Format } -void ASTCreateRowPolicyQuery::replaceCurrentUserTagWithName(const String & current_user_name) const +void ASTCreateRowPolicyQuery::replaceCurrentUserTag(const String & current_user_name) const { if (roles) - roles->replaceCurrentUserTagWithName(current_user_name); + roles->replaceCurrentUserTag(current_user_name); } -void ASTCreateRowPolicyQuery::replaceEmptyDatabaseWithCurrent(const String & current_database) const +void ASTCreateRowPolicyQuery::replaceEmptyDatabase(const String & current_database) const { if (names) - names->replaceEmptyDatabaseWithCurrent(current_database); + names->replaceEmptyDatabase(current_database); } } diff --git a/src/Parsers/ASTCreateRowPolicyQuery.h b/src/Parsers/ASTCreateRowPolicyQuery.h index 9d0e2fcce7b..46a7578726e 100644 --- a/src/Parsers/ASTCreateRowPolicyQuery.h +++ b/src/Parsers/ASTCreateRowPolicyQuery.h @@ -49,7 +49,7 @@ public: void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } - void replaceCurrentUserTagWithName(const String & current_user_name) const; - void replaceEmptyDatabaseWithCurrent(const String & current_database) const; + void replaceCurrentUserTag(const String & current_user_name) const; + void replaceEmptyDatabase(const String & current_database) const; }; } diff --git a/src/Parsers/ASTCreateSettingsProfileQuery.cpp b/src/Parsers/ASTCreateSettingsProfileQuery.cpp index 84f8309462e..e99c40ca681 100644 --- a/src/Parsers/ASTCreateSettingsProfileQuery.cpp +++ 
b/src/Parsers/ASTCreateSettingsProfileQuery.cpp @@ -86,9 +86,9 @@ void ASTCreateSettingsProfileQuery::formatImpl(const FormatSettings & format, Fo } -void ASTCreateSettingsProfileQuery::replaceCurrentUserTagWithName(const String & current_user_name) const +void ASTCreateSettingsProfileQuery::replaceCurrentUserTag(const String & current_user_name) const { if (to_roles) - to_roles->replaceCurrentUserTagWithName(current_user_name); + to_roles->replaceCurrentUserTag(current_user_name); } } diff --git a/src/Parsers/ASTCreateSettingsProfileQuery.h b/src/Parsers/ASTCreateSettingsProfileQuery.h index 119019093b2..df0a11456bc 100644 --- a/src/Parsers/ASTCreateSettingsProfileQuery.h +++ b/src/Parsers/ASTCreateSettingsProfileQuery.h @@ -39,7 +39,7 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; - void replaceCurrentUserTagWithName(const String & current_user_name) const; + void replaceCurrentUserTag(const String & current_user_name) const; ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/src/Parsers/ASTCreateUserQuery.h b/src/Parsers/ASTCreateUserQuery.h index 7acfd87909a..22992b2c408 100644 --- a/src/Parsers/ASTCreateUserQuery.h +++ b/src/Parsers/ASTCreateUserQuery.h @@ -19,11 +19,11 @@ class ASTSettingsProfileElements; * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] * * ALTER USER [IF EXISTS] name - * [RENAME TO new_name] - * [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}}|{WITH ldap SERVER 'server_name'}|{WITH kerberos [REALM 'realm']}] - * [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] - * [DEFAULT ROLE role [,...] 
| ALL | ALL EXCEPT role [,...] ] - * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [RENAME TO new_name] + * [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}}|{WITH ldap SERVER 'server_name'}|{WITH kerberos [REALM 'realm']}] + * [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] + * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] */ class ASTCreateUserQuery : public IAST, public ASTQueryWithOnCluster { @@ -46,7 +46,6 @@ public: std::optional remove_hosts; std::shared_ptr default_roles; - std::shared_ptr settings; String getID(char) const override; diff --git a/src/Parsers/ASTDropAccessEntityQuery.cpp b/src/Parsers/ASTDropAccessEntityQuery.cpp index 1df176c24ec..6c19c9f8af3 100644 --- a/src/Parsers/ASTDropAccessEntityQuery.cpp +++ b/src/Parsers/ASTDropAccessEntityQuery.cpp @@ -54,9 +54,9 @@ void ASTDropAccessEntityQuery::formatImpl(const FormatSettings & settings, Forma } -void ASTDropAccessEntityQuery::replaceEmptyDatabaseWithCurrent(const String & current_database) const +void ASTDropAccessEntityQuery::replaceEmptyDatabase(const String & current_database) const { if (row_policy_names) - row_policy_names->replaceEmptyDatabaseWithCurrent(current_database); + row_policy_names->replaceEmptyDatabase(current_database); } } diff --git a/src/Parsers/ASTDropAccessEntityQuery.h b/src/Parsers/ASTDropAccessEntityQuery.h index 76a5f450566..df78acef6f4 100644 --- a/src/Parsers/ASTDropAccessEntityQuery.h +++ b/src/Parsers/ASTDropAccessEntityQuery.h @@ -30,6 +30,6 @@ public: void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; ASTPtr 
getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } - void replaceEmptyDatabaseWithCurrent(const String & current_database) const; + void replaceEmptyDatabase(const String & current_database) const; }; } diff --git a/src/Parsers/ASTGrantQuery.cpp b/src/Parsers/ASTGrantQuery.cpp index 2610836c759..aca53868226 100644 --- a/src/Parsers/ASTGrantQuery.cpp +++ b/src/Parsers/ASTGrantQuery.cpp @@ -27,7 +27,26 @@ namespace } - void formatAccessRightsElements(const AccessRightsElements & elements, const IAST::FormatSettings & settings) + void formatONClause(const String & database, bool any_database, const String & table, bool any_table, const IAST::FormatSettings & settings) + { + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "ON " << (settings.hilite ? IAST::hilite_none : ""); + if (any_database) + { + settings.ostr << "*.*"; + } + else + { + if (!database.empty()) + settings.ostr << backQuoteIfNeed(database) << "."; + if (any_table) + settings.ostr << "*"; + else + settings.ostr << backQuoteIfNeed(table); + } + } + + + void formatElementsWithoutOptions(const AccessRightsElements & elements, const IAST::FormatSettings & settings) { bool no_output = true; for (size_t i = 0; i != elements.size(); ++i) @@ -58,31 +77,14 @@ namespace if (!next_element_on_same_db_and_table) { - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " ON " << (settings.hilite ? IAST::hilite_none : ""); - if (element.any_database) - settings.ostr << "*."; - else if (!element.database.empty()) - settings.ostr << backQuoteIfNeed(element.database) + "."; - - if (element.any_table) - settings.ostr << "*"; - else - settings.ostr << backQuoteIfNeed(element.table); + settings.ostr << " "; + formatONClause(element.database, element.any_database, element.table, element.any_table, settings); } } if (no_output) settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "USAGE ON " << (settings.hilite ? 
IAST::hilite_none : "") << "*.*"; } - - - void formatToRoles(const ASTRolesOrUsersSet & to_roles, ASTGrantQuery::Kind kind, const IAST::FormatSettings & settings) - { - using Kind = ASTGrantQuery::Kind; - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << ((kind == Kind::GRANT) ? " TO " : " FROM ") - << (settings.hilite ? IAST::hilite_none : ""); - to_roles.format(settings); - } } @@ -100,12 +102,18 @@ ASTPtr ASTGrantQuery::clone() const void ASTGrantQuery::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const { - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << (attach ? "ATTACH " : "") << ((kind == Kind::GRANT) ? "GRANT" : "REVOKE") + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << (attach_mode ? "ATTACH " : "") << (is_revoke ? "REVOKE" : "GRANT") << (settings.hilite ? IAST::hilite_none : ""); + if (!access_rights_elements.sameOptions()) + throw Exception("Elements of an ASTGrantQuery are expected to have the same options", ErrorCodes::LOGICAL_ERROR); + if (!access_rights_elements.empty() && access_rights_elements[0].is_partial_revoke && !is_revoke) + throw Exception("A partial revoke should be revoked, not granted", ErrorCodes::LOGICAL_ERROR); + bool grant_option = !access_rights_elements.empty() && access_rights_elements[0].grant_option; + formatOnCluster(settings); - if (kind == Kind::REVOKE) + if (is_revoke) { if (grant_option) settings.ostr << (settings.hilite ? hilite_keyword : "") << " GRANT OPTION FOR" << (settings.hilite ? hilite_none : ""); @@ -113,18 +121,21 @@ void ASTGrantQuery::formatImpl(const FormatSettings & settings, FormatState &, F settings.ostr << (settings.hilite ? hilite_keyword : "") << " ADMIN OPTION FOR" << (settings.hilite ? 
hilite_none : ""); } - if (roles && !access_rights_elements.empty()) - throw Exception("Either roles or access rights elements should be set", ErrorCodes::LOGICAL_ERROR); - settings.ostr << " "; if (roles) + { roles->format(settings); + if (!access_rights_elements.empty()) + throw Exception("ASTGrantQuery can contain either roles or access rights elements to grant or revoke, not both of them", ErrorCodes::LOGICAL_ERROR); + } else - formatAccessRightsElements(access_rights_elements, settings); + formatElementsWithoutOptions(access_rights_elements, settings); - formatToRoles(*to_roles, kind, settings); + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << (is_revoke ? " FROM " : " TO ") + << (settings.hilite ? IAST::hilite_none : ""); + grantees->format(settings); - if (kind == Kind::GRANT) + if (!is_revoke) { if (grant_option) settings.ostr << (settings.hilite ? hilite_keyword : "") << " WITH GRANT OPTION" << (settings.hilite ? hilite_none : ""); @@ -134,16 +145,16 @@ void ASTGrantQuery::formatImpl(const FormatSettings & settings, FormatState &, F } -void ASTGrantQuery::replaceEmptyDatabaseWithCurrent(const String & current_database) +void ASTGrantQuery::replaceEmptyDatabase(const String & current_database) { access_rights_elements.replaceEmptyDatabase(current_database); } -void ASTGrantQuery::replaceCurrentUserTagWithName(const String & current_user_name) const +void ASTGrantQuery::replaceCurrentUserTag(const String & current_user_name) const { - if (to_roles) - to_roles->replaceCurrentUserTagWithName(current_user_name); + if (grantees) + grantees->replaceCurrentUserTag(current_user_name); } } diff --git a/src/Parsers/ASTGrantQuery.h b/src/Parsers/ASTGrantQuery.h index c36e42689a5..833c4db8ec6 100644 --- a/src/Parsers/ASTGrantQuery.h +++ b/src/Parsers/ASTGrantQuery.h @@ -19,20 +19,18 @@ class ASTRolesOrUsersSet; class ASTGrantQuery : public IAST, public ASTQueryWithOnCluster { public: - using Kind = AccessRightsElementWithOptions::Kind; - Kind kind = 
Kind::GRANT; - bool attach = false; + bool attach_mode = false; + bool is_revoke = false; AccessRightsElements access_rights_elements; std::shared_ptr roles; - std::shared_ptr to_roles; - bool grant_option = false; bool admin_option = false; + std::shared_ptr grantees; String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; - void replaceEmptyDatabaseWithCurrent(const String & current_database); - void replaceCurrentUserTagWithName(const String & current_user_name) const; + void replaceEmptyDatabase(const String & current_database); + void replaceCurrentUserTag(const String & current_user_name) const; ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } }; } diff --git a/src/Parsers/ASTRolesOrUsersSet.cpp b/src/Parsers/ASTRolesOrUsersSet.cpp index 1e7cd79f527..9eb83cf895e 100644 --- a/src/Parsers/ASTRolesOrUsersSet.cpp +++ b/src/Parsers/ASTRolesOrUsersSet.cpp @@ -7,7 +7,7 @@ namespace DB { namespace { - void formatRoleNameOrID(const String & str, bool is_id, const IAST::FormatSettings & settings) + void formatNameOrID(const String & str, bool is_id, const IAST::FormatSettings & settings) { if (is_id) { @@ -30,6 +30,7 @@ void ASTRolesOrUsersSet::formatImpl(const FormatSettings & settings, FormatState } bool need_comma = false; + if (all) { if (std::exchange(need_comma, true)) @@ -38,11 +39,11 @@ void ASTRolesOrUsersSet::formatImpl(const FormatSettings & settings, FormatState } else { - for (const auto & role : names) + for (const auto & name : names) { if (std::exchange(need_comma, true)) settings.ostr << ", "; - formatRoleNameOrID(role, id_mode, settings); + formatNameOrID(name, id_mode, settings); } if (current_user) @@ -58,11 +59,11 @@ void ASTRolesOrUsersSet::formatImpl(const FormatSettings & settings, FormatState settings.ostr << (settings.hilite ? 
IAST::hilite_keyword : "") << " EXCEPT " << (settings.hilite ? IAST::hilite_none : ""); need_comma = false; - for (const auto & except_role : except_names) + for (const auto & name : except_names) { if (std::exchange(need_comma, true)) settings.ostr << ", "; - formatRoleNameOrID(except_role, id_mode, settings); + formatNameOrID(name, id_mode, settings); } if (except_current_user) @@ -75,7 +76,7 @@ void ASTRolesOrUsersSet::formatImpl(const FormatSettings & settings, FormatState } -void ASTRolesOrUsersSet::replaceCurrentUserTagWithName(const String & current_user_name) +void ASTRolesOrUsersSet::replaceCurrentUserTag(const String & current_user_name) { if (current_user) { diff --git a/src/Parsers/ASTRolesOrUsersSet.h b/src/Parsers/ASTRolesOrUsersSet.h index f18aa0bdd73..0f78f67d35b 100644 --- a/src/Parsers/ASTRolesOrUsersSet.h +++ b/src/Parsers/ASTRolesOrUsersSet.h @@ -9,22 +9,23 @@ namespace DB using Strings = std::vector; /// Represents a set of users/roles like -/// {user_name | role_name | CURRENT_USER} [,...] | NONE | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...] +/// {user_name | role_name | CURRENT_USER | ALL | NONE} [,...] +/// [EXCEPT {user_name | role_name | CURRENT_USER | ALL | NONE} [,...]] class ASTRolesOrUsersSet : public IAST { public: + bool all = false; Strings names; bool current_user = false; - bool all = false; Strings except_names; bool except_current_user = false; - bool id_mode = false; /// true if `names` and `except_names` keep UUIDs, not names. - bool allow_role_names = true; /// true if this set can contain names of roles. - bool allow_user_names = true; /// true if this set can contain names of users. 
+ bool allow_users = true; /// whether this set can contain names of users + bool allow_roles = true; /// whether this set can contain names of roles + bool id_mode = false; /// whether this set keep UUIDs instead of names bool empty() const { return names.empty() && !current_user && !all; } - void replaceCurrentUserTagWithName(const String & current_user_name); + void replaceCurrentUserTag(const String & current_user_name); String getID(char) const override { return "RolesOrUsersSet"; } ASTPtr clone() const override { return std::make_shared(*this); } diff --git a/src/Parsers/ASTRowPolicyName.cpp b/src/Parsers/ASTRowPolicyName.cpp index 3d1ac5621db..0b69c1a46b3 100644 --- a/src/Parsers/ASTRowPolicyName.cpp +++ b/src/Parsers/ASTRowPolicyName.cpp @@ -23,7 +23,7 @@ void ASTRowPolicyName::formatImpl(const FormatSettings & settings, FormatState & } -void ASTRowPolicyName::replaceEmptyDatabaseWithCurrent(const String & current_database) +void ASTRowPolicyName::replaceEmptyDatabase(const String & current_database) { if (name_parts.database.empty()) name_parts.database = current_database; @@ -125,7 +125,7 @@ Strings ASTRowPolicyNames::toStrings() const } -void ASTRowPolicyNames::replaceEmptyDatabaseWithCurrent(const String & current_database) +void ASTRowPolicyNames::replaceEmptyDatabase(const String & current_database) { for (auto & np : name_parts) if (np.database.empty()) diff --git a/src/Parsers/ASTRowPolicyName.h b/src/Parsers/ASTRowPolicyName.h index ac2f84f5d8b..b195596225b 100644 --- a/src/Parsers/ASTRowPolicyName.h +++ b/src/Parsers/ASTRowPolicyName.h @@ -22,7 +22,7 @@ public: void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } - void replaceEmptyDatabaseWithCurrent(const String & current_database); + void replaceEmptyDatabase(const String & current_database); }; @@ -44,6 +44,6 @@ public: void 
formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster(clone()); } - void replaceEmptyDatabaseWithCurrent(const String & current_database); + void replaceEmptyDatabase(const String & current_database); }; } diff --git a/src/Parsers/ASTShowAccessEntitiesQuery.cpp b/src/Parsers/ASTShowAccessEntitiesQuery.cpp index bacde098640..6dd53fd5cde 100644 --- a/src/Parsers/ASTShowAccessEntitiesQuery.cpp +++ b/src/Parsers/ASTShowAccessEntitiesQuery.cpp @@ -43,7 +43,7 @@ void ASTShowAccessEntitiesQuery::formatQueryImpl(const FormatSettings & settings } -void ASTShowAccessEntitiesQuery::replaceEmptyDatabaseWithCurrent(const String & current_database) +void ASTShowAccessEntitiesQuery::replaceEmptyDatabase(const String & current_database) { if (database_and_table_name) { diff --git a/src/Parsers/ASTShowAccessEntitiesQuery.h b/src/Parsers/ASTShowAccessEntitiesQuery.h index 7ccd76bfe5e..2be1e0b92f0 100644 --- a/src/Parsers/ASTShowAccessEntitiesQuery.h +++ b/src/Parsers/ASTShowAccessEntitiesQuery.h @@ -31,7 +31,7 @@ public: String getID(char) const override; ASTPtr clone() const override { return std::make_shared(*this); } - void replaceEmptyDatabaseWithCurrent(const String & current_database); + void replaceEmptyDatabase(const String & current_database); protected: void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; diff --git a/src/Parsers/ASTShowCreateAccessEntityQuery.cpp b/src/Parsers/ASTShowCreateAccessEntityQuery.cpp index f870c98071c..5ff51a47002 100644 --- a/src/Parsers/ASTShowCreateAccessEntityQuery.cpp +++ b/src/Parsers/ASTShowCreateAccessEntityQuery.cpp @@ -72,10 +72,10 @@ void ASTShowCreateAccessEntityQuery::formatQueryImpl(const FormatSettings & sett } -void ASTShowCreateAccessEntityQuery::replaceEmptyDatabaseWithCurrent(const String & current_database) +void 
ASTShowCreateAccessEntityQuery::replaceEmptyDatabase(const String & current_database) { if (row_policy_names) - row_policy_names->replaceEmptyDatabaseWithCurrent(current_database); + row_policy_names->replaceEmptyDatabase(current_database); if (database_and_table_name) { diff --git a/src/Parsers/ASTShowCreateAccessEntityQuery.h b/src/Parsers/ASTShowCreateAccessEntityQuery.h index 10c4c0ca511..e20bb4f022e 100644 --- a/src/Parsers/ASTShowCreateAccessEntityQuery.h +++ b/src/Parsers/ASTShowCreateAccessEntityQuery.h @@ -40,7 +40,7 @@ public: String getID(char) const override; ASTPtr clone() const override; - void replaceEmptyDatabaseWithCurrent(const String & current_database); + void replaceEmptyDatabase(const String & current_database); protected: String getKeyword() const; diff --git a/src/Parsers/ParserCreateQuotaQuery.cpp b/src/Parsers/ParserCreateQuotaQuery.cpp index 68c53d2fc1d..a8779a68600 100644 --- a/src/Parsers/ParserCreateQuotaQuery.cpp +++ b/src/Parsers/ParserCreateQuotaQuery.cpp @@ -226,7 +226,7 @@ namespace { ASTPtr node; ParserRolesOrUsersSet roles_p; - roles_p.allowAll().allowRoleNames().allowUserNames().allowCurrentUser().useIDMode(id_mode); + roles_p.allowAll().allowRoles().allowUsers().allowCurrentUser().useIDMode(id_mode); if (!ParserKeyword{"TO"}.ignore(pos, expected) || !roles_p.parse(pos, node, expected)) return false; diff --git a/src/Parsers/ParserCreateRowPolicyQuery.cpp b/src/Parsers/ParserCreateRowPolicyQuery.cpp index fae5bd35b43..534f781a273 100644 --- a/src/Parsers/ParserCreateRowPolicyQuery.cpp +++ b/src/Parsers/ParserCreateRowPolicyQuery.cpp @@ -187,7 +187,7 @@ namespace return false; ParserRolesOrUsersSet roles_p; - roles_p.allowAll().allowRoleNames().allowUserNames().allowCurrentUser().useIDMode(id_mode); + roles_p.allowAll().allowRoles().allowUsers().allowCurrentUser().useIDMode(id_mode); if (!roles_p.parse(pos, ast, expected)) return false; diff --git a/src/Parsers/ParserCreateSettingsProfileQuery.cpp 
b/src/Parsers/ParserCreateSettingsProfileQuery.cpp index 797379509e4..2d1e6824b50 100644 --- a/src/Parsers/ParserCreateSettingsProfileQuery.cpp +++ b/src/Parsers/ParserCreateSettingsProfileQuery.cpp @@ -53,7 +53,7 @@ namespace return false; ParserRolesOrUsersSet roles_p; - roles_p.allowAll().allowRoleNames().allowUserNames().allowCurrentUser().useIDMode(id_mode); + roles_p.allowAll().allowRoles().allowUsers().allowCurrentUser().useIDMode(id_mode); if (!roles_p.parse(pos, ast, expected)) return false; diff --git a/src/Parsers/ParserCreateUserQuery.cpp b/src/Parsers/ParserCreateUserQuery.cpp index 16c539d3ebc..b856b03ab9b 100644 --- a/src/Parsers/ParserCreateUserQuery.cpp +++ b/src/Parsers/ParserCreateUserQuery.cpp @@ -246,12 +246,12 @@ namespace ASTPtr ast; ParserRolesOrUsersSet default_roles_p; - default_roles_p.allowAll().allowRoleNames().useIDMode(id_mode); + default_roles_p.allowAll().allowRoles().useIDMode(id_mode); if (!default_roles_p.parse(pos, ast, expected)) return false; default_roles = typeid_cast>(ast); - default_roles->allow_user_names = false; + default_roles->allow_users = false; return true; }); } diff --git a/src/Parsers/ParserCreateUserQuery.h b/src/Parsers/ParserCreateUserQuery.h index 5b83a261fa2..0eee522979f 100644 --- a/src/Parsers/ParserCreateUserQuery.h +++ b/src/Parsers/ParserCreateUserQuery.h @@ -9,12 +9,14 @@ namespace DB * CREATE USER [IF NOT EXISTS | OR REPLACE] name * [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}}|{WITH ldap SERVER 'server_name'}|{WITH kerberos [REALM 'realm']}] * [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [DEFAULT ROLE role [,...]] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] 
* * ALTER USER [IF EXISTS] name * [RENAME TO new_name] * [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password|plaintext_password|sha256_password|sha256_hash|double_sha1_password|double_sha1_hash}] BY {'password'|'hash'}}|{WITH ldap SERVER 'server_name'}|{WITH kerberos [REALM 'realm']}] * [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + * [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] */ class ParserCreateUserQuery : public IParserBase diff --git a/src/Parsers/ParserGrantQuery.cpp b/src/Parsers/ParserGrantQuery.cpp index 7dd721c9af2..d3aa62e73da 100644 --- a/src/Parsers/ParserGrantQuery.cpp +++ b/src/Parsers/ParserGrantQuery.cpp @@ -8,6 +8,7 @@ #include #include #include +#include namespace DB @@ -20,8 +21,6 @@ namespace ErrorCodes namespace { - using Kind = ASTGrantQuery::Kind; - bool parseAccessFlags(IParser::Pos & pos, Expected & expected, AccessFlags & access_flags) { static constexpr auto is_one_of_access_type_words = [](IParser::Pos & pos_) @@ -87,7 +86,7 @@ namespace }); } - bool parseAccessTypesWithColumns(IParser::Pos & pos, Expected & expected, + bool parseAccessFlagsWithColumns(IParser::Pos & pos, Expected & expected, std::vector> & access_and_columns) { std::vector> res; @@ -112,7 +111,7 @@ namespace } - bool parseAccessRightsElements(IParser::Pos & pos, Expected & expected, AccessRightsElements & elements) + bool parseElementsWithoutOptions(IParser::Pos & pos, Expected & expected, AccessRightsElements & elements) { return IParserBase::wrapParseImpl(pos, [&] { @@ -121,7 +120,7 @@ namespace auto parse_around_on = [&] { std::vector> access_and_columns; - if (!parseAccessTypesWithColumns(pos, expected, access_and_columns)) + if (!parseAccessFlagsWithColumns(pos, expected, access_and_columns)) return false; if (!ParserKeyword{"ON"}.ignore(pos, expected)) @@ 
-157,16 +156,16 @@ namespace } - void removeNonGrantableFlags(AccessRightsElements & elements) + void eraseNonGrantable(AccessRightsElements & elements) { - for (auto & element : elements) + boost::range::remove_erase_if(elements, [](AccessRightsElement & element) { if (element.empty()) - continue; + return true; auto old_flags = element.access_flags; - element.removeNonGrantableFlags(); + element.eraseNonGrantable(); if (!element.empty()) - continue; + return false; if (!element.any_column) throw Exception(old_flags.toString() + " cannot be granted on the column level", ErrorCodes::INVALID_GRANT); @@ -176,17 +175,17 @@ namespace throw Exception(old_flags.toString() + " cannot be granted on the database level", ErrorCodes::INVALID_GRANT); else throw Exception(old_flags.toString() + " cannot be granted", ErrorCodes::INVALID_GRANT); - } + }); } - bool parseRoles(IParser::Pos & pos, Expected & expected, Kind kind, bool id_mode, std::shared_ptr & roles) + bool parseRoles(IParser::Pos & pos, Expected & expected, bool is_revoke, bool id_mode, std::shared_ptr & roles) { return IParserBase::wrapParseImpl(pos, [&] { ParserRolesOrUsersSet roles_p; - roles_p.allowRoleNames().useIDMode(id_mode); - if (kind == Kind::REVOKE) + roles_p.allowRoles().useIDMode(id_mode); + if (is_revoke) roles_p.allowAll(); ASTPtr ast; @@ -199,28 +198,20 @@ namespace } - bool parseToRoles(IParser::Pos & pos, Expected & expected, ASTGrantQuery::Kind kind, std::shared_ptr & to_roles) + bool parseToGrantees(IParser::Pos & pos, Expected & expected, bool is_revoke, std::shared_ptr & grantees) { return IParserBase::wrapParseImpl(pos, [&] { - if (kind == Kind::GRANT) - { - if (!ParserKeyword{"TO"}.ignore(pos, expected)) - return false; - } - else - { - if (!ParserKeyword{"FROM"}.ignore(pos, expected)) - return false; - } + if (!ParserKeyword{is_revoke ? 
"FROM" : "TO"}.ignore(pos, expected)) + return false; ASTPtr ast; ParserRolesOrUsersSet roles_p; - roles_p.allowRoleNames().allowUserNames().allowCurrentUser().allowAll(kind == Kind::REVOKE); + roles_p.allowRoles().allowUsers().allowCurrentUser().allowAll(is_revoke); if (!roles_p.parse(pos, ast, expected)) return false; - to_roles = typeid_cast>(ast); + grantees = typeid_cast>(ast); return true; }); } @@ -237,20 +228,13 @@ namespace bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - bool attach = false; - if (attach_mode) - { - if (!ParserKeyword{"ATTACH"}.ignore(pos, expected)) - return false; - attach = true; - } + if (attach_mode && !ParserKeyword{"ATTACH"}.ignore(pos, expected)) + return false; - Kind kind; - if (ParserKeyword{"GRANT"}.ignore(pos, expected)) - kind = Kind::GRANT; - else if (ParserKeyword{"REVOKE"}.ignore(pos, expected)) - kind = Kind::REVOKE; - else + bool is_revoke = false; + if (ParserKeyword{"REVOKE"}.ignore(pos, expected)) + is_revoke = true; + else if (!ParserKeyword{"GRANT"}.ignore(pos, expected)) return false; String cluster; @@ -259,7 +243,7 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) bool grant_option = false; bool admin_option = false; - if (kind == Kind::REVOKE) + if (is_revoke) { if (ParserKeyword{"GRANT OPTION FOR"}.ignore(pos, expected)) grant_option = true; @@ -269,20 +253,20 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) AccessRightsElements elements; std::shared_ptr roles; - if (!parseAccessRightsElements(pos, expected, elements) && !parseRoles(pos, expected, kind, attach, roles)) + if (!parseElementsWithoutOptions(pos, expected, elements) && !parseRoles(pos, expected, is_revoke, attach_mode, roles)) return false; if (cluster.empty()) parseOnCluster(pos, expected, cluster); - std::shared_ptr to_roles; - if (!parseToRoles(pos, expected, kind, to_roles)) + std::shared_ptr grantees; + if (!parseToGrantees(pos, expected, 
is_revoke, grantees)) return false; if (cluster.empty()) parseOnCluster(pos, expected, cluster); - if (kind == Kind::GRANT) + if (!is_revoke) { if (ParserKeyword{"WITH GRANT OPTION"}.ignore(pos, expected)) grant_option = true; @@ -298,19 +282,24 @@ bool ParserGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (admin_option && !elements.empty()) throw Exception("ADMIN OPTION should be specified for roles", ErrorCodes::SYNTAX_ERROR); - if (kind == Kind::GRANT) - removeNonGrantableFlags(elements); + if (grant_option) + { + for (auto & element : elements) + element.grant_option = true; + } + + if (!is_revoke) + eraseNonGrantable(elements); auto query = std::make_shared(); node = query; - query->kind = kind; - query->attach = attach; + query->is_revoke = is_revoke; + query->attach_mode = attach_mode; query->cluster = std::move(cluster); query->access_rights_elements = std::move(elements); query->roles = std::move(roles); - query->to_roles = std::move(to_roles); - query->grant_option = grant_option; + query->grantees = std::move(grantees); query->admin_option = admin_option; return true; diff --git a/src/Parsers/ParserRolesOrUsersSet.cpp b/src/Parsers/ParserRolesOrUsersSet.cpp index 0f3ba3f0f84..701c5c2f9d5 100644 --- a/src/Parsers/ParserRolesOrUsersSet.cpp +++ b/src/Parsers/ParserRolesOrUsersSet.cpp @@ -12,11 +12,7 @@ namespace DB { namespace { - bool parseRoleNameOrID( - IParserBase::Pos & pos, - Expected & expected, - bool id_mode, - String & res) + bool parseNameOrID(IParserBase::Pos & pos, Expected & expected, bool id_mode, String & res) { return IParserBase::wrapParseImpl(pos, [&] { @@ -39,20 +35,20 @@ namespace }); } - bool parseBeforeExcept( IParserBase::Pos & pos, Expected & expected, bool id_mode, bool allow_all, bool allow_current_user, - Strings & names, bool & all, + Strings & names, bool & current_user) { bool res_all = false; - bool res_current_user = false; Strings res_names; + bool res_current_user = false; + Strings 
res_with_roles_names; auto parse_element = [&] { @@ -72,7 +68,7 @@ namespace } String name; - if (parseRoleNameOrID(pos, expected, id_mode, name)) + if (parseNameOrID(pos, expected, id_mode, name)) { res_names.emplace_back(std::move(name)); return true; @@ -85,8 +81,8 @@ namespace return false; names = std::move(res_names); - all = res_all; current_user = res_current_user; + all = res_all; return true; } @@ -98,13 +94,12 @@ namespace Strings & except_names, bool & except_current_user) { - return IParserBase::wrapParseImpl(pos, [&] - { + return IParserBase::wrapParseImpl(pos, [&] { if (!ParserKeyword{"EXCEPT"}.ignore(pos, expected)) return false; bool unused; - return parseBeforeExcept(pos, expected, id_mode, false, allow_current_user, except_names, unused, except_current_user); + return parseBeforeExcept(pos, expected, id_mode, false, allow_current_user, unused, except_names, except_current_user); }); } } @@ -112,13 +107,13 @@ namespace bool ParserRolesOrUsersSet::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { + bool all = false; Strings names; bool current_user = false; - bool all = false; Strings except_names; bool except_current_user = false; - if (!parseBeforeExcept(pos, expected, id_mode, allow_all, allow_current_user, names, all, current_user)) + if (!parseBeforeExcept(pos, expected, id_mode, allow_all, allow_current_user, all, names, current_user)) return false; parseExceptAndAfterExcept(pos, expected, id_mode, allow_current_user, except_names, except_current_user); @@ -132,9 +127,9 @@ bool ParserRolesOrUsersSet::parseImpl(Pos & pos, ASTPtr & node, Expected & expec result->all = all; result->except_names = std::move(except_names); result->except_current_user = except_current_user; + result->allow_users = allow_users; + result->allow_roles = allow_roles; result->id_mode = id_mode; - result->allow_user_names = allow_user_names; - result->allow_role_names = allow_role_names; node = result; return true; } diff --git 
a/src/Parsers/ParserRolesOrUsersSet.h b/src/Parsers/ParserRolesOrUsersSet.h index c71012e874c..d63c045e7a0 100644 --- a/src/Parsers/ParserRolesOrUsersSet.h +++ b/src/Parsers/ParserRolesOrUsersSet.h @@ -6,15 +6,16 @@ namespace DB { /** Parses a string like this: - * {role|CURRENT_USER} [,...] | NONE | ALL | ALL EXCEPT {role|CURRENT_USER} [,...] + * {user_name | role_name | CURRENT_USER | ALL | NONE} [,...] + * [EXCEPT {user_name | role_name | CURRENT_USER | ALL | NONE} [,...]] */ class ParserRolesOrUsersSet : public IParserBase { public: ParserRolesOrUsersSet & allowAll(bool allow_all_ = true) { allow_all = allow_all_; return *this; } - ParserRolesOrUsersSet & allowUserNames(bool allow_user_names_ = true) { allow_user_names = allow_user_names_; return *this; } - ParserRolesOrUsersSet & allowRoleNames(bool allow_role_names_ = true) { allow_role_names = allow_role_names_; return *this; } + ParserRolesOrUsersSet & allowUsers(bool allow_users_ = true) { allow_users = allow_users_; return *this; } ParserRolesOrUsersSet & allowCurrentUser(bool allow_current_user_ = true) { allow_current_user = allow_current_user_; return *this; } + ParserRolesOrUsersSet & allowRoles(bool allow_roles_ = true) { allow_roles = allow_roles_; return *this; } ParserRolesOrUsersSet & useIDMode(bool id_mode_ = true) { id_mode = id_mode_; return *this; } protected: @@ -23,9 +24,9 @@ protected: private: bool allow_all = false; - bool allow_user_names = false; - bool allow_role_names = false; + bool allow_users = false; bool allow_current_user = false; + bool allow_roles = false; bool id_mode = false; }; diff --git a/src/Parsers/ParserSetRoleQuery.cpp b/src/Parsers/ParserSetRoleQuery.cpp index e8734f8dfc1..678474af040 100644 --- a/src/Parsers/ParserSetRoleQuery.cpp +++ b/src/Parsers/ParserSetRoleQuery.cpp @@ -15,12 +15,12 @@ namespace { ASTPtr ast; ParserRolesOrUsersSet roles_p; - roles_p.allowRoleNames().allowAll(); + roles_p.allowRoles().allowAll(); if (!roles_p.parse(pos, ast, expected)) return 
false; roles = typeid_cast>(ast); - roles->allow_user_names = false; + roles->allow_users = false; return true; }); } @@ -34,12 +34,12 @@ namespace ASTPtr ast; ParserRolesOrUsersSet users_p; - users_p.allowUserNames().allowCurrentUser(); + users_p.allowUsers().allowCurrentUser(); if (!users_p.parse(pos, ast, expected)) return false; to_users = typeid_cast>(ast); - to_users->allow_role_names = false; + to_users->allow_roles = false; return true; }); } diff --git a/src/Parsers/ParserShowGrantsQuery.cpp b/src/Parsers/ParserShowGrantsQuery.cpp index d25527754be..bd9e4012771 100644 --- a/src/Parsers/ParserShowGrantsQuery.cpp +++ b/src/Parsers/ParserShowGrantsQuery.cpp @@ -19,7 +19,7 @@ bool ParserShowGrantsQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec { ASTPtr for_roles_ast; ParserRolesOrUsersSet for_roles_p; - for_roles_p.allowUserNames().allowRoleNames().allowAll().allowCurrentUser(); + for_roles_p.allowUsers().allowRoles().allowAll().allowCurrentUser(); if (!for_roles_p.parse(pos, for_roles_ast, expected)) return false; diff --git a/src/Storages/System/StorageSystemGrants.cpp b/src/Storages/System/StorageSystemGrants.cpp index 360256c1f45..0c06ad99b22 100644 --- a/src/Storages/System/StorageSystemGrants.cpp +++ b/src/Storages/System/StorageSystemGrants.cpp @@ -18,7 +18,6 @@ namespace DB { using EntityType = IAccessEntity::Type; -using Kind = AccessRightsElementWithOptions::Kind; NamesAndTypesList StorageSystemGrants::getNamesAndTypes() { @@ -64,7 +63,7 @@ void StorageSystemGrants::fillData(MutableColumns & res_columns, const Context & const String * database, const String * table, const String * column, - Kind kind, + bool is_partial_revoke, bool grant_option) { if (grantee_type == EntityType::USER) @@ -119,13 +118,13 @@ void StorageSystemGrants::fillData(MutableColumns & res_columns, const Context & column_column_null_map.push_back(true); } - column_is_partial_revoke.push_back(kind == Kind::REVOKE); + 
column_is_partial_revoke.push_back(is_partial_revoke); column_grant_option.push_back(grant_option); }; auto add_rows = [&](const String & grantee_name, IAccessEntity::Type grantee_type, - const AccessRightsElementsWithOptions & elements) + const AccessRightsElements & elements) { for (const auto & element : elements) { @@ -139,13 +138,13 @@ void StorageSystemGrants::fillData(MutableColumns & res_columns, const Context & if (element.any_column) { for (const auto & access_type : access_types) - add_row(grantee_name, grantee_type, access_type, database, table, nullptr, element.kind, element.grant_option); + add_row(grantee_name, grantee_type, access_type, database, table, nullptr, element.is_partial_revoke, element.grant_option); } else { for (const auto & access_type : access_types) for (const auto & column : element.columns) - add_row(grantee_name, grantee_type, access_type, database, table, &column, element.kind, element.grant_option); + add_row(grantee_name, grantee_type, access_type, database, table, &column, element.is_partial_revoke, element.grant_option); } } }; diff --git a/src/Storages/System/StorageSystemRoleGrants.cpp b/src/Storages/System/StorageSystemRoleGrants.cpp index 0f0fcd831d9..cf0fad8f8ce 100644 --- a/src/Storages/System/StorageSystemRoleGrants.cpp +++ b/src/Storages/System/StorageSystemRoleGrants.cpp @@ -80,15 +80,17 @@ void StorageSystemRoleGrants::fillData(MutableColumns & res_columns, const Conte const GrantedRoles & granted_roles, const RolesOrUsersSet * default_roles) { - for (const auto & role_id : granted_roles.roles) + for (const auto & element : granted_roles.getElements()) { - auto role_name = access_control.tryReadName(role_id); - if (!role_name) - continue; + for (const auto & role_id : element.ids) + { + auto role_name = access_control.tryReadName(role_id); + if (!role_name) + continue; - bool is_default = !default_roles || default_roles->match(role_id); - bool with_admin_option = granted_roles.roles_with_admin_option.count(role_id); 
- add_row(grantee_name, grantee_type, *role_name, is_default, with_admin_option); + bool is_default = !default_roles || default_roles->match(role_id); + add_row(grantee_name, grantee_type, *role_name, is_default, element.admin_option); + } } }; diff --git a/tests/testflows/rbac/tests/syntax/revoke_role.py b/tests/testflows/rbac/tests/syntax/revoke_role.py index 4acdf127cec..ea8b874ff51 100755 --- a/tests/testflows/rbac/tests/syntax/revoke_role.py +++ b/tests/testflows/rbac/tests/syntax/revoke_role.py @@ -166,9 +166,10 @@ def feature(self, node="clickhouse1"): with Scenario("I revoke a role on fake cluster, throws exception", requirements=[ RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]): - with When("I revoke a role from user on a cluster"): - exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("REVOKE ON CLUSTER fake_cluster role0 FROM user0", exitcode=exitcode, message=message) + with setup(): + with When("I revoke a role from user on a cluster"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("REVOKE ON CLUSTER fake_cluster role0 FROM user0", exitcode=exitcode, message=message) with Scenario("I revoke multiple roles from multiple users on cluster", requirements=[ RQ_SRS_006_RBAC_Revoke_Role("1.0"), From 37ce6e26d365787ac68933db782dca1c5180d571 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 11 Mar 2021 16:44:00 +0300 Subject: [PATCH 181/333] Add a new clause GRANTEES to commands CREATE USER and ALTER USER. 
--- src/Access/User.cpp | 2 +- src/Access/User.h | 1 + .../InterpreterCreateUserQuery.cpp | 37 ++++++++++++--- src/Interpreters/InterpreterGrantQuery.cpp | 45 +++++++++++++++++++ ...InterpreterShowCreateAccessEntityQuery.cpp | 9 ++++ src/Parsers/ASTCreateUserQuery.cpp | 10 +++++ src/Parsers/ASTCreateUserQuery.h | 3 ++ src/Parsers/ASTRolesOrUsersSet.cpp | 3 +- src/Parsers/ASTRolesOrUsersSet.h | 7 +-- src/Parsers/ParserCreateUserQuery.cpp | 23 ++++++++++ src/Parsers/ParserCreateUserQuery.h | 2 + src/Parsers/ParserRolesOrUsersSet.cpp | 12 ++++- src/Parsers/ParserRolesOrUsersSet.h | 2 + src/Storages/System/StorageSystemUsers.cpp | 28 +++++++++--- .../integration/test_grant_and_revoke/test.py | 42 ++++++++++++++++- 15 files changed, 205 insertions(+), 21 deletions(-) diff --git a/src/Access/User.cpp b/src/Access/User.cpp index f57ec7c1359..016f378e83f 100644 --- a/src/Access/User.cpp +++ b/src/Access/User.cpp @@ -11,7 +11,7 @@ bool User::equal(const IAccessEntity & other) const const auto & other_user = typeid_cast(other); return (authentication == other_user.authentication) && (allowed_client_hosts == other_user.allowed_client_hosts) && (access == other_user.access) && (granted_roles == other_user.granted_roles) && (default_roles == other_user.default_roles) - && (settings == other_user.settings); + && (settings == other_user.settings) && (grantees == other_user.grantees); } } diff --git a/src/Access/User.h b/src/Access/User.h index 13f1e532015..5b10d953fc0 100644 --- a/src/Access/User.h +++ b/src/Access/User.h @@ -21,6 +21,7 @@ struct User : public IAccessEntity GrantedRoles granted_roles; RolesOrUsersSet default_roles = RolesOrUsersSet::AllTag{}; SettingsProfileElements settings; + RolesOrUsersSet grantees = RolesOrUsersSet::AllTag{}; bool equal(const IAccessEntity & other) const override; std::shared_ptr clone() const override { return cloneImpl(); } diff --git a/src/Interpreters/InterpreterCreateUserQuery.cpp b/src/Interpreters/InterpreterCreateUserQuery.cpp index 
c9b087de5b4..777cf7de297 100644 --- a/src/Interpreters/InterpreterCreateUserQuery.cpp +++ b/src/Interpreters/InterpreterCreateUserQuery.cpp @@ -20,7 +20,8 @@ namespace const ASTCreateUserQuery & query, const std::shared_ptr & override_name, const std::optional & override_default_roles, - const std::optional & override_settings) + const std::optional & override_settings, + const std::optional & override_grantees) { if (override_name) user.setName(override_name->toString()); @@ -62,6 +63,11 @@ namespace user.settings = *override_settings; else if (query.settings) user.settings = *query.settings; + + if (override_grantees) + user.grantees = *override_grantees; + else if (query.grantees) + user.grantees = *query.grantees; } } @@ -93,12 +99,17 @@ BlockIO InterpreterCreateUserQuery::execute() if (query.alter) { + std::optional grantees_from_query; + if (query.grantees) + grantees_from_query = RolesOrUsersSet{*query.grantees, access_control}; + auto update_func = [&](const AccessEntityPtr & entity) -> AccessEntityPtr { auto updated_user = typeid_cast>(entity->clone()); - updateUserFromQueryImpl(*updated_user, query, {}, default_roles_from_query, settings_from_query); + updateUserFromQueryImpl(*updated_user, query, {}, default_roles_from_query, settings_from_query, grantees_from_query); return updated_user; }; + Strings names = query.names->toStrings(); if (query.if_exists) { @@ -114,16 +125,28 @@ BlockIO InterpreterCreateUserQuery::execute() for (const auto & name : *query.names) { auto new_user = std::make_shared(); - updateUserFromQueryImpl(*new_user, query, name, default_roles_from_query, settings_from_query); + updateUserFromQueryImpl(*new_user, query, name, default_roles_from_query, settings_from_query, RolesOrUsersSet::AllTag{}); new_users.emplace_back(std::move(new_user)); } + std::vector ids; if (query.if_not_exists) - access_control.tryInsert(new_users); + ids = access_control.tryInsert(new_users); else if (query.or_replace) - 
access_control.insertOrReplace(new_users); + ids = access_control.insertOrReplace(new_users); else - access_control.insert(new_users); + ids = access_control.insert(new_users); + + if (query.grantees) + { + RolesOrUsersSet grantees_from_query = RolesOrUsersSet{*query.grantees, access_control}; + access_control.update(ids, [&](const AccessEntityPtr & entity) -> AccessEntityPtr + { + auto updated_user = typeid_cast>(entity->clone()); + updated_user->grantees = grantees_from_query; + return updated_user; + }); + } } return {}; @@ -132,7 +155,7 @@ BlockIO InterpreterCreateUserQuery::execute() void InterpreterCreateUserQuery::updateUserFromQuery(User & user, const ASTCreateUserQuery & query) { - updateUserFromQueryImpl(user, query, {}, {}, {}); + updateUserFromQueryImpl(user, query, {}, {}, {}, {}); } } diff --git a/src/Interpreters/InterpreterGrantQuery.cpp b/src/Interpreters/InterpreterGrantQuery.cpp index ac37266bcfb..b518178f6d0 100644 --- a/src/Interpreters/InterpreterGrantQuery.cpp +++ b/src/Interpreters/InterpreterGrantQuery.cpp @@ -16,6 +16,7 @@ namespace DB { namespace ErrorCodes { + extern const int ACCESS_DENIED; extern const int LOGICAL_ERROR; } @@ -65,6 +66,29 @@ namespace updateFromQueryTemplate(*role, query, roles_to_grant_or_revoke); } + void checkGranteeIsAllowed(const ContextAccess & access, const UUID & grantee_id, const IAccessEntity & grantee) + { + auto current_user = access.getUser(); + if (current_user && !current_user->grantees.match(grantee_id)) + throw Exception(grantee.outputTypeAndName() + " is not allowed as grantee", ErrorCodes::ACCESS_DENIED); + } + + void checkGranteesAreAllowed(const AccessControlManager & access_control, const ContextAccess & access, const std::vector & grantee_ids) + { + auto current_user = access.getUser(); + if (!current_user || (current_user->grantees == RolesOrUsersSet::AllTag{})) + return; + + for (const auto & id : grantee_ids) + { + auto entity = access_control.tryRead(id); + if (auto role = 
typeid_cast(entity)) + checkGranteeIsAllowed(access, id, *role); + else if (auto user = typeid_cast(entity)) + checkGranteeIsAllowed(access, id, *user); + } + } + void checkGrantOption( const AccessControlManager & access_control, const ContextAccess & access, @@ -80,11 +104,15 @@ namespace if (!query.is_revoke) { access.checkGrantOption(elements); + checkGranteesAreAllowed(access_control, access, grantees_from_query); return; } if (access.hasGrantOption(elements)) + { + checkGranteesAreAllowed(access_control, access, grantees_from_query); return; + } /// Special case for the command REVOKE: it's possible that the current user doesn't have /// the access granted with GRANT OPTION but it's still ok because the roles or users @@ -99,9 +127,15 @@ namespace { auto entity = access_control.tryRead(id); if (auto role = typeid_cast(entity)) + { + checkGranteeIsAllowed(access, id, *role); all_granted_access.makeUnion(role->access); + } else if (auto user = typeid_cast(entity)) + { + checkGranteeIsAllowed(access, id, *user); all_granted_access.makeUnion(user->access); + } } AccessRights required_access; @@ -138,6 +172,7 @@ namespace { matching_ids = roles_from_query.getMatchingIDs(access_control); access.checkAdminOption(matching_ids); + checkGranteesAreAllowed(access_control, access, grantees_from_query); return matching_ids; } @@ -145,7 +180,10 @@ namespace { matching_ids = roles_from_query.getMatchingIDs(); if (access.hasAdminOption(matching_ids)) + { + checkGranteesAreAllowed(access_control, access, grantees_from_query); return matching_ids; + } } /// Special case for the command REVOKE: it's possible that the current user doesn't have the admin option @@ -161,9 +199,15 @@ namespace { auto entity = access_control.tryRead(id); if (auto role = typeid_cast(entity)) + { + checkGranteeIsAllowed(access, id, *role); all_granted_roles.makeUnion(role->granted_roles); + } else if (auto user = typeid_cast(entity)) + { + checkGranteeIsAllowed(access, id, *user); 
all_granted_roles.makeUnion(user->granted_roles); + } } const auto & all_granted_roles_set = query.admin_option ? all_granted_roles.getGrantedWithAdminOption() : all_granted_roles.getGranted(); @@ -206,6 +250,7 @@ BlockIO InterpreterGrantQuery::execute() /// To execute the command GRANT the current user needs to have the access granted with GRANT OPTION. auto required_access = query.access_rights_elements; std::for_each(required_access.begin(), required_access.end(), [&](AccessRightsElement & element) { element.grant_option = true; }); + checkGranteesAreAllowed(access_control, *context.getAccess(), grantees); return executeDDLQueryOnCluster(query_ptr, context, std::move(required_access)); } diff --git a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp index 4e391035d5d..c39fed8fb62 100644 --- a/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateAccessEntityQuery.cpp @@ -73,6 +73,15 @@ namespace query->settings = user.settings.toASTWithNames(*manager); } + if (user.grantees != RolesOrUsersSet::AllTag{}) + { + if (attach_mode) + query->grantees = user.grantees.toAST(); + else + query->grantees = user.grantees.toASTWithNames(*manager); + query->grantees->use_keyword_any = true; + } + return query; } diff --git a/src/Parsers/ASTCreateUserQuery.cpp b/src/Parsers/ASTCreateUserQuery.cpp index e2e477fa622..696b88ea9c1 100644 --- a/src/Parsers/ASTCreateUserQuery.cpp +++ b/src/Parsers/ASTCreateUserQuery.cpp @@ -203,6 +203,13 @@ namespace format.ostr << (format.hilite ? IAST::hilite_keyword : "") << " SETTINGS " << (format.hilite ? IAST::hilite_none : ""); settings.format(format); } + + + void formatGrantees(const ASTRolesOrUsersSet & grantees, const IAST::FormatSettings & settings) + { + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << " GRANTEES " << (settings.hilite ? 
IAST::hilite_none : ""); + grantees.format(settings); + } } @@ -260,5 +267,8 @@ void ASTCreateUserQuery::formatImpl(const FormatSettings & format, FormatState & if (settings && (!settings->empty() || alter)) formatSettings(*settings, format); + + if (grantees) + formatGrantees(*grantees, format); } } diff --git a/src/Parsers/ASTCreateUserQuery.h b/src/Parsers/ASTCreateUserQuery.h index 22992b2c408..1612c213f34 100644 --- a/src/Parsers/ASTCreateUserQuery.h +++ b/src/Parsers/ASTCreateUserQuery.h @@ -17,6 +17,7 @@ class ASTSettingsProfileElements; * [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [DEFAULT ROLE role [,...]] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]] * * ALTER USER [IF EXISTS] name * [RENAME TO new_name] @@ -24,6 +25,7 @@ class ASTSettingsProfileElements; * [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [GRANTEES {user | role | ANY | NONE} [,...] 
[EXCEPT {user | role} [,...]]] */ class ASTCreateUserQuery : public IAST, public ASTQueryWithOnCluster { @@ -47,6 +49,7 @@ public: std::shared_ptr default_roles; std::shared_ptr settings; + std::shared_ptr grantees; String getID(char) const override; ASTPtr clone() const override; diff --git a/src/Parsers/ASTRolesOrUsersSet.cpp b/src/Parsers/ASTRolesOrUsersSet.cpp index 9eb83cf895e..fc5385e4a58 100644 --- a/src/Parsers/ASTRolesOrUsersSet.cpp +++ b/src/Parsers/ASTRolesOrUsersSet.cpp @@ -35,7 +35,8 @@ void ASTRolesOrUsersSet::formatImpl(const FormatSettings & settings, FormatState { if (std::exchange(need_comma, true)) settings.ostr << ", "; - settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "ALL" << (settings.hilite ? IAST::hilite_none : ""); + settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << (use_keyword_any ? "ANY" : "ALL") + << (settings.hilite ? IAST::hilite_none : ""); } else { diff --git a/src/Parsers/ASTRolesOrUsersSet.h b/src/Parsers/ASTRolesOrUsersSet.h index 0f78f67d35b..15d42ee39a0 100644 --- a/src/Parsers/ASTRolesOrUsersSet.h +++ b/src/Parsers/ASTRolesOrUsersSet.h @@ -20,9 +20,10 @@ public: Strings except_names; bool except_current_user = false; - bool allow_users = true; /// whether this set can contain names of users - bool allow_roles = true; /// whether this set can contain names of roles - bool id_mode = false; /// whether this set keep UUIDs instead of names + bool allow_users = true; /// whether this set can contain names of users + bool allow_roles = true; /// whether this set can contain names of roles + bool id_mode = false; /// whether this set keep UUIDs instead of names + bool use_keyword_any = false; /// whether the keyword ANY should be used instead of the keyword ALL bool empty() const { return names.empty() && !current_user && !all; } void replaceCurrentUserTag(const String & current_user_name); diff --git a/src/Parsers/ParserCreateUserQuery.cpp b/src/Parsers/ParserCreateUserQuery.cpp index 
b856b03ab9b..84bf60d56d3 100644 --- a/src/Parsers/ParserCreateUserQuery.cpp +++ b/src/Parsers/ParserCreateUserQuery.cpp @@ -275,6 +275,24 @@ namespace }); } + bool parseGrantees(IParserBase::Pos & pos, Expected & expected, bool id_mode, std::shared_ptr & grantees) + { + return IParserBase::wrapParseImpl(pos, [&] + { + if (!ParserKeyword{"GRANTEES"}.ignore(pos, expected)) + return false; + + ASTPtr ast; + ParserRolesOrUsersSet grantees_p; + grantees_p.allowAny().allowUsers().allowCurrentUser().allowRoles().useIDMode(id_mode); + if (!grantees_p.parse(pos, ast, expected)) + return false; + + grantees = typeid_cast>(ast); + return true; + }); + } + bool parseOnCluster(IParserBase::Pos & pos, Expected & expected, String & cluster) { return IParserBase::wrapParseImpl(pos, [&] @@ -330,6 +348,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec std::optional remove_hosts; std::shared_ptr default_roles; std::shared_ptr settings; + std::shared_ptr grantees; String cluster; while (true) @@ -368,6 +387,9 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (cluster.empty() && parseOnCluster(pos, expected, cluster)) continue; + if (!grantees && parseGrantees(pos, expected, attach_mode, grantees)) + continue; + if (alter) { if (new_name.empty() && (names->size() == 1) && parseRenameTo(pos, expected, new_name)) @@ -422,6 +444,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec query->remove_hosts = std::move(remove_hosts); query->default_roles = std::move(default_roles); query->settings = std::move(settings); + query->grantees = std::move(grantees); return true; } diff --git a/src/Parsers/ParserCreateUserQuery.h b/src/Parsers/ParserCreateUserQuery.h index 0eee522979f..215133a777c 100644 --- a/src/Parsers/ParserCreateUserQuery.h +++ b/src/Parsers/ParserCreateUserQuery.h @@ -11,6 +11,7 @@ namespace DB * [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 
'pattern'} [,...] | ANY | NONE] * [DEFAULT ROLE role [,...]] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]] * * ALTER USER [IF EXISTS] name * [RENAME TO new_name] @@ -18,6 +19,7 @@ namespace DB * [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] * [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] * [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + * [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]] */ class ParserCreateUserQuery : public IParserBase { diff --git a/src/Parsers/ParserRolesOrUsersSet.cpp b/src/Parsers/ParserRolesOrUsersSet.cpp index 701c5c2f9d5..41e9ee6501d 100644 --- a/src/Parsers/ParserRolesOrUsersSet.cpp +++ b/src/Parsers/ParserRolesOrUsersSet.cpp @@ -40,6 +40,7 @@ namespace Expected & expected, bool id_mode, bool allow_all, + bool allow_any, bool allow_current_user, bool & all, Strings & names, @@ -61,6 +62,12 @@ namespace return true; } + if (allow_any && ParserKeyword{"ANY"}.ignore(pos, expected)) + { + res_all = true; + return true; + } + if (allow_current_user && parseCurrentUserTag(pos, expected)) { res_current_user = true; @@ -99,7 +106,7 @@ namespace return false; bool unused; - return parseBeforeExcept(pos, expected, id_mode, false, allow_current_user, unused, except_names, except_current_user); + return parseBeforeExcept(pos, expected, id_mode, false, false, allow_current_user, unused, except_names, except_current_user); }); } } @@ -113,7 +120,7 @@ bool ParserRolesOrUsersSet::parseImpl(Pos & pos, ASTPtr & node, Expected & expec Strings except_names; bool except_current_user = false; - if (!parseBeforeExcept(pos, expected, id_mode, allow_all, allow_current_user, all, names, current_user)) + if 
(!parseBeforeExcept(pos, expected, id_mode, allow_all, allow_any, allow_current_user, all, names, current_user)) return false; parseExceptAndAfterExcept(pos, expected, id_mode, allow_current_user, except_names, except_current_user); @@ -130,6 +137,7 @@ bool ParserRolesOrUsersSet::parseImpl(Pos & pos, ASTPtr & node, Expected & expec result->allow_users = allow_users; result->allow_roles = allow_roles; result->id_mode = id_mode; + result->use_keyword_any = all && allow_any && !allow_all; node = result; return true; } diff --git a/src/Parsers/ParserRolesOrUsersSet.h b/src/Parsers/ParserRolesOrUsersSet.h index d63c045e7a0..9ae9937e784 100644 --- a/src/Parsers/ParserRolesOrUsersSet.h +++ b/src/Parsers/ParserRolesOrUsersSet.h @@ -13,6 +13,7 @@ class ParserRolesOrUsersSet : public IParserBase { public: ParserRolesOrUsersSet & allowAll(bool allow_all_ = true) { allow_all = allow_all_; return *this; } + ParserRolesOrUsersSet & allowAny(bool allow_any_ = true) { allow_any = allow_any_; return *this; } ParserRolesOrUsersSet & allowUsers(bool allow_users_ = true) { allow_users = allow_users_; return *this; } ParserRolesOrUsersSet & allowCurrentUser(bool allow_current_user_ = true) { allow_current_user = allow_current_user_; return *this; } ParserRolesOrUsersSet & allowRoles(bool allow_roles_ = true) { allow_roles = allow_roles_; return *this; } @@ -24,6 +25,7 @@ protected: private: bool allow_all = false; + bool allow_any = false; bool allow_users = false; bool allow_current_user = false; bool allow_roles = false; diff --git a/src/Storages/System/StorageSystemUsers.cpp b/src/Storages/System/StorageSystemUsers.cpp index bec94bc388c..eaebf759a85 100644 --- a/src/Storages/System/StorageSystemUsers.cpp +++ b/src/Storages/System/StorageSystemUsers.cpp @@ -47,6 +47,9 @@ NamesAndTypesList StorageSystemUsers::getNamesAndTypes() {"default_roles_all", std::make_shared()}, {"default_roles_list", std::make_shared(std::make_shared())}, {"default_roles_except", 
std::make_shared(std::make_shared())}, + {"grantees_any", std::make_shared()}, + {"grantees_list", std::make_shared(std::make_shared())}, + {"grantees_except", std::make_shared(std::make_shared())}, }; return names_and_types; } @@ -77,13 +80,19 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, const Context & auto & column_default_roles_list_offsets = assert_cast(*res_columns[column_index++]).getOffsets(); auto & column_default_roles_except = assert_cast(assert_cast(*res_columns[column_index]).getData()); auto & column_default_roles_except_offsets = assert_cast(*res_columns[column_index++]).getOffsets(); + auto & column_grantees_any = assert_cast(*res_columns[column_index++]).getData(); + auto & column_grantees_list = assert_cast(assert_cast(*res_columns[column_index]).getData()); + auto & column_grantees_list_offsets = assert_cast(*res_columns[column_index++]).getOffsets(); + auto & column_grantees_except = assert_cast(assert_cast(*res_columns[column_index]).getData()); + auto & column_grantees_except_offsets = assert_cast(*res_columns[column_index++]).getOffsets(); auto add_row = [&](const String & name, const UUID & id, const String & storage_name, const Authentication & authentication, const AllowedClientHosts & allowed_hosts, - const RolesOrUsersSet & default_roles) + const RolesOrUsersSet & default_roles, + const RolesOrUsersSet & grantees) { column_name.insertData(name.data(), name.length()); column_id.push_back(id); @@ -156,14 +165,21 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, const Context & auto default_roles_ast = default_roles.toASTWithNames(access_control); column_default_roles_all.push_back(default_roles_ast->all); - for (const auto & role_name : default_roles_ast->names) column_default_roles_list.insertData(role_name.data(), role_name.length()); column_default_roles_list_offsets.push_back(column_default_roles_list.size()); - - for (const auto & role_name : default_roles_ast->except_names) - 
column_default_roles_except.insertData(role_name.data(), role_name.length()); + for (const auto & except_name : default_roles_ast->except_names) + column_default_roles_except.insertData(except_name.data(), except_name.length()); column_default_roles_except_offsets.push_back(column_default_roles_except.size()); + + auto grantees_ast = grantees.toASTWithNames(access_control); + column_grantees_any.push_back(grantees_ast->all); + for (const auto & grantee_name : grantees_ast->names) + column_grantees_list.insertData(grantee_name.data(), grantee_name.length()); + column_grantees_list_offsets.push_back(column_grantees_list.size()); + for (const auto & except_name : grantees_ast->except_names) + column_grantees_except.insertData(except_name.data(), except_name.length()); + column_grantees_except_offsets.push_back(column_grantees_except.size()); }; for (const auto & id : ids) @@ -176,7 +192,7 @@ void StorageSystemUsers::fillData(MutableColumns & res_columns, const Context & if (!storage) continue; - add_row(user->getName(), id, storage->getStorageName(), user->authentication, user->allowed_client_hosts, user->default_roles); + add_row(user->getName(), id, storage->getStorageName(), user->authentication, user->allowed_client_hosts, user->default_roles, user->grantees); } } diff --git a/tests/integration/test_grant_and_revoke/test.py b/tests/integration/test_grant_and_revoke/test.py index e29d63c9e0b..c1be16fe17d 100644 --- a/tests/integration/test_grant_and_revoke/test.py +++ b/tests/integration/test_grant_and_revoke/test.py @@ -26,7 +26,7 @@ def cleanup_after_test(): try: yield finally: - instance.query("DROP USER IF EXISTS A, B") + instance.query("DROP USER IF EXISTS A, B, C") instance.query("DROP TABLE IF EXISTS test.view_1") @@ -106,6 +106,46 @@ def test_revoke_requires_grant_option(): assert instance.query("SHOW GRANTS FOR B") == "" +def test_allowed_grantees(): + instance.query("CREATE USER A") + instance.query("CREATE USER B") + + instance.query('GRANT SELECT ON 
test.table TO A WITH GRANT OPTION') + instance.query("GRANT SELECT ON test.table TO B", user='A') + assert instance.query("SELECT * FROM test.table", user='B') == "1\t5\n2\t10\n" + instance.query("REVOKE SELECT ON test.table FROM B", user='A') + + instance.query('ALTER USER A GRANTEES NONE') + expected_error = "user `B` is not allowed as grantee" + assert expected_error in instance.query_and_get_error("GRANT SELECT ON test.table TO B", user='A') + + instance.query('ALTER USER A GRANTEES ANY EXCEPT B') + assert instance.query('SHOW CREATE USER A') == "CREATE USER A GRANTEES ANY EXCEPT B\n" + expected_error = "user `B` is not allowed as grantee" + assert expected_error in instance.query_and_get_error("GRANT SELECT ON test.table TO B", user='A') + + instance.query('ALTER USER A GRANTEES B') + instance.query("GRANT SELECT ON test.table TO B", user='A') + assert instance.query("SELECT * FROM test.table", user='B') == "1\t5\n2\t10\n" + instance.query("REVOKE SELECT ON test.table FROM B", user='A') + + instance.query('ALTER USER A GRANTEES ANY') + assert instance.query('SHOW CREATE USER A') == "CREATE USER A\n" + instance.query("GRANT SELECT ON test.table TO B", user='A') + assert instance.query("SELECT * FROM test.table", user='B') == "1\t5\n2\t10\n" + + instance.query('ALTER USER A GRANTEES NONE') + expected_error = "user `B` is not allowed as grantee" + assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM B", user='A') + + instance.query("CREATE USER C GRANTEES ANY EXCEPT C") + assert instance.query('SHOW CREATE USER C') == "CREATE USER C GRANTEES ANY EXCEPT C\n" + instance.query('GRANT SELECT ON test.table TO C WITH GRANT OPTION') + assert instance.query("SELECT * FROM test.table", user='C') == "1\t5\n2\t10\n" + expected_error = "user `C` is not allowed as grantee" + assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM C", user='C') + + def test_grant_all_on_table(): instance.query("CREATE USER A, 
B") instance.query("GRANT ALL ON test.table TO A WITH GRANT OPTION") From 1bc21789d239cd3f90703711629b6d15905d86c1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 19:52:51 +0300 Subject: [PATCH 182/333] Add more variants --- utils/memcpy-bench/CMakeLists.txt | 23 +- utils/memcpy-bench/FastMemcpy.cpp | 1 + utils/memcpy-bench/FastMemcpy.h | 2 +- utils/memcpy-bench/FastMemcpy_Avx.cpp | 1 + utils/memcpy-bench/glibc/asm-syntax.h | 24 + utils/memcpy-bench/glibc/dwarf2.h | 590 +++ utils/memcpy-bench/glibc/memcpy-ssse3-back.S | 3182 +++++++++++++++++ utils/memcpy-bench/glibc/memcpy-ssse3.S | 3152 ++++++++++++++++ .../glibc/memmove-avx-unaligned-erms.S | 12 + .../glibc/memmove-avx512-no-vzeroupper.S | 419 +++ .../glibc/memmove-avx512-unaligned-erms.S | 12 + .../glibc/memmove-sse2-unaligned-erms.S | 33 + .../glibc/memmove-vec-unaligned-erms.S | 559 +++ utils/memcpy-bench/glibc/memmove.S | 71 + utils/memcpy-bench/glibc/sysdep.h | 129 + utils/memcpy-bench/glibc/sysdep_generic.h | 113 + utils/memcpy-bench/glibc/sysdep_x86.h | 113 + utils/memcpy-bench/memcpy-bench.cpp | 208 +- 18 files changed, 8585 insertions(+), 59 deletions(-) create mode 100644 utils/memcpy-bench/FastMemcpy.cpp create mode 100644 utils/memcpy-bench/FastMemcpy_Avx.cpp create mode 100644 utils/memcpy-bench/glibc/asm-syntax.h create mode 100644 utils/memcpy-bench/glibc/dwarf2.h create mode 100644 utils/memcpy-bench/glibc/memcpy-ssse3-back.S create mode 100644 utils/memcpy-bench/glibc/memcpy-ssse3.S create mode 100644 utils/memcpy-bench/glibc/memmove-avx-unaligned-erms.S create mode 100644 utils/memcpy-bench/glibc/memmove-avx512-no-vzeroupper.S create mode 100644 utils/memcpy-bench/glibc/memmove-avx512-unaligned-erms.S create mode 100644 utils/memcpy-bench/glibc/memmove-sse2-unaligned-erms.S create mode 100644 utils/memcpy-bench/glibc/memmove-vec-unaligned-erms.S create mode 100644 utils/memcpy-bench/glibc/memmove.S create mode 100644 utils/memcpy-bench/glibc/sysdep.h create mode 100644 
utils/memcpy-bench/glibc/sysdep_generic.h create mode 100644 utils/memcpy-bench/glibc/sysdep_x86.h diff --git a/utils/memcpy-bench/CMakeLists.txt b/utils/memcpy-bench/CMakeLists.txt index 54dd0398912..5fcde231688 100644 --- a/utils/memcpy-bench/CMakeLists.txt +++ b/utils/memcpy-bench/CMakeLists.txt @@ -1,5 +1,22 @@ enable_language(ASM) -add_executable (memcpy-bench memcpy-bench.cpp memcpy_jart.S) -#target_compile_options(memcpy-bench PRIVATE -mavx) -target_link_libraries(memcpy-bench PRIVATE dbms) + +add_executable (memcpy-bench + memcpy-bench.cpp + FastMemcpy.cpp + FastMemcpy_Avx.cpp + memcpy_jart.S + glibc/memcpy-ssse3.S + glibc/memcpy-ssse3-back.S + glibc/memmove-sse2-unaligned-erms.S + glibc/memmove-avx-unaligned-erms.S + glibc/memmove-avx512-unaligned-erms.S + glibc/memmove-avx512-no-vzeroupper.S + ) + +add_compile_options(memcpy-bench PRIVATE -fno-tree-loop-distribute-patterns) + +set_source_files_properties(FastMemcpy.cpp PROPERTIES COMPILE_FLAGS "-Wno-old-style-cast") +set_source_files_properties(FastMemcpy_Avx.cpp PROPERTIES COMPILE_FLAGS "-mavx -Wno-old-style-cast -Wno-cast-qual -Wno-cast-align") + +target_link_libraries(memcpy-bench PRIVATE dbms boost::program_options) diff --git a/utils/memcpy-bench/FastMemcpy.cpp b/utils/memcpy-bench/FastMemcpy.cpp new file mode 100644 index 00000000000..9a50caba2b1 --- /dev/null +++ b/utils/memcpy-bench/FastMemcpy.cpp @@ -0,0 +1 @@ +#include "FastMemcpy.h" diff --git a/utils/memcpy-bench/FastMemcpy.h b/utils/memcpy-bench/FastMemcpy.h index 9c37524443a..85d09c5f53e 100644 --- a/utils/memcpy-bench/FastMemcpy.h +++ b/utils/memcpy-bench/FastMemcpy.h @@ -93,7 +93,7 @@ static INLINE void memcpy_sse2_128(void * __restrict dst, const void * __restric /// Attribute is used to avoid an error with undefined behaviour sanitizer /// ../contrib/FastMemcpy/FastMemcpy.h:91:56: runtime error: applying zero offset to null pointer /// Found by 01307_orc_output_format.sh, cause - ORCBlockInputFormat and external ORC library. 
-__attribute__((__no_sanitize__("undefined"))) static INLINE void *memcpy_tiny(void * __restrict dst, const void * __restrict src, size_t size) +__attribute__((__no_sanitize__("undefined"))) inline void *memcpy_tiny(void * __restrict dst, const void * __restrict src, size_t size) { unsigned char *dd = ((unsigned char*)dst) + size; const unsigned char *ss = ((const unsigned char*)src) + size; diff --git a/utils/memcpy-bench/FastMemcpy_Avx.cpp b/utils/memcpy-bench/FastMemcpy_Avx.cpp new file mode 100644 index 00000000000..8cef0f89507 --- /dev/null +++ b/utils/memcpy-bench/FastMemcpy_Avx.cpp @@ -0,0 +1 @@ +#include "FastMemcpy_Avx.h" diff --git a/utils/memcpy-bench/glibc/asm-syntax.h b/utils/memcpy-bench/glibc/asm-syntax.h new file mode 100644 index 00000000000..6e299c1fec2 --- /dev/null +++ b/utils/memcpy-bench/glibc/asm-syntax.h @@ -0,0 +1,24 @@ +/* Definitions for x86 syntax variations. + Copyright (C) 1992-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. Its master source is NOT part of + the C library, however. The master source lives in the GNU MP Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#undef ALIGN +#define ALIGN(log) .align 1<. 
*/ + +#ifndef _DWARF2_H +#define _DWARF2_H 1 + +/* This file is derived from the DWARF specification (a public document) + Revision 2.0.0 (July 27, 1993) developed by the UNIX International + Programming Languages Special Interest Group (UI/PLSIG) and distributed + by UNIX International. Copies of this specification are available from + UNIX International, 20 Waterview Boulevard, Parsippany, NJ, 07054. */ + +/* This file is shared between GCC and GDB, and should not contain + prototypes. */ + +#ifndef __ASSEMBLER__ +/* Tag names and codes. */ + +enum dwarf_tag + { + DW_TAG_padding = 0x00, + DW_TAG_array_type = 0x01, + DW_TAG_class_type = 0x02, + DW_TAG_entry_point = 0x03, + DW_TAG_enumeration_type = 0x04, + DW_TAG_formal_parameter = 0x05, + DW_TAG_imported_declaration = 0x08, + DW_TAG_label = 0x0a, + DW_TAG_lexical_block = 0x0b, + DW_TAG_member = 0x0d, + DW_TAG_pointer_type = 0x0f, + DW_TAG_reference_type = 0x10, + DW_TAG_compile_unit = 0x11, + DW_TAG_string_type = 0x12, + DW_TAG_structure_type = 0x13, + DW_TAG_subroutine_type = 0x15, + DW_TAG_typedef = 0x16, + DW_TAG_union_type = 0x17, + DW_TAG_unspecified_parameters = 0x18, + DW_TAG_variant = 0x19, + DW_TAG_common_block = 0x1a, + DW_TAG_common_inclusion = 0x1b, + DW_TAG_inheritance = 0x1c, + DW_TAG_inlined_subroutine = 0x1d, + DW_TAG_module = 0x1e, + DW_TAG_ptr_to_member_type = 0x1f, + DW_TAG_set_type = 0x20, + DW_TAG_subrange_type = 0x21, + DW_TAG_with_stmt = 0x22, + DW_TAG_access_declaration = 0x23, + DW_TAG_base_type = 0x24, + DW_TAG_catch_block = 0x25, + DW_TAG_const_type = 0x26, + DW_TAG_constant = 0x27, + DW_TAG_enumerator = 0x28, + DW_TAG_file_type = 0x29, + DW_TAG_friend = 0x2a, + DW_TAG_namelist = 0x2b, + DW_TAG_namelist_item = 0x2c, + DW_TAG_packed_type = 0x2d, + DW_TAG_subprogram = 0x2e, + DW_TAG_template_type_param = 0x2f, + DW_TAG_template_value_param = 0x30, + DW_TAG_thrown_type = 0x31, + DW_TAG_try_block = 0x32, + DW_TAG_variant_part = 0x33, + DW_TAG_variable = 0x34, + DW_TAG_volatile_type = 0x35, 
+ /* SGI/MIPS Extensions */ + DW_TAG_MIPS_loop = 0x4081, + /* GNU extensions */ + DW_TAG_format_label = 0x4101, /* for FORTRAN 77 and Fortran 90 */ + DW_TAG_function_template = 0x4102, /* for C++ */ + DW_TAG_class_template = 0x4103, /* for C++ */ + DW_TAG_GNU_BINCL = 0x4104, + DW_TAG_GNU_EINCL = 0x4105 + }; + +#define DW_TAG_lo_user 0x4080 +#define DW_TAG_hi_user 0xffff + +/* flag that tells whether entry has a child or not */ +#define DW_children_no 0 +#define DW_children_yes 1 + +/* Form names and codes. */ +enum dwarf_form + { + DW_FORM_addr = 0x01, + DW_FORM_block2 = 0x03, + DW_FORM_block4 = 0x04, + DW_FORM_data2 = 0x05, + DW_FORM_data4 = 0x06, + DW_FORM_data8 = 0x07, + DW_FORM_string = 0x08, + DW_FORM_block = 0x09, + DW_FORM_block1 = 0x0a, + DW_FORM_data1 = 0x0b, + DW_FORM_flag = 0x0c, + DW_FORM_sdata = 0x0d, + DW_FORM_strp = 0x0e, + DW_FORM_udata = 0x0f, + DW_FORM_ref_addr = 0x10, + DW_FORM_ref1 = 0x11, + DW_FORM_ref2 = 0x12, + DW_FORM_ref4 = 0x13, + DW_FORM_ref8 = 0x14, + DW_FORM_ref_udata = 0x15, + DW_FORM_indirect = 0x16 + }; + +/* Attribute names and codes. 
*/ + +enum dwarf_attribute + { + DW_AT_sibling = 0x01, + DW_AT_location = 0x02, + DW_AT_name = 0x03, + DW_AT_ordering = 0x09, + DW_AT_subscr_data = 0x0a, + DW_AT_byte_size = 0x0b, + DW_AT_bit_offset = 0x0c, + DW_AT_bit_size = 0x0d, + DW_AT_element_list = 0x0f, + DW_AT_stmt_list = 0x10, + DW_AT_low_pc = 0x11, + DW_AT_high_pc = 0x12, + DW_AT_language = 0x13, + DW_AT_member = 0x14, + DW_AT_discr = 0x15, + DW_AT_discr_value = 0x16, + DW_AT_visibility = 0x17, + DW_AT_import = 0x18, + DW_AT_string_length = 0x19, + DW_AT_common_reference = 0x1a, + DW_AT_comp_dir = 0x1b, + DW_AT_const_value = 0x1c, + DW_AT_containing_type = 0x1d, + DW_AT_default_value = 0x1e, + DW_AT_inline = 0x20, + DW_AT_is_optional = 0x21, + DW_AT_lower_bound = 0x22, + DW_AT_producer = 0x25, + DW_AT_prototyped = 0x27, + DW_AT_return_addr = 0x2a, + DW_AT_start_scope = 0x2c, + DW_AT_stride_size = 0x2e, + DW_AT_upper_bound = 0x2f, + DW_AT_abstract_origin = 0x31, + DW_AT_accessibility = 0x32, + DW_AT_address_class = 0x33, + DW_AT_artificial = 0x34, + DW_AT_base_types = 0x35, + DW_AT_calling_convention = 0x36, + DW_AT_count = 0x37, + DW_AT_data_member_location = 0x38, + DW_AT_decl_column = 0x39, + DW_AT_decl_file = 0x3a, + DW_AT_decl_line = 0x3b, + DW_AT_declaration = 0x3c, + DW_AT_discr_list = 0x3d, + DW_AT_encoding = 0x3e, + DW_AT_external = 0x3f, + DW_AT_frame_base = 0x40, + DW_AT_friend = 0x41, + DW_AT_identifier_case = 0x42, + DW_AT_macro_info = 0x43, + DW_AT_namelist_items = 0x44, + DW_AT_priority = 0x45, + DW_AT_segment = 0x46, + DW_AT_specification = 0x47, + DW_AT_static_link = 0x48, + DW_AT_type = 0x49, + DW_AT_use_location = 0x4a, + DW_AT_variable_parameter = 0x4b, + DW_AT_virtuality = 0x4c, + DW_AT_vtable_elem_location = 0x4d, + /* SGI/MIPS Extensions */ + DW_AT_MIPS_fde = 0x2001, + DW_AT_MIPS_loop_begin = 0x2002, + DW_AT_MIPS_tail_loop_begin = 0x2003, + DW_AT_MIPS_epilog_begin = 0x2004, + DW_AT_MIPS_loop_unroll_factor = 0x2005, + DW_AT_MIPS_software_pipeline_depth = 0x2006, + 
DW_AT_MIPS_linkage_name = 0x2007, + DW_AT_MIPS_stride = 0x2008, + DW_AT_MIPS_abstract_name = 0x2009, + DW_AT_MIPS_clone_origin = 0x200a, + DW_AT_MIPS_has_inlines = 0x200b, + /* GNU extensions. */ + DW_AT_sf_names = 0x2101, + DW_AT_src_info = 0x2102, + DW_AT_mac_info = 0x2103, + DW_AT_src_coords = 0x2104, + DW_AT_body_begin = 0x2105, + DW_AT_body_end = 0x2106 + }; + +#define DW_AT_lo_user 0x2000 /* implementation-defined range start */ +#define DW_AT_hi_user 0x3ff0 /* implementation-defined range end */ + +/* Location atom names and codes. */ + +enum dwarf_location_atom + { + DW_OP_addr = 0x03, + DW_OP_deref = 0x06, + DW_OP_const1u = 0x08, + DW_OP_const1s = 0x09, + DW_OP_const2u = 0x0a, + DW_OP_const2s = 0x0b, + DW_OP_const4u = 0x0c, + DW_OP_const4s = 0x0d, + DW_OP_const8u = 0x0e, + DW_OP_const8s = 0x0f, + DW_OP_constu = 0x10, + DW_OP_consts = 0x11, + DW_OP_dup = 0x12, + DW_OP_drop = 0x13, + DW_OP_over = 0x14, + DW_OP_pick = 0x15, + DW_OP_swap = 0x16, + DW_OP_rot = 0x17, + DW_OP_xderef = 0x18, + DW_OP_abs = 0x19, + DW_OP_and = 0x1a, + DW_OP_div = 0x1b, + DW_OP_minus = 0x1c, + DW_OP_mod = 0x1d, + DW_OP_mul = 0x1e, + DW_OP_neg = 0x1f, + DW_OP_not = 0x20, + DW_OP_or = 0x21, + DW_OP_plus = 0x22, + DW_OP_plus_uconst = 0x23, + DW_OP_shl = 0x24, + DW_OP_shr = 0x25, + DW_OP_shra = 0x26, + DW_OP_xor = 0x27, + DW_OP_bra = 0x28, + DW_OP_eq = 0x29, + DW_OP_ge = 0x2a, + DW_OP_gt = 0x2b, + DW_OP_le = 0x2c, + DW_OP_lt = 0x2d, + DW_OP_ne = 0x2e, + DW_OP_skip = 0x2f, + DW_OP_lit0 = 0x30, + DW_OP_lit1 = 0x31, + DW_OP_lit2 = 0x32, + DW_OP_lit3 = 0x33, + DW_OP_lit4 = 0x34, + DW_OP_lit5 = 0x35, + DW_OP_lit6 = 0x36, + DW_OP_lit7 = 0x37, + DW_OP_lit8 = 0x38, + DW_OP_lit9 = 0x39, + DW_OP_lit10 = 0x3a, + DW_OP_lit11 = 0x3b, + DW_OP_lit12 = 0x3c, + DW_OP_lit13 = 0x3d, + DW_OP_lit14 = 0x3e, + DW_OP_lit15 = 0x3f, + DW_OP_lit16 = 0x40, + DW_OP_lit17 = 0x41, + DW_OP_lit18 = 0x42, + DW_OP_lit19 = 0x43, + DW_OP_lit20 = 0x44, + DW_OP_lit21 = 0x45, + DW_OP_lit22 = 0x46, + DW_OP_lit23 = 0x47, + 
DW_OP_lit24 = 0x48, + DW_OP_lit25 = 0x49, + DW_OP_lit26 = 0x4a, + DW_OP_lit27 = 0x4b, + DW_OP_lit28 = 0x4c, + DW_OP_lit29 = 0x4d, + DW_OP_lit30 = 0x4e, + DW_OP_lit31 = 0x4f, + DW_OP_reg0 = 0x50, + DW_OP_reg1 = 0x51, + DW_OP_reg2 = 0x52, + DW_OP_reg3 = 0x53, + DW_OP_reg4 = 0x54, + DW_OP_reg5 = 0x55, + DW_OP_reg6 = 0x56, + DW_OP_reg7 = 0x57, + DW_OP_reg8 = 0x58, + DW_OP_reg9 = 0x59, + DW_OP_reg10 = 0x5a, + DW_OP_reg11 = 0x5b, + DW_OP_reg12 = 0x5c, + DW_OP_reg13 = 0x5d, + DW_OP_reg14 = 0x5e, + DW_OP_reg15 = 0x5f, + DW_OP_reg16 = 0x60, + DW_OP_reg17 = 0x61, + DW_OP_reg18 = 0x62, + DW_OP_reg19 = 0x63, + DW_OP_reg20 = 0x64, + DW_OP_reg21 = 0x65, + DW_OP_reg22 = 0x66, + DW_OP_reg23 = 0x67, + DW_OP_reg24 = 0x68, + DW_OP_reg25 = 0x69, + DW_OP_reg26 = 0x6a, + DW_OP_reg27 = 0x6b, + DW_OP_reg28 = 0x6c, + DW_OP_reg29 = 0x6d, + DW_OP_reg30 = 0x6e, + DW_OP_reg31 = 0x6f, + DW_OP_breg0 = 0x70, + DW_OP_breg1 = 0x71, + DW_OP_breg2 = 0x72, + DW_OP_breg3 = 0x73, + DW_OP_breg4 = 0x74, + DW_OP_breg5 = 0x75, + DW_OP_breg6 = 0x76, + DW_OP_breg7 = 0x77, + DW_OP_breg8 = 0x78, + DW_OP_breg9 = 0x79, + DW_OP_breg10 = 0x7a, + DW_OP_breg11 = 0x7b, + DW_OP_breg12 = 0x7c, + DW_OP_breg13 = 0x7d, + DW_OP_breg14 = 0x7e, + DW_OP_breg15 = 0x7f, + DW_OP_breg16 = 0x80, + DW_OP_breg17 = 0x81, + DW_OP_breg18 = 0x82, + DW_OP_breg19 = 0x83, + DW_OP_breg20 = 0x84, + DW_OP_breg21 = 0x85, + DW_OP_breg22 = 0x86, + DW_OP_breg23 = 0x87, + DW_OP_breg24 = 0x88, + DW_OP_breg25 = 0x89, + DW_OP_breg26 = 0x8a, + DW_OP_breg27 = 0x8b, + DW_OP_breg28 = 0x8c, + DW_OP_breg29 = 0x8d, + DW_OP_breg30 = 0x8e, + DW_OP_breg31 = 0x8f, + DW_OP_regx = 0x90, + DW_OP_fbreg = 0x91, + DW_OP_bregx = 0x92, + DW_OP_piece = 0x93, + DW_OP_deref_size = 0x94, + DW_OP_xderef_size = 0x95, + DW_OP_nop = 0x96 + }; + +#define DW_OP_lo_user 0x80 /* implementation-defined range start */ +#define DW_OP_hi_user 0xff /* implementation-defined range end */ + +/* Type encodings. 
*/ + +enum dwarf_type + { + DW_ATE_void = 0x0, + DW_ATE_address = 0x1, + DW_ATE_boolean = 0x2, + DW_ATE_complex_float = 0x3, + DW_ATE_float = 0x4, + DW_ATE_signed = 0x5, + DW_ATE_signed_char = 0x6, + DW_ATE_unsigned = 0x7, + DW_ATE_unsigned_char = 0x8 + }; + +#define DW_ATE_lo_user 0x80 +#define DW_ATE_hi_user 0xff + +/* Array ordering names and codes. */ +enum dwarf_array_dim_ordering + { + DW_ORD_row_major = 0, + DW_ORD_col_major = 1 + }; + +/* access attribute */ +enum dwarf_access_attribute + { + DW_ACCESS_public = 1, + DW_ACCESS_protected = 2, + DW_ACCESS_private = 3 + }; + +/* visibility */ +enum dwarf_visibility_attribute + { + DW_VIS_local = 1, + DW_VIS_exported = 2, + DW_VIS_qualified = 3 + }; + +/* virtuality */ +enum dwarf_virtuality_attribute + { + DW_VIRTUALITY_none = 0, + DW_VIRTUALITY_virtual = 1, + DW_VIRTUALITY_pure_virtual = 2 + }; + +/* case sensitivity */ +enum dwarf_id_case + { + DW_ID_case_sensitive = 0, + DW_ID_up_case = 1, + DW_ID_down_case = 2, + DW_ID_case_insensitive = 3 + }; + +/* calling convention */ +enum dwarf_calling_convention + { + DW_CC_normal = 0x1, + DW_CC_program = 0x2, + DW_CC_nocall = 0x3 + }; + +#define DW_CC_lo_user 0x40 +#define DW_CC_hi_user 0xff + +/* inline attribute */ +enum dwarf_inline_attribute + { + DW_INL_not_inlined = 0, + DW_INL_inlined = 1, + DW_INL_declared_not_inlined = 2, + DW_INL_declared_inlined = 3 + }; + +/* discriminant lists */ +enum dwarf_discrim_list + { + DW_DSC_label = 0, + DW_DSC_range = 1 + }; + +/* line number opcodes */ +enum dwarf_line_number_ops + { + DW_LNS_extended_op = 0, + DW_LNS_copy = 1, + DW_LNS_advance_pc = 2, + DW_LNS_advance_line = 3, + DW_LNS_set_file = 4, + DW_LNS_set_column = 5, + DW_LNS_negate_stmt = 6, + DW_LNS_set_basic_block = 7, + DW_LNS_const_add_pc = 8, + DW_LNS_fixed_advance_pc = 9 + }; + +/* line number extended opcodes */ +enum dwarf_line_number_x_ops + { + DW_LNE_end_sequence = 1, + DW_LNE_set_address = 2, + DW_LNE_define_file = 3 + }; + +/* call frame information */ 
+enum dwarf_call_frame_info + { + DW_CFA_advance_loc = 0x40, + DW_CFA_offset = 0x80, + DW_CFA_restore = 0xc0, + DW_CFA_nop = 0x00, + DW_CFA_set_loc = 0x01, + DW_CFA_advance_loc1 = 0x02, + DW_CFA_advance_loc2 = 0x03, + DW_CFA_advance_loc4 = 0x04, + DW_CFA_offset_extended = 0x05, + DW_CFA_restore_extended = 0x06, + DW_CFA_undefined = 0x07, + DW_CFA_same_value = 0x08, + DW_CFA_register = 0x09, + DW_CFA_remember_state = 0x0a, + DW_CFA_restore_state = 0x0b, + DW_CFA_def_cfa = 0x0c, + DW_CFA_def_cfa_register = 0x0d, + DW_CFA_def_cfa_offset = 0x0e, + DW_CFA_def_cfa_expression = 0x0f, + DW_CFA_expression = 0x10, + /* Dwarf 2.1 */ + DW_CFA_offset_extended_sf = 0x11, + DW_CFA_def_cfa_sf = 0x12, + DW_CFA_def_cfa_offset_sf = 0x13, + + /* SGI/MIPS specific */ + DW_CFA_MIPS_advance_loc8 = 0x1d, + + /* GNU extensions */ + DW_CFA_GNU_window_save = 0x2d, + DW_CFA_GNU_args_size = 0x2e, + DW_CFA_GNU_negative_offset_extended = 0x2f + }; + +#define DW_CIE_ID 0xffffffff +#define DW_CIE_VERSION 1 + +#define DW_CFA_extended 0 +#define DW_CFA_low_user 0x1c +#define DW_CFA_high_user 0x3f + +#define DW_CHILDREN_no 0x00 +#define DW_CHILDREN_yes 0x01 + +#define DW_ADDR_none 0 + +/* Source language names and codes. */ + +enum dwarf_source_language + { + DW_LANG_C89 = 0x0001, + DW_LANG_C = 0x0002, + DW_LANG_Ada83 = 0x0003, + DW_LANG_C_plus_plus = 0x0004, + DW_LANG_Cobol74 = 0x0005, + DW_LANG_Cobol85 = 0x0006, + DW_LANG_Fortran77 = 0x0007, + DW_LANG_Fortran90 = 0x0008, + DW_LANG_Pascal83 = 0x0009, + DW_LANG_Modula2 = 0x000a, + DW_LANG_Java = 0x000b, + DW_LANG_Mips_Assembler = 0x8001 + }; + + +#define DW_LANG_lo_user 0x8000 /* implementation-defined range start */ +#define DW_LANG_hi_user 0xffff /* implementation-defined range start */ + +/* Names and codes for macro information. 
*/ + +enum dwarf_macinfo_record_type + { + DW_MACINFO_define = 1, + DW_MACINFO_undef = 2, + DW_MACINFO_start_file = 3, + DW_MACINFO_end_file = 4, + DW_MACINFO_vendor_ext = 255 + }; + +#endif /* !ASSEMBLER */ + +/* @@@ For use with GNU frame unwind information. */ + +#define DW_EH_PE_absptr 0x00 +#define DW_EH_PE_omit 0xff + +#define DW_EH_PE_uleb128 0x01 +#define DW_EH_PE_udata2 0x02 +#define DW_EH_PE_udata4 0x03 +#define DW_EH_PE_udata8 0x04 +#define DW_EH_PE_sleb128 0x09 +#define DW_EH_PE_sdata2 0x0A +#define DW_EH_PE_sdata4 0x0B +#define DW_EH_PE_sdata8 0x0C +#define DW_EH_PE_signed 0x08 + +#define DW_EH_PE_pcrel 0x10 +#define DW_EH_PE_textrel 0x20 +#define DW_EH_PE_datarel 0x30 +#define DW_EH_PE_funcrel 0x40 +#define DW_EH_PE_aligned 0x50 + +#define DW_EH_PE_indirect 0x80 + +#endif /* dwarf2.h */ diff --git a/utils/memcpy-bench/glibc/memcpy-ssse3-back.S b/utils/memcpy-bench/glibc/memcpy-ssse3-back.S new file mode 100644 index 00000000000..1492dd38e73 --- /dev/null +++ b/utils/memcpy-bench/glibc/memcpy-ssse3-back.S @@ -0,0 +1,3182 @@ +/* memcpy with SSSE3 and REP string + Copyright (C) 2010-2020 Free Software Foundation, Inc. + Contributed by Intel Corporation. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . 
*/ + +#include "sysdep.h" + +#if 1 + +#include "asm-syntax.h" + +#ifndef MEMCPY +# define MEMCPY __memcpy_ssse3_back +# define MEMCPY_CHK __memcpy_chk_ssse3_back +# define MEMPCPY __mempcpy_ssse3_back +# define MEMPCPY_CHK __mempcpy_chk_ssse3_back +#endif + +#define JMPTBL(I, B) I - B + +/* Branch to an entry in a jump table. TABLE is a jump table with + relative offsets. INDEX is a register contains the index into the + jump table. SCALE is the scale of INDEX. */ +#define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ + lea TABLE(%rip), %r11; \ + movslq (%r11, INDEX, SCALE), INDEX; \ + lea (%r11, INDEX), INDEX; \ + _CET_NOTRACK jmp *INDEX; \ + ud2 + + .section .text.ssse3,"ax",@progbits +#if !defined USE_AS_MEMPCPY && !defined USE_AS_MEMMOVE +ENTRY (MEMPCPY_CHK) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (MEMPCPY_CHK) + +ENTRY (MEMPCPY) + mov %RDI_LP, %RAX_LP + add %RDX_LP, %RAX_LP + jmp L(start) +END (MEMPCPY) +#endif + +#if !defined USE_AS_BCOPY +ENTRY (MEMCPY_CHK) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (MEMCPY_CHK) +#endif + +ENTRY (MEMCPY) + mov %RDI_LP, %RAX_LP +#ifdef USE_AS_MEMPCPY + add %RDX_LP, %RAX_LP +#endif + +#ifdef __ILP32__ + /* Clear the upper 32 bits. 
*/ + mov %edx, %edx +#endif + +#ifdef USE_AS_MEMMOVE + cmp %rsi, %rdi + jb L(copy_forward) + je L(bwd_write_0bytes) + cmp $144, %rdx + jae L(copy_backward) + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) +L(copy_forward): +#endif +L(start): + cmp $144, %rdx + jae L(144bytesormore) + +L(fwd_write_less32bytes): +#ifndef USE_AS_MEMMOVE + cmp %dil, %sil + jbe L(bk_write) +#endif + add %rdx, %rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) +#ifndef USE_AS_MEMMOVE +L(bk_write): + + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) +#endif + + .p2align 4 +L(144bytesormore): + +#ifndef USE_AS_MEMMOVE + cmp %dil, %sil + jle L(copy_backward) +#endif + movdqu (%rsi), %xmm0 + mov %rdi, %r8 + and $-16, %rdi + add $16, %rdi + mov %rdi, %r9 + sub %r8, %r9 + sub %r9, %rdx + add %r9, %rsi + mov %rsi, %r9 + and $0xf, %r9 + jz L(shl_0) +#ifdef DATA_CACHE_SIZE + mov $DATA_CACHE_SIZE, %RCX_LP +#else + mov __x86_data_cache_size(%rip), %RCX_LP +#endif + cmp %rcx, %rdx + jae L(gobble_mem_fwd) + lea L(shl_table_fwd)(%rip), %r11 + sub $0x80, %rdx + movslq (%r11, %r9, 4), %r9 + add %r11, %r9 + _CET_NOTRACK jmp *%r9 + ud2 + + .p2align 4 +L(copy_backward): +#ifdef DATA_CACHE_SIZE + mov $DATA_CACHE_SIZE, %RCX_LP +#else + mov __x86_data_cache_size(%rip), %RCX_LP +#endif + shl $1, %rcx + cmp %rcx, %rdx + ja L(gobble_mem_bwd) + + add %rdx, %rdi + add %rdx, %rsi + movdqu -16(%rsi), %xmm0 + lea -16(%rdi), %r8 + mov %rdi, %r9 + and $0xf, %r9 + xor %r9, %rdi + sub %r9, %rsi + sub %r9, %rdx + mov %rsi, %r9 + and $0xf, %r9 + jz L(shl_0_bwd) + lea L(shl_table_bwd)(%rip), %r11 + sub $0x80, %rdx + movslq (%r11, %r9, 4), %r9 + add %r11, %r9 + _CET_NOTRACK jmp *%r9 + ud2 + + .p2align 4 +L(shl_0): + + mov %rdx, %r9 + shr $8, %r9 + add %rdx, %r9 +#ifdef DATA_CACHE_SIZE + cmp $DATA_CACHE_SIZE_HALF, %R9_LP +#else + cmp __x86_data_cache_size_half(%rip), %R9_LP +#endif + jae L(gobble_mem_fwd) + sub $0x80, %rdx + .p2align 4 +L(shl_0_loop): + movdqa (%rsi), %xmm1 + 
movdqa %xmm1, (%rdi) + movaps 0x10(%rsi), %xmm2 + movaps %xmm2, 0x10(%rdi) + movaps 0x20(%rsi), %xmm3 + movaps %xmm3, 0x20(%rdi) + movaps 0x30(%rsi), %xmm4 + movaps %xmm4, 0x30(%rdi) + movaps 0x40(%rsi), %xmm1 + movaps %xmm1, 0x40(%rdi) + movaps 0x50(%rsi), %xmm2 + movaps %xmm2, 0x50(%rdi) + movaps 0x60(%rsi), %xmm3 + movaps %xmm3, 0x60(%rdi) + movaps 0x70(%rsi), %xmm4 + movaps %xmm4, 0x70(%rdi) + sub $0x80, %rdx + lea 0x80(%rsi), %rsi + lea 0x80(%rdi), %rdi + jae L(shl_0_loop) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_0_bwd): + sub $0x80, %rdx +L(copy_backward_loop): + movaps -0x10(%rsi), %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps -0x20(%rsi), %xmm2 + movaps %xmm2, -0x20(%rdi) + movaps -0x30(%rsi), %xmm3 + movaps %xmm3, -0x30(%rdi) + movaps -0x40(%rsi), %xmm4 + movaps %xmm4, -0x40(%rdi) + movaps -0x50(%rsi), %xmm5 + movaps %xmm5, -0x50(%rdi) + movaps -0x60(%rsi), %xmm5 + movaps %xmm5, -0x60(%rdi) + movaps -0x70(%rsi), %xmm5 + movaps %xmm5, -0x70(%rdi) + movaps -0x80(%rsi), %xmm5 + movaps %xmm5, -0x80(%rdi) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(copy_backward_loop) + + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_1): + sub $0x80, %rdx + movaps -0x01(%rsi), %xmm1 + movaps 0x0f(%rsi), %xmm2 + movaps 0x1f(%rsi), %xmm3 + movaps 0x2f(%rsi), %xmm4 + movaps 0x3f(%rsi), %xmm5 + movaps 0x4f(%rsi), %xmm6 + movaps 0x5f(%rsi), %xmm7 + movaps 0x6f(%rsi), %xmm8 + movaps 0x7f(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $1, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $1, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $1, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $1, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $1, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $1, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) 
+ palignr $1, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $1, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_1) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_1_bwd): + movaps -0x01(%rsi), %xmm1 + + movaps -0x11(%rsi), %xmm2 + palignr $1, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x21(%rsi), %xmm3 + palignr $1, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x31(%rsi), %xmm4 + palignr $1, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x41(%rsi), %xmm5 + palignr $1, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x51(%rsi), %xmm6 + palignr $1, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x61(%rsi), %xmm7 + palignr $1, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x71(%rsi), %xmm8 + palignr $1, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x81(%rsi), %xmm9 + palignr $1, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_1_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_2): + sub $0x80, %rdx + movaps -0x02(%rsi), %xmm1 + movaps 0x0e(%rsi), %xmm2 + movaps 0x1e(%rsi), %xmm3 + movaps 0x2e(%rsi), %xmm4 + movaps 0x3e(%rsi), %xmm5 + movaps 0x4e(%rsi), %xmm6 + movaps 0x5e(%rsi), %xmm7 + movaps 0x6e(%rsi), %xmm8 + movaps 0x7e(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $2, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $2, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $2, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $2, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $2, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $2, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $2, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $2, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + 
jae L(shl_2) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_2_bwd): + movaps -0x02(%rsi), %xmm1 + + movaps -0x12(%rsi), %xmm2 + palignr $2, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x22(%rsi), %xmm3 + palignr $2, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x32(%rsi), %xmm4 + palignr $2, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x42(%rsi), %xmm5 + palignr $2, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x52(%rsi), %xmm6 + palignr $2, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x62(%rsi), %xmm7 + palignr $2, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x72(%rsi), %xmm8 + palignr $2, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x82(%rsi), %xmm9 + palignr $2, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_2_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_3): + sub $0x80, %rdx + movaps -0x03(%rsi), %xmm1 + movaps 0x0d(%rsi), %xmm2 + movaps 0x1d(%rsi), %xmm3 + movaps 0x2d(%rsi), %xmm4 + movaps 0x3d(%rsi), %xmm5 + movaps 0x4d(%rsi), %xmm6 + movaps 0x5d(%rsi), %xmm7 + movaps 0x6d(%rsi), %xmm8 + movaps 0x7d(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $3, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $3, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $3, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $3, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $3, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $3, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $3, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $3, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_3) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY 
(L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_3_bwd): + movaps -0x03(%rsi), %xmm1 + + movaps -0x13(%rsi), %xmm2 + palignr $3, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x23(%rsi), %xmm3 + palignr $3, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x33(%rsi), %xmm4 + palignr $3, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x43(%rsi), %xmm5 + palignr $3, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x53(%rsi), %xmm6 + palignr $3, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x63(%rsi), %xmm7 + palignr $3, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x73(%rsi), %xmm8 + palignr $3, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x83(%rsi), %xmm9 + palignr $3, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_3_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_4): + sub $0x80, %rdx + movaps -0x04(%rsi), %xmm1 + movaps 0x0c(%rsi), %xmm2 + movaps 0x1c(%rsi), %xmm3 + movaps 0x2c(%rsi), %xmm4 + movaps 0x3c(%rsi), %xmm5 + movaps 0x4c(%rsi), %xmm6 + movaps 0x5c(%rsi), %xmm7 + movaps 0x6c(%rsi), %xmm8 + movaps 0x7c(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $4, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $4, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $4, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $4, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $4, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $4, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $4, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $4, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_4) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_4_bwd): + movaps -0x04(%rsi), %xmm1 + + movaps -0x14(%rsi), %xmm2 + palignr 
$4, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x24(%rsi), %xmm3 + palignr $4, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x34(%rsi), %xmm4 + palignr $4, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x44(%rsi), %xmm5 + palignr $4, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x54(%rsi), %xmm6 + palignr $4, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x64(%rsi), %xmm7 + palignr $4, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x74(%rsi), %xmm8 + palignr $4, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x84(%rsi), %xmm9 + palignr $4, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_4_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_5): + sub $0x80, %rdx + movaps -0x05(%rsi), %xmm1 + movaps 0x0b(%rsi), %xmm2 + movaps 0x1b(%rsi), %xmm3 + movaps 0x2b(%rsi), %xmm4 + movaps 0x3b(%rsi), %xmm5 + movaps 0x4b(%rsi), %xmm6 + movaps 0x5b(%rsi), %xmm7 + movaps 0x6b(%rsi), %xmm8 + movaps 0x7b(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $5, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $5, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $5, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $5, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $5, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $5, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $5, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $5, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_5) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_5_bwd): + movaps -0x05(%rsi), %xmm1 + + movaps -0x15(%rsi), %xmm2 + palignr $5, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x25(%rsi), %xmm3 + palignr $5, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + 
+ movaps -0x35(%rsi), %xmm4 + palignr $5, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x45(%rsi), %xmm5 + palignr $5, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x55(%rsi), %xmm6 + palignr $5, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x65(%rsi), %xmm7 + palignr $5, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x75(%rsi), %xmm8 + palignr $5, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x85(%rsi), %xmm9 + palignr $5, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_5_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_6): + sub $0x80, %rdx + movaps -0x06(%rsi), %xmm1 + movaps 0x0a(%rsi), %xmm2 + movaps 0x1a(%rsi), %xmm3 + movaps 0x2a(%rsi), %xmm4 + movaps 0x3a(%rsi), %xmm5 + movaps 0x4a(%rsi), %xmm6 + movaps 0x5a(%rsi), %xmm7 + movaps 0x6a(%rsi), %xmm8 + movaps 0x7a(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $6, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $6, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $6, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $6, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $6, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $6, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $6, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $6, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_6) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_6_bwd): + movaps -0x06(%rsi), %xmm1 + + movaps -0x16(%rsi), %xmm2 + palignr $6, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x26(%rsi), %xmm3 + palignr $6, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x36(%rsi), %xmm4 + palignr $6, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x46(%rsi), %xmm5 + palignr $6, 
%xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x56(%rsi), %xmm6 + palignr $6, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x66(%rsi), %xmm7 + palignr $6, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x76(%rsi), %xmm8 + palignr $6, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x86(%rsi), %xmm9 + palignr $6, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_6_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_7): + sub $0x80, %rdx + movaps -0x07(%rsi), %xmm1 + movaps 0x09(%rsi), %xmm2 + movaps 0x19(%rsi), %xmm3 + movaps 0x29(%rsi), %xmm4 + movaps 0x39(%rsi), %xmm5 + movaps 0x49(%rsi), %xmm6 + movaps 0x59(%rsi), %xmm7 + movaps 0x69(%rsi), %xmm8 + movaps 0x79(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $7, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $7, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $7, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $7, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $7, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $7, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $7, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $7, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_7) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_7_bwd): + movaps -0x07(%rsi), %xmm1 + + movaps -0x17(%rsi), %xmm2 + palignr $7, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x27(%rsi), %xmm3 + palignr $7, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x37(%rsi), %xmm4 + palignr $7, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x47(%rsi), %xmm5 + palignr $7, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x57(%rsi), %xmm6 + palignr $7, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + 
movaps -0x67(%rsi), %xmm7 + palignr $7, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x77(%rsi), %xmm8 + palignr $7, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x87(%rsi), %xmm9 + palignr $7, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_7_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_8): + sub $0x80, %rdx + movaps -0x08(%rsi), %xmm1 + movaps 0x08(%rsi), %xmm2 + movaps 0x18(%rsi), %xmm3 + movaps 0x28(%rsi), %xmm4 + movaps 0x38(%rsi), %xmm5 + movaps 0x48(%rsi), %xmm6 + movaps 0x58(%rsi), %xmm7 + movaps 0x68(%rsi), %xmm8 + movaps 0x78(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $8, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $8, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $8, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $8, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $8, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $8, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $8, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $8, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_8) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_8_bwd): + movaps -0x08(%rsi), %xmm1 + + movaps -0x18(%rsi), %xmm2 + palignr $8, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x28(%rsi), %xmm3 + palignr $8, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x38(%rsi), %xmm4 + palignr $8, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x48(%rsi), %xmm5 + palignr $8, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x58(%rsi), %xmm6 + palignr $8, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x68(%rsi), %xmm7 + palignr $8, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x78(%rsi), %xmm8 + palignr $8, %xmm8, 
%xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x88(%rsi), %xmm9 + palignr $8, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_8_bwd) +L(shl_8_end_bwd): + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_9): + sub $0x80, %rdx + movaps -0x09(%rsi), %xmm1 + movaps 0x07(%rsi), %xmm2 + movaps 0x17(%rsi), %xmm3 + movaps 0x27(%rsi), %xmm4 + movaps 0x37(%rsi), %xmm5 + movaps 0x47(%rsi), %xmm6 + movaps 0x57(%rsi), %xmm7 + movaps 0x67(%rsi), %xmm8 + movaps 0x77(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $9, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $9, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $9, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $9, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $9, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $9, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $9, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $9, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_9) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_9_bwd): + movaps -0x09(%rsi), %xmm1 + + movaps -0x19(%rsi), %xmm2 + palignr $9, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x29(%rsi), %xmm3 + palignr $9, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x39(%rsi), %xmm4 + palignr $9, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x49(%rsi), %xmm5 + palignr $9, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x59(%rsi), %xmm6 + palignr $9, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x69(%rsi), %xmm7 + palignr $9, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x79(%rsi), %xmm8 + palignr $9, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x89(%rsi), %xmm9 + palignr $9, %xmm9, %xmm8 + movaps %xmm8, 
-0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_9_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_10): + sub $0x80, %rdx + movaps -0x0a(%rsi), %xmm1 + movaps 0x06(%rsi), %xmm2 + movaps 0x16(%rsi), %xmm3 + movaps 0x26(%rsi), %xmm4 + movaps 0x36(%rsi), %xmm5 + movaps 0x46(%rsi), %xmm6 + movaps 0x56(%rsi), %xmm7 + movaps 0x66(%rsi), %xmm8 + movaps 0x76(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $10, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $10, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $10, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $10, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $10, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $10, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $10, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $10, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_10) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_10_bwd): + movaps -0x0a(%rsi), %xmm1 + + movaps -0x1a(%rsi), %xmm2 + palignr $10, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x2a(%rsi), %xmm3 + palignr $10, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x3a(%rsi), %xmm4 + palignr $10, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x4a(%rsi), %xmm5 + palignr $10, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x5a(%rsi), %xmm6 + palignr $10, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x6a(%rsi), %xmm7 + palignr $10, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x7a(%rsi), %xmm8 + palignr $10, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x8a(%rsi), %xmm9 + palignr $10, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_10_bwd) + movdqu 
%xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_11): + sub $0x80, %rdx + movaps -0x0b(%rsi), %xmm1 + movaps 0x05(%rsi), %xmm2 + movaps 0x15(%rsi), %xmm3 + movaps 0x25(%rsi), %xmm4 + movaps 0x35(%rsi), %xmm5 + movaps 0x45(%rsi), %xmm6 + movaps 0x55(%rsi), %xmm7 + movaps 0x65(%rsi), %xmm8 + movaps 0x75(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $11, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $11, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $11, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $11, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $11, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $11, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $11, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $11, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_11) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_11_bwd): + movaps -0x0b(%rsi), %xmm1 + + movaps -0x1b(%rsi), %xmm2 + palignr $11, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x2b(%rsi), %xmm3 + palignr $11, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x3b(%rsi), %xmm4 + palignr $11, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x4b(%rsi), %xmm5 + palignr $11, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x5b(%rsi), %xmm6 + palignr $11, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x6b(%rsi), %xmm7 + palignr $11, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x7b(%rsi), %xmm8 + palignr $11, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x8b(%rsi), %xmm9 + palignr $11, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_11_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY 
(L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_12): + sub $0x80, %rdx + movdqa -0x0c(%rsi), %xmm1 + movaps 0x04(%rsi), %xmm2 + movaps 0x14(%rsi), %xmm3 + movaps 0x24(%rsi), %xmm4 + movaps 0x34(%rsi), %xmm5 + movaps 0x44(%rsi), %xmm6 + movaps 0x54(%rsi), %xmm7 + movaps 0x64(%rsi), %xmm8 + movaps 0x74(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $12, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $12, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $12, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $12, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $12, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $12, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $12, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $12, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + + lea 0x80(%rdi), %rdi + jae L(shl_12) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_12_bwd): + movaps -0x0c(%rsi), %xmm1 + + movaps -0x1c(%rsi), %xmm2 + palignr $12, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x2c(%rsi), %xmm3 + palignr $12, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x3c(%rsi), %xmm4 + palignr $12, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x4c(%rsi), %xmm5 + palignr $12, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x5c(%rsi), %xmm6 + palignr $12, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x6c(%rsi), %xmm7 + palignr $12, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x7c(%rsi), %xmm8 + palignr $12, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x8c(%rsi), %xmm9 + palignr $12, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_12_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_13): + sub $0x80, %rdx + movaps -0x0d(%rsi), %xmm1 + 
movaps 0x03(%rsi), %xmm2 + movaps 0x13(%rsi), %xmm3 + movaps 0x23(%rsi), %xmm4 + movaps 0x33(%rsi), %xmm5 + movaps 0x43(%rsi), %xmm6 + movaps 0x53(%rsi), %xmm7 + movaps 0x63(%rsi), %xmm8 + movaps 0x73(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $13, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $13, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $13, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $13, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $13, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $13, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $13, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $13, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_13) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_13_bwd): + movaps -0x0d(%rsi), %xmm1 + + movaps -0x1d(%rsi), %xmm2 + palignr $13, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x2d(%rsi), %xmm3 + palignr $13, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x3d(%rsi), %xmm4 + palignr $13, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x4d(%rsi), %xmm5 + palignr $13, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x5d(%rsi), %xmm6 + palignr $13, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x6d(%rsi), %xmm7 + palignr $13, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x7d(%rsi), %xmm8 + palignr $13, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x8d(%rsi), %xmm9 + palignr $13, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_13_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_14): + sub $0x80, %rdx + movaps -0x0e(%rsi), %xmm1 + movaps 0x02(%rsi), %xmm2 + movaps 0x12(%rsi), %xmm3 + movaps 0x22(%rsi), %xmm4 + movaps 0x32(%rsi), %xmm5 + 
movaps 0x42(%rsi), %xmm6 + movaps 0x52(%rsi), %xmm7 + movaps 0x62(%rsi), %xmm8 + movaps 0x72(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $14, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $14, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $14, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $14, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $14, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $14, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $14, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $14, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_14) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_14_bwd): + movaps -0x0e(%rsi), %xmm1 + + movaps -0x1e(%rsi), %xmm2 + palignr $14, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x2e(%rsi), %xmm3 + palignr $14, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x3e(%rsi), %xmm4 + palignr $14, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x4e(%rsi), %xmm5 + palignr $14, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x5e(%rsi), %xmm6 + palignr $14, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x6e(%rsi), %xmm7 + palignr $14, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x7e(%rsi), %xmm8 + palignr $14, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x8e(%rsi), %xmm9 + palignr $14, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_14_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(shl_15): + sub $0x80, %rdx + movaps -0x0f(%rsi), %xmm1 + movaps 0x01(%rsi), %xmm2 + movaps 0x11(%rsi), %xmm3 + movaps 0x21(%rsi), %xmm4 + movaps 0x31(%rsi), %xmm5 + movaps 0x41(%rsi), %xmm6 + movaps 0x51(%rsi), %xmm7 + movaps 0x61(%rsi), %xmm8 + movaps 0x71(%rsi), %xmm9 + lea 
0x80(%rsi), %rsi + palignr $15, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $15, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $15, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $15, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $15, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $15, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $15, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $15, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_15) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(shl_15_bwd): + movaps -0x0f(%rsi), %xmm1 + + movaps -0x1f(%rsi), %xmm2 + palignr $15, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) + + movaps -0x2f(%rsi), %xmm3 + palignr $15, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) + + movaps -0x3f(%rsi), %xmm4 + palignr $15, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) + + movaps -0x4f(%rsi), %xmm5 + palignr $15, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) + + movaps -0x5f(%rsi), %xmm6 + palignr $15, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) + + movaps -0x6f(%rsi), %xmm7 + palignr $15, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) + + movaps -0x7f(%rsi), %xmm8 + palignr $15, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) + + movaps -0x8f(%rsi), %xmm9 + palignr $15, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) + + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_15_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(gobble_mem_fwd): + movdqu (%rsi), %xmm1 + movdqu %xmm0, (%r8) + movdqa %xmm1, (%rdi) + sub $16, %rdx + add $16, %rsi + add $16, %rdi + +#ifdef SHARED_CACHE_SIZE_HALF + mov $SHARED_CACHE_SIZE_HALF, %RCX_LP +#else + mov __x86_shared_cache_size_half(%rip), %RCX_LP +#endif +#ifdef USE_AS_MEMMOVE + mov %rsi, %r9 + sub %rdi, %r9 + cmp %rdx, %r9 + jae L(memmove_is_memcpy_fwd) + cmp %rcx, %r9 
+ jbe L(ll_cache_copy_fwd_start) +L(memmove_is_memcpy_fwd): +#endif + cmp %rcx, %rdx + ja L(bigger_in_fwd) + mov %rdx, %rcx +L(bigger_in_fwd): + sub %rcx, %rdx + cmp $0x1000, %rdx + jbe L(ll_cache_copy_fwd) + + mov %rcx, %r9 + shl $3, %r9 + cmp %r9, %rdx + jbe L(2steps_copy_fwd) + add %rcx, %rdx + xor %rcx, %rcx +L(2steps_copy_fwd): + sub $0x80, %rdx +L(gobble_mem_fwd_loop): + sub $0x80, %rdx + prefetcht0 0x200(%rsi) + prefetcht0 0x300(%rsi) + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + movdqu 0x40(%rsi), %xmm4 + movdqu 0x50(%rsi), %xmm5 + movdqu 0x60(%rsi), %xmm6 + movdqu 0x70(%rsi), %xmm7 + lfence + movntdq %xmm0, (%rdi) + movntdq %xmm1, 0x10(%rdi) + movntdq %xmm2, 0x20(%rdi) + movntdq %xmm3, 0x30(%rdi) + movntdq %xmm4, 0x40(%rdi) + movntdq %xmm5, 0x50(%rdi) + movntdq %xmm6, 0x60(%rdi) + movntdq %xmm7, 0x70(%rdi) + lea 0x80(%rsi), %rsi + lea 0x80(%rdi), %rdi + jae L(gobble_mem_fwd_loop) + sfence + cmp $0x80, %rcx + jb L(gobble_mem_fwd_end) + add $0x80, %rdx +L(ll_cache_copy_fwd): + add %rcx, %rdx +L(ll_cache_copy_fwd_start): + sub $0x80, %rdx +L(gobble_ll_loop_fwd): + prefetchnta 0x1c0(%rsi) + prefetchnta 0x280(%rsi) + prefetchnta 0x1c0(%rdi) + prefetchnta 0x280(%rdi) + sub $0x80, %rdx + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + movdqu 0x40(%rsi), %xmm4 + movdqu 0x50(%rsi), %xmm5 + movdqu 0x60(%rsi), %xmm6 + movdqu 0x70(%rsi), %xmm7 + movdqa %xmm0, (%rdi) + movdqa %xmm1, 0x10(%rdi) + movdqa %xmm2, 0x20(%rdi) + movdqa %xmm3, 0x30(%rdi) + movdqa %xmm4, 0x40(%rdi) + movdqa %xmm5, 0x50(%rdi) + movdqa %xmm6, 0x60(%rdi) + movdqa %xmm7, 0x70(%rdi) + lea 0x80(%rsi), %rsi + lea 0x80(%rdi), %rdi + jae L(gobble_ll_loop_fwd) +L(gobble_mem_fwd_end): + add $0x80, %rdx + add %rdx, %rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + + .p2align 4 +L(gobble_mem_bwd): + add %rdx, %rsi + add %rdx, %rdi + + movdqu -16(%rsi), %xmm0 + 
lea -16(%rdi), %r8 + mov %rdi, %r9 + and $-16, %rdi + sub %rdi, %r9 + sub %r9, %rsi + sub %r9, %rdx + + +#ifdef SHARED_CACHE_SIZE_HALF + mov $SHARED_CACHE_SIZE_HALF, %RCX_LP +#else + mov __x86_shared_cache_size_half(%rip), %RCX_LP +#endif +#ifdef USE_AS_MEMMOVE + mov %rdi, %r9 + sub %rsi, %r9 + cmp %rdx, %r9 + jae L(memmove_is_memcpy_bwd) + cmp %rcx, %r9 + jbe L(ll_cache_copy_bwd_start) +L(memmove_is_memcpy_bwd): +#endif + cmp %rcx, %rdx + ja L(bigger) + mov %rdx, %rcx +L(bigger): + sub %rcx, %rdx + cmp $0x1000, %rdx + jbe L(ll_cache_copy) + + mov %rcx, %r9 + shl $3, %r9 + cmp %r9, %rdx + jbe L(2steps_copy) + add %rcx, %rdx + xor %rcx, %rcx +L(2steps_copy): + sub $0x80, %rdx +L(gobble_mem_bwd_loop): + sub $0x80, %rdx + prefetcht0 -0x200(%rsi) + prefetcht0 -0x300(%rsi) + movdqu -0x10(%rsi), %xmm1 + movdqu -0x20(%rsi), %xmm2 + movdqu -0x30(%rsi), %xmm3 + movdqu -0x40(%rsi), %xmm4 + movdqu -0x50(%rsi), %xmm5 + movdqu -0x60(%rsi), %xmm6 + movdqu -0x70(%rsi), %xmm7 + movdqu -0x80(%rsi), %xmm8 + lfence + movntdq %xmm1, -0x10(%rdi) + movntdq %xmm2, -0x20(%rdi) + movntdq %xmm3, -0x30(%rdi) + movntdq %xmm4, -0x40(%rdi) + movntdq %xmm5, -0x50(%rdi) + movntdq %xmm6, -0x60(%rdi) + movntdq %xmm7, -0x70(%rdi) + movntdq %xmm8, -0x80(%rdi) + lea -0x80(%rsi), %rsi + lea -0x80(%rdi), %rdi + jae L(gobble_mem_bwd_loop) + sfence + cmp $0x80, %rcx + jb L(gobble_mem_bwd_end) + add $0x80, %rdx +L(ll_cache_copy): + add %rcx, %rdx +L(ll_cache_copy_bwd_start): + sub $0x80, %rdx +L(gobble_ll_loop): + prefetchnta -0x1c0(%rsi) + prefetchnta -0x280(%rsi) + prefetchnta -0x1c0(%rdi) + prefetchnta -0x280(%rdi) + sub $0x80, %rdx + movdqu -0x10(%rsi), %xmm1 + movdqu -0x20(%rsi), %xmm2 + movdqu -0x30(%rsi), %xmm3 + movdqu -0x40(%rsi), %xmm4 + movdqu -0x50(%rsi), %xmm5 + movdqu -0x60(%rsi), %xmm6 + movdqu -0x70(%rsi), %xmm7 + movdqu -0x80(%rsi), %xmm8 + movdqa %xmm1, -0x10(%rdi) + movdqa %xmm2, -0x20(%rdi) + movdqa %xmm3, -0x30(%rdi) + movdqa %xmm4, -0x40(%rdi) + movdqa %xmm5, -0x50(%rdi) + movdqa 
%xmm6, -0x60(%rdi) + movdqa %xmm7, -0x70(%rdi) + movdqa %xmm8, -0x80(%rdi) + lea -0x80(%rsi), %rsi + lea -0x80(%rdi), %rdi + jae L(gobble_ll_loop) +L(gobble_mem_bwd_end): + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rsi + sub %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + + .p2align 4 +L(fwd_write_128bytes): + lddqu -128(%rsi), %xmm0 + movdqu %xmm0, -128(%rdi) +L(fwd_write_112bytes): + lddqu -112(%rsi), %xmm0 + movdqu %xmm0, -112(%rdi) +L(fwd_write_96bytes): + lddqu -96(%rsi), %xmm0 + movdqu %xmm0, -96(%rdi) +L(fwd_write_80bytes): + lddqu -80(%rsi), %xmm0 + movdqu %xmm0, -80(%rdi) +L(fwd_write_64bytes): + lddqu -64(%rsi), %xmm0 + movdqu %xmm0, -64(%rdi) +L(fwd_write_48bytes): + lddqu -48(%rsi), %xmm0 + movdqu %xmm0, -48(%rdi) +L(fwd_write_32bytes): + lddqu -32(%rsi), %xmm0 + movdqu %xmm0, -32(%rdi) +L(fwd_write_16bytes): + lddqu -16(%rsi), %xmm0 + movdqu %xmm0, -16(%rdi) +L(fwd_write_0bytes): + ret + + + .p2align 4 +L(fwd_write_143bytes): + lddqu -143(%rsi), %xmm0 + movdqu %xmm0, -143(%rdi) +L(fwd_write_127bytes): + lddqu -127(%rsi), %xmm0 + movdqu %xmm0, -127(%rdi) +L(fwd_write_111bytes): + lddqu -111(%rsi), %xmm0 + movdqu %xmm0, -111(%rdi) +L(fwd_write_95bytes): + lddqu -95(%rsi), %xmm0 + movdqu %xmm0, -95(%rdi) +L(fwd_write_79bytes): + lddqu -79(%rsi), %xmm0 + movdqu %xmm0, -79(%rdi) +L(fwd_write_63bytes): + lddqu -63(%rsi), %xmm0 + movdqu %xmm0, -63(%rdi) +L(fwd_write_47bytes): + lddqu -47(%rsi), %xmm0 + movdqu %xmm0, -47(%rdi) +L(fwd_write_31bytes): + lddqu -31(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -31(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_15bytes): + mov -15(%rsi), %rdx + mov -8(%rsi), %rcx + mov %rdx, -15(%rdi) + mov %rcx, -8(%rdi) + ret + + .p2align 4 +L(fwd_write_142bytes): + lddqu -142(%rsi), %xmm0 + movdqu %xmm0, -142(%rdi) +L(fwd_write_126bytes): + lddqu -126(%rsi), %xmm0 + movdqu %xmm0, -126(%rdi) +L(fwd_write_110bytes): + lddqu -110(%rsi), %xmm0 + movdqu %xmm0, -110(%rdi) 
+L(fwd_write_94bytes): + lddqu -94(%rsi), %xmm0 + movdqu %xmm0, -94(%rdi) +L(fwd_write_78bytes): + lddqu -78(%rsi), %xmm0 + movdqu %xmm0, -78(%rdi) +L(fwd_write_62bytes): + lddqu -62(%rsi), %xmm0 + movdqu %xmm0, -62(%rdi) +L(fwd_write_46bytes): + lddqu -46(%rsi), %xmm0 + movdqu %xmm0, -46(%rdi) +L(fwd_write_30bytes): + lddqu -30(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -30(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_14bytes): + mov -14(%rsi), %rdx + mov -8(%rsi), %rcx + mov %rdx, -14(%rdi) + mov %rcx, -8(%rdi) + ret + + .p2align 4 +L(fwd_write_141bytes): + lddqu -141(%rsi), %xmm0 + movdqu %xmm0, -141(%rdi) +L(fwd_write_125bytes): + lddqu -125(%rsi), %xmm0 + movdqu %xmm0, -125(%rdi) +L(fwd_write_109bytes): + lddqu -109(%rsi), %xmm0 + movdqu %xmm0, -109(%rdi) +L(fwd_write_93bytes): + lddqu -93(%rsi), %xmm0 + movdqu %xmm0, -93(%rdi) +L(fwd_write_77bytes): + lddqu -77(%rsi), %xmm0 + movdqu %xmm0, -77(%rdi) +L(fwd_write_61bytes): + lddqu -61(%rsi), %xmm0 + movdqu %xmm0, -61(%rdi) +L(fwd_write_45bytes): + lddqu -45(%rsi), %xmm0 + movdqu %xmm0, -45(%rdi) +L(fwd_write_29bytes): + lddqu -29(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -29(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_13bytes): + mov -13(%rsi), %rdx + mov -8(%rsi), %rcx + mov %rdx, -13(%rdi) + mov %rcx, -8(%rdi) + ret + + .p2align 4 +L(fwd_write_140bytes): + lddqu -140(%rsi), %xmm0 + movdqu %xmm0, -140(%rdi) +L(fwd_write_124bytes): + lddqu -124(%rsi), %xmm0 + movdqu %xmm0, -124(%rdi) +L(fwd_write_108bytes): + lddqu -108(%rsi), %xmm0 + movdqu %xmm0, -108(%rdi) +L(fwd_write_92bytes): + lddqu -92(%rsi), %xmm0 + movdqu %xmm0, -92(%rdi) +L(fwd_write_76bytes): + lddqu -76(%rsi), %xmm0 + movdqu %xmm0, -76(%rdi) +L(fwd_write_60bytes): + lddqu -60(%rsi), %xmm0 + movdqu %xmm0, -60(%rdi) +L(fwd_write_44bytes): + lddqu -44(%rsi), %xmm0 + movdqu %xmm0, -44(%rdi) +L(fwd_write_28bytes): + lddqu -28(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, 
-28(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_12bytes): + mov -12(%rsi), %rdx + mov -4(%rsi), %ecx + mov %rdx, -12(%rdi) + mov %ecx, -4(%rdi) + ret + + .p2align 4 +L(fwd_write_139bytes): + lddqu -139(%rsi), %xmm0 + movdqu %xmm0, -139(%rdi) +L(fwd_write_123bytes): + lddqu -123(%rsi), %xmm0 + movdqu %xmm0, -123(%rdi) +L(fwd_write_107bytes): + lddqu -107(%rsi), %xmm0 + movdqu %xmm0, -107(%rdi) +L(fwd_write_91bytes): + lddqu -91(%rsi), %xmm0 + movdqu %xmm0, -91(%rdi) +L(fwd_write_75bytes): + lddqu -75(%rsi), %xmm0 + movdqu %xmm0, -75(%rdi) +L(fwd_write_59bytes): + lddqu -59(%rsi), %xmm0 + movdqu %xmm0, -59(%rdi) +L(fwd_write_43bytes): + lddqu -43(%rsi), %xmm0 + movdqu %xmm0, -43(%rdi) +L(fwd_write_27bytes): + lddqu -27(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -27(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_11bytes): + mov -11(%rsi), %rdx + mov -4(%rsi), %ecx + mov %rdx, -11(%rdi) + mov %ecx, -4(%rdi) + ret + + .p2align 4 +L(fwd_write_138bytes): + lddqu -138(%rsi), %xmm0 + movdqu %xmm0, -138(%rdi) +L(fwd_write_122bytes): + lddqu -122(%rsi), %xmm0 + movdqu %xmm0, -122(%rdi) +L(fwd_write_106bytes): + lddqu -106(%rsi), %xmm0 + movdqu %xmm0, -106(%rdi) +L(fwd_write_90bytes): + lddqu -90(%rsi), %xmm0 + movdqu %xmm0, -90(%rdi) +L(fwd_write_74bytes): + lddqu -74(%rsi), %xmm0 + movdqu %xmm0, -74(%rdi) +L(fwd_write_58bytes): + lddqu -58(%rsi), %xmm0 + movdqu %xmm0, -58(%rdi) +L(fwd_write_42bytes): + lddqu -42(%rsi), %xmm0 + movdqu %xmm0, -42(%rdi) +L(fwd_write_26bytes): + lddqu -26(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -26(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_10bytes): + mov -10(%rsi), %rdx + mov -4(%rsi), %ecx + mov %rdx, -10(%rdi) + mov %ecx, -4(%rdi) + ret + + .p2align 4 +L(fwd_write_137bytes): + lddqu -137(%rsi), %xmm0 + movdqu %xmm0, -137(%rdi) +L(fwd_write_121bytes): + lddqu -121(%rsi), %xmm0 + movdqu %xmm0, -121(%rdi) +L(fwd_write_105bytes): + lddqu -105(%rsi), %xmm0 + 
movdqu %xmm0, -105(%rdi) +L(fwd_write_89bytes): + lddqu -89(%rsi), %xmm0 + movdqu %xmm0, -89(%rdi) +L(fwd_write_73bytes): + lddqu -73(%rsi), %xmm0 + movdqu %xmm0, -73(%rdi) +L(fwd_write_57bytes): + lddqu -57(%rsi), %xmm0 + movdqu %xmm0, -57(%rdi) +L(fwd_write_41bytes): + lddqu -41(%rsi), %xmm0 + movdqu %xmm0, -41(%rdi) +L(fwd_write_25bytes): + lddqu -25(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -25(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_9bytes): + mov -9(%rsi), %rdx + mov -4(%rsi), %ecx + mov %rdx, -9(%rdi) + mov %ecx, -4(%rdi) + ret + + .p2align 4 +L(fwd_write_136bytes): + lddqu -136(%rsi), %xmm0 + movdqu %xmm0, -136(%rdi) +L(fwd_write_120bytes): + lddqu -120(%rsi), %xmm0 + movdqu %xmm0, -120(%rdi) +L(fwd_write_104bytes): + lddqu -104(%rsi), %xmm0 + movdqu %xmm0, -104(%rdi) +L(fwd_write_88bytes): + lddqu -88(%rsi), %xmm0 + movdqu %xmm0, -88(%rdi) +L(fwd_write_72bytes): + lddqu -72(%rsi), %xmm0 + movdqu %xmm0, -72(%rdi) +L(fwd_write_56bytes): + lddqu -56(%rsi), %xmm0 + movdqu %xmm0, -56(%rdi) +L(fwd_write_40bytes): + lddqu -40(%rsi), %xmm0 + movdqu %xmm0, -40(%rdi) +L(fwd_write_24bytes): + lddqu -24(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -24(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_8bytes): + mov -8(%rsi), %rdx + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(fwd_write_135bytes): + lddqu -135(%rsi), %xmm0 + movdqu %xmm0, -135(%rdi) +L(fwd_write_119bytes): + lddqu -119(%rsi), %xmm0 + movdqu %xmm0, -119(%rdi) +L(fwd_write_103bytes): + lddqu -103(%rsi), %xmm0 + movdqu %xmm0, -103(%rdi) +L(fwd_write_87bytes): + lddqu -87(%rsi), %xmm0 + movdqu %xmm0, -87(%rdi) +L(fwd_write_71bytes): + lddqu -71(%rsi), %xmm0 + movdqu %xmm0, -71(%rdi) +L(fwd_write_55bytes): + lddqu -55(%rsi), %xmm0 + movdqu %xmm0, -55(%rdi) +L(fwd_write_39bytes): + lddqu -39(%rsi), %xmm0 + movdqu %xmm0, -39(%rdi) +L(fwd_write_23bytes): + lddqu -23(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -23(%rdi) + movdqu %xmm1, 
-16(%rdi) + ret + + .p2align 4 +L(fwd_write_7bytes): + mov -7(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -7(%rdi) + mov %ecx, -4(%rdi) + ret + + .p2align 4 +L(fwd_write_134bytes): + lddqu -134(%rsi), %xmm0 + movdqu %xmm0, -134(%rdi) +L(fwd_write_118bytes): + lddqu -118(%rsi), %xmm0 + movdqu %xmm0, -118(%rdi) +L(fwd_write_102bytes): + lddqu -102(%rsi), %xmm0 + movdqu %xmm0, -102(%rdi) +L(fwd_write_86bytes): + lddqu -86(%rsi), %xmm0 + movdqu %xmm0, -86(%rdi) +L(fwd_write_70bytes): + lddqu -70(%rsi), %xmm0 + movdqu %xmm0, -70(%rdi) +L(fwd_write_54bytes): + lddqu -54(%rsi), %xmm0 + movdqu %xmm0, -54(%rdi) +L(fwd_write_38bytes): + lddqu -38(%rsi), %xmm0 + movdqu %xmm0, -38(%rdi) +L(fwd_write_22bytes): + lddqu -22(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -22(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_6bytes): + mov -6(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -6(%rdi) + mov %ecx, -4(%rdi) + ret + + .p2align 4 +L(fwd_write_133bytes): + lddqu -133(%rsi), %xmm0 + movdqu %xmm0, -133(%rdi) +L(fwd_write_117bytes): + lddqu -117(%rsi), %xmm0 + movdqu %xmm0, -117(%rdi) +L(fwd_write_101bytes): + lddqu -101(%rsi), %xmm0 + movdqu %xmm0, -101(%rdi) +L(fwd_write_85bytes): + lddqu -85(%rsi), %xmm0 + movdqu %xmm0, -85(%rdi) +L(fwd_write_69bytes): + lddqu -69(%rsi), %xmm0 + movdqu %xmm0, -69(%rdi) +L(fwd_write_53bytes): + lddqu -53(%rsi), %xmm0 + movdqu %xmm0, -53(%rdi) +L(fwd_write_37bytes): + lddqu -37(%rsi), %xmm0 + movdqu %xmm0, -37(%rdi) +L(fwd_write_21bytes): + lddqu -21(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -21(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_5bytes): + mov -5(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -5(%rdi) + mov %ecx, -4(%rdi) + ret + + .p2align 4 +L(fwd_write_132bytes): + lddqu -132(%rsi), %xmm0 + movdqu %xmm0, -132(%rdi) +L(fwd_write_116bytes): + lddqu -116(%rsi), %xmm0 + movdqu %xmm0, -116(%rdi) +L(fwd_write_100bytes): + lddqu -100(%rsi), %xmm0 + movdqu %xmm0, -100(%rdi) 
+L(fwd_write_84bytes): + lddqu -84(%rsi), %xmm0 + movdqu %xmm0, -84(%rdi) +L(fwd_write_68bytes): + lddqu -68(%rsi), %xmm0 + movdqu %xmm0, -68(%rdi) +L(fwd_write_52bytes): + lddqu -52(%rsi), %xmm0 + movdqu %xmm0, -52(%rdi) +L(fwd_write_36bytes): + lddqu -36(%rsi), %xmm0 + movdqu %xmm0, -36(%rdi) +L(fwd_write_20bytes): + lddqu -20(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -20(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_4bytes): + mov -4(%rsi), %edx + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(fwd_write_131bytes): + lddqu -131(%rsi), %xmm0 + movdqu %xmm0, -131(%rdi) +L(fwd_write_115bytes): + lddqu -115(%rsi), %xmm0 + movdqu %xmm0, -115(%rdi) +L(fwd_write_99bytes): + lddqu -99(%rsi), %xmm0 + movdqu %xmm0, -99(%rdi) +L(fwd_write_83bytes): + lddqu -83(%rsi), %xmm0 + movdqu %xmm0, -83(%rdi) +L(fwd_write_67bytes): + lddqu -67(%rsi), %xmm0 + movdqu %xmm0, -67(%rdi) +L(fwd_write_51bytes): + lddqu -51(%rsi), %xmm0 + movdqu %xmm0, -51(%rdi) +L(fwd_write_35bytes): + lddqu -35(%rsi), %xmm0 + movdqu %xmm0, -35(%rdi) +L(fwd_write_19bytes): + lddqu -19(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -19(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_3bytes): + mov -3(%rsi), %dx + mov -2(%rsi), %cx + mov %dx, -3(%rdi) + mov %cx, -2(%rdi) + ret + + .p2align 4 +L(fwd_write_130bytes): + lddqu -130(%rsi), %xmm0 + movdqu %xmm0, -130(%rdi) +L(fwd_write_114bytes): + lddqu -114(%rsi), %xmm0 + movdqu %xmm0, -114(%rdi) +L(fwd_write_98bytes): + lddqu -98(%rsi), %xmm0 + movdqu %xmm0, -98(%rdi) +L(fwd_write_82bytes): + lddqu -82(%rsi), %xmm0 + movdqu %xmm0, -82(%rdi) +L(fwd_write_66bytes): + lddqu -66(%rsi), %xmm0 + movdqu %xmm0, -66(%rdi) +L(fwd_write_50bytes): + lddqu -50(%rsi), %xmm0 + movdqu %xmm0, -50(%rdi) +L(fwd_write_34bytes): + lddqu -34(%rsi), %xmm0 + movdqu %xmm0, -34(%rdi) +L(fwd_write_18bytes): + lddqu -18(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -18(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 
+L(fwd_write_2bytes): + movzwl -2(%rsi), %edx + mov %dx, -2(%rdi) + ret + + .p2align 4 +L(fwd_write_129bytes): + lddqu -129(%rsi), %xmm0 + movdqu %xmm0, -129(%rdi) +L(fwd_write_113bytes): + lddqu -113(%rsi), %xmm0 + movdqu %xmm0, -113(%rdi) +L(fwd_write_97bytes): + lddqu -97(%rsi), %xmm0 + movdqu %xmm0, -97(%rdi) +L(fwd_write_81bytes): + lddqu -81(%rsi), %xmm0 + movdqu %xmm0, -81(%rdi) +L(fwd_write_65bytes): + lddqu -65(%rsi), %xmm0 + movdqu %xmm0, -65(%rdi) +L(fwd_write_49bytes): + lddqu -49(%rsi), %xmm0 + movdqu %xmm0, -49(%rdi) +L(fwd_write_33bytes): + lddqu -33(%rsi), %xmm0 + movdqu %xmm0, -33(%rdi) +L(fwd_write_17bytes): + lddqu -17(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -17(%rdi) + movdqu %xmm1, -16(%rdi) + ret + + .p2align 4 +L(fwd_write_1bytes): + movzbl -1(%rsi), %edx + mov %dl, -1(%rdi) + ret + + .p2align 4 +L(bwd_write_128bytes): + lddqu 112(%rsi), %xmm0 + movdqu %xmm0, 112(%rdi) +L(bwd_write_112bytes): + lddqu 96(%rsi), %xmm0 + movdqu %xmm0, 96(%rdi) +L(bwd_write_96bytes): + lddqu 80(%rsi), %xmm0 + movdqu %xmm0, 80(%rdi) +L(bwd_write_80bytes): + lddqu 64(%rsi), %xmm0 + movdqu %xmm0, 64(%rdi) +L(bwd_write_64bytes): + lddqu 48(%rsi), %xmm0 + movdqu %xmm0, 48(%rdi) +L(bwd_write_48bytes): + lddqu 32(%rsi), %xmm0 + movdqu %xmm0, 32(%rdi) +L(bwd_write_32bytes): + lddqu 16(%rsi), %xmm0 + movdqu %xmm0, 16(%rdi) +L(bwd_write_16bytes): + lddqu (%rsi), %xmm0 + movdqu %xmm0, (%rdi) +L(bwd_write_0bytes): + ret + + .p2align 4 +L(bwd_write_143bytes): + lddqu 127(%rsi), %xmm0 + movdqu %xmm0, 127(%rdi) +L(bwd_write_127bytes): + lddqu 111(%rsi), %xmm0 + movdqu %xmm0, 111(%rdi) +L(bwd_write_111bytes): + lddqu 95(%rsi), %xmm0 + movdqu %xmm0, 95(%rdi) +L(bwd_write_95bytes): + lddqu 79(%rsi), %xmm0 + movdqu %xmm0, 79(%rdi) +L(bwd_write_79bytes): + lddqu 63(%rsi), %xmm0 + movdqu %xmm0, 63(%rdi) +L(bwd_write_63bytes): + lddqu 47(%rsi), %xmm0 + movdqu %xmm0, 47(%rdi) +L(bwd_write_47bytes): + lddqu 31(%rsi), %xmm0 + movdqu %xmm0, 31(%rdi) +L(bwd_write_31bytes): + 
lddqu 15(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 15(%rdi) + movdqu %xmm1, (%rdi) + ret + + + .p2align 4 +L(bwd_write_15bytes): + mov 7(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 7(%rdi) + mov %rcx, (%rdi) + ret + + .p2align 4 +L(bwd_write_142bytes): + lddqu 126(%rsi), %xmm0 + movdqu %xmm0, 126(%rdi) +L(bwd_write_126bytes): + lddqu 110(%rsi), %xmm0 + movdqu %xmm0, 110(%rdi) +L(bwd_write_110bytes): + lddqu 94(%rsi), %xmm0 + movdqu %xmm0, 94(%rdi) +L(bwd_write_94bytes): + lddqu 78(%rsi), %xmm0 + movdqu %xmm0, 78(%rdi) +L(bwd_write_78bytes): + lddqu 62(%rsi), %xmm0 + movdqu %xmm0, 62(%rdi) +L(bwd_write_62bytes): + lddqu 46(%rsi), %xmm0 + movdqu %xmm0, 46(%rdi) +L(bwd_write_46bytes): + lddqu 30(%rsi), %xmm0 + movdqu %xmm0, 30(%rdi) +L(bwd_write_30bytes): + lddqu 14(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 14(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_14bytes): + mov 6(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 6(%rdi) + mov %rcx, (%rdi) + ret + + .p2align 4 +L(bwd_write_141bytes): + lddqu 125(%rsi), %xmm0 + movdqu %xmm0, 125(%rdi) +L(bwd_write_125bytes): + lddqu 109(%rsi), %xmm0 + movdqu %xmm0, 109(%rdi) +L(bwd_write_109bytes): + lddqu 93(%rsi), %xmm0 + movdqu %xmm0, 93(%rdi) +L(bwd_write_93bytes): + lddqu 77(%rsi), %xmm0 + movdqu %xmm0, 77(%rdi) +L(bwd_write_77bytes): + lddqu 61(%rsi), %xmm0 + movdqu %xmm0, 61(%rdi) +L(bwd_write_61bytes): + lddqu 45(%rsi), %xmm0 + movdqu %xmm0, 45(%rdi) +L(bwd_write_45bytes): + lddqu 29(%rsi), %xmm0 + movdqu %xmm0, 29(%rdi) +L(bwd_write_29bytes): + lddqu 13(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 13(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_13bytes): + mov 5(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 5(%rdi) + mov %rcx, (%rdi) + ret + + .p2align 4 +L(bwd_write_140bytes): + lddqu 124(%rsi), %xmm0 + movdqu %xmm0, 124(%rdi) +L(bwd_write_124bytes): + lddqu 108(%rsi), %xmm0 + movdqu %xmm0, 108(%rdi) +L(bwd_write_108bytes): + lddqu 92(%rsi), %xmm0 + movdqu %xmm0, 
92(%rdi) +L(bwd_write_92bytes): + lddqu 76(%rsi), %xmm0 + movdqu %xmm0, 76(%rdi) +L(bwd_write_76bytes): + lddqu 60(%rsi), %xmm0 + movdqu %xmm0, 60(%rdi) +L(bwd_write_60bytes): + lddqu 44(%rsi), %xmm0 + movdqu %xmm0, 44(%rdi) +L(bwd_write_44bytes): + lddqu 28(%rsi), %xmm0 + movdqu %xmm0, 28(%rdi) +L(bwd_write_28bytes): + lddqu 12(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 12(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_12bytes): + mov 4(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 4(%rdi) + mov %rcx, (%rdi) + ret + + .p2align 4 +L(bwd_write_139bytes): + lddqu 123(%rsi), %xmm0 + movdqu %xmm0, 123(%rdi) +L(bwd_write_123bytes): + lddqu 107(%rsi), %xmm0 + movdqu %xmm0, 107(%rdi) +L(bwd_write_107bytes): + lddqu 91(%rsi), %xmm0 + movdqu %xmm0, 91(%rdi) +L(bwd_write_91bytes): + lddqu 75(%rsi), %xmm0 + movdqu %xmm0, 75(%rdi) +L(bwd_write_75bytes): + lddqu 59(%rsi), %xmm0 + movdqu %xmm0, 59(%rdi) +L(bwd_write_59bytes): + lddqu 43(%rsi), %xmm0 + movdqu %xmm0, 43(%rdi) +L(bwd_write_43bytes): + lddqu 27(%rsi), %xmm0 + movdqu %xmm0, 27(%rdi) +L(bwd_write_27bytes): + lddqu 11(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 11(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_11bytes): + mov 3(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 3(%rdi) + mov %rcx, (%rdi) + ret + + .p2align 4 +L(bwd_write_138bytes): + lddqu 122(%rsi), %xmm0 + movdqu %xmm0, 122(%rdi) +L(bwd_write_122bytes): + lddqu 106(%rsi), %xmm0 + movdqu %xmm0, 106(%rdi) +L(bwd_write_106bytes): + lddqu 90(%rsi), %xmm0 + movdqu %xmm0, 90(%rdi) +L(bwd_write_90bytes): + lddqu 74(%rsi), %xmm0 + movdqu %xmm0, 74(%rdi) +L(bwd_write_74bytes): + lddqu 58(%rsi), %xmm0 + movdqu %xmm0, 58(%rdi) +L(bwd_write_58bytes): + lddqu 42(%rsi), %xmm0 + movdqu %xmm0, 42(%rdi) +L(bwd_write_42bytes): + lddqu 26(%rsi), %xmm0 + movdqu %xmm0, 26(%rdi) +L(bwd_write_26bytes): + lddqu 10(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 10(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_10bytes): 
+ mov 2(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 2(%rdi) + mov %rcx, (%rdi) + ret + + .p2align 4 +L(bwd_write_137bytes): + lddqu 121(%rsi), %xmm0 + movdqu %xmm0, 121(%rdi) +L(bwd_write_121bytes): + lddqu 105(%rsi), %xmm0 + movdqu %xmm0, 105(%rdi) +L(bwd_write_105bytes): + lddqu 89(%rsi), %xmm0 + movdqu %xmm0, 89(%rdi) +L(bwd_write_89bytes): + lddqu 73(%rsi), %xmm0 + movdqu %xmm0, 73(%rdi) +L(bwd_write_73bytes): + lddqu 57(%rsi), %xmm0 + movdqu %xmm0, 57(%rdi) +L(bwd_write_57bytes): + lddqu 41(%rsi), %xmm0 + movdqu %xmm0, 41(%rdi) +L(bwd_write_41bytes): + lddqu 25(%rsi), %xmm0 + movdqu %xmm0, 25(%rdi) +L(bwd_write_25bytes): + lddqu 9(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 9(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_9bytes): + mov 1(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 1(%rdi) + mov %rcx, (%rdi) + ret + + .p2align 4 +L(bwd_write_136bytes): + lddqu 120(%rsi), %xmm0 + movdqu %xmm0, 120(%rdi) +L(bwd_write_120bytes): + lddqu 104(%rsi), %xmm0 + movdqu %xmm0, 104(%rdi) +L(bwd_write_104bytes): + lddqu 88(%rsi), %xmm0 + movdqu %xmm0, 88(%rdi) +L(bwd_write_88bytes): + lddqu 72(%rsi), %xmm0 + movdqu %xmm0, 72(%rdi) +L(bwd_write_72bytes): + lddqu 56(%rsi), %xmm0 + movdqu %xmm0, 56(%rdi) +L(bwd_write_56bytes): + lddqu 40(%rsi), %xmm0 + movdqu %xmm0, 40(%rdi) +L(bwd_write_40bytes): + lddqu 24(%rsi), %xmm0 + movdqu %xmm0, 24(%rdi) +L(bwd_write_24bytes): + lddqu 8(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 8(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_8bytes): + mov (%rsi), %rdx + mov %rdx, (%rdi) + ret + + .p2align 4 +L(bwd_write_135bytes): + lddqu 119(%rsi), %xmm0 + movdqu %xmm0, 119(%rdi) +L(bwd_write_119bytes): + lddqu 103(%rsi), %xmm0 + movdqu %xmm0, 103(%rdi) +L(bwd_write_103bytes): + lddqu 87(%rsi), %xmm0 + movdqu %xmm0, 87(%rdi) +L(bwd_write_87bytes): + lddqu 71(%rsi), %xmm0 + movdqu %xmm0, 71(%rdi) +L(bwd_write_71bytes): + lddqu 55(%rsi), %xmm0 + movdqu %xmm0, 55(%rdi) +L(bwd_write_55bytes): + lddqu 
39(%rsi), %xmm0 + movdqu %xmm0, 39(%rdi) +L(bwd_write_39bytes): + lddqu 23(%rsi), %xmm0 + movdqu %xmm0, 23(%rdi) +L(bwd_write_23bytes): + lddqu 7(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 7(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_7bytes): + mov 3(%rsi), %edx + mov (%rsi), %ecx + mov %edx, 3(%rdi) + mov %ecx, (%rdi) + ret + + .p2align 4 +L(bwd_write_134bytes): + lddqu 118(%rsi), %xmm0 + movdqu %xmm0, 118(%rdi) +L(bwd_write_118bytes): + lddqu 102(%rsi), %xmm0 + movdqu %xmm0, 102(%rdi) +L(bwd_write_102bytes): + lddqu 86(%rsi), %xmm0 + movdqu %xmm0, 86(%rdi) +L(bwd_write_86bytes): + lddqu 70(%rsi), %xmm0 + movdqu %xmm0, 70(%rdi) +L(bwd_write_70bytes): + lddqu 54(%rsi), %xmm0 + movdqu %xmm0, 54(%rdi) +L(bwd_write_54bytes): + lddqu 38(%rsi), %xmm0 + movdqu %xmm0, 38(%rdi) +L(bwd_write_38bytes): + lddqu 22(%rsi), %xmm0 + movdqu %xmm0, 22(%rdi) +L(bwd_write_22bytes): + lddqu 6(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 6(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_6bytes): + mov 2(%rsi), %edx + mov (%rsi), %ecx + mov %edx, 2(%rdi) + mov %ecx, (%rdi) + ret + + .p2align 4 +L(bwd_write_133bytes): + lddqu 117(%rsi), %xmm0 + movdqu %xmm0, 117(%rdi) +L(bwd_write_117bytes): + lddqu 101(%rsi), %xmm0 + movdqu %xmm0, 101(%rdi) +L(bwd_write_101bytes): + lddqu 85(%rsi), %xmm0 + movdqu %xmm0, 85(%rdi) +L(bwd_write_85bytes): + lddqu 69(%rsi), %xmm0 + movdqu %xmm0, 69(%rdi) +L(bwd_write_69bytes): + lddqu 53(%rsi), %xmm0 + movdqu %xmm0, 53(%rdi) +L(bwd_write_53bytes): + lddqu 37(%rsi), %xmm0 + movdqu %xmm0, 37(%rdi) +L(bwd_write_37bytes): + lddqu 21(%rsi), %xmm0 + movdqu %xmm0, 21(%rdi) +L(bwd_write_21bytes): + lddqu 5(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 5(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_5bytes): + mov 1(%rsi), %edx + mov (%rsi), %ecx + mov %edx, 1(%rdi) + mov %ecx, (%rdi) + ret + + .p2align 4 +L(bwd_write_132bytes): + lddqu 116(%rsi), %xmm0 + movdqu %xmm0, 116(%rdi) 
+L(bwd_write_116bytes): + lddqu 100(%rsi), %xmm0 + movdqu %xmm0, 100(%rdi) +L(bwd_write_100bytes): + lddqu 84(%rsi), %xmm0 + movdqu %xmm0, 84(%rdi) +L(bwd_write_84bytes): + lddqu 68(%rsi), %xmm0 + movdqu %xmm0, 68(%rdi) +L(bwd_write_68bytes): + lddqu 52(%rsi), %xmm0 + movdqu %xmm0, 52(%rdi) +L(bwd_write_52bytes): + lddqu 36(%rsi), %xmm0 + movdqu %xmm0, 36(%rdi) +L(bwd_write_36bytes): + lddqu 20(%rsi), %xmm0 + movdqu %xmm0, 20(%rdi) +L(bwd_write_20bytes): + lddqu 4(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 4(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_4bytes): + mov (%rsi), %edx + mov %edx, (%rdi) + ret + + .p2align 4 +L(bwd_write_131bytes): + lddqu 115(%rsi), %xmm0 + movdqu %xmm0, 115(%rdi) +L(bwd_write_115bytes): + lddqu 99(%rsi), %xmm0 + movdqu %xmm0, 99(%rdi) +L(bwd_write_99bytes): + lddqu 83(%rsi), %xmm0 + movdqu %xmm0, 83(%rdi) +L(bwd_write_83bytes): + lddqu 67(%rsi), %xmm0 + movdqu %xmm0, 67(%rdi) +L(bwd_write_67bytes): + lddqu 51(%rsi), %xmm0 + movdqu %xmm0, 51(%rdi) +L(bwd_write_51bytes): + lddqu 35(%rsi), %xmm0 + movdqu %xmm0, 35(%rdi) +L(bwd_write_35bytes): + lddqu 19(%rsi), %xmm0 + movdqu %xmm0, 19(%rdi) +L(bwd_write_19bytes): + lddqu 3(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 3(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_3bytes): + mov 1(%rsi), %dx + mov (%rsi), %cx + mov %dx, 1(%rdi) + mov %cx, (%rdi) + ret + + .p2align 4 +L(bwd_write_130bytes): + lddqu 114(%rsi), %xmm0 + movdqu %xmm0, 114(%rdi) +L(bwd_write_114bytes): + lddqu 98(%rsi), %xmm0 + movdqu %xmm0, 98(%rdi) +L(bwd_write_98bytes): + lddqu 82(%rsi), %xmm0 + movdqu %xmm0, 82(%rdi) +L(bwd_write_82bytes): + lddqu 66(%rsi), %xmm0 + movdqu %xmm0, 66(%rdi) +L(bwd_write_66bytes): + lddqu 50(%rsi), %xmm0 + movdqu %xmm0, 50(%rdi) +L(bwd_write_50bytes): + lddqu 34(%rsi), %xmm0 + movdqu %xmm0, 34(%rdi) +L(bwd_write_34bytes): + lddqu 18(%rsi), %xmm0 + movdqu %xmm0, 18(%rdi) +L(bwd_write_18bytes): + lddqu 2(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu 
%xmm0, 2(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_2bytes): + movzwl (%rsi), %edx + mov %dx, (%rdi) + ret + + .p2align 4 +L(bwd_write_129bytes): + lddqu 113(%rsi), %xmm0 + movdqu %xmm0, 113(%rdi) +L(bwd_write_113bytes): + lddqu 97(%rsi), %xmm0 + movdqu %xmm0, 97(%rdi) +L(bwd_write_97bytes): + lddqu 81(%rsi), %xmm0 + movdqu %xmm0, 81(%rdi) +L(bwd_write_81bytes): + lddqu 65(%rsi), %xmm0 + movdqu %xmm0, 65(%rdi) +L(bwd_write_65bytes): + lddqu 49(%rsi), %xmm0 + movdqu %xmm0, 49(%rdi) +L(bwd_write_49bytes): + lddqu 33(%rsi), %xmm0 + movdqu %xmm0, 33(%rdi) +L(bwd_write_33bytes): + lddqu 17(%rsi), %xmm0 + movdqu %xmm0, 17(%rdi) +L(bwd_write_17bytes): + lddqu 1(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 1(%rdi) + movdqu %xmm1, (%rdi) + ret + + .p2align 4 +L(bwd_write_1bytes): + movzbl (%rsi), %edx + mov %dl, (%rdi) + ret + +END (MEMCPY) + + .section .rodata.ssse3,"a",@progbits + .p2align 3 +L(table_144_bytes_bwd): + .int JMPTBL (L(bwd_write_0bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_1bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_2bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_3bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_4bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_5bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_6bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_7bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_8bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_9bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_10bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_11bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_12bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_13bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_14bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_15bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_16bytes), L(table_144_bytes_bwd)) + .int JMPTBL 
(L(bwd_write_17bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_18bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_19bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_20bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_21bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_22bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_23bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_24bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_25bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_26bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_27bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_28bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_29bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_30bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_31bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_32bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_33bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_34bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_35bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_36bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_37bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_38bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_39bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_40bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_41bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_42bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_43bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_44bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_45bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_46bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_47bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_48bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_49bytes), L(table_144_bytes_bwd)) 
+ .int JMPTBL (L(bwd_write_50bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_51bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_52bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_53bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_54bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_55bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_56bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_57bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_58bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_59bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_60bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_61bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_62bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_63bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_64bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_65bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_66bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_67bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_68bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_69bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_70bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_71bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_72bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_73bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_74bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_75bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_76bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_77bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_78bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_79bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_80bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_81bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_82bytes), 
L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_83bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_84bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_85bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_86bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_87bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_88bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_89bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_90bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_91bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_92bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_93bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_94bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_95bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_96bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_97bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_98bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_99bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_100bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_101bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_102bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_103bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_104bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_105bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_106bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_107bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_108bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_109bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_110bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_111bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_112bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_113bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_114bytes), L(table_144_bytes_bwd)) + .int 
JMPTBL (L(bwd_write_115bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_116bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_117bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_118bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_119bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_120bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_121bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_122bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_123bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_124bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_125bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_126bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_127bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_128bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_129bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_130bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_131bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_132bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_133bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_134bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_135bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_136bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_137bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_138bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_139bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_140bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_141bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_142bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_143bytes), L(table_144_bytes_bwd)) + + .p2align 3 +L(table_144_bytes_fwd): + .int JMPTBL (L(fwd_write_0bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_1bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_2bytes), 
L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_3bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_4bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_5bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_6bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_7bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_8bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_9bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_10bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_11bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_12bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_13bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_14bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_15bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_16bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_17bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_18bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_19bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_20bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_21bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_22bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_23bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_24bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_25bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_26bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_27bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_28bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_29bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_30bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_31bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_32bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_33bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_34bytes), L(table_144_bytes_fwd)) + .int JMPTBL 
(L(fwd_write_35bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_36bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_37bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_38bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_39bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_40bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_41bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_42bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_43bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_44bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_45bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_46bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_47bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_48bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_49bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_50bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_51bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_52bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_53bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_54bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_55bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_56bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_57bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_58bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_59bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_60bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_61bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_62bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_63bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_64bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_65bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_66bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_67bytes), L(table_144_bytes_fwd)) 
+ .int JMPTBL (L(fwd_write_68bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_69bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_70bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_71bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_72bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_73bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_74bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_75bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_76bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_77bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_78bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_79bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_80bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_81bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_82bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_83bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_84bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_85bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_86bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_87bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_88bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_89bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_90bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_91bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_92bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_93bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_94bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_95bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_96bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_97bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_98bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_99bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_100bytes), 
L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_101bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_102bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_103bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_104bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_105bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_106bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_107bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_108bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_109bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_110bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_111bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_112bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_113bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_114bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_115bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_116bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_117bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_118bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_119bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_120bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_121bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_122bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_123bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_124bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_125bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_126bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_127bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_128bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_129bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_130bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_131bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_132bytes), 
L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_133bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_134bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_135bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_136bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_137bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_138bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_139bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_140bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_141bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_142bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_143bytes), L(table_144_bytes_fwd)) + + .p2align 3 +L(shl_table_fwd): + .int JMPTBL (L(shl_0), L(shl_table_fwd)) + .int JMPTBL (L(shl_1), L(shl_table_fwd)) + .int JMPTBL (L(shl_2), L(shl_table_fwd)) + .int JMPTBL (L(shl_3), L(shl_table_fwd)) + .int JMPTBL (L(shl_4), L(shl_table_fwd)) + .int JMPTBL (L(shl_5), L(shl_table_fwd)) + .int JMPTBL (L(shl_6), L(shl_table_fwd)) + .int JMPTBL (L(shl_7), L(shl_table_fwd)) + .int JMPTBL (L(shl_8), L(shl_table_fwd)) + .int JMPTBL (L(shl_9), L(shl_table_fwd)) + .int JMPTBL (L(shl_10), L(shl_table_fwd)) + .int JMPTBL (L(shl_11), L(shl_table_fwd)) + .int JMPTBL (L(shl_12), L(shl_table_fwd)) + .int JMPTBL (L(shl_13), L(shl_table_fwd)) + .int JMPTBL (L(shl_14), L(shl_table_fwd)) + .int JMPTBL (L(shl_15), L(shl_table_fwd)) + + .p2align 3 +L(shl_table_bwd): + .int JMPTBL (L(shl_0_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_1_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_2_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_3_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_4_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_5_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_6_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_7_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_8_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_9_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_10_bwd), L(shl_table_bwd)) + .int JMPTBL 
(L(shl_11_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_12_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_13_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_14_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_15_bwd), L(shl_table_bwd)) + +#endif diff --git a/utils/memcpy-bench/glibc/memcpy-ssse3.S b/utils/memcpy-bench/glibc/memcpy-ssse3.S new file mode 100644 index 00000000000..2fd26651645 --- /dev/null +++ b/utils/memcpy-bench/glibc/memcpy-ssse3.S @@ -0,0 +1,3152 @@ +/* memcpy with SSSE3 + Copyright (C) 2010-2020 Free Software Foundation, Inc. + Contributed by Intel Corporation. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include "sysdep.h" + +#if 1 + +#include "asm-syntax.h" + +#ifndef MEMCPY +# define MEMCPY __memcpy_ssse3 +# define MEMCPY_CHK __memcpy_chk_ssse3 +# define MEMPCPY __mempcpy_ssse3 +# define MEMPCPY_CHK __mempcpy_chk_ssse3 +#endif + +#define JMPTBL(I, B) I - B + +/* Branch to an entry in a jump table. TABLE is a jump table with + relative offsets. INDEX is a register contains the index into the + jump table. SCALE is the scale of INDEX. 
*/ +#define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ + lea TABLE(%rip), %r11; \ + movslq (%r11, INDEX, SCALE), INDEX; \ + lea (%r11, INDEX), INDEX; \ + _CET_NOTRACK jmp *INDEX; \ + ud2 + + .section .text.ssse3,"ax",@progbits +#if !defined USE_AS_MEMPCPY && !defined USE_AS_MEMMOVE +ENTRY (MEMPCPY_CHK) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (MEMPCPY_CHK) + +ENTRY (MEMPCPY) + mov %RDI_LP, %RAX_LP + add %RDX_LP, %RAX_LP + jmp L(start) +END (MEMPCPY) +#endif + +#if !defined USE_AS_BCOPY +ENTRY (MEMCPY_CHK) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (MEMCPY_CHK) +#endif + +ENTRY (MEMCPY) + mov %RDI_LP, %RAX_LP +#ifdef USE_AS_MEMPCPY + add %RDX_LP, %RAX_LP +#endif + +#ifdef __ILP32__ + /* Clear the upper 32 bits. */ + mov %edx, %edx +#endif + +#ifdef USE_AS_MEMMOVE + cmp %rsi, %rdi + jb L(copy_forward) + je L(write_0bytes) + cmp $79, %rdx + jbe L(copy_forward) + jmp L(copy_backward) +L(copy_forward): +#endif +L(start): + cmp $79, %rdx + lea L(table_less_80bytes)(%rip), %r11 + ja L(80bytesormore) + movslq (%r11, %rdx, 4), %r9 + add %rdx, %rsi + add %rdx, %rdi + add %r11, %r9 + _CET_NOTRACK jmp *%r9 + ud2 + + .p2align 4 +L(80bytesormore): +#ifndef USE_AS_MEMMOVE + cmp %dil, %sil + jle L(copy_backward) +#endif + + movdqu (%rsi), %xmm0 + mov %rdi, %rcx + and $-16, %rdi + add $16, %rdi + mov %rcx, %r8 + sub %rdi, %rcx + add %rcx, %rdx + sub %rcx, %rsi + +#ifdef SHARED_CACHE_SIZE_HALF + mov $SHARED_CACHE_SIZE_HALF, %RCX_LP +#else + mov __x86_shared_cache_size_half(%rip), %RCX_LP +#endif + cmp %rcx, %rdx + mov %rsi, %r9 + ja L(large_page_fwd) + and $0xf, %r9 + jz L(shl_0) +#ifdef DATA_CACHE_SIZE_HALF + mov $DATA_CACHE_SIZE_HALF, %RCX_LP +#else + mov __x86_data_cache_size_half(%rip), %RCX_LP +#endif + BRANCH_TO_JMPTBL_ENTRY (L(shl_table), %r9, 4) + + .p2align 4 +L(copy_backward): + movdqu -16(%rsi, %rdx), %xmm0 + add %rdx, %rsi + lea -16(%rdi, %rdx), %r8 + add %rdx, %rdi + + mov %rdi, %rcx + and $0xf, %rcx + xor %rcx, %rdi + sub 
%rcx, %rdx + sub %rcx, %rsi + +#ifdef SHARED_CACHE_SIZE_HALF + mov $SHARED_CACHE_SIZE_HALF, %RCX_LP +#else + mov __x86_shared_cache_size_half(%rip), %RCX_LP +#endif + + cmp %rcx, %rdx + mov %rsi, %r9 + ja L(large_page_bwd) + and $0xf, %r9 + jz L(shl_0_bwd) +#ifdef DATA_CACHE_SIZE_HALF + mov $DATA_CACHE_SIZE_HALF, %RCX_LP +#else + mov __x86_data_cache_size_half(%rip), %RCX_LP +#endif + BRANCH_TO_JMPTBL_ENTRY (L(shl_table_bwd), %r9, 4) + + .p2align 4 +L(shl_0): + sub $16, %rdx + movdqa (%rsi), %xmm1 + add $16, %rsi + movdqa %xmm1, (%rdi) + add $16, %rdi + cmp $128, %rdx + movdqu %xmm0, (%r8) + ja L(shl_0_gobble) + cmp $64, %rdx + jb L(shl_0_less_64bytes) + movaps (%rsi), %xmm4 + movaps 16(%rsi), %xmm1 + movaps 32(%rsi), %xmm2 + movaps 48(%rsi), %xmm3 + movaps %xmm4, (%rdi) + movaps %xmm1, 16(%rdi) + movaps %xmm2, 32(%rdi) + movaps %xmm3, 48(%rdi) + sub $64, %rdx + add $64, %rsi + add $64, %rdi +L(shl_0_less_64bytes): + add %rdx, %rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_0_gobble): +#ifdef DATA_CACHE_SIZE_HALF + cmp $DATA_CACHE_SIZE_HALF, %RDX_LP +#else + cmp __x86_data_cache_size_half(%rip), %RDX_LP +#endif + lea -128(%rdx), %rdx + jae L(shl_0_gobble_mem_loop) +L(shl_0_gobble_cache_loop): + movdqa (%rsi), %xmm4 + movaps 0x10(%rsi), %xmm1 + movaps 0x20(%rsi), %xmm2 + movaps 0x30(%rsi), %xmm3 + + movdqa %xmm4, (%rdi) + movaps %xmm1, 0x10(%rdi) + movaps %xmm2, 0x20(%rdi) + movaps %xmm3, 0x30(%rdi) + + sub $128, %rdx + movaps 0x40(%rsi), %xmm4 + movaps 0x50(%rsi), %xmm5 + movaps 0x60(%rsi), %xmm6 + movaps 0x70(%rsi), %xmm7 + lea 0x80(%rsi), %rsi + movaps %xmm4, 0x40(%rdi) + movaps %xmm5, 0x50(%rdi) + movaps %xmm6, 0x60(%rdi) + movaps %xmm7, 0x70(%rdi) + lea 0x80(%rdi), %rdi + + jae L(shl_0_gobble_cache_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(shl_0_cache_less_64bytes) + + movdqa (%rsi), %xmm4 + sub $0x40, %rdx + movdqa 0x10(%rsi), %xmm1 + + movdqa %xmm4, (%rdi) + movdqa %xmm1, 0x10(%rdi) + + movdqa 
0x20(%rsi), %xmm4 + movdqa 0x30(%rsi), %xmm1 + add $0x40, %rsi + + movdqa %xmm4, 0x20(%rdi) + movdqa %xmm1, 0x30(%rdi) + add $0x40, %rdi +L(shl_0_cache_less_64bytes): + add %rdx, %rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_0_gobble_mem_loop): + prefetcht0 0x1c0(%rsi) + prefetcht0 0x280(%rsi) + + movdqa (%rsi), %xmm0 + movdqa 0x10(%rsi), %xmm1 + movdqa 0x20(%rsi), %xmm2 + movdqa 0x30(%rsi), %xmm3 + movdqa 0x40(%rsi), %xmm4 + movdqa 0x50(%rsi), %xmm5 + movdqa 0x60(%rsi), %xmm6 + movdqa 0x70(%rsi), %xmm7 + lea 0x80(%rsi), %rsi + sub $0x80, %rdx + movdqa %xmm0, (%rdi) + movdqa %xmm1, 0x10(%rdi) + movdqa %xmm2, 0x20(%rdi) + movdqa %xmm3, 0x30(%rdi) + movdqa %xmm4, 0x40(%rdi) + movdqa %xmm5, 0x50(%rdi) + movdqa %xmm6, 0x60(%rdi) + movdqa %xmm7, 0x70(%rdi) + lea 0x80(%rdi), %rdi + + jae L(shl_0_gobble_mem_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(shl_0_mem_less_64bytes) + + movdqa (%rsi), %xmm0 + sub $0x40, %rdx + movdqa 0x10(%rsi), %xmm1 + + movdqa %xmm0, (%rdi) + movdqa %xmm1, 0x10(%rdi) + + movdqa 0x20(%rsi), %xmm0 + movdqa 0x30(%rsi), %xmm1 + add $0x40, %rsi + + movdqa %xmm0, 0x20(%rdi) + movdqa %xmm1, 0x30(%rdi) + add $0x40, %rdi +L(shl_0_mem_less_64bytes): + cmp $0x20, %rdx + jb L(shl_0_mem_less_32bytes) + movdqa (%rsi), %xmm0 + sub $0x20, %rdx + movdqa 0x10(%rsi), %xmm1 + add $0x20, %rsi + movdqa %xmm0, (%rdi) + movdqa %xmm1, 0x10(%rdi) + add $0x20, %rdi +L(shl_0_mem_less_32bytes): + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_0_bwd): + sub $16, %rdx + movdqa -0x10(%rsi), %xmm1 + sub $16, %rsi + movdqa %xmm1, -0x10(%rdi) + sub $16, %rdi + cmp $0x80, %rdx + movdqu %xmm0, (%r8) + ja L(shl_0_gobble_bwd) + cmp $64, %rdx + jb L(shl_0_less_64bytes_bwd) + movaps -0x10(%rsi), %xmm0 + movaps -0x20(%rsi), %xmm1 + movaps -0x30(%rsi), %xmm2 + movaps -0x40(%rsi), %xmm3 + movaps %xmm0, -0x10(%rdi) + movaps %xmm1, -0x20(%rdi) + movaps %xmm2, 
-0x30(%rdi) + movaps %xmm3, -0x40(%rdi) + sub $64, %rdx + sub $0x40, %rsi + sub $0x40, %rdi +L(shl_0_less_64bytes_bwd): + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_0_gobble_bwd): +#ifdef DATA_CACHE_SIZE_HALF + cmp $DATA_CACHE_SIZE_HALF, %RDX_LP +#else + cmp __x86_data_cache_size_half(%rip), %RDX_LP +#endif + lea -128(%rdx), %rdx + jae L(shl_0_gobble_mem_bwd_loop) +L(shl_0_gobble_bwd_loop): + movdqa -0x10(%rsi), %xmm0 + movaps -0x20(%rsi), %xmm1 + movaps -0x30(%rsi), %xmm2 + movaps -0x40(%rsi), %xmm3 + + movdqa %xmm0, -0x10(%rdi) + movaps %xmm1, -0x20(%rdi) + movaps %xmm2, -0x30(%rdi) + movaps %xmm3, -0x40(%rdi) + + sub $0x80, %rdx + movaps -0x50(%rsi), %xmm4 + movaps -0x60(%rsi), %xmm5 + movaps -0x70(%rsi), %xmm6 + movaps -0x80(%rsi), %xmm7 + lea -0x80(%rsi), %rsi + movaps %xmm4, -0x50(%rdi) + movaps %xmm5, -0x60(%rdi) + movaps %xmm6, -0x70(%rdi) + movaps %xmm7, -0x80(%rdi) + lea -0x80(%rdi), %rdi + + jae L(shl_0_gobble_bwd_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(shl_0_gobble_bwd_less_64bytes) + + movdqa -0x10(%rsi), %xmm0 + sub $0x40, %rdx + movdqa -0x20(%rsi), %xmm1 + + movdqa %xmm0, -0x10(%rdi) + movdqa %xmm1, -0x20(%rdi) + + movdqa -0x30(%rsi), %xmm0 + movdqa -0x40(%rsi), %xmm1 + sub $0x40, %rsi + + movdqa %xmm0, -0x30(%rdi) + movdqa %xmm1, -0x40(%rdi) + sub $0x40, %rdi +L(shl_0_gobble_bwd_less_64bytes): + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_0_gobble_mem_bwd_loop): + prefetcht0 -0x1c0(%rsi) + prefetcht0 -0x280(%rsi) + movdqa -0x10(%rsi), %xmm0 + movdqa -0x20(%rsi), %xmm1 + movdqa -0x30(%rsi), %xmm2 + movdqa -0x40(%rsi), %xmm3 + movdqa -0x50(%rsi), %xmm4 + movdqa -0x60(%rsi), %xmm5 + movdqa -0x70(%rsi), %xmm6 + movdqa -0x80(%rsi), %xmm7 + lea -0x80(%rsi), %rsi + sub $0x80, %rdx + movdqa %xmm0, -0x10(%rdi) + movdqa %xmm1, -0x20(%rdi) + movdqa %xmm2, -0x30(%rdi) + movdqa %xmm3, -0x40(%rdi) + movdqa %xmm4, -0x50(%rdi) + movdqa %xmm5, -0x60(%rdi) + movdqa %xmm6, -0x70(%rdi) 
+ movdqa %xmm7, -0x80(%rdi) + lea -0x80(%rdi), %rdi + + jae L(shl_0_gobble_mem_bwd_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(shl_0_mem_bwd_less_64bytes) + + movdqa -0x10(%rsi), %xmm0 + sub $0x40, %rdx + movdqa -0x20(%rsi), %xmm1 + + movdqa %xmm0, -0x10(%rdi) + movdqa %xmm1, -0x20(%rdi) + + movdqa -0x30(%rsi), %xmm0 + movdqa -0x40(%rsi), %xmm1 + sub $0x40, %rsi + + movdqa %xmm0, -0x30(%rdi) + movdqa %xmm1, -0x40(%rdi) + sub $0x40, %rdi +L(shl_0_mem_bwd_less_64bytes): + cmp $0x20, %rdx + jb L(shl_0_mem_bwd_less_32bytes) + movdqa -0x10(%rsi), %xmm0 + sub $0x20, %rdx + movdqa -0x20(%rsi), %xmm1 + sub $0x20, %rsi + movdqa %xmm0, -0x10(%rdi) + movdqa %xmm1, -0x20(%rdi) + sub $0x20, %rdi +L(shl_0_mem_bwd_less_32bytes): + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_1): + lea (L(shl_1_loop_L1)-L(shl_1))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x01(%rsi), %xmm1 + jb L(L1_fwd) + lea (L(shl_1_loop_L2)-L(shl_1_loop_L1))(%r9), %r9 +L(L1_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_1_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_1_loop_L1): + sub $64, %rdx + movaps 0x0f(%rsi), %xmm2 + movaps 0x1f(%rsi), %xmm3 + movaps 0x2f(%rsi), %xmm4 + movaps 0x3f(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $1, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $1, %xmm3, %xmm4 + palignr $1, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $1, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_1_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_1_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_1_bwd): + lea (L(shl_1_bwd_loop_L1)-L(shl_1_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x01(%rsi), %xmm1 + jb L(L1_bwd) + lea (L(shl_1_bwd_loop_L2)-L(shl_1_bwd_loop_L1))(%r9), %r9 +L(L1_bwd): 
+ lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_1_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_1_bwd_loop_L1): + movaps -0x11(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x21(%rsi), %xmm3 + movaps -0x31(%rsi), %xmm4 + movaps -0x41(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $1, %xmm2, %xmm1 + palignr $1, %xmm3, %xmm2 + palignr $1, %xmm4, %xmm3 + palignr $1, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_1_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_1_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_2): + lea (L(shl_2_loop_L1)-L(shl_2))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x02(%rsi), %xmm1 + jb L(L2_fwd) + lea (L(shl_2_loop_L2)-L(shl_2_loop_L1))(%r9), %r9 +L(L2_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_2_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_2_loop_L1): + sub $64, %rdx + movaps 0x0e(%rsi), %xmm2 + movaps 0x1e(%rsi), %xmm3 + movaps 0x2e(%rsi), %xmm4 + movaps 0x3e(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $2, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $2, %xmm3, %xmm4 + palignr $2, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $2, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_2_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_2_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_2_bwd): + lea (L(shl_2_bwd_loop_L1)-L(shl_2_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x02(%rsi), %xmm1 + jb L(L2_bwd) + lea (L(shl_2_bwd_loop_L2)-L(shl_2_bwd_loop_L1))(%r9), %r9 +L(L2_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 
+L(shl_2_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_2_bwd_loop_L1): + movaps -0x12(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x22(%rsi), %xmm3 + movaps -0x32(%rsi), %xmm4 + movaps -0x42(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $2, %xmm2, %xmm1 + palignr $2, %xmm3, %xmm2 + palignr $2, %xmm4, %xmm3 + palignr $2, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_2_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_2_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_3): + lea (L(shl_3_loop_L1)-L(shl_3))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x03(%rsi), %xmm1 + jb L(L3_fwd) + lea (L(shl_3_loop_L2)-L(shl_3_loop_L1))(%r9), %r9 +L(L3_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_3_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_3_loop_L1): + sub $64, %rdx + movaps 0x0d(%rsi), %xmm2 + movaps 0x1d(%rsi), %xmm3 + movaps 0x2d(%rsi), %xmm4 + movaps 0x3d(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $3, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $3, %xmm3, %xmm4 + palignr $3, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $3, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_3_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_3_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_3_bwd): + lea (L(shl_3_bwd_loop_L1)-L(shl_3_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x03(%rsi), %xmm1 + jb L(L3_bwd) + lea (L(shl_3_bwd_loop_L2)-L(shl_3_bwd_loop_L1))(%r9), %r9 +L(L3_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_3_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) 
+L(shl_3_bwd_loop_L1): + movaps -0x13(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x23(%rsi), %xmm3 + movaps -0x33(%rsi), %xmm4 + movaps -0x43(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $3, %xmm2, %xmm1 + palignr $3, %xmm3, %xmm2 + palignr $3, %xmm4, %xmm3 + palignr $3, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_3_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_3_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_4): + lea (L(shl_4_loop_L1)-L(shl_4))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x04(%rsi), %xmm1 + jb L(L4_fwd) + lea (L(shl_4_loop_L2)-L(shl_4_loop_L1))(%r9), %r9 +L(L4_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_4_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_4_loop_L1): + sub $64, %rdx + movaps 0x0c(%rsi), %xmm2 + movaps 0x1c(%rsi), %xmm3 + movaps 0x2c(%rsi), %xmm4 + movaps 0x3c(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $4, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $4, %xmm3, %xmm4 + palignr $4, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $4, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_4_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_4_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_4_bwd): + lea (L(shl_4_bwd_loop_L1)-L(shl_4_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x04(%rsi), %xmm1 + jb L(L4_bwd) + lea (L(shl_4_bwd_loop_L2)-L(shl_4_bwd_loop_L1))(%r9), %r9 +L(L4_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_4_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_4_bwd_loop_L1): + movaps -0x14(%rsi), %xmm2 + 
sub $0x40, %rdx + movaps -0x24(%rsi), %xmm3 + movaps -0x34(%rsi), %xmm4 + movaps -0x44(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $4, %xmm2, %xmm1 + palignr $4, %xmm3, %xmm2 + palignr $4, %xmm4, %xmm3 + palignr $4, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_4_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_4_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_5): + lea (L(shl_5_loop_L1)-L(shl_5))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x05(%rsi), %xmm1 + jb L(L5_fwd) + lea (L(shl_5_loop_L2)-L(shl_5_loop_L1))(%r9), %r9 +L(L5_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_5_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_5_loop_L1): + sub $64, %rdx + movaps 0x0b(%rsi), %xmm2 + movaps 0x1b(%rsi), %xmm3 + movaps 0x2b(%rsi), %xmm4 + movaps 0x3b(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $5, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $5, %xmm3, %xmm4 + palignr $5, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $5, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_5_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_5_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_5_bwd): + lea (L(shl_5_bwd_loop_L1)-L(shl_5_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x05(%rsi), %xmm1 + jb L(L5_bwd) + lea (L(shl_5_bwd_loop_L2)-L(shl_5_bwd_loop_L1))(%r9), %r9 +L(L5_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_5_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_5_bwd_loop_L1): + movaps -0x15(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x25(%rsi), %xmm3 + movaps 
-0x35(%rsi), %xmm4 + movaps -0x45(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $5, %xmm2, %xmm1 + palignr $5, %xmm3, %xmm2 + palignr $5, %xmm4, %xmm3 + palignr $5, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_5_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_5_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_6): + lea (L(shl_6_loop_L1)-L(shl_6))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x06(%rsi), %xmm1 + jb L(L6_fwd) + lea (L(shl_6_loop_L2)-L(shl_6_loop_L1))(%r9), %r9 +L(L6_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_6_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_6_loop_L1): + sub $64, %rdx + movaps 0x0a(%rsi), %xmm2 + movaps 0x1a(%rsi), %xmm3 + movaps 0x2a(%rsi), %xmm4 + movaps 0x3a(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $6, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $6, %xmm3, %xmm4 + palignr $6, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $6, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_6_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_6_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_6_bwd): + lea (L(shl_6_bwd_loop_L1)-L(shl_6_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x06(%rsi), %xmm1 + jb L(L6_bwd) + lea (L(shl_6_bwd_loop_L2)-L(shl_6_bwd_loop_L1))(%r9), %r9 +L(L6_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_6_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_6_bwd_loop_L1): + movaps -0x16(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x26(%rsi), %xmm3 + movaps -0x36(%rsi), %xmm4 + movaps -0x46(%rsi), %xmm5 + lea 
-0x40(%rsi), %rsi + palignr $6, %xmm2, %xmm1 + palignr $6, %xmm3, %xmm2 + palignr $6, %xmm4, %xmm3 + palignr $6, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_6_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_6_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_7): + lea (L(shl_7_loop_L1)-L(shl_7))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x07(%rsi), %xmm1 + jb L(L7_fwd) + lea (L(shl_7_loop_L2)-L(shl_7_loop_L1))(%r9), %r9 +L(L7_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_7_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_7_loop_L1): + sub $64, %rdx + movaps 0x09(%rsi), %xmm2 + movaps 0x19(%rsi), %xmm3 + movaps 0x29(%rsi), %xmm4 + movaps 0x39(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $7, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $7, %xmm3, %xmm4 + palignr $7, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $7, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_7_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_7_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_7_bwd): + lea (L(shl_7_bwd_loop_L1)-L(shl_7_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x07(%rsi), %xmm1 + jb L(L7_bwd) + lea (L(shl_7_bwd_loop_L2)-L(shl_7_bwd_loop_L1))(%r9), %r9 +L(L7_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_7_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_7_bwd_loop_L1): + movaps -0x17(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x27(%rsi), %xmm3 + movaps -0x37(%rsi), %xmm4 + movaps -0x47(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $7, %xmm2, %xmm1 + 
palignr $7, %xmm3, %xmm2 + palignr $7, %xmm4, %xmm3 + palignr $7, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_7_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_7_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_8): + lea (L(shl_8_loop_L1)-L(shl_8))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x08(%rsi), %xmm1 + jb L(L8_fwd) + lea (L(shl_8_loop_L2)-L(shl_8_loop_L1))(%r9), %r9 +L(L8_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 +L(shl_8_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_8_loop_L1): + sub $64, %rdx + movaps 0x08(%rsi), %xmm2 + movaps 0x18(%rsi), %xmm3 + movaps 0x28(%rsi), %xmm4 + movaps 0x38(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $8, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $8, %xmm3, %xmm4 + palignr $8, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $8, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_8_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 + .p2align 4 +L(shl_8_end): + lea 64(%rdx), %rdx + movaps %xmm4, -0x20(%rdi) + add %rdx, %rsi + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_8_bwd): + lea (L(shl_8_bwd_loop_L1)-L(shl_8_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x08(%rsi), %xmm1 + jb L(L8_bwd) + lea (L(shl_8_bwd_loop_L2)-L(shl_8_bwd_loop_L1))(%r9), %r9 +L(L8_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_8_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_8_bwd_loop_L1): + movaps -0x18(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x28(%rsi), %xmm3 + movaps -0x38(%rsi), %xmm4 + movaps -0x48(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $8, %xmm2, %xmm1 + palignr $8, %xmm3, %xmm2 + palignr $8, %xmm4, 
%xmm3 + palignr $8, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_8_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_8_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_9): + lea (L(shl_9_loop_L1)-L(shl_9))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x09(%rsi), %xmm1 + jb L(L9_fwd) + lea (L(shl_9_loop_L2)-L(shl_9_loop_L1))(%r9), %r9 +L(L9_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_9_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_9_loop_L1): + sub $64, %rdx + movaps 0x07(%rsi), %xmm2 + movaps 0x17(%rsi), %xmm3 + movaps 0x27(%rsi), %xmm4 + movaps 0x37(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $9, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $9, %xmm3, %xmm4 + palignr $9, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $9, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_9_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_9_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_9_bwd): + lea (L(shl_9_bwd_loop_L1)-L(shl_9_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x09(%rsi), %xmm1 + jb L(L9_bwd) + lea (L(shl_9_bwd_loop_L2)-L(shl_9_bwd_loop_L1))(%r9), %r9 +L(L9_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_9_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_9_bwd_loop_L1): + movaps -0x19(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x29(%rsi), %xmm3 + movaps -0x39(%rsi), %xmm4 + movaps -0x49(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $9, %xmm2, %xmm1 + palignr $9, %xmm3, %xmm2 + palignr $9, %xmm4, %xmm3 + palignr $9, %xmm5, %xmm4 + + movaps %xmm1, 
-0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_9_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_9_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_10): + lea (L(shl_10_loop_L1)-L(shl_10))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0a(%rsi), %xmm1 + jb L(L10_fwd) + lea (L(shl_10_loop_L2)-L(shl_10_loop_L1))(%r9), %r9 +L(L10_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_10_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_10_loop_L1): + sub $64, %rdx + movaps 0x06(%rsi), %xmm2 + movaps 0x16(%rsi), %xmm3 + movaps 0x26(%rsi), %xmm4 + movaps 0x36(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $10, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $10, %xmm3, %xmm4 + palignr $10, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $10, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_10_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_10_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_10_bwd): + lea (L(shl_10_bwd_loop_L1)-L(shl_10_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0a(%rsi), %xmm1 + jb L(L10_bwd) + lea (L(shl_10_bwd_loop_L2)-L(shl_10_bwd_loop_L1))(%r9), %r9 +L(L10_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_10_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_10_bwd_loop_L1): + movaps -0x1a(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2a(%rsi), %xmm3 + movaps -0x3a(%rsi), %xmm4 + movaps -0x4a(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $10, %xmm2, %xmm1 + palignr $10, %xmm3, %xmm2 + palignr $10, %xmm4, %xmm3 + palignr $10, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps 
%xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_10_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_10_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_11): + lea (L(shl_11_loop_L1)-L(shl_11))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0b(%rsi), %xmm1 + jb L(L11_fwd) + lea (L(shl_11_loop_L2)-L(shl_11_loop_L1))(%r9), %r9 +L(L11_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_11_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_11_loop_L1): + sub $64, %rdx + movaps 0x05(%rsi), %xmm2 + movaps 0x15(%rsi), %xmm3 + movaps 0x25(%rsi), %xmm4 + movaps 0x35(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $11, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $11, %xmm3, %xmm4 + palignr $11, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $11, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_11_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_11_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_11_bwd): + lea (L(shl_11_bwd_loop_L1)-L(shl_11_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0b(%rsi), %xmm1 + jb L(L11_bwd) + lea (L(shl_11_bwd_loop_L2)-L(shl_11_bwd_loop_L1))(%r9), %r9 +L(L11_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_11_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_11_bwd_loop_L1): + movaps -0x1b(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2b(%rsi), %xmm3 + movaps -0x3b(%rsi), %xmm4 + movaps -0x4b(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $11, %xmm2, %xmm1 + palignr $11, %xmm3, %xmm2 + palignr $11, %xmm4, %xmm3 + palignr $11, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps 
%xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_11_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_11_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_12): + lea (L(shl_12_loop_L1)-L(shl_12))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0c(%rsi), %xmm1 + jb L(L12_fwd) + lea (L(shl_12_loop_L2)-L(shl_12_loop_L1))(%r9), %r9 +L(L12_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_12_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_12_loop_L1): + sub $64, %rdx + movaps 0x04(%rsi), %xmm2 + movaps 0x14(%rsi), %xmm3 + movaps 0x24(%rsi), %xmm4 + movaps 0x34(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $12, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $12, %xmm3, %xmm4 + palignr $12, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $12, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_12_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_12_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_12_bwd): + lea (L(shl_12_bwd_loop_L1)-L(shl_12_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0c(%rsi), %xmm1 + jb L(L12_bwd) + lea (L(shl_12_bwd_loop_L2)-L(shl_12_bwd_loop_L1))(%r9), %r9 +L(L12_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_12_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_12_bwd_loop_L1): + movaps -0x1c(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2c(%rsi), %xmm3 + movaps -0x3c(%rsi), %xmm4 + movaps -0x4c(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $12, %xmm2, %xmm1 + palignr $12, %xmm3, %xmm2 + palignr $12, %xmm4, %xmm3 + palignr $12, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + 
lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb L(shl_12_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_12_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_13): + lea (L(shl_13_loop_L1)-L(shl_13))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0d(%rsi), %xmm1 + jb L(L13_fwd) + lea (L(shl_13_loop_L2)-L(shl_13_loop_L1))(%r9), %r9 +L(L13_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_13_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_13_loop_L1): + sub $64, %rdx + movaps 0x03(%rsi), %xmm2 + movaps 0x13(%rsi), %xmm3 + movaps 0x23(%rsi), %xmm4 + movaps 0x33(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $13, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $13, %xmm3, %xmm4 + palignr $13, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $13, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_13_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_13_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_13_bwd): + lea (L(shl_13_bwd_loop_L1)-L(shl_13_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0d(%rsi), %xmm1 + jb L(L13_bwd) + lea (L(shl_13_bwd_loop_L2)-L(shl_13_bwd_loop_L1))(%r9), %r9 +L(L13_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_13_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_13_bwd_loop_L1): + movaps -0x1d(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2d(%rsi), %xmm3 + movaps -0x3d(%rsi), %xmm4 + movaps -0x4d(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $13, %xmm2, %xmm1 + palignr $13, %xmm3, %xmm2 + palignr $13, %xmm4, %xmm3 + palignr $13, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + 
+ movaps %xmm3, 0x10(%rdi) + jb L(shl_13_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_13_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_14): + lea (L(shl_14_loop_L1)-L(shl_14))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0e(%rsi), %xmm1 + jb L(L14_fwd) + lea (L(shl_14_loop_L2)-L(shl_14_loop_L1))(%r9), %r9 +L(L14_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_14_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_14_loop_L1): + sub $64, %rdx + movaps 0x02(%rsi), %xmm2 + movaps 0x12(%rsi), %xmm3 + movaps 0x22(%rsi), %xmm4 + movaps 0x32(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $14, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $14, %xmm3, %xmm4 + palignr $14, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $14, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_14_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_14_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_14_bwd): + lea (L(shl_14_bwd_loop_L1)-L(shl_14_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0e(%rsi), %xmm1 + jb L(L14_bwd) + lea (L(shl_14_bwd_loop_L2)-L(shl_14_bwd_loop_L1))(%r9), %r9 +L(L14_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_14_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_14_bwd_loop_L1): + movaps -0x1e(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2e(%rsi), %xmm3 + movaps -0x3e(%rsi), %xmm4 + movaps -0x4e(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $14, %xmm2, %xmm1 + palignr $14, %xmm3, %xmm2 + palignr $14, %xmm4, %xmm3 + palignr $14, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 
0x10(%rdi) + jb L(shl_14_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_14_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_15): + lea (L(shl_15_loop_L1)-L(shl_15))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0f(%rsi), %xmm1 + jb L(L15_fwd) + lea (L(shl_15_loop_L2)-L(shl_15_loop_L1))(%r9), %r9 +L(L15_fwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_15_loop_L2): + prefetchnta 0x1c0(%rsi) +L(shl_15_loop_L1): + sub $64, %rdx + movaps 0x01(%rsi), %xmm2 + movaps 0x11(%rsi), %xmm3 + movaps 0x21(%rsi), %xmm4 + movaps 0x31(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $15, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $15, %xmm3, %xmm4 + palignr $15, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $15, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_15_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_15_end): + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(shl_15_bwd): + lea (L(shl_15_bwd_loop_L1)-L(shl_15_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0f(%rsi), %xmm1 + jb L(L15_bwd) + lea (L(shl_15_bwd_loop_L2)-L(shl_15_bwd_loop_L1))(%r9), %r9 +L(L15_bwd): + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_15_bwd_loop_L2): + prefetchnta -0x1c0(%rsi) +L(shl_15_bwd_loop_L1): + movaps -0x1f(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2f(%rsi), %xmm3 + movaps -0x3f(%rsi), %xmm4 + movaps -0x4f(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $15, %xmm2, %xmm1 + palignr $15, %xmm3, %xmm2 + palignr $15, %xmm4, %xmm3 + palignr $15, %xmm5, %xmm4 + + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 + + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi + + movaps %xmm3, 0x10(%rdi) + jb 
L(shl_15_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 +L(shl_15_bwd_end): + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + + .p2align 4 +L(write_72bytes): + movdqu -72(%rsi), %xmm0 + movdqu -56(%rsi), %xmm1 + mov -40(%rsi), %r8 + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rcx + movdqu %xmm0, -72(%rdi) + movdqu %xmm1, -56(%rdi) + mov %r8, -40(%rdi) + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rcx, -8(%rdi) + ret + + .p2align 4 +L(write_64bytes): + movdqu -64(%rsi), %xmm0 + mov -48(%rsi), %rcx + mov -40(%rsi), %r8 + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + movdqu %xmm0, -64(%rdi) + mov %rcx, -48(%rdi) + mov %r8, -40(%rdi) + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_56bytes): + movdqu -56(%rsi), %xmm0 + mov -40(%rsi), %r8 + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rcx + movdqu %xmm0, -56(%rdi) + mov %r8, -40(%rdi) + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rcx, -8(%rdi) + ret + + .p2align 4 +L(write_48bytes): + mov -48(%rsi), %rcx + mov -40(%rsi), %r8 + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + mov %rcx, -48(%rdi) + mov %r8, -40(%rdi) + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_40bytes): + mov -40(%rsi), %r8 + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + mov %r8, -40(%rdi) + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_32bytes): + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret + + 
.p2align 4 +L(write_24bytes): + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_16bytes): + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_8bytes): + mov -8(%rsi), %rdx + mov %rdx, -8(%rdi) +L(write_0bytes): + ret + + .p2align 4 +L(write_73bytes): + movdqu -73(%rsi), %xmm0 + movdqu -57(%rsi), %xmm1 + mov -41(%rsi), %rcx + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %r8 + mov -4(%rsi), %edx + movdqu %xmm0, -73(%rdi) + movdqu %xmm1, -57(%rdi) + mov %rcx, -41(%rdi) + mov %r9, -33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %r8, -9(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_65bytes): + movdqu -65(%rsi), %xmm0 + movdqu -49(%rsi), %xmm1 + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -65(%rdi) + movdqu %xmm1, -49(%rdi) + mov %r9, -33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_57bytes): + movdqu -57(%rsi), %xmm0 + mov -41(%rsi), %r8 + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -57(%rdi) + mov %r8, -41(%rdi) + mov %r9, -33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_49bytes): + movdqu -49(%rsi), %xmm0 + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -49(%rdi) + mov %r9, -33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_41bytes): + mov -41(%rsi), %r8 + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -1(%rsi), %dl + mov %r8, -41(%rdi) + mov %r9, 
-33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %dl, -1(%rdi) + ret + + .p2align 4 +L(write_33bytes): + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -1(%rsi), %dl + mov %r9, -33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %dl, -1(%rdi) + ret + + .p2align 4 +L(write_25bytes): + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -1(%rsi), %dl + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %dl, -1(%rdi) + ret + + .p2align 4 +L(write_17bytes): + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -4(%rsi), %edx + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_9bytes): + mov -9(%rsi), %rcx + mov -4(%rsi), %edx + mov %rcx, -9(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_1bytes): + mov -1(%rsi), %dl + mov %dl, -1(%rdi) + ret + + .p2align 4 +L(write_74bytes): + movdqu -74(%rsi), %xmm0 + movdqu -58(%rsi), %xmm1 + mov -42(%rsi), %r8 + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -74(%rdi) + movdqu %xmm1, -58(%rdi) + mov %r8, -42(%rdi) + mov %r9, -34(%rdi) + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_66bytes): + movdqu -66(%rsi), %xmm0 + movdqu -50(%rsi), %xmm1 + mov -42(%rsi), %r8 + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -66(%rdi) + movdqu %xmm1, -50(%rdi) + mov %r8, -42(%rdi) + mov %r9, -34(%rdi) + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_58bytes): + movdqu -58(%rsi), %xmm1 + mov -42(%rsi), %r8 + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm1, -58(%rdi) + mov %r8, -42(%rdi) + mov %r9, -34(%rdi) + mov 
%r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_50bytes): + movdqu -50(%rsi), %xmm0 + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -50(%rdi) + mov %r9, -34(%rdi) + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_42bytes): + mov -42(%rsi), %r8 + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + mov %r8, -42(%rdi) + mov %r9, -34(%rdi) + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_34bytes): + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + mov %r9, -34(%rdi) + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_26bytes): + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_18bytes): + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_10bytes): + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_2bytes): + mov -2(%rsi), %dx + mov %dx, -2(%rdi) + ret + + .p2align 4 +L(write_75bytes): + movdqu -75(%rsi), %xmm0 + movdqu -59(%rsi), %xmm1 + mov -43(%rsi), %r8 + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -75(%rdi) + movdqu %xmm1, -59(%rdi) + mov %r8, -43(%rdi) + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_67bytes): + movdqu -67(%rsi), %xmm0 
+ movdqu -59(%rsi), %xmm1 + mov -43(%rsi), %r8 + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -67(%rdi) + movdqu %xmm1, -59(%rdi) + mov %r8, -43(%rdi) + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_59bytes): + movdqu -59(%rsi), %xmm0 + mov -43(%rsi), %r8 + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -59(%rdi) + mov %r8, -43(%rdi) + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_51bytes): + movdqu -51(%rsi), %xmm0 + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -51(%rdi) + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_43bytes): + mov -43(%rsi), %r8 + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + mov %r8, -43(%rdi) + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_35bytes): + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_27bytes): + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_19bytes): + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_11bytes): + mov -11(%rsi), %rcx + mov 
-4(%rsi), %edx + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_3bytes): + mov -3(%rsi), %dx + mov -2(%rsi), %cx + mov %dx, -3(%rdi) + mov %cx, -2(%rdi) + ret + + .p2align 4 +L(write_76bytes): + movdqu -76(%rsi), %xmm0 + movdqu -60(%rsi), %xmm1 + mov -44(%rsi), %r8 + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -76(%rdi) + movdqu %xmm1, -60(%rdi) + mov %r8, -44(%rdi) + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_68bytes): + movdqu -68(%rsi), %xmm0 + movdqu -52(%rsi), %xmm1 + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -68(%rdi) + movdqu %xmm1, -52(%rdi) + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_60bytes): + movdqu -60(%rsi), %xmm0 + mov -44(%rsi), %r8 + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -60(%rdi) + mov %r8, -44(%rdi) + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_52bytes): + movdqu -52(%rsi), %xmm0 + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -52(%rdi) + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_44bytes): + mov -44(%rsi), %r8 + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + mov %r8, -44(%rdi) + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_36bytes): + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 
+ mov -12(%rsi), %rcx + mov -4(%rsi), %edx + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_28bytes): + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_20bytes): + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_12bytes): + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_4bytes): + mov -4(%rsi), %edx + mov %edx, -4(%rdi) + ret + + .p2align 4 +L(write_77bytes): + movdqu -77(%rsi), %xmm0 + movdqu -61(%rsi), %xmm1 + mov -45(%rsi), %r8 + mov -37(%rsi), %r9 + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -77(%rdi) + movdqu %xmm1, -61(%rdi) + mov %r8, -45(%rdi) + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_69bytes): + movdqu -69(%rsi), %xmm0 + movdqu -53(%rsi), %xmm1 + mov -37(%rsi), %r9 + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -69(%rdi) + movdqu %xmm1, -53(%rdi) + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_61bytes): + movdqu -61(%rsi), %xmm0 + mov -45(%rsi), %r8 + mov -37(%rsi), %r9 + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -61(%rdi) + mov %r8, -45(%rdi) + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_53bytes): + movdqu -53(%rsi), %xmm0 + mov -45(%rsi), %r8 + mov -37(%rsi), %r9 + mov -29(%rsi), %r10 + mov 
-21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -53(%rdi) + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_45bytes): + mov -45(%rsi), %r8 + mov -37(%rsi), %r9 + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r8, -45(%rdi) + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_37bytes): + mov -37(%rsi), %r9 + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_29bytes): + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_21bytes): + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_13bytes): + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_5bytes): + mov -5(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -5(%rdi) + mov %ecx, -4(%rdi) + ret + + .p2align 4 +L(write_78bytes): + movdqu -78(%rsi), %xmm0 + movdqu -62(%rsi), %xmm1 + mov -46(%rsi), %r8 + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -78(%rdi) + movdqu %xmm1, -62(%rdi) + mov %r8, -46(%rdi) + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_70bytes): + movdqu -70(%rsi), %xmm0 + movdqu -54(%rsi), %xmm1 + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, 
-70(%rdi) + movdqu %xmm1, -54(%rdi) + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_62bytes): + movdqu -62(%rsi), %xmm0 + mov -46(%rsi), %r8 + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -62(%rdi) + mov %r8, -46(%rdi) + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_54bytes): + movdqu -54(%rsi), %xmm0 + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -54(%rdi) + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_46bytes): + mov -46(%rsi), %r8 + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r8, -46(%rdi) + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_38bytes): + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_30bytes): + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_22bytes): + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_14bytes): + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_6bytes): + mov -6(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -6(%rdi) + mov %ecx, -4(%rdi) + ret + + 
.p2align 4 +L(write_79bytes): + movdqu -79(%rsi), %xmm0 + movdqu -63(%rsi), %xmm1 + mov -47(%rsi), %r8 + mov -39(%rsi), %r9 + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -79(%rdi) + movdqu %xmm1, -63(%rdi) + mov %r8, -47(%rdi) + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_71bytes): + movdqu -71(%rsi), %xmm0 + movdqu -55(%rsi), %xmm1 + mov -39(%rsi), %r9 + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -71(%rdi) + movdqu %xmm1, -55(%rdi) + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_63bytes): + movdqu -63(%rsi), %xmm0 + mov -47(%rsi), %r8 + mov -39(%rsi), %r9 + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -63(%rdi) + mov %r8, -47(%rdi) + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_55bytes): + movdqu -55(%rsi), %xmm0 + mov -39(%rsi), %r9 + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -55(%rdi) + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_47bytes): + mov -47(%rsi), %r8 + mov -39(%rsi), %r9 + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r8, -47(%rdi) + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_39bytes): + mov -39(%rsi), %r9 + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 
+L(write_31bytes): + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_23bytes): + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_15bytes): + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret + + .p2align 4 +L(write_7bytes): + mov -7(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -7(%rdi) + mov %ecx, -4(%rdi) + ret + + .p2align 4 +L(large_page_fwd): + movdqu (%rsi), %xmm1 + lea 16(%rsi), %rsi + movdqu %xmm0, (%r8) + movntdq %xmm1, (%rdi) + lea 16(%rdi), %rdi + lea -0x90(%rdx), %rdx +#ifdef USE_AS_MEMMOVE + mov %rsi, %r9 + sub %rdi, %r9 + cmp %rdx, %r9 + jae L(memmove_is_memcpy_fwd) + shl $2, %rcx + cmp %rcx, %rdx + jb L(ll_cache_copy_fwd_start) +L(memmove_is_memcpy_fwd): +#endif +L(large_page_loop): + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + movdqu 0x40(%rsi), %xmm4 + movdqu 0x50(%rsi), %xmm5 + movdqu 0x60(%rsi), %xmm6 + movdqu 0x70(%rsi), %xmm7 + lea 0x80(%rsi), %rsi + + sub $0x80, %rdx + movntdq %xmm0, (%rdi) + movntdq %xmm1, 0x10(%rdi) + movntdq %xmm2, 0x20(%rdi) + movntdq %xmm3, 0x30(%rdi) + movntdq %xmm4, 0x40(%rdi) + movntdq %xmm5, 0x50(%rdi) + movntdq %xmm6, 0x60(%rdi) + movntdq %xmm7, 0x70(%rdi) + lea 0x80(%rdi), %rdi + jae L(large_page_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(large_page_less_64bytes) + + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + lea 0x40(%rsi), %rsi + + movntdq %xmm0, (%rdi) + movntdq %xmm1, 0x10(%rdi) + movntdq %xmm2, 0x20(%rdi) + movntdq %xmm3, 0x30(%rdi) + lea 0x40(%rdi), %rdi + sub $0x40, %rdx +L(large_page_less_64bytes): + add %rdx, %rsi + add %rdx, %rdi + sfence + BRANCH_TO_JMPTBL_ENTRY 
(L(table_less_80bytes), %rdx, 4) + +#ifdef USE_AS_MEMMOVE + .p2align 4 +L(ll_cache_copy_fwd_start): + prefetcht0 0x1c0(%rsi) + prefetcht0 0x200(%rsi) + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + movdqu 0x40(%rsi), %xmm4 + movdqu 0x50(%rsi), %xmm5 + movdqu 0x60(%rsi), %xmm6 + movdqu 0x70(%rsi), %xmm7 + lea 0x80(%rsi), %rsi + + sub $0x80, %rdx + movaps %xmm0, (%rdi) + movaps %xmm1, 0x10(%rdi) + movaps %xmm2, 0x20(%rdi) + movaps %xmm3, 0x30(%rdi) + movaps %xmm4, 0x40(%rdi) + movaps %xmm5, 0x50(%rdi) + movaps %xmm6, 0x60(%rdi) + movaps %xmm7, 0x70(%rdi) + lea 0x80(%rdi), %rdi + jae L(ll_cache_copy_fwd_start) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(large_page_ll_less_fwd_64bytes) + + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + lea 0x40(%rsi), %rsi + + movaps %xmm0, (%rdi) + movaps %xmm1, 0x10(%rdi) + movaps %xmm2, 0x20(%rdi) + movaps %xmm3, 0x30(%rdi) + lea 0x40(%rdi), %rdi + sub $0x40, %rdx +L(large_page_ll_less_fwd_64bytes): + add %rdx, %rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + +#endif + .p2align 4 +L(large_page_bwd): + movdqu -0x10(%rsi), %xmm1 + lea -16(%rsi), %rsi + movdqu %xmm0, (%r8) + movdqa %xmm1, -0x10(%rdi) + lea -16(%rdi), %rdi + lea -0x90(%rdx), %rdx +#ifdef USE_AS_MEMMOVE + mov %rdi, %r9 + sub %rsi, %r9 + cmp %rdx, %r9 + jae L(memmove_is_memcpy_bwd) + cmp %rcx, %r9 + jb L(ll_cache_copy_bwd_start) +L(memmove_is_memcpy_bwd): +#endif +L(large_page_bwd_loop): + movdqu -0x10(%rsi), %xmm0 + movdqu -0x20(%rsi), %xmm1 + movdqu -0x30(%rsi), %xmm2 + movdqu -0x40(%rsi), %xmm3 + movdqu -0x50(%rsi), %xmm4 + movdqu -0x60(%rsi), %xmm5 + movdqu -0x70(%rsi), %xmm6 + movdqu -0x80(%rsi), %xmm7 + lea -0x80(%rsi), %rsi + + sub $0x80, %rdx + movntdq %xmm0, -0x10(%rdi) + movntdq %xmm1, -0x20(%rdi) + movntdq %xmm2, -0x30(%rdi) + movntdq %xmm3, -0x40(%rdi) + movntdq %xmm4, -0x50(%rdi) + movntdq %xmm5, -0x60(%rdi) 
+ movntdq %xmm6, -0x70(%rdi) + movntdq %xmm7, -0x80(%rdi) + lea -0x80(%rdi), %rdi + jae L(large_page_bwd_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(large_page_less_bwd_64bytes) + + movdqu -0x10(%rsi), %xmm0 + movdqu -0x20(%rsi), %xmm1 + movdqu -0x30(%rsi), %xmm2 + movdqu -0x40(%rsi), %xmm3 + lea -0x40(%rsi), %rsi + + movntdq %xmm0, -0x10(%rdi) + movntdq %xmm1, -0x20(%rdi) + movntdq %xmm2, -0x30(%rdi) + movntdq %xmm3, -0x40(%rdi) + lea -0x40(%rdi), %rdi + sub $0x40, %rdx +L(large_page_less_bwd_64bytes): + sfence + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + +#ifdef USE_AS_MEMMOVE + .p2align 4 +L(ll_cache_copy_bwd_start): + prefetcht0 -0x1c0(%rsi) + prefetcht0 -0x200(%rsi) + movdqu -0x10(%rsi), %xmm0 + movdqu -0x20(%rsi), %xmm1 + movdqu -0x30(%rsi), %xmm2 + movdqu -0x40(%rsi), %xmm3 + movdqu -0x50(%rsi), %xmm4 + movdqu -0x60(%rsi), %xmm5 + movdqu -0x70(%rsi), %xmm6 + movdqu -0x80(%rsi), %xmm7 + lea -0x80(%rsi), %rsi + + sub $0x80, %rdx + movaps %xmm0, -0x10(%rdi) + movaps %xmm1, -0x20(%rdi) + movaps %xmm2, -0x30(%rdi) + movaps %xmm3, -0x40(%rdi) + movaps %xmm4, -0x50(%rdi) + movaps %xmm5, -0x60(%rdi) + movaps %xmm6, -0x70(%rdi) + movaps %xmm7, -0x80(%rdi) + lea -0x80(%rdi), %rdi + jae L(ll_cache_copy_bwd_start) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(large_page_ll_less_bwd_64bytes) + + movdqu -0x10(%rsi), %xmm0 + movdqu -0x20(%rsi), %xmm1 + movdqu -0x30(%rsi), %xmm2 + movdqu -0x40(%rsi), %xmm3 + lea -0x40(%rsi), %rsi + + movaps %xmm0, -0x10(%rdi) + movaps %xmm1, -0x20(%rdi) + movaps %xmm2, -0x30(%rdi) + movaps %xmm3, -0x40(%rdi) + lea -0x40(%rdi), %rdi + sub $0x40, %rdx +L(large_page_ll_less_bwd_64bytes): + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) +#endif + +END (MEMCPY) + + .section .rodata.ssse3,"a",@progbits + .p2align 3 +L(table_less_80bytes): + .int JMPTBL (L(write_0bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_1bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_2bytes), L(table_less_80bytes)) + .int 
JMPTBL (L(write_3bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_4bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_5bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_6bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_7bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_8bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_9bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_10bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_11bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_12bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_13bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_14bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_15bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_16bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_17bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_18bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_19bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_20bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_21bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_22bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_23bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_24bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_25bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_26bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_27bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_28bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_29bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_30bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_31bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_32bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_33bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_34bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_35bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_36bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_37bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_38bytes), 
L(table_less_80bytes)) + .int JMPTBL (L(write_39bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_40bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_41bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_42bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_43bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_44bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_45bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_46bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_47bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_48bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_49bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_50bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_51bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_52bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_53bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_54bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_55bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_56bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_57bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_58bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_59bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_60bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_61bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_62bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_63bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_64bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_65bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_66bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_67bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_68bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_69bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_70bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_71bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_72bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_73bytes), L(table_less_80bytes)) + .int JMPTBL 
(L(write_74bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_75bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_76bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_77bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_78bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_79bytes), L(table_less_80bytes)) + + .p2align 3 +L(shl_table): + .int JMPTBL (L(shl_0), L(shl_table)) + .int JMPTBL (L(shl_1), L(shl_table)) + .int JMPTBL (L(shl_2), L(shl_table)) + .int JMPTBL (L(shl_3), L(shl_table)) + .int JMPTBL (L(shl_4), L(shl_table)) + .int JMPTBL (L(shl_5), L(shl_table)) + .int JMPTBL (L(shl_6), L(shl_table)) + .int JMPTBL (L(shl_7), L(shl_table)) + .int JMPTBL (L(shl_8), L(shl_table)) + .int JMPTBL (L(shl_9), L(shl_table)) + .int JMPTBL (L(shl_10), L(shl_table)) + .int JMPTBL (L(shl_11), L(shl_table)) + .int JMPTBL (L(shl_12), L(shl_table)) + .int JMPTBL (L(shl_13), L(shl_table)) + .int JMPTBL (L(shl_14), L(shl_table)) + .int JMPTBL (L(shl_15), L(shl_table)) + + .p2align 3 +L(shl_table_bwd): + .int JMPTBL (L(shl_0_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_1_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_2_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_3_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_4_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_5_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_6_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_7_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_8_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_9_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_10_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_11_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_12_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_13_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_14_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_15_bwd), L(shl_table_bwd)) + +#endif diff --git a/utils/memcpy-bench/glibc/memmove-avx-unaligned-erms.S b/utils/memcpy-bench/glibc/memmove-avx-unaligned-erms.S new file mode 100644 index 00000000000..9ee6f0a71c3 --- /dev/null +++ 
b/utils/memcpy-bench/glibc/memmove-avx-unaligned-erms.S @@ -0,0 +1,12 @@ +#if 1 +# define VEC_SIZE 32 +# define VEC(i) ymm##i +# define VMOVNT vmovntdq +# define VMOVU vmovdqu +# define VMOVA vmovdqa + +# define SECTION(p) p##.avx +# define MEMMOVE_SYMBOL(p,s) p##_avx_##s + +# include "memmove-vec-unaligned-erms.S" +#endif diff --git a/utils/memcpy-bench/glibc/memmove-avx512-no-vzeroupper.S b/utils/memcpy-bench/glibc/memmove-avx512-no-vzeroupper.S new file mode 100644 index 00000000000..b14d92fd6a8 --- /dev/null +++ b/utils/memcpy-bench/glibc/memmove-avx512-no-vzeroupper.S @@ -0,0 +1,419 @@ +/* memmove/memcpy/mempcpy optimized with AVX512 for KNL hardware. + Copyright (C) 2016-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . 
*/ + +#include "sysdep.h" + +#if 1 + +# include "asm-syntax.h" + + .section .text.avx512,"ax",@progbits +ENTRY (__mempcpy_chk_avx512_no_vzeroupper) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (__mempcpy_chk_avx512_no_vzeroupper) + +ENTRY (__mempcpy_avx512_no_vzeroupper) + mov %RDI_LP, %RAX_LP + add %RDX_LP, %RAX_LP + jmp L(start) +END (__mempcpy_avx512_no_vzeroupper) + +ENTRY (__memmove_chk_avx512_no_vzeroupper) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (__memmove_chk_avx512_no_vzeroupper) + +ENTRY (__memmove_avx512_no_vzeroupper) + mov %RDI_LP, %RAX_LP +# ifdef USE_AS_MEMPCPY + add %RDX_LP, %RAX_LP +# endif +L(start): +# ifdef __ILP32__ + /* Clear the upper 32 bits. */ + mov %edx, %edx +# endif + lea (%rsi, %rdx), %rcx + lea (%rdi, %rdx), %r9 + cmp $512, %rdx + ja L(512bytesormore) + +L(check): + cmp $16, %rdx + jbe L(less_16bytes) + cmp $256, %rdx + jb L(less_256bytes) + vmovups (%rsi), %zmm0 + vmovups 0x40(%rsi), %zmm1 + vmovups 0x80(%rsi), %zmm2 + vmovups 0xC0(%rsi), %zmm3 + vmovups -0x100(%rcx), %zmm4 + vmovups -0xC0(%rcx), %zmm5 + vmovups -0x80(%rcx), %zmm6 + vmovups -0x40(%rcx), %zmm7 + vmovups %zmm0, (%rdi) + vmovups %zmm1, 0x40(%rdi) + vmovups %zmm2, 0x80(%rdi) + vmovups %zmm3, 0xC0(%rdi) + vmovups %zmm4, -0x100(%r9) + vmovups %zmm5, -0xC0(%r9) + vmovups %zmm6, -0x80(%r9) + vmovups %zmm7, -0x40(%r9) + ret + +L(less_256bytes): + cmp $128, %dl + jb L(less_128bytes) + vmovups (%rsi), %zmm0 + vmovups 0x40(%rsi), %zmm1 + vmovups -0x80(%rcx), %zmm2 + vmovups -0x40(%rcx), %zmm3 + vmovups %zmm0, (%rdi) + vmovups %zmm1, 0x40(%rdi) + vmovups %zmm2, -0x80(%r9) + vmovups %zmm3, -0x40(%r9) + ret + +L(less_128bytes): + cmp $64, %dl + jb L(less_64bytes) + vmovdqu (%rsi), %ymm0 + vmovdqu 0x20(%rsi), %ymm1 + vmovdqu -0x40(%rcx), %ymm2 + vmovdqu -0x20(%rcx), %ymm3 + vmovdqu %ymm0, (%rdi) + vmovdqu %ymm1, 0x20(%rdi) + vmovdqu %ymm2, -0x40(%r9) + vmovdqu %ymm3, -0x20(%r9) + ret + +L(less_64bytes): + cmp $32, %dl + jb L(less_32bytes) + 
vmovdqu (%rsi), %ymm0 + vmovdqu -0x20(%rcx), %ymm1 + vmovdqu %ymm0, (%rdi) + vmovdqu %ymm1, -0x20(%r9) + ret + +L(less_32bytes): + vmovdqu (%rsi), %xmm0 + vmovdqu -0x10(%rcx), %xmm1 + vmovdqu %xmm0, (%rdi) + vmovdqu %xmm1, -0x10(%r9) + ret + +L(less_16bytes): + cmp $8, %dl + jb L(less_8bytes) + movq (%rsi), %rsi + movq -0x8(%rcx), %rcx + movq %rsi, (%rdi) + movq %rcx, -0x8(%r9) + ret + +L(less_8bytes): + cmp $4, %dl + jb L(less_4bytes) + mov (%rsi), %esi + mov -0x4(%rcx), %ecx + mov %esi, (%rdi) + mov %ecx, -0x4(%r9) + ret + +L(less_4bytes): + cmp $2, %dl + jb L(less_2bytes) + mov (%rsi), %si + mov -0x2(%rcx), %cx + mov %si, (%rdi) + mov %cx, -0x2(%r9) + ret + +L(less_2bytes): + cmp $1, %dl + jb L(less_1bytes) + mov (%rsi), %cl + mov %cl, (%rdi) +L(less_1bytes): + ret + +L(512bytesormore): +# ifdef SHARED_CACHE_SIZE_HALF + mov $SHARED_CACHE_SIZE_HALF, %r8 +# else + mov __x86_shared_cache_size_half(%rip), %r8 +# endif + cmp %r8, %rdx + jae L(preloop_large) + cmp $1024, %rdx + ja L(1024bytesormore) + prefetcht1 (%rsi) + prefetcht1 0x40(%rsi) + prefetcht1 0x80(%rsi) + prefetcht1 0xC0(%rsi) + prefetcht1 0x100(%rsi) + prefetcht1 0x140(%rsi) + prefetcht1 0x180(%rsi) + prefetcht1 0x1C0(%rsi) + prefetcht1 -0x200(%rcx) + prefetcht1 -0x1C0(%rcx) + prefetcht1 -0x180(%rcx) + prefetcht1 -0x140(%rcx) + prefetcht1 -0x100(%rcx) + prefetcht1 -0xC0(%rcx) + prefetcht1 -0x80(%rcx) + prefetcht1 -0x40(%rcx) + vmovups (%rsi), %zmm0 + vmovups 0x40(%rsi), %zmm1 + vmovups 0x80(%rsi), %zmm2 + vmovups 0xC0(%rsi), %zmm3 + vmovups 0x100(%rsi), %zmm4 + vmovups 0x140(%rsi), %zmm5 + vmovups 0x180(%rsi), %zmm6 + vmovups 0x1C0(%rsi), %zmm7 + vmovups -0x200(%rcx), %zmm8 + vmovups -0x1C0(%rcx), %zmm9 + vmovups -0x180(%rcx), %zmm10 + vmovups -0x140(%rcx), %zmm11 + vmovups -0x100(%rcx), %zmm12 + vmovups -0xC0(%rcx), %zmm13 + vmovups -0x80(%rcx), %zmm14 + vmovups -0x40(%rcx), %zmm15 + vmovups %zmm0, (%rdi) + vmovups %zmm1, 0x40(%rdi) + vmovups %zmm2, 0x80(%rdi) + vmovups %zmm3, 0xC0(%rdi) + vmovups 
%zmm4, 0x100(%rdi) + vmovups %zmm5, 0x140(%rdi) + vmovups %zmm6, 0x180(%rdi) + vmovups %zmm7, 0x1C0(%rdi) + vmovups %zmm8, -0x200(%r9) + vmovups %zmm9, -0x1C0(%r9) + vmovups %zmm10, -0x180(%r9) + vmovups %zmm11, -0x140(%r9) + vmovups %zmm12, -0x100(%r9) + vmovups %zmm13, -0xC0(%r9) + vmovups %zmm14, -0x80(%r9) + vmovups %zmm15, -0x40(%r9) + ret + +L(1024bytesormore): + cmp %rsi, %rdi + ja L(1024bytesormore_bkw) + sub $512, %r9 + vmovups -0x200(%rcx), %zmm8 + vmovups -0x1C0(%rcx), %zmm9 + vmovups -0x180(%rcx), %zmm10 + vmovups -0x140(%rcx), %zmm11 + vmovups -0x100(%rcx), %zmm12 + vmovups -0xC0(%rcx), %zmm13 + vmovups -0x80(%rcx), %zmm14 + vmovups -0x40(%rcx), %zmm15 + prefetcht1 (%rsi) + prefetcht1 0x40(%rsi) + prefetcht1 0x80(%rsi) + prefetcht1 0xC0(%rsi) + prefetcht1 0x100(%rsi) + prefetcht1 0x140(%rsi) + prefetcht1 0x180(%rsi) + prefetcht1 0x1C0(%rsi) + +/* Loop with unaligned memory access. */ +L(gobble_512bytes_loop): + vmovups (%rsi), %zmm0 + vmovups 0x40(%rsi), %zmm1 + vmovups 0x80(%rsi), %zmm2 + vmovups 0xC0(%rsi), %zmm3 + vmovups 0x100(%rsi), %zmm4 + vmovups 0x140(%rsi), %zmm5 + vmovups 0x180(%rsi), %zmm6 + vmovups 0x1C0(%rsi), %zmm7 + add $512, %rsi + prefetcht1 (%rsi) + prefetcht1 0x40(%rsi) + prefetcht1 0x80(%rsi) + prefetcht1 0xC0(%rsi) + prefetcht1 0x100(%rsi) + prefetcht1 0x140(%rsi) + prefetcht1 0x180(%rsi) + prefetcht1 0x1C0(%rsi) + vmovups %zmm0, (%rdi) + vmovups %zmm1, 0x40(%rdi) + vmovups %zmm2, 0x80(%rdi) + vmovups %zmm3, 0xC0(%rdi) + vmovups %zmm4, 0x100(%rdi) + vmovups %zmm5, 0x140(%rdi) + vmovups %zmm6, 0x180(%rdi) + vmovups %zmm7, 0x1C0(%rdi) + add $512, %rdi + cmp %r9, %rdi + jb L(gobble_512bytes_loop) + vmovups %zmm8, (%r9) + vmovups %zmm9, 0x40(%r9) + vmovups %zmm10, 0x80(%r9) + vmovups %zmm11, 0xC0(%r9) + vmovups %zmm12, 0x100(%r9) + vmovups %zmm13, 0x140(%r9) + vmovups %zmm14, 0x180(%r9) + vmovups %zmm15, 0x1C0(%r9) + ret + +L(1024bytesormore_bkw): + add $512, %rdi + vmovups 0x1C0(%rsi), %zmm8 + vmovups 0x180(%rsi), %zmm9 + vmovups 
0x140(%rsi), %zmm10 + vmovups 0x100(%rsi), %zmm11 + vmovups 0xC0(%rsi), %zmm12 + vmovups 0x80(%rsi), %zmm13 + vmovups 0x40(%rsi), %zmm14 + vmovups (%rsi), %zmm15 + prefetcht1 -0x40(%rcx) + prefetcht1 -0x80(%rcx) + prefetcht1 -0xC0(%rcx) + prefetcht1 -0x100(%rcx) + prefetcht1 -0x140(%rcx) + prefetcht1 -0x180(%rcx) + prefetcht1 -0x1C0(%rcx) + prefetcht1 -0x200(%rcx) + +/* Backward loop with unaligned memory access. */ +L(gobble_512bytes_loop_bkw): + vmovups -0x40(%rcx), %zmm0 + vmovups -0x80(%rcx), %zmm1 + vmovups -0xC0(%rcx), %zmm2 + vmovups -0x100(%rcx), %zmm3 + vmovups -0x140(%rcx), %zmm4 + vmovups -0x180(%rcx), %zmm5 + vmovups -0x1C0(%rcx), %zmm6 + vmovups -0x200(%rcx), %zmm7 + sub $512, %rcx + prefetcht1 -0x40(%rcx) + prefetcht1 -0x80(%rcx) + prefetcht1 -0xC0(%rcx) + prefetcht1 -0x100(%rcx) + prefetcht1 -0x140(%rcx) + prefetcht1 -0x180(%rcx) + prefetcht1 -0x1C0(%rcx) + prefetcht1 -0x200(%rcx) + vmovups %zmm0, -0x40(%r9) + vmovups %zmm1, -0x80(%r9) + vmovups %zmm2, -0xC0(%r9) + vmovups %zmm3, -0x100(%r9) + vmovups %zmm4, -0x140(%r9) + vmovups %zmm5, -0x180(%r9) + vmovups %zmm6, -0x1C0(%r9) + vmovups %zmm7, -0x200(%r9) + sub $512, %r9 + cmp %rdi, %r9 + ja L(gobble_512bytes_loop_bkw) + vmovups %zmm8, -0x40(%rdi) + vmovups %zmm9, -0x80(%rdi) + vmovups %zmm10, -0xC0(%rdi) + vmovups %zmm11, -0x100(%rdi) + vmovups %zmm12, -0x140(%rdi) + vmovups %zmm13, -0x180(%rdi) + vmovups %zmm14, -0x1C0(%rdi) + vmovups %zmm15, -0x200(%rdi) + ret + +L(preloop_large): + cmp %rsi, %rdi + ja L(preloop_large_bkw) + vmovups (%rsi), %zmm4 + vmovups 0x40(%rsi), %zmm5 + + mov %rdi, %r11 +/* Align destination for access with non-temporal stores in the loop. 
*/ + mov %rdi, %r8 + and $-0x80, %rdi + add $0x80, %rdi + sub %rdi, %r8 + sub %r8, %rsi + add %r8, %rdx +L(gobble_256bytes_nt_loop): + prefetcht1 0x200(%rsi) + prefetcht1 0x240(%rsi) + prefetcht1 0x280(%rsi) + prefetcht1 0x2C0(%rsi) + prefetcht1 0x300(%rsi) + prefetcht1 0x340(%rsi) + prefetcht1 0x380(%rsi) + prefetcht1 0x3C0(%rsi) + vmovdqu64 (%rsi), %zmm0 + vmovdqu64 0x40(%rsi), %zmm1 + vmovdqu64 0x80(%rsi), %zmm2 + vmovdqu64 0xC0(%rsi), %zmm3 + vmovntdq %zmm0, (%rdi) + vmovntdq %zmm1, 0x40(%rdi) + vmovntdq %zmm2, 0x80(%rdi) + vmovntdq %zmm3, 0xC0(%rdi) + sub $256, %rdx + add $256, %rsi + add $256, %rdi + cmp $256, %rdx + ja L(gobble_256bytes_nt_loop) + sfence + vmovups %zmm4, (%r11) + vmovups %zmm5, 0x40(%r11) + jmp L(check) + +L(preloop_large_bkw): + vmovups -0x80(%rcx), %zmm4 + vmovups -0x40(%rcx), %zmm5 + +/* Align end of destination for access with non-temporal stores. */ + mov %r9, %r8 + and $-0x80, %r9 + sub %r9, %r8 + sub %r8, %rcx + sub %r8, %rdx + add %r9, %r8 +L(gobble_256bytes_nt_loop_bkw): + prefetcht1 -0x400(%rcx) + prefetcht1 -0x3C0(%rcx) + prefetcht1 -0x380(%rcx) + prefetcht1 -0x340(%rcx) + prefetcht1 -0x300(%rcx) + prefetcht1 -0x2C0(%rcx) + prefetcht1 -0x280(%rcx) + prefetcht1 -0x240(%rcx) + vmovdqu64 -0x100(%rcx), %zmm0 + vmovdqu64 -0xC0(%rcx), %zmm1 + vmovdqu64 -0x80(%rcx), %zmm2 + vmovdqu64 -0x40(%rcx), %zmm3 + vmovntdq %zmm0, -0x100(%r9) + vmovntdq %zmm1, -0xC0(%r9) + vmovntdq %zmm2, -0x80(%r9) + vmovntdq %zmm3, -0x40(%r9) + sub $256, %rdx + sub $256, %rcx + sub $256, %r9 + cmp $256, %rdx + ja L(gobble_256bytes_nt_loop_bkw) + sfence + vmovups %zmm4, -0x80(%r8) + vmovups %zmm5, -0x40(%r8) + jmp L(check) +END (__memmove_avx512_no_vzeroupper) + +strong_alias (__memmove_avx512_no_vzeroupper, __memcpy_avx512_no_vzeroupper) +strong_alias (__memmove_chk_avx512_no_vzeroupper, __memcpy_chk_avx512_no_vzeroupper) +#endif diff --git a/utils/memcpy-bench/glibc/memmove-avx512-unaligned-erms.S b/utils/memcpy-bench/glibc/memmove-avx512-unaligned-erms.S new 
file mode 100644 index 00000000000..db70fdf1b4e --- /dev/null +++ b/utils/memcpy-bench/glibc/memmove-avx512-unaligned-erms.S @@ -0,0 +1,12 @@ +#if 1 +# define VEC_SIZE 64 +# define VEC(i) zmm##i +# define VMOVNT vmovntdq +# define VMOVU vmovdqu64 +# define VMOVA vmovdqa64 + +# define SECTION(p) p##.avx512 +# define MEMMOVE_SYMBOL(p,s) p##_avx512_##s + +# include "memmove-vec-unaligned-erms.S" +#endif diff --git a/utils/memcpy-bench/glibc/memmove-sse2-unaligned-erms.S b/utils/memcpy-bench/glibc/memmove-sse2-unaligned-erms.S new file mode 100644 index 00000000000..17b4f861621 --- /dev/null +++ b/utils/memcpy-bench/glibc/memmove-sse2-unaligned-erms.S @@ -0,0 +1,33 @@ +/* memmove with SSE2. + Copyright (C) 2017-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#if 1 +# define MEMMOVE_SYMBOL(p,s) p##_sse2_##s +#else +weak_alias (__mempcpy, mempcpy) +#endif + +#include "memmove.S" + +#if defined SHARED +# include +# if SHLIB_COMPAT (libc, GLIBC_2_2_5, GLIBC_2_14) +/* Use __memmove_sse2_unaligned to support overlapping addresses. 
*/ +compat_symbol (libc, __memmove_sse2_unaligned, memcpy, GLIBC_2_2_5); +# endif +#endif diff --git a/utils/memcpy-bench/glibc/memmove-vec-unaligned-erms.S b/utils/memcpy-bench/glibc/memmove-vec-unaligned-erms.S new file mode 100644 index 00000000000..21be351b4e7 --- /dev/null +++ b/utils/memcpy-bench/glibc/memmove-vec-unaligned-erms.S @@ -0,0 +1,559 @@ +/* memmove/memcpy/mempcpy with unaligned load/store and rep movsb + Copyright (C) 2016-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +/* memmove/memcpy/mempcpy is implemented as: + 1. Use overlapping load and store to avoid branch. + 2. Load all sources into registers and store them together to avoid + possible address overlap between source and destination. + 3. If size is 8 * VEC_SIZE or less, load all sources into registers + and store them together. + 4. If address of destination > address of source, backward copy + 4 * VEC_SIZE at a time with unaligned load and aligned store. + Load the first 4 * VEC and last VEC before the loop and store + them after the loop to support overlapping addresses. + 5. Otherwise, forward copy 4 * VEC_SIZE at a time with unaligned + load and aligned store. Load the last 4 * VEC and first VEC + before the loop and store them after the loop to support + overlapping addresses. + 6. 
If size >= __x86_shared_non_temporal_threshold and there is no + overlap between destination and source, use non-temporal store + instead of aligned store. */ + +#include "sysdep.h" + +#ifndef MEMCPY_SYMBOL +# define MEMCPY_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s) +#endif + +#ifndef MEMPCPY_SYMBOL +# define MEMPCPY_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s) +#endif + +#ifndef MEMMOVE_CHK_SYMBOL +# define MEMMOVE_CHK_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s) +#endif + +#ifndef VZEROUPPER +# if VEC_SIZE > 16 +# define VZEROUPPER vzeroupper +# else +# define VZEROUPPER +# endif +#endif + +#ifndef PREFETCH +# define PREFETCH(addr) prefetcht0 addr +#endif + +/* Assume 64-byte prefetch size. */ +#ifndef PREFETCH_SIZE +# define PREFETCH_SIZE 64 +#endif + +#define PREFETCHED_LOAD_SIZE (VEC_SIZE * 4) + +#if PREFETCH_SIZE == 64 +# if PREFETCHED_LOAD_SIZE == PREFETCH_SIZE +# define PREFETCH_ONE_SET(dir, base, offset) \ + PREFETCH ((offset)base) +# elif PREFETCHED_LOAD_SIZE == 2 * PREFETCH_SIZE +# define PREFETCH_ONE_SET(dir, base, offset) \ + PREFETCH ((offset)base); \ + PREFETCH ((offset + dir * PREFETCH_SIZE)base) +# elif PREFETCHED_LOAD_SIZE == 4 * PREFETCH_SIZE +# define PREFETCH_ONE_SET(dir, base, offset) \ + PREFETCH ((offset)base); \ + PREFETCH ((offset + dir * PREFETCH_SIZE)base); \ + PREFETCH ((offset + dir * PREFETCH_SIZE * 2)base); \ + PREFETCH ((offset + dir * PREFETCH_SIZE * 3)base) +# else +# error Unsupported PREFETCHED_LOAD_SIZE! +# endif +#else +# error Unsupported PREFETCH_SIZE! +#endif + +#ifndef SECTION +# error SECTION is not defined! 
+#endif + + .section SECTION(.text),"ax",@progbits +#if defined SHARED +ENTRY (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned)) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned)) +#endif + +ENTRY (MEMPCPY_SYMBOL (__mempcpy, unaligned)) + mov %RDI_LP, %RAX_LP + add %RDX_LP, %RAX_LP + jmp L(start) +END (MEMPCPY_SYMBOL (__mempcpy, unaligned)) + +#if defined SHARED +ENTRY (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned)) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned)) +#endif + +ENTRY (MEMMOVE_SYMBOL (__memmove, unaligned)) + movq %rdi, %rax +L(start): +# ifdef __ILP32__ + /* Clear the upper 32 bits. */ + movl %edx, %edx +# endif + cmp $VEC_SIZE, %RDX_LP + jb L(less_vec) + cmp $(VEC_SIZE * 2), %RDX_LP + ja L(more_2x_vec) +#if !defined USE_MULTIARCH +L(last_2x_vec): +#endif + /* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. */ + VMOVU (%rsi), %VEC(0) + VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(1) + VMOVU %VEC(0), (%rdi) + VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx) + VZEROUPPER +#if !defined USE_MULTIARCH +L(nop): +#endif + ret +#if defined USE_MULTIARCH +END (MEMMOVE_SYMBOL (__memmove, unaligned)) + +# if VEC_SIZE == 16 +ENTRY (__mempcpy_chk_erms) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (__mempcpy_chk_erms) + +/* Only used to measure performance of REP MOVSB. */ +ENTRY (__mempcpy_erms) + mov %RDI_LP, %RAX_LP + /* Skip zero length. */ + test %RDX_LP, %RDX_LP + jz 2f + add %RDX_LP, %RAX_LP + jmp L(start_movsb) +END (__mempcpy_erms) + +ENTRY (__memmove_chk_erms) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (__memmove_chk_erms) + +ENTRY (__memmove_erms) + movq %rdi, %rax + /* Skip zero length. */ + test %RDX_LP, %RDX_LP + jz 2f +L(start_movsb): + mov %RDX_LP, %RCX_LP + cmp %RSI_LP, %RDI_LP + jb 1f + /* Source == destination is less common. 
*/ + je 2f + lea (%rsi,%rcx), %RDX_LP + cmp %RDX_LP, %RDI_LP + jb L(movsb_backward) +1: + rep movsb +2: + ret +L(movsb_backward): + leaq -1(%rdi,%rcx), %rdi + leaq -1(%rsi,%rcx), %rsi + std + rep movsb + cld + ret +END (__memmove_erms) +strong_alias (__memmove_erms, __memcpy_erms) +strong_alias (__memmove_chk_erms, __memcpy_chk_erms) +# endif + +# ifdef SHARED +ENTRY (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned_erms)) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned_erms)) +# endif + +ENTRY (MEMMOVE_SYMBOL (__mempcpy, unaligned_erms)) + mov %RDI_LP, %RAX_LP + add %RDX_LP, %RAX_LP + jmp L(start_erms) +END (MEMMOVE_SYMBOL (__mempcpy, unaligned_erms)) + +# ifdef SHARED +ENTRY (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned_erms)) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) +END (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned_erms)) +# endif + +ENTRY (MEMMOVE_SYMBOL (__memmove, unaligned_erms)) + movq %rdi, %rax +L(start_erms): +# ifdef __ILP32__ + /* Clear the upper 32 bits. */ + movl %edx, %edx +# endif + cmp $VEC_SIZE, %RDX_LP + jb L(less_vec) + cmp $(VEC_SIZE * 2), %RDX_LP + ja L(movsb_more_2x_vec) +L(last_2x_vec): + /* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. */ + VMOVU (%rsi), %VEC(0) + VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(1) + VMOVU %VEC(0), (%rdi) + VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx) +L(return): + VZEROUPPER + ret + +L(movsb): + cmp $SHARED_NON_TEMPORAL_THRESHOLD, %RDX_LP + jae L(more_8x_vec) + cmpq %rsi, %rdi + jb 1f + /* Source == destination is less common. */ + je L(nop) + leaq (%rsi,%rdx), %r9 + cmpq %r9, %rdi + /* Avoid slow backward REP MOVSB. */ + jb L(more_8x_vec_backward) +1: + mov %RDX_LP, %RCX_LP + rep movsb +L(nop): + ret +#endif + +L(less_vec): + /* Less than 1 VEC. */ +#if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64 +# error Unsupported VEC_SIZE! 
+#endif +#if VEC_SIZE > 32 + cmpb $32, %dl + jae L(between_32_63) +#endif +#if VEC_SIZE > 16 + cmpb $16, %dl + jae L(between_16_31) +#endif + cmpb $8, %dl + jae L(between_8_15) + cmpb $4, %dl + jae L(between_4_7) + cmpb $1, %dl + ja L(between_2_3) + jb 1f + movzbl (%rsi), %ecx + movb %cl, (%rdi) +1: + ret +#if VEC_SIZE > 32 +L(between_32_63): + /* From 32 to 63. No branch when size == 32. */ + vmovdqu (%rsi), %ymm0 + vmovdqu -32(%rsi,%rdx), %ymm1 + vmovdqu %ymm0, (%rdi) + vmovdqu %ymm1, -32(%rdi,%rdx) + VZEROUPPER + ret +#endif +#if VEC_SIZE > 16 + /* From 16 to 31. No branch when size == 16. */ +L(between_16_31): + vmovdqu (%rsi), %xmm0 + vmovdqu -16(%rsi,%rdx), %xmm1 + vmovdqu %xmm0, (%rdi) + vmovdqu %xmm1, -16(%rdi,%rdx) + ret +#endif +L(between_8_15): + /* From 8 to 15. No branch when size == 8. */ + movq -8(%rsi,%rdx), %rcx + movq (%rsi), %rsi + movq %rcx, -8(%rdi,%rdx) + movq %rsi, (%rdi) + ret +L(between_4_7): + /* From 4 to 7. No branch when size == 4. */ + movl -4(%rsi,%rdx), %ecx + movl (%rsi), %esi + movl %ecx, -4(%rdi,%rdx) + movl %esi, (%rdi) + ret +L(between_2_3): + /* From 2 to 3. No branch when size == 2. */ + movzwl -2(%rsi,%rdx), %ecx + movzwl (%rsi), %esi + movw %cx, -2(%rdi,%rdx) + movw %si, (%rdi) + ret + +#if defined USE_MULTIARCH +L(movsb_more_2x_vec): + cmp $REP_MOSB_THRESHOLD, %RDX_LP + ja L(movsb) +#endif +L(more_2x_vec): + /* More than 2 * VEC and there may be overlap between destination + and source. */ + cmpq $(VEC_SIZE * 8), %rdx + ja L(more_8x_vec) + cmpq $(VEC_SIZE * 4), %rdx + jb L(last_4x_vec) + /* Copy from 4 * VEC to 8 * VEC, inclusively. 
*/ + VMOVU (%rsi), %VEC(0) + VMOVU VEC_SIZE(%rsi), %VEC(1) + VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2) + VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3) + VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(4) + VMOVU -(VEC_SIZE * 2)(%rsi,%rdx), %VEC(5) + VMOVU -(VEC_SIZE * 3)(%rsi,%rdx), %VEC(6) + VMOVU -(VEC_SIZE * 4)(%rsi,%rdx), %VEC(7) + VMOVU %VEC(0), (%rdi) + VMOVU %VEC(1), VEC_SIZE(%rdi) + VMOVU %VEC(2), (VEC_SIZE * 2)(%rdi) + VMOVU %VEC(3), (VEC_SIZE * 3)(%rdi) + VMOVU %VEC(4), -VEC_SIZE(%rdi,%rdx) + VMOVU %VEC(5), -(VEC_SIZE * 2)(%rdi,%rdx) + VMOVU %VEC(6), -(VEC_SIZE * 3)(%rdi,%rdx) + VMOVU %VEC(7), -(VEC_SIZE * 4)(%rdi,%rdx) + VZEROUPPER + ret +L(last_4x_vec): + /* Copy from 2 * VEC to 4 * VEC. */ + VMOVU (%rsi), %VEC(0) + VMOVU VEC_SIZE(%rsi), %VEC(1) + VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(2) + VMOVU -(VEC_SIZE * 2)(%rsi,%rdx), %VEC(3) + VMOVU %VEC(0), (%rdi) + VMOVU %VEC(1), VEC_SIZE(%rdi) + VMOVU %VEC(2), -VEC_SIZE(%rdi,%rdx) + VMOVU %VEC(3), -(VEC_SIZE * 2)(%rdi,%rdx) + VZEROUPPER + ret + +L(more_8x_vec): + cmpq %rsi, %rdi + ja L(more_8x_vec_backward) + /* Source == destination is less common. */ + je L(nop) + /* Load the first VEC and last 4 * VEC to support overlapping + addresses. */ + VMOVU (%rsi), %VEC(4) + VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(5) + VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(6) + VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(7) + VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(8) + /* Save start and stop of the destination buffer. */ + movq %rdi, %r11 + leaq -VEC_SIZE(%rdi, %rdx), %rcx + /* Align destination for aligned stores in the loop. Compute + how much destination is misaligned. */ + movq %rdi, %r8 + andq $(VEC_SIZE - 1), %r8 + /* Get the negative of offset for alignment. */ + subq $VEC_SIZE, %r8 + /* Adjust source. */ + subq %r8, %rsi + /* Adjust destination which should be aligned now. */ + subq %r8, %rdi + /* Adjust length. */ + addq %r8, %rdx +#if (defined USE_MULTIARCH || VEC_SIZE == 16) + /* Check non-temporal store threshold. 
*/ + cmp $SHARED_NON_TEMPORAL_THRESHOLD, %RDX_LP + ja L(large_forward) +#endif +L(loop_4x_vec_forward): + /* Copy 4 * VEC a time forward. */ + VMOVU (%rsi), %VEC(0) + VMOVU VEC_SIZE(%rsi), %VEC(1) + VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2) + VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3) + addq $(VEC_SIZE * 4), %rsi + subq $(VEC_SIZE * 4), %rdx + VMOVA %VEC(0), (%rdi) + VMOVA %VEC(1), VEC_SIZE(%rdi) + VMOVA %VEC(2), (VEC_SIZE * 2)(%rdi) + VMOVA %VEC(3), (VEC_SIZE * 3)(%rdi) + addq $(VEC_SIZE * 4), %rdi + cmpq $(VEC_SIZE * 4), %rdx + ja L(loop_4x_vec_forward) + /* Store the last 4 * VEC. */ + VMOVU %VEC(5), (%rcx) + VMOVU %VEC(6), -VEC_SIZE(%rcx) + VMOVU %VEC(7), -(VEC_SIZE * 2)(%rcx) + VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx) + /* Store the first VEC. */ + VMOVU %VEC(4), (%r11) + VZEROUPPER + ret + +L(more_8x_vec_backward): + /* Load the first 4 * VEC and last VEC to support overlapping + addresses. */ + VMOVU (%rsi), %VEC(4) + VMOVU VEC_SIZE(%rsi), %VEC(5) + VMOVU (VEC_SIZE * 2)(%rsi), %VEC(6) + VMOVU (VEC_SIZE * 3)(%rsi), %VEC(7) + VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(8) + /* Save stop of the destination buffer. */ + leaq -VEC_SIZE(%rdi, %rdx), %r11 + /* Align destination end for aligned stores in the loop. Compute + how much destination end is misaligned. */ + leaq -VEC_SIZE(%rsi, %rdx), %rcx + movq %r11, %r9 + movq %r11, %r8 + andq $(VEC_SIZE - 1), %r8 + /* Adjust source. */ + subq %r8, %rcx + /* Adjust the end of destination which should be aligned now. */ + subq %r8, %r9 + /* Adjust length. */ + subq %r8, %rdx +#if (defined USE_MULTIARCH || VEC_SIZE == 16) + /* Check non-temporal store threshold. */ + cmp $SHARED_NON_TEMPORAL_THRESHOLD, %RDX_LP + ja L(large_backward) +#endif +L(loop_4x_vec_backward): + /* Copy 4 * VEC a time backward. 
*/ + VMOVU (%rcx), %VEC(0) + VMOVU -VEC_SIZE(%rcx), %VEC(1) + VMOVU -(VEC_SIZE * 2)(%rcx), %VEC(2) + VMOVU -(VEC_SIZE * 3)(%rcx), %VEC(3) + subq $(VEC_SIZE * 4), %rcx + subq $(VEC_SIZE * 4), %rdx + VMOVA %VEC(0), (%r9) + VMOVA %VEC(1), -VEC_SIZE(%r9) + VMOVA %VEC(2), -(VEC_SIZE * 2)(%r9) + VMOVA %VEC(3), -(VEC_SIZE * 3)(%r9) + subq $(VEC_SIZE * 4), %r9 + cmpq $(VEC_SIZE * 4), %rdx + ja L(loop_4x_vec_backward) + /* Store the first 4 * VEC. */ + VMOVU %VEC(4), (%rdi) + VMOVU %VEC(5), VEC_SIZE(%rdi) + VMOVU %VEC(6), (VEC_SIZE * 2)(%rdi) + VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi) + /* Store the last VEC. */ + VMOVU %VEC(8), (%r11) + VZEROUPPER + ret + +#if (defined USE_MULTIARCH || VEC_SIZE == 16) +L(large_forward): + /* Don't use non-temporal store if there is overlap between + destination and source since destination may be in cache + when source is loaded. */ + leaq (%rdi, %rdx), %r10 + cmpq %r10, %rsi + jb L(loop_4x_vec_forward) +L(loop_large_forward): + /* Copy 4 * VEC a time forward with non-temporal stores. */ + PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE * 2) + PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE * 3) + VMOVU (%rsi), %VEC(0) + VMOVU VEC_SIZE(%rsi), %VEC(1) + VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2) + VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3) + addq $PREFETCHED_LOAD_SIZE, %rsi + subq $PREFETCHED_LOAD_SIZE, %rdx + VMOVNT %VEC(0), (%rdi) + VMOVNT %VEC(1), VEC_SIZE(%rdi) + VMOVNT %VEC(2), (VEC_SIZE * 2)(%rdi) + VMOVNT %VEC(3), (VEC_SIZE * 3)(%rdi) + addq $PREFETCHED_LOAD_SIZE, %rdi + cmpq $PREFETCHED_LOAD_SIZE, %rdx + ja L(loop_large_forward) + sfence + /* Store the last 4 * VEC. */ + VMOVU %VEC(5), (%rcx) + VMOVU %VEC(6), -VEC_SIZE(%rcx) + VMOVU %VEC(7), -(VEC_SIZE * 2)(%rcx) + VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx) + /* Store the first VEC. */ + VMOVU %VEC(4), (%r11) + VZEROUPPER + ret + +L(large_backward): + /* Don't use non-temporal store if there is overlap between + destination and source since destination may be in cache + when source is loaded. 
*/ + leaq (%rcx, %rdx), %r10 + cmpq %r10, %r9 + jb L(loop_4x_vec_backward) +L(loop_large_backward): + /* Copy 4 * VEC a time backward with non-temporal stores. */ + PREFETCH_ONE_SET (-1, (%rcx), -PREFETCHED_LOAD_SIZE * 2) + PREFETCH_ONE_SET (-1, (%rcx), -PREFETCHED_LOAD_SIZE * 3) + VMOVU (%rcx), %VEC(0) + VMOVU -VEC_SIZE(%rcx), %VEC(1) + VMOVU -(VEC_SIZE * 2)(%rcx), %VEC(2) + VMOVU -(VEC_SIZE * 3)(%rcx), %VEC(3) + subq $PREFETCHED_LOAD_SIZE, %rcx + subq $PREFETCHED_LOAD_SIZE, %rdx + VMOVNT %VEC(0), (%r9) + VMOVNT %VEC(1), -VEC_SIZE(%r9) + VMOVNT %VEC(2), -(VEC_SIZE * 2)(%r9) + VMOVNT %VEC(3), -(VEC_SIZE * 3)(%r9) + subq $PREFETCHED_LOAD_SIZE, %r9 + cmpq $PREFETCHED_LOAD_SIZE, %rdx + ja L(loop_large_backward) + sfence + /* Store the first 4 * VEC. */ + VMOVU %VEC(4), (%rdi) + VMOVU %VEC(5), VEC_SIZE(%rdi) + VMOVU %VEC(6), (VEC_SIZE * 2)(%rdi) + VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi) + /* Store the last VEC. */ + VMOVU %VEC(8), (%r11) + VZEROUPPER + ret +#endif +END (MEMMOVE_SYMBOL (__memmove, unaligned_erms)) + +#if 1 +# ifdef USE_MULTIARCH +strong_alias (MEMMOVE_SYMBOL (__memmove, unaligned_erms), + MEMMOVE_SYMBOL (__memcpy, unaligned_erms)) +# ifdef SHARED +strong_alias (MEMMOVE_SYMBOL (__memmove_chk, unaligned_erms), + MEMMOVE_SYMBOL (__memcpy_chk, unaligned_erms)) +# endif +# endif +# ifdef SHARED +strong_alias (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned), + MEMMOVE_CHK_SYMBOL (__memcpy_chk, unaligned)) +# endif +#endif +strong_alias (MEMMOVE_SYMBOL (__memmove, unaligned), + MEMCPY_SYMBOL (__memcpy, unaligned)) diff --git a/utils/memcpy-bench/glibc/memmove.S b/utils/memcpy-bench/glibc/memmove.S new file mode 100644 index 00000000000..97e735facff --- /dev/null +++ b/utils/memcpy-bench/glibc/memmove.S @@ -0,0 +1,71 @@ +/* Optimized memmove for x86-64. + Copyright (C) 2016-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include "sysdep.h" + +#define VEC_SIZE 16 +#define VEC(i) xmm##i +#define PREFETCHNT prefetchnta +#define VMOVNT movntdq +/* Use movups and movaps for smaller code sizes. */ +#define VMOVU movups +#define VMOVA movaps + +#define SECTION(p) p + +#ifdef USE_MULTIARCH +# if 0 +# define MEMCPY_SYMBOL(p,s) memcpy +# endif +#else +# if defined SHARED +# define MEMCPY_SYMBOL(p,s) __memcpy +# else +# define MEMCPY_SYMBOL(p,s) memcpy +# endif +#endif +#if !defined USE_MULTIARCH +# define MEMPCPY_SYMBOL(p,s) __mempcpy +#endif +#ifndef MEMMOVE_SYMBOL +# define MEMMOVE_CHK_SYMBOL(p,s) p +# define MEMMOVE_SYMBOL(p,s) memmove +#endif + +#include "memmove-vec-unaligned-erms.S" + +#ifndef USE_MULTIARCH +libc_hidden_builtin_def (memmove) +# if defined SHARED && IS_IN (libc) +strong_alias (memmove, __memcpy) +libc_hidden_ver (memmove, memcpy) +# endif +libc_hidden_def (__mempcpy) +weak_alias (__mempcpy, mempcpy) +libc_hidden_builtin_def (mempcpy) + +# if defined SHARED && IS_IN (libc) +# undef memcpy +# include +versioned_symbol (libc, __memcpy, memcpy, GLIBC_2_14); + +# if SHLIB_COMPAT (libc, GLIBC_2_2_5, GLIBC_2_14) +compat_symbol (libc, memmove, memcpy, GLIBC_2_2_5); +# endif +# endif +#endif diff --git a/utils/memcpy-bench/glibc/sysdep.h b/utils/memcpy-bench/glibc/sysdep.h new file mode 100644 index 00000000000..099134b2a2f --- /dev/null 
+++ b/utils/memcpy-bench/glibc/sysdep.h @@ -0,0 +1,129 @@ +/* Assembler macros for x86-64. + Copyright (C) 2001-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#ifndef _X86_64_SYSDEP_H +#define _X86_64_SYSDEP_H 1 + +#include "sysdep_x86.h" + +#ifdef __ASSEMBLER__ + +/* Syntactic details of assembler. */ + +/* This macro is for setting proper CFI with DW_CFA_expression describing + the register as saved relative to %rsp instead of relative to the CFA. + Expression is DW_OP_drop, DW_OP_breg7 (%rsp is register 7), sleb128 offset + from %rsp. */ +#define cfi_offset_rel_rsp(regn, off) .cfi_escape 0x10, regn, 0x4, 0x13, \ + 0x77, off & 0x7F | 0x80, off >> 7 + +/* If compiled for profiling, call `mcount' at the start of each function. */ +#ifdef PROF +/* The mcount code relies on a normal frame pointer being on the stack + to locate our caller, so push one just for its benefit. */ +#define CALL_MCOUNT \ + pushq %rbp; \ + cfi_adjust_cfa_offset(8); \ + movq %rsp, %rbp; \ + cfi_def_cfa_register(%rbp); \ + call JUMPTARGET(mcount); \ + popq %rbp; \ + cfi_def_cfa(rsp,8); +#else +#define CALL_MCOUNT /* Do nothing. 
*/ +#endif + +#define PSEUDO(name, syscall_name, args) \ +lose: \ + jmp JUMPTARGET(syscall_error) \ + .globl syscall_error; \ + ENTRY (name) \ + DO_CALL (syscall_name, args); \ + jb lose + +#undef JUMPTARGET +#ifdef SHARED +# ifdef BIND_NOW +# define JUMPTARGET(name) *name##@GOTPCREL(%rip) +# else +# define JUMPTARGET(name) name##@PLT +# endif +#else +/* For static archives, branch to target directly. */ +# define JUMPTARGET(name) name +#endif + +/* Long and pointer size in bytes. */ +#define LP_SIZE 8 + +/* Instruction to operate on long and pointer. */ +#define LP_OP(insn) insn##q + +/* Assembler address directive. */ +#define ASM_ADDR .quad + +/* Registers to hold long and pointer. */ +#define RAX_LP rax +#define RBP_LP rbp +#define RBX_LP rbx +#define RCX_LP rcx +#define RDI_LP rdi +#define RDX_LP rdx +#define RSI_LP rsi +#define RSP_LP rsp +#define R8_LP r8 +#define R9_LP r9 +#define R10_LP r10 +#define R11_LP r11 +#define R12_LP r12 +#define R13_LP r13 +#define R14_LP r14 +#define R15_LP r15 + +#else /* __ASSEMBLER__ */ + +/* Long and pointer size in bytes. */ +#define LP_SIZE "8" + +/* Instruction to operate on long and pointer. */ +#define LP_OP(insn) #insn "q" + +/* Assembler address directive. */ +#define ASM_ADDR ".quad" + +/* Registers to hold long and pointer. */ +#define RAX_LP "rax" +#define RBP_LP "rbp" +#define RBX_LP "rbx" +#define RCX_LP "rcx" +#define RDI_LP "rdi" +#define RDX_LP "rdx" +#define RSI_LP "rsi" +#define RSP_LP "rsp" +#define R8_LP "r8" +#define R9_LP "r9" +#define R10_LP "r10" +#define R11_LP "r11" +#define R12_LP "r12" +#define R13_LP "r13" +#define R14_LP "r14" +#define R15_LP "r15" + +#endif /* __ASSEMBLER__ */ + +#endif /* _X86_64_SYSDEP_H */ diff --git a/utils/memcpy-bench/glibc/sysdep_generic.h b/utils/memcpy-bench/glibc/sysdep_generic.h new file mode 100644 index 00000000000..91f78e1b04d --- /dev/null +++ b/utils/memcpy-bench/glibc/sysdep_generic.h @@ -0,0 +1,113 @@ +/* Generic asm macros used on many machines. 
+ Copyright (C) 1991-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#define C_SYMBOL_NAME(name) name +#define HIDDEN_JUMPTARGET(name) 0x0 +#define SHARED_CACHE_SIZE_HALF (1024*1024) +#define DATA_CACHE_SIZE_HALF (1024*32/2) +#define DATA_CACHE_SIZE (1024*32) +#define SHARED_NON_TEMPORAL_THRESHOLD (1024*1024*4) +#define REP_MOSB_THRESHOLD 1024 + +#define USE_MULTIARCH + +#define ASM_LINE_SEP ; + +#define strong_alias(original, alias) \ + .globl C_SYMBOL_NAME (alias) ASM_LINE_SEP \ + C_SYMBOL_NAME (alias) = C_SYMBOL_NAME (original) + +#ifndef C_LABEL + +/* Define a macro we can use to construct the asm name for a C symbol. */ +# define C_LABEL(name) name##: + +#endif + +#ifdef __ASSEMBLER__ +/* Mark the end of function named SYM. This is used on some platforms + to generate correct debugging information. */ +# ifndef END +# define END(sym) +# endif + +# ifndef JUMPTARGET +# define JUMPTARGET(sym) sym +# endif +#endif + +/* Makros to generate eh_frame unwind information. 
*/ +#ifdef __ASSEMBLER__ +# define cfi_startproc .cfi_startproc +# define cfi_endproc .cfi_endproc +# define cfi_def_cfa(reg, off) .cfi_def_cfa reg, off +# define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg +# define cfi_def_cfa_offset(off) .cfi_def_cfa_offset off +# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off +# define cfi_offset(reg, off) .cfi_offset reg, off +# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off +# define cfi_register(r1, r2) .cfi_register r1, r2 +# define cfi_return_column(reg) .cfi_return_column reg +# define cfi_restore(reg) .cfi_restore reg +# define cfi_same_value(reg) .cfi_same_value reg +# define cfi_undefined(reg) .cfi_undefined reg +# define cfi_remember_state .cfi_remember_state +# define cfi_restore_state .cfi_restore_state +# define cfi_window_save .cfi_window_save +# define cfi_personality(enc, exp) .cfi_personality enc, exp +# define cfi_lsda(enc, exp) .cfi_lsda enc, exp + +#else /* ! ASSEMBLER */ + +# define CFI_STRINGIFY(Name) CFI_STRINGIFY2 (Name) +# define CFI_STRINGIFY2(Name) #Name +# define CFI_STARTPROC ".cfi_startproc" +# define CFI_ENDPROC ".cfi_endproc" +# define CFI_DEF_CFA(reg, off) \ + ".cfi_def_cfa " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off) +# define CFI_DEF_CFA_REGISTER(reg) \ + ".cfi_def_cfa_register " CFI_STRINGIFY(reg) +# define CFI_DEF_CFA_OFFSET(off) \ + ".cfi_def_cfa_offset " CFI_STRINGIFY(off) +# define CFI_ADJUST_CFA_OFFSET(off) \ + ".cfi_adjust_cfa_offset " CFI_STRINGIFY(off) +# define CFI_OFFSET(reg, off) \ + ".cfi_offset " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off) +# define CFI_REL_OFFSET(reg, off) \ + ".cfi_rel_offset " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off) +# define CFI_REGISTER(r1, r2) \ + ".cfi_register " CFI_STRINGIFY(r1) "," CFI_STRINGIFY(r2) +# define CFI_RETURN_COLUMN(reg) \ + ".cfi_return_column " CFI_STRINGIFY(reg) +# define CFI_RESTORE(reg) \ + ".cfi_restore " CFI_STRINGIFY(reg) +# define CFI_UNDEFINED(reg) \ + ".cfi_undefined " CFI_STRINGIFY(reg) +# define 
CFI_REMEMBER_STATE \ + ".cfi_remember_state" +# define CFI_RESTORE_STATE \ + ".cfi_restore_state" +# define CFI_WINDOW_SAVE \ + ".cfi_window_save" +# define CFI_PERSONALITY(enc, exp) \ + ".cfi_personality " CFI_STRINGIFY(enc) "," CFI_STRINGIFY(exp) +# define CFI_LSDA(enc, exp) \ + ".cfi_lsda " CFI_STRINGIFY(enc) "," CFI_STRINGIFY(exp) +#endif + +#include "dwarf2.h" diff --git a/utils/memcpy-bench/glibc/sysdep_x86.h b/utils/memcpy-bench/glibc/sysdep_x86.h new file mode 100644 index 00000000000..a3fecd01268 --- /dev/null +++ b/utils/memcpy-bench/glibc/sysdep_x86.h @@ -0,0 +1,113 @@ +/* Assembler macros for x86. + Copyright (C) 2017-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#ifndef _X86_SYSDEP_H +#define _X86_SYSDEP_H 1 + +#include "sysdep_generic.h" + +/* __CET__ is defined by GCC with Control-Flow Protection values: + +enum cf_protection_level +{ + CF_NONE = 0, + CF_BRANCH = 1 << 0, + CF_RETURN = 1 << 1, + CF_FULL = CF_BRANCH | CF_RETURN, + CF_SET = 1 << 2 +}; +*/ + +/* Set if CF_BRANCH (IBT) is enabled. */ +#define X86_FEATURE_1_IBT (1U << 0) +/* Set if CF_RETURN (SHSTK) is enabled. 
*/ +#define X86_FEATURE_1_SHSTK (1U << 1) + +#ifdef __CET__ +# define CET_ENABLED 1 +# define IBT_ENABLED (__CET__ & X86_FEATURE_1_IBT) +# define SHSTK_ENABLED (__CET__ & X86_FEATURE_1_SHSTK) +#else +# define CET_ENABLED 0 +# define IBT_ENABLED 0 +# define SHSTK_ENABLED 0 +#endif + +/* Offset for fxsave/xsave area used by _dl_runtime_resolve. Also need + space to preserve RCX, RDX, RSI, RDI, R8, R9 and RAX. It must be + aligned to 16 bytes for fxsave and 64 bytes for xsave. */ +#define STATE_SAVE_OFFSET (8 * 7 + 8) + +/* Save SSE, AVX, AVX512, mask and bound registers. */ +#define STATE_SAVE_MASK \ + ((1 << 1) | (1 << 2) | (1 << 3) | (1 << 5) | (1 << 6) | (1 << 7)) + +#ifdef __ASSEMBLER__ + +/* Syntactic details of assembler. */ + +#ifdef _CET_ENDBR +# define _CET_NOTRACK notrack +#else +# define _CET_ENDBR +# define _CET_NOTRACK +#endif + +/* ELF uses byte-counts for .align, most others use log2 of count of bytes. */ +#define ALIGNARG(log2) 1< #include +#include #include #include #include @@ -14,15 +15,11 @@ #include -#pragma GCC diagnostic ignored "-Wold-style-cast" -#pragma GCC diagnostic ignored "-Wcast-align" -#pragma GCC diagnostic ignored "-Wcast-qual" -#include "FastMemcpy.h" -//#include "FastMemcpy_Avx.h" - #include #include +#include + template void NO_INLINE loop(uint8_t * dst, uint8_t * src, size_t size, F && chunk_size_distribution, MemcpyImpl && impl) @@ -47,7 +44,7 @@ size_t generatorUniform(RNG & rng) { return rng() % N; }; template -void test(uint8_t * dst, uint8_t * src, size_t size, size_t iterations, size_t num_threads, F && generator, MemcpyImpl && impl) +uint64_t test(uint8_t * dst, uint8_t * src, size_t size, size_t iterations, size_t num_threads, F && generator, MemcpyImpl && impl, const char * name) { Stopwatch watch; @@ -76,15 +73,22 @@ void test(uint8_t * dst, uint8_t * src, size_t size, size_t iterations, size_t n for (auto & thread : threads) thread.join(); - double elapsed_ns = watch.elapsed(); + uint64_t elapsed_ns = watch.elapsed(); 
/// Validation size_t sum = 0; + size_t reference = 0; for (size_t i = 0; i < size; ++i) + { sum += dst[i]; + reference += uint8_t(i); + } - std::cerr << std::fixed << std::setprecision(3) - << "Processed in " << (elapsed_ns / 1e9) << "sec, " << (size * iterations * 1.0 / elapsed_ns) << " GB/sec (sum = " << sum << ")\n"; + if (sum != reference) + throw std::logic_error("Incorrect result"); + + std::cout << name; + return elapsed_ns; } @@ -101,9 +105,30 @@ static void * memcpy_erms(void * dst, const void * src, size_t size) return dst; } +static void * memcpy_trivial(void * __restrict dst_, const void * __restrict src_, size_t size) +{ + char * __restrict dst = reinterpret_cast(dst_); + const char * __restrict src = reinterpret_cast(src_); + void * ret = dst; + + while (size > 0) + { + *dst = *src; + ++dst; + ++src; + --size; + } + + return ret; +} + extern "C" void * memcpy_jart(void * dst, const void * src, size_t size); extern "C" void MemCpy(void * dst, const void * src, size_t size); +void * memcpy_fast_sse(void * dst, const void * src, size_t size); +void * memcpy_fast_avx(void * dst, const void * src, size_t size); +void * memcpy_tiny(void * dst, const void * src, size_t size); + static void * memcpySSE2(void * __restrict destination, const void * __restrict source, size_t size) { @@ -329,7 +354,7 @@ void memcpy_my_medium_avx(uint8_t * __restrict & __restrict dst, const uint8_t * if (padding > 0) { __m256i head = _mm256_loadu_si256(reinterpret_cast(src)); - _mm256_storeu_si256((__m256i*)dst, head); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(dst), head); dst += padding; src += padding; size -= padding; @@ -539,70 +564,125 @@ tail: return ret; } +extern "C" void * __memcpy_erms(void * __restrict destination, const void * __restrict source, size_t size); +extern "C" void * __memcpy_sse2_unaligned(void * __restrict destination, const void * __restrict source, size_t size); +extern "C" void * __memcpy_ssse3(void * __restrict destination, const void * 
__restrict source, size_t size); +extern "C" void * __memcpy_ssse3_back(void * __restrict destination, const void * __restrict source, size_t size); +extern "C" void * __memcpy_avx_unaligned(void * __restrict destination, const void * __restrict source, size_t size); +extern "C" void * __memcpy_avx_unaligned_erms(void * __restrict destination, const void * __restrict source, size_t size); +extern "C" void * __memcpy_avx512_unaligned(void * __restrict destination, const void * __restrict source, size_t size); +extern "C" void * __memcpy_avx512_unaligned_erms(void * __restrict destination, const void * __restrict source, size_t size); +extern "C" void * __memcpy_avx512_no_vzeroupper(void * __restrict destination, const void * __restrict source, size_t size); + + +#define VARIANT(N, NAME) \ + if (memcpy_variant == N) \ + return test(dst, src, size, iterations, num_threads, std::forward(generator), NAME, #NAME); template -void dispatchMemcpyVariants(size_t memcpy_variant, uint8_t * dst, uint8_t * src, size_t size, size_t iterations, size_t num_threads, F && generator) +uint64_t dispatchMemcpyVariants(size_t memcpy_variant, uint8_t * dst, uint8_t * src, size_t size, size_t iterations, size_t num_threads, F && generator) { - memcpy_type memcpy_libc = reinterpret_cast(dlsym(RTLD_NEXT, "memcpy")); + memcpy_type memcpy_libc_old = reinterpret_cast(dlsym(RTLD_NEXT, "memcpy")); - if (memcpy_variant == 1) - test(dst, src, size, iterations, num_threads, std::forward(generator), memcpy); - if (memcpy_variant == 2) - test(dst, src, size, iterations, num_threads, std::forward(generator), memcpy_libc); - if (memcpy_variant == 3) - test(dst, src, size, iterations, num_threads, std::forward(generator), memcpy_erms); - if (memcpy_variant == 4) - test(dst, src, size, iterations, num_threads, std::forward(generator), MemCpy); - if (memcpy_variant == 5) - test(dst, src, size, iterations, num_threads, std::forward(generator), memcpySSE2); - if (memcpy_variant == 6) - test(dst, src, size, 
iterations, num_threads, std::forward(generator), memcpySSE2Unrolled2); - if (memcpy_variant == 7) - test(dst, src, size, iterations, num_threads, std::forward(generator), memcpySSE2Unrolled4); - if (memcpy_variant == 8) - test(dst, src, size, iterations, num_threads, std::forward(generator), memcpySSE2Unrolled8); -// if (memcpy_variant == 9) -// test(dst, src, size, iterations, num_threads, std::forward(generator), memcpy_fast_avx); - if (memcpy_variant == 10) - test(dst, src, size, iterations, num_threads, std::forward(generator), memcpy_my); + VARIANT(1, memcpy) + VARIANT(2, memcpy_trivial) + VARIANT(3, memcpy_libc_old) + VARIANT(4, memcpy_erms) + VARIANT(5, MemCpy) + VARIANT(6, memcpySSE2) + VARIANT(7, memcpySSE2Unrolled2) + VARIANT(8, memcpySSE2Unrolled4) + VARIANT(9, memcpySSE2Unrolled8) + VARIANT(10, memcpy_fast_sse) + VARIANT(11, memcpy_fast_avx) + VARIANT(12, memcpy_my) + + VARIANT(21, __memcpy_erms) + VARIANT(22, __memcpy_sse2_unaligned) + VARIANT(23, __memcpy_ssse3) + VARIANT(24, __memcpy_ssse3_back) + VARIANT(25, __memcpy_avx_unaligned) + VARIANT(26, __memcpy_avx_unaligned_erms) + VARIANT(27, __memcpy_avx512_unaligned) + VARIANT(28, __memcpy_avx512_unaligned_erms) + VARIANT(29, __memcpy_avx512_no_vzeroupper) + + return 0; } -void dispatchVariants(size_t memcpy_variant, size_t generator_variant, uint8_t * dst, uint8_t * src, size_t size, size_t iterations, size_t num_threads) +uint64_t dispatchVariants( + size_t memcpy_variant, size_t generator_variant, uint8_t * dst, uint8_t * src, size_t size, size_t iterations, size_t num_threads) { if (generator_variant == 1) - dispatchMemcpyVariants(memcpy_variant, dst, src, size, iterations, num_threads, generatorUniform<16>); + return dispatchMemcpyVariants(memcpy_variant, dst, src, size, iterations, num_threads, generatorUniform<16>); if (generator_variant == 2) - dispatchMemcpyVariants(memcpy_variant, dst, src, size, iterations, num_threads, generatorUniform<256>); + return dispatchMemcpyVariants(memcpy_variant, 
dst, src, size, iterations, num_threads, generatorUniform<256>); if (generator_variant == 3) - dispatchMemcpyVariants(memcpy_variant, dst, src, size, iterations, num_threads, generatorUniform<4096>); + return dispatchMemcpyVariants(memcpy_variant, dst, src, size, iterations, num_threads, generatorUniform<4096>); if (generator_variant == 4) - dispatchMemcpyVariants(memcpy_variant, dst, src, size, iterations, num_threads, generatorUniform<65536>); + return dispatchMemcpyVariants(memcpy_variant, dst, src, size, iterations, num_threads, generatorUniform<65536>); if (generator_variant == 5) - dispatchMemcpyVariants(memcpy_variant, dst, src, size, iterations, num_threads, generatorUniform<1048576>); + return dispatchMemcpyVariants(memcpy_variant, dst, src, size, iterations, num_threads, generatorUniform<1048576>); + + return 0; } int main(int argc, char ** argv) { - size_t size = 1000000000; - if (argc >= 2) - size = std::stoull(argv[1]); + boost::program_options::options_description desc("Allowed options"); + desc.add_options()("help,h", "produce help message") + ("size", boost::program_options::value()->default_value(1000000), "Bytes to copy on every iteration") + ("iterations", boost::program_options::value(), "Number of iterations") + ("threads", boost::program_options::value()->default_value(1), "Number of copying threads") + ("distribution", boost::program_options::value()->default_value(4), "Distribution of chunk sizes to perform copy") + ("variant", boost::program_options::value(), "Variant of memcpy implementation") + ("tsv", "Print result in tab-separated format") + ; - size_t iterations = 10; - if (argc >= 3) - iterations = std::stoull(argv[2]); + boost::program_options::variables_map options; + boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options); - size_t num_threads = 1; - if (argc >= 4) - num_threads = std::stoull(argv[3]); + if (options.count("help") || !options.count("variant")) + { + std::cout << R"(Usage: 
- size_t memcpy_variant = 1; - if (argc >= 5) - memcpy_variant = std::stoull(argv[4]); +for size in 4096 16384 50000 65536 100000 1000000 10000000 100000000; do + for threads in 1 2 4 $(($(nproc) / 2)) $(nproc); do + for distribution in 1 2 3 4 5; do + for variant in {1..12} {21..29}; do + for i in {1..10}; do + ./memcpy-bench --tsv --size $size --variant $variant --threads $threads --distribution $distribution; + done; + done; + done; + done; +done | tee result.tsv - size_t generator_variant = 1; - if (argc >= 6) - generator_variant = std::stoull(argv[5]); +)" << std::endl; + std::cout << desc << std::endl; + return 1; + } + + size_t size = options["size"].as(); + size_t num_threads = options["threads"].as(); + size_t memcpy_variant = options["variant"].as(); + size_t generator_variant = options["distribution"].as(); + + size_t iterations; + if (options.count("iterations")) + { + iterations = options["iterations"].as(); + } + else + { + iterations = 10000000000ULL * num_threads / size; + + if (generator_variant == 1) + iterations /= 100; + if (generator_variant == 2) + iterations /= 10; + } std::unique_ptr src(new uint8_t[size]); std::unique_ptr dst(new uint8_t[size]); @@ -614,7 +694,25 @@ int main(int argc, char ** argv) /// Fill dst to avoid page faults. 
memset(dst.get(), 0, size); - dispatchVariants(memcpy_variant, generator_variant, dst.get(), src.get(), size, iterations, num_threads); + uint64_t elapsed_ns = dispatchVariants(memcpy_variant, generator_variant, dst.get(), src.get(), size, iterations, num_threads); + + std::cout << std::fixed << std::setprecision(3); + + if (options.count("tsv")) + { + std::cout + << '\t' << size + << '\t' << iterations + << '\t' << num_threads + << '\t' << generator_variant + << '\t' << memcpy_variant + << '\t' << elapsed_ns + << '\n'; + } + else + { + std::cout << ": processed in " << (elapsed_ns / 1e9) << " sec, " << (size * iterations * 1.0 / elapsed_ns) << " GB/sec\n"; + } return 0; } From 402bf77783cbda48a9ee1b748bfce3c52ef8fe11 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 13 Mar 2021 18:05:54 +0300 Subject: [PATCH 183/333] Fix concurrent OPTIMIZE and DROP for ReplicatedMergeTree Found with fuzzer [1] for 00992_system_parts_race_condition_zookeeper: 2021.03.13 11:12:30.385188 [ 42042 ] {2d3a8e17-26be-47c1-974f-bd2c9fc7c3af} executeQuery: (from [::1]:58192, using production parser) (comment: '/usr/share/clickhouse-test/queries/1_stateful/00153_aggregate_arena_race.sql') CREATE TABLE alter_tabl e (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_3.alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_p eriod = 1, cleanup_delay_period_random_add = 0; ... 
2021.03.13 11:12:30.678387 [ 42042 ] {528cafc5-a02b-4df8-a531-a9a98e37b478} executeQuery: (from [::1]:58192, using production parser) (comment: '/usr/share/clickhouse-test/queries/1_stateful/00153_aggregate_arena_race.sql') CREATE TABLE alter_table2 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_3.alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0; ... 2021.03.13 11:12:40.671994 [ 4193 ] {d96ee93c-69b0-4e89-b411-16c382ae27a8} executeQuery: (from [::1]:59714, using production parser) (comment: '/usr/share/clickhouse-test/queries/1_stateful/00153_aggregate_arena_race.sql') OPTIMIZE TABLE alter_table FINAL ... 2021.03.13 11:12:40.990174 [ 2298 ] {a80f9306-3a73-4778-a921-db53249247e3} executeQuery: (from [::1]:59768, using production parser) (comment: '/usr/share/clickhouse-test/queries/1_stateful/00153_aggregate_arena_race.sql') DROP TABLE alter_table; ... 2021.03.13 11:12:41.333054 [ 2298 ] {a80f9306-3a73-4778-a921-db53249247e3} test_3.alter_table (d4fedaca-e0f6-4c22-9a4f-9f4d11b6b705): Removing part from filesystem 7_0_0_0 ... 2021.03.13 11:12:41.335380 [ 2298 ] {a80f9306-3a73-4778-a921-db53249247e3} DatabaseCatalog: Waiting for table d4fedaca-e0f6-4c22-9a4f-9f4d11b6b705 to be finally dropped ... 2021.03.13 11:12:41.781032 [ 4193 ] {d96ee93c-69b0-4e89-b411-16c382ae27a8} test_3.alter_table (d4fedaca-e0f6-4c22-9a4f-9f4d11b6b705): Waiting for queue-0000000085 to disappear from r2 queue ... 2021.03.13 11:12:41.900039 [ 371 ] {} test_3.alter_table2 (ReplicatedMergeTreeQueue): Not executing log entry queue-0000000085 of type MERGE_PARTS for part 7_0_0_1 because part 7_0_0_0 is not ready yet (log entry for that part is being processed). 
2021.03.13 11:12:41.900213 [ 365 ] {} test_3.alter_table2 (ReplicatedMergeTreeQueue): Cannot execute alter metadata queue-0000000056 with version 22 because another alter 21 must be executed before 2021.03.13 11:12:41.900231 [ 13762 ] {} test_3.alter_table2 (ae877c49-0d30-416d-9afe-27fd457d8fc4): Executing log entry to merge parts -7_0_0_0 to -7_0_0_1 2021.03.13 11:12:41.900330 [ 13762 ] {} test_3.alter_table2 (ae877c49-0d30-416d-9afe-27fd457d8fc4): Don't have all parts for merge -7_0_0_1; will try to fetch it instead ... [1]: https://clickhouse-test-reports.s3.yandex.net/21691/eb3710c164b991b8d4f86b1435a65f9eceb8f1f5/stress_test_(address).html#fail1 --- src/Storages/StorageReplicatedMergeTree.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index d08c5b6ad7c..2a280508364 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -4151,6 +4151,10 @@ bool StorageReplicatedMergeTree::optimize( const Names & deduplicate_by_columns, const Context & query_context) { + /// NOTE: exclusive lock cannot be used here, since this may lead to deadlock (see comments below), + /// but it should be safe to use non-exclusive to avoid dropping parts that may be required for processing queue. 
+ auto table_lock = lockForShare(query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); + assertNotReadonly(); if (!is_leader) From ce364955b7d070808b4bf862dcabec04c5d4d50d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 23:19:53 +0300 Subject: [PATCH 184/333] Fix broken link --- docs/ru/sql-reference/table-functions/s3.md | 168 ++++++++++++++++++++ 1 file changed, 168 insertions(+) create mode 100644 docs/ru/sql-reference/table-functions/s3.md diff --git a/docs/ru/sql-reference/table-functions/s3.md b/docs/ru/sql-reference/table-functions/s3.md new file mode 100644 index 00000000000..2427f0f863c --- /dev/null +++ b/docs/ru/sql-reference/table-functions/s3.md @@ -0,0 +1,168 @@ +--- +toc_priority: 45 +toc_title: s3 +--- + +# s3 {#s3} + +Provides table-like interface to select/insert files in S3. This table function is similar to [hdfs](../../sql-reference/table-functions/hdfs.md). + +``` sql +s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression]) +``` + +**Input parameters** + +- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: *, ?, {abc,def} and {N..M} where N, M — numbers, `’abc’, ‘def’ — strings. +- `format` — The [format](../../interfaces/formats.md#formats) of the file. +- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`. +- `compression` — Parameter is optional. Supported values: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. By default, it will autodetect compression by file extension. + +**Returned value** + +A table with the specified structure for reading or writing data in the specified file. 
+ +**Example** + +Table from S3 file `https://storage.yandexcloud.net/my-test-bucket-768/data.csv` and selection of the first two rows from it: + +``` sql +SELECT * +FROM s3('https://storage.yandexcloud.net/my-test-bucket-768/data.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') +LIMIT 2 +``` + +``` text +┌─column1─┬─column2─┬─column3─┐ +│ 1 │ 2 │ 3 │ +│ 3 │ 2 │ 1 │ +└─────────┴─────────┴─────────┘ +``` + +The similar but from file with `gzip` compression: + +``` sql +SELECT * +FROM s3('https://storage.yandexcloud.net/my-test-bucket-768/data.csv.gz', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32', 'gzip') +LIMIT 2 +``` + +``` text +┌─column1─┬─column2─┬─column3─┐ +│ 1 │ 2 │ 3 │ +│ 3 │ 2 │ 1 │ +└─────────┴─────────┴─────────┘ +``` + +**Globs in path** + +Multiple path components can have globs. For being processed file should exists and matches to the whole path pattern (not only suffix or prefix). + +- `*` — Substitutes any number of any characters except `/` including empty string. +- `?` — Substitutes any single character. +- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. +- `{N..M}` — Substitutes any number in range from N to M including both borders. N and M can have leading zeroes e.g. `000..078`. + +Constructions with `{}` are similar to the [remote table function](../../sql-reference/table-functions/remote.md)). + +**Example** + +1. 
Suppose that we have several files with following URIs on S3: + +- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_4.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv’ +- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_4.csv’ + +2. Query the amount of rows in files end with number from 1 to 3: + + + +``` sql +SELECT count(*) +FROM s3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}.csv', 'CSV', 'name String, value UInt32') +``` + +``` text +┌─count()─┐ +│ 18 │ +└─────────┘ +``` + +3. Query the amount of rows in all files of these two directories: + + + +``` sql +SELECT count(*) +FROM s3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV', 'name String, value UInt32') +``` + +``` text +┌─count()─┐ +│ 24 │ +└─────────┘ +``` + + +!!! warning "Warning" + If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`. + +**Example** + +Query the data from files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`: + +``` sql +SELECT count(*) +FROM s3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV', 'name String, value UInt32') +``` + +``` text +┌─count()─┐ +│ 12 │ +└─────────┘ +``` + +**Data insert** + +The S3 table function may be used for data insert as well. 
+ +**Example** + +Insert a data into file `test-data.csv.gz`: + +``` sql +INSERT INTO s3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip') +VALUES ('test-data', 1), ('test-data-2', 2) +``` + +Insert a data into file `test-data.csv.gz` from existing table: + +``` sql +INSERT INTO s3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip') +SELECT name, value FROM existing_table +``` + +## Virtual Columns {#virtual-columns} + +- `_path` — Path to the file. +- `_file` — Name of the file. + +## S3-related settings {#settings} + +The following settings can be set before query execution or placed into configuration file. + +- `s3_max_single_part_upload_size` — Default value is `64Mb`. The maximum size of object to upload using singlepart upload to S3. +- `s3_min_upload_part_size` — Default value is `512Mb`. The minimum size of part to upload during multipart upload to [S3 Multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +- `s3_max_redirects` — Default value is `10`. Max number of S3 redirects hops allowed. + +Security consideration: if malicious user can specify arbitrary S3 URLs, `s3_max_redirects` must be set to zero to avoid [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery) attacks; or alternatively, `remote_host_filter` must be specified in server configuration. 
+ +**See Also** + +- [Virtual columns](../../engines/table-engines/index.md#table_engines-virtual_columns) + From 145116bfb64a9a135cd60a8d5b5ebdd6d8310676 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 23:22:12 +0300 Subject: [PATCH 185/333] Fix style --- utils/memcpy-bench/glibc/dwarf2.h | 74 +- utils/memcpy-bench/glibc/memcpy-ssse3-back.S | 4988 ++++++++-------- utils/memcpy-bench/glibc/memcpy-ssse3.S | 5176 ++++++++--------- .../glibc/memmove-avx-unaligned-erms.S | 14 +- .../glibc/memmove-avx512-no-vzeroupper.S | 662 +-- .../glibc/memmove-avx512-unaligned-erms.S | 14 +- .../glibc/memmove-sse2-unaligned-erms.S | 2 +- .../glibc/memmove-vec-unaligned-erms.S | 694 +-- utils/memcpy-bench/glibc/memmove.S | 26 +- utils/memcpy-bench/glibc/sysdep.h | 100 +- utils/memcpy-bench/glibc/sysdep_generic.h | 52 +- utils/memcpy-bench/glibc/sysdep_x86.h | 52 +- 12 files changed, 5927 insertions(+), 5927 deletions(-) diff --git a/utils/memcpy-bench/glibc/dwarf2.h b/utils/memcpy-bench/glibc/dwarf2.h index 4c7de0d8737..2be827f00ae 100644 --- a/utils/memcpy-bench/glibc/dwarf2.h +++ b/utils/memcpy-bench/glibc/dwarf2.h @@ -21,7 +21,7 @@ . 
*/ #ifndef _DWARF2_H -#define _DWARF2_H 1 +#define _DWARF2_H 1 /* This file is derived from the DWARF specification (a public document) Revision 2.0.0 (July 27, 1993) developed by the UNIX International @@ -88,19 +88,19 @@ enum dwarf_tag /* SGI/MIPS Extensions */ DW_TAG_MIPS_loop = 0x4081, /* GNU extensions */ - DW_TAG_format_label = 0x4101, /* for FORTRAN 77 and Fortran 90 */ - DW_TAG_function_template = 0x4102, /* for C++ */ - DW_TAG_class_template = 0x4103, /* for C++ */ + DW_TAG_format_label = 0x4101, /* for FORTRAN 77 and Fortran 90 */ + DW_TAG_function_template = 0x4102, /* for C++ */ + DW_TAG_class_template = 0x4103, /* for C++ */ DW_TAG_GNU_BINCL = 0x4104, DW_TAG_GNU_EINCL = 0x4105 }; -#define DW_TAG_lo_user 0x4080 -#define DW_TAG_hi_user 0xffff +#define DW_TAG_lo_user 0x4080 +#define DW_TAG_hi_user 0xffff /* flag that tells whether entry has a child or not */ #define DW_children_no 0 -#define DW_children_yes 1 +#define DW_children_yes 1 /* Form names and codes. */ enum dwarf_form @@ -215,8 +215,8 @@ enum dwarf_attribute DW_AT_body_end = 0x2106 }; -#define DW_AT_lo_user 0x2000 /* implementation-defined range start */ -#define DW_AT_hi_user 0x3ff0 /* implementation-defined range end */ +#define DW_AT_lo_user 0x2000 /* implementation-defined range start */ +#define DW_AT_hi_user 0x3ff0 /* implementation-defined range end */ /* Location atom names and codes. */ @@ -369,8 +369,8 @@ enum dwarf_location_atom DW_OP_nop = 0x96 }; -#define DW_OP_lo_user 0x80 /* implementation-defined range start */ -#define DW_OP_hi_user 0xff /* implementation-defined range end */ +#define DW_OP_lo_user 0x80 /* implementation-defined range start */ +#define DW_OP_hi_user 0xff /* implementation-defined range end */ /* Type encodings. */ @@ -387,8 +387,8 @@ enum dwarf_type DW_ATE_unsigned_char = 0x8 }; -#define DW_ATE_lo_user 0x80 -#define DW_ATE_hi_user 0xff +#define DW_ATE_lo_user 0x80 +#define DW_ATE_hi_user 0xff /* Array ordering names and codes. 
*/ enum dwarf_array_dim_ordering @@ -517,17 +517,17 @@ enum dwarf_call_frame_info DW_CFA_GNU_negative_offset_extended = 0x2f }; -#define DW_CIE_ID 0xffffffff -#define DW_CIE_VERSION 1 +#define DW_CIE_ID 0xffffffff +#define DW_CIE_VERSION 1 #define DW_CFA_extended 0 #define DW_CFA_low_user 0x1c #define DW_CFA_high_user 0x3f -#define DW_CHILDREN_no 0x00 -#define DW_CHILDREN_yes 0x01 +#define DW_CHILDREN_no 0x00 +#define DW_CHILDREN_yes 0x01 -#define DW_ADDR_none 0 +#define DW_ADDR_none 0 /* Source language names and codes. */ @@ -548,8 +548,8 @@ enum dwarf_source_language }; -#define DW_LANG_lo_user 0x8000 /* implementation-defined range start */ -#define DW_LANG_hi_user 0xffff /* implementation-defined range start */ +#define DW_LANG_lo_user 0x8000 /* implementation-defined range start */ +#define DW_LANG_hi_user 0xffff /* implementation-defined range start */ /* Names and codes for macro information. */ @@ -566,25 +566,25 @@ enum dwarf_macinfo_record_type /* @@@ For use with GNU frame unwind information. 
*/ -#define DW_EH_PE_absptr 0x00 -#define DW_EH_PE_omit 0xff +#define DW_EH_PE_absptr 0x00 +#define DW_EH_PE_omit 0xff -#define DW_EH_PE_uleb128 0x01 -#define DW_EH_PE_udata2 0x02 -#define DW_EH_PE_udata4 0x03 -#define DW_EH_PE_udata8 0x04 -#define DW_EH_PE_sleb128 0x09 -#define DW_EH_PE_sdata2 0x0A -#define DW_EH_PE_sdata4 0x0B -#define DW_EH_PE_sdata8 0x0C -#define DW_EH_PE_signed 0x08 +#define DW_EH_PE_uleb128 0x01 +#define DW_EH_PE_udata2 0x02 +#define DW_EH_PE_udata4 0x03 +#define DW_EH_PE_udata8 0x04 +#define DW_EH_PE_sleb128 0x09 +#define DW_EH_PE_sdata2 0x0A +#define DW_EH_PE_sdata4 0x0B +#define DW_EH_PE_sdata8 0x0C +#define DW_EH_PE_signed 0x08 -#define DW_EH_PE_pcrel 0x10 -#define DW_EH_PE_textrel 0x20 -#define DW_EH_PE_datarel 0x30 -#define DW_EH_PE_funcrel 0x40 -#define DW_EH_PE_aligned 0x50 +#define DW_EH_PE_pcrel 0x10 +#define DW_EH_PE_textrel 0x20 +#define DW_EH_PE_datarel 0x30 +#define DW_EH_PE_funcrel 0x40 +#define DW_EH_PE_aligned 0x50 -#define DW_EH_PE_indirect 0x80 +#define DW_EH_PE_indirect 0x80 #endif /* dwarf2.h */ diff --git a/utils/memcpy-bench/glibc/memcpy-ssse3-back.S b/utils/memcpy-bench/glibc/memcpy-ssse3-back.S index 1492dd38e73..c5257592efa 100644 --- a/utils/memcpy-bench/glibc/memcpy-ssse3-back.S +++ b/utils/memcpy-bench/glibc/memcpy-ssse3-back.S @@ -24,3159 +24,3159 @@ #include "asm-syntax.h" #ifndef MEMCPY -# define MEMCPY __memcpy_ssse3_back -# define MEMCPY_CHK __memcpy_chk_ssse3_back -# define MEMPCPY __mempcpy_ssse3_back -# define MEMPCPY_CHK __mempcpy_chk_ssse3_back +# define MEMCPY __memcpy_ssse3_back +# define MEMCPY_CHK __memcpy_chk_ssse3_back +# define MEMPCPY __mempcpy_ssse3_back +# define MEMPCPY_CHK __mempcpy_chk_ssse3_back #endif -#define JMPTBL(I, B) I - B +#define JMPTBL(I, B) I - B /* Branch to an entry in a jump table. TABLE is a jump table with relative offsets. INDEX is a register contains the index into the jump table. SCALE is the scale of INDEX. 
*/ -#define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ - lea TABLE(%rip), %r11; \ - movslq (%r11, INDEX, SCALE), INDEX; \ - lea (%r11, INDEX), INDEX; \ - _CET_NOTRACK jmp *INDEX; \ +#define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ + lea TABLE(%rip), %r11; \ + movslq (%r11, INDEX, SCALE), INDEX; \ + lea (%r11, INDEX), INDEX; \ + _CET_NOTRACK jmp *INDEX; \ ud2 - .section .text.ssse3,"ax",@progbits + .section .text.ssse3,"ax",@progbits #if !defined USE_AS_MEMPCPY && !defined USE_AS_MEMMOVE ENTRY (MEMPCPY_CHK) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (MEMPCPY_CHK) ENTRY (MEMPCPY) - mov %RDI_LP, %RAX_LP - add %RDX_LP, %RAX_LP - jmp L(start) + mov %RDI_LP, %RAX_LP + add %RDX_LP, %RAX_LP + jmp L(start) END (MEMPCPY) #endif #if !defined USE_AS_BCOPY ENTRY (MEMCPY_CHK) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (MEMCPY_CHK) #endif ENTRY (MEMCPY) - mov %RDI_LP, %RAX_LP + mov %RDI_LP, %RAX_LP #ifdef USE_AS_MEMPCPY - add %RDX_LP, %RAX_LP + add %RDX_LP, %RAX_LP #endif #ifdef __ILP32__ - /* Clear the upper 32 bits. */ - mov %edx, %edx + /* Clear the upper 32 bits. 
*/ + mov %edx, %edx #endif #ifdef USE_AS_MEMMOVE - cmp %rsi, %rdi - jb L(copy_forward) - je L(bwd_write_0bytes) - cmp $144, %rdx - jae L(copy_backward) - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + cmp %rsi, %rdi + jb L(copy_forward) + je L(bwd_write_0bytes) + cmp $144, %rdx + jae L(copy_backward) + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) L(copy_forward): #endif L(start): - cmp $144, %rdx - jae L(144bytesormore) + cmp $144, %rdx + jae L(144bytesormore) L(fwd_write_less32bytes): #ifndef USE_AS_MEMMOVE - cmp %dil, %sil - jbe L(bk_write) + cmp %dil, %sil + jbe L(bk_write) #endif - add %rdx, %rsi - add %rdx, %rdi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + add %rdx, %rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) #ifndef USE_AS_MEMMOVE L(bk_write): - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) #endif - .p2align 4 + .p2align 4 L(144bytesormore): #ifndef USE_AS_MEMMOVE - cmp %dil, %sil - jle L(copy_backward) + cmp %dil, %sil + jle L(copy_backward) #endif - movdqu (%rsi), %xmm0 - mov %rdi, %r8 - and $-16, %rdi - add $16, %rdi - mov %rdi, %r9 - sub %r8, %r9 - sub %r9, %rdx - add %r9, %rsi - mov %rsi, %r9 - and $0xf, %r9 - jz L(shl_0) + movdqu (%rsi), %xmm0 + mov %rdi, %r8 + and $-16, %rdi + add $16, %rdi + mov %rdi, %r9 + sub %r8, %r9 + sub %r9, %rdx + add %r9, %rsi + mov %rsi, %r9 + and $0xf, %r9 + jz L(shl_0) #ifdef DATA_CACHE_SIZE - mov $DATA_CACHE_SIZE, %RCX_LP + mov $DATA_CACHE_SIZE, %RCX_LP #else - mov __x86_data_cache_size(%rip), %RCX_LP + mov __x86_data_cache_size(%rip), %RCX_LP #endif - cmp %rcx, %rdx - jae L(gobble_mem_fwd) - lea L(shl_table_fwd)(%rip), %r11 - sub $0x80, %rdx - movslq (%r11, %r9, 4), %r9 - add %r11, %r9 - _CET_NOTRACK jmp *%r9 - ud2 + cmp %rcx, %rdx + jae L(gobble_mem_fwd) + lea L(shl_table_fwd)(%rip), %r11 + sub $0x80, %rdx + movslq (%r11, %r9, 4), %r9 + add %r11, %r9 + _CET_NOTRACK jmp *%r9 + 
ud2 - .p2align 4 + .p2align 4 L(copy_backward): #ifdef DATA_CACHE_SIZE - mov $DATA_CACHE_SIZE, %RCX_LP + mov $DATA_CACHE_SIZE, %RCX_LP #else - mov __x86_data_cache_size(%rip), %RCX_LP + mov __x86_data_cache_size(%rip), %RCX_LP #endif - shl $1, %rcx - cmp %rcx, %rdx - ja L(gobble_mem_bwd) + shl $1, %rcx + cmp %rcx, %rdx + ja L(gobble_mem_bwd) - add %rdx, %rdi - add %rdx, %rsi - movdqu -16(%rsi), %xmm0 - lea -16(%rdi), %r8 - mov %rdi, %r9 - and $0xf, %r9 - xor %r9, %rdi - sub %r9, %rsi - sub %r9, %rdx - mov %rsi, %r9 - and $0xf, %r9 - jz L(shl_0_bwd) - lea L(shl_table_bwd)(%rip), %r11 - sub $0x80, %rdx - movslq (%r11, %r9, 4), %r9 - add %r11, %r9 - _CET_NOTRACK jmp *%r9 - ud2 + add %rdx, %rdi + add %rdx, %rsi + movdqu -16(%rsi), %xmm0 + lea -16(%rdi), %r8 + mov %rdi, %r9 + and $0xf, %r9 + xor %r9, %rdi + sub %r9, %rsi + sub %r9, %rdx + mov %rsi, %r9 + and $0xf, %r9 + jz L(shl_0_bwd) + lea L(shl_table_bwd)(%rip), %r11 + sub $0x80, %rdx + movslq (%r11, %r9, 4), %r9 + add %r11, %r9 + _CET_NOTRACK jmp *%r9 + ud2 - .p2align 4 + .p2align 4 L(shl_0): - mov %rdx, %r9 - shr $8, %r9 - add %rdx, %r9 + mov %rdx, %r9 + shr $8, %r9 + add %rdx, %r9 #ifdef DATA_CACHE_SIZE - cmp $DATA_CACHE_SIZE_HALF, %R9_LP + cmp $DATA_CACHE_SIZE_HALF, %R9_LP #else - cmp __x86_data_cache_size_half(%rip), %R9_LP + cmp __x86_data_cache_size_half(%rip), %R9_LP #endif - jae L(gobble_mem_fwd) - sub $0x80, %rdx - .p2align 4 + jae L(gobble_mem_fwd) + sub $0x80, %rdx + .p2align 4 L(shl_0_loop): - movdqa (%rsi), %xmm1 - movdqa %xmm1, (%rdi) - movaps 0x10(%rsi), %xmm2 - movaps %xmm2, 0x10(%rdi) - movaps 0x20(%rsi), %xmm3 - movaps %xmm3, 0x20(%rdi) - movaps 0x30(%rsi), %xmm4 - movaps %xmm4, 0x30(%rdi) - movaps 0x40(%rsi), %xmm1 - movaps %xmm1, 0x40(%rdi) - movaps 0x50(%rsi), %xmm2 - movaps %xmm2, 0x50(%rdi) - movaps 0x60(%rsi), %xmm3 - movaps %xmm3, 0x60(%rdi) - movaps 0x70(%rsi), %xmm4 - movaps %xmm4, 0x70(%rdi) - sub $0x80, %rdx - lea 0x80(%rsi), %rsi - lea 0x80(%rdi), %rdi - jae L(shl_0_loop) - movdqu 
%xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rsi - add %rdx, %rdi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + movdqa (%rsi), %xmm1 + movdqa %xmm1, (%rdi) + movaps 0x10(%rsi), %xmm2 + movaps %xmm2, 0x10(%rdi) + movaps 0x20(%rsi), %xmm3 + movaps %xmm3, 0x20(%rdi) + movaps 0x30(%rsi), %xmm4 + movaps %xmm4, 0x30(%rdi) + movaps 0x40(%rsi), %xmm1 + movaps %xmm1, 0x40(%rdi) + movaps 0x50(%rsi), %xmm2 + movaps %xmm2, 0x50(%rdi) + movaps 0x60(%rsi), %xmm3 + movaps %xmm3, 0x60(%rdi) + movaps 0x70(%rsi), %xmm4 + movaps %xmm4, 0x70(%rdi) + sub $0x80, %rdx + lea 0x80(%rsi), %rsi + lea 0x80(%rdi), %rdi + jae L(shl_0_loop) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_0_bwd): - sub $0x80, %rdx + sub $0x80, %rdx L(copy_backward_loop): - movaps -0x10(%rsi), %xmm1 - movaps %xmm1, -0x10(%rdi) - movaps -0x20(%rsi), %xmm2 - movaps %xmm2, -0x20(%rdi) - movaps -0x30(%rsi), %xmm3 - movaps %xmm3, -0x30(%rdi) - movaps -0x40(%rsi), %xmm4 - movaps %xmm4, -0x40(%rdi) - movaps -0x50(%rsi), %xmm5 - movaps %xmm5, -0x50(%rdi) - movaps -0x60(%rsi), %xmm5 - movaps %xmm5, -0x60(%rdi) - movaps -0x70(%rsi), %xmm5 - movaps %xmm5, -0x70(%rdi) - movaps -0x80(%rsi), %xmm5 - movaps %xmm5, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(copy_backward_loop) + movaps -0x10(%rsi), %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps -0x20(%rsi), %xmm2 + movaps %xmm2, -0x20(%rdi) + movaps -0x30(%rsi), %xmm3 + movaps %xmm3, -0x30(%rdi) + movaps -0x40(%rsi), %xmm4 + movaps %xmm4, -0x40(%rdi) + movaps -0x50(%rsi), %xmm5 + movaps %xmm5, -0x50(%rdi) + movaps -0x60(%rsi), %xmm5 + movaps %xmm5, -0x60(%rdi) + movaps -0x70(%rsi), %xmm5 + movaps %xmm5, -0x70(%rdi) + movaps -0x80(%rsi), %xmm5 + movaps %xmm5, -0x80(%rdi) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(copy_backward_loop) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, 
%rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_1): - sub $0x80, %rdx - movaps -0x01(%rsi), %xmm1 - movaps 0x0f(%rsi), %xmm2 - movaps 0x1f(%rsi), %xmm3 - movaps 0x2f(%rsi), %xmm4 - movaps 0x3f(%rsi), %xmm5 - movaps 0x4f(%rsi), %xmm6 - movaps 0x5f(%rsi), %xmm7 - movaps 0x6f(%rsi), %xmm8 - movaps 0x7f(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $1, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $1, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $1, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $1, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $1, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $1, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $1, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $1, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_1) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x01(%rsi), %xmm1 + movaps 0x0f(%rsi), %xmm2 + movaps 0x1f(%rsi), %xmm3 + movaps 0x2f(%rsi), %xmm4 + movaps 0x3f(%rsi), %xmm5 + movaps 0x4f(%rsi), %xmm6 + movaps 0x5f(%rsi), %xmm7 + movaps 0x6f(%rsi), %xmm8 + movaps 0x7f(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $1, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $1, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $1, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $1, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $1, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $1, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $1, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $1, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_1) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY 
(L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_1_bwd): - movaps -0x01(%rsi), %xmm1 + movaps -0x01(%rsi), %xmm1 - movaps -0x11(%rsi), %xmm2 - palignr $1, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x11(%rsi), %xmm2 + palignr $1, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x21(%rsi), %xmm3 - palignr $1, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x21(%rsi), %xmm3 + palignr $1, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x31(%rsi), %xmm4 - palignr $1, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x31(%rsi), %xmm4 + palignr $1, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x41(%rsi), %xmm5 - palignr $1, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x41(%rsi), %xmm5 + palignr $1, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x51(%rsi), %xmm6 - palignr $1, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x51(%rsi), %xmm6 + palignr $1, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x61(%rsi), %xmm7 - palignr $1, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x61(%rsi), %xmm7 + palignr $1, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x71(%rsi), %xmm8 - palignr $1, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x71(%rsi), %xmm8 + palignr $1, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x81(%rsi), %xmm9 - palignr $1, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x81(%rsi), %xmm9 + palignr $1, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_1_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_1_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_2): - sub $0x80, %rdx - movaps -0x02(%rsi), %xmm1 - movaps 0x0e(%rsi), 
%xmm2 - movaps 0x1e(%rsi), %xmm3 - movaps 0x2e(%rsi), %xmm4 - movaps 0x3e(%rsi), %xmm5 - movaps 0x4e(%rsi), %xmm6 - movaps 0x5e(%rsi), %xmm7 - movaps 0x6e(%rsi), %xmm8 - movaps 0x7e(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $2, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $2, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $2, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $2, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $2, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $2, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $2, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $2, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_2) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x02(%rsi), %xmm1 + movaps 0x0e(%rsi), %xmm2 + movaps 0x1e(%rsi), %xmm3 + movaps 0x2e(%rsi), %xmm4 + movaps 0x3e(%rsi), %xmm5 + movaps 0x4e(%rsi), %xmm6 + movaps 0x5e(%rsi), %xmm7 + movaps 0x6e(%rsi), %xmm8 + movaps 0x7e(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $2, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $2, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $2, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $2, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $2, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $2, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $2, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $2, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_2) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_2_bwd): - movaps -0x02(%rsi), %xmm1 + movaps -0x02(%rsi), %xmm1 - movaps -0x12(%rsi), %xmm2 - palignr $2, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x12(%rsi), %xmm2 + palignr $2, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x22(%rsi), %xmm3 - 
palignr $2, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x22(%rsi), %xmm3 + palignr $2, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x32(%rsi), %xmm4 - palignr $2, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x32(%rsi), %xmm4 + palignr $2, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x42(%rsi), %xmm5 - palignr $2, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x42(%rsi), %xmm5 + palignr $2, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x52(%rsi), %xmm6 - palignr $2, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x52(%rsi), %xmm6 + palignr $2, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x62(%rsi), %xmm7 - palignr $2, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x62(%rsi), %xmm7 + palignr $2, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x72(%rsi), %xmm8 - palignr $2, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x72(%rsi), %xmm8 + palignr $2, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x82(%rsi), %xmm9 - palignr $2, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x82(%rsi), %xmm9 + palignr $2, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_2_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_2_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_3): - sub $0x80, %rdx - movaps -0x03(%rsi), %xmm1 - movaps 0x0d(%rsi), %xmm2 - movaps 0x1d(%rsi), %xmm3 - movaps 0x2d(%rsi), %xmm4 - movaps 0x3d(%rsi), %xmm5 - movaps 0x4d(%rsi), %xmm6 - movaps 0x5d(%rsi), %xmm7 - movaps 0x6d(%rsi), %xmm8 - movaps 0x7d(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $3, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $3, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - 
palignr $3, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $3, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $3, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $3, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $3, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $3, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_3) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x03(%rsi), %xmm1 + movaps 0x0d(%rsi), %xmm2 + movaps 0x1d(%rsi), %xmm3 + movaps 0x2d(%rsi), %xmm4 + movaps 0x3d(%rsi), %xmm5 + movaps 0x4d(%rsi), %xmm6 + movaps 0x5d(%rsi), %xmm7 + movaps 0x6d(%rsi), %xmm8 + movaps 0x7d(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $3, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $3, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $3, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $3, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $3, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $3, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $3, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $3, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_3) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_3_bwd): - movaps -0x03(%rsi), %xmm1 + movaps -0x03(%rsi), %xmm1 - movaps -0x13(%rsi), %xmm2 - palignr $3, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x13(%rsi), %xmm2 + palignr $3, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x23(%rsi), %xmm3 - palignr $3, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x23(%rsi), %xmm3 + palignr $3, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x33(%rsi), %xmm4 - palignr $3, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x33(%rsi), %xmm4 + palignr $3, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x43(%rsi), %xmm5 - 
palignr $3, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x43(%rsi), %xmm5 + palignr $3, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x53(%rsi), %xmm6 - palignr $3, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x53(%rsi), %xmm6 + palignr $3, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x63(%rsi), %xmm7 - palignr $3, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x63(%rsi), %xmm7 + palignr $3, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x73(%rsi), %xmm8 - palignr $3, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x73(%rsi), %xmm8 + palignr $3, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x83(%rsi), %xmm9 - palignr $3, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x83(%rsi), %xmm9 + palignr $3, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_3_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_3_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_4): - sub $0x80, %rdx - movaps -0x04(%rsi), %xmm1 - movaps 0x0c(%rsi), %xmm2 - movaps 0x1c(%rsi), %xmm3 - movaps 0x2c(%rsi), %xmm4 - movaps 0x3c(%rsi), %xmm5 - movaps 0x4c(%rsi), %xmm6 - movaps 0x5c(%rsi), %xmm7 - movaps 0x6c(%rsi), %xmm8 - movaps 0x7c(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $4, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $4, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $4, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $4, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $4, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $4, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $4, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $4, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 
0x80(%rdi), %rdi - jae L(shl_4) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x04(%rsi), %xmm1 + movaps 0x0c(%rsi), %xmm2 + movaps 0x1c(%rsi), %xmm3 + movaps 0x2c(%rsi), %xmm4 + movaps 0x3c(%rsi), %xmm5 + movaps 0x4c(%rsi), %xmm6 + movaps 0x5c(%rsi), %xmm7 + movaps 0x6c(%rsi), %xmm8 + movaps 0x7c(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $4, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $4, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $4, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $4, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $4, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $4, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $4, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $4, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_4) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_4_bwd): - movaps -0x04(%rsi), %xmm1 + movaps -0x04(%rsi), %xmm1 - movaps -0x14(%rsi), %xmm2 - palignr $4, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x14(%rsi), %xmm2 + palignr $4, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x24(%rsi), %xmm3 - palignr $4, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x24(%rsi), %xmm3 + palignr $4, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x34(%rsi), %xmm4 - palignr $4, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x34(%rsi), %xmm4 + palignr $4, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x44(%rsi), %xmm5 - palignr $4, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x44(%rsi), %xmm5 + palignr $4, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x54(%rsi), %xmm6 - palignr $4, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x54(%rsi), %xmm6 + palignr $4, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x64(%rsi), 
%xmm7 - palignr $4, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x64(%rsi), %xmm7 + palignr $4, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x74(%rsi), %xmm8 - palignr $4, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x74(%rsi), %xmm8 + palignr $4, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x84(%rsi), %xmm9 - palignr $4, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x84(%rsi), %xmm9 + palignr $4, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_4_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_4_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_5): - sub $0x80, %rdx - movaps -0x05(%rsi), %xmm1 - movaps 0x0b(%rsi), %xmm2 - movaps 0x1b(%rsi), %xmm3 - movaps 0x2b(%rsi), %xmm4 - movaps 0x3b(%rsi), %xmm5 - movaps 0x4b(%rsi), %xmm6 - movaps 0x5b(%rsi), %xmm7 - movaps 0x6b(%rsi), %xmm8 - movaps 0x7b(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $5, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $5, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $5, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $5, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $5, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $5, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $5, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $5, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_5) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x05(%rsi), %xmm1 + movaps 0x0b(%rsi), %xmm2 + movaps 0x1b(%rsi), %xmm3 + movaps 0x2b(%rsi), %xmm4 + movaps 0x3b(%rsi), %xmm5 + movaps 
0x4b(%rsi), %xmm6 + movaps 0x5b(%rsi), %xmm7 + movaps 0x6b(%rsi), %xmm8 + movaps 0x7b(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $5, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $5, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $5, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $5, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $5, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $5, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $5, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $5, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_5) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_5_bwd): - movaps -0x05(%rsi), %xmm1 + movaps -0x05(%rsi), %xmm1 - movaps -0x15(%rsi), %xmm2 - palignr $5, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x15(%rsi), %xmm2 + palignr $5, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x25(%rsi), %xmm3 - palignr $5, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x25(%rsi), %xmm3 + palignr $5, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x35(%rsi), %xmm4 - palignr $5, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x35(%rsi), %xmm4 + palignr $5, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x45(%rsi), %xmm5 - palignr $5, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x45(%rsi), %xmm5 + palignr $5, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x55(%rsi), %xmm6 - palignr $5, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x55(%rsi), %xmm6 + palignr $5, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x65(%rsi), %xmm7 - palignr $5, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x65(%rsi), %xmm7 + palignr $5, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x75(%rsi), %xmm8 - palignr $5, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x75(%rsi), %xmm8 + palignr $5, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps 
-0x85(%rsi), %xmm9 - palignr $5, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x85(%rsi), %xmm9 + palignr $5, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_5_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_5_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_6): - sub $0x80, %rdx - movaps -0x06(%rsi), %xmm1 - movaps 0x0a(%rsi), %xmm2 - movaps 0x1a(%rsi), %xmm3 - movaps 0x2a(%rsi), %xmm4 - movaps 0x3a(%rsi), %xmm5 - movaps 0x4a(%rsi), %xmm6 - movaps 0x5a(%rsi), %xmm7 - movaps 0x6a(%rsi), %xmm8 - movaps 0x7a(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $6, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $6, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $6, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $6, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $6, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $6, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $6, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $6, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_6) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x06(%rsi), %xmm1 + movaps 0x0a(%rsi), %xmm2 + movaps 0x1a(%rsi), %xmm3 + movaps 0x2a(%rsi), %xmm4 + movaps 0x3a(%rsi), %xmm5 + movaps 0x4a(%rsi), %xmm6 + movaps 0x5a(%rsi), %xmm7 + movaps 0x6a(%rsi), %xmm8 + movaps 0x7a(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $6, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $6, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $6, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $6, %xmm5, %xmm6 + movaps 
%xmm6, 0x40(%rdi) + palignr $6, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $6, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $6, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $6, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_6) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_6_bwd): - movaps -0x06(%rsi), %xmm1 + movaps -0x06(%rsi), %xmm1 - movaps -0x16(%rsi), %xmm2 - palignr $6, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x16(%rsi), %xmm2 + palignr $6, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x26(%rsi), %xmm3 - palignr $6, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x26(%rsi), %xmm3 + palignr $6, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x36(%rsi), %xmm4 - palignr $6, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x36(%rsi), %xmm4 + palignr $6, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x46(%rsi), %xmm5 - palignr $6, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x46(%rsi), %xmm5 + palignr $6, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x56(%rsi), %xmm6 - palignr $6, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x56(%rsi), %xmm6 + palignr $6, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x66(%rsi), %xmm7 - palignr $6, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x66(%rsi), %xmm7 + palignr $6, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x76(%rsi), %xmm8 - palignr $6, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x76(%rsi), %xmm8 + palignr $6, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x86(%rsi), %xmm9 - palignr $6, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x86(%rsi), %xmm9 + palignr $6, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_6_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - 
BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_6_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_7): - sub $0x80, %rdx - movaps -0x07(%rsi), %xmm1 - movaps 0x09(%rsi), %xmm2 - movaps 0x19(%rsi), %xmm3 - movaps 0x29(%rsi), %xmm4 - movaps 0x39(%rsi), %xmm5 - movaps 0x49(%rsi), %xmm6 - movaps 0x59(%rsi), %xmm7 - movaps 0x69(%rsi), %xmm8 - movaps 0x79(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $7, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $7, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $7, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $7, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $7, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $7, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $7, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $7, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_7) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x07(%rsi), %xmm1 + movaps 0x09(%rsi), %xmm2 + movaps 0x19(%rsi), %xmm3 + movaps 0x29(%rsi), %xmm4 + movaps 0x39(%rsi), %xmm5 + movaps 0x49(%rsi), %xmm6 + movaps 0x59(%rsi), %xmm7 + movaps 0x69(%rsi), %xmm8 + movaps 0x79(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $7, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $7, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $7, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $7, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $7, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $7, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $7, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $7, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_7) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, 
%rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_7_bwd): - movaps -0x07(%rsi), %xmm1 + movaps -0x07(%rsi), %xmm1 - movaps -0x17(%rsi), %xmm2 - palignr $7, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x17(%rsi), %xmm2 + palignr $7, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x27(%rsi), %xmm3 - palignr $7, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x27(%rsi), %xmm3 + palignr $7, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x37(%rsi), %xmm4 - palignr $7, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x37(%rsi), %xmm4 + palignr $7, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x47(%rsi), %xmm5 - palignr $7, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x47(%rsi), %xmm5 + palignr $7, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x57(%rsi), %xmm6 - palignr $7, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x57(%rsi), %xmm6 + palignr $7, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x67(%rsi), %xmm7 - palignr $7, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x67(%rsi), %xmm7 + palignr $7, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x77(%rsi), %xmm8 - palignr $7, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x77(%rsi), %xmm8 + palignr $7, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x87(%rsi), %xmm9 - palignr $7, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x87(%rsi), %xmm9 + palignr $7, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_7_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_7_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_8): - sub $0x80, %rdx - 
movaps -0x08(%rsi), %xmm1 - movaps 0x08(%rsi), %xmm2 - movaps 0x18(%rsi), %xmm3 - movaps 0x28(%rsi), %xmm4 - movaps 0x38(%rsi), %xmm5 - movaps 0x48(%rsi), %xmm6 - movaps 0x58(%rsi), %xmm7 - movaps 0x68(%rsi), %xmm8 - movaps 0x78(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $8, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $8, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $8, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $8, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $8, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $8, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $8, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $8, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_8) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x08(%rsi), %xmm1 + movaps 0x08(%rsi), %xmm2 + movaps 0x18(%rsi), %xmm3 + movaps 0x28(%rsi), %xmm4 + movaps 0x38(%rsi), %xmm5 + movaps 0x48(%rsi), %xmm6 + movaps 0x58(%rsi), %xmm7 + movaps 0x68(%rsi), %xmm8 + movaps 0x78(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $8, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $8, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $8, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $8, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $8, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $8, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $8, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $8, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_8) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_8_bwd): - movaps -0x08(%rsi), %xmm1 + movaps -0x08(%rsi), %xmm1 - movaps -0x18(%rsi), %xmm2 - palignr $8, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x18(%rsi), %xmm2 + palignr $8, %xmm2, %xmm1 + movaps %xmm1, 
-0x10(%rdi) - movaps -0x28(%rsi), %xmm3 - palignr $8, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x28(%rsi), %xmm3 + palignr $8, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x38(%rsi), %xmm4 - palignr $8, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x38(%rsi), %xmm4 + palignr $8, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x48(%rsi), %xmm5 - palignr $8, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x48(%rsi), %xmm5 + palignr $8, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x58(%rsi), %xmm6 - palignr $8, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x58(%rsi), %xmm6 + palignr $8, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x68(%rsi), %xmm7 - palignr $8, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x68(%rsi), %xmm7 + palignr $8, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x78(%rsi), %xmm8 - palignr $8, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x78(%rsi), %xmm8 + palignr $8, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x88(%rsi), %xmm9 - palignr $8, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x88(%rsi), %xmm9 + palignr $8, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_8_bwd) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_8_bwd) L(shl_8_end_bwd): - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_9): - sub $0x80, %rdx - movaps -0x09(%rsi), %xmm1 - movaps 0x07(%rsi), %xmm2 - movaps 0x17(%rsi), %xmm3 - movaps 0x27(%rsi), %xmm4 - movaps 0x37(%rsi), %xmm5 - movaps 0x47(%rsi), %xmm6 - movaps 0x57(%rsi), %xmm7 - movaps 0x67(%rsi), %xmm8 - movaps 0x77(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $9, %xmm8, %xmm9 - movaps %xmm9, 
0x70(%rdi) - palignr $9, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $9, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $9, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $9, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $9, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $9, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $9, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_9) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x09(%rsi), %xmm1 + movaps 0x07(%rsi), %xmm2 + movaps 0x17(%rsi), %xmm3 + movaps 0x27(%rsi), %xmm4 + movaps 0x37(%rsi), %xmm5 + movaps 0x47(%rsi), %xmm6 + movaps 0x57(%rsi), %xmm7 + movaps 0x67(%rsi), %xmm8 + movaps 0x77(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $9, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $9, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $9, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $9, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $9, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $9, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $9, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $9, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_9) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_9_bwd): - movaps -0x09(%rsi), %xmm1 + movaps -0x09(%rsi), %xmm1 - movaps -0x19(%rsi), %xmm2 - palignr $9, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x19(%rsi), %xmm2 + palignr $9, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x29(%rsi), %xmm3 - palignr $9, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x29(%rsi), %xmm3 + palignr $9, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x39(%rsi), %xmm4 - palignr $9, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x39(%rsi), %xmm4 + palignr $9, %xmm4, 
%xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x49(%rsi), %xmm5 - palignr $9, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x49(%rsi), %xmm5 + palignr $9, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x59(%rsi), %xmm6 - palignr $9, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x59(%rsi), %xmm6 + palignr $9, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x69(%rsi), %xmm7 - palignr $9, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x69(%rsi), %xmm7 + palignr $9, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x79(%rsi), %xmm8 - palignr $9, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x79(%rsi), %xmm8 + palignr $9, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x89(%rsi), %xmm9 - palignr $9, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x89(%rsi), %xmm9 + palignr $9, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_9_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_9_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_10): - sub $0x80, %rdx - movaps -0x0a(%rsi), %xmm1 - movaps 0x06(%rsi), %xmm2 - movaps 0x16(%rsi), %xmm3 - movaps 0x26(%rsi), %xmm4 - movaps 0x36(%rsi), %xmm5 - movaps 0x46(%rsi), %xmm6 - movaps 0x56(%rsi), %xmm7 - movaps 0x66(%rsi), %xmm8 - movaps 0x76(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $10, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $10, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $10, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $10, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $10, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $10, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $10, %xmm2, %xmm3 - movaps %xmm3, 
0x10(%rdi) - palignr $10, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_10) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x0a(%rsi), %xmm1 + movaps 0x06(%rsi), %xmm2 + movaps 0x16(%rsi), %xmm3 + movaps 0x26(%rsi), %xmm4 + movaps 0x36(%rsi), %xmm5 + movaps 0x46(%rsi), %xmm6 + movaps 0x56(%rsi), %xmm7 + movaps 0x66(%rsi), %xmm8 + movaps 0x76(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $10, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $10, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $10, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $10, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $10, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $10, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $10, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $10, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_10) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_10_bwd): - movaps -0x0a(%rsi), %xmm1 + movaps -0x0a(%rsi), %xmm1 - movaps -0x1a(%rsi), %xmm2 - palignr $10, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x1a(%rsi), %xmm2 + palignr $10, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x2a(%rsi), %xmm3 - palignr $10, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x2a(%rsi), %xmm3 + palignr $10, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x3a(%rsi), %xmm4 - palignr $10, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x3a(%rsi), %xmm4 + palignr $10, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x4a(%rsi), %xmm5 - palignr $10, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x4a(%rsi), %xmm5 + palignr $10, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x5a(%rsi), %xmm6 - palignr $10, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps 
-0x5a(%rsi), %xmm6 + palignr $10, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x6a(%rsi), %xmm7 - palignr $10, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x6a(%rsi), %xmm7 + palignr $10, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x7a(%rsi), %xmm8 - palignr $10, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x7a(%rsi), %xmm8 + palignr $10, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x8a(%rsi), %xmm9 - palignr $10, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x8a(%rsi), %xmm9 + palignr $10, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_10_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_10_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_11): - sub $0x80, %rdx - movaps -0x0b(%rsi), %xmm1 - movaps 0x05(%rsi), %xmm2 - movaps 0x15(%rsi), %xmm3 - movaps 0x25(%rsi), %xmm4 - movaps 0x35(%rsi), %xmm5 - movaps 0x45(%rsi), %xmm6 - movaps 0x55(%rsi), %xmm7 - movaps 0x65(%rsi), %xmm8 - movaps 0x75(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $11, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $11, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $11, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $11, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $11, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $11, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $11, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $11, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_11) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x0b(%rsi), %xmm1 + 
movaps 0x05(%rsi), %xmm2 + movaps 0x15(%rsi), %xmm3 + movaps 0x25(%rsi), %xmm4 + movaps 0x35(%rsi), %xmm5 + movaps 0x45(%rsi), %xmm6 + movaps 0x55(%rsi), %xmm7 + movaps 0x65(%rsi), %xmm8 + movaps 0x75(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $11, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $11, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $11, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $11, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $11, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $11, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $11, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $11, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_11) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_11_bwd): - movaps -0x0b(%rsi), %xmm1 + movaps -0x0b(%rsi), %xmm1 - movaps -0x1b(%rsi), %xmm2 - palignr $11, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x1b(%rsi), %xmm2 + palignr $11, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x2b(%rsi), %xmm3 - palignr $11, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x2b(%rsi), %xmm3 + palignr $11, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x3b(%rsi), %xmm4 - palignr $11, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x3b(%rsi), %xmm4 + palignr $11, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x4b(%rsi), %xmm5 - palignr $11, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x4b(%rsi), %xmm5 + palignr $11, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x5b(%rsi), %xmm6 - palignr $11, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x5b(%rsi), %xmm6 + palignr $11, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x6b(%rsi), %xmm7 - palignr $11, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x6b(%rsi), %xmm7 + palignr $11, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x7b(%rsi), %xmm8 - palignr $11, %xmm8, 
%xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x7b(%rsi), %xmm8 + palignr $11, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x8b(%rsi), %xmm9 - palignr $11, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x8b(%rsi), %xmm9 + palignr $11, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_11_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_11_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_12): - sub $0x80, %rdx - movdqa -0x0c(%rsi), %xmm1 - movaps 0x04(%rsi), %xmm2 - movaps 0x14(%rsi), %xmm3 - movaps 0x24(%rsi), %xmm4 - movaps 0x34(%rsi), %xmm5 - movaps 0x44(%rsi), %xmm6 - movaps 0x54(%rsi), %xmm7 - movaps 0x64(%rsi), %xmm8 - movaps 0x74(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $12, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $12, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $12, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $12, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $12, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $12, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $12, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $12, %xmm1, %xmm2 - movaps %xmm2, (%rdi) + sub $0x80, %rdx + movdqa -0x0c(%rsi), %xmm1 + movaps 0x04(%rsi), %xmm2 + movaps 0x14(%rsi), %xmm3 + movaps 0x24(%rsi), %xmm4 + movaps 0x34(%rsi), %xmm5 + movaps 0x44(%rsi), %xmm6 + movaps 0x54(%rsi), %xmm7 + movaps 0x64(%rsi), %xmm8 + movaps 0x74(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $12, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $12, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $12, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $12, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr 
$12, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $12, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $12, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $12, %xmm1, %xmm2 + movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_12) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + lea 0x80(%rdi), %rdi + jae L(shl_12) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_12_bwd): - movaps -0x0c(%rsi), %xmm1 + movaps -0x0c(%rsi), %xmm1 - movaps -0x1c(%rsi), %xmm2 - palignr $12, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x1c(%rsi), %xmm2 + palignr $12, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x2c(%rsi), %xmm3 - palignr $12, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x2c(%rsi), %xmm3 + palignr $12, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x3c(%rsi), %xmm4 - palignr $12, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x3c(%rsi), %xmm4 + palignr $12, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x4c(%rsi), %xmm5 - palignr $12, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x4c(%rsi), %xmm5 + palignr $12, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x5c(%rsi), %xmm6 - palignr $12, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x5c(%rsi), %xmm6 + palignr $12, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x6c(%rsi), %xmm7 - palignr $12, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x6c(%rsi), %xmm7 + palignr $12, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x7c(%rsi), %xmm8 - palignr $12, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x7c(%rsi), %xmm8 + palignr $12, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x8c(%rsi), %xmm9 - palignr $12, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x8c(%rsi), %xmm9 + palignr $12, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub 
$0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_12_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_12_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_13): - sub $0x80, %rdx - movaps -0x0d(%rsi), %xmm1 - movaps 0x03(%rsi), %xmm2 - movaps 0x13(%rsi), %xmm3 - movaps 0x23(%rsi), %xmm4 - movaps 0x33(%rsi), %xmm5 - movaps 0x43(%rsi), %xmm6 - movaps 0x53(%rsi), %xmm7 - movaps 0x63(%rsi), %xmm8 - movaps 0x73(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $13, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $13, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $13, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $13, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $13, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $13, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $13, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $13, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_13) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x0d(%rsi), %xmm1 + movaps 0x03(%rsi), %xmm2 + movaps 0x13(%rsi), %xmm3 + movaps 0x23(%rsi), %xmm4 + movaps 0x33(%rsi), %xmm5 + movaps 0x43(%rsi), %xmm6 + movaps 0x53(%rsi), %xmm7 + movaps 0x63(%rsi), %xmm8 + movaps 0x73(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $13, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $13, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $13, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $13, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $13, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $13, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $13, %xmm2, 
%xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $13, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_13) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_13_bwd): - movaps -0x0d(%rsi), %xmm1 + movaps -0x0d(%rsi), %xmm1 - movaps -0x1d(%rsi), %xmm2 - palignr $13, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x1d(%rsi), %xmm2 + palignr $13, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x2d(%rsi), %xmm3 - palignr $13, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x2d(%rsi), %xmm3 + palignr $13, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x3d(%rsi), %xmm4 - palignr $13, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x3d(%rsi), %xmm4 + palignr $13, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x4d(%rsi), %xmm5 - palignr $13, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x4d(%rsi), %xmm5 + palignr $13, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x5d(%rsi), %xmm6 - palignr $13, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x5d(%rsi), %xmm6 + palignr $13, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x6d(%rsi), %xmm7 - palignr $13, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x6d(%rsi), %xmm7 + palignr $13, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x7d(%rsi), %xmm8 - palignr $13, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x7d(%rsi), %xmm8 + palignr $13, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x8d(%rsi), %xmm9 - palignr $13, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x8d(%rsi), %xmm9 + palignr $13, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_13_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae 
L(shl_13_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_14): - sub $0x80, %rdx - movaps -0x0e(%rsi), %xmm1 - movaps 0x02(%rsi), %xmm2 - movaps 0x12(%rsi), %xmm3 - movaps 0x22(%rsi), %xmm4 - movaps 0x32(%rsi), %xmm5 - movaps 0x42(%rsi), %xmm6 - movaps 0x52(%rsi), %xmm7 - movaps 0x62(%rsi), %xmm8 - movaps 0x72(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $14, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $14, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $14, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $14, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $14, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $14, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $14, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $14, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_14) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x0e(%rsi), %xmm1 + movaps 0x02(%rsi), %xmm2 + movaps 0x12(%rsi), %xmm3 + movaps 0x22(%rsi), %xmm4 + movaps 0x32(%rsi), %xmm5 + movaps 0x42(%rsi), %xmm6 + movaps 0x52(%rsi), %xmm7 + movaps 0x62(%rsi), %xmm8 + movaps 0x72(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $14, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $14, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $14, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $14, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $14, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $14, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $14, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $14, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_14) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 
L(shl_14_bwd): - movaps -0x0e(%rsi), %xmm1 + movaps -0x0e(%rsi), %xmm1 - movaps -0x1e(%rsi), %xmm2 - palignr $14, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x1e(%rsi), %xmm2 + palignr $14, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x2e(%rsi), %xmm3 - palignr $14, %xmm3, %xmm2 - movaps %xmm2, -0x20(%rdi) + movaps -0x2e(%rsi), %xmm3 + palignr $14, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x3e(%rsi), %xmm4 - palignr $14, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x3e(%rsi), %xmm4 + palignr $14, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x4e(%rsi), %xmm5 - palignr $14, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x4e(%rsi), %xmm5 + palignr $14, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x5e(%rsi), %xmm6 - palignr $14, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x5e(%rsi), %xmm6 + palignr $14, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x6e(%rsi), %xmm7 - palignr $14, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x6e(%rsi), %xmm7 + palignr $14, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x7e(%rsi), %xmm8 - palignr $14, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x7e(%rsi), %xmm8 + palignr $14, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x8e(%rsi), %xmm9 - palignr $14, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x8e(%rsi), %xmm9 + palignr $14, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_14_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_14_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_15): - sub $0x80, %rdx - movaps -0x0f(%rsi), %xmm1 - movaps 0x01(%rsi), %xmm2 - movaps 0x11(%rsi), %xmm3 - movaps 
0x21(%rsi), %xmm4 - movaps 0x31(%rsi), %xmm5 - movaps 0x41(%rsi), %xmm6 - movaps 0x51(%rsi), %xmm7 - movaps 0x61(%rsi), %xmm8 - movaps 0x71(%rsi), %xmm9 - lea 0x80(%rsi), %rsi - palignr $15, %xmm8, %xmm9 - movaps %xmm9, 0x70(%rdi) - palignr $15, %xmm7, %xmm8 - movaps %xmm8, 0x60(%rdi) - palignr $15, %xmm6, %xmm7 - movaps %xmm7, 0x50(%rdi) - palignr $15, %xmm5, %xmm6 - movaps %xmm6, 0x40(%rdi) - palignr $15, %xmm4, %xmm5 - movaps %xmm5, 0x30(%rdi) - palignr $15, %xmm3, %xmm4 - movaps %xmm4, 0x20(%rdi) - palignr $15, %xmm2, %xmm3 - movaps %xmm3, 0x10(%rdi) - palignr $15, %xmm1, %xmm2 - movaps %xmm2, (%rdi) - lea 0x80(%rdi), %rdi - jae L(shl_15) - movdqu %xmm0, (%r8) - add $0x80, %rdx - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + sub $0x80, %rdx + movaps -0x0f(%rsi), %xmm1 + movaps 0x01(%rsi), %xmm2 + movaps 0x11(%rsi), %xmm3 + movaps 0x21(%rsi), %xmm4 + movaps 0x31(%rsi), %xmm5 + movaps 0x41(%rsi), %xmm6 + movaps 0x51(%rsi), %xmm7 + movaps 0x61(%rsi), %xmm8 + movaps 0x71(%rsi), %xmm9 + lea 0x80(%rsi), %rsi + palignr $15, %xmm8, %xmm9 + movaps %xmm9, 0x70(%rdi) + palignr $15, %xmm7, %xmm8 + movaps %xmm8, 0x60(%rdi) + palignr $15, %xmm6, %xmm7 + movaps %xmm7, 0x50(%rdi) + palignr $15, %xmm5, %xmm6 + movaps %xmm6, 0x40(%rdi) + palignr $15, %xmm4, %xmm5 + movaps %xmm5, 0x30(%rdi) + palignr $15, %xmm3, %xmm4 + movaps %xmm4, 0x20(%rdi) + palignr $15, %xmm2, %xmm3 + movaps %xmm3, 0x10(%rdi) + palignr $15, %xmm1, %xmm2 + movaps %xmm2, (%rdi) + lea 0x80(%rdi), %rdi + jae L(shl_15) + movdqu %xmm0, (%r8) + add $0x80, %rdx + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_15_bwd): - movaps -0x0f(%rsi), %xmm1 + movaps -0x0f(%rsi), %xmm1 - movaps -0x1f(%rsi), %xmm2 - palignr $15, %xmm2, %xmm1 - movaps %xmm1, -0x10(%rdi) + movaps -0x1f(%rsi), %xmm2 + palignr $15, %xmm2, %xmm1 + movaps %xmm1, -0x10(%rdi) - movaps -0x2f(%rsi), %xmm3 - palignr $15, %xmm3, %xmm2 - 
movaps %xmm2, -0x20(%rdi) + movaps -0x2f(%rsi), %xmm3 + palignr $15, %xmm3, %xmm2 + movaps %xmm2, -0x20(%rdi) - movaps -0x3f(%rsi), %xmm4 - palignr $15, %xmm4, %xmm3 - movaps %xmm3, -0x30(%rdi) + movaps -0x3f(%rsi), %xmm4 + palignr $15, %xmm4, %xmm3 + movaps %xmm3, -0x30(%rdi) - movaps -0x4f(%rsi), %xmm5 - palignr $15, %xmm5, %xmm4 - movaps %xmm4, -0x40(%rdi) + movaps -0x4f(%rsi), %xmm5 + palignr $15, %xmm5, %xmm4 + movaps %xmm4, -0x40(%rdi) - movaps -0x5f(%rsi), %xmm6 - palignr $15, %xmm6, %xmm5 - movaps %xmm5, -0x50(%rdi) + movaps -0x5f(%rsi), %xmm6 + palignr $15, %xmm6, %xmm5 + movaps %xmm5, -0x50(%rdi) - movaps -0x6f(%rsi), %xmm7 - palignr $15, %xmm7, %xmm6 - movaps %xmm6, -0x60(%rdi) + movaps -0x6f(%rsi), %xmm7 + palignr $15, %xmm7, %xmm6 + movaps %xmm6, -0x60(%rdi) - movaps -0x7f(%rsi), %xmm8 - palignr $15, %xmm8, %xmm7 - movaps %xmm7, -0x70(%rdi) + movaps -0x7f(%rsi), %xmm8 + palignr $15, %xmm8, %xmm7 + movaps %xmm7, -0x70(%rdi) - movaps -0x8f(%rsi), %xmm9 - palignr $15, %xmm9, %xmm8 - movaps %xmm8, -0x80(%rdi) + movaps -0x8f(%rsi), %xmm9 + palignr $15, %xmm9, %xmm8 + movaps %xmm8, -0x80(%rdi) - sub $0x80, %rdx - lea -0x80(%rdi), %rdi - lea -0x80(%rsi), %rsi - jae L(shl_15_bwd) - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rdi - sub %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + sub $0x80, %rdx + lea -0x80(%rdi), %rdi + lea -0x80(%rsi), %rsi + jae L(shl_15_bwd) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rdi + sub %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(gobble_mem_fwd): - movdqu (%rsi), %xmm1 - movdqu %xmm0, (%r8) - movdqa %xmm1, (%rdi) - sub $16, %rdx - add $16, %rsi - add $16, %rdi + movdqu (%rsi), %xmm1 + movdqu %xmm0, (%r8) + movdqa %xmm1, (%rdi) + sub $16, %rdx + add $16, %rsi + add $16, %rdi #ifdef SHARED_CACHE_SIZE_HALF - mov $SHARED_CACHE_SIZE_HALF, %RCX_LP + mov $SHARED_CACHE_SIZE_HALF, %RCX_LP #else - mov __x86_shared_cache_size_half(%rip), %RCX_LP + 
mov __x86_shared_cache_size_half(%rip), %RCX_LP #endif #ifdef USE_AS_MEMMOVE - mov %rsi, %r9 - sub %rdi, %r9 - cmp %rdx, %r9 - jae L(memmove_is_memcpy_fwd) - cmp %rcx, %r9 - jbe L(ll_cache_copy_fwd_start) + mov %rsi, %r9 + sub %rdi, %r9 + cmp %rdx, %r9 + jae L(memmove_is_memcpy_fwd) + cmp %rcx, %r9 + jbe L(ll_cache_copy_fwd_start) L(memmove_is_memcpy_fwd): #endif - cmp %rcx, %rdx - ja L(bigger_in_fwd) - mov %rdx, %rcx + cmp %rcx, %rdx + ja L(bigger_in_fwd) + mov %rdx, %rcx L(bigger_in_fwd): - sub %rcx, %rdx - cmp $0x1000, %rdx - jbe L(ll_cache_copy_fwd) + sub %rcx, %rdx + cmp $0x1000, %rdx + jbe L(ll_cache_copy_fwd) - mov %rcx, %r9 - shl $3, %r9 - cmp %r9, %rdx - jbe L(2steps_copy_fwd) - add %rcx, %rdx - xor %rcx, %rcx + mov %rcx, %r9 + shl $3, %r9 + cmp %r9, %rdx + jbe L(2steps_copy_fwd) + add %rcx, %rdx + xor %rcx, %rcx L(2steps_copy_fwd): - sub $0x80, %rdx + sub $0x80, %rdx L(gobble_mem_fwd_loop): - sub $0x80, %rdx - prefetcht0 0x200(%rsi) - prefetcht0 0x300(%rsi) - movdqu (%rsi), %xmm0 - movdqu 0x10(%rsi), %xmm1 - movdqu 0x20(%rsi), %xmm2 - movdqu 0x30(%rsi), %xmm3 - movdqu 0x40(%rsi), %xmm4 - movdqu 0x50(%rsi), %xmm5 - movdqu 0x60(%rsi), %xmm6 - movdqu 0x70(%rsi), %xmm7 - lfence - movntdq %xmm0, (%rdi) - movntdq %xmm1, 0x10(%rdi) - movntdq %xmm2, 0x20(%rdi) - movntdq %xmm3, 0x30(%rdi) - movntdq %xmm4, 0x40(%rdi) - movntdq %xmm5, 0x50(%rdi) - movntdq %xmm6, 0x60(%rdi) - movntdq %xmm7, 0x70(%rdi) - lea 0x80(%rsi), %rsi - lea 0x80(%rdi), %rdi - jae L(gobble_mem_fwd_loop) - sfence - cmp $0x80, %rcx - jb L(gobble_mem_fwd_end) - add $0x80, %rdx + sub $0x80, %rdx + prefetcht0 0x200(%rsi) + prefetcht0 0x300(%rsi) + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + movdqu 0x40(%rsi), %xmm4 + movdqu 0x50(%rsi), %xmm5 + movdqu 0x60(%rsi), %xmm6 + movdqu 0x70(%rsi), %xmm7 + lfence + movntdq %xmm0, (%rdi) + movntdq %xmm1, 0x10(%rdi) + movntdq %xmm2, 0x20(%rdi) + movntdq %xmm3, 0x30(%rdi) + movntdq %xmm4, 0x40(%rdi) + 
movntdq %xmm5, 0x50(%rdi) + movntdq %xmm6, 0x60(%rdi) + movntdq %xmm7, 0x70(%rdi) + lea 0x80(%rsi), %rsi + lea 0x80(%rdi), %rdi + jae L(gobble_mem_fwd_loop) + sfence + cmp $0x80, %rcx + jb L(gobble_mem_fwd_end) + add $0x80, %rdx L(ll_cache_copy_fwd): - add %rcx, %rdx + add %rcx, %rdx L(ll_cache_copy_fwd_start): - sub $0x80, %rdx + sub $0x80, %rdx L(gobble_ll_loop_fwd): - prefetchnta 0x1c0(%rsi) - prefetchnta 0x280(%rsi) - prefetchnta 0x1c0(%rdi) - prefetchnta 0x280(%rdi) - sub $0x80, %rdx - movdqu (%rsi), %xmm0 - movdqu 0x10(%rsi), %xmm1 - movdqu 0x20(%rsi), %xmm2 - movdqu 0x30(%rsi), %xmm3 - movdqu 0x40(%rsi), %xmm4 - movdqu 0x50(%rsi), %xmm5 - movdqu 0x60(%rsi), %xmm6 - movdqu 0x70(%rsi), %xmm7 - movdqa %xmm0, (%rdi) - movdqa %xmm1, 0x10(%rdi) - movdqa %xmm2, 0x20(%rdi) - movdqa %xmm3, 0x30(%rdi) - movdqa %xmm4, 0x40(%rdi) - movdqa %xmm5, 0x50(%rdi) - movdqa %xmm6, 0x60(%rdi) - movdqa %xmm7, 0x70(%rdi) - lea 0x80(%rsi), %rsi - lea 0x80(%rdi), %rdi - jae L(gobble_ll_loop_fwd) + prefetchnta 0x1c0(%rsi) + prefetchnta 0x280(%rsi) + prefetchnta 0x1c0(%rdi) + prefetchnta 0x280(%rdi) + sub $0x80, %rdx + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + movdqu 0x40(%rsi), %xmm4 + movdqu 0x50(%rsi), %xmm5 + movdqu 0x60(%rsi), %xmm6 + movdqu 0x70(%rsi), %xmm7 + movdqa %xmm0, (%rdi) + movdqa %xmm1, 0x10(%rdi) + movdqa %xmm2, 0x20(%rdi) + movdqa %xmm3, 0x30(%rdi) + movdqa %xmm4, 0x40(%rdi) + movdqa %xmm5, 0x50(%rdi) + movdqa %xmm6, 0x60(%rdi) + movdqa %xmm7, 0x70(%rdi) + lea 0x80(%rsi), %rsi + lea 0x80(%rdi), %rdi + jae L(gobble_ll_loop_fwd) L(gobble_mem_fwd_end): - add $0x80, %rdx - add %rdx, %rsi - add %rdx, %rdi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) + add $0x80, %rdx + add %rdx, %rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_fwd), %rdx, 4) - .p2align 4 + .p2align 4 L(gobble_mem_bwd): - add %rdx, %rsi - add %rdx, %rdi + add %rdx, %rsi + add %rdx, %rdi - movdqu -16(%rsi), %xmm0 - 
lea -16(%rdi), %r8 - mov %rdi, %r9 - and $-16, %rdi - sub %rdi, %r9 - sub %r9, %rsi - sub %r9, %rdx + movdqu -16(%rsi), %xmm0 + lea -16(%rdi), %r8 + mov %rdi, %r9 + and $-16, %rdi + sub %rdi, %r9 + sub %r9, %rsi + sub %r9, %rdx #ifdef SHARED_CACHE_SIZE_HALF - mov $SHARED_CACHE_SIZE_HALF, %RCX_LP + mov $SHARED_CACHE_SIZE_HALF, %RCX_LP #else - mov __x86_shared_cache_size_half(%rip), %RCX_LP + mov __x86_shared_cache_size_half(%rip), %RCX_LP #endif #ifdef USE_AS_MEMMOVE - mov %rdi, %r9 - sub %rsi, %r9 - cmp %rdx, %r9 - jae L(memmove_is_memcpy_bwd) - cmp %rcx, %r9 - jbe L(ll_cache_copy_bwd_start) + mov %rdi, %r9 + sub %rsi, %r9 + cmp %rdx, %r9 + jae L(memmove_is_memcpy_bwd) + cmp %rcx, %r9 + jbe L(ll_cache_copy_bwd_start) L(memmove_is_memcpy_bwd): #endif - cmp %rcx, %rdx - ja L(bigger) - mov %rdx, %rcx + cmp %rcx, %rdx + ja L(bigger) + mov %rdx, %rcx L(bigger): - sub %rcx, %rdx - cmp $0x1000, %rdx - jbe L(ll_cache_copy) + sub %rcx, %rdx + cmp $0x1000, %rdx + jbe L(ll_cache_copy) - mov %rcx, %r9 - shl $3, %r9 - cmp %r9, %rdx - jbe L(2steps_copy) - add %rcx, %rdx - xor %rcx, %rcx + mov %rcx, %r9 + shl $3, %r9 + cmp %r9, %rdx + jbe L(2steps_copy) + add %rcx, %rdx + xor %rcx, %rcx L(2steps_copy): - sub $0x80, %rdx + sub $0x80, %rdx L(gobble_mem_bwd_loop): - sub $0x80, %rdx - prefetcht0 -0x200(%rsi) - prefetcht0 -0x300(%rsi) - movdqu -0x10(%rsi), %xmm1 - movdqu -0x20(%rsi), %xmm2 - movdqu -0x30(%rsi), %xmm3 - movdqu -0x40(%rsi), %xmm4 - movdqu -0x50(%rsi), %xmm5 - movdqu -0x60(%rsi), %xmm6 - movdqu -0x70(%rsi), %xmm7 - movdqu -0x80(%rsi), %xmm8 - lfence - movntdq %xmm1, -0x10(%rdi) - movntdq %xmm2, -0x20(%rdi) - movntdq %xmm3, -0x30(%rdi) - movntdq %xmm4, -0x40(%rdi) - movntdq %xmm5, -0x50(%rdi) - movntdq %xmm6, -0x60(%rdi) - movntdq %xmm7, -0x70(%rdi) - movntdq %xmm8, -0x80(%rdi) - lea -0x80(%rsi), %rsi - lea -0x80(%rdi), %rdi - jae L(gobble_mem_bwd_loop) - sfence - cmp $0x80, %rcx - jb L(gobble_mem_bwd_end) - add $0x80, %rdx + sub $0x80, %rdx + prefetcht0 -0x200(%rsi) + 
prefetcht0 -0x300(%rsi) + movdqu -0x10(%rsi), %xmm1 + movdqu -0x20(%rsi), %xmm2 + movdqu -0x30(%rsi), %xmm3 + movdqu -0x40(%rsi), %xmm4 + movdqu -0x50(%rsi), %xmm5 + movdqu -0x60(%rsi), %xmm6 + movdqu -0x70(%rsi), %xmm7 + movdqu -0x80(%rsi), %xmm8 + lfence + movntdq %xmm1, -0x10(%rdi) + movntdq %xmm2, -0x20(%rdi) + movntdq %xmm3, -0x30(%rdi) + movntdq %xmm4, -0x40(%rdi) + movntdq %xmm5, -0x50(%rdi) + movntdq %xmm6, -0x60(%rdi) + movntdq %xmm7, -0x70(%rdi) + movntdq %xmm8, -0x80(%rdi) + lea -0x80(%rsi), %rsi + lea -0x80(%rdi), %rdi + jae L(gobble_mem_bwd_loop) + sfence + cmp $0x80, %rcx + jb L(gobble_mem_bwd_end) + add $0x80, %rdx L(ll_cache_copy): - add %rcx, %rdx + add %rcx, %rdx L(ll_cache_copy_bwd_start): - sub $0x80, %rdx + sub $0x80, %rdx L(gobble_ll_loop): - prefetchnta -0x1c0(%rsi) - prefetchnta -0x280(%rsi) - prefetchnta -0x1c0(%rdi) - prefetchnta -0x280(%rdi) - sub $0x80, %rdx - movdqu -0x10(%rsi), %xmm1 - movdqu -0x20(%rsi), %xmm2 - movdqu -0x30(%rsi), %xmm3 - movdqu -0x40(%rsi), %xmm4 - movdqu -0x50(%rsi), %xmm5 - movdqu -0x60(%rsi), %xmm6 - movdqu -0x70(%rsi), %xmm7 - movdqu -0x80(%rsi), %xmm8 - movdqa %xmm1, -0x10(%rdi) - movdqa %xmm2, -0x20(%rdi) - movdqa %xmm3, -0x30(%rdi) - movdqa %xmm4, -0x40(%rdi) - movdqa %xmm5, -0x50(%rdi) - movdqa %xmm6, -0x60(%rdi) - movdqa %xmm7, -0x70(%rdi) - movdqa %xmm8, -0x80(%rdi) - lea -0x80(%rsi), %rsi - lea -0x80(%rdi), %rdi - jae L(gobble_ll_loop) + prefetchnta -0x1c0(%rsi) + prefetchnta -0x280(%rsi) + prefetchnta -0x1c0(%rdi) + prefetchnta -0x280(%rdi) + sub $0x80, %rdx + movdqu -0x10(%rsi), %xmm1 + movdqu -0x20(%rsi), %xmm2 + movdqu -0x30(%rsi), %xmm3 + movdqu -0x40(%rsi), %xmm4 + movdqu -0x50(%rsi), %xmm5 + movdqu -0x60(%rsi), %xmm6 + movdqu -0x70(%rsi), %xmm7 + movdqu -0x80(%rsi), %xmm8 + movdqa %xmm1, -0x10(%rdi) + movdqa %xmm2, -0x20(%rdi) + movdqa %xmm3, -0x30(%rdi) + movdqa %xmm4, -0x40(%rdi) + movdqa %xmm5, -0x50(%rdi) + movdqa %xmm6, -0x60(%rdi) + movdqa %xmm7, -0x70(%rdi) + movdqa %xmm8, -0x80(%rdi) + lea 
-0x80(%rsi), %rsi + lea -0x80(%rdi), %rdi + jae L(gobble_ll_loop) L(gobble_mem_bwd_end): - movdqu %xmm0, (%r8) - add $0x80, %rdx - sub %rdx, %rsi - sub %rdx, %rdi - BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) + movdqu %xmm0, (%r8) + add $0x80, %rdx + sub %rdx, %rsi + sub %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_144_bytes_bwd), %rdx, 4) - .p2align 4 + .p2align 4 L(fwd_write_128bytes): - lddqu -128(%rsi), %xmm0 - movdqu %xmm0, -128(%rdi) + lddqu -128(%rsi), %xmm0 + movdqu %xmm0, -128(%rdi) L(fwd_write_112bytes): - lddqu -112(%rsi), %xmm0 - movdqu %xmm0, -112(%rdi) + lddqu -112(%rsi), %xmm0 + movdqu %xmm0, -112(%rdi) L(fwd_write_96bytes): - lddqu -96(%rsi), %xmm0 - movdqu %xmm0, -96(%rdi) + lddqu -96(%rsi), %xmm0 + movdqu %xmm0, -96(%rdi) L(fwd_write_80bytes): - lddqu -80(%rsi), %xmm0 - movdqu %xmm0, -80(%rdi) + lddqu -80(%rsi), %xmm0 + movdqu %xmm0, -80(%rdi) L(fwd_write_64bytes): - lddqu -64(%rsi), %xmm0 - movdqu %xmm0, -64(%rdi) + lddqu -64(%rsi), %xmm0 + movdqu %xmm0, -64(%rdi) L(fwd_write_48bytes): - lddqu -48(%rsi), %xmm0 - movdqu %xmm0, -48(%rdi) + lddqu -48(%rsi), %xmm0 + movdqu %xmm0, -48(%rdi) L(fwd_write_32bytes): - lddqu -32(%rsi), %xmm0 - movdqu %xmm0, -32(%rdi) + lddqu -32(%rsi), %xmm0 + movdqu %xmm0, -32(%rdi) L(fwd_write_16bytes): - lddqu -16(%rsi), %xmm0 - movdqu %xmm0, -16(%rdi) + lddqu -16(%rsi), %xmm0 + movdqu %xmm0, -16(%rdi) L(fwd_write_0bytes): - ret + ret - .p2align 4 + .p2align 4 L(fwd_write_143bytes): - lddqu -143(%rsi), %xmm0 - movdqu %xmm0, -143(%rdi) + lddqu -143(%rsi), %xmm0 + movdqu %xmm0, -143(%rdi) L(fwd_write_127bytes): - lddqu -127(%rsi), %xmm0 - movdqu %xmm0, -127(%rdi) + lddqu -127(%rsi), %xmm0 + movdqu %xmm0, -127(%rdi) L(fwd_write_111bytes): - lddqu -111(%rsi), %xmm0 - movdqu %xmm0, -111(%rdi) + lddqu -111(%rsi), %xmm0 + movdqu %xmm0, -111(%rdi) L(fwd_write_95bytes): - lddqu -95(%rsi), %xmm0 - movdqu %xmm0, -95(%rdi) + lddqu -95(%rsi), %xmm0 + movdqu %xmm0, -95(%rdi) L(fwd_write_79bytes): - lddqu -79(%rsi), %xmm0 
- movdqu %xmm0, -79(%rdi) + lddqu -79(%rsi), %xmm0 + movdqu %xmm0, -79(%rdi) L(fwd_write_63bytes): - lddqu -63(%rsi), %xmm0 - movdqu %xmm0, -63(%rdi) + lddqu -63(%rsi), %xmm0 + movdqu %xmm0, -63(%rdi) L(fwd_write_47bytes): - lddqu -47(%rsi), %xmm0 - movdqu %xmm0, -47(%rdi) + lddqu -47(%rsi), %xmm0 + movdqu %xmm0, -47(%rdi) L(fwd_write_31bytes): - lddqu -31(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -31(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -31(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -31(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_15bytes): - mov -15(%rsi), %rdx - mov -8(%rsi), %rcx - mov %rdx, -15(%rdi) - mov %rcx, -8(%rdi) - ret + mov -15(%rsi), %rdx + mov -8(%rsi), %rcx + mov %rdx, -15(%rdi) + mov %rcx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_142bytes): - lddqu -142(%rsi), %xmm0 - movdqu %xmm0, -142(%rdi) + lddqu -142(%rsi), %xmm0 + movdqu %xmm0, -142(%rdi) L(fwd_write_126bytes): - lddqu -126(%rsi), %xmm0 - movdqu %xmm0, -126(%rdi) + lddqu -126(%rsi), %xmm0 + movdqu %xmm0, -126(%rdi) L(fwd_write_110bytes): - lddqu -110(%rsi), %xmm0 - movdqu %xmm0, -110(%rdi) + lddqu -110(%rsi), %xmm0 + movdqu %xmm0, -110(%rdi) L(fwd_write_94bytes): - lddqu -94(%rsi), %xmm0 - movdqu %xmm0, -94(%rdi) + lddqu -94(%rsi), %xmm0 + movdqu %xmm0, -94(%rdi) L(fwd_write_78bytes): - lddqu -78(%rsi), %xmm0 - movdqu %xmm0, -78(%rdi) + lddqu -78(%rsi), %xmm0 + movdqu %xmm0, -78(%rdi) L(fwd_write_62bytes): - lddqu -62(%rsi), %xmm0 - movdqu %xmm0, -62(%rdi) + lddqu -62(%rsi), %xmm0 + movdqu %xmm0, -62(%rdi) L(fwd_write_46bytes): - lddqu -46(%rsi), %xmm0 - movdqu %xmm0, -46(%rdi) + lddqu -46(%rsi), %xmm0 + movdqu %xmm0, -46(%rdi) L(fwd_write_30bytes): - lddqu -30(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -30(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -30(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -30(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_14bytes): - mov 
-14(%rsi), %rdx - mov -8(%rsi), %rcx - mov %rdx, -14(%rdi) - mov %rcx, -8(%rdi) - ret + mov -14(%rsi), %rdx + mov -8(%rsi), %rcx + mov %rdx, -14(%rdi) + mov %rcx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_141bytes): - lddqu -141(%rsi), %xmm0 - movdqu %xmm0, -141(%rdi) + lddqu -141(%rsi), %xmm0 + movdqu %xmm0, -141(%rdi) L(fwd_write_125bytes): - lddqu -125(%rsi), %xmm0 - movdqu %xmm0, -125(%rdi) + lddqu -125(%rsi), %xmm0 + movdqu %xmm0, -125(%rdi) L(fwd_write_109bytes): - lddqu -109(%rsi), %xmm0 - movdqu %xmm0, -109(%rdi) + lddqu -109(%rsi), %xmm0 + movdqu %xmm0, -109(%rdi) L(fwd_write_93bytes): - lddqu -93(%rsi), %xmm0 - movdqu %xmm0, -93(%rdi) + lddqu -93(%rsi), %xmm0 + movdqu %xmm0, -93(%rdi) L(fwd_write_77bytes): - lddqu -77(%rsi), %xmm0 - movdqu %xmm0, -77(%rdi) + lddqu -77(%rsi), %xmm0 + movdqu %xmm0, -77(%rdi) L(fwd_write_61bytes): - lddqu -61(%rsi), %xmm0 - movdqu %xmm0, -61(%rdi) + lddqu -61(%rsi), %xmm0 + movdqu %xmm0, -61(%rdi) L(fwd_write_45bytes): - lddqu -45(%rsi), %xmm0 - movdqu %xmm0, -45(%rdi) + lddqu -45(%rsi), %xmm0 + movdqu %xmm0, -45(%rdi) L(fwd_write_29bytes): - lddqu -29(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -29(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -29(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -29(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_13bytes): - mov -13(%rsi), %rdx - mov -8(%rsi), %rcx - mov %rdx, -13(%rdi) - mov %rcx, -8(%rdi) - ret + mov -13(%rsi), %rdx + mov -8(%rsi), %rcx + mov %rdx, -13(%rdi) + mov %rcx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_140bytes): - lddqu -140(%rsi), %xmm0 - movdqu %xmm0, -140(%rdi) + lddqu -140(%rsi), %xmm0 + movdqu %xmm0, -140(%rdi) L(fwd_write_124bytes): - lddqu -124(%rsi), %xmm0 - movdqu %xmm0, -124(%rdi) + lddqu -124(%rsi), %xmm0 + movdqu %xmm0, -124(%rdi) L(fwd_write_108bytes): - lddqu -108(%rsi), %xmm0 - movdqu %xmm0, -108(%rdi) + lddqu -108(%rsi), %xmm0 + movdqu %xmm0, -108(%rdi) L(fwd_write_92bytes): - lddqu 
-92(%rsi), %xmm0 - movdqu %xmm0, -92(%rdi) + lddqu -92(%rsi), %xmm0 + movdqu %xmm0, -92(%rdi) L(fwd_write_76bytes): - lddqu -76(%rsi), %xmm0 - movdqu %xmm0, -76(%rdi) + lddqu -76(%rsi), %xmm0 + movdqu %xmm0, -76(%rdi) L(fwd_write_60bytes): - lddqu -60(%rsi), %xmm0 - movdqu %xmm0, -60(%rdi) + lddqu -60(%rsi), %xmm0 + movdqu %xmm0, -60(%rdi) L(fwd_write_44bytes): - lddqu -44(%rsi), %xmm0 - movdqu %xmm0, -44(%rdi) + lddqu -44(%rsi), %xmm0 + movdqu %xmm0, -44(%rdi) L(fwd_write_28bytes): - lddqu -28(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -28(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -28(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -28(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_12bytes): - mov -12(%rsi), %rdx - mov -4(%rsi), %ecx - mov %rdx, -12(%rdi) - mov %ecx, -4(%rdi) - ret + mov -12(%rsi), %rdx + mov -4(%rsi), %ecx + mov %rdx, -12(%rdi) + mov %ecx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_139bytes): - lddqu -139(%rsi), %xmm0 - movdqu %xmm0, -139(%rdi) + lddqu -139(%rsi), %xmm0 + movdqu %xmm0, -139(%rdi) L(fwd_write_123bytes): - lddqu -123(%rsi), %xmm0 - movdqu %xmm0, -123(%rdi) + lddqu -123(%rsi), %xmm0 + movdqu %xmm0, -123(%rdi) L(fwd_write_107bytes): - lddqu -107(%rsi), %xmm0 - movdqu %xmm0, -107(%rdi) + lddqu -107(%rsi), %xmm0 + movdqu %xmm0, -107(%rdi) L(fwd_write_91bytes): - lddqu -91(%rsi), %xmm0 - movdqu %xmm0, -91(%rdi) + lddqu -91(%rsi), %xmm0 + movdqu %xmm0, -91(%rdi) L(fwd_write_75bytes): - lddqu -75(%rsi), %xmm0 - movdqu %xmm0, -75(%rdi) + lddqu -75(%rsi), %xmm0 + movdqu %xmm0, -75(%rdi) L(fwd_write_59bytes): - lddqu -59(%rsi), %xmm0 - movdqu %xmm0, -59(%rdi) + lddqu -59(%rsi), %xmm0 + movdqu %xmm0, -59(%rdi) L(fwd_write_43bytes): - lddqu -43(%rsi), %xmm0 - movdqu %xmm0, -43(%rdi) + lddqu -43(%rsi), %xmm0 + movdqu %xmm0, -43(%rdi) L(fwd_write_27bytes): - lddqu -27(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -27(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -27(%rsi), %xmm0 
+ lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -27(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_11bytes): - mov -11(%rsi), %rdx - mov -4(%rsi), %ecx - mov %rdx, -11(%rdi) - mov %ecx, -4(%rdi) - ret + mov -11(%rsi), %rdx + mov -4(%rsi), %ecx + mov %rdx, -11(%rdi) + mov %ecx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_138bytes): - lddqu -138(%rsi), %xmm0 - movdqu %xmm0, -138(%rdi) + lddqu -138(%rsi), %xmm0 + movdqu %xmm0, -138(%rdi) L(fwd_write_122bytes): - lddqu -122(%rsi), %xmm0 - movdqu %xmm0, -122(%rdi) + lddqu -122(%rsi), %xmm0 + movdqu %xmm0, -122(%rdi) L(fwd_write_106bytes): - lddqu -106(%rsi), %xmm0 - movdqu %xmm0, -106(%rdi) + lddqu -106(%rsi), %xmm0 + movdqu %xmm0, -106(%rdi) L(fwd_write_90bytes): - lddqu -90(%rsi), %xmm0 - movdqu %xmm0, -90(%rdi) + lddqu -90(%rsi), %xmm0 + movdqu %xmm0, -90(%rdi) L(fwd_write_74bytes): - lddqu -74(%rsi), %xmm0 - movdqu %xmm0, -74(%rdi) + lddqu -74(%rsi), %xmm0 + movdqu %xmm0, -74(%rdi) L(fwd_write_58bytes): - lddqu -58(%rsi), %xmm0 - movdqu %xmm0, -58(%rdi) + lddqu -58(%rsi), %xmm0 + movdqu %xmm0, -58(%rdi) L(fwd_write_42bytes): - lddqu -42(%rsi), %xmm0 - movdqu %xmm0, -42(%rdi) + lddqu -42(%rsi), %xmm0 + movdqu %xmm0, -42(%rdi) L(fwd_write_26bytes): - lddqu -26(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -26(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -26(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -26(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_10bytes): - mov -10(%rsi), %rdx - mov -4(%rsi), %ecx - mov %rdx, -10(%rdi) - mov %ecx, -4(%rdi) - ret + mov -10(%rsi), %rdx + mov -4(%rsi), %ecx + mov %rdx, -10(%rdi) + mov %ecx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_137bytes): - lddqu -137(%rsi), %xmm0 - movdqu %xmm0, -137(%rdi) + lddqu -137(%rsi), %xmm0 + movdqu %xmm0, -137(%rdi) L(fwd_write_121bytes): - lddqu -121(%rsi), %xmm0 - movdqu %xmm0, -121(%rdi) + lddqu -121(%rsi), %xmm0 + movdqu %xmm0, -121(%rdi) 
L(fwd_write_105bytes): - lddqu -105(%rsi), %xmm0 - movdqu %xmm0, -105(%rdi) + lddqu -105(%rsi), %xmm0 + movdqu %xmm0, -105(%rdi) L(fwd_write_89bytes): - lddqu -89(%rsi), %xmm0 - movdqu %xmm0, -89(%rdi) + lddqu -89(%rsi), %xmm0 + movdqu %xmm0, -89(%rdi) L(fwd_write_73bytes): - lddqu -73(%rsi), %xmm0 - movdqu %xmm0, -73(%rdi) + lddqu -73(%rsi), %xmm0 + movdqu %xmm0, -73(%rdi) L(fwd_write_57bytes): - lddqu -57(%rsi), %xmm0 - movdqu %xmm0, -57(%rdi) + lddqu -57(%rsi), %xmm0 + movdqu %xmm0, -57(%rdi) L(fwd_write_41bytes): - lddqu -41(%rsi), %xmm0 - movdqu %xmm0, -41(%rdi) + lddqu -41(%rsi), %xmm0 + movdqu %xmm0, -41(%rdi) L(fwd_write_25bytes): - lddqu -25(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -25(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -25(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -25(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_9bytes): - mov -9(%rsi), %rdx - mov -4(%rsi), %ecx - mov %rdx, -9(%rdi) - mov %ecx, -4(%rdi) - ret + mov -9(%rsi), %rdx + mov -4(%rsi), %ecx + mov %rdx, -9(%rdi) + mov %ecx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_136bytes): - lddqu -136(%rsi), %xmm0 - movdqu %xmm0, -136(%rdi) + lddqu -136(%rsi), %xmm0 + movdqu %xmm0, -136(%rdi) L(fwd_write_120bytes): - lddqu -120(%rsi), %xmm0 - movdqu %xmm0, -120(%rdi) + lddqu -120(%rsi), %xmm0 + movdqu %xmm0, -120(%rdi) L(fwd_write_104bytes): - lddqu -104(%rsi), %xmm0 - movdqu %xmm0, -104(%rdi) + lddqu -104(%rsi), %xmm0 + movdqu %xmm0, -104(%rdi) L(fwd_write_88bytes): - lddqu -88(%rsi), %xmm0 - movdqu %xmm0, -88(%rdi) + lddqu -88(%rsi), %xmm0 + movdqu %xmm0, -88(%rdi) L(fwd_write_72bytes): - lddqu -72(%rsi), %xmm0 - movdqu %xmm0, -72(%rdi) + lddqu -72(%rsi), %xmm0 + movdqu %xmm0, -72(%rdi) L(fwd_write_56bytes): - lddqu -56(%rsi), %xmm0 - movdqu %xmm0, -56(%rdi) + lddqu -56(%rsi), %xmm0 + movdqu %xmm0, -56(%rdi) L(fwd_write_40bytes): - lddqu -40(%rsi), %xmm0 - movdqu %xmm0, -40(%rdi) + lddqu -40(%rsi), %xmm0 + movdqu %xmm0, -40(%rdi) 
L(fwd_write_24bytes): - lddqu -24(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -24(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -24(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -24(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_8bytes): - mov -8(%rsi), %rdx - mov %rdx, -8(%rdi) - ret + mov -8(%rsi), %rdx + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_135bytes): - lddqu -135(%rsi), %xmm0 - movdqu %xmm0, -135(%rdi) + lddqu -135(%rsi), %xmm0 + movdqu %xmm0, -135(%rdi) L(fwd_write_119bytes): - lddqu -119(%rsi), %xmm0 - movdqu %xmm0, -119(%rdi) + lddqu -119(%rsi), %xmm0 + movdqu %xmm0, -119(%rdi) L(fwd_write_103bytes): - lddqu -103(%rsi), %xmm0 - movdqu %xmm0, -103(%rdi) + lddqu -103(%rsi), %xmm0 + movdqu %xmm0, -103(%rdi) L(fwd_write_87bytes): - lddqu -87(%rsi), %xmm0 - movdqu %xmm0, -87(%rdi) + lddqu -87(%rsi), %xmm0 + movdqu %xmm0, -87(%rdi) L(fwd_write_71bytes): - lddqu -71(%rsi), %xmm0 - movdqu %xmm0, -71(%rdi) + lddqu -71(%rsi), %xmm0 + movdqu %xmm0, -71(%rdi) L(fwd_write_55bytes): - lddqu -55(%rsi), %xmm0 - movdqu %xmm0, -55(%rdi) + lddqu -55(%rsi), %xmm0 + movdqu %xmm0, -55(%rdi) L(fwd_write_39bytes): - lddqu -39(%rsi), %xmm0 - movdqu %xmm0, -39(%rdi) + lddqu -39(%rsi), %xmm0 + movdqu %xmm0, -39(%rdi) L(fwd_write_23bytes): - lddqu -23(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -23(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -23(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -23(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_7bytes): - mov -7(%rsi), %edx - mov -4(%rsi), %ecx - mov %edx, -7(%rdi) - mov %ecx, -4(%rdi) - ret + mov -7(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -7(%rdi) + mov %ecx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_134bytes): - lddqu -134(%rsi), %xmm0 - movdqu %xmm0, -134(%rdi) + lddqu -134(%rsi), %xmm0 + movdqu %xmm0, -134(%rdi) L(fwd_write_118bytes): - lddqu -118(%rsi), %xmm0 - movdqu %xmm0, -118(%rdi) + lddqu 
-118(%rsi), %xmm0 + movdqu %xmm0, -118(%rdi) L(fwd_write_102bytes): - lddqu -102(%rsi), %xmm0 - movdqu %xmm0, -102(%rdi) + lddqu -102(%rsi), %xmm0 + movdqu %xmm0, -102(%rdi) L(fwd_write_86bytes): - lddqu -86(%rsi), %xmm0 - movdqu %xmm0, -86(%rdi) + lddqu -86(%rsi), %xmm0 + movdqu %xmm0, -86(%rdi) L(fwd_write_70bytes): - lddqu -70(%rsi), %xmm0 - movdqu %xmm0, -70(%rdi) + lddqu -70(%rsi), %xmm0 + movdqu %xmm0, -70(%rdi) L(fwd_write_54bytes): - lddqu -54(%rsi), %xmm0 - movdqu %xmm0, -54(%rdi) + lddqu -54(%rsi), %xmm0 + movdqu %xmm0, -54(%rdi) L(fwd_write_38bytes): - lddqu -38(%rsi), %xmm0 - movdqu %xmm0, -38(%rdi) + lddqu -38(%rsi), %xmm0 + movdqu %xmm0, -38(%rdi) L(fwd_write_22bytes): - lddqu -22(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -22(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -22(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -22(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_6bytes): - mov -6(%rsi), %edx - mov -4(%rsi), %ecx - mov %edx, -6(%rdi) - mov %ecx, -4(%rdi) - ret + mov -6(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -6(%rdi) + mov %ecx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_133bytes): - lddqu -133(%rsi), %xmm0 - movdqu %xmm0, -133(%rdi) + lddqu -133(%rsi), %xmm0 + movdqu %xmm0, -133(%rdi) L(fwd_write_117bytes): - lddqu -117(%rsi), %xmm0 - movdqu %xmm0, -117(%rdi) + lddqu -117(%rsi), %xmm0 + movdqu %xmm0, -117(%rdi) L(fwd_write_101bytes): - lddqu -101(%rsi), %xmm0 - movdqu %xmm0, -101(%rdi) + lddqu -101(%rsi), %xmm0 + movdqu %xmm0, -101(%rdi) L(fwd_write_85bytes): - lddqu -85(%rsi), %xmm0 - movdqu %xmm0, -85(%rdi) + lddqu -85(%rsi), %xmm0 + movdqu %xmm0, -85(%rdi) L(fwd_write_69bytes): - lddqu -69(%rsi), %xmm0 - movdqu %xmm0, -69(%rdi) + lddqu -69(%rsi), %xmm0 + movdqu %xmm0, -69(%rdi) L(fwd_write_53bytes): - lddqu -53(%rsi), %xmm0 - movdqu %xmm0, -53(%rdi) + lddqu -53(%rsi), %xmm0 + movdqu %xmm0, -53(%rdi) L(fwd_write_37bytes): - lddqu -37(%rsi), %xmm0 - movdqu %xmm0, -37(%rdi) + 
lddqu -37(%rsi), %xmm0 + movdqu %xmm0, -37(%rdi) L(fwd_write_21bytes): - lddqu -21(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -21(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -21(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -21(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_5bytes): - mov -5(%rsi), %edx - mov -4(%rsi), %ecx - mov %edx, -5(%rdi) - mov %ecx, -4(%rdi) - ret + mov -5(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -5(%rdi) + mov %ecx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_132bytes): - lddqu -132(%rsi), %xmm0 - movdqu %xmm0, -132(%rdi) + lddqu -132(%rsi), %xmm0 + movdqu %xmm0, -132(%rdi) L(fwd_write_116bytes): - lddqu -116(%rsi), %xmm0 - movdqu %xmm0, -116(%rdi) + lddqu -116(%rsi), %xmm0 + movdqu %xmm0, -116(%rdi) L(fwd_write_100bytes): - lddqu -100(%rsi), %xmm0 - movdqu %xmm0, -100(%rdi) + lddqu -100(%rsi), %xmm0 + movdqu %xmm0, -100(%rdi) L(fwd_write_84bytes): - lddqu -84(%rsi), %xmm0 - movdqu %xmm0, -84(%rdi) + lddqu -84(%rsi), %xmm0 + movdqu %xmm0, -84(%rdi) L(fwd_write_68bytes): - lddqu -68(%rsi), %xmm0 - movdqu %xmm0, -68(%rdi) + lddqu -68(%rsi), %xmm0 + movdqu %xmm0, -68(%rdi) L(fwd_write_52bytes): - lddqu -52(%rsi), %xmm0 - movdqu %xmm0, -52(%rdi) + lddqu -52(%rsi), %xmm0 + movdqu %xmm0, -52(%rdi) L(fwd_write_36bytes): - lddqu -36(%rsi), %xmm0 - movdqu %xmm0, -36(%rdi) + lddqu -36(%rsi), %xmm0 + movdqu %xmm0, -36(%rdi) L(fwd_write_20bytes): - lddqu -20(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -20(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -20(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -20(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_4bytes): - mov -4(%rsi), %edx - mov %edx, -4(%rdi) - ret + mov -4(%rsi), %edx + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_131bytes): - lddqu -131(%rsi), %xmm0 - movdqu %xmm0, -131(%rdi) + lddqu -131(%rsi), %xmm0 + movdqu %xmm0, -131(%rdi) L(fwd_write_115bytes): - lddqu 
-115(%rsi), %xmm0 - movdqu %xmm0, -115(%rdi) + lddqu -115(%rsi), %xmm0 + movdqu %xmm0, -115(%rdi) L(fwd_write_99bytes): - lddqu -99(%rsi), %xmm0 - movdqu %xmm0, -99(%rdi) + lddqu -99(%rsi), %xmm0 + movdqu %xmm0, -99(%rdi) L(fwd_write_83bytes): - lddqu -83(%rsi), %xmm0 - movdqu %xmm0, -83(%rdi) + lddqu -83(%rsi), %xmm0 + movdqu %xmm0, -83(%rdi) L(fwd_write_67bytes): - lddqu -67(%rsi), %xmm0 - movdqu %xmm0, -67(%rdi) + lddqu -67(%rsi), %xmm0 + movdqu %xmm0, -67(%rdi) L(fwd_write_51bytes): - lddqu -51(%rsi), %xmm0 - movdqu %xmm0, -51(%rdi) + lddqu -51(%rsi), %xmm0 + movdqu %xmm0, -51(%rdi) L(fwd_write_35bytes): - lddqu -35(%rsi), %xmm0 - movdqu %xmm0, -35(%rdi) + lddqu -35(%rsi), %xmm0 + movdqu %xmm0, -35(%rdi) L(fwd_write_19bytes): - lddqu -19(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -19(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -19(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -19(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_3bytes): - mov -3(%rsi), %dx - mov -2(%rsi), %cx - mov %dx, -3(%rdi) - mov %cx, -2(%rdi) - ret + mov -3(%rsi), %dx + mov -2(%rsi), %cx + mov %dx, -3(%rdi) + mov %cx, -2(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_130bytes): - lddqu -130(%rsi), %xmm0 - movdqu %xmm0, -130(%rdi) + lddqu -130(%rsi), %xmm0 + movdqu %xmm0, -130(%rdi) L(fwd_write_114bytes): - lddqu -114(%rsi), %xmm0 - movdqu %xmm0, -114(%rdi) + lddqu -114(%rsi), %xmm0 + movdqu %xmm0, -114(%rdi) L(fwd_write_98bytes): - lddqu -98(%rsi), %xmm0 - movdqu %xmm0, -98(%rdi) + lddqu -98(%rsi), %xmm0 + movdqu %xmm0, -98(%rdi) L(fwd_write_82bytes): - lddqu -82(%rsi), %xmm0 - movdqu %xmm0, -82(%rdi) + lddqu -82(%rsi), %xmm0 + movdqu %xmm0, -82(%rdi) L(fwd_write_66bytes): - lddqu -66(%rsi), %xmm0 - movdqu %xmm0, -66(%rdi) + lddqu -66(%rsi), %xmm0 + movdqu %xmm0, -66(%rdi) L(fwd_write_50bytes): - lddqu -50(%rsi), %xmm0 - movdqu %xmm0, -50(%rdi) + lddqu -50(%rsi), %xmm0 + movdqu %xmm0, -50(%rdi) L(fwd_write_34bytes): - lddqu -34(%rsi), 
%xmm0 - movdqu %xmm0, -34(%rdi) + lddqu -34(%rsi), %xmm0 + movdqu %xmm0, -34(%rdi) L(fwd_write_18bytes): - lddqu -18(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -18(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -18(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -18(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_2bytes): - movzwl -2(%rsi), %edx - mov %dx, -2(%rdi) - ret + movzwl -2(%rsi), %edx + mov %dx, -2(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_129bytes): - lddqu -129(%rsi), %xmm0 - movdqu %xmm0, -129(%rdi) + lddqu -129(%rsi), %xmm0 + movdqu %xmm0, -129(%rdi) L(fwd_write_113bytes): - lddqu -113(%rsi), %xmm0 - movdqu %xmm0, -113(%rdi) + lddqu -113(%rsi), %xmm0 + movdqu %xmm0, -113(%rdi) L(fwd_write_97bytes): - lddqu -97(%rsi), %xmm0 - movdqu %xmm0, -97(%rdi) + lddqu -97(%rsi), %xmm0 + movdqu %xmm0, -97(%rdi) L(fwd_write_81bytes): - lddqu -81(%rsi), %xmm0 - movdqu %xmm0, -81(%rdi) + lddqu -81(%rsi), %xmm0 + movdqu %xmm0, -81(%rdi) L(fwd_write_65bytes): - lddqu -65(%rsi), %xmm0 - movdqu %xmm0, -65(%rdi) + lddqu -65(%rsi), %xmm0 + movdqu %xmm0, -65(%rdi) L(fwd_write_49bytes): - lddqu -49(%rsi), %xmm0 - movdqu %xmm0, -49(%rdi) + lddqu -49(%rsi), %xmm0 + movdqu %xmm0, -49(%rdi) L(fwd_write_33bytes): - lddqu -33(%rsi), %xmm0 - movdqu %xmm0, -33(%rdi) + lddqu -33(%rsi), %xmm0 + movdqu %xmm0, -33(%rdi) L(fwd_write_17bytes): - lddqu -17(%rsi), %xmm0 - lddqu -16(%rsi), %xmm1 - movdqu %xmm0, -17(%rdi) - movdqu %xmm1, -16(%rdi) - ret + lddqu -17(%rsi), %xmm0 + lddqu -16(%rsi), %xmm1 + movdqu %xmm0, -17(%rdi) + movdqu %xmm1, -16(%rdi) + ret - .p2align 4 + .p2align 4 L(fwd_write_1bytes): - movzbl -1(%rsi), %edx - mov %dl, -1(%rdi) - ret + movzbl -1(%rsi), %edx + mov %dl, -1(%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_128bytes): - lddqu 112(%rsi), %xmm0 - movdqu %xmm0, 112(%rdi) + lddqu 112(%rsi), %xmm0 + movdqu %xmm0, 112(%rdi) L(bwd_write_112bytes): - lddqu 96(%rsi), %xmm0 - movdqu %xmm0, 96(%rdi) + lddqu 96(%rsi), 
%xmm0 + movdqu %xmm0, 96(%rdi) L(bwd_write_96bytes): - lddqu 80(%rsi), %xmm0 - movdqu %xmm0, 80(%rdi) + lddqu 80(%rsi), %xmm0 + movdqu %xmm0, 80(%rdi) L(bwd_write_80bytes): - lddqu 64(%rsi), %xmm0 - movdqu %xmm0, 64(%rdi) + lddqu 64(%rsi), %xmm0 + movdqu %xmm0, 64(%rdi) L(bwd_write_64bytes): - lddqu 48(%rsi), %xmm0 - movdqu %xmm0, 48(%rdi) + lddqu 48(%rsi), %xmm0 + movdqu %xmm0, 48(%rdi) L(bwd_write_48bytes): - lddqu 32(%rsi), %xmm0 - movdqu %xmm0, 32(%rdi) + lddqu 32(%rsi), %xmm0 + movdqu %xmm0, 32(%rdi) L(bwd_write_32bytes): - lddqu 16(%rsi), %xmm0 - movdqu %xmm0, 16(%rdi) + lddqu 16(%rsi), %xmm0 + movdqu %xmm0, 16(%rdi) L(bwd_write_16bytes): - lddqu (%rsi), %xmm0 - movdqu %xmm0, (%rdi) + lddqu (%rsi), %xmm0 + movdqu %xmm0, (%rdi) L(bwd_write_0bytes): - ret + ret - .p2align 4 + .p2align 4 L(bwd_write_143bytes): - lddqu 127(%rsi), %xmm0 - movdqu %xmm0, 127(%rdi) + lddqu 127(%rsi), %xmm0 + movdqu %xmm0, 127(%rdi) L(bwd_write_127bytes): - lddqu 111(%rsi), %xmm0 - movdqu %xmm0, 111(%rdi) + lddqu 111(%rsi), %xmm0 + movdqu %xmm0, 111(%rdi) L(bwd_write_111bytes): - lddqu 95(%rsi), %xmm0 - movdqu %xmm0, 95(%rdi) + lddqu 95(%rsi), %xmm0 + movdqu %xmm0, 95(%rdi) L(bwd_write_95bytes): - lddqu 79(%rsi), %xmm0 - movdqu %xmm0, 79(%rdi) + lddqu 79(%rsi), %xmm0 + movdqu %xmm0, 79(%rdi) L(bwd_write_79bytes): - lddqu 63(%rsi), %xmm0 - movdqu %xmm0, 63(%rdi) + lddqu 63(%rsi), %xmm0 + movdqu %xmm0, 63(%rdi) L(bwd_write_63bytes): - lddqu 47(%rsi), %xmm0 - movdqu %xmm0, 47(%rdi) + lddqu 47(%rsi), %xmm0 + movdqu %xmm0, 47(%rdi) L(bwd_write_47bytes): - lddqu 31(%rsi), %xmm0 - movdqu %xmm0, 31(%rdi) + lddqu 31(%rsi), %xmm0 + movdqu %xmm0, 31(%rdi) L(bwd_write_31bytes): - lddqu 15(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 15(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 15(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 15(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_15bytes): - mov 7(%rsi), %rdx - mov (%rsi), %rcx - mov %rdx, 7(%rdi) - mov %rcx, 
(%rdi) - ret + mov 7(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 7(%rdi) + mov %rcx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_142bytes): - lddqu 126(%rsi), %xmm0 - movdqu %xmm0, 126(%rdi) + lddqu 126(%rsi), %xmm0 + movdqu %xmm0, 126(%rdi) L(bwd_write_126bytes): - lddqu 110(%rsi), %xmm0 - movdqu %xmm0, 110(%rdi) + lddqu 110(%rsi), %xmm0 + movdqu %xmm0, 110(%rdi) L(bwd_write_110bytes): - lddqu 94(%rsi), %xmm0 - movdqu %xmm0, 94(%rdi) + lddqu 94(%rsi), %xmm0 + movdqu %xmm0, 94(%rdi) L(bwd_write_94bytes): - lddqu 78(%rsi), %xmm0 - movdqu %xmm0, 78(%rdi) + lddqu 78(%rsi), %xmm0 + movdqu %xmm0, 78(%rdi) L(bwd_write_78bytes): - lddqu 62(%rsi), %xmm0 - movdqu %xmm0, 62(%rdi) + lddqu 62(%rsi), %xmm0 + movdqu %xmm0, 62(%rdi) L(bwd_write_62bytes): - lddqu 46(%rsi), %xmm0 - movdqu %xmm0, 46(%rdi) + lddqu 46(%rsi), %xmm0 + movdqu %xmm0, 46(%rdi) L(bwd_write_46bytes): - lddqu 30(%rsi), %xmm0 - movdqu %xmm0, 30(%rdi) + lddqu 30(%rsi), %xmm0 + movdqu %xmm0, 30(%rdi) L(bwd_write_30bytes): - lddqu 14(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 14(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 14(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 14(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_14bytes): - mov 6(%rsi), %rdx - mov (%rsi), %rcx - mov %rdx, 6(%rdi) - mov %rcx, (%rdi) - ret + mov 6(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 6(%rdi) + mov %rcx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_141bytes): - lddqu 125(%rsi), %xmm0 - movdqu %xmm0, 125(%rdi) + lddqu 125(%rsi), %xmm0 + movdqu %xmm0, 125(%rdi) L(bwd_write_125bytes): - lddqu 109(%rsi), %xmm0 - movdqu %xmm0, 109(%rdi) + lddqu 109(%rsi), %xmm0 + movdqu %xmm0, 109(%rdi) L(bwd_write_109bytes): - lddqu 93(%rsi), %xmm0 - movdqu %xmm0, 93(%rdi) + lddqu 93(%rsi), %xmm0 + movdqu %xmm0, 93(%rdi) L(bwd_write_93bytes): - lddqu 77(%rsi), %xmm0 - movdqu %xmm0, 77(%rdi) + lddqu 77(%rsi), %xmm0 + movdqu %xmm0, 77(%rdi) L(bwd_write_77bytes): - lddqu 61(%rsi), %xmm0 - movdqu %xmm0, 61(%rdi) 
+ lddqu 61(%rsi), %xmm0 + movdqu %xmm0, 61(%rdi) L(bwd_write_61bytes): - lddqu 45(%rsi), %xmm0 - movdqu %xmm0, 45(%rdi) + lddqu 45(%rsi), %xmm0 + movdqu %xmm0, 45(%rdi) L(bwd_write_45bytes): - lddqu 29(%rsi), %xmm0 - movdqu %xmm0, 29(%rdi) + lddqu 29(%rsi), %xmm0 + movdqu %xmm0, 29(%rdi) L(bwd_write_29bytes): - lddqu 13(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 13(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 13(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 13(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_13bytes): - mov 5(%rsi), %rdx - mov (%rsi), %rcx - mov %rdx, 5(%rdi) - mov %rcx, (%rdi) - ret + mov 5(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 5(%rdi) + mov %rcx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_140bytes): - lddqu 124(%rsi), %xmm0 - movdqu %xmm0, 124(%rdi) + lddqu 124(%rsi), %xmm0 + movdqu %xmm0, 124(%rdi) L(bwd_write_124bytes): - lddqu 108(%rsi), %xmm0 - movdqu %xmm0, 108(%rdi) + lddqu 108(%rsi), %xmm0 + movdqu %xmm0, 108(%rdi) L(bwd_write_108bytes): - lddqu 92(%rsi), %xmm0 - movdqu %xmm0, 92(%rdi) + lddqu 92(%rsi), %xmm0 + movdqu %xmm0, 92(%rdi) L(bwd_write_92bytes): - lddqu 76(%rsi), %xmm0 - movdqu %xmm0, 76(%rdi) + lddqu 76(%rsi), %xmm0 + movdqu %xmm0, 76(%rdi) L(bwd_write_76bytes): - lddqu 60(%rsi), %xmm0 - movdqu %xmm0, 60(%rdi) + lddqu 60(%rsi), %xmm0 + movdqu %xmm0, 60(%rdi) L(bwd_write_60bytes): - lddqu 44(%rsi), %xmm0 - movdqu %xmm0, 44(%rdi) + lddqu 44(%rsi), %xmm0 + movdqu %xmm0, 44(%rdi) L(bwd_write_44bytes): - lddqu 28(%rsi), %xmm0 - movdqu %xmm0, 28(%rdi) + lddqu 28(%rsi), %xmm0 + movdqu %xmm0, 28(%rdi) L(bwd_write_28bytes): - lddqu 12(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 12(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 12(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 12(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_12bytes): - mov 4(%rsi), %rdx - mov (%rsi), %rcx - mov %rdx, 4(%rdi) - mov %rcx, (%rdi) - ret + mov 4(%rsi), %rdx + mov (%rsi), %rcx + 
mov %rdx, 4(%rdi) + mov %rcx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_139bytes): - lddqu 123(%rsi), %xmm0 - movdqu %xmm0, 123(%rdi) + lddqu 123(%rsi), %xmm0 + movdqu %xmm0, 123(%rdi) L(bwd_write_123bytes): - lddqu 107(%rsi), %xmm0 - movdqu %xmm0, 107(%rdi) + lddqu 107(%rsi), %xmm0 + movdqu %xmm0, 107(%rdi) L(bwd_write_107bytes): - lddqu 91(%rsi), %xmm0 - movdqu %xmm0, 91(%rdi) + lddqu 91(%rsi), %xmm0 + movdqu %xmm0, 91(%rdi) L(bwd_write_91bytes): - lddqu 75(%rsi), %xmm0 - movdqu %xmm0, 75(%rdi) + lddqu 75(%rsi), %xmm0 + movdqu %xmm0, 75(%rdi) L(bwd_write_75bytes): - lddqu 59(%rsi), %xmm0 - movdqu %xmm0, 59(%rdi) + lddqu 59(%rsi), %xmm0 + movdqu %xmm0, 59(%rdi) L(bwd_write_59bytes): - lddqu 43(%rsi), %xmm0 - movdqu %xmm0, 43(%rdi) + lddqu 43(%rsi), %xmm0 + movdqu %xmm0, 43(%rdi) L(bwd_write_43bytes): - lddqu 27(%rsi), %xmm0 - movdqu %xmm0, 27(%rdi) + lddqu 27(%rsi), %xmm0 + movdqu %xmm0, 27(%rdi) L(bwd_write_27bytes): - lddqu 11(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 11(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 11(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 11(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_11bytes): - mov 3(%rsi), %rdx - mov (%rsi), %rcx - mov %rdx, 3(%rdi) - mov %rcx, (%rdi) - ret + mov 3(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 3(%rdi) + mov %rcx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_138bytes): - lddqu 122(%rsi), %xmm0 - movdqu %xmm0, 122(%rdi) + lddqu 122(%rsi), %xmm0 + movdqu %xmm0, 122(%rdi) L(bwd_write_122bytes): - lddqu 106(%rsi), %xmm0 - movdqu %xmm0, 106(%rdi) + lddqu 106(%rsi), %xmm0 + movdqu %xmm0, 106(%rdi) L(bwd_write_106bytes): - lddqu 90(%rsi), %xmm0 - movdqu %xmm0, 90(%rdi) + lddqu 90(%rsi), %xmm0 + movdqu %xmm0, 90(%rdi) L(bwd_write_90bytes): - lddqu 74(%rsi), %xmm0 - movdqu %xmm0, 74(%rdi) + lddqu 74(%rsi), %xmm0 + movdqu %xmm0, 74(%rdi) L(bwd_write_74bytes): - lddqu 58(%rsi), %xmm0 - movdqu %xmm0, 58(%rdi) + lddqu 58(%rsi), %xmm0 + movdqu %xmm0, 58(%rdi) 
L(bwd_write_58bytes): - lddqu 42(%rsi), %xmm0 - movdqu %xmm0, 42(%rdi) + lddqu 42(%rsi), %xmm0 + movdqu %xmm0, 42(%rdi) L(bwd_write_42bytes): - lddqu 26(%rsi), %xmm0 - movdqu %xmm0, 26(%rdi) + lddqu 26(%rsi), %xmm0 + movdqu %xmm0, 26(%rdi) L(bwd_write_26bytes): - lddqu 10(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 10(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 10(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 10(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_10bytes): - mov 2(%rsi), %rdx - mov (%rsi), %rcx - mov %rdx, 2(%rdi) - mov %rcx, (%rdi) - ret + mov 2(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 2(%rdi) + mov %rcx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_137bytes): - lddqu 121(%rsi), %xmm0 - movdqu %xmm0, 121(%rdi) + lddqu 121(%rsi), %xmm0 + movdqu %xmm0, 121(%rdi) L(bwd_write_121bytes): - lddqu 105(%rsi), %xmm0 - movdqu %xmm0, 105(%rdi) + lddqu 105(%rsi), %xmm0 + movdqu %xmm0, 105(%rdi) L(bwd_write_105bytes): - lddqu 89(%rsi), %xmm0 - movdqu %xmm0, 89(%rdi) + lddqu 89(%rsi), %xmm0 + movdqu %xmm0, 89(%rdi) L(bwd_write_89bytes): - lddqu 73(%rsi), %xmm0 - movdqu %xmm0, 73(%rdi) + lddqu 73(%rsi), %xmm0 + movdqu %xmm0, 73(%rdi) L(bwd_write_73bytes): - lddqu 57(%rsi), %xmm0 - movdqu %xmm0, 57(%rdi) + lddqu 57(%rsi), %xmm0 + movdqu %xmm0, 57(%rdi) L(bwd_write_57bytes): - lddqu 41(%rsi), %xmm0 - movdqu %xmm0, 41(%rdi) + lddqu 41(%rsi), %xmm0 + movdqu %xmm0, 41(%rdi) L(bwd_write_41bytes): - lddqu 25(%rsi), %xmm0 - movdqu %xmm0, 25(%rdi) + lddqu 25(%rsi), %xmm0 + movdqu %xmm0, 25(%rdi) L(bwd_write_25bytes): - lddqu 9(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 9(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 9(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 9(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_9bytes): - mov 1(%rsi), %rdx - mov (%rsi), %rcx - mov %rdx, 1(%rdi) - mov %rcx, (%rdi) - ret + mov 1(%rsi), %rdx + mov (%rsi), %rcx + mov %rdx, 1(%rdi) + mov %rcx, (%rdi) + ret - .p2align 
4 + .p2align 4 L(bwd_write_136bytes): - lddqu 120(%rsi), %xmm0 - movdqu %xmm0, 120(%rdi) + lddqu 120(%rsi), %xmm0 + movdqu %xmm0, 120(%rdi) L(bwd_write_120bytes): - lddqu 104(%rsi), %xmm0 - movdqu %xmm0, 104(%rdi) + lddqu 104(%rsi), %xmm0 + movdqu %xmm0, 104(%rdi) L(bwd_write_104bytes): - lddqu 88(%rsi), %xmm0 - movdqu %xmm0, 88(%rdi) + lddqu 88(%rsi), %xmm0 + movdqu %xmm0, 88(%rdi) L(bwd_write_88bytes): - lddqu 72(%rsi), %xmm0 - movdqu %xmm0, 72(%rdi) + lddqu 72(%rsi), %xmm0 + movdqu %xmm0, 72(%rdi) L(bwd_write_72bytes): - lddqu 56(%rsi), %xmm0 - movdqu %xmm0, 56(%rdi) + lddqu 56(%rsi), %xmm0 + movdqu %xmm0, 56(%rdi) L(bwd_write_56bytes): - lddqu 40(%rsi), %xmm0 - movdqu %xmm0, 40(%rdi) + lddqu 40(%rsi), %xmm0 + movdqu %xmm0, 40(%rdi) L(bwd_write_40bytes): - lddqu 24(%rsi), %xmm0 - movdqu %xmm0, 24(%rdi) + lddqu 24(%rsi), %xmm0 + movdqu %xmm0, 24(%rdi) L(bwd_write_24bytes): - lddqu 8(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 8(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 8(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 8(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_8bytes): - mov (%rsi), %rdx - mov %rdx, (%rdi) - ret + mov (%rsi), %rdx + mov %rdx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_135bytes): - lddqu 119(%rsi), %xmm0 - movdqu %xmm0, 119(%rdi) + lddqu 119(%rsi), %xmm0 + movdqu %xmm0, 119(%rdi) L(bwd_write_119bytes): - lddqu 103(%rsi), %xmm0 - movdqu %xmm0, 103(%rdi) + lddqu 103(%rsi), %xmm0 + movdqu %xmm0, 103(%rdi) L(bwd_write_103bytes): - lddqu 87(%rsi), %xmm0 - movdqu %xmm0, 87(%rdi) + lddqu 87(%rsi), %xmm0 + movdqu %xmm0, 87(%rdi) L(bwd_write_87bytes): - lddqu 71(%rsi), %xmm0 - movdqu %xmm0, 71(%rdi) + lddqu 71(%rsi), %xmm0 + movdqu %xmm0, 71(%rdi) L(bwd_write_71bytes): - lddqu 55(%rsi), %xmm0 - movdqu %xmm0, 55(%rdi) + lddqu 55(%rsi), %xmm0 + movdqu %xmm0, 55(%rdi) L(bwd_write_55bytes): - lddqu 39(%rsi), %xmm0 - movdqu %xmm0, 39(%rdi) + lddqu 39(%rsi), %xmm0 + movdqu %xmm0, 39(%rdi) L(bwd_write_39bytes): - 
lddqu 23(%rsi), %xmm0 - movdqu %xmm0, 23(%rdi) + lddqu 23(%rsi), %xmm0 + movdqu %xmm0, 23(%rdi) L(bwd_write_23bytes): - lddqu 7(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 7(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 7(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 7(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_7bytes): - mov 3(%rsi), %edx - mov (%rsi), %ecx - mov %edx, 3(%rdi) - mov %ecx, (%rdi) - ret + mov 3(%rsi), %edx + mov (%rsi), %ecx + mov %edx, 3(%rdi) + mov %ecx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_134bytes): - lddqu 118(%rsi), %xmm0 - movdqu %xmm0, 118(%rdi) + lddqu 118(%rsi), %xmm0 + movdqu %xmm0, 118(%rdi) L(bwd_write_118bytes): - lddqu 102(%rsi), %xmm0 - movdqu %xmm0, 102(%rdi) + lddqu 102(%rsi), %xmm0 + movdqu %xmm0, 102(%rdi) L(bwd_write_102bytes): - lddqu 86(%rsi), %xmm0 - movdqu %xmm0, 86(%rdi) + lddqu 86(%rsi), %xmm0 + movdqu %xmm0, 86(%rdi) L(bwd_write_86bytes): - lddqu 70(%rsi), %xmm0 - movdqu %xmm0, 70(%rdi) + lddqu 70(%rsi), %xmm0 + movdqu %xmm0, 70(%rdi) L(bwd_write_70bytes): - lddqu 54(%rsi), %xmm0 - movdqu %xmm0, 54(%rdi) + lddqu 54(%rsi), %xmm0 + movdqu %xmm0, 54(%rdi) L(bwd_write_54bytes): - lddqu 38(%rsi), %xmm0 - movdqu %xmm0, 38(%rdi) + lddqu 38(%rsi), %xmm0 + movdqu %xmm0, 38(%rdi) L(bwd_write_38bytes): - lddqu 22(%rsi), %xmm0 - movdqu %xmm0, 22(%rdi) + lddqu 22(%rsi), %xmm0 + movdqu %xmm0, 22(%rdi) L(bwd_write_22bytes): - lddqu 6(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 6(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 6(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 6(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_6bytes): - mov 2(%rsi), %edx - mov (%rsi), %ecx - mov %edx, 2(%rdi) - mov %ecx, (%rdi) - ret + mov 2(%rsi), %edx + mov (%rsi), %ecx + mov %edx, 2(%rdi) + mov %ecx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_133bytes): - lddqu 117(%rsi), %xmm0 - movdqu %xmm0, 117(%rdi) + lddqu 117(%rsi), %xmm0 + movdqu %xmm0, 117(%rdi) 
L(bwd_write_117bytes): - lddqu 101(%rsi), %xmm0 - movdqu %xmm0, 101(%rdi) + lddqu 101(%rsi), %xmm0 + movdqu %xmm0, 101(%rdi) L(bwd_write_101bytes): - lddqu 85(%rsi), %xmm0 - movdqu %xmm0, 85(%rdi) + lddqu 85(%rsi), %xmm0 + movdqu %xmm0, 85(%rdi) L(bwd_write_85bytes): - lddqu 69(%rsi), %xmm0 - movdqu %xmm0, 69(%rdi) + lddqu 69(%rsi), %xmm0 + movdqu %xmm0, 69(%rdi) L(bwd_write_69bytes): - lddqu 53(%rsi), %xmm0 - movdqu %xmm0, 53(%rdi) + lddqu 53(%rsi), %xmm0 + movdqu %xmm0, 53(%rdi) L(bwd_write_53bytes): - lddqu 37(%rsi), %xmm0 - movdqu %xmm0, 37(%rdi) + lddqu 37(%rsi), %xmm0 + movdqu %xmm0, 37(%rdi) L(bwd_write_37bytes): - lddqu 21(%rsi), %xmm0 - movdqu %xmm0, 21(%rdi) + lddqu 21(%rsi), %xmm0 + movdqu %xmm0, 21(%rdi) L(bwd_write_21bytes): - lddqu 5(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 5(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 5(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 5(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_5bytes): - mov 1(%rsi), %edx - mov (%rsi), %ecx - mov %edx, 1(%rdi) - mov %ecx, (%rdi) - ret + mov 1(%rsi), %edx + mov (%rsi), %ecx + mov %edx, 1(%rdi) + mov %ecx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_132bytes): - lddqu 116(%rsi), %xmm0 - movdqu %xmm0, 116(%rdi) + lddqu 116(%rsi), %xmm0 + movdqu %xmm0, 116(%rdi) L(bwd_write_116bytes): - lddqu 100(%rsi), %xmm0 - movdqu %xmm0, 100(%rdi) + lddqu 100(%rsi), %xmm0 + movdqu %xmm0, 100(%rdi) L(bwd_write_100bytes): - lddqu 84(%rsi), %xmm0 - movdqu %xmm0, 84(%rdi) + lddqu 84(%rsi), %xmm0 + movdqu %xmm0, 84(%rdi) L(bwd_write_84bytes): - lddqu 68(%rsi), %xmm0 - movdqu %xmm0, 68(%rdi) + lddqu 68(%rsi), %xmm0 + movdqu %xmm0, 68(%rdi) L(bwd_write_68bytes): - lddqu 52(%rsi), %xmm0 - movdqu %xmm0, 52(%rdi) + lddqu 52(%rsi), %xmm0 + movdqu %xmm0, 52(%rdi) L(bwd_write_52bytes): - lddqu 36(%rsi), %xmm0 - movdqu %xmm0, 36(%rdi) + lddqu 36(%rsi), %xmm0 + movdqu %xmm0, 36(%rdi) L(bwd_write_36bytes): - lddqu 20(%rsi), %xmm0 - movdqu %xmm0, 20(%rdi) + lddqu 
20(%rsi), %xmm0 + movdqu %xmm0, 20(%rdi) L(bwd_write_20bytes): - lddqu 4(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 4(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 4(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 4(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_4bytes): - mov (%rsi), %edx - mov %edx, (%rdi) - ret + mov (%rsi), %edx + mov %edx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_131bytes): - lddqu 115(%rsi), %xmm0 - movdqu %xmm0, 115(%rdi) + lddqu 115(%rsi), %xmm0 + movdqu %xmm0, 115(%rdi) L(bwd_write_115bytes): - lddqu 99(%rsi), %xmm0 - movdqu %xmm0, 99(%rdi) + lddqu 99(%rsi), %xmm0 + movdqu %xmm0, 99(%rdi) L(bwd_write_99bytes): - lddqu 83(%rsi), %xmm0 - movdqu %xmm0, 83(%rdi) + lddqu 83(%rsi), %xmm0 + movdqu %xmm0, 83(%rdi) L(bwd_write_83bytes): - lddqu 67(%rsi), %xmm0 - movdqu %xmm0, 67(%rdi) + lddqu 67(%rsi), %xmm0 + movdqu %xmm0, 67(%rdi) L(bwd_write_67bytes): - lddqu 51(%rsi), %xmm0 - movdqu %xmm0, 51(%rdi) + lddqu 51(%rsi), %xmm0 + movdqu %xmm0, 51(%rdi) L(bwd_write_51bytes): - lddqu 35(%rsi), %xmm0 - movdqu %xmm0, 35(%rdi) + lddqu 35(%rsi), %xmm0 + movdqu %xmm0, 35(%rdi) L(bwd_write_35bytes): - lddqu 19(%rsi), %xmm0 - movdqu %xmm0, 19(%rdi) + lddqu 19(%rsi), %xmm0 + movdqu %xmm0, 19(%rdi) L(bwd_write_19bytes): - lddqu 3(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 3(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 3(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 3(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_3bytes): - mov 1(%rsi), %dx - mov (%rsi), %cx - mov %dx, 1(%rdi) - mov %cx, (%rdi) - ret + mov 1(%rsi), %dx + mov (%rsi), %cx + mov %dx, 1(%rdi) + mov %cx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_130bytes): - lddqu 114(%rsi), %xmm0 - movdqu %xmm0, 114(%rdi) + lddqu 114(%rsi), %xmm0 + movdqu %xmm0, 114(%rdi) L(bwd_write_114bytes): - lddqu 98(%rsi), %xmm0 - movdqu %xmm0, 98(%rdi) + lddqu 98(%rsi), %xmm0 + movdqu %xmm0, 98(%rdi) L(bwd_write_98bytes): - lddqu 
82(%rsi), %xmm0 - movdqu %xmm0, 82(%rdi) + lddqu 82(%rsi), %xmm0 + movdqu %xmm0, 82(%rdi) L(bwd_write_82bytes): - lddqu 66(%rsi), %xmm0 - movdqu %xmm0, 66(%rdi) + lddqu 66(%rsi), %xmm0 + movdqu %xmm0, 66(%rdi) L(bwd_write_66bytes): - lddqu 50(%rsi), %xmm0 - movdqu %xmm0, 50(%rdi) + lddqu 50(%rsi), %xmm0 + movdqu %xmm0, 50(%rdi) L(bwd_write_50bytes): - lddqu 34(%rsi), %xmm0 - movdqu %xmm0, 34(%rdi) + lddqu 34(%rsi), %xmm0 + movdqu %xmm0, 34(%rdi) L(bwd_write_34bytes): - lddqu 18(%rsi), %xmm0 - movdqu %xmm0, 18(%rdi) + lddqu 18(%rsi), %xmm0 + movdqu %xmm0, 18(%rdi) L(bwd_write_18bytes): - lddqu 2(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 2(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 2(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 2(%rdi) + movdqu %xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_2bytes): - movzwl (%rsi), %edx - mov %dx, (%rdi) - ret + movzwl (%rsi), %edx + mov %dx, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_129bytes): - lddqu 113(%rsi), %xmm0 - movdqu %xmm0, 113(%rdi) + lddqu 113(%rsi), %xmm0 + movdqu %xmm0, 113(%rdi) L(bwd_write_113bytes): - lddqu 97(%rsi), %xmm0 - movdqu %xmm0, 97(%rdi) + lddqu 97(%rsi), %xmm0 + movdqu %xmm0, 97(%rdi) L(bwd_write_97bytes): - lddqu 81(%rsi), %xmm0 - movdqu %xmm0, 81(%rdi) + lddqu 81(%rsi), %xmm0 + movdqu %xmm0, 81(%rdi) L(bwd_write_81bytes): - lddqu 65(%rsi), %xmm0 - movdqu %xmm0, 65(%rdi) + lddqu 65(%rsi), %xmm0 + movdqu %xmm0, 65(%rdi) L(bwd_write_65bytes): - lddqu 49(%rsi), %xmm0 - movdqu %xmm0, 49(%rdi) + lddqu 49(%rsi), %xmm0 + movdqu %xmm0, 49(%rdi) L(bwd_write_49bytes): - lddqu 33(%rsi), %xmm0 - movdqu %xmm0, 33(%rdi) + lddqu 33(%rsi), %xmm0 + movdqu %xmm0, 33(%rdi) L(bwd_write_33bytes): - lddqu 17(%rsi), %xmm0 - movdqu %xmm0, 17(%rdi) + lddqu 17(%rsi), %xmm0 + movdqu %xmm0, 17(%rdi) L(bwd_write_17bytes): - lddqu 1(%rsi), %xmm0 - lddqu (%rsi), %xmm1 - movdqu %xmm0, 1(%rdi) - movdqu %xmm1, (%rdi) - ret + lddqu 1(%rsi), %xmm0 + lddqu (%rsi), %xmm1 + movdqu %xmm0, 1(%rdi) + movdqu 
%xmm1, (%rdi) + ret - .p2align 4 + .p2align 4 L(bwd_write_1bytes): - movzbl (%rsi), %edx - mov %dl, (%rdi) - ret + movzbl (%rsi), %edx + mov %dl, (%rdi) + ret END (MEMCPY) - .section .rodata.ssse3,"a",@progbits - .p2align 3 + .section .rodata.ssse3,"a",@progbits + .p2align 3 L(table_144_bytes_bwd): - .int JMPTBL (L(bwd_write_0bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_1bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_2bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_3bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_4bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_5bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_6bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_7bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_8bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_9bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_10bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_11bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_12bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_13bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_14bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_15bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_16bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_17bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_18bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_19bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_20bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_21bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_22bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_23bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_24bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_25bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_26bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_27bytes), L(table_144_bytes_bwd)) - 
.int JMPTBL (L(bwd_write_28bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_29bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_30bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_31bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_32bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_33bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_34bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_35bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_36bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_37bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_38bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_39bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_40bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_41bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_42bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_43bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_44bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_45bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_46bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_47bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_48bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_49bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_50bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_51bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_52bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_53bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_54bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_55bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_56bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_57bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_58bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_59bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_60bytes), 
L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_61bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_62bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_63bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_64bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_65bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_66bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_67bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_68bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_69bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_70bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_71bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_72bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_73bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_74bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_75bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_76bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_77bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_78bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_79bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_80bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_81bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_82bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_83bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_84bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_85bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_86bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_87bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_88bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_89bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_90bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_91bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_92bytes), L(table_144_bytes_bwd)) - .int JMPTBL 
(L(bwd_write_93bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_94bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_95bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_96bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_97bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_98bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_99bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_100bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_101bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_102bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_103bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_104bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_105bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_106bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_107bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_108bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_109bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_110bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_111bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_112bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_113bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_114bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_115bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_116bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_117bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_118bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_119bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_120bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_121bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_122bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_123bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_124bytes), L(table_144_bytes_bwd)) - .int JMPTBL 
(L(bwd_write_125bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_126bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_127bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_128bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_129bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_130bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_131bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_132bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_133bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_134bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_135bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_136bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_137bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_138bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_139bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_140bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_141bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_142bytes), L(table_144_bytes_bwd)) - .int JMPTBL (L(bwd_write_143bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_0bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_1bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_2bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_3bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_4bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_5bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_6bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_7bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_8bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_9bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_10bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_11bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_12bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_13bytes), 
L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_14bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_15bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_16bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_17bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_18bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_19bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_20bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_21bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_22bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_23bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_24bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_25bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_26bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_27bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_28bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_29bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_30bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_31bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_32bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_33bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_34bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_35bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_36bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_37bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_38bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_39bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_40bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_41bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_42bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_43bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_44bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_45bytes), L(table_144_bytes_bwd)) + .int JMPTBL 
(L(bwd_write_46bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_47bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_48bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_49bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_50bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_51bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_52bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_53bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_54bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_55bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_56bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_57bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_58bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_59bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_60bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_61bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_62bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_63bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_64bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_65bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_66bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_67bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_68bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_69bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_70bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_71bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_72bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_73bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_74bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_75bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_76bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_77bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_78bytes), L(table_144_bytes_bwd)) 
+ .int JMPTBL (L(bwd_write_79bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_80bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_81bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_82bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_83bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_84bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_85bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_86bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_87bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_88bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_89bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_90bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_91bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_92bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_93bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_94bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_95bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_96bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_97bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_98bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_99bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_100bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_101bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_102bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_103bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_104bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_105bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_106bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_107bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_108bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_109bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_110bytes), L(table_144_bytes_bwd)) + .int JMPTBL 
(L(bwd_write_111bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_112bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_113bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_114bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_115bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_116bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_117bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_118bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_119bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_120bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_121bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_122bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_123bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_124bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_125bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_126bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_127bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_128bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_129bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_130bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_131bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_132bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_133bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_134bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_135bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_136bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_137bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_138bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_139bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_140bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_141bytes), L(table_144_bytes_bwd)) + .int JMPTBL (L(bwd_write_142bytes), L(table_144_bytes_bwd)) + .int JMPTBL 
(L(bwd_write_143bytes), L(table_144_bytes_bwd)) - .p2align 3 + .p2align 3 L(table_144_bytes_fwd): - .int JMPTBL (L(fwd_write_0bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_1bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_2bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_3bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_4bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_5bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_6bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_7bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_8bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_9bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_10bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_11bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_12bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_13bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_14bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_15bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_16bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_17bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_18bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_19bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_20bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_21bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_22bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_23bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_24bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_25bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_26bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_27bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_28bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_29bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_30bytes), L(table_144_bytes_fwd)) - .int JMPTBL 
(L(fwd_write_31bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_32bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_33bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_34bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_35bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_36bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_37bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_38bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_39bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_40bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_41bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_42bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_43bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_44bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_45bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_46bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_47bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_48bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_49bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_50bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_51bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_52bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_53bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_54bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_55bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_56bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_57bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_58bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_59bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_60bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_61bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_62bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_63bytes), L(table_144_bytes_fwd)) 
- .int JMPTBL (L(fwd_write_64bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_65bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_66bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_67bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_68bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_69bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_70bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_71bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_72bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_73bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_74bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_75bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_76bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_77bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_78bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_79bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_80bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_81bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_82bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_83bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_84bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_85bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_86bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_87bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_88bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_89bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_90bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_91bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_92bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_93bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_94bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_95bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_96bytes), 
L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_97bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_98bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_99bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_100bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_101bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_102bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_103bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_104bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_105bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_106bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_107bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_108bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_109bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_110bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_111bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_112bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_113bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_114bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_115bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_116bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_117bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_118bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_119bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_120bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_121bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_122bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_123bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_124bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_125bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_126bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_127bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_128bytes), 
L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_129bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_130bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_131bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_132bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_133bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_134bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_135bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_136bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_137bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_138bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_139bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_140bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_141bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_142bytes), L(table_144_bytes_fwd)) - .int JMPTBL (L(fwd_write_143bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_0bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_1bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_2bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_3bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_4bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_5bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_6bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_7bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_8bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_9bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_10bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_11bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_12bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_13bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_14bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_15bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_16bytes), L(table_144_bytes_fwd)) + .int JMPTBL 
(L(fwd_write_17bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_18bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_19bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_20bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_21bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_22bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_23bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_24bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_25bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_26bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_27bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_28bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_29bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_30bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_31bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_32bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_33bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_34bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_35bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_36bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_37bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_38bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_39bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_40bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_41bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_42bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_43bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_44bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_45bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_46bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_47bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_48bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_49bytes), L(table_144_bytes_fwd)) 
+ .int JMPTBL (L(fwd_write_50bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_51bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_52bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_53bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_54bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_55bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_56bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_57bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_58bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_59bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_60bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_61bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_62bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_63bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_64bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_65bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_66bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_67bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_68bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_69bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_70bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_71bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_72bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_73bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_74bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_75bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_76bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_77bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_78bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_79bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_80bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_81bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_82bytes), 
L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_83bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_84bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_85bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_86bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_87bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_88bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_89bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_90bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_91bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_92bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_93bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_94bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_95bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_96bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_97bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_98bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_99bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_100bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_101bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_102bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_103bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_104bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_105bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_106bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_107bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_108bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_109bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_110bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_111bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_112bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_113bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_114bytes), L(table_144_bytes_fwd)) + .int 
JMPTBL (L(fwd_write_115bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_116bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_117bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_118bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_119bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_120bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_121bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_122bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_123bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_124bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_125bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_126bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_127bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_128bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_129bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_130bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_131bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_132bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_133bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_134bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_135bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_136bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_137bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_138bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_139bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_140bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_141bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_142bytes), L(table_144_bytes_fwd)) + .int JMPTBL (L(fwd_write_143bytes), L(table_144_bytes_fwd)) - .p2align 3 + .p2align 3 L(shl_table_fwd): - .int JMPTBL (L(shl_0), L(shl_table_fwd)) - .int JMPTBL (L(shl_1), L(shl_table_fwd)) - .int JMPTBL (L(shl_2), L(shl_table_fwd)) - .int JMPTBL (L(shl_3), 
L(shl_table_fwd)) - .int JMPTBL (L(shl_4), L(shl_table_fwd)) - .int JMPTBL (L(shl_5), L(shl_table_fwd)) - .int JMPTBL (L(shl_6), L(shl_table_fwd)) - .int JMPTBL (L(shl_7), L(shl_table_fwd)) - .int JMPTBL (L(shl_8), L(shl_table_fwd)) - .int JMPTBL (L(shl_9), L(shl_table_fwd)) - .int JMPTBL (L(shl_10), L(shl_table_fwd)) - .int JMPTBL (L(shl_11), L(shl_table_fwd)) - .int JMPTBL (L(shl_12), L(shl_table_fwd)) - .int JMPTBL (L(shl_13), L(shl_table_fwd)) - .int JMPTBL (L(shl_14), L(shl_table_fwd)) - .int JMPTBL (L(shl_15), L(shl_table_fwd)) + .int JMPTBL (L(shl_0), L(shl_table_fwd)) + .int JMPTBL (L(shl_1), L(shl_table_fwd)) + .int JMPTBL (L(shl_2), L(shl_table_fwd)) + .int JMPTBL (L(shl_3), L(shl_table_fwd)) + .int JMPTBL (L(shl_4), L(shl_table_fwd)) + .int JMPTBL (L(shl_5), L(shl_table_fwd)) + .int JMPTBL (L(shl_6), L(shl_table_fwd)) + .int JMPTBL (L(shl_7), L(shl_table_fwd)) + .int JMPTBL (L(shl_8), L(shl_table_fwd)) + .int JMPTBL (L(shl_9), L(shl_table_fwd)) + .int JMPTBL (L(shl_10), L(shl_table_fwd)) + .int JMPTBL (L(shl_11), L(shl_table_fwd)) + .int JMPTBL (L(shl_12), L(shl_table_fwd)) + .int JMPTBL (L(shl_13), L(shl_table_fwd)) + .int JMPTBL (L(shl_14), L(shl_table_fwd)) + .int JMPTBL (L(shl_15), L(shl_table_fwd)) - .p2align 3 + .p2align 3 L(shl_table_bwd): - .int JMPTBL (L(shl_0_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_1_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_2_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_3_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_4_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_5_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_6_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_7_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_8_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_9_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_10_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_11_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_12_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_13_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_14_bwd), L(shl_table_bwd)) - .int 
JMPTBL (L(shl_15_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_0_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_1_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_2_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_3_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_4_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_5_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_6_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_7_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_8_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_9_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_10_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_11_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_12_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_13_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_14_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_15_bwd), L(shl_table_bwd)) #endif diff --git a/utils/memcpy-bench/glibc/memcpy-ssse3.S b/utils/memcpy-bench/glibc/memcpy-ssse3.S index 2fd26651645..11cb6559a8b 100644 --- a/utils/memcpy-bench/glibc/memcpy-ssse3.S +++ b/utils/memcpy-bench/glibc/memcpy-ssse3.S @@ -24,3129 +24,3129 @@ #include "asm-syntax.h" #ifndef MEMCPY -# define MEMCPY __memcpy_ssse3 -# define MEMCPY_CHK __memcpy_chk_ssse3 -# define MEMPCPY __mempcpy_ssse3 -# define MEMPCPY_CHK __mempcpy_chk_ssse3 +# define MEMCPY __memcpy_ssse3 +# define MEMCPY_CHK __memcpy_chk_ssse3 +# define MEMPCPY __mempcpy_ssse3 +# define MEMPCPY_CHK __mempcpy_chk_ssse3 #endif -#define JMPTBL(I, B) I - B +#define JMPTBL(I, B) I - B /* Branch to an entry in a jump table. TABLE is a jump table with relative offsets. INDEX is a register contains the index into the jump table. SCALE is the scale of INDEX. 
*/ -#define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ - lea TABLE(%rip), %r11; \ - movslq (%r11, INDEX, SCALE), INDEX; \ - lea (%r11, INDEX), INDEX; \ - _CET_NOTRACK jmp *INDEX; \ +#define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \ + lea TABLE(%rip), %r11; \ + movslq (%r11, INDEX, SCALE), INDEX; \ + lea (%r11, INDEX), INDEX; \ + _CET_NOTRACK jmp *INDEX; \ ud2 - .section .text.ssse3,"ax",@progbits + .section .text.ssse3,"ax",@progbits #if !defined USE_AS_MEMPCPY && !defined USE_AS_MEMMOVE ENTRY (MEMPCPY_CHK) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (MEMPCPY_CHK) ENTRY (MEMPCPY) - mov %RDI_LP, %RAX_LP - add %RDX_LP, %RAX_LP - jmp L(start) + mov %RDI_LP, %RAX_LP + add %RDX_LP, %RAX_LP + jmp L(start) END (MEMPCPY) #endif #if !defined USE_AS_BCOPY ENTRY (MEMCPY_CHK) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (MEMCPY_CHK) #endif ENTRY (MEMCPY) - mov %RDI_LP, %RAX_LP + mov %RDI_LP, %RAX_LP #ifdef USE_AS_MEMPCPY - add %RDX_LP, %RAX_LP + add %RDX_LP, %RAX_LP #endif #ifdef __ILP32__ - /* Clear the upper 32 bits. */ - mov %edx, %edx + /* Clear the upper 32 bits. 
*/ + mov %edx, %edx #endif #ifdef USE_AS_MEMMOVE - cmp %rsi, %rdi - jb L(copy_forward) - je L(write_0bytes) - cmp $79, %rdx - jbe L(copy_forward) - jmp L(copy_backward) + cmp %rsi, %rdi + jb L(copy_forward) + je L(write_0bytes) + cmp $79, %rdx + jbe L(copy_forward) + jmp L(copy_backward) L(copy_forward): #endif L(start): - cmp $79, %rdx - lea L(table_less_80bytes)(%rip), %r11 - ja L(80bytesormore) - movslq (%r11, %rdx, 4), %r9 - add %rdx, %rsi - add %rdx, %rdi - add %r11, %r9 - _CET_NOTRACK jmp *%r9 - ud2 + cmp $79, %rdx + lea L(table_less_80bytes)(%rip), %r11 + ja L(80bytesormore) + movslq (%r11, %rdx, 4), %r9 + add %rdx, %rsi + add %rdx, %rdi + add %r11, %r9 + _CET_NOTRACK jmp *%r9 + ud2 - .p2align 4 + .p2align 4 L(80bytesormore): #ifndef USE_AS_MEMMOVE - cmp %dil, %sil - jle L(copy_backward) + cmp %dil, %sil + jle L(copy_backward) #endif - movdqu (%rsi), %xmm0 - mov %rdi, %rcx - and $-16, %rdi - add $16, %rdi - mov %rcx, %r8 - sub %rdi, %rcx - add %rcx, %rdx - sub %rcx, %rsi + movdqu (%rsi), %xmm0 + mov %rdi, %rcx + and $-16, %rdi + add $16, %rdi + mov %rcx, %r8 + sub %rdi, %rcx + add %rcx, %rdx + sub %rcx, %rsi #ifdef SHARED_CACHE_SIZE_HALF - mov $SHARED_CACHE_SIZE_HALF, %RCX_LP + mov $SHARED_CACHE_SIZE_HALF, %RCX_LP #else - mov __x86_shared_cache_size_half(%rip), %RCX_LP + mov __x86_shared_cache_size_half(%rip), %RCX_LP #endif - cmp %rcx, %rdx - mov %rsi, %r9 - ja L(large_page_fwd) - and $0xf, %r9 - jz L(shl_0) + cmp %rcx, %rdx + mov %rsi, %r9 + ja L(large_page_fwd) + and $0xf, %r9 + jz L(shl_0) #ifdef DATA_CACHE_SIZE_HALF - mov $DATA_CACHE_SIZE_HALF, %RCX_LP + mov $DATA_CACHE_SIZE_HALF, %RCX_LP #else - mov __x86_data_cache_size_half(%rip), %RCX_LP + mov __x86_data_cache_size_half(%rip), %RCX_LP #endif - BRANCH_TO_JMPTBL_ENTRY (L(shl_table), %r9, 4) + BRANCH_TO_JMPTBL_ENTRY (L(shl_table), %r9, 4) - .p2align 4 + .p2align 4 L(copy_backward): - movdqu -16(%rsi, %rdx), %xmm0 - add %rdx, %rsi - lea -16(%rdi, %rdx), %r8 - add %rdx, %rdi + movdqu -16(%rsi, %rdx), 
%xmm0 + add %rdx, %rsi + lea -16(%rdi, %rdx), %r8 + add %rdx, %rdi - mov %rdi, %rcx - and $0xf, %rcx - xor %rcx, %rdi - sub %rcx, %rdx - sub %rcx, %rsi + mov %rdi, %rcx + and $0xf, %rcx + xor %rcx, %rdi + sub %rcx, %rdx + sub %rcx, %rsi #ifdef SHARED_CACHE_SIZE_HALF - mov $SHARED_CACHE_SIZE_HALF, %RCX_LP + mov $SHARED_CACHE_SIZE_HALF, %RCX_LP #else - mov __x86_shared_cache_size_half(%rip), %RCX_LP + mov __x86_shared_cache_size_half(%rip), %RCX_LP #endif - cmp %rcx, %rdx - mov %rsi, %r9 - ja L(large_page_bwd) - and $0xf, %r9 - jz L(shl_0_bwd) + cmp %rcx, %rdx + mov %rsi, %r9 + ja L(large_page_bwd) + and $0xf, %r9 + jz L(shl_0_bwd) #ifdef DATA_CACHE_SIZE_HALF - mov $DATA_CACHE_SIZE_HALF, %RCX_LP + mov $DATA_CACHE_SIZE_HALF, %RCX_LP #else - mov __x86_data_cache_size_half(%rip), %RCX_LP + mov __x86_data_cache_size_half(%rip), %RCX_LP #endif - BRANCH_TO_JMPTBL_ENTRY (L(shl_table_bwd), %r9, 4) + BRANCH_TO_JMPTBL_ENTRY (L(shl_table_bwd), %r9, 4) - .p2align 4 + .p2align 4 L(shl_0): - sub $16, %rdx - movdqa (%rsi), %xmm1 - add $16, %rsi - movdqa %xmm1, (%rdi) - add $16, %rdi - cmp $128, %rdx - movdqu %xmm0, (%r8) - ja L(shl_0_gobble) - cmp $64, %rdx - jb L(shl_0_less_64bytes) - movaps (%rsi), %xmm4 - movaps 16(%rsi), %xmm1 - movaps 32(%rsi), %xmm2 - movaps 48(%rsi), %xmm3 - movaps %xmm4, (%rdi) - movaps %xmm1, 16(%rdi) - movaps %xmm2, 32(%rdi) - movaps %xmm3, 48(%rdi) - sub $64, %rdx - add $64, %rsi - add $64, %rdi + sub $16, %rdx + movdqa (%rsi), %xmm1 + add $16, %rsi + movdqa %xmm1, (%rdi) + add $16, %rdi + cmp $128, %rdx + movdqu %xmm0, (%r8) + ja L(shl_0_gobble) + cmp $64, %rdx + jb L(shl_0_less_64bytes) + movaps (%rsi), %xmm4 + movaps 16(%rsi), %xmm1 + movaps 32(%rsi), %xmm2 + movaps 48(%rsi), %xmm3 + movaps %xmm4, (%rdi) + movaps %xmm1, 16(%rdi) + movaps %xmm2, 32(%rdi) + movaps %xmm3, 48(%rdi) + sub $64, %rdx + add $64, %rsi + add $64, %rdi L(shl_0_less_64bytes): - add %rdx, %rsi - add %rdx, %rdi - BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + add %rdx, 
%rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_0_gobble): #ifdef DATA_CACHE_SIZE_HALF - cmp $DATA_CACHE_SIZE_HALF, %RDX_LP + cmp $DATA_CACHE_SIZE_HALF, %RDX_LP #else - cmp __x86_data_cache_size_half(%rip), %RDX_LP + cmp __x86_data_cache_size_half(%rip), %RDX_LP #endif - lea -128(%rdx), %rdx - jae L(shl_0_gobble_mem_loop) + lea -128(%rdx), %rdx + jae L(shl_0_gobble_mem_loop) L(shl_0_gobble_cache_loop): - movdqa (%rsi), %xmm4 - movaps 0x10(%rsi), %xmm1 - movaps 0x20(%rsi), %xmm2 - movaps 0x30(%rsi), %xmm3 + movdqa (%rsi), %xmm4 + movaps 0x10(%rsi), %xmm1 + movaps 0x20(%rsi), %xmm2 + movaps 0x30(%rsi), %xmm3 - movdqa %xmm4, (%rdi) - movaps %xmm1, 0x10(%rdi) - movaps %xmm2, 0x20(%rdi) - movaps %xmm3, 0x30(%rdi) + movdqa %xmm4, (%rdi) + movaps %xmm1, 0x10(%rdi) + movaps %xmm2, 0x20(%rdi) + movaps %xmm3, 0x30(%rdi) - sub $128, %rdx - movaps 0x40(%rsi), %xmm4 - movaps 0x50(%rsi), %xmm5 - movaps 0x60(%rsi), %xmm6 - movaps 0x70(%rsi), %xmm7 - lea 0x80(%rsi), %rsi - movaps %xmm4, 0x40(%rdi) - movaps %xmm5, 0x50(%rdi) - movaps %xmm6, 0x60(%rdi) - movaps %xmm7, 0x70(%rdi) - lea 0x80(%rdi), %rdi + sub $128, %rdx + movaps 0x40(%rsi), %xmm4 + movaps 0x50(%rsi), %xmm5 + movaps 0x60(%rsi), %xmm6 + movaps 0x70(%rsi), %xmm7 + lea 0x80(%rsi), %rsi + movaps %xmm4, 0x40(%rdi) + movaps %xmm5, 0x50(%rdi) + movaps %xmm6, 0x60(%rdi) + movaps %xmm7, 0x70(%rdi) + lea 0x80(%rdi), %rdi - jae L(shl_0_gobble_cache_loop) - cmp $-0x40, %rdx - lea 0x80(%rdx), %rdx - jl L(shl_0_cache_less_64bytes) + jae L(shl_0_gobble_cache_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(shl_0_cache_less_64bytes) - movdqa (%rsi), %xmm4 - sub $0x40, %rdx - movdqa 0x10(%rsi), %xmm1 + movdqa (%rsi), %xmm4 + sub $0x40, %rdx + movdqa 0x10(%rsi), %xmm1 - movdqa %xmm4, (%rdi) - movdqa %xmm1, 0x10(%rdi) + movdqa %xmm4, (%rdi) + movdqa %xmm1, 0x10(%rdi) - movdqa 0x20(%rsi), %xmm4 - movdqa 0x30(%rsi), %xmm1 - add $0x40, %rsi + movdqa 0x20(%rsi), %xmm4 + 
movdqa 0x30(%rsi), %xmm1 + add $0x40, %rsi - movdqa %xmm4, 0x20(%rdi) - movdqa %xmm1, 0x30(%rdi) - add $0x40, %rdi + movdqa %xmm4, 0x20(%rdi) + movdqa %xmm1, 0x30(%rdi) + add $0x40, %rdi L(shl_0_cache_less_64bytes): - add %rdx, %rsi - add %rdx, %rdi - BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + add %rdx, %rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_0_gobble_mem_loop): - prefetcht0 0x1c0(%rsi) - prefetcht0 0x280(%rsi) + prefetcht0 0x1c0(%rsi) + prefetcht0 0x280(%rsi) - movdqa (%rsi), %xmm0 - movdqa 0x10(%rsi), %xmm1 - movdqa 0x20(%rsi), %xmm2 - movdqa 0x30(%rsi), %xmm3 - movdqa 0x40(%rsi), %xmm4 - movdqa 0x50(%rsi), %xmm5 - movdqa 0x60(%rsi), %xmm6 - movdqa 0x70(%rsi), %xmm7 - lea 0x80(%rsi), %rsi - sub $0x80, %rdx - movdqa %xmm0, (%rdi) - movdqa %xmm1, 0x10(%rdi) - movdqa %xmm2, 0x20(%rdi) - movdqa %xmm3, 0x30(%rdi) - movdqa %xmm4, 0x40(%rdi) - movdqa %xmm5, 0x50(%rdi) - movdqa %xmm6, 0x60(%rdi) - movdqa %xmm7, 0x70(%rdi) - lea 0x80(%rdi), %rdi + movdqa (%rsi), %xmm0 + movdqa 0x10(%rsi), %xmm1 + movdqa 0x20(%rsi), %xmm2 + movdqa 0x30(%rsi), %xmm3 + movdqa 0x40(%rsi), %xmm4 + movdqa 0x50(%rsi), %xmm5 + movdqa 0x60(%rsi), %xmm6 + movdqa 0x70(%rsi), %xmm7 + lea 0x80(%rsi), %rsi + sub $0x80, %rdx + movdqa %xmm0, (%rdi) + movdqa %xmm1, 0x10(%rdi) + movdqa %xmm2, 0x20(%rdi) + movdqa %xmm3, 0x30(%rdi) + movdqa %xmm4, 0x40(%rdi) + movdqa %xmm5, 0x50(%rdi) + movdqa %xmm6, 0x60(%rdi) + movdqa %xmm7, 0x70(%rdi) + lea 0x80(%rdi), %rdi - jae L(shl_0_gobble_mem_loop) - cmp $-0x40, %rdx - lea 0x80(%rdx), %rdx - jl L(shl_0_mem_less_64bytes) + jae L(shl_0_gobble_mem_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(shl_0_mem_less_64bytes) - movdqa (%rsi), %xmm0 - sub $0x40, %rdx - movdqa 0x10(%rsi), %xmm1 + movdqa (%rsi), %xmm0 + sub $0x40, %rdx + movdqa 0x10(%rsi), %xmm1 - movdqa %xmm0, (%rdi) - movdqa %xmm1, 0x10(%rdi) + movdqa %xmm0, (%rdi) + movdqa %xmm1, 0x10(%rdi) - movdqa 0x20(%rsi), %xmm0 
- movdqa 0x30(%rsi), %xmm1 - add $0x40, %rsi + movdqa 0x20(%rsi), %xmm0 + movdqa 0x30(%rsi), %xmm1 + add $0x40, %rsi - movdqa %xmm0, 0x20(%rdi) - movdqa %xmm1, 0x30(%rdi) - add $0x40, %rdi + movdqa %xmm0, 0x20(%rdi) + movdqa %xmm1, 0x30(%rdi) + add $0x40, %rdi L(shl_0_mem_less_64bytes): - cmp $0x20, %rdx - jb L(shl_0_mem_less_32bytes) - movdqa (%rsi), %xmm0 - sub $0x20, %rdx - movdqa 0x10(%rsi), %xmm1 - add $0x20, %rsi - movdqa %xmm0, (%rdi) - movdqa %xmm1, 0x10(%rdi) - add $0x20, %rdi + cmp $0x20, %rdx + jb L(shl_0_mem_less_32bytes) + movdqa (%rsi), %xmm0 + sub $0x20, %rdx + movdqa 0x10(%rsi), %xmm1 + add $0x20, %rsi + movdqa %xmm0, (%rdi) + movdqa %xmm1, 0x10(%rdi) + add $0x20, %rdi L(shl_0_mem_less_32bytes): - add %rdx, %rdi - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + add %rdx, %rdi + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_0_bwd): - sub $16, %rdx - movdqa -0x10(%rsi), %xmm1 - sub $16, %rsi - movdqa %xmm1, -0x10(%rdi) - sub $16, %rdi - cmp $0x80, %rdx - movdqu %xmm0, (%r8) - ja L(shl_0_gobble_bwd) - cmp $64, %rdx - jb L(shl_0_less_64bytes_bwd) - movaps -0x10(%rsi), %xmm0 - movaps -0x20(%rsi), %xmm1 - movaps -0x30(%rsi), %xmm2 - movaps -0x40(%rsi), %xmm3 - movaps %xmm0, -0x10(%rdi) - movaps %xmm1, -0x20(%rdi) - movaps %xmm2, -0x30(%rdi) - movaps %xmm3, -0x40(%rdi) - sub $64, %rdx - sub $0x40, %rsi - sub $0x40, %rdi + sub $16, %rdx + movdqa -0x10(%rsi), %xmm1 + sub $16, %rsi + movdqa %xmm1, -0x10(%rdi) + sub $16, %rdi + cmp $0x80, %rdx + movdqu %xmm0, (%r8) + ja L(shl_0_gobble_bwd) + cmp $64, %rdx + jb L(shl_0_less_64bytes_bwd) + movaps -0x10(%rsi), %xmm0 + movaps -0x20(%rsi), %xmm1 + movaps -0x30(%rsi), %xmm2 + movaps -0x40(%rsi), %xmm3 + movaps %xmm0, -0x10(%rdi) + movaps %xmm1, -0x20(%rdi) + movaps %xmm2, -0x30(%rdi) + movaps %xmm3, -0x40(%rdi) + sub $64, %rdx + sub $0x40, %rsi + sub $0x40, %rdi L(shl_0_less_64bytes_bwd): - BRANCH_TO_JMPTBL_ENTRY 
(L(table_less_80bytes), %rdx, 4) + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_0_gobble_bwd): #ifdef DATA_CACHE_SIZE_HALF - cmp $DATA_CACHE_SIZE_HALF, %RDX_LP + cmp $DATA_CACHE_SIZE_HALF, %RDX_LP #else - cmp __x86_data_cache_size_half(%rip), %RDX_LP + cmp __x86_data_cache_size_half(%rip), %RDX_LP #endif - lea -128(%rdx), %rdx - jae L(shl_0_gobble_mem_bwd_loop) + lea -128(%rdx), %rdx + jae L(shl_0_gobble_mem_bwd_loop) L(shl_0_gobble_bwd_loop): - movdqa -0x10(%rsi), %xmm0 - movaps -0x20(%rsi), %xmm1 - movaps -0x30(%rsi), %xmm2 - movaps -0x40(%rsi), %xmm3 + movdqa -0x10(%rsi), %xmm0 + movaps -0x20(%rsi), %xmm1 + movaps -0x30(%rsi), %xmm2 + movaps -0x40(%rsi), %xmm3 - movdqa %xmm0, -0x10(%rdi) - movaps %xmm1, -0x20(%rdi) - movaps %xmm2, -0x30(%rdi) - movaps %xmm3, -0x40(%rdi) + movdqa %xmm0, -0x10(%rdi) + movaps %xmm1, -0x20(%rdi) + movaps %xmm2, -0x30(%rdi) + movaps %xmm3, -0x40(%rdi) - sub $0x80, %rdx - movaps -0x50(%rsi), %xmm4 - movaps -0x60(%rsi), %xmm5 - movaps -0x70(%rsi), %xmm6 - movaps -0x80(%rsi), %xmm7 - lea -0x80(%rsi), %rsi - movaps %xmm4, -0x50(%rdi) - movaps %xmm5, -0x60(%rdi) - movaps %xmm6, -0x70(%rdi) - movaps %xmm7, -0x80(%rdi) - lea -0x80(%rdi), %rdi + sub $0x80, %rdx + movaps -0x50(%rsi), %xmm4 + movaps -0x60(%rsi), %xmm5 + movaps -0x70(%rsi), %xmm6 + movaps -0x80(%rsi), %xmm7 + lea -0x80(%rsi), %rsi + movaps %xmm4, -0x50(%rdi) + movaps %xmm5, -0x60(%rdi) + movaps %xmm6, -0x70(%rdi) + movaps %xmm7, -0x80(%rdi) + lea -0x80(%rdi), %rdi - jae L(shl_0_gobble_bwd_loop) - cmp $-0x40, %rdx - lea 0x80(%rdx), %rdx - jl L(shl_0_gobble_bwd_less_64bytes) + jae L(shl_0_gobble_bwd_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(shl_0_gobble_bwd_less_64bytes) - movdqa -0x10(%rsi), %xmm0 - sub $0x40, %rdx - movdqa -0x20(%rsi), %xmm1 + movdqa -0x10(%rsi), %xmm0 + sub $0x40, %rdx + movdqa -0x20(%rsi), %xmm1 - movdqa %xmm0, -0x10(%rdi) - movdqa %xmm1, -0x20(%rdi) + movdqa %xmm0, -0x10(%rdi) + movdqa %xmm1, 
-0x20(%rdi) - movdqa -0x30(%rsi), %xmm0 - movdqa -0x40(%rsi), %xmm1 - sub $0x40, %rsi + movdqa -0x30(%rsi), %xmm0 + movdqa -0x40(%rsi), %xmm1 + sub $0x40, %rsi - movdqa %xmm0, -0x30(%rdi) - movdqa %xmm1, -0x40(%rdi) - sub $0x40, %rdi + movdqa %xmm0, -0x30(%rdi) + movdqa %xmm1, -0x40(%rdi) + sub $0x40, %rdi L(shl_0_gobble_bwd_less_64bytes): - BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_0_gobble_mem_bwd_loop): - prefetcht0 -0x1c0(%rsi) - prefetcht0 -0x280(%rsi) - movdqa -0x10(%rsi), %xmm0 - movdqa -0x20(%rsi), %xmm1 - movdqa -0x30(%rsi), %xmm2 - movdqa -0x40(%rsi), %xmm3 - movdqa -0x50(%rsi), %xmm4 - movdqa -0x60(%rsi), %xmm5 - movdqa -0x70(%rsi), %xmm6 - movdqa -0x80(%rsi), %xmm7 - lea -0x80(%rsi), %rsi - sub $0x80, %rdx - movdqa %xmm0, -0x10(%rdi) - movdqa %xmm1, -0x20(%rdi) - movdqa %xmm2, -0x30(%rdi) - movdqa %xmm3, -0x40(%rdi) - movdqa %xmm4, -0x50(%rdi) - movdqa %xmm5, -0x60(%rdi) - movdqa %xmm6, -0x70(%rdi) - movdqa %xmm7, -0x80(%rdi) - lea -0x80(%rdi), %rdi + prefetcht0 -0x1c0(%rsi) + prefetcht0 -0x280(%rsi) + movdqa -0x10(%rsi), %xmm0 + movdqa -0x20(%rsi), %xmm1 + movdqa -0x30(%rsi), %xmm2 + movdqa -0x40(%rsi), %xmm3 + movdqa -0x50(%rsi), %xmm4 + movdqa -0x60(%rsi), %xmm5 + movdqa -0x70(%rsi), %xmm6 + movdqa -0x80(%rsi), %xmm7 + lea -0x80(%rsi), %rsi + sub $0x80, %rdx + movdqa %xmm0, -0x10(%rdi) + movdqa %xmm1, -0x20(%rdi) + movdqa %xmm2, -0x30(%rdi) + movdqa %xmm3, -0x40(%rdi) + movdqa %xmm4, -0x50(%rdi) + movdqa %xmm5, -0x60(%rdi) + movdqa %xmm6, -0x70(%rdi) + movdqa %xmm7, -0x80(%rdi) + lea -0x80(%rdi), %rdi - jae L(shl_0_gobble_mem_bwd_loop) - cmp $-0x40, %rdx - lea 0x80(%rdx), %rdx - jl L(shl_0_mem_bwd_less_64bytes) + jae L(shl_0_gobble_mem_bwd_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(shl_0_mem_bwd_less_64bytes) - movdqa -0x10(%rsi), %xmm0 - sub $0x40, %rdx - movdqa -0x20(%rsi), %xmm1 + movdqa -0x10(%rsi), %xmm0 + sub $0x40, %rdx + movdqa 
-0x20(%rsi), %xmm1 - movdqa %xmm0, -0x10(%rdi) - movdqa %xmm1, -0x20(%rdi) + movdqa %xmm0, -0x10(%rdi) + movdqa %xmm1, -0x20(%rdi) - movdqa -0x30(%rsi), %xmm0 - movdqa -0x40(%rsi), %xmm1 - sub $0x40, %rsi + movdqa -0x30(%rsi), %xmm0 + movdqa -0x40(%rsi), %xmm1 + sub $0x40, %rsi - movdqa %xmm0, -0x30(%rdi) - movdqa %xmm1, -0x40(%rdi) - sub $0x40, %rdi + movdqa %xmm0, -0x30(%rdi) + movdqa %xmm1, -0x40(%rdi) + sub $0x40, %rdi L(shl_0_mem_bwd_less_64bytes): - cmp $0x20, %rdx - jb L(shl_0_mem_bwd_less_32bytes) - movdqa -0x10(%rsi), %xmm0 - sub $0x20, %rdx - movdqa -0x20(%rsi), %xmm1 - sub $0x20, %rsi - movdqa %xmm0, -0x10(%rdi) - movdqa %xmm1, -0x20(%rdi) - sub $0x20, %rdi + cmp $0x20, %rdx + jb L(shl_0_mem_bwd_less_32bytes) + movdqa -0x10(%rsi), %xmm0 + sub $0x20, %rdx + movdqa -0x20(%rsi), %xmm1 + sub $0x20, %rsi + movdqa %xmm0, -0x10(%rdi) + movdqa %xmm1, -0x20(%rdi) + sub $0x20, %rdi L(shl_0_mem_bwd_less_32bytes): - BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_1): - lea (L(shl_1_loop_L1)-L(shl_1))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x01(%rsi), %xmm1 - jb L(L1_fwd) - lea (L(shl_1_loop_L2)-L(shl_1_loop_L1))(%r9), %r9 + lea (L(shl_1_loop_L1)-L(shl_1))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x01(%rsi), %xmm1 + jb L(L1_fwd) + lea (L(shl_1_loop_L2)-L(shl_1_loop_L1))(%r9), %r9 L(L1_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_1_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_1_loop_L1): - sub $64, %rdx - movaps 0x0f(%rsi), %xmm2 - movaps 0x1f(%rsi), %xmm3 - movaps 0x2f(%rsi), %xmm4 - movaps 0x3f(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $1, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $1, %xmm3, %xmm4 - palignr $1, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $1, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_1_end) - movaps 
%xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x0f(%rsi), %xmm2 + movaps 0x1f(%rsi), %xmm3 + movaps 0x2f(%rsi), %xmm4 + movaps 0x3f(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $1, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $1, %xmm3, %xmm4 + palignr $1, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $1, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_1_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_1_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_1_bwd): - lea (L(shl_1_bwd_loop_L1)-L(shl_1_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x01(%rsi), %xmm1 - jb L(L1_bwd) - lea (L(shl_1_bwd_loop_L2)-L(shl_1_bwd_loop_L1))(%r9), %r9 + lea (L(shl_1_bwd_loop_L1)-L(shl_1_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x01(%rsi), %xmm1 + jb L(L1_bwd) + lea (L(shl_1_bwd_loop_L2)-L(shl_1_bwd_loop_L1))(%r9), %r9 L(L1_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_1_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_1_bwd_loop_L1): - movaps -0x11(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x21(%rsi), %xmm3 - movaps -0x31(%rsi), %xmm4 - movaps -0x41(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $1, %xmm2, %xmm1 - palignr $1, %xmm3, %xmm2 - palignr $1, %xmm4, %xmm3 - palignr $1, %xmm5, %xmm4 + movaps -0x11(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x21(%rsi), %xmm3 + movaps -0x31(%rsi), %xmm4 + movaps -0x41(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $1, %xmm2, %xmm1 + palignr 
$1, %xmm3, %xmm2 + palignr $1, %xmm4, %xmm3 + palignr $1, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_1_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_1_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_1_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_2): - lea (L(shl_2_loop_L1)-L(shl_2))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x02(%rsi), %xmm1 - jb L(L2_fwd) - lea (L(shl_2_loop_L2)-L(shl_2_loop_L1))(%r9), %r9 + lea (L(shl_2_loop_L1)-L(shl_2))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x02(%rsi), %xmm1 + jb L(L2_fwd) + lea (L(shl_2_loop_L2)-L(shl_2_loop_L1))(%r9), %r9 L(L2_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_2_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_2_loop_L1): - sub $64, %rdx - movaps 0x0e(%rsi), %xmm2 - movaps 0x1e(%rsi), %xmm3 - movaps 0x2e(%rsi), %xmm4 - movaps 0x3e(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $2, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $2, %xmm3, %xmm4 - palignr $2, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $2, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_2_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x0e(%rsi), %xmm2 + movaps 0x1e(%rsi), %xmm3 + movaps 0x2e(%rsi), %xmm4 + movaps 0x3e(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $2, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $2, %xmm3, %xmm4 + 
palignr $2, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $2, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_2_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_2_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_2_bwd): - lea (L(shl_2_bwd_loop_L1)-L(shl_2_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x02(%rsi), %xmm1 - jb L(L2_bwd) - lea (L(shl_2_bwd_loop_L2)-L(shl_2_bwd_loop_L1))(%r9), %r9 + lea (L(shl_2_bwd_loop_L1)-L(shl_2_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x02(%rsi), %xmm1 + jb L(L2_bwd) + lea (L(shl_2_bwd_loop_L2)-L(shl_2_bwd_loop_L1))(%r9), %r9 L(L2_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_2_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_2_bwd_loop_L1): - movaps -0x12(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x22(%rsi), %xmm3 - movaps -0x32(%rsi), %xmm4 - movaps -0x42(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $2, %xmm2, %xmm1 - palignr $2, %xmm3, %xmm2 - palignr $2, %xmm4, %xmm3 - palignr $2, %xmm5, %xmm4 + movaps -0x12(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x22(%rsi), %xmm3 + movaps -0x32(%rsi), %xmm4 + movaps -0x42(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $2, %xmm2, %xmm1 + palignr $2, %xmm3, %xmm2 + palignr $2, %xmm4, %xmm3 + palignr $2, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 
0x10(%rdi) - jb L(shl_2_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_2_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_2_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_3): - lea (L(shl_3_loop_L1)-L(shl_3))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x03(%rsi), %xmm1 - jb L(L3_fwd) - lea (L(shl_3_loop_L2)-L(shl_3_loop_L1))(%r9), %r9 + lea (L(shl_3_loop_L1)-L(shl_3))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x03(%rsi), %xmm1 + jb L(L3_fwd) + lea (L(shl_3_loop_L2)-L(shl_3_loop_L1))(%r9), %r9 L(L3_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_3_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_3_loop_L1): - sub $64, %rdx - movaps 0x0d(%rsi), %xmm2 - movaps 0x1d(%rsi), %xmm3 - movaps 0x2d(%rsi), %xmm4 - movaps 0x3d(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $3, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $3, %xmm3, %xmm4 - palignr $3, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $3, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_3_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x0d(%rsi), %xmm2 + movaps 0x1d(%rsi), %xmm3 + movaps 0x2d(%rsi), %xmm4 + movaps 0x3d(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $3, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $3, %xmm3, %xmm4 + palignr $3, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $3, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_3_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_3_end): - movaps %xmm4, 
-0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_3_bwd): - lea (L(shl_3_bwd_loop_L1)-L(shl_3_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x03(%rsi), %xmm1 - jb L(L3_bwd) - lea (L(shl_3_bwd_loop_L2)-L(shl_3_bwd_loop_L1))(%r9), %r9 + lea (L(shl_3_bwd_loop_L1)-L(shl_3_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x03(%rsi), %xmm1 + jb L(L3_bwd) + lea (L(shl_3_bwd_loop_L2)-L(shl_3_bwd_loop_L1))(%r9), %r9 L(L3_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_3_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_3_bwd_loop_L1): - movaps -0x13(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x23(%rsi), %xmm3 - movaps -0x33(%rsi), %xmm4 - movaps -0x43(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $3, %xmm2, %xmm1 - palignr $3, %xmm3, %xmm2 - palignr $3, %xmm4, %xmm3 - palignr $3, %xmm5, %xmm4 + movaps -0x13(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x23(%rsi), %xmm3 + movaps -0x33(%rsi), %xmm4 + movaps -0x43(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $3, %xmm2, %xmm1 + palignr $3, %xmm3, %xmm2 + palignr $3, %xmm4, %xmm3 + palignr $3, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_3_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_3_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_3_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - 
BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_4): - lea (L(shl_4_loop_L1)-L(shl_4))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x04(%rsi), %xmm1 - jb L(L4_fwd) - lea (L(shl_4_loop_L2)-L(shl_4_loop_L1))(%r9), %r9 + lea (L(shl_4_loop_L1)-L(shl_4))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x04(%rsi), %xmm1 + jb L(L4_fwd) + lea (L(shl_4_loop_L2)-L(shl_4_loop_L1))(%r9), %r9 L(L4_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_4_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_4_loop_L1): - sub $64, %rdx - movaps 0x0c(%rsi), %xmm2 - movaps 0x1c(%rsi), %xmm3 - movaps 0x2c(%rsi), %xmm4 - movaps 0x3c(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $4, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $4, %xmm3, %xmm4 - palignr $4, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $4, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_4_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x0c(%rsi), %xmm2 + movaps 0x1c(%rsi), %xmm3 + movaps 0x2c(%rsi), %xmm4 + movaps 0x3c(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $4, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $4, %xmm3, %xmm4 + palignr $4, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $4, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_4_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_4_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, 
(%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_4_bwd): - lea (L(shl_4_bwd_loop_L1)-L(shl_4_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x04(%rsi), %xmm1 - jb L(L4_bwd) - lea (L(shl_4_bwd_loop_L2)-L(shl_4_bwd_loop_L1))(%r9), %r9 + lea (L(shl_4_bwd_loop_L1)-L(shl_4_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x04(%rsi), %xmm1 + jb L(L4_bwd) + lea (L(shl_4_bwd_loop_L2)-L(shl_4_bwd_loop_L1))(%r9), %r9 L(L4_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_4_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_4_bwd_loop_L1): - movaps -0x14(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x24(%rsi), %xmm3 - movaps -0x34(%rsi), %xmm4 - movaps -0x44(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $4, %xmm2, %xmm1 - palignr $4, %xmm3, %xmm2 - palignr $4, %xmm4, %xmm3 - palignr $4, %xmm5, %xmm4 + movaps -0x14(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x24(%rsi), %xmm3 + movaps -0x34(%rsi), %xmm4 + movaps -0x44(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $4, %xmm2, %xmm1 + palignr $4, %xmm3, %xmm2 + palignr $4, %xmm4, %xmm3 + palignr $4, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_4_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_4_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_4_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_5): - lea (L(shl_5_loop_L1)-L(shl_5))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x05(%rsi), %xmm1 - 
jb L(L5_fwd) - lea (L(shl_5_loop_L2)-L(shl_5_loop_L1))(%r9), %r9 + lea (L(shl_5_loop_L1)-L(shl_5))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x05(%rsi), %xmm1 + jb L(L5_fwd) + lea (L(shl_5_loop_L2)-L(shl_5_loop_L1))(%r9), %r9 L(L5_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_5_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_5_loop_L1): - sub $64, %rdx - movaps 0x0b(%rsi), %xmm2 - movaps 0x1b(%rsi), %xmm3 - movaps 0x2b(%rsi), %xmm4 - movaps 0x3b(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $5, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $5, %xmm3, %xmm4 - palignr $5, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $5, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_5_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x0b(%rsi), %xmm2 + movaps 0x1b(%rsi), %xmm3 + movaps 0x2b(%rsi), %xmm4 + movaps 0x3b(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $5, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $5, %xmm3, %xmm4 + palignr $5, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $5, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_5_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_5_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_5_bwd): - lea (L(shl_5_bwd_loop_L1)-L(shl_5_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x05(%rsi), %xmm1 - jb L(L5_bwd) - lea (L(shl_5_bwd_loop_L2)-L(shl_5_bwd_loop_L1))(%r9), %r9 + lea 
(L(shl_5_bwd_loop_L1)-L(shl_5_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x05(%rsi), %xmm1 + jb L(L5_bwd) + lea (L(shl_5_bwd_loop_L2)-L(shl_5_bwd_loop_L1))(%r9), %r9 L(L5_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_5_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_5_bwd_loop_L1): - movaps -0x15(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x25(%rsi), %xmm3 - movaps -0x35(%rsi), %xmm4 - movaps -0x45(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $5, %xmm2, %xmm1 - palignr $5, %xmm3, %xmm2 - palignr $5, %xmm4, %xmm3 - palignr $5, %xmm5, %xmm4 + movaps -0x15(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x25(%rsi), %xmm3 + movaps -0x35(%rsi), %xmm4 + movaps -0x45(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $5, %xmm2, %xmm1 + palignr $5, %xmm3, %xmm2 + palignr $5, %xmm4, %xmm3 + palignr $5, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_5_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_5_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_5_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_6): - lea (L(shl_6_loop_L1)-L(shl_6))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x06(%rsi), %xmm1 - jb L(L6_fwd) - lea (L(shl_6_loop_L2)-L(shl_6_loop_L1))(%r9), %r9 + lea (L(shl_6_loop_L1)-L(shl_6))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x06(%rsi), %xmm1 + jb L(L6_fwd) + lea (L(shl_6_loop_L2)-L(shl_6_loop_L1))(%r9), %r9 L(L6_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea 
-64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_6_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_6_loop_L1): - sub $64, %rdx - movaps 0x0a(%rsi), %xmm2 - movaps 0x1a(%rsi), %xmm3 - movaps 0x2a(%rsi), %xmm4 - movaps 0x3a(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $6, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $6, %xmm3, %xmm4 - palignr $6, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $6, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_6_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x0a(%rsi), %xmm2 + movaps 0x1a(%rsi), %xmm3 + movaps 0x2a(%rsi), %xmm4 + movaps 0x3a(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $6, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $6, %xmm3, %xmm4 + palignr $6, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $6, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_6_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_6_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_6_bwd): - lea (L(shl_6_bwd_loop_L1)-L(shl_6_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x06(%rsi), %xmm1 - jb L(L6_bwd) - lea (L(shl_6_bwd_loop_L2)-L(shl_6_bwd_loop_L1))(%r9), %r9 + lea (L(shl_6_bwd_loop_L1)-L(shl_6_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x06(%rsi), %xmm1 + jb L(L6_bwd) + lea (L(shl_6_bwd_loop_L2)-L(shl_6_bwd_loop_L1))(%r9), %r9 L(L6_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 
L(shl_6_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_6_bwd_loop_L1): - movaps -0x16(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x26(%rsi), %xmm3 - movaps -0x36(%rsi), %xmm4 - movaps -0x46(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $6, %xmm2, %xmm1 - palignr $6, %xmm3, %xmm2 - palignr $6, %xmm4, %xmm3 - palignr $6, %xmm5, %xmm4 + movaps -0x16(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x26(%rsi), %xmm3 + movaps -0x36(%rsi), %xmm4 + movaps -0x46(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $6, %xmm2, %xmm1 + palignr $6, %xmm3, %xmm2 + palignr $6, %xmm4, %xmm3 + palignr $6, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_6_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_6_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_6_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_7): - lea (L(shl_7_loop_L1)-L(shl_7))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x07(%rsi), %xmm1 - jb L(L7_fwd) - lea (L(shl_7_loop_L2)-L(shl_7_loop_L1))(%r9), %r9 + lea (L(shl_7_loop_L1)-L(shl_7))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x07(%rsi), %xmm1 + jb L(L7_fwd) + lea (L(shl_7_loop_L2)-L(shl_7_loop_L1))(%r9), %r9 L(L7_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_7_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_7_loop_L1): - sub $64, %rdx - movaps 0x09(%rsi), %xmm2 - movaps 0x19(%rsi), %xmm3 - movaps 0x29(%rsi), %xmm4 - movaps 0x39(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - 
palignr $7, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $7, %xmm3, %xmm4 - palignr $7, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $7, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_7_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x09(%rsi), %xmm2 + movaps 0x19(%rsi), %xmm3 + movaps 0x29(%rsi), %xmm4 + movaps 0x39(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $7, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $7, %xmm3, %xmm4 + palignr $7, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $7, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_7_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_7_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_7_bwd): - lea (L(shl_7_bwd_loop_L1)-L(shl_7_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x07(%rsi), %xmm1 - jb L(L7_bwd) - lea (L(shl_7_bwd_loop_L2)-L(shl_7_bwd_loop_L1))(%r9), %r9 + lea (L(shl_7_bwd_loop_L1)-L(shl_7_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x07(%rsi), %xmm1 + jb L(L7_bwd) + lea (L(shl_7_bwd_loop_L2)-L(shl_7_bwd_loop_L1))(%r9), %r9 L(L7_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_7_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_7_bwd_loop_L1): - movaps -0x17(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x27(%rsi), %xmm3 - movaps -0x37(%rsi), %xmm4 - movaps -0x47(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $7, %xmm2, %xmm1 - palignr $7, 
%xmm3, %xmm2 - palignr $7, %xmm4, %xmm3 - palignr $7, %xmm5, %xmm4 + movaps -0x17(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x27(%rsi), %xmm3 + movaps -0x37(%rsi), %xmm4 + movaps -0x47(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $7, %xmm2, %xmm1 + palignr $7, %xmm3, %xmm2 + palignr $7, %xmm4, %xmm3 + palignr $7, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_7_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_7_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_7_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_8): - lea (L(shl_8_loop_L1)-L(shl_8))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x08(%rsi), %xmm1 - jb L(L8_fwd) - lea (L(shl_8_loop_L2)-L(shl_8_loop_L1))(%r9), %r9 + lea (L(shl_8_loop_L1)-L(shl_8))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x08(%rsi), %xmm1 + jb L(L8_fwd) + lea (L(shl_8_loop_L2)-L(shl_8_loop_L1))(%r9), %r9 L(L8_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 L(shl_8_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_8_loop_L1): - sub $64, %rdx - movaps 0x08(%rsi), %xmm2 - movaps 0x18(%rsi), %xmm3 - movaps 0x28(%rsi), %xmm4 - movaps 0x38(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $8, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $8, %xmm3, %xmm4 - palignr $8, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $8, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_8_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - 
_CET_NOTRACK jmp *%r9 - ud2 - .p2align 4 + sub $64, %rdx + movaps 0x08(%rsi), %xmm2 + movaps 0x18(%rsi), %xmm3 + movaps 0x28(%rsi), %xmm4 + movaps 0x38(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $8, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $8, %xmm3, %xmm4 + palignr $8, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $8, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_8_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 + .p2align 4 L(shl_8_end): - lea 64(%rdx), %rdx - movaps %xmm4, -0x20(%rdi) - add %rdx, %rsi - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + lea 64(%rdx), %rdx + movaps %xmm4, -0x20(%rdi) + add %rdx, %rsi + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_8_bwd): - lea (L(shl_8_bwd_loop_L1)-L(shl_8_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x08(%rsi), %xmm1 - jb L(L8_bwd) - lea (L(shl_8_bwd_loop_L2)-L(shl_8_bwd_loop_L1))(%r9), %r9 + lea (L(shl_8_bwd_loop_L1)-L(shl_8_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x08(%rsi), %xmm1 + jb L(L8_bwd) + lea (L(shl_8_bwd_loop_L2)-L(shl_8_bwd_loop_L1))(%r9), %r9 L(L8_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_8_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_8_bwd_loop_L1): - movaps -0x18(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x28(%rsi), %xmm3 - movaps -0x38(%rsi), %xmm4 - movaps -0x48(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $8, %xmm2, %xmm1 - palignr $8, %xmm3, %xmm2 - palignr $8, %xmm4, %xmm3 - palignr $8, %xmm5, %xmm4 + movaps -0x18(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x28(%rsi), %xmm3 + movaps -0x38(%rsi), %xmm4 + movaps -0x48(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $8, %xmm2, %xmm1 + palignr $8, %xmm3, %xmm2 + 
palignr $8, %xmm4, %xmm3 + palignr $8, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_8_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_8_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_8_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_9): - lea (L(shl_9_loop_L1)-L(shl_9))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x09(%rsi), %xmm1 - jb L(L9_fwd) - lea (L(shl_9_loop_L2)-L(shl_9_loop_L1))(%r9), %r9 + lea (L(shl_9_loop_L1)-L(shl_9))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x09(%rsi), %xmm1 + jb L(L9_fwd) + lea (L(shl_9_loop_L2)-L(shl_9_loop_L1))(%r9), %r9 L(L9_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_9_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_9_loop_L1): - sub $64, %rdx - movaps 0x07(%rsi), %xmm2 - movaps 0x17(%rsi), %xmm3 - movaps 0x27(%rsi), %xmm4 - movaps 0x37(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $9, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $9, %xmm3, %xmm4 - palignr $9, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $9, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_9_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x07(%rsi), %xmm2 + movaps 0x17(%rsi), %xmm3 + movaps 0x27(%rsi), %xmm4 + movaps 0x37(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $9, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $9, %xmm3, %xmm4 + palignr $9, %xmm2, 
%xmm3 + lea 64(%rdi), %rdi + palignr $9, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_9_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_9_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_9_bwd): - lea (L(shl_9_bwd_loop_L1)-L(shl_9_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x09(%rsi), %xmm1 - jb L(L9_bwd) - lea (L(shl_9_bwd_loop_L2)-L(shl_9_bwd_loop_L1))(%r9), %r9 + lea (L(shl_9_bwd_loop_L1)-L(shl_9_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x09(%rsi), %xmm1 + jb L(L9_bwd) + lea (L(shl_9_bwd_loop_L2)-L(shl_9_bwd_loop_L1))(%r9), %r9 L(L9_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_9_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_9_bwd_loop_L1): - movaps -0x19(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x29(%rsi), %xmm3 - movaps -0x39(%rsi), %xmm4 - movaps -0x49(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $9, %xmm2, %xmm1 - palignr $9, %xmm3, %xmm2 - palignr $9, %xmm4, %xmm3 - palignr $9, %xmm5, %xmm4 + movaps -0x19(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x29(%rsi), %xmm3 + movaps -0x39(%rsi), %xmm4 + movaps -0x49(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $9, %xmm2, %xmm1 + palignr $9, %xmm3, %xmm2 + palignr $9, %xmm4, %xmm3 + palignr $9, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb 
L(shl_9_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_9_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_9_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_10): - lea (L(shl_10_loop_L1)-L(shl_10))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x0a(%rsi), %xmm1 - jb L(L10_fwd) - lea (L(shl_10_loop_L2)-L(shl_10_loop_L1))(%r9), %r9 + lea (L(shl_10_loop_L1)-L(shl_10))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0a(%rsi), %xmm1 + jb L(L10_fwd) + lea (L(shl_10_loop_L2)-L(shl_10_loop_L1))(%r9), %r9 L(L10_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_10_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_10_loop_L1): - sub $64, %rdx - movaps 0x06(%rsi), %xmm2 - movaps 0x16(%rsi), %xmm3 - movaps 0x26(%rsi), %xmm4 - movaps 0x36(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $10, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $10, %xmm3, %xmm4 - palignr $10, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $10, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_10_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x06(%rsi), %xmm2 + movaps 0x16(%rsi), %xmm3 + movaps 0x26(%rsi), %xmm4 + movaps 0x36(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $10, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $10, %xmm3, %xmm4 + palignr $10, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $10, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_10_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_10_end): - movaps %xmm4, 
-0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_10_bwd): - lea (L(shl_10_bwd_loop_L1)-L(shl_10_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x0a(%rsi), %xmm1 - jb L(L10_bwd) - lea (L(shl_10_bwd_loop_L2)-L(shl_10_bwd_loop_L1))(%r9), %r9 + lea (L(shl_10_bwd_loop_L1)-L(shl_10_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0a(%rsi), %xmm1 + jb L(L10_bwd) + lea (L(shl_10_bwd_loop_L2)-L(shl_10_bwd_loop_L1))(%r9), %r9 L(L10_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_10_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_10_bwd_loop_L1): - movaps -0x1a(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x2a(%rsi), %xmm3 - movaps -0x3a(%rsi), %xmm4 - movaps -0x4a(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $10, %xmm2, %xmm1 - palignr $10, %xmm3, %xmm2 - palignr $10, %xmm4, %xmm3 - palignr $10, %xmm5, %xmm4 + movaps -0x1a(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2a(%rsi), %xmm3 + movaps -0x3a(%rsi), %xmm4 + movaps -0x4a(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $10, %xmm2, %xmm1 + palignr $10, %xmm3, %xmm2 + palignr $10, %xmm4, %xmm3 + palignr $10, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_10_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_10_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_10_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, 
(%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_11): - lea (L(shl_11_loop_L1)-L(shl_11))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x0b(%rsi), %xmm1 - jb L(L11_fwd) - lea (L(shl_11_loop_L2)-L(shl_11_loop_L1))(%r9), %r9 + lea (L(shl_11_loop_L1)-L(shl_11))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0b(%rsi), %xmm1 + jb L(L11_fwd) + lea (L(shl_11_loop_L2)-L(shl_11_loop_L1))(%r9), %r9 L(L11_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_11_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_11_loop_L1): - sub $64, %rdx - movaps 0x05(%rsi), %xmm2 - movaps 0x15(%rsi), %xmm3 - movaps 0x25(%rsi), %xmm4 - movaps 0x35(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $11, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $11, %xmm3, %xmm4 - palignr $11, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $11, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_11_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x05(%rsi), %xmm2 + movaps 0x15(%rsi), %xmm3 + movaps 0x25(%rsi), %xmm4 + movaps 0x35(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $11, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $11, %xmm3, %xmm4 + palignr $11, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $11, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_11_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_11_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) 
+ add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_11_bwd): - lea (L(shl_11_bwd_loop_L1)-L(shl_11_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x0b(%rsi), %xmm1 - jb L(L11_bwd) - lea (L(shl_11_bwd_loop_L2)-L(shl_11_bwd_loop_L1))(%r9), %r9 + lea (L(shl_11_bwd_loop_L1)-L(shl_11_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0b(%rsi), %xmm1 + jb L(L11_bwd) + lea (L(shl_11_bwd_loop_L2)-L(shl_11_bwd_loop_L1))(%r9), %r9 L(L11_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_11_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_11_bwd_loop_L1): - movaps -0x1b(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x2b(%rsi), %xmm3 - movaps -0x3b(%rsi), %xmm4 - movaps -0x4b(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $11, %xmm2, %xmm1 - palignr $11, %xmm3, %xmm2 - palignr $11, %xmm4, %xmm3 - palignr $11, %xmm5, %xmm4 + movaps -0x1b(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2b(%rsi), %xmm3 + movaps -0x3b(%rsi), %xmm4 + movaps -0x4b(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $11, %xmm2, %xmm1 + palignr $11, %xmm3, %xmm2 + palignr $11, %xmm4, %xmm3 + palignr $11, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_11_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_11_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_11_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_12): - lea 
(L(shl_12_loop_L1)-L(shl_12))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x0c(%rsi), %xmm1 - jb L(L12_fwd) - lea (L(shl_12_loop_L2)-L(shl_12_loop_L1))(%r9), %r9 + lea (L(shl_12_loop_L1)-L(shl_12))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0c(%rsi), %xmm1 + jb L(L12_fwd) + lea (L(shl_12_loop_L2)-L(shl_12_loop_L1))(%r9), %r9 L(L12_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_12_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_12_loop_L1): - sub $64, %rdx - movaps 0x04(%rsi), %xmm2 - movaps 0x14(%rsi), %xmm3 - movaps 0x24(%rsi), %xmm4 - movaps 0x34(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $12, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $12, %xmm3, %xmm4 - palignr $12, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $12, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_12_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x04(%rsi), %xmm2 + movaps 0x14(%rsi), %xmm3 + movaps 0x24(%rsi), %xmm4 + movaps 0x34(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $12, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $12, %xmm3, %xmm4 + palignr $12, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $12, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_12_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_12_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_12_bwd): - lea (L(shl_12_bwd_loop_L1)-L(shl_12_bwd))(%r9), %r9 - cmp %rcx, %rdx - 
movaps -0x0c(%rsi), %xmm1 - jb L(L12_bwd) - lea (L(shl_12_bwd_loop_L2)-L(shl_12_bwd_loop_L1))(%r9), %r9 + lea (L(shl_12_bwd_loop_L1)-L(shl_12_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0c(%rsi), %xmm1 + jb L(L12_bwd) + lea (L(shl_12_bwd_loop_L2)-L(shl_12_bwd_loop_L1))(%r9), %r9 L(L12_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_12_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_12_bwd_loop_L1): - movaps -0x1c(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x2c(%rsi), %xmm3 - movaps -0x3c(%rsi), %xmm4 - movaps -0x4c(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $12, %xmm2, %xmm1 - palignr $12, %xmm3, %xmm2 - palignr $12, %xmm4, %xmm3 - palignr $12, %xmm5, %xmm4 + movaps -0x1c(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2c(%rsi), %xmm3 + movaps -0x3c(%rsi), %xmm4 + movaps -0x4c(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $12, %xmm2, %xmm1 + palignr $12, %xmm3, %xmm2 + palignr $12, %xmm4, %xmm3 + palignr $12, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_12_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_12_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_12_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_13): - lea (L(shl_13_loop_L1)-L(shl_13))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x0d(%rsi), %xmm1 - jb L(L13_fwd) - lea (L(shl_13_loop_L2)-L(shl_13_loop_L1))(%r9), %r9 + lea (L(shl_13_loop_L1)-L(shl_13))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0d(%rsi), %xmm1 + jb 
L(L13_fwd) + lea (L(shl_13_loop_L2)-L(shl_13_loop_L1))(%r9), %r9 L(L13_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_13_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_13_loop_L1): - sub $64, %rdx - movaps 0x03(%rsi), %xmm2 - movaps 0x13(%rsi), %xmm3 - movaps 0x23(%rsi), %xmm4 - movaps 0x33(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $13, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $13, %xmm3, %xmm4 - palignr $13, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $13, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_13_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x03(%rsi), %xmm2 + movaps 0x13(%rsi), %xmm3 + movaps 0x23(%rsi), %xmm4 + movaps 0x33(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $13, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $13, %xmm3, %xmm4 + palignr $13, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $13, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_13_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_13_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_13_bwd): - lea (L(shl_13_bwd_loop_L1)-L(shl_13_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x0d(%rsi), %xmm1 - jb L(L13_bwd) - lea (L(shl_13_bwd_loop_L2)-L(shl_13_bwd_loop_L1))(%r9), %r9 + lea (L(shl_13_bwd_loop_L1)-L(shl_13_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0d(%rsi), %xmm1 + jb L(L13_bwd) + lea 
(L(shl_13_bwd_loop_L2)-L(shl_13_bwd_loop_L1))(%r9), %r9 L(L13_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_13_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_13_bwd_loop_L1): - movaps -0x1d(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x2d(%rsi), %xmm3 - movaps -0x3d(%rsi), %xmm4 - movaps -0x4d(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $13, %xmm2, %xmm1 - palignr $13, %xmm3, %xmm2 - palignr $13, %xmm4, %xmm3 - palignr $13, %xmm5, %xmm4 + movaps -0x1d(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2d(%rsi), %xmm3 + movaps -0x3d(%rsi), %xmm4 + movaps -0x4d(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $13, %xmm2, %xmm1 + palignr $13, %xmm3, %xmm2 + palignr $13, %xmm4, %xmm3 + palignr $13, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_13_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_13_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_13_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_14): - lea (L(shl_14_loop_L1)-L(shl_14))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x0e(%rsi), %xmm1 - jb L(L14_fwd) - lea (L(shl_14_loop_L2)-L(shl_14_loop_L1))(%r9), %r9 + lea (L(shl_14_loop_L1)-L(shl_14))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0e(%rsi), %xmm1 + jb L(L14_fwd) + lea (L(shl_14_loop_L2)-L(shl_14_loop_L1))(%r9), %r9 L(L14_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_14_loop_L2): - prefetchnta 0x1c0(%rsi) 
+ prefetchnta 0x1c0(%rsi) L(shl_14_loop_L1): - sub $64, %rdx - movaps 0x02(%rsi), %xmm2 - movaps 0x12(%rsi), %xmm3 - movaps 0x22(%rsi), %xmm4 - movaps 0x32(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $14, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr $14, %xmm3, %xmm4 - palignr $14, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $14, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_14_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x02(%rsi), %xmm2 + movaps 0x12(%rsi), %xmm3 + movaps 0x22(%rsi), %xmm4 + movaps 0x32(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $14, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $14, %xmm3, %xmm4 + palignr $14, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $14, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_14_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_14_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_14_bwd): - lea (L(shl_14_bwd_loop_L1)-L(shl_14_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x0e(%rsi), %xmm1 - jb L(L14_bwd) - lea (L(shl_14_bwd_loop_L2)-L(shl_14_bwd_loop_L1))(%r9), %r9 + lea (L(shl_14_bwd_loop_L1)-L(shl_14_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0e(%rsi), %xmm1 + jb L(L14_bwd) + lea (L(shl_14_bwd_loop_L2)-L(shl_14_bwd_loop_L1))(%r9), %r9 L(L14_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_14_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) 
L(shl_14_bwd_loop_L1): - movaps -0x1e(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x2e(%rsi), %xmm3 - movaps -0x3e(%rsi), %xmm4 - movaps -0x4e(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $14, %xmm2, %xmm1 - palignr $14, %xmm3, %xmm2 - palignr $14, %xmm4, %xmm3 - palignr $14, %xmm5, %xmm4 + movaps -0x1e(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2e(%rsi), %xmm3 + movaps -0x3e(%rsi), %xmm4 + movaps -0x4e(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $14, %xmm2, %xmm1 + palignr $14, %xmm3, %xmm2 + palignr $14, %xmm4, %xmm3 + palignr $14, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_14_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_14_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_14_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_15): - lea (L(shl_15_loop_L1)-L(shl_15))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x0f(%rsi), %xmm1 - jb L(L15_fwd) - lea (L(shl_15_loop_L2)-L(shl_15_loop_L1))(%r9), %r9 + lea (L(shl_15_loop_L1)-L(shl_15))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0f(%rsi), %xmm1 + jb L(L15_fwd) + lea (L(shl_15_loop_L2)-L(shl_15_loop_L1))(%r9), %r9 L(L15_fwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_15_loop_L2): - prefetchnta 0x1c0(%rsi) + prefetchnta 0x1c0(%rsi) L(shl_15_loop_L1): - sub $64, %rdx - movaps 0x01(%rsi), %xmm2 - movaps 0x11(%rsi), %xmm3 - movaps 0x21(%rsi), %xmm4 - movaps 0x31(%rsi), %xmm5 - movdqa %xmm5, %xmm6 - palignr $15, %xmm4, %xmm5 - lea 64(%rsi), %rsi - palignr 
$15, %xmm3, %xmm4 - palignr $15, %xmm2, %xmm3 - lea 64(%rdi), %rdi - palignr $15, %xmm1, %xmm2 - movdqa %xmm6, %xmm1 - movdqa %xmm2, -0x40(%rdi) - movaps %xmm3, -0x30(%rdi) - jb L(shl_15_end) - movaps %xmm4, -0x20(%rdi) - movaps %xmm5, -0x10(%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + sub $64, %rdx + movaps 0x01(%rsi), %xmm2 + movaps 0x11(%rsi), %xmm3 + movaps 0x21(%rsi), %xmm4 + movaps 0x31(%rsi), %xmm5 + movdqa %xmm5, %xmm6 + palignr $15, %xmm4, %xmm5 + lea 64(%rsi), %rsi + palignr $15, %xmm3, %xmm4 + palignr $15, %xmm2, %xmm3 + lea 64(%rdi), %rdi + palignr $15, %xmm1, %xmm2 + movdqa %xmm6, %xmm1 + movdqa %xmm2, -0x40(%rdi) + movaps %xmm3, -0x30(%rdi) + jb L(shl_15_end) + movaps %xmm4, -0x20(%rdi) + movaps %xmm5, -0x10(%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_15_end): - movaps %xmm4, -0x20(%rdi) - lea 64(%rdx), %rdx - movaps %xmm5, -0x10(%rdi) - add %rdx, %rdi - movdqu %xmm0, (%r8) - add %rdx, %rsi - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, -0x20(%rdi) + lea 64(%rdx), %rdx + movaps %xmm5, -0x10(%rdi) + add %rdx, %rdi + movdqu %xmm0, (%r8) + add %rdx, %rsi + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(shl_15_bwd): - lea (L(shl_15_bwd_loop_L1)-L(shl_15_bwd))(%r9), %r9 - cmp %rcx, %rdx - movaps -0x0f(%rsi), %xmm1 - jb L(L15_bwd) - lea (L(shl_15_bwd_loop_L2)-L(shl_15_bwd_loop_L1))(%r9), %r9 + lea (L(shl_15_bwd_loop_L1)-L(shl_15_bwd))(%r9), %r9 + cmp %rcx, %rdx + movaps -0x0f(%rsi), %xmm1 + jb L(L15_bwd) + lea (L(shl_15_bwd_loop_L2)-L(shl_15_bwd_loop_L1))(%r9), %r9 L(L15_bwd): - lea -64(%rdx), %rdx - _CET_NOTRACK jmp *%r9 - ud2 + lea -64(%rdx), %rdx + _CET_NOTRACK jmp *%r9 + ud2 L(shl_15_bwd_loop_L2): - prefetchnta -0x1c0(%rsi) + prefetchnta -0x1c0(%rsi) L(shl_15_bwd_loop_L1): - movaps -0x1f(%rsi), %xmm2 - sub $0x40, %rdx - movaps -0x2f(%rsi), %xmm3 - movaps -0x3f(%rsi), %xmm4 - movaps -0x4f(%rsi), %xmm5 - lea -0x40(%rsi), %rsi - palignr $15, %xmm2, %xmm1 - palignr $15, %xmm3, %xmm2 - palignr $15, %xmm4, 
%xmm3 - palignr $15, %xmm5, %xmm4 + movaps -0x1f(%rsi), %xmm2 + sub $0x40, %rdx + movaps -0x2f(%rsi), %xmm3 + movaps -0x3f(%rsi), %xmm4 + movaps -0x4f(%rsi), %xmm5 + lea -0x40(%rsi), %rsi + palignr $15, %xmm2, %xmm1 + palignr $15, %xmm3, %xmm2 + palignr $15, %xmm4, %xmm3 + palignr $15, %xmm5, %xmm4 - movaps %xmm1, -0x10(%rdi) - movaps %xmm5, %xmm1 + movaps %xmm1, -0x10(%rdi) + movaps %xmm5, %xmm1 - movaps %xmm2, -0x20(%rdi) - lea -0x40(%rdi), %rdi + movaps %xmm2, -0x20(%rdi) + lea -0x40(%rdi), %rdi - movaps %xmm3, 0x10(%rdi) - jb L(shl_15_bwd_end) - movaps %xmm4, (%rdi) - _CET_NOTRACK jmp *%r9 - ud2 + movaps %xmm3, 0x10(%rdi) + jb L(shl_15_bwd_end) + movaps %xmm4, (%rdi) + _CET_NOTRACK jmp *%r9 + ud2 L(shl_15_bwd_end): - movaps %xmm4, (%rdi) - lea 64(%rdx), %rdx - movdqu %xmm0, (%r8) - BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) + movaps %xmm4, (%rdi) + lea 64(%rdx), %rdx + movdqu %xmm0, (%r8) + BRANCH_TO_JMPTBL_ENTRY(L(table_less_80bytes), %rdx, 4) - .p2align 4 + .p2align 4 L(write_72bytes): - movdqu -72(%rsi), %xmm0 - movdqu -56(%rsi), %xmm1 - mov -40(%rsi), %r8 - mov -32(%rsi), %r9 - mov -24(%rsi), %r10 - mov -16(%rsi), %r11 - mov -8(%rsi), %rcx - movdqu %xmm0, -72(%rdi) - movdqu %xmm1, -56(%rdi) - mov %r8, -40(%rdi) - mov %r9, -32(%rdi) - mov %r10, -24(%rdi) - mov %r11, -16(%rdi) - mov %rcx, -8(%rdi) - ret + movdqu -72(%rsi), %xmm0 + movdqu -56(%rsi), %xmm1 + mov -40(%rsi), %r8 + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rcx + movdqu %xmm0, -72(%rdi) + movdqu %xmm1, -56(%rdi) + mov %r8, -40(%rdi) + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rcx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_64bytes): - movdqu -64(%rsi), %xmm0 - mov -48(%rsi), %rcx - mov -40(%rsi), %r8 - mov -32(%rsi), %r9 - mov -24(%rsi), %r10 - mov -16(%rsi), %r11 - mov -8(%rsi), %rdx - movdqu %xmm0, -64(%rdi) - mov %rcx, -48(%rdi) - mov %r8, -40(%rdi) - mov %r9, -32(%rdi) - mov %r10, -24(%rdi) - mov %r11, -16(%rdi) 
- mov %rdx, -8(%rdi) - ret + movdqu -64(%rsi), %xmm0 + mov -48(%rsi), %rcx + mov -40(%rsi), %r8 + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + movdqu %xmm0, -64(%rdi) + mov %rcx, -48(%rdi) + mov %r8, -40(%rdi) + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_56bytes): - movdqu -56(%rsi), %xmm0 - mov -40(%rsi), %r8 - mov -32(%rsi), %r9 - mov -24(%rsi), %r10 - mov -16(%rsi), %r11 - mov -8(%rsi), %rcx - movdqu %xmm0, -56(%rdi) - mov %r8, -40(%rdi) - mov %r9, -32(%rdi) - mov %r10, -24(%rdi) - mov %r11, -16(%rdi) - mov %rcx, -8(%rdi) - ret + movdqu -56(%rsi), %xmm0 + mov -40(%rsi), %r8 + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rcx + movdqu %xmm0, -56(%rdi) + mov %r8, -40(%rdi) + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rcx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_48bytes): - mov -48(%rsi), %rcx - mov -40(%rsi), %r8 - mov -32(%rsi), %r9 - mov -24(%rsi), %r10 - mov -16(%rsi), %r11 - mov -8(%rsi), %rdx - mov %rcx, -48(%rdi) - mov %r8, -40(%rdi) - mov %r9, -32(%rdi) - mov %r10, -24(%rdi) - mov %r11, -16(%rdi) - mov %rdx, -8(%rdi) - ret + mov -48(%rsi), %rcx + mov -40(%rsi), %r8 + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + mov %rcx, -48(%rdi) + mov %r8, -40(%rdi) + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_40bytes): - mov -40(%rsi), %r8 - mov -32(%rsi), %r9 - mov -24(%rsi), %r10 - mov -16(%rsi), %r11 - mov -8(%rsi), %rdx - mov %r8, -40(%rdi) - mov %r9, -32(%rdi) - mov %r10, -24(%rdi) - mov %r11, -16(%rdi) - mov %rdx, -8(%rdi) - ret + mov -40(%rsi), %r8 + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + mov %r8, -40(%rdi) + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 
+ .p2align 4 L(write_32bytes): - mov -32(%rsi), %r9 - mov -24(%rsi), %r10 - mov -16(%rsi), %r11 - mov -8(%rsi), %rdx - mov %r9, -32(%rdi) - mov %r10, -24(%rdi) - mov %r11, -16(%rdi) - mov %rdx, -8(%rdi) - ret + mov -32(%rsi), %r9 + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + mov %r9, -32(%rdi) + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_24bytes): - mov -24(%rsi), %r10 - mov -16(%rsi), %r11 - mov -8(%rsi), %rdx - mov %r10, -24(%rdi) - mov %r11, -16(%rdi) - mov %rdx, -8(%rdi) - ret + mov -24(%rsi), %r10 + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + mov %r10, -24(%rdi) + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_16bytes): - mov -16(%rsi), %r11 - mov -8(%rsi), %rdx - mov %r11, -16(%rdi) - mov %rdx, -8(%rdi) - ret + mov -16(%rsi), %r11 + mov -8(%rsi), %rdx + mov %r11, -16(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_8bytes): - mov -8(%rsi), %rdx - mov %rdx, -8(%rdi) + mov -8(%rsi), %rdx + mov %rdx, -8(%rdi) L(write_0bytes): - ret + ret - .p2align 4 + .p2align 4 L(write_73bytes): - movdqu -73(%rsi), %xmm0 - movdqu -57(%rsi), %xmm1 - mov -41(%rsi), %rcx - mov -33(%rsi), %r9 - mov -25(%rsi), %r10 - mov -17(%rsi), %r11 - mov -9(%rsi), %r8 - mov -4(%rsi), %edx - movdqu %xmm0, -73(%rdi) - movdqu %xmm1, -57(%rdi) - mov %rcx, -41(%rdi) - mov %r9, -33(%rdi) - mov %r10, -25(%rdi) - mov %r11, -17(%rdi) - mov %r8, -9(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -73(%rsi), %xmm0 + movdqu -57(%rsi), %xmm1 + mov -41(%rsi), %rcx + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %r8 + mov -4(%rsi), %edx + movdqu %xmm0, -73(%rdi) + movdqu %xmm1, -57(%rdi) + mov %rcx, -41(%rdi) + mov %r9, -33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %r8, -9(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_65bytes): - movdqu -65(%rsi), %xmm0 - movdqu -49(%rsi), %xmm1 - mov -33(%rsi), %r9 - mov -25(%rsi), 
%r10 - mov -17(%rsi), %r11 - mov -9(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -65(%rdi) - movdqu %xmm1, -49(%rdi) - mov %r9, -33(%rdi) - mov %r10, -25(%rdi) - mov %r11, -17(%rdi) - mov %rcx, -9(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -65(%rsi), %xmm0 + movdqu -49(%rsi), %xmm1 + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -65(%rdi) + movdqu %xmm1, -49(%rdi) + mov %r9, -33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_57bytes): - movdqu -57(%rsi), %xmm0 - mov -41(%rsi), %r8 - mov -33(%rsi), %r9 - mov -25(%rsi), %r10 - mov -17(%rsi), %r11 - mov -9(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -57(%rdi) - mov %r8, -41(%rdi) - mov %r9, -33(%rdi) - mov %r10, -25(%rdi) - mov %r11, -17(%rdi) - mov %rcx, -9(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -57(%rsi), %xmm0 + mov -41(%rsi), %r8 + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -57(%rdi) + mov %r8, -41(%rdi) + mov %r9, -33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_49bytes): - movdqu -49(%rsi), %xmm0 - mov -33(%rsi), %r9 - mov -25(%rsi), %r10 - mov -17(%rsi), %r11 - mov -9(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -49(%rdi) - mov %r9, -33(%rdi) - mov %r10, -25(%rdi) - mov %r11, -17(%rdi) - mov %rcx, -9(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -49(%rsi), %xmm0 + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -49(%rdi) + mov %r9, -33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_41bytes): - mov -41(%rsi), %r8 - mov -33(%rsi), %r9 - mov -25(%rsi), %r10 - mov -17(%rsi), %r11 - mov -9(%rsi), %rcx - mov -1(%rsi), %dl - mov %r8, 
-41(%rdi) - mov %r9, -33(%rdi) - mov %r10, -25(%rdi) - mov %r11, -17(%rdi) - mov %rcx, -9(%rdi) - mov %dl, -1(%rdi) - ret + mov -41(%rsi), %r8 + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -1(%rsi), %dl + mov %r8, -41(%rdi) + mov %r9, -33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %dl, -1(%rdi) + ret - .p2align 4 + .p2align 4 L(write_33bytes): - mov -33(%rsi), %r9 - mov -25(%rsi), %r10 - mov -17(%rsi), %r11 - mov -9(%rsi), %rcx - mov -1(%rsi), %dl - mov %r9, -33(%rdi) - mov %r10, -25(%rdi) - mov %r11, -17(%rdi) - mov %rcx, -9(%rdi) - mov %dl, -1(%rdi) - ret + mov -33(%rsi), %r9 + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -1(%rsi), %dl + mov %r9, -33(%rdi) + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %dl, -1(%rdi) + ret - .p2align 4 + .p2align 4 L(write_25bytes): - mov -25(%rsi), %r10 - mov -17(%rsi), %r11 - mov -9(%rsi), %rcx - mov -1(%rsi), %dl - mov %r10, -25(%rdi) - mov %r11, -17(%rdi) - mov %rcx, -9(%rdi) - mov %dl, -1(%rdi) - ret + mov -25(%rsi), %r10 + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -1(%rsi), %dl + mov %r10, -25(%rdi) + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %dl, -1(%rdi) + ret - .p2align 4 + .p2align 4 L(write_17bytes): - mov -17(%rsi), %r11 - mov -9(%rsi), %rcx - mov -4(%rsi), %edx - mov %r11, -17(%rdi) - mov %rcx, -9(%rdi) - mov %edx, -4(%rdi) - ret + mov -17(%rsi), %r11 + mov -9(%rsi), %rcx + mov -4(%rsi), %edx + mov %r11, -17(%rdi) + mov %rcx, -9(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_9bytes): - mov -9(%rsi), %rcx - mov -4(%rsi), %edx - mov %rcx, -9(%rdi) - mov %edx, -4(%rdi) - ret + mov -9(%rsi), %rcx + mov -4(%rsi), %edx + mov %rcx, -9(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_1bytes): - mov -1(%rsi), %dl - mov %dl, -1(%rdi) - ret + mov -1(%rsi), %dl + mov %dl, -1(%rdi) + ret - .p2align 4 + .p2align 4 L(write_74bytes): - movdqu -74(%rsi), %xmm0 - 
movdqu -58(%rsi), %xmm1 - mov -42(%rsi), %r8 - mov -34(%rsi), %r9 - mov -26(%rsi), %r10 - mov -18(%rsi), %r11 - mov -10(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -74(%rdi) - movdqu %xmm1, -58(%rdi) - mov %r8, -42(%rdi) - mov %r9, -34(%rdi) - mov %r10, -26(%rdi) - mov %r11, -18(%rdi) - mov %rcx, -10(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -74(%rsi), %xmm0 + movdqu -58(%rsi), %xmm1 + mov -42(%rsi), %r8 + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -74(%rdi) + movdqu %xmm1, -58(%rdi) + mov %r8, -42(%rdi) + mov %r9, -34(%rdi) + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_66bytes): - movdqu -66(%rsi), %xmm0 - movdqu -50(%rsi), %xmm1 - mov -42(%rsi), %r8 - mov -34(%rsi), %r9 - mov -26(%rsi), %r10 - mov -18(%rsi), %r11 - mov -10(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -66(%rdi) - movdqu %xmm1, -50(%rdi) - mov %r8, -42(%rdi) - mov %r9, -34(%rdi) - mov %r10, -26(%rdi) - mov %r11, -18(%rdi) - mov %rcx, -10(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -66(%rsi), %xmm0 + movdqu -50(%rsi), %xmm1 + mov -42(%rsi), %r8 + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -66(%rdi) + movdqu %xmm1, -50(%rdi) + mov %r8, -42(%rdi) + mov %r9, -34(%rdi) + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_58bytes): - movdqu -58(%rsi), %xmm1 - mov -42(%rsi), %r8 - mov -34(%rsi), %r9 - mov -26(%rsi), %r10 - mov -18(%rsi), %r11 - mov -10(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm1, -58(%rdi) - mov %r8, -42(%rdi) - mov %r9, -34(%rdi) - mov %r10, -26(%rdi) - mov %r11, -18(%rdi) - mov %rcx, -10(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -58(%rsi), %xmm1 + mov -42(%rsi), %r8 + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), 
%edx + movdqu %xmm1, -58(%rdi) + mov %r8, -42(%rdi) + mov %r9, -34(%rdi) + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_50bytes): - movdqu -50(%rsi), %xmm0 - mov -34(%rsi), %r9 - mov -26(%rsi), %r10 - mov -18(%rsi), %r11 - mov -10(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -50(%rdi) - mov %r9, -34(%rdi) - mov %r10, -26(%rdi) - mov %r11, -18(%rdi) - mov %rcx, -10(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -50(%rsi), %xmm0 + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -50(%rdi) + mov %r9, -34(%rdi) + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_42bytes): - mov -42(%rsi), %r8 - mov -34(%rsi), %r9 - mov -26(%rsi), %r10 - mov -18(%rsi), %r11 - mov -10(%rsi), %rcx - mov -4(%rsi), %edx - mov %r8, -42(%rdi) - mov %r9, -34(%rdi) - mov %r10, -26(%rdi) - mov %r11, -18(%rdi) - mov %rcx, -10(%rdi) - mov %edx, -4(%rdi) - ret + mov -42(%rsi), %r8 + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + mov %r8, -42(%rdi) + mov %r9, -34(%rdi) + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_34bytes): - mov -34(%rsi), %r9 - mov -26(%rsi), %r10 - mov -18(%rsi), %r11 - mov -10(%rsi), %rcx - mov -4(%rsi), %edx - mov %r9, -34(%rdi) - mov %r10, -26(%rdi) - mov %r11, -18(%rdi) - mov %rcx, -10(%rdi) - mov %edx, -4(%rdi) - ret + mov -34(%rsi), %r9 + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + mov %r9, -34(%rdi) + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_26bytes): - mov -26(%rsi), %r10 - mov -18(%rsi), %r11 - mov -10(%rsi), %rcx - mov -4(%rsi), %edx - mov %r10, -26(%rdi) - mov %r11, -18(%rdi) - mov 
%rcx, -10(%rdi) - mov %edx, -4(%rdi) - ret + mov -26(%rsi), %r10 + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + mov %r10, -26(%rdi) + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_18bytes): - mov -18(%rsi), %r11 - mov -10(%rsi), %rcx - mov -4(%rsi), %edx - mov %r11, -18(%rdi) - mov %rcx, -10(%rdi) - mov %edx, -4(%rdi) - ret + mov -18(%rsi), %r11 + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + mov %r11, -18(%rdi) + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_10bytes): - mov -10(%rsi), %rcx - mov -4(%rsi), %edx - mov %rcx, -10(%rdi) - mov %edx, -4(%rdi) - ret + mov -10(%rsi), %rcx + mov -4(%rsi), %edx + mov %rcx, -10(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_2bytes): - mov -2(%rsi), %dx - mov %dx, -2(%rdi) - ret + mov -2(%rsi), %dx + mov %dx, -2(%rdi) + ret - .p2align 4 + .p2align 4 L(write_75bytes): - movdqu -75(%rsi), %xmm0 - movdqu -59(%rsi), %xmm1 - mov -43(%rsi), %r8 - mov -35(%rsi), %r9 - mov -27(%rsi), %r10 - mov -19(%rsi), %r11 - mov -11(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -75(%rdi) - movdqu %xmm1, -59(%rdi) - mov %r8, -43(%rdi) - mov %r9, -35(%rdi) - mov %r10, -27(%rdi) - mov %r11, -19(%rdi) - mov %rcx, -11(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -75(%rsi), %xmm0 + movdqu -59(%rsi), %xmm1 + mov -43(%rsi), %r8 + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -75(%rdi) + movdqu %xmm1, -59(%rdi) + mov %r8, -43(%rdi) + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_67bytes): - movdqu -67(%rsi), %xmm0 - movdqu -59(%rsi), %xmm1 - mov -43(%rsi), %r8 - mov -35(%rsi), %r9 - mov -27(%rsi), %r10 - mov -19(%rsi), %r11 - mov -11(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -67(%rdi) - movdqu %xmm1, -59(%rdi) - mov %r8, -43(%rdi) - mov %r9, 
-35(%rdi) - mov %r10, -27(%rdi) - mov %r11, -19(%rdi) - mov %rcx, -11(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -67(%rsi), %xmm0 + movdqu -59(%rsi), %xmm1 + mov -43(%rsi), %r8 + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -67(%rdi) + movdqu %xmm1, -59(%rdi) + mov %r8, -43(%rdi) + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_59bytes): - movdqu -59(%rsi), %xmm0 - mov -43(%rsi), %r8 - mov -35(%rsi), %r9 - mov -27(%rsi), %r10 - mov -19(%rsi), %r11 - mov -11(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -59(%rdi) - mov %r8, -43(%rdi) - mov %r9, -35(%rdi) - mov %r10, -27(%rdi) - mov %r11, -19(%rdi) - mov %rcx, -11(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -59(%rsi), %xmm0 + mov -43(%rsi), %r8 + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -59(%rdi) + mov %r8, -43(%rdi) + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_51bytes): - movdqu -51(%rsi), %xmm0 - mov -35(%rsi), %r9 - mov -27(%rsi), %r10 - mov -19(%rsi), %r11 - mov -11(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -51(%rdi) - mov %r9, -35(%rdi) - mov %r10, -27(%rdi) - mov %r11, -19(%rdi) - mov %rcx, -11(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -51(%rsi), %xmm0 + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -51(%rdi) + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_43bytes): - mov -43(%rsi), %r8 - mov -35(%rsi), %r9 - mov -27(%rsi), %r10 - mov -19(%rsi), %r11 - mov -11(%rsi), %rcx - mov -4(%rsi), %edx - mov %r8, -43(%rdi) - mov %r9, -35(%rdi) - mov %r10, -27(%rdi) - mov %r11, -19(%rdi) - 
mov %rcx, -11(%rdi) - mov %edx, -4(%rdi) - ret + mov -43(%rsi), %r8 + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + mov %r8, -43(%rdi) + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_35bytes): - mov -35(%rsi), %r9 - mov -27(%rsi), %r10 - mov -19(%rsi), %r11 - mov -11(%rsi), %rcx - mov -4(%rsi), %edx - mov %r9, -35(%rdi) - mov %r10, -27(%rdi) - mov %r11, -19(%rdi) - mov %rcx, -11(%rdi) - mov %edx, -4(%rdi) - ret + mov -35(%rsi), %r9 + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + mov %r9, -35(%rdi) + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_27bytes): - mov -27(%rsi), %r10 - mov -19(%rsi), %r11 - mov -11(%rsi), %rcx - mov -4(%rsi), %edx - mov %r10, -27(%rdi) - mov %r11, -19(%rdi) - mov %rcx, -11(%rdi) - mov %edx, -4(%rdi) - ret + mov -27(%rsi), %r10 + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + mov %r10, -27(%rdi) + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_19bytes): - mov -19(%rsi), %r11 - mov -11(%rsi), %rcx - mov -4(%rsi), %edx - mov %r11, -19(%rdi) - mov %rcx, -11(%rdi) - mov %edx, -4(%rdi) - ret + mov -19(%rsi), %r11 + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + mov %r11, -19(%rdi) + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_11bytes): - mov -11(%rsi), %rcx - mov -4(%rsi), %edx - mov %rcx, -11(%rdi) - mov %edx, -4(%rdi) - ret + mov -11(%rsi), %rcx + mov -4(%rsi), %edx + mov %rcx, -11(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_3bytes): - mov -3(%rsi), %dx - mov -2(%rsi), %cx - mov %dx, -3(%rdi) - mov %cx, -2(%rdi) - ret + mov -3(%rsi), %dx + mov -2(%rsi), %cx + mov %dx, -3(%rdi) + mov %cx, -2(%rdi) + ret - .p2align 4 + .p2align 4 
L(write_76bytes): - movdqu -76(%rsi), %xmm0 - movdqu -60(%rsi), %xmm1 - mov -44(%rsi), %r8 - mov -36(%rsi), %r9 - mov -28(%rsi), %r10 - mov -20(%rsi), %r11 - mov -12(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -76(%rdi) - movdqu %xmm1, -60(%rdi) - mov %r8, -44(%rdi) - mov %r9, -36(%rdi) - mov %r10, -28(%rdi) - mov %r11, -20(%rdi) - mov %rcx, -12(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -76(%rsi), %xmm0 + movdqu -60(%rsi), %xmm1 + mov -44(%rsi), %r8 + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -76(%rdi) + movdqu %xmm1, -60(%rdi) + mov %r8, -44(%rdi) + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_68bytes): - movdqu -68(%rsi), %xmm0 - movdqu -52(%rsi), %xmm1 - mov -36(%rsi), %r9 - mov -28(%rsi), %r10 - mov -20(%rsi), %r11 - mov -12(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -68(%rdi) - movdqu %xmm1, -52(%rdi) - mov %r9, -36(%rdi) - mov %r10, -28(%rdi) - mov %r11, -20(%rdi) - mov %rcx, -12(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -68(%rsi), %xmm0 + movdqu -52(%rsi), %xmm1 + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -68(%rdi) + movdqu %xmm1, -52(%rdi) + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_60bytes): - movdqu -60(%rsi), %xmm0 - mov -44(%rsi), %r8 - mov -36(%rsi), %r9 - mov -28(%rsi), %r10 - mov -20(%rsi), %r11 - mov -12(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -60(%rdi) - mov %r8, -44(%rdi) - mov %r9, -36(%rdi) - mov %r10, -28(%rdi) - mov %r11, -20(%rdi) - mov %rcx, -12(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -60(%rsi), %xmm0 + mov -44(%rsi), %r8 + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -60(%rdi) + mov 
%r8, -44(%rdi) + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_52bytes): - movdqu -52(%rsi), %xmm0 - mov -36(%rsi), %r9 - mov -28(%rsi), %r10 - mov -20(%rsi), %r11 - mov -12(%rsi), %rcx - mov -4(%rsi), %edx - movdqu %xmm0, -52(%rdi) - mov %r9, -36(%rdi) - mov %r10, -28(%rdi) - mov %r11, -20(%rdi) - mov %rcx, -12(%rdi) - mov %edx, -4(%rdi) - ret + movdqu -52(%rsi), %xmm0 + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + movdqu %xmm0, -52(%rdi) + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_44bytes): - mov -44(%rsi), %r8 - mov -36(%rsi), %r9 - mov -28(%rsi), %r10 - mov -20(%rsi), %r11 - mov -12(%rsi), %rcx - mov -4(%rsi), %edx - mov %r8, -44(%rdi) - mov %r9, -36(%rdi) - mov %r10, -28(%rdi) - mov %r11, -20(%rdi) - mov %rcx, -12(%rdi) - mov %edx, -4(%rdi) - ret + mov -44(%rsi), %r8 + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + mov %r8, -44(%rdi) + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_36bytes): - mov -36(%rsi), %r9 - mov -28(%rsi), %r10 - mov -20(%rsi), %r11 - mov -12(%rsi), %rcx - mov -4(%rsi), %edx - mov %r9, -36(%rdi) - mov %r10, -28(%rdi) - mov %r11, -20(%rdi) - mov %rcx, -12(%rdi) - mov %edx, -4(%rdi) - ret + mov -36(%rsi), %r9 + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + mov %r9, -36(%rdi) + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_28bytes): - mov -28(%rsi), %r10 - mov -20(%rsi), %r11 - mov -12(%rsi), %rcx - mov -4(%rsi), %edx - mov %r10, -28(%rdi) - mov %r11, -20(%rdi) - mov %rcx, -12(%rdi) - mov %edx, -4(%rdi) 
- ret + mov -28(%rsi), %r10 + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + mov %r10, -28(%rdi) + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_20bytes): - mov -20(%rsi), %r11 - mov -12(%rsi), %rcx - mov -4(%rsi), %edx - mov %r11, -20(%rdi) - mov %rcx, -12(%rdi) - mov %edx, -4(%rdi) - ret + mov -20(%rsi), %r11 + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + mov %r11, -20(%rdi) + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_12bytes): - mov -12(%rsi), %rcx - mov -4(%rsi), %edx - mov %rcx, -12(%rdi) - mov %edx, -4(%rdi) - ret + mov -12(%rsi), %rcx + mov -4(%rsi), %edx + mov %rcx, -12(%rdi) + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_4bytes): - mov -4(%rsi), %edx - mov %edx, -4(%rdi) - ret + mov -4(%rsi), %edx + mov %edx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_77bytes): - movdqu -77(%rsi), %xmm0 - movdqu -61(%rsi), %xmm1 - mov -45(%rsi), %r8 - mov -37(%rsi), %r9 - mov -29(%rsi), %r10 - mov -21(%rsi), %r11 - mov -13(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -77(%rdi) - movdqu %xmm1, -61(%rdi) - mov %r8, -45(%rdi) - mov %r9, -37(%rdi) - mov %r10, -29(%rdi) - mov %r11, -21(%rdi) - mov %rcx, -13(%rdi) - mov %rdx, -8(%rdi) - ret + movdqu -77(%rsi), %xmm0 + movdqu -61(%rsi), %xmm1 + mov -45(%rsi), %r8 + mov -37(%rsi), %r9 + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -77(%rdi) + movdqu %xmm1, -61(%rdi) + mov %r8, -45(%rdi) + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_69bytes): - movdqu -69(%rsi), %xmm0 - movdqu -53(%rsi), %xmm1 - mov -37(%rsi), %r9 - mov -29(%rsi), %r10 - mov -21(%rsi), %r11 - mov -13(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -69(%rdi) - movdqu %xmm1, -53(%rdi) - mov %r9, -37(%rdi) - mov %r10, -29(%rdi) - mov %r11, -21(%rdi) - mov %rcx, -13(%rdi) - mov 
%rdx, -8(%rdi) - ret + movdqu -69(%rsi), %xmm0 + movdqu -53(%rsi), %xmm1 + mov -37(%rsi), %r9 + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -69(%rdi) + movdqu %xmm1, -53(%rdi) + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_61bytes): - movdqu -61(%rsi), %xmm0 - mov -45(%rsi), %r8 - mov -37(%rsi), %r9 - mov -29(%rsi), %r10 - mov -21(%rsi), %r11 - mov -13(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -61(%rdi) - mov %r8, -45(%rdi) - mov %r9, -37(%rdi) - mov %r10, -29(%rdi) - mov %r11, -21(%rdi) - mov %rcx, -13(%rdi) - mov %rdx, -8(%rdi) - ret + movdqu -61(%rsi), %xmm0 + mov -45(%rsi), %r8 + mov -37(%rsi), %r9 + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -61(%rdi) + mov %r8, -45(%rdi) + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_53bytes): - movdqu -53(%rsi), %xmm0 - mov -45(%rsi), %r8 - mov -37(%rsi), %r9 - mov -29(%rsi), %r10 - mov -21(%rsi), %r11 - mov -13(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -53(%rdi) - mov %r9, -37(%rdi) - mov %r10, -29(%rdi) - mov %r11, -21(%rdi) - mov %rcx, -13(%rdi) - mov %rdx, -8(%rdi) - ret + movdqu -53(%rsi), %xmm0 + mov -45(%rsi), %r8 + mov -37(%rsi), %r9 + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -53(%rdi) + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_45bytes): - mov -45(%rsi), %r8 - mov -37(%rsi), %r9 - mov -29(%rsi), %r10 - mov -21(%rsi), %r11 - mov -13(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r8, -45(%rdi) - mov %r9, -37(%rdi) - mov %r10, -29(%rdi) - mov %r11, -21(%rdi) - mov %rcx, -13(%rdi) - mov %rdx, -8(%rdi) - ret + mov -45(%rsi), %r8 + mov 
-37(%rsi), %r9 + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r8, -45(%rdi) + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_37bytes): - mov -37(%rsi), %r9 - mov -29(%rsi), %r10 - mov -21(%rsi), %r11 - mov -13(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r9, -37(%rdi) - mov %r10, -29(%rdi) - mov %r11, -21(%rdi) - mov %rcx, -13(%rdi) - mov %rdx, -8(%rdi) - ret + mov -37(%rsi), %r9 + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r9, -37(%rdi) + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_29bytes): - mov -29(%rsi), %r10 - mov -21(%rsi), %r11 - mov -13(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r10, -29(%rdi) - mov %r11, -21(%rdi) - mov %rcx, -13(%rdi) - mov %rdx, -8(%rdi) - ret + mov -29(%rsi), %r10 + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r10, -29(%rdi) + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_21bytes): - mov -21(%rsi), %r11 - mov -13(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r11, -21(%rdi) - mov %rcx, -13(%rdi) - mov %rdx, -8(%rdi) - ret + mov -21(%rsi), %r11 + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r11, -21(%rdi) + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_13bytes): - mov -13(%rsi), %rcx - mov -8(%rsi), %rdx - mov %rcx, -13(%rdi) - mov %rdx, -8(%rdi) - ret + mov -13(%rsi), %rcx + mov -8(%rsi), %rdx + mov %rcx, -13(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_5bytes): - mov -5(%rsi), %edx - mov -4(%rsi), %ecx - mov %edx, -5(%rdi) - mov %ecx, -4(%rdi) - ret + mov -5(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -5(%rdi) + mov %ecx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_78bytes): - movdqu -78(%rsi), %xmm0 - movdqu -62(%rsi), %xmm1 - mov 
-46(%rsi), %r8 - mov -38(%rsi), %r9 - mov -30(%rsi), %r10 - mov -22(%rsi), %r11 - mov -14(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -78(%rdi) - movdqu %xmm1, -62(%rdi) - mov %r8, -46(%rdi) - mov %r9, -38(%rdi) - mov %r10, -30(%rdi) - mov %r11, -22(%rdi) - mov %rcx, -14(%rdi) - mov %rdx, -8(%rdi) - ret + movdqu -78(%rsi), %xmm0 + movdqu -62(%rsi), %xmm1 + mov -46(%rsi), %r8 + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -78(%rdi) + movdqu %xmm1, -62(%rdi) + mov %r8, -46(%rdi) + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_70bytes): - movdqu -70(%rsi), %xmm0 - movdqu -54(%rsi), %xmm1 - mov -38(%rsi), %r9 - mov -30(%rsi), %r10 - mov -22(%rsi), %r11 - mov -14(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -70(%rdi) - movdqu %xmm1, -54(%rdi) - mov %r9, -38(%rdi) - mov %r10, -30(%rdi) - mov %r11, -22(%rdi) - mov %rcx, -14(%rdi) - mov %rdx, -8(%rdi) - ret + movdqu -70(%rsi), %xmm0 + movdqu -54(%rsi), %xmm1 + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -70(%rdi) + movdqu %xmm1, -54(%rdi) + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_62bytes): - movdqu -62(%rsi), %xmm0 - mov -46(%rsi), %r8 - mov -38(%rsi), %r9 - mov -30(%rsi), %r10 - mov -22(%rsi), %r11 - mov -14(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -62(%rdi) - mov %r8, -46(%rdi) - mov %r9, -38(%rdi) - mov %r10, -30(%rdi) - mov %r11, -22(%rdi) - mov %rcx, -14(%rdi) - mov %rdx, -8(%rdi) - ret + movdqu -62(%rsi), %xmm0 + mov -46(%rsi), %r8 + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -62(%rdi) + mov %r8, -46(%rdi) + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, 
-22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_54bytes): - movdqu -54(%rsi), %xmm0 - mov -38(%rsi), %r9 - mov -30(%rsi), %r10 - mov -22(%rsi), %r11 - mov -14(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -54(%rdi) - mov %r9, -38(%rdi) - mov %r10, -30(%rdi) - mov %r11, -22(%rdi) - mov %rcx, -14(%rdi) - mov %rdx, -8(%rdi) - ret + movdqu -54(%rsi), %xmm0 + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -54(%rdi) + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_46bytes): - mov -46(%rsi), %r8 - mov -38(%rsi), %r9 - mov -30(%rsi), %r10 - mov -22(%rsi), %r11 - mov -14(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r8, -46(%rdi) - mov %r9, -38(%rdi) - mov %r10, -30(%rdi) - mov %r11, -22(%rdi) - mov %rcx, -14(%rdi) - mov %rdx, -8(%rdi) - ret + mov -46(%rsi), %r8 + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r8, -46(%rdi) + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_38bytes): - mov -38(%rsi), %r9 - mov -30(%rsi), %r10 - mov -22(%rsi), %r11 - mov -14(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r9, -38(%rdi) - mov %r10, -30(%rdi) - mov %r11, -22(%rdi) - mov %rcx, -14(%rdi) - mov %rdx, -8(%rdi) - ret + mov -38(%rsi), %r9 + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r9, -38(%rdi) + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_30bytes): - mov -30(%rsi), %r10 - mov -22(%rsi), %r11 - mov -14(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r10, -30(%rdi) - mov %r11, -22(%rdi) - mov %rcx, -14(%rdi) - mov %rdx, -8(%rdi) - ret + mov -30(%rsi), %r10 + mov -22(%rsi), %r11 + mov -14(%rsi), 
%rcx + mov -8(%rsi), %rdx + mov %r10, -30(%rdi) + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_22bytes): - mov -22(%rsi), %r11 - mov -14(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r11, -22(%rdi) - mov %rcx, -14(%rdi) - mov %rdx, -8(%rdi) - ret + mov -22(%rsi), %r11 + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r11, -22(%rdi) + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_14bytes): - mov -14(%rsi), %rcx - mov -8(%rsi), %rdx - mov %rcx, -14(%rdi) - mov %rdx, -8(%rdi) - ret + mov -14(%rsi), %rcx + mov -8(%rsi), %rdx + mov %rcx, -14(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_6bytes): - mov -6(%rsi), %edx - mov -4(%rsi), %ecx - mov %edx, -6(%rdi) - mov %ecx, -4(%rdi) - ret + mov -6(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -6(%rdi) + mov %ecx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(write_79bytes): - movdqu -79(%rsi), %xmm0 - movdqu -63(%rsi), %xmm1 - mov -47(%rsi), %r8 - mov -39(%rsi), %r9 - mov -31(%rsi), %r10 - mov -23(%rsi), %r11 - mov -15(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -79(%rdi) - movdqu %xmm1, -63(%rdi) - mov %r8, -47(%rdi) - mov %r9, -39(%rdi) - mov %r10, -31(%rdi) - mov %r11, -23(%rdi) - mov %rcx, -15(%rdi) - mov %rdx, -8(%rdi) - ret + movdqu -79(%rsi), %xmm0 + movdqu -63(%rsi), %xmm1 + mov -47(%rsi), %r8 + mov -39(%rsi), %r9 + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -79(%rdi) + movdqu %xmm1, -63(%rdi) + mov %r8, -47(%rdi) + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_71bytes): - movdqu -71(%rsi), %xmm0 - movdqu -55(%rsi), %xmm1 - mov -39(%rsi), %r9 - mov -31(%rsi), %r10 - mov -23(%rsi), %r11 - mov -15(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -71(%rdi) - movdqu %xmm1, -55(%rdi) - mov %r9, -39(%rdi) - mov %r10, -31(%rdi) - mov %r11, -23(%rdi) - mov %rcx, 
-15(%rdi) - mov %rdx, -8(%rdi) - ret + movdqu -71(%rsi), %xmm0 + movdqu -55(%rsi), %xmm1 + mov -39(%rsi), %r9 + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -71(%rdi) + movdqu %xmm1, -55(%rdi) + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_63bytes): - movdqu -63(%rsi), %xmm0 - mov -47(%rsi), %r8 - mov -39(%rsi), %r9 - mov -31(%rsi), %r10 - mov -23(%rsi), %r11 - mov -15(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -63(%rdi) - mov %r8, -47(%rdi) - mov %r9, -39(%rdi) - mov %r10, -31(%rdi) - mov %r11, -23(%rdi) - mov %rcx, -15(%rdi) - mov %rdx, -8(%rdi) - ret + movdqu -63(%rsi), %xmm0 + mov -47(%rsi), %r8 + mov -39(%rsi), %r9 + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -63(%rdi) + mov %r8, -47(%rdi) + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_55bytes): - movdqu -55(%rsi), %xmm0 - mov -39(%rsi), %r9 - mov -31(%rsi), %r10 - mov -23(%rsi), %r11 - mov -15(%rsi), %rcx - mov -8(%rsi), %rdx - movdqu %xmm0, -55(%rdi) - mov %r9, -39(%rdi) - mov %r10, -31(%rdi) - mov %r11, -23(%rdi) - mov %rcx, -15(%rdi) - mov %rdx, -8(%rdi) - ret + movdqu -55(%rsi), %xmm0 + mov -39(%rsi), %r9 + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + movdqu %xmm0, -55(%rdi) + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_47bytes): - mov -47(%rsi), %r8 - mov -39(%rsi), %r9 - mov -31(%rsi), %r10 - mov -23(%rsi), %r11 - mov -15(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r8, -47(%rdi) - mov %r9, -39(%rdi) - mov %r10, -31(%rdi) - mov %r11, -23(%rdi) - mov %rcx, -15(%rdi) - mov %rdx, -8(%rdi) - ret + mov -47(%rsi), %r8 + mov -39(%rsi), %r9 + mov -31(%rsi), 
%r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r8, -47(%rdi) + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_39bytes): - mov -39(%rsi), %r9 - mov -31(%rsi), %r10 - mov -23(%rsi), %r11 - mov -15(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r9, -39(%rdi) - mov %r10, -31(%rdi) - mov %r11, -23(%rdi) - mov %rcx, -15(%rdi) - mov %rdx, -8(%rdi) - ret + mov -39(%rsi), %r9 + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r9, -39(%rdi) + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_31bytes): - mov -31(%rsi), %r10 - mov -23(%rsi), %r11 - mov -15(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r10, -31(%rdi) - mov %r11, -23(%rdi) - mov %rcx, -15(%rdi) - mov %rdx, -8(%rdi) - ret + mov -31(%rsi), %r10 + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r10, -31(%rdi) + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_23bytes): - mov -23(%rsi), %r11 - mov -15(%rsi), %rcx - mov -8(%rsi), %rdx - mov %r11, -23(%rdi) - mov %rcx, -15(%rdi) - mov %rdx, -8(%rdi) - ret + mov -23(%rsi), %r11 + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + mov %r11, -23(%rdi) + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_15bytes): - mov -15(%rsi), %rcx - mov -8(%rsi), %rdx - mov %rcx, -15(%rdi) - mov %rdx, -8(%rdi) - ret + mov -15(%rsi), %rcx + mov -8(%rsi), %rdx + mov %rcx, -15(%rdi) + mov %rdx, -8(%rdi) + ret - .p2align 4 + .p2align 4 L(write_7bytes): - mov -7(%rsi), %edx - mov -4(%rsi), %ecx - mov %edx, -7(%rdi) - mov %ecx, -4(%rdi) - ret + mov -7(%rsi), %edx + mov -4(%rsi), %ecx + mov %edx, -7(%rdi) + mov %ecx, -4(%rdi) + ret - .p2align 4 + .p2align 4 L(large_page_fwd): - movdqu (%rsi), %xmm1 - lea 16(%rsi), %rsi - movdqu %xmm0, (%r8) - movntdq %xmm1, (%rdi) 
- lea 16(%rdi), %rdi - lea -0x90(%rdx), %rdx + movdqu (%rsi), %xmm1 + lea 16(%rsi), %rsi + movdqu %xmm0, (%r8) + movntdq %xmm1, (%rdi) + lea 16(%rdi), %rdi + lea -0x90(%rdx), %rdx #ifdef USE_AS_MEMMOVE - mov %rsi, %r9 - sub %rdi, %r9 - cmp %rdx, %r9 - jae L(memmove_is_memcpy_fwd) - shl $2, %rcx - cmp %rcx, %rdx - jb L(ll_cache_copy_fwd_start) + mov %rsi, %r9 + sub %rdi, %r9 + cmp %rdx, %r9 + jae L(memmove_is_memcpy_fwd) + shl $2, %rcx + cmp %rcx, %rdx + jb L(ll_cache_copy_fwd_start) L(memmove_is_memcpy_fwd): #endif L(large_page_loop): - movdqu (%rsi), %xmm0 - movdqu 0x10(%rsi), %xmm1 - movdqu 0x20(%rsi), %xmm2 - movdqu 0x30(%rsi), %xmm3 - movdqu 0x40(%rsi), %xmm4 - movdqu 0x50(%rsi), %xmm5 - movdqu 0x60(%rsi), %xmm6 - movdqu 0x70(%rsi), %xmm7 - lea 0x80(%rsi), %rsi + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + movdqu 0x40(%rsi), %xmm4 + movdqu 0x50(%rsi), %xmm5 + movdqu 0x60(%rsi), %xmm6 + movdqu 0x70(%rsi), %xmm7 + lea 0x80(%rsi), %rsi - sub $0x80, %rdx - movntdq %xmm0, (%rdi) - movntdq %xmm1, 0x10(%rdi) - movntdq %xmm2, 0x20(%rdi) - movntdq %xmm3, 0x30(%rdi) - movntdq %xmm4, 0x40(%rdi) - movntdq %xmm5, 0x50(%rdi) - movntdq %xmm6, 0x60(%rdi) - movntdq %xmm7, 0x70(%rdi) - lea 0x80(%rdi), %rdi - jae L(large_page_loop) - cmp $-0x40, %rdx - lea 0x80(%rdx), %rdx - jl L(large_page_less_64bytes) + sub $0x80, %rdx + movntdq %xmm0, (%rdi) + movntdq %xmm1, 0x10(%rdi) + movntdq %xmm2, 0x20(%rdi) + movntdq %xmm3, 0x30(%rdi) + movntdq %xmm4, 0x40(%rdi) + movntdq %xmm5, 0x50(%rdi) + movntdq %xmm6, 0x60(%rdi) + movntdq %xmm7, 0x70(%rdi) + lea 0x80(%rdi), %rdi + jae L(large_page_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(large_page_less_64bytes) - movdqu (%rsi), %xmm0 - movdqu 0x10(%rsi), %xmm1 - movdqu 0x20(%rsi), %xmm2 - movdqu 0x30(%rsi), %xmm3 - lea 0x40(%rsi), %rsi + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + lea 0x40(%rsi), %rsi - movntdq 
%xmm0, (%rdi) - movntdq %xmm1, 0x10(%rdi) - movntdq %xmm2, 0x20(%rdi) - movntdq %xmm3, 0x30(%rdi) - lea 0x40(%rdi), %rdi - sub $0x40, %rdx + movntdq %xmm0, (%rdi) + movntdq %xmm1, 0x10(%rdi) + movntdq %xmm2, 0x20(%rdi) + movntdq %xmm3, 0x30(%rdi) + lea 0x40(%rdi), %rdi + sub $0x40, %rdx L(large_page_less_64bytes): - add %rdx, %rsi - add %rdx, %rdi - sfence - BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + add %rdx, %rsi + add %rdx, %rdi + sfence + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) #ifdef USE_AS_MEMMOVE - .p2align 4 + .p2align 4 L(ll_cache_copy_fwd_start): - prefetcht0 0x1c0(%rsi) - prefetcht0 0x200(%rsi) - movdqu (%rsi), %xmm0 - movdqu 0x10(%rsi), %xmm1 - movdqu 0x20(%rsi), %xmm2 - movdqu 0x30(%rsi), %xmm3 - movdqu 0x40(%rsi), %xmm4 - movdqu 0x50(%rsi), %xmm5 - movdqu 0x60(%rsi), %xmm6 - movdqu 0x70(%rsi), %xmm7 - lea 0x80(%rsi), %rsi + prefetcht0 0x1c0(%rsi) + prefetcht0 0x200(%rsi) + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + movdqu 0x40(%rsi), %xmm4 + movdqu 0x50(%rsi), %xmm5 + movdqu 0x60(%rsi), %xmm6 + movdqu 0x70(%rsi), %xmm7 + lea 0x80(%rsi), %rsi - sub $0x80, %rdx - movaps %xmm0, (%rdi) - movaps %xmm1, 0x10(%rdi) - movaps %xmm2, 0x20(%rdi) - movaps %xmm3, 0x30(%rdi) - movaps %xmm4, 0x40(%rdi) - movaps %xmm5, 0x50(%rdi) - movaps %xmm6, 0x60(%rdi) - movaps %xmm7, 0x70(%rdi) - lea 0x80(%rdi), %rdi - jae L(ll_cache_copy_fwd_start) - cmp $-0x40, %rdx - lea 0x80(%rdx), %rdx - jl L(large_page_ll_less_fwd_64bytes) + sub $0x80, %rdx + movaps %xmm0, (%rdi) + movaps %xmm1, 0x10(%rdi) + movaps %xmm2, 0x20(%rdi) + movaps %xmm3, 0x30(%rdi) + movaps %xmm4, 0x40(%rdi) + movaps %xmm5, 0x50(%rdi) + movaps %xmm6, 0x60(%rdi) + movaps %xmm7, 0x70(%rdi) + lea 0x80(%rdi), %rdi + jae L(ll_cache_copy_fwd_start) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(large_page_ll_less_fwd_64bytes) - movdqu (%rsi), %xmm0 - movdqu 0x10(%rsi), %xmm1 - movdqu 0x20(%rsi), %xmm2 - movdqu 0x30(%rsi), 
%xmm3 - lea 0x40(%rsi), %rsi + movdqu (%rsi), %xmm0 + movdqu 0x10(%rsi), %xmm1 + movdqu 0x20(%rsi), %xmm2 + movdqu 0x30(%rsi), %xmm3 + lea 0x40(%rsi), %rsi - movaps %xmm0, (%rdi) - movaps %xmm1, 0x10(%rdi) - movaps %xmm2, 0x20(%rdi) - movaps %xmm3, 0x30(%rdi) - lea 0x40(%rdi), %rdi - sub $0x40, %rdx + movaps %xmm0, (%rdi) + movaps %xmm1, 0x10(%rdi) + movaps %xmm2, 0x20(%rdi) + movaps %xmm3, 0x30(%rdi) + lea 0x40(%rdi), %rdi + sub $0x40, %rdx L(large_page_ll_less_fwd_64bytes): - add %rdx, %rsi - add %rdx, %rdi - BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + add %rdx, %rsi + add %rdx, %rdi + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) #endif - .p2align 4 + .p2align 4 L(large_page_bwd): - movdqu -0x10(%rsi), %xmm1 - lea -16(%rsi), %rsi - movdqu %xmm0, (%r8) - movdqa %xmm1, -0x10(%rdi) - lea -16(%rdi), %rdi - lea -0x90(%rdx), %rdx + movdqu -0x10(%rsi), %xmm1 + lea -16(%rsi), %rsi + movdqu %xmm0, (%r8) + movdqa %xmm1, -0x10(%rdi) + lea -16(%rdi), %rdi + lea -0x90(%rdx), %rdx #ifdef USE_AS_MEMMOVE - mov %rdi, %r9 - sub %rsi, %r9 - cmp %rdx, %r9 - jae L(memmove_is_memcpy_bwd) - cmp %rcx, %r9 - jb L(ll_cache_copy_bwd_start) + mov %rdi, %r9 + sub %rsi, %r9 + cmp %rdx, %r9 + jae L(memmove_is_memcpy_bwd) + cmp %rcx, %r9 + jb L(ll_cache_copy_bwd_start) L(memmove_is_memcpy_bwd): #endif L(large_page_bwd_loop): - movdqu -0x10(%rsi), %xmm0 - movdqu -0x20(%rsi), %xmm1 - movdqu -0x30(%rsi), %xmm2 - movdqu -0x40(%rsi), %xmm3 - movdqu -0x50(%rsi), %xmm4 - movdqu -0x60(%rsi), %xmm5 - movdqu -0x70(%rsi), %xmm6 - movdqu -0x80(%rsi), %xmm7 - lea -0x80(%rsi), %rsi + movdqu -0x10(%rsi), %xmm0 + movdqu -0x20(%rsi), %xmm1 + movdqu -0x30(%rsi), %xmm2 + movdqu -0x40(%rsi), %xmm3 + movdqu -0x50(%rsi), %xmm4 + movdqu -0x60(%rsi), %xmm5 + movdqu -0x70(%rsi), %xmm6 + movdqu -0x80(%rsi), %xmm7 + lea -0x80(%rsi), %rsi - sub $0x80, %rdx - movntdq %xmm0, -0x10(%rdi) - movntdq %xmm1, -0x20(%rdi) - movntdq %xmm2, -0x30(%rdi) - movntdq %xmm3, -0x40(%rdi) - movntdq %xmm4, 
-0x50(%rdi) - movntdq %xmm5, -0x60(%rdi) - movntdq %xmm6, -0x70(%rdi) - movntdq %xmm7, -0x80(%rdi) - lea -0x80(%rdi), %rdi - jae L(large_page_bwd_loop) - cmp $-0x40, %rdx - lea 0x80(%rdx), %rdx - jl L(large_page_less_bwd_64bytes) + sub $0x80, %rdx + movntdq %xmm0, -0x10(%rdi) + movntdq %xmm1, -0x20(%rdi) + movntdq %xmm2, -0x30(%rdi) + movntdq %xmm3, -0x40(%rdi) + movntdq %xmm4, -0x50(%rdi) + movntdq %xmm5, -0x60(%rdi) + movntdq %xmm6, -0x70(%rdi) + movntdq %xmm7, -0x80(%rdi) + lea -0x80(%rdi), %rdi + jae L(large_page_bwd_loop) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(large_page_less_bwd_64bytes) - movdqu -0x10(%rsi), %xmm0 - movdqu -0x20(%rsi), %xmm1 - movdqu -0x30(%rsi), %xmm2 - movdqu -0x40(%rsi), %xmm3 - lea -0x40(%rsi), %rsi + movdqu -0x10(%rsi), %xmm0 + movdqu -0x20(%rsi), %xmm1 + movdqu -0x30(%rsi), %xmm2 + movdqu -0x40(%rsi), %xmm3 + lea -0x40(%rsi), %rsi - movntdq %xmm0, -0x10(%rdi) - movntdq %xmm1, -0x20(%rdi) - movntdq %xmm2, -0x30(%rdi) - movntdq %xmm3, -0x40(%rdi) - lea -0x40(%rdi), %rdi - sub $0x40, %rdx + movntdq %xmm0, -0x10(%rdi) + movntdq %xmm1, -0x20(%rdi) + movntdq %xmm2, -0x30(%rdi) + movntdq %xmm3, -0x40(%rdi) + lea -0x40(%rdi), %rdi + sub $0x40, %rdx L(large_page_less_bwd_64bytes): - sfence - BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + sfence + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) #ifdef USE_AS_MEMMOVE - .p2align 4 + .p2align 4 L(ll_cache_copy_bwd_start): - prefetcht0 -0x1c0(%rsi) - prefetcht0 -0x200(%rsi) - movdqu -0x10(%rsi), %xmm0 - movdqu -0x20(%rsi), %xmm1 - movdqu -0x30(%rsi), %xmm2 - movdqu -0x40(%rsi), %xmm3 - movdqu -0x50(%rsi), %xmm4 - movdqu -0x60(%rsi), %xmm5 - movdqu -0x70(%rsi), %xmm6 - movdqu -0x80(%rsi), %xmm7 - lea -0x80(%rsi), %rsi + prefetcht0 -0x1c0(%rsi) + prefetcht0 -0x200(%rsi) + movdqu -0x10(%rsi), %xmm0 + movdqu -0x20(%rsi), %xmm1 + movdqu -0x30(%rsi), %xmm2 + movdqu -0x40(%rsi), %xmm3 + movdqu -0x50(%rsi), %xmm4 + movdqu -0x60(%rsi), %xmm5 + movdqu -0x70(%rsi), %xmm6 + movdqu 
-0x80(%rsi), %xmm7 + lea -0x80(%rsi), %rsi - sub $0x80, %rdx - movaps %xmm0, -0x10(%rdi) - movaps %xmm1, -0x20(%rdi) - movaps %xmm2, -0x30(%rdi) - movaps %xmm3, -0x40(%rdi) - movaps %xmm4, -0x50(%rdi) - movaps %xmm5, -0x60(%rdi) - movaps %xmm6, -0x70(%rdi) - movaps %xmm7, -0x80(%rdi) - lea -0x80(%rdi), %rdi - jae L(ll_cache_copy_bwd_start) - cmp $-0x40, %rdx - lea 0x80(%rdx), %rdx - jl L(large_page_ll_less_bwd_64bytes) + sub $0x80, %rdx + movaps %xmm0, -0x10(%rdi) + movaps %xmm1, -0x20(%rdi) + movaps %xmm2, -0x30(%rdi) + movaps %xmm3, -0x40(%rdi) + movaps %xmm4, -0x50(%rdi) + movaps %xmm5, -0x60(%rdi) + movaps %xmm6, -0x70(%rdi) + movaps %xmm7, -0x80(%rdi) + lea -0x80(%rdi), %rdi + jae L(ll_cache_copy_bwd_start) + cmp $-0x40, %rdx + lea 0x80(%rdx), %rdx + jl L(large_page_ll_less_bwd_64bytes) - movdqu -0x10(%rsi), %xmm0 - movdqu -0x20(%rsi), %xmm1 - movdqu -0x30(%rsi), %xmm2 - movdqu -0x40(%rsi), %xmm3 - lea -0x40(%rsi), %rsi + movdqu -0x10(%rsi), %xmm0 + movdqu -0x20(%rsi), %xmm1 + movdqu -0x30(%rsi), %xmm2 + movdqu -0x40(%rsi), %xmm3 + lea -0x40(%rsi), %rsi - movaps %xmm0, -0x10(%rdi) - movaps %xmm1, -0x20(%rdi) - movaps %xmm2, -0x30(%rdi) - movaps %xmm3, -0x40(%rdi) - lea -0x40(%rdi), %rdi - sub $0x40, %rdx + movaps %xmm0, -0x10(%rdi) + movaps %xmm1, -0x20(%rdi) + movaps %xmm2, -0x30(%rdi) + movaps %xmm3, -0x40(%rdi) + lea -0x40(%rdi), %rdi + sub $0x40, %rdx L(large_page_ll_less_bwd_64bytes): - BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) + BRANCH_TO_JMPTBL_ENTRY (L(table_less_80bytes), %rdx, 4) #endif END (MEMCPY) - .section .rodata.ssse3,"a",@progbits - .p2align 3 + .section .rodata.ssse3,"a",@progbits + .p2align 3 L(table_less_80bytes): - .int JMPTBL (L(write_0bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_1bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_2bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_3bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_4bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_5bytes), 
L(table_less_80bytes)) - .int JMPTBL (L(write_6bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_7bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_8bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_9bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_10bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_11bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_12bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_13bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_14bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_15bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_16bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_17bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_18bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_19bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_20bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_21bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_22bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_23bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_24bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_25bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_26bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_27bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_28bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_29bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_30bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_31bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_32bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_33bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_34bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_35bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_36bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_37bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_38bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_39bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_40bytes), L(table_less_80bytes)) - .int JMPTBL 
(L(write_41bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_42bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_43bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_44bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_45bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_46bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_47bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_48bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_49bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_50bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_51bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_52bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_53bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_54bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_55bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_56bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_57bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_58bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_59bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_60bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_61bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_62bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_63bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_64bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_65bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_66bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_67bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_68bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_69bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_70bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_71bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_72bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_73bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_74bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_75bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_76bytes), 
L(table_less_80bytes)) - .int JMPTBL (L(write_77bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_78bytes), L(table_less_80bytes)) - .int JMPTBL (L(write_79bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_0bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_1bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_2bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_3bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_4bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_5bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_6bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_7bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_8bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_9bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_10bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_11bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_12bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_13bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_14bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_15bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_16bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_17bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_18bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_19bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_20bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_21bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_22bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_23bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_24bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_25bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_26bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_27bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_28bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_29bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_30bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_31bytes), L(table_less_80bytes)) + .int JMPTBL 
(L(write_32bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_33bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_34bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_35bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_36bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_37bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_38bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_39bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_40bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_41bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_42bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_43bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_44bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_45bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_46bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_47bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_48bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_49bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_50bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_51bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_52bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_53bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_54bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_55bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_56bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_57bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_58bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_59bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_60bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_61bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_62bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_63bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_64bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_65bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_66bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_67bytes), 
L(table_less_80bytes)) + .int JMPTBL (L(write_68bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_69bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_70bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_71bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_72bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_73bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_74bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_75bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_76bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_77bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_78bytes), L(table_less_80bytes)) + .int JMPTBL (L(write_79bytes), L(table_less_80bytes)) - .p2align 3 + .p2align 3 L(shl_table): - .int JMPTBL (L(shl_0), L(shl_table)) - .int JMPTBL (L(shl_1), L(shl_table)) - .int JMPTBL (L(shl_2), L(shl_table)) - .int JMPTBL (L(shl_3), L(shl_table)) - .int JMPTBL (L(shl_4), L(shl_table)) - .int JMPTBL (L(shl_5), L(shl_table)) - .int JMPTBL (L(shl_6), L(shl_table)) - .int JMPTBL (L(shl_7), L(shl_table)) - .int JMPTBL (L(shl_8), L(shl_table)) - .int JMPTBL (L(shl_9), L(shl_table)) - .int JMPTBL (L(shl_10), L(shl_table)) - .int JMPTBL (L(shl_11), L(shl_table)) - .int JMPTBL (L(shl_12), L(shl_table)) - .int JMPTBL (L(shl_13), L(shl_table)) - .int JMPTBL (L(shl_14), L(shl_table)) - .int JMPTBL (L(shl_15), L(shl_table)) + .int JMPTBL (L(shl_0), L(shl_table)) + .int JMPTBL (L(shl_1), L(shl_table)) + .int JMPTBL (L(shl_2), L(shl_table)) + .int JMPTBL (L(shl_3), L(shl_table)) + .int JMPTBL (L(shl_4), L(shl_table)) + .int JMPTBL (L(shl_5), L(shl_table)) + .int JMPTBL (L(shl_6), L(shl_table)) + .int JMPTBL (L(shl_7), L(shl_table)) + .int JMPTBL (L(shl_8), L(shl_table)) + .int JMPTBL (L(shl_9), L(shl_table)) + .int JMPTBL (L(shl_10), L(shl_table)) + .int JMPTBL (L(shl_11), L(shl_table)) + .int JMPTBL (L(shl_12), L(shl_table)) + .int JMPTBL (L(shl_13), L(shl_table)) + .int JMPTBL (L(shl_14), L(shl_table)) + .int JMPTBL (L(shl_15), L(shl_table)) - 
.p2align 3 + .p2align 3 L(shl_table_bwd): - .int JMPTBL (L(shl_0_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_1_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_2_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_3_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_4_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_5_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_6_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_7_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_8_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_9_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_10_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_11_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_12_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_13_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_14_bwd), L(shl_table_bwd)) - .int JMPTBL (L(shl_15_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_0_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_1_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_2_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_3_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_4_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_5_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_6_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_7_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_8_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_9_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_10_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_11_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_12_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_13_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_14_bwd), L(shl_table_bwd)) + .int JMPTBL (L(shl_15_bwd), L(shl_table_bwd)) #endif diff --git a/utils/memcpy-bench/glibc/memmove-avx-unaligned-erms.S b/utils/memcpy-bench/glibc/memmove-avx-unaligned-erms.S index 9ee6f0a71c3..2de73b29a85 100644 --- a/utils/memcpy-bench/glibc/memmove-avx-unaligned-erms.S +++ b/utils/memcpy-bench/glibc/memmove-avx-unaligned-erms.S @@ -1,12 +1,12 @@ #if 1 -# define VEC_SIZE 32 -# define VEC(i) ymm##i -# define VMOVNT vmovntdq -# define VMOVU vmovdqu -# define VMOVA vmovdqa +# define 
VEC_SIZE 32 +# define VEC(i) ymm##i +# define VMOVNT vmovntdq +# define VMOVU vmovdqu +# define VMOVA vmovdqa -# define SECTION(p) p##.avx -# define MEMMOVE_SYMBOL(p,s) p##_avx_##s +# define SECTION(p) p##.avx +# define MEMMOVE_SYMBOL(p,s) p##_avx_##s # include "memmove-vec-unaligned-erms.S" #endif diff --git a/utils/memcpy-bench/glibc/memmove-avx512-no-vzeroupper.S b/utils/memcpy-bench/glibc/memmove-avx512-no-vzeroupper.S index b14d92fd6a8..3effa845274 100644 --- a/utils/memcpy-bench/glibc/memmove-avx512-no-vzeroupper.S +++ b/utils/memcpy-bench/glibc/memmove-avx512-no-vzeroupper.S @@ -22,396 +22,396 @@ # include "asm-syntax.h" - .section .text.avx512,"ax",@progbits + .section .text.avx512,"ax",@progbits ENTRY (__mempcpy_chk_avx512_no_vzeroupper) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (__mempcpy_chk_avx512_no_vzeroupper) ENTRY (__mempcpy_avx512_no_vzeroupper) - mov %RDI_LP, %RAX_LP - add %RDX_LP, %RAX_LP - jmp L(start) + mov %RDI_LP, %RAX_LP + add %RDX_LP, %RAX_LP + jmp L(start) END (__mempcpy_avx512_no_vzeroupper) ENTRY (__memmove_chk_avx512_no_vzeroupper) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (__memmove_chk_avx512_no_vzeroupper) ENTRY (__memmove_avx512_no_vzeroupper) - mov %RDI_LP, %RAX_LP + mov %RDI_LP, %RAX_LP # ifdef USE_AS_MEMPCPY - add %RDX_LP, %RAX_LP + add %RDX_LP, %RAX_LP # endif L(start): # ifdef __ILP32__ - /* Clear the upper 32 bits. */ - mov %edx, %edx + /* Clear the upper 32 bits. 
*/ + mov %edx, %edx # endif - lea (%rsi, %rdx), %rcx - lea (%rdi, %rdx), %r9 - cmp $512, %rdx - ja L(512bytesormore) + lea (%rsi, %rdx), %rcx + lea (%rdi, %rdx), %r9 + cmp $512, %rdx + ja L(512bytesormore) L(check): - cmp $16, %rdx - jbe L(less_16bytes) - cmp $256, %rdx - jb L(less_256bytes) - vmovups (%rsi), %zmm0 - vmovups 0x40(%rsi), %zmm1 - vmovups 0x80(%rsi), %zmm2 - vmovups 0xC0(%rsi), %zmm3 - vmovups -0x100(%rcx), %zmm4 - vmovups -0xC0(%rcx), %zmm5 - vmovups -0x80(%rcx), %zmm6 - vmovups -0x40(%rcx), %zmm7 - vmovups %zmm0, (%rdi) - vmovups %zmm1, 0x40(%rdi) - vmovups %zmm2, 0x80(%rdi) - vmovups %zmm3, 0xC0(%rdi) - vmovups %zmm4, -0x100(%r9) - vmovups %zmm5, -0xC0(%r9) - vmovups %zmm6, -0x80(%r9) - vmovups %zmm7, -0x40(%r9) - ret + cmp $16, %rdx + jbe L(less_16bytes) + cmp $256, %rdx + jb L(less_256bytes) + vmovups (%rsi), %zmm0 + vmovups 0x40(%rsi), %zmm1 + vmovups 0x80(%rsi), %zmm2 + vmovups 0xC0(%rsi), %zmm3 + vmovups -0x100(%rcx), %zmm4 + vmovups -0xC0(%rcx), %zmm5 + vmovups -0x80(%rcx), %zmm6 + vmovups -0x40(%rcx), %zmm7 + vmovups %zmm0, (%rdi) + vmovups %zmm1, 0x40(%rdi) + vmovups %zmm2, 0x80(%rdi) + vmovups %zmm3, 0xC0(%rdi) + vmovups %zmm4, -0x100(%r9) + vmovups %zmm5, -0xC0(%r9) + vmovups %zmm6, -0x80(%r9) + vmovups %zmm7, -0x40(%r9) + ret L(less_256bytes): - cmp $128, %dl - jb L(less_128bytes) - vmovups (%rsi), %zmm0 - vmovups 0x40(%rsi), %zmm1 - vmovups -0x80(%rcx), %zmm2 - vmovups -0x40(%rcx), %zmm3 - vmovups %zmm0, (%rdi) - vmovups %zmm1, 0x40(%rdi) - vmovups %zmm2, -0x80(%r9) - vmovups %zmm3, -0x40(%r9) - ret + cmp $128, %dl + jb L(less_128bytes) + vmovups (%rsi), %zmm0 + vmovups 0x40(%rsi), %zmm1 + vmovups -0x80(%rcx), %zmm2 + vmovups -0x40(%rcx), %zmm3 + vmovups %zmm0, (%rdi) + vmovups %zmm1, 0x40(%rdi) + vmovups %zmm2, -0x80(%r9) + vmovups %zmm3, -0x40(%r9) + ret L(less_128bytes): - cmp $64, %dl - jb L(less_64bytes) - vmovdqu (%rsi), %ymm0 - vmovdqu 0x20(%rsi), %ymm1 - vmovdqu -0x40(%rcx), %ymm2 - vmovdqu -0x20(%rcx), %ymm3 - vmovdqu %ymm0, 
(%rdi) - vmovdqu %ymm1, 0x20(%rdi) - vmovdqu %ymm2, -0x40(%r9) - vmovdqu %ymm3, -0x20(%r9) - ret + cmp $64, %dl + jb L(less_64bytes) + vmovdqu (%rsi), %ymm0 + vmovdqu 0x20(%rsi), %ymm1 + vmovdqu -0x40(%rcx), %ymm2 + vmovdqu -0x20(%rcx), %ymm3 + vmovdqu %ymm0, (%rdi) + vmovdqu %ymm1, 0x20(%rdi) + vmovdqu %ymm2, -0x40(%r9) + vmovdqu %ymm3, -0x20(%r9) + ret L(less_64bytes): - cmp $32, %dl - jb L(less_32bytes) - vmovdqu (%rsi), %ymm0 - vmovdqu -0x20(%rcx), %ymm1 - vmovdqu %ymm0, (%rdi) - vmovdqu %ymm1, -0x20(%r9) - ret + cmp $32, %dl + jb L(less_32bytes) + vmovdqu (%rsi), %ymm0 + vmovdqu -0x20(%rcx), %ymm1 + vmovdqu %ymm0, (%rdi) + vmovdqu %ymm1, -0x20(%r9) + ret L(less_32bytes): - vmovdqu (%rsi), %xmm0 - vmovdqu -0x10(%rcx), %xmm1 - vmovdqu %xmm0, (%rdi) - vmovdqu %xmm1, -0x10(%r9) - ret + vmovdqu (%rsi), %xmm0 + vmovdqu -0x10(%rcx), %xmm1 + vmovdqu %xmm0, (%rdi) + vmovdqu %xmm1, -0x10(%r9) + ret L(less_16bytes): - cmp $8, %dl - jb L(less_8bytes) - movq (%rsi), %rsi - movq -0x8(%rcx), %rcx - movq %rsi, (%rdi) - movq %rcx, -0x8(%r9) - ret + cmp $8, %dl + jb L(less_8bytes) + movq (%rsi), %rsi + movq -0x8(%rcx), %rcx + movq %rsi, (%rdi) + movq %rcx, -0x8(%r9) + ret L(less_8bytes): - cmp $4, %dl - jb L(less_4bytes) - mov (%rsi), %esi - mov -0x4(%rcx), %ecx - mov %esi, (%rdi) - mov %ecx, -0x4(%r9) - ret + cmp $4, %dl + jb L(less_4bytes) + mov (%rsi), %esi + mov -0x4(%rcx), %ecx + mov %esi, (%rdi) + mov %ecx, -0x4(%r9) + ret L(less_4bytes): - cmp $2, %dl - jb L(less_2bytes) - mov (%rsi), %si - mov -0x2(%rcx), %cx - mov %si, (%rdi) - mov %cx, -0x2(%r9) - ret + cmp $2, %dl + jb L(less_2bytes) + mov (%rsi), %si + mov -0x2(%rcx), %cx + mov %si, (%rdi) + mov %cx, -0x2(%r9) + ret L(less_2bytes): - cmp $1, %dl - jb L(less_1bytes) - mov (%rsi), %cl - mov %cl, (%rdi) + cmp $1, %dl + jb L(less_1bytes) + mov (%rsi), %cl + mov %cl, (%rdi) L(less_1bytes): - ret + ret L(512bytesormore): # ifdef SHARED_CACHE_SIZE_HALF - mov $SHARED_CACHE_SIZE_HALF, %r8 + mov $SHARED_CACHE_SIZE_HALF, %r8 # 
else - mov __x86_shared_cache_size_half(%rip), %r8 + mov __x86_shared_cache_size_half(%rip), %r8 # endif - cmp %r8, %rdx - jae L(preloop_large) - cmp $1024, %rdx - ja L(1024bytesormore) - prefetcht1 (%rsi) - prefetcht1 0x40(%rsi) - prefetcht1 0x80(%rsi) - prefetcht1 0xC0(%rsi) - prefetcht1 0x100(%rsi) - prefetcht1 0x140(%rsi) - prefetcht1 0x180(%rsi) - prefetcht1 0x1C0(%rsi) - prefetcht1 -0x200(%rcx) - prefetcht1 -0x1C0(%rcx) - prefetcht1 -0x180(%rcx) - prefetcht1 -0x140(%rcx) - prefetcht1 -0x100(%rcx) - prefetcht1 -0xC0(%rcx) - prefetcht1 -0x80(%rcx) - prefetcht1 -0x40(%rcx) - vmovups (%rsi), %zmm0 - vmovups 0x40(%rsi), %zmm1 - vmovups 0x80(%rsi), %zmm2 - vmovups 0xC0(%rsi), %zmm3 - vmovups 0x100(%rsi), %zmm4 - vmovups 0x140(%rsi), %zmm5 - vmovups 0x180(%rsi), %zmm6 - vmovups 0x1C0(%rsi), %zmm7 - vmovups -0x200(%rcx), %zmm8 - vmovups -0x1C0(%rcx), %zmm9 - vmovups -0x180(%rcx), %zmm10 - vmovups -0x140(%rcx), %zmm11 - vmovups -0x100(%rcx), %zmm12 - vmovups -0xC0(%rcx), %zmm13 - vmovups -0x80(%rcx), %zmm14 - vmovups -0x40(%rcx), %zmm15 - vmovups %zmm0, (%rdi) - vmovups %zmm1, 0x40(%rdi) - vmovups %zmm2, 0x80(%rdi) - vmovups %zmm3, 0xC0(%rdi) - vmovups %zmm4, 0x100(%rdi) - vmovups %zmm5, 0x140(%rdi) - vmovups %zmm6, 0x180(%rdi) - vmovups %zmm7, 0x1C0(%rdi) - vmovups %zmm8, -0x200(%r9) - vmovups %zmm9, -0x1C0(%r9) - vmovups %zmm10, -0x180(%r9) - vmovups %zmm11, -0x140(%r9) - vmovups %zmm12, -0x100(%r9) - vmovups %zmm13, -0xC0(%r9) - vmovups %zmm14, -0x80(%r9) - vmovups %zmm15, -0x40(%r9) - ret + cmp %r8, %rdx + jae L(preloop_large) + cmp $1024, %rdx + ja L(1024bytesormore) + prefetcht1 (%rsi) + prefetcht1 0x40(%rsi) + prefetcht1 0x80(%rsi) + prefetcht1 0xC0(%rsi) + prefetcht1 0x100(%rsi) + prefetcht1 0x140(%rsi) + prefetcht1 0x180(%rsi) + prefetcht1 0x1C0(%rsi) + prefetcht1 -0x200(%rcx) + prefetcht1 -0x1C0(%rcx) + prefetcht1 -0x180(%rcx) + prefetcht1 -0x140(%rcx) + prefetcht1 -0x100(%rcx) + prefetcht1 -0xC0(%rcx) + prefetcht1 -0x80(%rcx) + prefetcht1 -0x40(%rcx) + 
vmovups (%rsi), %zmm0 + vmovups 0x40(%rsi), %zmm1 + vmovups 0x80(%rsi), %zmm2 + vmovups 0xC0(%rsi), %zmm3 + vmovups 0x100(%rsi), %zmm4 + vmovups 0x140(%rsi), %zmm5 + vmovups 0x180(%rsi), %zmm6 + vmovups 0x1C0(%rsi), %zmm7 + vmovups -0x200(%rcx), %zmm8 + vmovups -0x1C0(%rcx), %zmm9 + vmovups -0x180(%rcx), %zmm10 + vmovups -0x140(%rcx), %zmm11 + vmovups -0x100(%rcx), %zmm12 + vmovups -0xC0(%rcx), %zmm13 + vmovups -0x80(%rcx), %zmm14 + vmovups -0x40(%rcx), %zmm15 + vmovups %zmm0, (%rdi) + vmovups %zmm1, 0x40(%rdi) + vmovups %zmm2, 0x80(%rdi) + vmovups %zmm3, 0xC0(%rdi) + vmovups %zmm4, 0x100(%rdi) + vmovups %zmm5, 0x140(%rdi) + vmovups %zmm6, 0x180(%rdi) + vmovups %zmm7, 0x1C0(%rdi) + vmovups %zmm8, -0x200(%r9) + vmovups %zmm9, -0x1C0(%r9) + vmovups %zmm10, -0x180(%r9) + vmovups %zmm11, -0x140(%r9) + vmovups %zmm12, -0x100(%r9) + vmovups %zmm13, -0xC0(%r9) + vmovups %zmm14, -0x80(%r9) + vmovups %zmm15, -0x40(%r9) + ret L(1024bytesormore): - cmp %rsi, %rdi - ja L(1024bytesormore_bkw) - sub $512, %r9 - vmovups -0x200(%rcx), %zmm8 - vmovups -0x1C0(%rcx), %zmm9 - vmovups -0x180(%rcx), %zmm10 - vmovups -0x140(%rcx), %zmm11 - vmovups -0x100(%rcx), %zmm12 - vmovups -0xC0(%rcx), %zmm13 - vmovups -0x80(%rcx), %zmm14 - vmovups -0x40(%rcx), %zmm15 - prefetcht1 (%rsi) - prefetcht1 0x40(%rsi) - prefetcht1 0x80(%rsi) - prefetcht1 0xC0(%rsi) - prefetcht1 0x100(%rsi) - prefetcht1 0x140(%rsi) - prefetcht1 0x180(%rsi) - prefetcht1 0x1C0(%rsi) + cmp %rsi, %rdi + ja L(1024bytesormore_bkw) + sub $512, %r9 + vmovups -0x200(%rcx), %zmm8 + vmovups -0x1C0(%rcx), %zmm9 + vmovups -0x180(%rcx), %zmm10 + vmovups -0x140(%rcx), %zmm11 + vmovups -0x100(%rcx), %zmm12 + vmovups -0xC0(%rcx), %zmm13 + vmovups -0x80(%rcx), %zmm14 + vmovups -0x40(%rcx), %zmm15 + prefetcht1 (%rsi) + prefetcht1 0x40(%rsi) + prefetcht1 0x80(%rsi) + prefetcht1 0xC0(%rsi) + prefetcht1 0x100(%rsi) + prefetcht1 0x140(%rsi) + prefetcht1 0x180(%rsi) + prefetcht1 0x1C0(%rsi) /* Loop with unaligned memory access. 
*/ L(gobble_512bytes_loop): - vmovups (%rsi), %zmm0 - vmovups 0x40(%rsi), %zmm1 - vmovups 0x80(%rsi), %zmm2 - vmovups 0xC0(%rsi), %zmm3 - vmovups 0x100(%rsi), %zmm4 - vmovups 0x140(%rsi), %zmm5 - vmovups 0x180(%rsi), %zmm6 - vmovups 0x1C0(%rsi), %zmm7 - add $512, %rsi - prefetcht1 (%rsi) - prefetcht1 0x40(%rsi) - prefetcht1 0x80(%rsi) - prefetcht1 0xC0(%rsi) - prefetcht1 0x100(%rsi) - prefetcht1 0x140(%rsi) - prefetcht1 0x180(%rsi) - prefetcht1 0x1C0(%rsi) - vmovups %zmm0, (%rdi) - vmovups %zmm1, 0x40(%rdi) - vmovups %zmm2, 0x80(%rdi) - vmovups %zmm3, 0xC0(%rdi) - vmovups %zmm4, 0x100(%rdi) - vmovups %zmm5, 0x140(%rdi) - vmovups %zmm6, 0x180(%rdi) - vmovups %zmm7, 0x1C0(%rdi) - add $512, %rdi - cmp %r9, %rdi - jb L(gobble_512bytes_loop) - vmovups %zmm8, (%r9) - vmovups %zmm9, 0x40(%r9) - vmovups %zmm10, 0x80(%r9) - vmovups %zmm11, 0xC0(%r9) - vmovups %zmm12, 0x100(%r9) - vmovups %zmm13, 0x140(%r9) - vmovups %zmm14, 0x180(%r9) - vmovups %zmm15, 0x1C0(%r9) - ret + vmovups (%rsi), %zmm0 + vmovups 0x40(%rsi), %zmm1 + vmovups 0x80(%rsi), %zmm2 + vmovups 0xC0(%rsi), %zmm3 + vmovups 0x100(%rsi), %zmm4 + vmovups 0x140(%rsi), %zmm5 + vmovups 0x180(%rsi), %zmm6 + vmovups 0x1C0(%rsi), %zmm7 + add $512, %rsi + prefetcht1 (%rsi) + prefetcht1 0x40(%rsi) + prefetcht1 0x80(%rsi) + prefetcht1 0xC0(%rsi) + prefetcht1 0x100(%rsi) + prefetcht1 0x140(%rsi) + prefetcht1 0x180(%rsi) + prefetcht1 0x1C0(%rsi) + vmovups %zmm0, (%rdi) + vmovups %zmm1, 0x40(%rdi) + vmovups %zmm2, 0x80(%rdi) + vmovups %zmm3, 0xC0(%rdi) + vmovups %zmm4, 0x100(%rdi) + vmovups %zmm5, 0x140(%rdi) + vmovups %zmm6, 0x180(%rdi) + vmovups %zmm7, 0x1C0(%rdi) + add $512, %rdi + cmp %r9, %rdi + jb L(gobble_512bytes_loop) + vmovups %zmm8, (%r9) + vmovups %zmm9, 0x40(%r9) + vmovups %zmm10, 0x80(%r9) + vmovups %zmm11, 0xC0(%r9) + vmovups %zmm12, 0x100(%r9) + vmovups %zmm13, 0x140(%r9) + vmovups %zmm14, 0x180(%r9) + vmovups %zmm15, 0x1C0(%r9) + ret L(1024bytesormore_bkw): - add $512, %rdi - vmovups 0x1C0(%rsi), %zmm8 - 
vmovups 0x180(%rsi), %zmm9 - vmovups 0x140(%rsi), %zmm10 - vmovups 0x100(%rsi), %zmm11 - vmovups 0xC0(%rsi), %zmm12 - vmovups 0x80(%rsi), %zmm13 - vmovups 0x40(%rsi), %zmm14 - vmovups (%rsi), %zmm15 - prefetcht1 -0x40(%rcx) - prefetcht1 -0x80(%rcx) - prefetcht1 -0xC0(%rcx) - prefetcht1 -0x100(%rcx) - prefetcht1 -0x140(%rcx) - prefetcht1 -0x180(%rcx) - prefetcht1 -0x1C0(%rcx) - prefetcht1 -0x200(%rcx) + add $512, %rdi + vmovups 0x1C0(%rsi), %zmm8 + vmovups 0x180(%rsi), %zmm9 + vmovups 0x140(%rsi), %zmm10 + vmovups 0x100(%rsi), %zmm11 + vmovups 0xC0(%rsi), %zmm12 + vmovups 0x80(%rsi), %zmm13 + vmovups 0x40(%rsi), %zmm14 + vmovups (%rsi), %zmm15 + prefetcht1 -0x40(%rcx) + prefetcht1 -0x80(%rcx) + prefetcht1 -0xC0(%rcx) + prefetcht1 -0x100(%rcx) + prefetcht1 -0x140(%rcx) + prefetcht1 -0x180(%rcx) + prefetcht1 -0x1C0(%rcx) + prefetcht1 -0x200(%rcx) /* Backward loop with unaligned memory access. */ L(gobble_512bytes_loop_bkw): - vmovups -0x40(%rcx), %zmm0 - vmovups -0x80(%rcx), %zmm1 - vmovups -0xC0(%rcx), %zmm2 - vmovups -0x100(%rcx), %zmm3 - vmovups -0x140(%rcx), %zmm4 - vmovups -0x180(%rcx), %zmm5 - vmovups -0x1C0(%rcx), %zmm6 - vmovups -0x200(%rcx), %zmm7 - sub $512, %rcx - prefetcht1 -0x40(%rcx) - prefetcht1 -0x80(%rcx) - prefetcht1 -0xC0(%rcx) - prefetcht1 -0x100(%rcx) - prefetcht1 -0x140(%rcx) - prefetcht1 -0x180(%rcx) - prefetcht1 -0x1C0(%rcx) - prefetcht1 -0x200(%rcx) - vmovups %zmm0, -0x40(%r9) - vmovups %zmm1, -0x80(%r9) - vmovups %zmm2, -0xC0(%r9) - vmovups %zmm3, -0x100(%r9) - vmovups %zmm4, -0x140(%r9) - vmovups %zmm5, -0x180(%r9) - vmovups %zmm6, -0x1C0(%r9) - vmovups %zmm7, -0x200(%r9) - sub $512, %r9 - cmp %rdi, %r9 - ja L(gobble_512bytes_loop_bkw) - vmovups %zmm8, -0x40(%rdi) - vmovups %zmm9, -0x80(%rdi) - vmovups %zmm10, -0xC0(%rdi) - vmovups %zmm11, -0x100(%rdi) - vmovups %zmm12, -0x140(%rdi) - vmovups %zmm13, -0x180(%rdi) - vmovups %zmm14, -0x1C0(%rdi) - vmovups %zmm15, -0x200(%rdi) - ret + vmovups -0x40(%rcx), %zmm0 + vmovups -0x80(%rcx), %zmm1 + 
vmovups -0xC0(%rcx), %zmm2 + vmovups -0x100(%rcx), %zmm3 + vmovups -0x140(%rcx), %zmm4 + vmovups -0x180(%rcx), %zmm5 + vmovups -0x1C0(%rcx), %zmm6 + vmovups -0x200(%rcx), %zmm7 + sub $512, %rcx + prefetcht1 -0x40(%rcx) + prefetcht1 -0x80(%rcx) + prefetcht1 -0xC0(%rcx) + prefetcht1 -0x100(%rcx) + prefetcht1 -0x140(%rcx) + prefetcht1 -0x180(%rcx) + prefetcht1 -0x1C0(%rcx) + prefetcht1 -0x200(%rcx) + vmovups %zmm0, -0x40(%r9) + vmovups %zmm1, -0x80(%r9) + vmovups %zmm2, -0xC0(%r9) + vmovups %zmm3, -0x100(%r9) + vmovups %zmm4, -0x140(%r9) + vmovups %zmm5, -0x180(%r9) + vmovups %zmm6, -0x1C0(%r9) + vmovups %zmm7, -0x200(%r9) + sub $512, %r9 + cmp %rdi, %r9 + ja L(gobble_512bytes_loop_bkw) + vmovups %zmm8, -0x40(%rdi) + vmovups %zmm9, -0x80(%rdi) + vmovups %zmm10, -0xC0(%rdi) + vmovups %zmm11, -0x100(%rdi) + vmovups %zmm12, -0x140(%rdi) + vmovups %zmm13, -0x180(%rdi) + vmovups %zmm14, -0x1C0(%rdi) + vmovups %zmm15, -0x200(%rdi) + ret L(preloop_large): - cmp %rsi, %rdi - ja L(preloop_large_bkw) - vmovups (%rsi), %zmm4 - vmovups 0x40(%rsi), %zmm5 + cmp %rsi, %rdi + ja L(preloop_large_bkw) + vmovups (%rsi), %zmm4 + vmovups 0x40(%rsi), %zmm5 - mov %rdi, %r11 + mov %rdi, %r11 /* Align destination for access with non-temporal stores in the loop. 
*/ - mov %rdi, %r8 - and $-0x80, %rdi - add $0x80, %rdi - sub %rdi, %r8 - sub %r8, %rsi - add %r8, %rdx + mov %rdi, %r8 + and $-0x80, %rdi + add $0x80, %rdi + sub %rdi, %r8 + sub %r8, %rsi + add %r8, %rdx L(gobble_256bytes_nt_loop): - prefetcht1 0x200(%rsi) - prefetcht1 0x240(%rsi) - prefetcht1 0x280(%rsi) - prefetcht1 0x2C0(%rsi) - prefetcht1 0x300(%rsi) - prefetcht1 0x340(%rsi) - prefetcht1 0x380(%rsi) - prefetcht1 0x3C0(%rsi) - vmovdqu64 (%rsi), %zmm0 - vmovdqu64 0x40(%rsi), %zmm1 - vmovdqu64 0x80(%rsi), %zmm2 - vmovdqu64 0xC0(%rsi), %zmm3 - vmovntdq %zmm0, (%rdi) - vmovntdq %zmm1, 0x40(%rdi) - vmovntdq %zmm2, 0x80(%rdi) - vmovntdq %zmm3, 0xC0(%rdi) - sub $256, %rdx - add $256, %rsi - add $256, %rdi - cmp $256, %rdx - ja L(gobble_256bytes_nt_loop) - sfence - vmovups %zmm4, (%r11) - vmovups %zmm5, 0x40(%r11) - jmp L(check) + prefetcht1 0x200(%rsi) + prefetcht1 0x240(%rsi) + prefetcht1 0x280(%rsi) + prefetcht1 0x2C0(%rsi) + prefetcht1 0x300(%rsi) + prefetcht1 0x340(%rsi) + prefetcht1 0x380(%rsi) + prefetcht1 0x3C0(%rsi) + vmovdqu64 (%rsi), %zmm0 + vmovdqu64 0x40(%rsi), %zmm1 + vmovdqu64 0x80(%rsi), %zmm2 + vmovdqu64 0xC0(%rsi), %zmm3 + vmovntdq %zmm0, (%rdi) + vmovntdq %zmm1, 0x40(%rdi) + vmovntdq %zmm2, 0x80(%rdi) + vmovntdq %zmm3, 0xC0(%rdi) + sub $256, %rdx + add $256, %rsi + add $256, %rdi + cmp $256, %rdx + ja L(gobble_256bytes_nt_loop) + sfence + vmovups %zmm4, (%r11) + vmovups %zmm5, 0x40(%r11) + jmp L(check) L(preloop_large_bkw): - vmovups -0x80(%rcx), %zmm4 - vmovups -0x40(%rcx), %zmm5 + vmovups -0x80(%rcx), %zmm4 + vmovups -0x40(%rcx), %zmm5 /* Align end of destination for access with non-temporal stores. 
*/ - mov %r9, %r8 - and $-0x80, %r9 - sub %r9, %r8 - sub %r8, %rcx - sub %r8, %rdx - add %r9, %r8 + mov %r9, %r8 + and $-0x80, %r9 + sub %r9, %r8 + sub %r8, %rcx + sub %r8, %rdx + add %r9, %r8 L(gobble_256bytes_nt_loop_bkw): - prefetcht1 -0x400(%rcx) - prefetcht1 -0x3C0(%rcx) - prefetcht1 -0x380(%rcx) - prefetcht1 -0x340(%rcx) - prefetcht1 -0x300(%rcx) - prefetcht1 -0x2C0(%rcx) - prefetcht1 -0x280(%rcx) - prefetcht1 -0x240(%rcx) - vmovdqu64 -0x100(%rcx), %zmm0 - vmovdqu64 -0xC0(%rcx), %zmm1 - vmovdqu64 -0x80(%rcx), %zmm2 - vmovdqu64 -0x40(%rcx), %zmm3 - vmovntdq %zmm0, -0x100(%r9) - vmovntdq %zmm1, -0xC0(%r9) - vmovntdq %zmm2, -0x80(%r9) - vmovntdq %zmm3, -0x40(%r9) - sub $256, %rdx - sub $256, %rcx - sub $256, %r9 - cmp $256, %rdx - ja L(gobble_256bytes_nt_loop_bkw) - sfence - vmovups %zmm4, -0x80(%r8) - vmovups %zmm5, -0x40(%r8) - jmp L(check) + prefetcht1 -0x400(%rcx) + prefetcht1 -0x3C0(%rcx) + prefetcht1 -0x380(%rcx) + prefetcht1 -0x340(%rcx) + prefetcht1 -0x300(%rcx) + prefetcht1 -0x2C0(%rcx) + prefetcht1 -0x280(%rcx) + prefetcht1 -0x240(%rcx) + vmovdqu64 -0x100(%rcx), %zmm0 + vmovdqu64 -0xC0(%rcx), %zmm1 + vmovdqu64 -0x80(%rcx), %zmm2 + vmovdqu64 -0x40(%rcx), %zmm3 + vmovntdq %zmm0, -0x100(%r9) + vmovntdq %zmm1, -0xC0(%r9) + vmovntdq %zmm2, -0x80(%r9) + vmovntdq %zmm3, -0x40(%r9) + sub $256, %rdx + sub $256, %rcx + sub $256, %r9 + cmp $256, %rdx + ja L(gobble_256bytes_nt_loop_bkw) + sfence + vmovups %zmm4, -0x80(%r8) + vmovups %zmm5, -0x40(%r8) + jmp L(check) END (__memmove_avx512_no_vzeroupper) strong_alias (__memmove_avx512_no_vzeroupper, __memcpy_avx512_no_vzeroupper) diff --git a/utils/memcpy-bench/glibc/memmove-avx512-unaligned-erms.S b/utils/memcpy-bench/glibc/memmove-avx512-unaligned-erms.S index db70fdf1b4e..9666b05f1c5 100644 --- a/utils/memcpy-bench/glibc/memmove-avx512-unaligned-erms.S +++ b/utils/memcpy-bench/glibc/memmove-avx512-unaligned-erms.S @@ -1,12 +1,12 @@ #if 1 -# define VEC_SIZE 64 -# define VEC(i) zmm##i -# define VMOVNT vmovntdq -# 
define VMOVU vmovdqu64 -# define VMOVA vmovdqa64 +# define VEC_SIZE 64 +# define VEC(i) zmm##i +# define VMOVNT vmovntdq +# define VMOVU vmovdqu64 +# define VMOVA vmovdqa64 -# define SECTION(p) p##.avx512 -# define MEMMOVE_SYMBOL(p,s) p##_avx512_##s +# define SECTION(p) p##.avx512 +# define MEMMOVE_SYMBOL(p,s) p##_avx512_##s # include "memmove-vec-unaligned-erms.S" #endif diff --git a/utils/memcpy-bench/glibc/memmove-sse2-unaligned-erms.S b/utils/memcpy-bench/glibc/memmove-sse2-unaligned-erms.S index 17b4f861621..ad405be479e 100644 --- a/utils/memcpy-bench/glibc/memmove-sse2-unaligned-erms.S +++ b/utils/memcpy-bench/glibc/memmove-sse2-unaligned-erms.S @@ -17,7 +17,7 @@ . */ #if 1 -# define MEMMOVE_SYMBOL(p,s) p##_sse2_##s +# define MEMMOVE_SYMBOL(p,s) p##_sse2_##s #else weak_alias (__mempcpy, mempcpy) #endif diff --git a/utils/memcpy-bench/glibc/memmove-vec-unaligned-erms.S b/utils/memcpy-bench/glibc/memmove-vec-unaligned-erms.S index 21be351b4e7..097ff6ca617 100644 --- a/utils/memcpy-bench/glibc/memmove-vec-unaligned-erms.S +++ b/utils/memcpy-bench/glibc/memmove-vec-unaligned-erms.S @@ -37,15 +37,15 @@ #include "sysdep.h" #ifndef MEMCPY_SYMBOL -# define MEMCPY_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s) +# define MEMCPY_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s) #endif #ifndef MEMPCPY_SYMBOL -# define MEMPCPY_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s) +# define MEMPCPY_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s) #endif #ifndef MEMMOVE_CHK_SYMBOL -# define MEMMOVE_CHK_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s) +# define MEMMOVE_CHK_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s) #endif #ifndef VZEROUPPER @@ -70,17 +70,17 @@ #if PREFETCH_SIZE == 64 # if PREFETCHED_LOAD_SIZE == PREFETCH_SIZE # define PREFETCH_ONE_SET(dir, base, offset) \ - PREFETCH ((offset)base) + PREFETCH ((offset)base) # elif PREFETCHED_LOAD_SIZE == 2 * PREFETCH_SIZE # define PREFETCH_ONE_SET(dir, base, offset) \ - PREFETCH ((offset)base); \ - PREFETCH ((offset + dir * PREFETCH_SIZE)base) + PREFETCH ((offset)base); \ + PREFETCH ((offset + dir * 
PREFETCH_SIZE)base) # elif PREFETCHED_LOAD_SIZE == 4 * PREFETCH_SIZE # define PREFETCH_ONE_SET(dir, base, offset) \ - PREFETCH ((offset)base); \ - PREFETCH ((offset + dir * PREFETCH_SIZE)base); \ - PREFETCH ((offset + dir * PREFETCH_SIZE * 2)base); \ - PREFETCH ((offset + dir * PREFETCH_SIZE * 3)base) + PREFETCH ((offset)base); \ + PREFETCH ((offset + dir * PREFETCH_SIZE)base); \ + PREFETCH ((offset + dir * PREFETCH_SIZE * 2)base); \ + PREFETCH ((offset + dir * PREFETCH_SIZE * 3)base) # else # error Unsupported PREFETCHED_LOAD_SIZE! # endif @@ -92,100 +92,100 @@ # error SECTION is not defined! #endif - .section SECTION(.text),"ax",@progbits + .section SECTION(.text),"ax",@progbits #if defined SHARED ENTRY (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned)) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned)) #endif ENTRY (MEMPCPY_SYMBOL (__mempcpy, unaligned)) - mov %RDI_LP, %RAX_LP - add %RDX_LP, %RAX_LP - jmp L(start) + mov %RDI_LP, %RAX_LP + add %RDX_LP, %RAX_LP + jmp L(start) END (MEMPCPY_SYMBOL (__mempcpy, unaligned)) #if defined SHARED ENTRY (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned)) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned)) #endif ENTRY (MEMMOVE_SYMBOL (__memmove, unaligned)) - movq %rdi, %rax + movq %rdi, %rax L(start): # ifdef __ILP32__ - /* Clear the upper 32 bits. */ - movl %edx, %edx + /* Clear the upper 32 bits. */ + movl %edx, %edx # endif - cmp $VEC_SIZE, %RDX_LP - jb L(less_vec) - cmp $(VEC_SIZE * 2), %RDX_LP - ja L(more_2x_vec) + cmp $VEC_SIZE, %RDX_LP + jb L(less_vec) + cmp $(VEC_SIZE * 2), %RDX_LP + ja L(more_2x_vec) #if !defined USE_MULTIARCH L(last_2x_vec): #endif - /* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. 
*/ - VMOVU (%rsi), %VEC(0) - VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(1) - VMOVU %VEC(0), (%rdi) - VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx) - VZEROUPPER + /* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. */ + VMOVU (%rsi), %VEC(0) + VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(1) + VMOVU %VEC(0), (%rdi) + VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx) + VZEROUPPER #if !defined USE_MULTIARCH L(nop): #endif - ret + ret #if defined USE_MULTIARCH END (MEMMOVE_SYMBOL (__memmove, unaligned)) # if VEC_SIZE == 16 ENTRY (__mempcpy_chk_erms) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (__mempcpy_chk_erms) /* Only used to measure performance of REP MOVSB. */ ENTRY (__mempcpy_erms) - mov %RDI_LP, %RAX_LP - /* Skip zero length. */ - test %RDX_LP, %RDX_LP - jz 2f - add %RDX_LP, %RAX_LP - jmp L(start_movsb) + mov %RDI_LP, %RAX_LP + /* Skip zero length. */ + test %RDX_LP, %RDX_LP + jz 2f + add %RDX_LP, %RAX_LP + jmp L(start_movsb) END (__mempcpy_erms) ENTRY (__memmove_chk_erms) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (__memmove_chk_erms) ENTRY (__memmove_erms) - movq %rdi, %rax - /* Skip zero length. */ - test %RDX_LP, %RDX_LP - jz 2f + movq %rdi, %rax + /* Skip zero length. */ + test %RDX_LP, %RDX_LP + jz 2f L(start_movsb): - mov %RDX_LP, %RCX_LP - cmp %RSI_LP, %RDI_LP - jb 1f - /* Source == destination is less common. */ - je 2f - lea (%rsi,%rcx), %RDX_LP - cmp %RDX_LP, %RDI_LP - jb L(movsb_backward) + mov %RDX_LP, %RCX_LP + cmp %RSI_LP, %RDI_LP + jb 1f + /* Source == destination is less common. 
*/ + je 2f + lea (%rsi,%rcx), %RDX_LP + cmp %RDX_LP, %RDI_LP + jb L(movsb_backward) 1: - rep movsb + rep movsb 2: - ret + ret L(movsb_backward): - leaq -1(%rdi,%rcx), %rdi - leaq -1(%rsi,%rcx), %rsi - std - rep movsb - cld - ret + leaq -1(%rdi,%rcx), %rdi + leaq -1(%rsi,%rcx), %rsi + std + rep movsb + cld + ret END (__memmove_erms) strong_alias (__memmove_erms, __memcpy_erms) strong_alias (__memmove_chk_erms, __memcpy_chk_erms) @@ -193,367 +193,367 @@ strong_alias (__memmove_chk_erms, __memcpy_chk_erms) # ifdef SHARED ENTRY (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned_erms)) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned_erms)) # endif ENTRY (MEMMOVE_SYMBOL (__mempcpy, unaligned_erms)) - mov %RDI_LP, %RAX_LP - add %RDX_LP, %RAX_LP - jmp L(start_erms) + mov %RDI_LP, %RAX_LP + add %RDX_LP, %RAX_LP + jmp L(start_erms) END (MEMMOVE_SYMBOL (__mempcpy, unaligned_erms)) # ifdef SHARED ENTRY (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned_erms)) - cmp %RDX_LP, %RCX_LP - jb HIDDEN_JUMPTARGET (__chk_fail) + cmp %RDX_LP, %RCX_LP + jb HIDDEN_JUMPTARGET (__chk_fail) END (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned_erms)) # endif ENTRY (MEMMOVE_SYMBOL (__memmove, unaligned_erms)) - movq %rdi, %rax + movq %rdi, %rax L(start_erms): # ifdef __ILP32__ - /* Clear the upper 32 bits. */ - movl %edx, %edx + /* Clear the upper 32 bits. */ + movl %edx, %edx # endif - cmp $VEC_SIZE, %RDX_LP - jb L(less_vec) - cmp $(VEC_SIZE * 2), %RDX_LP - ja L(movsb_more_2x_vec) + cmp $VEC_SIZE, %RDX_LP + jb L(less_vec) + cmp $(VEC_SIZE * 2), %RDX_LP + ja L(movsb_more_2x_vec) L(last_2x_vec): - /* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. */ - VMOVU (%rsi), %VEC(0) - VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(1) - VMOVU %VEC(0), (%rdi) - VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx) + /* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. 
*/ + VMOVU (%rsi), %VEC(0) + VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(1) + VMOVU %VEC(0), (%rdi) + VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx) L(return): - VZEROUPPER - ret + VZEROUPPER + ret L(movsb): - cmp $SHARED_NON_TEMPORAL_THRESHOLD, %RDX_LP - jae L(more_8x_vec) - cmpq %rsi, %rdi - jb 1f - /* Source == destination is less common. */ - je L(nop) - leaq (%rsi,%rdx), %r9 - cmpq %r9, %rdi - /* Avoid slow backward REP MOVSB. */ - jb L(more_8x_vec_backward) + cmp $SHARED_NON_TEMPORAL_THRESHOLD, %RDX_LP + jae L(more_8x_vec) + cmpq %rsi, %rdi + jb 1f + /* Source == destination is less common. */ + je L(nop) + leaq (%rsi,%rdx), %r9 + cmpq %r9, %rdi + /* Avoid slow backward REP MOVSB. */ + jb L(more_8x_vec_backward) 1: - mov %RDX_LP, %RCX_LP - rep movsb + mov %RDX_LP, %RCX_LP + rep movsb L(nop): - ret + ret #endif L(less_vec): - /* Less than 1 VEC. */ + /* Less than 1 VEC. */ #if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64 # error Unsupported VEC_SIZE! #endif #if VEC_SIZE > 32 - cmpb $32, %dl - jae L(between_32_63) + cmpb $32, %dl + jae L(between_32_63) #endif #if VEC_SIZE > 16 - cmpb $16, %dl - jae L(between_16_31) + cmpb $16, %dl + jae L(between_16_31) #endif - cmpb $8, %dl - jae L(between_8_15) - cmpb $4, %dl - jae L(between_4_7) - cmpb $1, %dl - ja L(between_2_3) - jb 1f - movzbl (%rsi), %ecx - movb %cl, (%rdi) + cmpb $8, %dl + jae L(between_8_15) + cmpb $4, %dl + jae L(between_4_7) + cmpb $1, %dl + ja L(between_2_3) + jb 1f + movzbl (%rsi), %ecx + movb %cl, (%rdi) 1: - ret + ret #if VEC_SIZE > 32 L(between_32_63): - /* From 32 to 63. No branch when size == 32. */ - vmovdqu (%rsi), %ymm0 - vmovdqu -32(%rsi,%rdx), %ymm1 - vmovdqu %ymm0, (%rdi) - vmovdqu %ymm1, -32(%rdi,%rdx) - VZEROUPPER - ret + /* From 32 to 63. No branch when size == 32. */ + vmovdqu (%rsi), %ymm0 + vmovdqu -32(%rsi,%rdx), %ymm1 + vmovdqu %ymm0, (%rdi) + vmovdqu %ymm1, -32(%rdi,%rdx) + VZEROUPPER + ret #endif #if VEC_SIZE > 16 - /* From 16 to 31. No branch when size == 16. */ + /* From 16 to 31. 
No branch when size == 16. */ L(between_16_31): - vmovdqu (%rsi), %xmm0 - vmovdqu -16(%rsi,%rdx), %xmm1 - vmovdqu %xmm0, (%rdi) - vmovdqu %xmm1, -16(%rdi,%rdx) - ret + vmovdqu (%rsi), %xmm0 + vmovdqu -16(%rsi,%rdx), %xmm1 + vmovdqu %xmm0, (%rdi) + vmovdqu %xmm1, -16(%rdi,%rdx) + ret #endif L(between_8_15): - /* From 8 to 15. No branch when size == 8. */ - movq -8(%rsi,%rdx), %rcx - movq (%rsi), %rsi - movq %rcx, -8(%rdi,%rdx) - movq %rsi, (%rdi) - ret + /* From 8 to 15. No branch when size == 8. */ + movq -8(%rsi,%rdx), %rcx + movq (%rsi), %rsi + movq %rcx, -8(%rdi,%rdx) + movq %rsi, (%rdi) + ret L(between_4_7): - /* From 4 to 7. No branch when size == 4. */ - movl -4(%rsi,%rdx), %ecx - movl (%rsi), %esi - movl %ecx, -4(%rdi,%rdx) - movl %esi, (%rdi) - ret + /* From 4 to 7. No branch when size == 4. */ + movl -4(%rsi,%rdx), %ecx + movl (%rsi), %esi + movl %ecx, -4(%rdi,%rdx) + movl %esi, (%rdi) + ret L(between_2_3): - /* From 2 to 3. No branch when size == 2. */ - movzwl -2(%rsi,%rdx), %ecx - movzwl (%rsi), %esi - movw %cx, -2(%rdi,%rdx) - movw %si, (%rdi) - ret + /* From 2 to 3. No branch when size == 2. */ + movzwl -2(%rsi,%rdx), %ecx + movzwl (%rsi), %esi + movw %cx, -2(%rdi,%rdx) + movw %si, (%rdi) + ret #if defined USE_MULTIARCH L(movsb_more_2x_vec): - cmp $REP_MOSB_THRESHOLD, %RDX_LP - ja L(movsb) + cmp $REP_MOSB_THRESHOLD, %RDX_LP + ja L(movsb) #endif L(more_2x_vec): - /* More than 2 * VEC and there may be overlap between destination - and source. */ - cmpq $(VEC_SIZE * 8), %rdx - ja L(more_8x_vec) - cmpq $(VEC_SIZE * 4), %rdx - jb L(last_4x_vec) - /* Copy from 4 * VEC to 8 * VEC, inclusively. 
*/ - VMOVU (%rsi), %VEC(0) - VMOVU VEC_SIZE(%rsi), %VEC(1) - VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2) - VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3) - VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(4) - VMOVU -(VEC_SIZE * 2)(%rsi,%rdx), %VEC(5) - VMOVU -(VEC_SIZE * 3)(%rsi,%rdx), %VEC(6) - VMOVU -(VEC_SIZE * 4)(%rsi,%rdx), %VEC(7) - VMOVU %VEC(0), (%rdi) - VMOVU %VEC(1), VEC_SIZE(%rdi) - VMOVU %VEC(2), (VEC_SIZE * 2)(%rdi) - VMOVU %VEC(3), (VEC_SIZE * 3)(%rdi) - VMOVU %VEC(4), -VEC_SIZE(%rdi,%rdx) - VMOVU %VEC(5), -(VEC_SIZE * 2)(%rdi,%rdx) - VMOVU %VEC(6), -(VEC_SIZE * 3)(%rdi,%rdx) - VMOVU %VEC(7), -(VEC_SIZE * 4)(%rdi,%rdx) - VZEROUPPER - ret + /* More than 2 * VEC and there may be overlap between destination + and source. */ + cmpq $(VEC_SIZE * 8), %rdx + ja L(more_8x_vec) + cmpq $(VEC_SIZE * 4), %rdx + jb L(last_4x_vec) + /* Copy from 4 * VEC to 8 * VEC, inclusively. */ + VMOVU (%rsi), %VEC(0) + VMOVU VEC_SIZE(%rsi), %VEC(1) + VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2) + VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3) + VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(4) + VMOVU -(VEC_SIZE * 2)(%rsi,%rdx), %VEC(5) + VMOVU -(VEC_SIZE * 3)(%rsi,%rdx), %VEC(6) + VMOVU -(VEC_SIZE * 4)(%rsi,%rdx), %VEC(7) + VMOVU %VEC(0), (%rdi) + VMOVU %VEC(1), VEC_SIZE(%rdi) + VMOVU %VEC(2), (VEC_SIZE * 2)(%rdi) + VMOVU %VEC(3), (VEC_SIZE * 3)(%rdi) + VMOVU %VEC(4), -VEC_SIZE(%rdi,%rdx) + VMOVU %VEC(5), -(VEC_SIZE * 2)(%rdi,%rdx) + VMOVU %VEC(6), -(VEC_SIZE * 3)(%rdi,%rdx) + VMOVU %VEC(7), -(VEC_SIZE * 4)(%rdi,%rdx) + VZEROUPPER + ret L(last_4x_vec): - /* Copy from 2 * VEC to 4 * VEC. */ - VMOVU (%rsi), %VEC(0) - VMOVU VEC_SIZE(%rsi), %VEC(1) - VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(2) - VMOVU -(VEC_SIZE * 2)(%rsi,%rdx), %VEC(3) - VMOVU %VEC(0), (%rdi) - VMOVU %VEC(1), VEC_SIZE(%rdi) - VMOVU %VEC(2), -VEC_SIZE(%rdi,%rdx) - VMOVU %VEC(3), -(VEC_SIZE * 2)(%rdi,%rdx) - VZEROUPPER - ret + /* Copy from 2 * VEC to 4 * VEC. 
*/ + VMOVU (%rsi), %VEC(0) + VMOVU VEC_SIZE(%rsi), %VEC(1) + VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(2) + VMOVU -(VEC_SIZE * 2)(%rsi,%rdx), %VEC(3) + VMOVU %VEC(0), (%rdi) + VMOVU %VEC(1), VEC_SIZE(%rdi) + VMOVU %VEC(2), -VEC_SIZE(%rdi,%rdx) + VMOVU %VEC(3), -(VEC_SIZE * 2)(%rdi,%rdx) + VZEROUPPER + ret L(more_8x_vec): - cmpq %rsi, %rdi - ja L(more_8x_vec_backward) - /* Source == destination is less common. */ - je L(nop) - /* Load the first VEC and last 4 * VEC to support overlapping - addresses. */ - VMOVU (%rsi), %VEC(4) - VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(5) - VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(6) - VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(7) - VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(8) - /* Save start and stop of the destination buffer. */ - movq %rdi, %r11 - leaq -VEC_SIZE(%rdi, %rdx), %rcx - /* Align destination for aligned stores in the loop. Compute - how much destination is misaligned. */ - movq %rdi, %r8 - andq $(VEC_SIZE - 1), %r8 - /* Get the negative of offset for alignment. */ - subq $VEC_SIZE, %r8 - /* Adjust source. */ - subq %r8, %rsi - /* Adjust destination which should be aligned now. */ - subq %r8, %rdi - /* Adjust length. */ - addq %r8, %rdx + cmpq %rsi, %rdi + ja L(more_8x_vec_backward) + /* Source == destination is less common. */ + je L(nop) + /* Load the first VEC and last 4 * VEC to support overlapping + addresses. */ + VMOVU (%rsi), %VEC(4) + VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(5) + VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(6) + VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(7) + VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(8) + /* Save start and stop of the destination buffer. */ + movq %rdi, %r11 + leaq -VEC_SIZE(%rdi, %rdx), %rcx + /* Align destination for aligned stores in the loop. Compute + how much destination is misaligned. */ + movq %rdi, %r8 + andq $(VEC_SIZE - 1), %r8 + /* Get the negative of offset for alignment. */ + subq $VEC_SIZE, %r8 + /* Adjust source. */ + subq %r8, %rsi + /* Adjust destination which should be aligned now. 
*/ + subq %r8, %rdi + /* Adjust length. */ + addq %r8, %rdx #if (defined USE_MULTIARCH || VEC_SIZE == 16) - /* Check non-temporal store threshold. */ - cmp $SHARED_NON_TEMPORAL_THRESHOLD, %RDX_LP - ja L(large_forward) + /* Check non-temporal store threshold. */ + cmp $SHARED_NON_TEMPORAL_THRESHOLD, %RDX_LP + ja L(large_forward) #endif L(loop_4x_vec_forward): - /* Copy 4 * VEC a time forward. */ - VMOVU (%rsi), %VEC(0) - VMOVU VEC_SIZE(%rsi), %VEC(1) - VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2) - VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3) - addq $(VEC_SIZE * 4), %rsi - subq $(VEC_SIZE * 4), %rdx - VMOVA %VEC(0), (%rdi) - VMOVA %VEC(1), VEC_SIZE(%rdi) - VMOVA %VEC(2), (VEC_SIZE * 2)(%rdi) - VMOVA %VEC(3), (VEC_SIZE * 3)(%rdi) - addq $(VEC_SIZE * 4), %rdi - cmpq $(VEC_SIZE * 4), %rdx - ja L(loop_4x_vec_forward) - /* Store the last 4 * VEC. */ - VMOVU %VEC(5), (%rcx) - VMOVU %VEC(6), -VEC_SIZE(%rcx) - VMOVU %VEC(7), -(VEC_SIZE * 2)(%rcx) - VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx) - /* Store the first VEC. */ - VMOVU %VEC(4), (%r11) - VZEROUPPER - ret + /* Copy 4 * VEC a time forward. */ + VMOVU (%rsi), %VEC(0) + VMOVU VEC_SIZE(%rsi), %VEC(1) + VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2) + VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3) + addq $(VEC_SIZE * 4), %rsi + subq $(VEC_SIZE * 4), %rdx + VMOVA %VEC(0), (%rdi) + VMOVA %VEC(1), VEC_SIZE(%rdi) + VMOVA %VEC(2), (VEC_SIZE * 2)(%rdi) + VMOVA %VEC(3), (VEC_SIZE * 3)(%rdi) + addq $(VEC_SIZE * 4), %rdi + cmpq $(VEC_SIZE * 4), %rdx + ja L(loop_4x_vec_forward) + /* Store the last 4 * VEC. */ + VMOVU %VEC(5), (%rcx) + VMOVU %VEC(6), -VEC_SIZE(%rcx) + VMOVU %VEC(7), -(VEC_SIZE * 2)(%rcx) + VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx) + /* Store the first VEC. */ + VMOVU %VEC(4), (%r11) + VZEROUPPER + ret L(more_8x_vec_backward): - /* Load the first 4 * VEC and last VEC to support overlapping - addresses. 
*/ - VMOVU (%rsi), %VEC(4) - VMOVU VEC_SIZE(%rsi), %VEC(5) - VMOVU (VEC_SIZE * 2)(%rsi), %VEC(6) - VMOVU (VEC_SIZE * 3)(%rsi), %VEC(7) - VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(8) - /* Save stop of the destination buffer. */ - leaq -VEC_SIZE(%rdi, %rdx), %r11 - /* Align destination end for aligned stores in the loop. Compute - how much destination end is misaligned. */ - leaq -VEC_SIZE(%rsi, %rdx), %rcx - movq %r11, %r9 - movq %r11, %r8 - andq $(VEC_SIZE - 1), %r8 - /* Adjust source. */ - subq %r8, %rcx - /* Adjust the end of destination which should be aligned now. */ - subq %r8, %r9 - /* Adjust length. */ - subq %r8, %rdx + /* Load the first 4 * VEC and last VEC to support overlapping + addresses. */ + VMOVU (%rsi), %VEC(4) + VMOVU VEC_SIZE(%rsi), %VEC(5) + VMOVU (VEC_SIZE * 2)(%rsi), %VEC(6) + VMOVU (VEC_SIZE * 3)(%rsi), %VEC(7) + VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(8) + /* Save stop of the destination buffer. */ + leaq -VEC_SIZE(%rdi, %rdx), %r11 + /* Align destination end for aligned stores in the loop. Compute + how much destination end is misaligned. */ + leaq -VEC_SIZE(%rsi, %rdx), %rcx + movq %r11, %r9 + movq %r11, %r8 + andq $(VEC_SIZE - 1), %r8 + /* Adjust source. */ + subq %r8, %rcx + /* Adjust the end of destination which should be aligned now. */ + subq %r8, %r9 + /* Adjust length. */ + subq %r8, %rdx #if (defined USE_MULTIARCH || VEC_SIZE == 16) - /* Check non-temporal store threshold. */ - cmp $SHARED_NON_TEMPORAL_THRESHOLD, %RDX_LP - ja L(large_backward) + /* Check non-temporal store threshold. */ + cmp $SHARED_NON_TEMPORAL_THRESHOLD, %RDX_LP + ja L(large_backward) #endif L(loop_4x_vec_backward): - /* Copy 4 * VEC a time backward. 
*/ - VMOVU (%rcx), %VEC(0) - VMOVU -VEC_SIZE(%rcx), %VEC(1) - VMOVU -(VEC_SIZE * 2)(%rcx), %VEC(2) - VMOVU -(VEC_SIZE * 3)(%rcx), %VEC(3) - subq $(VEC_SIZE * 4), %rcx - subq $(VEC_SIZE * 4), %rdx - VMOVA %VEC(0), (%r9) - VMOVA %VEC(1), -VEC_SIZE(%r9) - VMOVA %VEC(2), -(VEC_SIZE * 2)(%r9) - VMOVA %VEC(3), -(VEC_SIZE * 3)(%r9) - subq $(VEC_SIZE * 4), %r9 - cmpq $(VEC_SIZE * 4), %rdx - ja L(loop_4x_vec_backward) - /* Store the first 4 * VEC. */ - VMOVU %VEC(4), (%rdi) - VMOVU %VEC(5), VEC_SIZE(%rdi) - VMOVU %VEC(6), (VEC_SIZE * 2)(%rdi) - VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi) - /* Store the last VEC. */ - VMOVU %VEC(8), (%r11) - VZEROUPPER - ret + /* Copy 4 * VEC a time backward. */ + VMOVU (%rcx), %VEC(0) + VMOVU -VEC_SIZE(%rcx), %VEC(1) + VMOVU -(VEC_SIZE * 2)(%rcx), %VEC(2) + VMOVU -(VEC_SIZE * 3)(%rcx), %VEC(3) + subq $(VEC_SIZE * 4), %rcx + subq $(VEC_SIZE * 4), %rdx + VMOVA %VEC(0), (%r9) + VMOVA %VEC(1), -VEC_SIZE(%r9) + VMOVA %VEC(2), -(VEC_SIZE * 2)(%r9) + VMOVA %VEC(3), -(VEC_SIZE * 3)(%r9) + subq $(VEC_SIZE * 4), %r9 + cmpq $(VEC_SIZE * 4), %rdx + ja L(loop_4x_vec_backward) + /* Store the first 4 * VEC. */ + VMOVU %VEC(4), (%rdi) + VMOVU %VEC(5), VEC_SIZE(%rdi) + VMOVU %VEC(6), (VEC_SIZE * 2)(%rdi) + VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi) + /* Store the last VEC. */ + VMOVU %VEC(8), (%r11) + VZEROUPPER + ret #if (defined USE_MULTIARCH || VEC_SIZE == 16) L(large_forward): - /* Don't use non-temporal store if there is overlap between - destination and source since destination may be in cache - when source is loaded. */ - leaq (%rdi, %rdx), %r10 - cmpq %r10, %rsi - jb L(loop_4x_vec_forward) + /* Don't use non-temporal store if there is overlap between + destination and source since destination may be in cache + when source is loaded. */ + leaq (%rdi, %rdx), %r10 + cmpq %r10, %rsi + jb L(loop_4x_vec_forward) L(loop_large_forward): - /* Copy 4 * VEC a time forward with non-temporal stores. 
*/ - PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE * 2) - PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE * 3) - VMOVU (%rsi), %VEC(0) - VMOVU VEC_SIZE(%rsi), %VEC(1) - VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2) - VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3) - addq $PREFETCHED_LOAD_SIZE, %rsi - subq $PREFETCHED_LOAD_SIZE, %rdx - VMOVNT %VEC(0), (%rdi) - VMOVNT %VEC(1), VEC_SIZE(%rdi) - VMOVNT %VEC(2), (VEC_SIZE * 2)(%rdi) - VMOVNT %VEC(3), (VEC_SIZE * 3)(%rdi) - addq $PREFETCHED_LOAD_SIZE, %rdi - cmpq $PREFETCHED_LOAD_SIZE, %rdx - ja L(loop_large_forward) - sfence - /* Store the last 4 * VEC. */ - VMOVU %VEC(5), (%rcx) - VMOVU %VEC(6), -VEC_SIZE(%rcx) - VMOVU %VEC(7), -(VEC_SIZE * 2)(%rcx) - VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx) - /* Store the first VEC. */ - VMOVU %VEC(4), (%r11) - VZEROUPPER - ret + /* Copy 4 * VEC a time forward with non-temporal stores. */ + PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE * 2) + PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE * 3) + VMOVU (%rsi), %VEC(0) + VMOVU VEC_SIZE(%rsi), %VEC(1) + VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2) + VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3) + addq $PREFETCHED_LOAD_SIZE, %rsi + subq $PREFETCHED_LOAD_SIZE, %rdx + VMOVNT %VEC(0), (%rdi) + VMOVNT %VEC(1), VEC_SIZE(%rdi) + VMOVNT %VEC(2), (VEC_SIZE * 2)(%rdi) + VMOVNT %VEC(3), (VEC_SIZE * 3)(%rdi) + addq $PREFETCHED_LOAD_SIZE, %rdi + cmpq $PREFETCHED_LOAD_SIZE, %rdx + ja L(loop_large_forward) + sfence + /* Store the last 4 * VEC. */ + VMOVU %VEC(5), (%rcx) + VMOVU %VEC(6), -VEC_SIZE(%rcx) + VMOVU %VEC(7), -(VEC_SIZE * 2)(%rcx) + VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx) + /* Store the first VEC. */ + VMOVU %VEC(4), (%r11) + VZEROUPPER + ret L(large_backward): - /* Don't use non-temporal store if there is overlap between - destination and source since destination may be in cache - when source is loaded. 
*/ - leaq (%rcx, %rdx), %r10 - cmpq %r10, %r9 - jb L(loop_4x_vec_backward) + /* Don't use non-temporal store if there is overlap between + destination and source since destination may be in cache + when source is loaded. */ + leaq (%rcx, %rdx), %r10 + cmpq %r10, %r9 + jb L(loop_4x_vec_backward) L(loop_large_backward): - /* Copy 4 * VEC a time backward with non-temporal stores. */ - PREFETCH_ONE_SET (-1, (%rcx), -PREFETCHED_LOAD_SIZE * 2) - PREFETCH_ONE_SET (-1, (%rcx), -PREFETCHED_LOAD_SIZE * 3) - VMOVU (%rcx), %VEC(0) - VMOVU -VEC_SIZE(%rcx), %VEC(1) - VMOVU -(VEC_SIZE * 2)(%rcx), %VEC(2) - VMOVU -(VEC_SIZE * 3)(%rcx), %VEC(3) - subq $PREFETCHED_LOAD_SIZE, %rcx - subq $PREFETCHED_LOAD_SIZE, %rdx - VMOVNT %VEC(0), (%r9) - VMOVNT %VEC(1), -VEC_SIZE(%r9) - VMOVNT %VEC(2), -(VEC_SIZE * 2)(%r9) - VMOVNT %VEC(3), -(VEC_SIZE * 3)(%r9) - subq $PREFETCHED_LOAD_SIZE, %r9 - cmpq $PREFETCHED_LOAD_SIZE, %rdx - ja L(loop_large_backward) - sfence - /* Store the first 4 * VEC. */ - VMOVU %VEC(4), (%rdi) - VMOVU %VEC(5), VEC_SIZE(%rdi) - VMOVU %VEC(6), (VEC_SIZE * 2)(%rdi) - VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi) - /* Store the last VEC. */ - VMOVU %VEC(8), (%r11) - VZEROUPPER - ret + /* Copy 4 * VEC a time backward with non-temporal stores. */ + PREFETCH_ONE_SET (-1, (%rcx), -PREFETCHED_LOAD_SIZE * 2) + PREFETCH_ONE_SET (-1, (%rcx), -PREFETCHED_LOAD_SIZE * 3) + VMOVU (%rcx), %VEC(0) + VMOVU -VEC_SIZE(%rcx), %VEC(1) + VMOVU -(VEC_SIZE * 2)(%rcx), %VEC(2) + VMOVU -(VEC_SIZE * 3)(%rcx), %VEC(3) + subq $PREFETCHED_LOAD_SIZE, %rcx + subq $PREFETCHED_LOAD_SIZE, %rdx + VMOVNT %VEC(0), (%r9) + VMOVNT %VEC(1), -VEC_SIZE(%r9) + VMOVNT %VEC(2), -(VEC_SIZE * 2)(%r9) + VMOVNT %VEC(3), -(VEC_SIZE * 3)(%r9) + subq $PREFETCHED_LOAD_SIZE, %r9 + cmpq $PREFETCHED_LOAD_SIZE, %rdx + ja L(loop_large_backward) + sfence + /* Store the first 4 * VEC. */ + VMOVU %VEC(4), (%rdi) + VMOVU %VEC(5), VEC_SIZE(%rdi) + VMOVU %VEC(6), (VEC_SIZE * 2)(%rdi) + VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi) + /* Store the last VEC. 
*/ + VMOVU %VEC(8), (%r11) + VZEROUPPER + ret #endif END (MEMMOVE_SYMBOL (__memmove, unaligned_erms)) #if 1 # ifdef USE_MULTIARCH strong_alias (MEMMOVE_SYMBOL (__memmove, unaligned_erms), - MEMMOVE_SYMBOL (__memcpy, unaligned_erms)) + MEMMOVE_SYMBOL (__memcpy, unaligned_erms)) # ifdef SHARED strong_alias (MEMMOVE_SYMBOL (__memmove_chk, unaligned_erms), - MEMMOVE_SYMBOL (__memcpy_chk, unaligned_erms)) + MEMMOVE_SYMBOL (__memcpy_chk, unaligned_erms)) # endif # endif # ifdef SHARED strong_alias (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned), - MEMMOVE_CHK_SYMBOL (__memcpy_chk, unaligned)) + MEMMOVE_CHK_SYMBOL (__memcpy_chk, unaligned)) # endif #endif strong_alias (MEMMOVE_SYMBOL (__memmove, unaligned), - MEMCPY_SYMBOL (__memcpy, unaligned)) + MEMCPY_SYMBOL (__memcpy, unaligned)) diff --git a/utils/memcpy-bench/glibc/memmove.S b/utils/memcpy-bench/glibc/memmove.S index 97e735facff..7bd47b9a03f 100644 --- a/utils/memcpy-bench/glibc/memmove.S +++ b/utils/memcpy-bench/glibc/memmove.S @@ -18,33 +18,33 @@ #include "sysdep.h" -#define VEC_SIZE 16 -#define VEC(i) xmm##i -#define PREFETCHNT prefetchnta -#define VMOVNT movntdq +#define VEC_SIZE 16 +#define VEC(i) xmm##i +#define PREFETCHNT prefetchnta +#define VMOVNT movntdq /* Use movups and movaps for smaller code sizes. 
*/ -#define VMOVU movups -#define VMOVA movaps +#define VMOVU movups +#define VMOVA movaps -#define SECTION(p) p +#define SECTION(p) p #ifdef USE_MULTIARCH # if 0 -# define MEMCPY_SYMBOL(p,s) memcpy +# define MEMCPY_SYMBOL(p,s) memcpy # endif #else # if defined SHARED -# define MEMCPY_SYMBOL(p,s) __memcpy +# define MEMCPY_SYMBOL(p,s) __memcpy # else -# define MEMCPY_SYMBOL(p,s) memcpy +# define MEMCPY_SYMBOL(p,s) memcpy # endif #endif #if !defined USE_MULTIARCH -# define MEMPCPY_SYMBOL(p,s) __mempcpy +# define MEMPCPY_SYMBOL(p,s) __mempcpy #endif #ifndef MEMMOVE_SYMBOL -# define MEMMOVE_CHK_SYMBOL(p,s) p -# define MEMMOVE_SYMBOL(p,s) memmove +# define MEMMOVE_CHK_SYMBOL(p,s) p +# define MEMMOVE_SYMBOL(p,s) memmove #endif #include "memmove-vec-unaligned-erms.S" diff --git a/utils/memcpy-bench/glibc/sysdep.h b/utils/memcpy-bench/glibc/sysdep.h index 099134b2a2f..e255e7488da 100644 --- a/utils/memcpy-bench/glibc/sysdep.h +++ b/utils/memcpy-bench/glibc/sysdep.h @@ -21,7 +21,7 @@ #include "sysdep_x86.h" -#ifdef __ASSEMBLER__ +#ifdef __ASSEMBLER__ /* Syntactic details of assembler. */ @@ -29,11 +29,11 @@ the register as saved relative to %rsp instead of relative to the CFA. Expression is DW_OP_drop, DW_OP_breg7 (%rsp is register 7), sleb128 offset from %rsp. */ -#define cfi_offset_rel_rsp(regn, off) .cfi_escape 0x10, regn, 0x4, 0x13, \ - 0x77, off & 0x7F | 0x80, off >> 7 +#define cfi_offset_rel_rsp(regn, off) .cfi_escape 0x10, regn, 0x4, 0x13, \ + 0x77, off & 0x7F | 0x80, off >> 7 /* If compiled for profiling, call `mcount' at the start of each function. */ -#ifdef PROF +#ifdef PROF /* The mcount code relies on a normal frame pointer being on the stack to locate our caller, so push one just for its benefit. */ #define CALL_MCOUNT \ @@ -45,31 +45,31 @@ popq %rbp; \ cfi_def_cfa(rsp,8); #else -#define CALL_MCOUNT /* Do nothing. */ +#define CALL_MCOUNT /* Do nothing. 
*/ #endif -#define PSEUDO(name, syscall_name, args) \ -lose: \ - jmp JUMPTARGET(syscall_error) \ - .globl syscall_error; \ - ENTRY (name) \ - DO_CALL (syscall_name, args); \ +#define PSEUDO(name, syscall_name, args) \ +lose: \ + jmp JUMPTARGET(syscall_error) \ + .globl syscall_error; \ + ENTRY (name) \ + DO_CALL (syscall_name, args); \ jb lose #undef JUMPTARGET #ifdef SHARED # ifdef BIND_NOW -# define JUMPTARGET(name) *name##@GOTPCREL(%rip) +# define JUMPTARGET(name) *name##@GOTPCREL(%rip) # else -# define JUMPTARGET(name) name##@PLT +# define JUMPTARGET(name) name##@PLT # endif #else /* For static archives, branch to target directly. */ -# define JUMPTARGET(name) name +# define JUMPTARGET(name) name #endif /* Long and pointer size in bytes. */ -#define LP_SIZE 8 +#define LP_SIZE 8 /* Instruction to operate on long and pointer. */ #define LP_OP(insn) insn##q @@ -78,24 +78,24 @@ lose: \ #define ASM_ADDR .quad /* Registers to hold long and pointer. */ -#define RAX_LP rax -#define RBP_LP rbp -#define RBX_LP rbx -#define RCX_LP rcx -#define RDI_LP rdi -#define RDX_LP rdx -#define RSI_LP rsi -#define RSP_LP rsp -#define R8_LP r8 -#define R9_LP r9 -#define R10_LP r10 -#define R11_LP r11 -#define R12_LP r12 -#define R13_LP r13 -#define R14_LP r14 -#define R15_LP r15 +#define RAX_LP rax +#define RBP_LP rbp +#define RBX_LP rbx +#define RCX_LP rcx +#define RDI_LP rdi +#define RDX_LP rdx +#define RSI_LP rsi +#define RSP_LP rsp +#define R8_LP r8 +#define R9_LP r9 +#define R10_LP r10 +#define R11_LP r11 +#define R12_LP r12 +#define R13_LP r13 +#define R14_LP r14 +#define R15_LP r15 -#else /* __ASSEMBLER__ */ +#else /* __ASSEMBLER__ */ /* Long and pointer size in bytes. */ #define LP_SIZE "8" @@ -107,23 +107,23 @@ lose: \ #define ASM_ADDR ".quad" /* Registers to hold long and pointer. 
*/ -#define RAX_LP "rax" -#define RBP_LP "rbp" -#define RBX_LP "rbx" -#define RCX_LP "rcx" -#define RDI_LP "rdi" -#define RDX_LP "rdx" -#define RSI_LP "rsi" -#define RSP_LP "rsp" -#define R8_LP "r8" -#define R9_LP "r9" -#define R10_LP "r10" -#define R11_LP "r11" -#define R12_LP "r12" -#define R13_LP "r13" -#define R14_LP "r14" -#define R15_LP "r15" +#define RAX_LP "rax" +#define RBP_LP "rbp" +#define RBX_LP "rbx" +#define RCX_LP "rcx" +#define RDI_LP "rdi" +#define RDX_LP "rdx" +#define RSI_LP "rsi" +#define RSP_LP "rsp" +#define R8_LP "r8" +#define R9_LP "r9" +#define R10_LP "r10" +#define R11_LP "r11" +#define R12_LP "r12" +#define R13_LP "r13" +#define R14_LP "r14" +#define R15_LP "r15" -#endif /* __ASSEMBLER__ */ +#endif /* __ASSEMBLER__ */ -#endif /* _X86_64_SYSDEP_H */ +#endif /* _X86_64_SYSDEP_H */ diff --git a/utils/memcpy-bench/glibc/sysdep_generic.h b/utils/memcpy-bench/glibc/sysdep_generic.h index 91f78e1b04d..afecea8c356 100644 --- a/utils/memcpy-bench/glibc/sysdep_generic.h +++ b/utils/memcpy-bench/glibc/sysdep_generic.h @@ -28,14 +28,14 @@ #define ASM_LINE_SEP ; -#define strong_alias(original, alias) \ - .globl C_SYMBOL_NAME (alias) ASM_LINE_SEP \ +#define strong_alias(original, alias) \ + .globl C_SYMBOL_NAME (alias) ASM_LINE_SEP \ C_SYMBOL_NAME (alias) = C_SYMBOL_NAME (original) #ifndef C_LABEL /* Define a macro we can use to construct the asm name for a C symbol. */ -# define C_LABEL(name) name##: +# define C_LABEL(name) name##: #endif @@ -47,38 +47,38 @@ # endif # ifndef JUMPTARGET -# define JUMPTARGET(sym) sym +# define JUMPTARGET(sym) sym # endif #endif -/* Makros to generate eh_frame unwind information. */ +/* Macros to generate eh_frame unwind information. 
*/ #ifdef __ASSEMBLER__ -# define cfi_startproc .cfi_startproc -# define cfi_endproc .cfi_endproc -# define cfi_def_cfa(reg, off) .cfi_def_cfa reg, off -# define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg -# define cfi_def_cfa_offset(off) .cfi_def_cfa_offset off -# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off -# define cfi_offset(reg, off) .cfi_offset reg, off -# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off -# define cfi_register(r1, r2) .cfi_register r1, r2 -# define cfi_return_column(reg) .cfi_return_column reg -# define cfi_restore(reg) .cfi_restore reg -# define cfi_same_value(reg) .cfi_same_value reg -# define cfi_undefined(reg) .cfi_undefined reg -# define cfi_remember_state .cfi_remember_state -# define cfi_restore_state .cfi_restore_state -# define cfi_window_save .cfi_window_save -# define cfi_personality(enc, exp) .cfi_personality enc, exp -# define cfi_lsda(enc, exp) .cfi_lsda enc, exp +# define cfi_startproc .cfi_startproc +# define cfi_endproc .cfi_endproc +# define cfi_def_cfa(reg, off) .cfi_def_cfa reg, off +# define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg +# define cfi_def_cfa_offset(off) .cfi_def_cfa_offset off +# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off +# define cfi_offset(reg, off) .cfi_offset reg, off +# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off +# define cfi_register(r1, r2) .cfi_register r1, r2 +# define cfi_return_column(reg) .cfi_return_column reg +# define cfi_restore(reg) .cfi_restore reg +# define cfi_same_value(reg) .cfi_same_value reg +# define cfi_undefined(reg) .cfi_undefined reg +# define cfi_remember_state .cfi_remember_state +# define cfi_restore_state .cfi_restore_state +# define cfi_window_save .cfi_window_save +# define cfi_personality(enc, exp) .cfi_personality enc, exp +# define cfi_lsda(enc, exp) .cfi_lsda enc, exp #else /* ! 
ASSEMBLER */ # define CFI_STRINGIFY(Name) CFI_STRINGIFY2 (Name) # define CFI_STRINGIFY2(Name) #Name -# define CFI_STARTPROC ".cfi_startproc" -# define CFI_ENDPROC ".cfi_endproc" -# define CFI_DEF_CFA(reg, off) \ +# define CFI_STARTPROC ".cfi_startproc" +# define CFI_ENDPROC ".cfi_endproc" +# define CFI_DEF_CFA(reg, off) \ ".cfi_def_cfa " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off) # define CFI_DEF_CFA_REGISTER(reg) \ ".cfi_def_cfa_register " CFI_STRINGIFY(reg) diff --git a/utils/memcpy-bench/glibc/sysdep_x86.h b/utils/memcpy-bench/glibc/sysdep_x86.h index a3fecd01268..7abb350242f 100644 --- a/utils/memcpy-bench/glibc/sysdep_x86.h +++ b/utils/memcpy-bench/glibc/sysdep_x86.h @@ -34,18 +34,18 @@ enum cf_protection_level */ /* Set if CF_BRANCH (IBT) is enabled. */ -#define X86_FEATURE_1_IBT (1U << 0) +#define X86_FEATURE_1_IBT (1U << 0) /* Set if CF_RETURN (SHSTK) is enabled. */ -#define X86_FEATURE_1_SHSTK (1U << 1) +#define X86_FEATURE_1_SHSTK (1U << 1) #ifdef __CET__ -# define CET_ENABLED 1 -# define IBT_ENABLED (__CET__ & X86_FEATURE_1_IBT) -# define SHSTK_ENABLED (__CET__ & X86_FEATURE_1_SHSTK) +# define CET_ENABLED 1 +# define IBT_ENABLED (__CET__ & X86_FEATURE_1_IBT) +# define SHSTK_ENABLED (__CET__ & X86_FEATURE_1_SHSTK) #else -# define CET_ENABLED 0 -# define IBT_ENABLED 0 -# define SHSTK_ENABLED 0 +# define CET_ENABLED 0 +# define IBT_ENABLED 0 +# define SHSTK_ENABLED 0 #endif /* Offset for fxsave/xsave area used by _dl_runtime_resolve. Also need @@ -57,7 +57,7 @@ enum cf_protection_level #define STATE_SAVE_MASK \ ((1 << 1) | (1 << 2) | (1 << 3) | (1 << 5) | (1 << 6) | (1 << 7)) -#ifdef __ASSEMBLER__ +#ifdef __ASSEMBLER__ /* Syntactic details of assembler. */ @@ -73,18 +73,18 @@ enum cf_protection_level #define ASM_SIZE_DIRECTIVE(name) .size name,.-name; /* Define an entry point visible from C. 
*/ -#define ENTRY(name) \ - .globl C_SYMBOL_NAME(name); \ - .type C_SYMBOL_NAME(name),@function; \ - .align ALIGNARG(4); \ - C_LABEL(name) \ - cfi_startproc; \ - _CET_ENDBR; \ +#define ENTRY(name) \ + .globl C_SYMBOL_NAME(name); \ + .type C_SYMBOL_NAME(name),@function; \ + .align ALIGNARG(4); \ + C_LABEL(name) \ + cfi_startproc; \ + _CET_ENDBR; \ CALL_MCOUNT -#undef END -#define END(name) \ - cfi_endproc; \ +#undef END +#define END(name) \ + cfi_endproc; \ ASM_SIZE_DIRECTIVE(name) #define ENTRY_CHK(name) ENTRY (name) @@ -93,21 +93,21 @@ enum cf_protection_level /* Since C identifiers are not normally prefixed with an underscore on this system, the asm identifier `syscall_error' intrudes on the C name space. Make sure we use an innocuous name. */ -#define syscall_error __syscall_error -#define mcount _mcount +#define syscall_error __syscall_error +#define mcount _mcount -#undef PSEUDO_END -#define PSEUDO_END(name) \ +#undef PSEUDO_END +#define PSEUDO_END(name) \ END (name) /* Local label name for asm code. */ #ifndef L /* ELF-like local names start with `.L'. 
*/ -# define L(name) .L##name +# define L(name) .L##name #endif #define atom_text_section .section ".text.atom", "ax" -#endif /* __ASSEMBLER__ */ +#endif /* __ASSEMBLER__ */ -#endif /* _X86_SYSDEP_H */ +#endif /* _X86_SYSDEP_H */ From 8c2d65242a81b68f9ca520cf015e53933a52eaca Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Mar 2021 23:24:22 +0300 Subject: [PATCH 186/333] Fix style --- utils/memcpy-bench/glibc/asm-syntax.h | 2 ++ utils/memcpy-bench/glibc/dwarf2.h | 4 +++- utils/memcpy-bench/glibc/sysdep.h | 2 ++ utils/memcpy-bench/glibc/sysdep_generic.h | 2 ++ utils/memcpy-bench/glibc/sysdep_x86.h | 2 ++ 5 files changed, 11 insertions(+), 1 deletion(-) diff --git a/utils/memcpy-bench/glibc/asm-syntax.h b/utils/memcpy-bench/glibc/asm-syntax.h index 6e299c1fec2..9d65213ba30 100644 --- a/utils/memcpy-bench/glibc/asm-syntax.h +++ b/utils/memcpy-bench/glibc/asm-syntax.h @@ -1,3 +1,5 @@ +#pragma once + /* Definitions for x86 syntax variations. Copyright (C) 1992-2020 Free Software Foundation, Inc. This file is part of the GNU C Library. Its master source is NOT part of diff --git a/utils/memcpy-bench/glibc/dwarf2.h b/utils/memcpy-bench/glibc/dwarf2.h index 2be827f00ae..b0536c97e5e 100644 --- a/utils/memcpy-bench/glibc/dwarf2.h +++ b/utils/memcpy-bench/glibc/dwarf2.h @@ -1,3 +1,5 @@ +#pragma once + /* Declarations and definitions of codes relating to the DWARF2 symbolic debugging information format. Copyright (C) 1992-2020 Free Software Foundation, Inc. @@ -563,7 +565,7 @@ enum dwarf_macinfo_record_type }; #endif /* !ASSEMBLER */ - + /* @@@ For use with GNU frame unwind information. */ #define DW_EH_PE_absptr 0x00 diff --git a/utils/memcpy-bench/glibc/sysdep.h b/utils/memcpy-bench/glibc/sysdep.h index e255e7488da..2f43d688df9 100644 --- a/utils/memcpy-bench/glibc/sysdep.h +++ b/utils/memcpy-bench/glibc/sysdep.h @@ -1,3 +1,5 @@ +#pragma once + /* Assembler macros for x86-64. Copyright (C) 2001-2020 Free Software Foundation, Inc. 
This file is part of the GNU C Library. diff --git a/utils/memcpy-bench/glibc/sysdep_generic.h b/utils/memcpy-bench/glibc/sysdep_generic.h index afecea8c356..0cb5bca4102 100644 --- a/utils/memcpy-bench/glibc/sysdep_generic.h +++ b/utils/memcpy-bench/glibc/sysdep_generic.h @@ -1,3 +1,5 @@ +#pragma once + /* Generic asm macros used on many machines. Copyright (C) 1991-2020 Free Software Foundation, Inc. This file is part of the GNU C Library. diff --git a/utils/memcpy-bench/glibc/sysdep_x86.h b/utils/memcpy-bench/glibc/sysdep_x86.h index 7abb350242f..4469ed2e885 100644 --- a/utils/memcpy-bench/glibc/sysdep_x86.h +++ b/utils/memcpy-bench/glibc/sysdep_x86.h @@ -1,3 +1,5 @@ +#pragma once + /* Assembler macros for x86. Copyright (C) 2017-2020 Free Software Foundation, Inc. This file is part of the GNU C Library. From 4be01d927dcb81fc824f3a9b6367456809432029 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Sun, 14 Mar 2021 23:57:31 +0300 Subject: [PATCH 187/333] Bump CI From f092d22a253f2c471f28358d81a77dc75385936f Mon Sep 17 00:00:00 2001 From: George Date: Mon, 15 Mar 2021 00:25:21 +0300 Subject: [PATCH 188/333] Updated description --- .../external-authenticators/ldap.md | 14 ++--- .../external-authenticators/ldap.md | 54 +++++++++---------- 2 files changed, 33 insertions(+), 35 deletions(-) diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md index cb8aa07dc41..98d6e18b72e 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -39,11 +39,11 @@ Note, that you can define multiple LDAP servers inside the `ldap_servers` sectio - `host` — LDAP server hostname or IP, this parameter is mandatory and cannot be empty. - `port` — LDAP server port, default is `636` if `enable_tls` is set to `true`, `389` otherwise. -- `bind_dn` — template used to construct the DN to bind to. 
+- `bind_dn` — Template used to construct the DN to bind to. - The resulting DN will be constructed by replacing all `{user_name}` substrings of the template with the actual user name during each authentication attempt. -- `verification_cooldown` — a period of time, in seconds, after a successful bind attempt, during which the user will be assumed to be successfully authenticated for all consecutive requests without contacting the LDAP server. +- `verification_cooldown` — A period of time, in seconds, after a successful bind attempt, during which the user will be assumed to be successfully authenticated for all consecutive requests without contacting the LDAP server. - Specify `0` (the default) to disable caching and force contacting the LDAP server for each authentication request. -- `enable_tls` — a flag to trigger the use of the secure connection to the LDAP server. +- `enable_tls` — A flag to trigger the use of the secure connection to the LDAP server. - Specify `no` for plain text `ldap://` protocol (not recommended). - Specify `yes` for LDAP over SSL/TLS `ldaps://` protocol (recommended, the default). - Specify `starttls` for legacy StartTLS protocol (plain text `ldap://` protocol, upgraded to TLS). @@ -127,20 +127,20 @@ Note that `my_ldap_server` referred in the `ldap` section inside the `user_direc **Parameters** -- `server` — One of LDAP server names defined in the `ldap_servers` config section above. This parameter is mandatory and cannot be empty. Одно из имен +- `server` — One of LDAP server names defined in the `ldap_servers` config section above. This parameter is mandatory and cannot be empty. - `roles` — Section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. - If no roles are specified here or assigned during role mapping (below), user will not be able to perform any actions after authentication. - `role_mapping` — Section with LDAP search parameters and mapping rules. 
- - When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` and the name of the logged in user. For each entry found during that search, the value of the specified attribute is extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. + - When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` and the name of the logged-in user. For each entry found during that search, the value of the specified attribute is extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. - There can be multiple `role_mapping` sections defined inside the same `ldap` section. All of them will be applied. - `base_dn` — Template used to construct the base DN for the LDAP search. - The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}` substrings of the template with the actual user name and bind DN during each LDAP search. - `scope` — Scope of the LDAP search. - Accepted values are: `base`, `one_level`, `children`, `subtree` (the default). - `search_filter` — Template used to construct the search filter for the LDAP search. - - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{base_dn}` substrings of the template with the actual user name, bind DN, and base DN during each LDAP search. 
+ - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}` and `{base_dn}` substrings of the template with the actual user name, bind DN and base DN during each LDAP search. - Note, that the special characters must be escaped properly in XML. - `attribute` — Attribute name whose values will be returned by the LDAP search. - - `prefix` — Prefix, that will be expected to be in front of each string in the original list of strings returned by the LDAP search. Prefix will be removed from the original strings and resulting strings will be treated as local role names. Empty, by default. + - `prefix` — Prefix, that will be expected to be in front of each string in the original list of strings returned by the LDAP search. The prefix will be removed from the original strings and the resulting strings will be treated as local role names. Empty by default. [Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap.md) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 3d71ec1eba3..f44c0ff2120 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -1,11 +1,11 @@ # LDAP {#external-authenticators-ldap} -Для аутентификации пользователей ClickHouse можно использовать сервер LDAP. Можно использовать два подхода: +Для аутентификации пользователей ClickHouse можно использовать сервер LDAP. Существует два подхода: - Использовать LDAP как внешний аутентификатор для существующих пользователей, которые определены в `users.xml` или в локальных путях управления контролем. - Использовать LDAP как внешний пользовательский каталог и разрешить аутентификацию локально неопределенных пользователей, если они есть на LDAP сервере. -Для обоих подходов необходимо определить в конфиге ClickHouse внутренне названный LDAP сервер, чтобы другие части конфига могли ссылаться на него. 
+Для обоих подходов необходимо определить в конфиге ClickHouse с внутренним именем LDAP сервер, чтобы другие части конфига могли ссылаться на него. ## Определение LDAP сервера {#ldap-server-definition} @@ -33,16 +33,16 @@
``` -Обратите внимание, что можно определить несколько LDAP серверов внутри секции `ldap_servers` используя различные имена. +Обратите внимание, что можно определить несколько LDAP серверов внутри секции `ldap_servers`, используя различные имена. **Параметры** -- `host` — имя хоста сервера LDAP или его IP. Этот параметр обязательный и не может быть пустым. -- `port` — порт сервера LDAP. По-умолчанию: при значении `true` настройки `enable_tls` — `636`, иначе `389`. +- `host` — имя хоста сервера LDAP или его IP. Этот параметр обязательный и не может быть оставлен пустым. +- `port` — порт сервера LDAP. По-умолчанию: `636` при значении `true` настройки `enable_tls`, иначе `389`. - `bind_dn` — шаблон для создания DN для привязки. - - конечный DN будет создан заменой всех подстрок `{user_name}` шаблона на фактическое имя пользователя при каждой попытке аутентификации. -- `verification_cooldown` — промежуток времени (в секундах) после успешной попытки привязки, в течение которого пользователь будет считаться успешно аутентифицированным без с сервером LDAP для всех последующих запросов. - - Укажите `0` (по-умолчанию), чтобы отключить кеширования и заставить связываться с сервером LDAP для каждого запроса аутетификации. + - Конечный DN будет создан заменой всех подстрок `{user_name}` шаблона на фактическое имя пользователя при каждой попытке аутентификации. +- `verification_cooldown` — промежуток времени (в секундах) после успешной попытки привязки, в течение которого пользователь будет считаться успешно аутентифицированным, и сможет совершать запросы без контакта с серверов LDAP. + - Укажите `0` (по-умолчанию), чтобы отключить кеширование и заставить связываться с сервером LDAP для каждого запроса аутентификации. - `enable_tls` — флаг, включающий использование защищенного соединения с сервером LDAP. - Укажите `no` для текстового `ldap://` протокола (не рекомендовано). - Укажите `yes` для LDAP через SSL/TLS `ldaps://` протокола (рекомендовано, используется по-умолчанию). 
@@ -51,7 +51,7 @@ - Принимаемые значения: `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, `tls1.2` (по-умолчанию). - `tls_require_cert` — поведение при проверке сертификата SSL/TLS. - Принимаемые значения: `never`, `allow`, `try`, `demand` (по-умолчанию). -- `tls_cert_file` — путь до файла сертификата. +- `tls_cert_file` — путь к файлу сертификата. - `tls_key_file` — путь к файлу ключа сертификата. - `tls_ca_cert_file` — путь к файлу ЦС сертификата. - `tls_ca_cert_dir` — путь к каталогу, содержащая сертификаты ЦС. @@ -61,7 +61,7 @@ Удаленный сервер LDAP можно использовать как метод верификации паролей локально определенных пользователей (пользователей, которые определены в `users.xml` или в локальных путях управления контролем). Для этого укажите имя определенного до этого сервера LDAP вместо `password` или другой похожей секции в определении пользователя. -При каждой попытке авторизации, ClickHouse пытается "привязаться" к DN, указанному в [определение LDAP сервера](#ldap-server-definition) параметром `bind_dn`, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается аутентифицированным. Обычно это называют методом "простой привязки". +При каждой попытке авторизации, ClickHouse пытается "привязаться" к DN, указанному в [определении LDAP сервера](#ldap-server-definition) параметром `bind_dn`, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается аутентифицированным. Обычно это называют методом "простой привязки". **Например** @@ -94,7 +94,7 @@ CREATE USER my_user IDENTIFIED WITH ldap_server BY 'my_ldap_server'; В добавок к локально определенным пользователям, удаленный LDAP сервер может быть использован как источник определения пользователей. Для этого укажите имя определенного до этого сервера LDAP (см. [Определение LDAP сервера](#ldap-server-definition)) в секции `ldap` внутри секции `users_directories` файла `config.xml`. 
-При каждой попытке авторизации, ClicHouse пытается локально найти определение пользователя и авторизовать его как обычно. Если определение не будет найдено, ClickHouse предполагает, что оно находится во внешнем LDAP каталоге, и попытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю будут присвоены роли из списка, указанного в секции `roles`. Кроме того, может быть выполнен LDAP поиск, а его результаты могут быть преобразованы в имена ролей и присвоены пользователям, если была настроена секция `role_mapping`. Все это работает при условии, что SQL-ориентированное [Управлением доступом](../access-rights.md#access-control) включено, а роли созданы выражением[CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). +При каждой попытке авторизации, ClicHouse пытается локально найти определение пользователя и авторизовать его как обычно. Если определение не будет найдено, ClickHouse предполагает, что оно находится во внешнем LDAP каталоге, и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю будут присвоены роли из списка, указанного в секции `roles`. Кроме того, может быть выполнен LDAP поиск, а его результаты могут быть преобразованы в имена ролей и присвоены пользователям, если была настроена секция `role_mapping`. Все это работает при условии, что SQL-ориентированное [Управлением доступом](../access-rights.md#access-control) включено, а роли созданы выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). 
**Пример** @@ -127,22 +127,20 @@ CREATE USER my_user IDENTIFIED WITH ldap_server BY 'my_ldap_server'; **Параметры** -- `server` — One of LDAP server names defined in the `ldap_servers` config section above. - This parameter is mandatory and cannot be empty. -- `roles` — Section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. - - If no roles are specified here or assigned during role mapping (below), user will not be able to perform any actions after authentication. -- `role_mapping` — Section with LDAP search parameters and mapping rules. - - When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` and the name of the logged in user. For each entry found during that search, the value of the specified attribute is extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. - - There can be multiple `role_mapping` sections defined inside the same `ldap` section. All of them will be applied. - - `base_dn` — Template used to construct the base DN for the LDAP search. - - The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}` substrings of the template with the actual user name and bind DN during each LDAP search. - - `scope` — Scope of the LDAP search. - - Accepted values are: `base`, `one_level`, `children`, `subtree` (the default). - - `search_filter` — Template used to construct the search filter for the LDAP search. - - The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{base_dn}` substrings of the template with the actual user name, bind DN, and base DN during each LDAP search. 
- - Note, that the special characters must be escaped properly in XML. - - `attribute` — Attribute name whose values will be returned by the LDAP search. - - `prefix` — Prefix, that will be expected to be in front of each string in the original list of strings returned by the LDAP search. Prefix will be removed from the original strings and resulting strings will be treated as local role names. Empty, by default. - +- `server` — одно из имен сервера LDAP, определенных в секции конфига `ldap_servers выше. Этот параметр обязательный и не может быть оставлен пустым. +- `roles` — секция со списком локально определенных ролей, которые будут присвоены каждому пользователю, полученному от сервера LDAP. + - Если роли не указаны здесь или в секции `role_mapping` (ниже), пользователь не сможет выполнять никаких операций после аутентификации. +- `role_mapping` — секция c параметрами LDAP поиска и правилами отображения. + - При аутентификации пользователя, пока еще связанного с LDAP, производится LDAP поиск с помощью `search_filter` и имени этого пользователя. Для каждой записи, найденной в ходе поиска, выделяется значение указанного атрибута. У каждого атрибута, имеющего указанный префикс, удаляется этот префикс, а остальная часть значения становится именем локальной роли, определенной в ClickHouse, причем предполагается, что эта роль была создана выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) до этого. + - Внутри одной секции `ldap` может быть несколько секций `role_mapping`. Все они будут применены. + - `base_dn` — шаблон, который используется для создания базового DN для LDAP поиска. + - конечный DN будет создан заменой всех подстрок `{user_name}` и `{bind_dn}` шаблона на фактическое имя пользователя и DN привязки соответственно при каждом LDAP поиске. + - `scope` — Область LDAP поиска. + - Принимаемые значения: `base`, `one_level`, `children`, `subtree` (по-умолчанию). 
+ - `search_filter` — шаблон, который используется для создания фильтра для каждого LDAP поиска. + - Конечный фильтр будет создан заменой всех подстрок `{user_name}`, `{bind_dn}` и `{base_dn}` шаблона на фактическое имя пользователя, DN привязи и базовый DN при соответственно каждом LDAP поиске. + - Обратите внимание, что специальные символы должны быть правильно экранированы в XML. + - `attribute` — имя атрибута, значение которого будет возвращаться LDAP поиском. + - `prefix` — префикс, который, как предполагается, будет находиться перед началом каждой строки в исходном списке строк, возвращаемых LDAP поиском. Префикс будет удален из исходных строк, а сами они будут рассматриваться как имена локальных ролей. По-умолчанию пусто. [Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap.md) From 6402b2c33c45b3e04d81515baa05ffdae1cdce4b Mon Sep 17 00:00:00 2001 From: George Date: Mon, 15 Mar 2021 00:39:24 +0300 Subject: [PATCH 189/333] Small fixes --- docs/en/operations/external-authenticators/ldap.md | 2 +- docs/ru/operations/external-authenticators/ldap.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md index 98d6e18b72e..4c2748d6141 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -87,7 +87,7 @@ When SQL-driven [Access Control and Account Management](../access-rights.md#acce Query: ```sql -CREATE USER my_user IDENTIFIED WITH ldap_server BY 'my_ldap_server'; +CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; ``` ## LDAP Exernal User Directory {#ldap-external-user-directory} diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index f44c0ff2120..f13c3b99def 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ 
b/docs/ru/operations/external-authenticators/ldap.md @@ -55,7 +55,7 @@ - `tls_key_file` — путь к файлу ключа сертификата. - `tls_ca_cert_file` — путь к файлу ЦС сертификата. - `tls_ca_cert_dir` — путь к каталогу, содержащая сертификаты ЦС. -- `tls_cipher_suite` — разрешить набор шифров (в нотации OpenSSL). +- `tls_cipher_suite` — разрешенный набор шифров (в нотации OpenSSL). ## LDAP внешний аутентификатор {#ldap-external-authenticator} @@ -87,7 +87,7 @@ Запрос: ```sql -CREATE USER my_user IDENTIFIED WITH ldap_server BY 'my_ldap_server'; +CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; ``` ## Внешний пользовательский каталог LDAP {#ldap-external-user-directory} From 832f041e23034a40a383bd4199318dd64fb2bdfb Mon Sep 17 00:00:00 2001 From: George Date: Mon, 15 Mar 2021 00:44:36 +0300 Subject: [PATCH 190/333] Fixed toc --- docs/ru/operations/external-authenticators/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ru/operations/external-authenticators/index.md b/docs/ru/operations/external-authenticators/index.md index db5c89a3d66..ffe705dffb2 100644 --- a/docs/ru/operations/external-authenticators/index.md +++ b/docs/ru/operations/external-authenticators/index.md @@ -1,7 +1,7 @@ --- -toc_folder_title: \u0412\u043d\u0435\u0448\u043d\u0438\u0435\u0020\u0430\u0443\u0442\u0435\u043d\u0442\u0438\u0444\u0438\u043a\u0430\u0442\u043e\u0440\u044b\u0020\u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u0435\u0439\u0020\u0438\u0020\u043a\u0430\u0442\u0430\u043b\u043e\u0433\u0438 +toc_folder_title: "\u0412\u043d\u0435\u0448\u043d\u0438\u0435\u0020\u0430\u0443\u0442\u0435\u043d\u0442\u0438\u0444\u0438\u043a\u0430\u0442\u043e\u0440\u044b\u0020\u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u0435\u0439\u0020\u0438\u0020\u043a\u0430\u0442\u0430\u043b\u043e\u0433\u0438" toc_priority: 48 -toc_title: \u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435 +toc_title: 
"\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435" --- # Внешние аутентификаторы пользователей и каталоги {#external-authenticators} From 0a27c814901c17a906bc66ba7829f854eff592c9 Mon Sep 17 00:00:00 2001 From: George Date: Mon, 15 Mar 2021 00:46:02 +0300 Subject: [PATCH 191/333] fixed index.md --- docs/ru/operations/external-authenticators/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/index.md b/docs/ru/operations/external-authenticators/index.md index ffe705dffb2..22ecb9bc2d7 100644 --- a/docs/ru/operations/external-authenticators/index.md +++ b/docs/ru/operations/external-authenticators/index.md @@ -10,6 +10,6 @@ ClickHouse поддерживает аунтетификацию и управл Поддерживаются следующие внешние аутентификаторы и каталоги: -- [LDAP](./ldap.md#external-authenticators-ldap) [Authenticator](./ldap.md#ldap-external-authenticator) и [Directory](./ldap.md#ldap-external-user-directory) +- [LDAP](./ldap.md#external-authenticators-ldap) [аутентификатор](./ldap.md#ldap-external-authenticator) и [каталог](./ldap.md#ldap-external-user-directory) [Original article](https://clickhouse.tech/docs/ru/operations/external-authenticators/index.md) From 8e86067d6da3107c073030e788e9cbd66eabd922 Mon Sep 17 00:00:00 2001 From: George Date: Mon, 15 Mar 2021 01:05:58 +0300 Subject: [PATCH 192/333] Some fixes --- .../operations/external-authenticators/ldap.md | 8 +++++--- .../operations/external-authenticators/index.md | 2 +- .../operations/external-authenticators/ldap.md | 16 +++++++++------- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md index ebad4f2dbe8..e528e2a7c07 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -9,7 +9,9 @@ For both of these approaches, an internally named LDAP server must be defined in ## LDAP Server 
Definition {#ldap-server-definition} -To define LDAP server you must add `ldap_servers` section to the `config.xml`. For example, +To define LDAP server you must add `ldap_servers` section to the `config.xml`. + +**Example** ```xml @@ -87,9 +89,9 @@ When SQL-driven [Access Control and Account Management](../access-rights.md#acce Query: - +```sql CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; - +``` ## LDAP Exernal User Directory {#ldap-external-user-directory} diff --git a/docs/ru/operations/external-authenticators/index.md b/docs/ru/operations/external-authenticators/index.md index 22ecb9bc2d7..6b75e864fb8 100644 --- a/docs/ru/operations/external-authenticators/index.md +++ b/docs/ru/operations/external-authenticators/index.md @@ -12,4 +12,4 @@ ClickHouse поддерживает аунтетификацию и управл - [LDAP](./ldap.md#external-authenticators-ldap) [аутентификатор](./ldap.md#ldap-external-authenticator) и [каталог](./ldap.md#ldap-external-user-directory) -[Original article](https://clickhouse.tech/docs/ru/operations/external-authenticators/index.md) +[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/external-authenticators/index.md) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index f13c3b99def..7f901898a99 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -5,11 +5,13 @@ - Использовать LDAP как внешний аутентификатор для существующих пользователей, которые определены в `users.xml` или в локальных путях управления контролем. - Использовать LDAP как внешний пользовательский каталог и разрешить аутентификацию локально неопределенных пользователей, если они есть на LDAP сервере. -Для обоих подходов необходимо определить в конфиге ClickHouse с внутренним именем LDAP сервер, чтобы другие части конфига могли ссылаться на него. 
+Для обоих подходов необходимо определить в конфиге ClickHouse LDAP сервер с внутренним именем, чтобы другие части конфига могли ссылаться на него. ## Определение LDAP сервера {#ldap-server-definition} -Чтобы определить LDAP сервер, необходимо добавить секцию `ldap_servers` в `config.xml`. Например: +Чтобы определить LDAP сервер, необходимо добавить секцию `ldap_servers` в `config.xml`. + +**Пример** ```xml @@ -57,13 +59,13 @@ - `tls_ca_cert_dir` — путь к каталогу, содержащая сертификаты ЦС. - `tls_cipher_suite` — разрешенный набор шифров (в нотации OpenSSL). -## LDAP внешний аутентификатор {#ldap-external-authenticator} +## Внешний аутентификатор LDAP {#ldap-external-authenticator} Удаленный сервер LDAP можно использовать как метод верификации паролей локально определенных пользователей (пользователей, которые определены в `users.xml` или в локальных путях управления контролем). Для этого укажите имя определенного до этого сервера LDAP вместо `password` или другой похожей секции в определении пользователя. При каждой попытке авторизации, ClickHouse пытается "привязаться" к DN, указанному в [определении LDAP сервера](#ldap-server-definition) параметром `bind_dn`, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается аутентифицированным. Обычно это называют методом "простой привязки". -**Например** +**Пример** ```xml @@ -94,7 +96,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; В добавок к локально определенным пользователям, удаленный LDAP сервер может быть использован как источник определения пользователей. Для этого укажите имя определенного до этого сервера LDAP (см. [Определение LDAP сервера](#ldap-server-definition)) в секции `ldap` внутри секции `users_directories` файла `config.xml`. -При каждой попытке авторизации, ClicHouse пытается локально найти определение пользователя и авторизовать его как обычно. 
Если определение не будет найдено, ClickHouse предполагает, что оно находится во внешнем LDAP каталоге, и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю будут присвоены роли из списка, указанного в секции `roles`. Кроме того, может быть выполнен LDAP поиск, а его результаты могут быть преобразованы в имена ролей и присвоены пользователям, если была настроена секция `role_mapping`. Все это работает при условии, что SQL-ориентированное [Управлением доступом](../access-rights.md#access-control) включено, а роли созданы выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). +При каждой попытке авторизации ClicHouse пытается локально найти определение пользователя и авторизовать его как обычно. Если определение не будет найдено, ClickHouse предполагает, что оно находится во внешнем LDAP каталоге, и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю будут присвоены роли из списка, указанного в секции `roles`. Кроме того, может быть выполнен LDAP поиск, а его результаты могут быть преобразованы в имена ролей и присвоены пользователям, если была настроена секция `role_mapping`. Все это работает при условии, что SQL-ориентированное [Управлением доступом](../access-rights.md#access-control) включено, а роли созданы выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). **Пример** @@ -127,7 +129,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; **Параметры** -- `server` — одно из имен сервера LDAP, определенных в секции конфига `ldap_servers выше. Этот параметр обязательный и не может быть оставлен пустым. 
+- `server` — одно из имен сервера LDAP, определенного в секции конфига `ldap_servers` выше. Этот параметр обязательный и не может быть оставлен пустым. - `roles` — секция со списком локально определенных ролей, которые будут присвоены каждому пользователю, полученному от сервера LDAP. - Если роли не указаны здесь или в секции `role_mapping` (ниже), пользователь не сможет выполнять никаких операций после аутентификации. - `role_mapping` — секция c параметрами LDAP поиска и правилами отображения. @@ -143,4 +145,4 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; - `attribute` — имя атрибута, значение которого будет возвращаться LDAP поиском. - `prefix` — префикс, который, как предполагается, будет находиться перед началом каждой строки в исходном списке строк, возвращаемых LDAP поиском. Префикс будет удален из исходных строк, а сами они будут рассматриваться как имена локальных ролей. По-умолчанию пусто. -[Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap.md) +[Оригинальная статья](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap.md) From 5b7ef512f58f5697af0820a5c470a9c7c06b03d4 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 15 Mar 2021 09:14:30 +0300 Subject: [PATCH 193/333] Update PostgreSQLReplicaConnection.h --- src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h index 289183d8451..9465d4a119b 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h @@ -16,8 +16,8 @@ public: PostgreSQLReplicaConnection( const Poco::Util::AbstractConfiguration & config, - const String & config_name, - const size_t num_retries = POSTGRESQL_CONNECTION_DEFAULT_RETRIES_NUM); + const String & 
config_prefix, + const size_t num_retries_ = POSTGRESQL_CONNECTION_DEFAULT_RETRIES_NUM); PostgreSQLReplicaConnection(const PostgreSQLReplicaConnection & other); From 679b32ee547ee4b43f6807476b9fe6ae6345711c Mon Sep 17 00:00:00 2001 From: Pysaoke Date: Mon, 15 Mar 2021 16:11:57 +0800 Subject: [PATCH 194/333] Update query_log.md Improve the translation of `query_log.md` in Chinese documents --- docs/zh/operations/system-tables/query_log.md | 125 +++++++++--------- 1 file changed, 63 insertions(+), 62 deletions(-) diff --git a/docs/zh/operations/system-tables/query_log.md b/docs/zh/operations/system-tables/query_log.md index 6d8d7a39699..aa954fc4845 100644 --- a/docs/zh/operations/system-tables/query_log.md +++ b/docs/zh/operations/system-tables/query_log.md @@ -5,86 +5,87 @@ machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3 # system.query_log {#system_tables-query_log} -包含有关已执行查询的信息,例如,开始时间、处理持续时间、错误消息。 +包含已执行查询的相关信息,例如:开始时间、处理持续时间、错误消息。 !!! note "注" 此表不包含以下内容的摄取数据 `INSERT` 查询。 -您可以更改查询日志记录的设置 [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) 服务器配置部分。 +您可以更改query_log的设置,在服务器配置的 [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) 部分。 -您可以通过设置禁用查询日志记录 [log_queries=0](../../operations/settings/settings.md#settings-log-queries). 我们不建议关闭日志记录,因为此表中的信息对于解决问题很重要。 +您可以通过设置 [log_queries=0](../../operations/settings/settings.md#settings-log-queries)来禁用query_log. 
我们不建议关闭此日志,因为此表中的信息对于解决问题很重要。 -数据的冲洗周期设置在 `flush_interval_milliseconds` 的参数 [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) 服务器设置部分。 要强制冲洗,请使用 [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) 查询。 +数据刷新的周期可通过 `flush_interval_milliseconds` 参数来设置 [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) 。 要强制刷新,请使用 [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs)。 -ClickHouse不会自动从表中删除数据。 看 [导言](../../operations/system-tables/index.md#system-tables-introduction) 欲了解更多详情。 +ClickHouse不会自动从表中删除数据。更多详情请看 [introduction](../../operations/system-tables/index.md#system-tables-introduction) 。 -该 `system.query_log` 表注册两种查询: +`system.query_log` 表注册两种查询: 1. 客户端直接运行的初始查询。 2. 由其他查询启动的子查询(用于分布式查询执行)。 对于这些类型的查询,有关父查询的信息显示在 `initial_*` 列。 -每个查询创建一个或两个行中 `query_log` 表,这取决于状态(见 `type` 列)的查询: +每个查询在`query_log` 表中创建一或两行记录,这取决于查询的状态(见 `type` 列): -1. 如果查询执行成功,则两行具有 `QueryStart` 和 `QueryFinish` 创建类型。 -2. 如果在查询处理过程中发生错误,两个事件与 `QueryStart` 和 `ExceptionWhileProcessing` 创建类型。 -3. 如果在启动查询之前发生错误,则单个事件具有 `ExceptionBeforeStart` 创建类型。 +1. 如果查询执行成功,会创建type分别为`QueryStart` 和 `QueryFinish` 的两行记录。 +2. 如果在查询处理过程中发生错误,会创建type分别为`QueryStart` 和 `ExceptionWhileProcessing` 的两行记录。 +3. 如果在启动查询之前发生错误,则创建一行type为`ExceptionBeforeStart` 的记录。 列: -- `type` ([枚举8](../../sql-reference/data-types/enum.md)) — Type of an event that occurred when executing the query. Values: - - `'QueryStart' = 1` — Successful start of query execution. - - `'QueryFinish' = 2` — Successful end of query execution. - - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution. - - `'ExceptionWhileProcessing' = 4` — Exception during the query execution. -- `event_date` ([日期](../../sql-reference/data-types/date.md)) — Query starting date. 
-- `event_time` ([日期时间](../../sql-reference/data-types/datetime.md)) — Query starting time. -- `query_start_time` ([日期时间](../../sql-reference/data-types/datetime.md)) — Start time of query execution. -- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds. -- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number or rows read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` 和 `JOIN`. 对于分布式查询 `read_rows` 包括在所有副本上读取的行总数。 每个副本发送它的 `read_rows` 值,并且查询的服务器-发起方汇总所有接收到的和本地的值。 缓存卷不会影响此值。 -- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number or bytes read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` 和 `JOIN`. 对于分布式查询 `read_bytes` 包括在所有副本上读取的行总数。 每个副本发送它的 `read_bytes` 值,并且查询的服务器-发起方汇总所有接收到的和本地的值。 缓存卷不会影响此值。 -- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` 查询,写入的行数。 对于其他查询,列值为0。 -- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` 查询时,写入的字节数。 对于其他查询,列值为0。 -- `result_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in a result of the `SELECT` 查询,或者在一些行 `INSERT` 查询。 -- `result_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — RAM volume in bytes used to store a query result. -- `memory_usage` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Memory consumption by the query. -- `query` ([字符串](../../sql-reference/data-types/string.md)) — Query string. -- `exception` ([字符串](../../sql-reference/data-types/string.md)) — Exception message. -- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — Code of an exception. 
-- `stack_trace` ([字符串](../../sql-reference/data-types/string.md)) — [堆栈跟踪](https://en.wikipedia.org/wiki/Stack_trace). 如果查询成功完成,则为空字符串。 -- `is_initial_query` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Query type. Possible values: - - 1 — Query was initiated by the client. - - 0 — Query was initiated by another query as part of distributed query execution. -- `user` ([字符串](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query. -- `query_id` ([字符串](../../sql-reference/data-types/string.md)) — ID of the query. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query. -- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the query. -- `initial_user` ([字符串](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution). -- `initial_query_id` ([字符串](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution). -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from. -- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query. -- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Interface that the query was initiated from. Possible values: +- `type` ([Enum8](../../sql-reference/data-types/enum.md)) — 执行查询时的事件类型. 值: + - `'QueryStart' = 1` — 查询成功启动. + - `'QueryFinish' = 2` — 查询成功完成. + - `'ExceptionBeforeStart' = 3` — 查询执行前有异常. + - `'ExceptionWhileProcessing' = 4` — 查询执行期间有异常. +- `event_date` ([Date](../../sql-reference/data-types/date.md)) — 查询开始日期. +- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — 查询开始时间. +- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — 查询开始时间(毫秒精度). 
+- `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — 查询执行的开始时间. +- `query_start_time_microseconds` (DateTime64) — 查询执行的开始时间(毫秒精度). +- `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 查询消耗的时间(毫秒). +- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 从参与了查询的所有表和表函数读取的总行数. 包括:普通的子查询, `IN` 和 `JOIN`的子查询. 对于分布式查询 `read_rows` 包括在所有副本上读取的行总数。 每个副本发送它的 `read_rows` 值,并且查询的服务器-发起方汇总所有接收到的和本地的值。 缓存卷不会影响此值。 +- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 从参与了查询的所有表和表函数读取的总字节数. 包括:普通的子查询, `IN` 和 `JOIN`的子查询. 对于分布式查询 `read_bytes` 包括在所有副本上读取的字节总数。 每个副本发送它的 `read_bytes` 值,并且查询的服务器-发起方汇总所有接收到的和本地的值。 缓存卷不会影响此值。 +- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 对于 `INSERT` 查询,为写入的行数。 对于其他查询,值为0。 +- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 对于 `INSERT` 查询时,为写入的字节数。 对于其他查询,值为0。 +- `result_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — `SELECT` 查询结果的行数,或`INSERT` 的行数。 +- `result_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 存储查询结果的RAM量. +- `memory_usage` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 查询使用的内存. +- `query` ([String](../../sql-reference/data-types/string.md)) — 查询语句. +- `exception` ([String](../../sql-reference/data-types/string.md)) — 异常信息. +- `exception_code` ([Int32](../../sql-reference/data-types/int-uint.md)) — 异常码. +- `stack_trace` ([String](../../sql-reference/data-types/string.md)) — [Stack Trace](https://en.wikipedia.org/wiki/Stack_trace). 如果查询成功完成,则为空字符串。 +- `is_initial_query` ([UInt8](../../sql-reference/data-types/int-uint.md)) — 查询类型. 可能的值: + - 1 — 客户端发起的查询. + - 0 — 由另一个查询发起的,作为分布式查询的一部分. +- `user` ([String](../../sql-reference/data-types/string.md)) — 发起查询的用户. +- `query_id` ([String](../../sql-reference/data-types/string.md)) — 查询ID. 
+- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起查询的客户端IP地址. +- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — 发起查询的客户端端口. +- `initial_user` ([String](../../sql-reference/data-types/string.md)) — 初始查询的用户名(用于分布式查询执行). +- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — 运行初始查询的ID(用于分布式查询执行). +- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 运行父查询的IP地址. +- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — 发起父查询的客户端端口. +- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — 发起查询的接口. 可能的值: - 1 — TCP. - 2 — HTTP. -- `os_user` ([字符串](../../sql-reference/data-types/string.md)) — Operating system username who runs [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md). -- `client_hostname` ([字符串](../../sql-reference/data-types/string.md)) — Hostname of the client machine where the [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或者运行另一个TCP客户端。 -- `client_name` ([字符串](../../sql-reference/data-types/string.md)) — The [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或另一个TCP客户端名称。 -- `client_revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Revision of the [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或另一个TCP客户端。 -- `client_version_major` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Major version of the [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或另一个TCP客户端。 -- `client_version_minor` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Minor version of the [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或另一个TCP客户端。 -- `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Patch component of the [ツ环板clientョツ嘉ッツ偲](../../interfaces/cli.md) 或另一个TCP客户端版本。 -- `http_method` (UInt8) — HTTP method that initiated the query. Possible values: - - 0 — The query was launched from the TCP interface. 
- - 1 — `GET` 方法被使用。 - - 2 — `POST` 方法被使用。 -- `http_user_agent` ([字符串](../../sql-reference/data-types/string.md)) — The `UserAgent` http请求中传递的标头。 -- `quota_key` ([字符串](../../sql-reference/data-types/string.md)) — The “quota key” 在指定 [配额](../../operations/quotas.md) 设置(见 `keyed`). +- `os_user` ([String](../../sql-reference/data-types/string.md)) — 运行 [clickhouse-client](../../interfaces/cli.md)的操作系统用户名. +- `client_hostname` ([String](../../sql-reference/data-types/string.md)) — 运行[clickhouse-client](../../interfaces/cli.md) 或其他TCP客户端的机器的主机名。 +- `client_name` ([String](../../sql-reference/data-types/string.md)) — [clickhouse-client](../../interfaces/cli.md) 或其他TCP客户端的名称。 +- `client_revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — [clickhouse-client](../../interfaces/cli.md) 或其他TCP客户端的Revision。 +- `client_version_major` ([UInt32](../../sql-reference/data-types/int-uint.md)) — [clickhouse-client](../../interfaces/cli.md) 或其他TCP客户端的Major version。 +- `client_version_minor` ([UInt32](../../sql-reference/data-types/int-uint.md)) — [clickhouse-client](../../interfaces/cli.md) 或其他TCP客户端的Minor version。 +- `client_version_patch` ([UInt32](../../sql-reference/data-types/int-uint.md)) — [clickhouse-client](../../interfaces/cli.md) 或其他TCP客户端的Patch component。 +- `http_method` (UInt8) — 发起查询的HTTP方法. 可能值: + - 0 — TCP接口的查询. + - 1 — `GET` + - 2 — `POST` +- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` The UserAgent header passed in the HTTP request。 +- `quota_key` ([String](../../sql-reference/data-types/string.md)) — 在[quotas](../../operations/quotas.md) 配置里设置的“quota key” (见 `keyed`). - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision. -- `thread_numbers` ([数组(UInt32)](../../sql-reference/data-types/array.md)) — Number of threads that are participating in query execution. 
-- `ProfileEvents.Names` ([数组(字符串)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. The description of them could be found in the table [系统。活动](../../operations/system-tables/events.md#system_tables-events) -- `ProfileEvents.Values` ([数组(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` 列。 -- `Settings.Names` ([数组(字符串)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` 参数为1。 -- `Settings.Values` ([数组(字符串)](../../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` 列。 - +- `thread_numbers` ([Array(UInt32)](../../sql-reference/data-types/array.md)) — 参与查询的线程数. +- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — 衡量不同指标的计数器。 可以在[system.events](../../operations/system-tables/events.md#system_tables-events)中找到它们的描述。 +- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — `ProfileEvents.Names` 列中列出的指标的值。 +- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — 客户端运行查询时更改的设置的名称。 要启用对设置的日志记录更改,请将log_query_settings参数设置为1。 +- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — `Settings.Names` 列中列出的设置的值。 **示例** ``` sql @@ -140,4 +141,4 @@ Settings.Values: ['0','random','1','10000000000'] **另请参阅** -- [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — This table contains information about each query execution thread. 
+- [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — 这个表包含了每个查询执行线程的信息 From 637f6a29a649ee46360848c5a8013fb040050589 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 11:16:15 +0300 Subject: [PATCH 195/333] Add penalty --- utils/memcpy-bench/memcpy-bench.cpp | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/utils/memcpy-bench/memcpy-bench.cpp b/utils/memcpy-bench/memcpy-bench.cpp index 365abe1f01e..dc510af0dbf 100644 --- a/utils/memcpy-bench/memcpy-bench.cpp +++ b/utils/memcpy-bench/memcpy-bench.cpp @@ -33,6 +33,9 @@ void NO_INLINE loop(uint8_t * dst, uint8_t * src, size_t size, F && chunk_size_d dst += bytes_to_copy; src += bytes_to_copy; size -= bytes_to_copy; + + /// Execute at least one SSE instruction as a penalty after running AVX code. + __asm__ volatile ("pxor %%xmm7, %%xmm7" ::: "xmm7"); } } @@ -76,16 +79,9 @@ uint64_t test(uint8_t * dst, uint8_t * src, size_t size, size_t iterations, size uint64_t elapsed_ns = watch.elapsed(); /// Validation - size_t sum = 0; - size_t reference = 0; for (size_t i = 0; i < size; ++i) - { - sum += dst[i]; - reference += uint8_t(i); - } - - if (sum != reference) - throw std::logic_error("Incorrect result"); + if (dst[i] != uint8_t(i)) + throw std::logic_error("Incorrect result"); std::cout << name; return elapsed_ns; @@ -676,11 +672,9 @@ done | tee result.tsv } else { - iterations = 10000000000ULL * num_threads / size; + iterations = 10000000000ULL / size; if (generator_variant == 1) - iterations /= 100; - if (generator_variant == 2) iterations /= 10; } From 1f6b05cd85d34c2d6f71b057c16d95b83f7d8853 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 11:18:11 +0300 Subject: [PATCH 196/333] Add example --- utils/memcpy-bench/memcpy-bench.cpp | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/utils/memcpy-bench/memcpy-bench.cpp b/utils/memcpy-bench/memcpy-bench.cpp index 
dc510af0dbf..cd769640017 100644 --- a/utils/memcpy-bench/memcpy-bench.cpp +++ b/utils/memcpy-bench/memcpy-bench.cpp @@ -655,6 +655,24 @@ for size in 4096 16384 50000 65536 100000 1000000 10000000 100000000; do done; done | tee result.tsv +clickhouse-local --structure ' + name String, + size UInt64, + iterations UInt64, + threads UInt16, + generator UInt8, + memcpy UInt8, + elapsed UInt64 +' --query " + SELECT + size, name, + avg(1000 * elapsed / size / iterations) AS s, + count() AS c + FROM table + GROUP BY size, name + ORDER BY size ASC, s DESC +" --output-format PrettyCompact < result.tsv + )" << std::endl; std::cout << desc << std::endl; return 1; From 69b8ded5786ce341e842aec7d1967d6b6661761a Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 15 Mar 2021 11:22:15 +0300 Subject: [PATCH 197/333] Fix alter modify for decimal columns when type size doesn't change --- src/Storages/StorageReplicatedMergeTree.cpp | 74 +++++++++---------- .../01761_alter_decimal_zookeeper.reference | 9 +++ .../01761_alter_decimal_zookeeper.sql | 31 ++++++++ 3 files changed, 75 insertions(+), 39 deletions(-) create mode 100644 tests/queries/0_stateless/01761_alter_decimal_zookeeper.reference create mode 100644 tests/queries/0_stateless/01761_alter_decimal_zookeeper.sql diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index d08c5b6ad7c..391d685be78 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -907,8 +907,7 @@ void StorageReplicatedMergeTree::setTableStructure( StorageInMemoryMetadata new_metadata = getInMemoryMetadata(); StorageInMemoryMetadata old_metadata = getInMemoryMetadata(); - if (new_columns != new_metadata.columns) - new_metadata.columns = new_columns; + new_metadata.columns = new_columns; if (!metadata_diff.empty()) { @@ -976,46 +975,43 @@ void StorageReplicatedMergeTree::setTableStructure( } /// Changes in columns may affect following metadata fields - if 
(new_metadata.columns != old_metadata.columns) + new_metadata.column_ttls_by_name.clear(); + for (const auto & [name, ast] : new_metadata.columns.getColumnTTLs()) { - new_metadata.column_ttls_by_name.clear(); - for (const auto & [name, ast] : new_metadata.columns.getColumnTTLs()) - { - auto new_ttl_entry = TTLDescription::getTTLFromAST(ast, new_metadata.columns, global_context, new_metadata.primary_key); - new_metadata.column_ttls_by_name[name] = new_ttl_entry; - } - - if (new_metadata.partition_key.definition_ast != nullptr) - new_metadata.partition_key.recalculateWithNewColumns(new_metadata.columns, global_context); - - if (!metadata_diff.sorting_key_changed) /// otherwise already updated - new_metadata.sorting_key.recalculateWithNewColumns(new_metadata.columns, global_context); - - /// Primary key is special, it exists even if not defined - if (new_metadata.primary_key.definition_ast != nullptr) - { - new_metadata.primary_key.recalculateWithNewColumns(new_metadata.columns, global_context); - } - else - { - new_metadata.primary_key = KeyDescription::getKeyFromAST(new_metadata.sorting_key.definition_ast, new_metadata.columns, global_context); - new_metadata.primary_key.definition_ast = nullptr; - } - - if (!metadata_diff.sampling_expression_changed && new_metadata.sampling_key.definition_ast != nullptr) - new_metadata.sampling_key.recalculateWithNewColumns(new_metadata.columns, global_context); - - if (!metadata_diff.skip_indices_changed) /// otherwise already updated - { - for (auto & index : new_metadata.secondary_indices) - index.recalculateWithNewColumns(new_metadata.columns, global_context); - } - - if (!metadata_diff.ttl_table_changed && new_metadata.table_ttl.definition_ast != nullptr) - new_metadata.table_ttl = TTLTableDescription::getTTLForTableFromAST( - new_metadata.table_ttl.definition_ast, new_metadata.columns, global_context, new_metadata.primary_key); + auto new_ttl_entry = TTLDescription::getTTLFromAST(ast, new_metadata.columns, global_context, 
new_metadata.primary_key); + new_metadata.column_ttls_by_name[name] = new_ttl_entry; } + if (new_metadata.partition_key.definition_ast != nullptr) + new_metadata.partition_key.recalculateWithNewColumns(new_metadata.columns, global_context); + + if (!metadata_diff.sorting_key_changed) /// otherwise already updated + new_metadata.sorting_key.recalculateWithNewColumns(new_metadata.columns, global_context); + + /// Primary key is special, it exists even if not defined + if (new_metadata.primary_key.definition_ast != nullptr) + { + new_metadata.primary_key.recalculateWithNewColumns(new_metadata.columns, global_context); + } + else + { + new_metadata.primary_key = KeyDescription::getKeyFromAST(new_metadata.sorting_key.definition_ast, new_metadata.columns, global_context); + new_metadata.primary_key.definition_ast = nullptr; + } + + if (!metadata_diff.sampling_expression_changed && new_metadata.sampling_key.definition_ast != nullptr) + new_metadata.sampling_key.recalculateWithNewColumns(new_metadata.columns, global_context); + + if (!metadata_diff.skip_indices_changed) /// otherwise already updated + { + for (auto & index : new_metadata.secondary_indices) + index.recalculateWithNewColumns(new_metadata.columns, global_context); + } + + if (!metadata_diff.ttl_table_changed && new_metadata.table_ttl.definition_ast != nullptr) + new_metadata.table_ttl = TTLTableDescription::getTTLForTableFromAST( + new_metadata.table_ttl.definition_ast, new_metadata.columns, global_context, new_metadata.primary_key); + /// Even if the primary/sorting/partition keys didn't change we must reinitialize it /// because primary/partition key column types might have changed. 
checkTTLExpressions(new_metadata, old_metadata); diff --git a/tests/queries/0_stateless/01761_alter_decimal_zookeeper.reference b/tests/queries/0_stateless/01761_alter_decimal_zookeeper.reference new file mode 100644 index 00000000000..5dcc95fd7b7 --- /dev/null +++ b/tests/queries/0_stateless/01761_alter_decimal_zookeeper.reference @@ -0,0 +1,9 @@ +1 5.00000000 +2 6.00000000 +CREATE TABLE default.test_alter_decimal\n(\n `n` UInt64,\n `d` Decimal(18, 8)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/01761_alter_decimal_zookeeper\', \'r1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +1 5.00000000 +2 6.00000000 +CREATE TABLE default.test_alter_decimal\n(\n `n` UInt64,\n `d` Decimal(18, 8)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/01761_alter_decimal_zookeeper\', \'r1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +1 5.00000000 +2 6.00000000 +3 7.00000000 diff --git a/tests/queries/0_stateless/01761_alter_decimal_zookeeper.sql b/tests/queries/0_stateless/01761_alter_decimal_zookeeper.sql new file mode 100644 index 00000000000..01766f0d6c2 --- /dev/null +++ b/tests/queries/0_stateless/01761_alter_decimal_zookeeper.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS test_alter_decimal; + +CREATE TABLE test_alter_decimal +(n UInt64, d Decimal(15, 8)) +ENGINE = ReplicatedMergeTree('/clickhouse/01761_alter_decimal_zookeeper', 'r1') +ORDER BY tuple(); + +INSERT INTO test_alter_decimal VALUES (1, toDecimal32(5, 5)); + +INSERT INTO test_alter_decimal VALUES (2, toDecimal32(6, 6)); + +SELECT * FROM test_alter_decimal ORDER BY n; + +ALTER TABLE test_alter_decimal MODIFY COLUMN d Decimal(18, 8); + +SHOW CREATE TABLE test_alter_decimal; + +SELECT * FROM test_alter_decimal ORDER BY n; + +DETACH TABLE test_alter_decimal; +ATTACH TABLE test_alter_decimal; + +SHOW CREATE TABLE test_alter_decimal; + +INSERT INTO test_alter_decimal VALUES (3, toDecimal32(7, 7)); + +OPTIMIZE TABLE test_alter_decimal FINAL; + +SELECT * FROM test_alter_decimal ORDER BY n; + +DROP TABLE IF 
EXISTS test_alter_decimal; From 9bea10d9f94206671c89b8faf196725ed47e0d5e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 11:49:56 +0300 Subject: [PATCH 198/333] Fix style --- utils/memcpy-bench/glibc/asm-syntax.h | 28 +++--- utils/memcpy-bench/glibc/dwarf2.h | 114 +++++++++++----------- utils/memcpy-bench/glibc/sysdep.h | 58 +++++------ utils/memcpy-bench/glibc/sysdep_generic.h | 62 ++++++------ utils/memcpy-bench/glibc/sysdep_x86.h | 66 ++++++------- 5 files changed, 164 insertions(+), 164 deletions(-) diff --git a/utils/memcpy-bench/glibc/asm-syntax.h b/utils/memcpy-bench/glibc/asm-syntax.h index 9d65213ba30..0879f2606c7 100644 --- a/utils/memcpy-bench/glibc/asm-syntax.h +++ b/utils/memcpy-bench/glibc/asm-syntax.h @@ -1,23 +1,23 @@ #pragma once /* Definitions for x86 syntax variations. - Copyright (C) 1992-2020 Free Software Foundation, Inc. - This file is part of the GNU C Library. Its master source is NOT part of - the C library, however. The master source lives in the GNU MP Library. + Copyright (C) 1992-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. Its master source is NOT part of + the C library, however. The master source lives in the GNU MP Library. - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - Lesser General Public License for more details. + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ #undef ALIGN #define ALIGN(log) .align 1<. */ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ #ifndef _DWARF2_H #define _DWARF2_H 1 /* This file is derived from the DWARF specification (a public document) - Revision 2.0.0 (July 27, 1993) developed by the UNIX International - Programming Languages Special Interest Group (UI/PLSIG) and distributed - by UNIX International. Copies of this specification are available from - UNIX International, 20 Waterview Boulevard, Parsippany, NJ, 07054. */ + Revision 2.0.0 (July 27, 1993) developed by the UNIX International + Programming Languages Special Interest Group (UI/PLSIG) and distributed + by UNIX International. Copies of this specification are available from + UNIX International, 20 Waterview Boulevard, Parsippany, NJ, 07054. */ /* This file is shared between GCC and GDB, and should not contain - prototypes. */ + prototypes. */ #ifndef __ASSEMBLER__ /* Tag names and codes. */ enum dwarf_tag - { + { DW_TAG_padding = 0x00, DW_TAG_array_type = 0x01, DW_TAG_class_type = 0x02, @@ -95,7 +95,7 @@ enum dwarf_tag DW_TAG_class_template = 0x4103, /* for C++ */ DW_TAG_GNU_BINCL = 0x4104, DW_TAG_GNU_EINCL = 0x4105 - }; + }; #define DW_TAG_lo_user 0x4080 #define DW_TAG_hi_user 0xffff @@ -106,7 +106,7 @@ enum dwarf_tag /* Form names and codes. 
*/ enum dwarf_form - { + { DW_FORM_addr = 0x01, DW_FORM_block2 = 0x03, DW_FORM_block4 = 0x04, @@ -128,12 +128,12 @@ enum dwarf_form DW_FORM_ref8 = 0x14, DW_FORM_ref_udata = 0x15, DW_FORM_indirect = 0x16 - }; + }; /* Attribute names and codes. */ enum dwarf_attribute - { + { DW_AT_sibling = 0x01, DW_AT_location = 0x02, DW_AT_name = 0x03, @@ -215,7 +215,7 @@ enum dwarf_attribute DW_AT_src_coords = 0x2104, DW_AT_body_begin = 0x2105, DW_AT_body_end = 0x2106 - }; + }; #define DW_AT_lo_user 0x2000 /* implementation-defined range start */ #define DW_AT_hi_user 0x3ff0 /* implementation-defined range end */ @@ -223,7 +223,7 @@ enum dwarf_attribute /* Location atom names and codes. */ enum dwarf_location_atom - { + { DW_OP_addr = 0x03, DW_OP_deref = 0x06, DW_OP_const1u = 0x08, @@ -369,7 +369,7 @@ enum dwarf_location_atom DW_OP_deref_size = 0x94, DW_OP_xderef_size = 0x95, DW_OP_nop = 0x96 - }; + }; #define DW_OP_lo_user 0x80 /* implementation-defined range start */ #define DW_OP_hi_user 0xff /* implementation-defined range end */ @@ -377,7 +377,7 @@ enum dwarf_location_atom /* Type encodings. */ enum dwarf_type - { + { DW_ATE_void = 0x0, DW_ATE_address = 0x1, DW_ATE_boolean = 0x2, @@ -387,81 +387,81 @@ enum dwarf_type DW_ATE_signed_char = 0x6, DW_ATE_unsigned = 0x7, DW_ATE_unsigned_char = 0x8 - }; + }; #define DW_ATE_lo_user 0x80 #define DW_ATE_hi_user 0xff /* Array ordering names and codes. 
*/ enum dwarf_array_dim_ordering - { + { DW_ORD_row_major = 0, DW_ORD_col_major = 1 - }; + }; /* access attribute */ enum dwarf_access_attribute - { + { DW_ACCESS_public = 1, DW_ACCESS_protected = 2, DW_ACCESS_private = 3 - }; + }; /* visibility */ enum dwarf_visibility_attribute - { + { DW_VIS_local = 1, DW_VIS_exported = 2, DW_VIS_qualified = 3 - }; + }; /* virtuality */ enum dwarf_virtuality_attribute - { + { DW_VIRTUALITY_none = 0, DW_VIRTUALITY_virtual = 1, DW_VIRTUALITY_pure_virtual = 2 - }; + }; /* case sensitivity */ enum dwarf_id_case - { + { DW_ID_case_sensitive = 0, DW_ID_up_case = 1, DW_ID_down_case = 2, DW_ID_case_insensitive = 3 - }; + }; /* calling convention */ enum dwarf_calling_convention - { + { DW_CC_normal = 0x1, DW_CC_program = 0x2, DW_CC_nocall = 0x3 - }; + }; #define DW_CC_lo_user 0x40 #define DW_CC_hi_user 0xff /* inline attribute */ enum dwarf_inline_attribute - { + { DW_INL_not_inlined = 0, DW_INL_inlined = 1, DW_INL_declared_not_inlined = 2, DW_INL_declared_inlined = 3 - }; + }; /* discriminant lists */ enum dwarf_discrim_list - { + { DW_DSC_label = 0, DW_DSC_range = 1 - }; + }; /* line number opcodes */ enum dwarf_line_number_ops - { + { DW_LNS_extended_op = 0, DW_LNS_copy = 1, DW_LNS_advance_pc = 2, @@ -472,19 +472,19 @@ enum dwarf_line_number_ops DW_LNS_set_basic_block = 7, DW_LNS_const_add_pc = 8, DW_LNS_fixed_advance_pc = 9 - }; + }; /* line number extended opcodes */ enum dwarf_line_number_x_ops - { + { DW_LNE_end_sequence = 1, DW_LNE_set_address = 2, DW_LNE_define_file = 3 - }; + }; /* call frame information */ enum dwarf_call_frame_info - { + { DW_CFA_advance_loc = 0x40, DW_CFA_offset = 0x80, DW_CFA_restore = 0xc0, @@ -517,7 +517,7 @@ enum dwarf_call_frame_info DW_CFA_GNU_window_save = 0x2d, DW_CFA_GNU_args_size = 0x2e, DW_CFA_GNU_negative_offset_extended = 0x2f - }; + }; #define DW_CIE_ID 0xffffffff #define DW_CIE_VERSION 1 @@ -534,7 +534,7 @@ enum dwarf_call_frame_info /* Source language names and codes. 
*/ enum dwarf_source_language - { + { DW_LANG_C89 = 0x0001, DW_LANG_C = 0x0002, DW_LANG_Ada83 = 0x0003, @@ -547,7 +547,7 @@ enum dwarf_source_language DW_LANG_Modula2 = 0x000a, DW_LANG_Java = 0x000b, DW_LANG_Mips_Assembler = 0x8001 - }; + }; #define DW_LANG_lo_user 0x8000 /* implementation-defined range start */ @@ -556,13 +556,13 @@ enum dwarf_source_language /* Names and codes for macro information. */ enum dwarf_macinfo_record_type - { + { DW_MACINFO_define = 1, DW_MACINFO_undef = 2, DW_MACINFO_start_file = 3, DW_MACINFO_end_file = 4, DW_MACINFO_vendor_ext = 255 - }; + }; #endif /* !ASSEMBLER */ diff --git a/utils/memcpy-bench/glibc/sysdep.h b/utils/memcpy-bench/glibc/sysdep.h index 2f43d688df9..82b1e747fbe 100644 --- a/utils/memcpy-bench/glibc/sysdep.h +++ b/utils/memcpy-bench/glibc/sysdep.h @@ -1,22 +1,22 @@ #pragma once /* Assembler macros for x86-64. - Copyright (C) 2001-2020 Free Software Foundation, Inc. - This file is part of the GNU C Library. + Copyright (C) 2001-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. 
+ The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ #ifndef _X86_64_SYSDEP_H #define _X86_64_SYSDEP_H 1 @@ -28,35 +28,35 @@ /* Syntactic details of assembler. */ /* This macro is for setting proper CFI with DW_CFA_expression describing - the register as saved relative to %rsp instead of relative to the CFA. - Expression is DW_OP_drop, DW_OP_breg7 (%rsp is register 7), sleb128 offset - from %rsp. */ + the register as saved relative to %rsp instead of relative to the CFA. + Expression is DW_OP_drop, DW_OP_breg7 (%rsp is register 7), sleb128 offset + from %rsp. */ #define cfi_offset_rel_rsp(regn, off) .cfi_escape 0x10, regn, 0x4, 0x13, \ 0x77, off & 0x7F | 0x80, off >> 7 /* If compiled for profiling, call `mcount' at the start of each function. */ #ifdef PROF /* The mcount code relies on a normal frame pointer being on the stack - to locate our caller, so push one just for its benefit. */ + to locate our caller, so push one just for its benefit. */ #define CALL_MCOUNT \ - pushq %rbp; \ - cfi_adjust_cfa_offset(8); \ - movq %rsp, %rbp; \ - cfi_def_cfa_register(%rbp); \ - call JUMPTARGET(mcount); \ - popq %rbp; \ - cfi_def_cfa(rsp,8); + pushq %rbp; \ + cfi_adjust_cfa_offset(8); \ + movq %rsp, %rbp; \ + cfi_def_cfa_register(%rbp); \ + call JUMPTARGET(mcount); \ + popq %rbp; \ + cfi_def_cfa(rsp,8); #else #define CALL_MCOUNT /* Do nothing. 
*/ #endif #define PSEUDO(name, syscall_name, args) \ lose: \ - jmp JUMPTARGET(syscall_error) \ - .globl syscall_error; \ - ENTRY (name) \ - DO_CALL (syscall_name, args); \ - jb lose + jmp JUMPTARGET(syscall_error) \ + .globl syscall_error; \ + ENTRY (name) \ + DO_CALL (syscall_name, args); \ + jb lose #undef JUMPTARGET #ifdef SHARED diff --git a/utils/memcpy-bench/glibc/sysdep_generic.h b/utils/memcpy-bench/glibc/sysdep_generic.h index 0cb5bca4102..e6183d72792 100644 --- a/utils/memcpy-bench/glibc/sysdep_generic.h +++ b/utils/memcpy-bench/glibc/sysdep_generic.h @@ -1,22 +1,22 @@ #pragma once /* Generic asm macros used on many machines. - Copyright (C) 1991-2020 Free Software Foundation, Inc. - This file is part of the GNU C Library. + Copyright (C) 1991-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . 
*/ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ #define C_SYMBOL_NAME(name) name #define HIDDEN_JUMPTARGET(name) 0x0 @@ -31,8 +31,8 @@ #define ASM_LINE_SEP ; #define strong_alias(original, alias) \ - .globl C_SYMBOL_NAME (alias) ASM_LINE_SEP \ - C_SYMBOL_NAME (alias) = C_SYMBOL_NAME (original) + .globl C_SYMBOL_NAME (alias) ASM_LINE_SEP \ + C_SYMBOL_NAME (alias) = C_SYMBOL_NAME (original) #ifndef C_LABEL @@ -43,7 +43,7 @@ #ifdef __ASSEMBLER__ /* Mark the end of function named SYM. This is used on some platforms - to generate correct debugging information. */ + to generate correct debugging information. */ # ifndef END # define END(sym) # endif @@ -81,35 +81,35 @@ # define CFI_STARTPROC ".cfi_startproc" # define CFI_ENDPROC ".cfi_endproc" # define CFI_DEF_CFA(reg, off) \ - ".cfi_def_cfa " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off) + ".cfi_def_cfa " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off) # define CFI_DEF_CFA_REGISTER(reg) \ - ".cfi_def_cfa_register " CFI_STRINGIFY(reg) + ".cfi_def_cfa_register " CFI_STRINGIFY(reg) # define CFI_DEF_CFA_OFFSET(off) \ - ".cfi_def_cfa_offset " CFI_STRINGIFY(off) + ".cfi_def_cfa_offset " CFI_STRINGIFY(off) # define CFI_ADJUST_CFA_OFFSET(off) \ - ".cfi_adjust_cfa_offset " CFI_STRINGIFY(off) + ".cfi_adjust_cfa_offset " CFI_STRINGIFY(off) # define CFI_OFFSET(reg, off) \ - ".cfi_offset " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off) + ".cfi_offset " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off) # define CFI_REL_OFFSET(reg, off) \ - ".cfi_rel_offset " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off) + ".cfi_rel_offset " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off) # define CFI_REGISTER(r1, r2) \ - ".cfi_register " CFI_STRINGIFY(r1) "," CFI_STRINGIFY(r2) + ".cfi_register " CFI_STRINGIFY(r1) "," CFI_STRINGIFY(r2) # define CFI_RETURN_COLUMN(reg) \ - ".cfi_return_column " CFI_STRINGIFY(reg) + ".cfi_return_column " CFI_STRINGIFY(reg) # define CFI_RESTORE(reg) \ - ".cfi_restore " 
CFI_STRINGIFY(reg) + ".cfi_restore " CFI_STRINGIFY(reg) # define CFI_UNDEFINED(reg) \ - ".cfi_undefined " CFI_STRINGIFY(reg) + ".cfi_undefined " CFI_STRINGIFY(reg) # define CFI_REMEMBER_STATE \ - ".cfi_remember_state" + ".cfi_remember_state" # define CFI_RESTORE_STATE \ - ".cfi_restore_state" + ".cfi_restore_state" # define CFI_WINDOW_SAVE \ - ".cfi_window_save" + ".cfi_window_save" # define CFI_PERSONALITY(enc, exp) \ - ".cfi_personality " CFI_STRINGIFY(enc) "," CFI_STRINGIFY(exp) + ".cfi_personality " CFI_STRINGIFY(enc) "," CFI_STRINGIFY(exp) # define CFI_LSDA(enc, exp) \ - ".cfi_lsda " CFI_STRINGIFY(enc) "," CFI_STRINGIFY(exp) + ".cfi_lsda " CFI_STRINGIFY(enc) "," CFI_STRINGIFY(exp) #endif #include "dwarf2.h" diff --git a/utils/memcpy-bench/glibc/sysdep_x86.h b/utils/memcpy-bench/glibc/sysdep_x86.h index 4469ed2e885..1c482cfabb7 100644 --- a/utils/memcpy-bench/glibc/sysdep_x86.h +++ b/utils/memcpy-bench/glibc/sysdep_x86.h @@ -1,22 +1,22 @@ #pragma once /* Assembler macros for x86. - Copyright (C) 2017-2020 Free Software Foundation, Inc. - This file is part of the GNU C Library. + Copyright (C) 2017-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. 
+ The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ #ifndef _X86_SYSDEP_H #define _X86_SYSDEP_H 1 @@ -27,11 +27,11 @@ enum cf_protection_level { - CF_NONE = 0, - CF_BRANCH = 1 << 0, - CF_RETURN = 1 << 1, - CF_FULL = CF_BRANCH | CF_RETURN, - CF_SET = 1 << 2 + CF_NONE = 0, + CF_BRANCH = 1 << 0, + CF_RETURN = 1 << 1, + CF_FULL = CF_BRANCH | CF_RETURN, + CF_SET = 1 << 2 }; */ @@ -51,13 +51,13 @@ enum cf_protection_level #endif /* Offset for fxsave/xsave area used by _dl_runtime_resolve. Also need - space to preserve RCX, RDX, RSI, RDI, R8, R9 and RAX. It must be - aligned to 16 bytes for fxsave and 64 bytes for xsave. */ + space to preserve RCX, RDX, RSI, RDI, R8, R9 and RAX. It must be + aligned to 16 bytes for fxsave and 64 bytes for xsave. */ #define STATE_SAVE_OFFSET (8 * 7 + 8) /* Save SSE, AVX, AVX512, mask and bound registers. */ #define STATE_SAVE_MASK \ - ((1 << 1) | (1 << 2) | (1 << 3) | (1 << 5) | (1 << 6) | (1 << 7)) + ((1 << 1) | (1 << 2) | (1 << 3) | (1 << 5) | (1 << 6) | (1 << 7)) #ifdef __ASSEMBLER__ @@ -76,31 +76,31 @@ enum cf_protection_level /* Define an entry point visible from C. 
*/ #define ENTRY(name) \ - .globl C_SYMBOL_NAME(name); \ - .type C_SYMBOL_NAME(name),@function; \ - .align ALIGNARG(4); \ - C_LABEL(name) \ - cfi_startproc; \ - _CET_ENDBR; \ - CALL_MCOUNT + .globl C_SYMBOL_NAME(name); \ + .type C_SYMBOL_NAME(name),@function; \ + .align ALIGNARG(4); \ + C_LABEL(name) \ + cfi_startproc; \ + _CET_ENDBR; \ + CALL_MCOUNT #undef END #define END(name) \ - cfi_endproc; \ - ASM_SIZE_DIRECTIVE(name) + cfi_endproc; \ + ASM_SIZE_DIRECTIVE(name) #define ENTRY_CHK(name) ENTRY (name) #define END_CHK(name) END (name) /* Since C identifiers are not normally prefixed with an underscore - on this system, the asm identifier `syscall_error' intrudes on the - C name space. Make sure we use an innocuous name. */ + on this system, the asm identifier `syscall_error' intrudes on the + C name space. Make sure we use an innocuous name. */ #define syscall_error __syscall_error #define mcount _mcount #undef PSEUDO_END #define PSEUDO_END(name) \ - END (name) + END (name) /* Local label name for asm code. 
*/ #ifndef L From caccc6da3787d4f328149ccae528328c165d4810 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 15 Mar 2021 12:01:26 +0300 Subject: [PATCH 199/333] Add missing tests --- .../test_optimize_on_insert/__init__.py | 1 + .../test_optimize_on_insert/test.py | 48 +++++++++++++++++++ ...560_optimize_on_insert_zookeeper.reference | 1 + .../01560_optimize_on_insert_zookeeper.sql | 36 ++++++++++++++ 4 files changed, 86 insertions(+) create mode 100644 tests/integration/test_optimize_on_insert/__init__.py create mode 100644 tests/integration/test_optimize_on_insert/test.py create mode 100644 tests/queries/0_stateless/01560_optimize_on_insert_zookeeper.reference create mode 100644 tests/queries/0_stateless/01560_optimize_on_insert_zookeeper.sql diff --git a/tests/integration/test_optimize_on_insert/__init__.py b/tests/integration/test_optimize_on_insert/__init__.py new file mode 100644 index 00000000000..e5a0d9b4834 --- /dev/null +++ b/tests/integration/test_optimize_on_insert/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/env python3 diff --git a/tests/integration/test_optimize_on_insert/test.py b/tests/integration/test_optimize_on_insert/test.py new file mode 100644 index 00000000000..da4e20edf0c --- /dev/null +++ b/tests/integration/test_optimize_on_insert/test.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 + +import pytest +from helpers.client import QueryRuntimeException +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance('node1', with_zookeeper=True) +node2 = cluster.add_instance('node2', with_zookeeper=True) + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + + yield cluster + + finally: + cluster.shutdown() + + +def get_data_files_for_table(node, table_name): + raw_output = node.exec_in_container(["bash", "-c", "ls /var/lib/clickhouse/data/default/{}".format(table_name)]) + return raw_output.strip().split("\n") + +def test_empty_parts_optimize(start_cluster): + for n, 
node in enumerate([node1, node2]): + node.query(""" + CREATE TABLE empty (key UInt32, val UInt32, date Datetime) + ENGINE=ReplicatedSummingMergeTree('/clickhouse/01560_optimize_on_insert', '{}', val) + PARTITION BY date ORDER BY key; + """.format(n+1)) + + node1.query("INSERT INTO empty VALUES (1, 1, '2020-01-01'), (1, 1, '2020-01-01'), (1, -2, '2020-01-01')") + + node2.query("SYSTEM SYNC REPLICA empty", timeout=15) + + assert node1.query("SELECT * FROM empty") == "" + assert node2.query("SELECT * FROM empty") == "" + + # No other tmp files exists + assert set(get_data_files_for_table(node1, "empty")) == {"detached", "format_version.txt"} + assert set(get_data_files_for_table(node2, "empty")) == {"detached", "format_version.txt"} + + node1.query("INSERT INTO empty VALUES (1, 1, '2020-02-01'), (1, 1, '2020-02-01'), (1, -2, '2020-02-01')", settings={"insert_quorum": 2}) + + assert node1.query("SELECT * FROM empty") == "" + assert node2.query("SELECT * FROM empty") == "" diff --git a/tests/queries/0_stateless/01560_optimize_on_insert_zookeeper.reference b/tests/queries/0_stateless/01560_optimize_on_insert_zookeeper.reference new file mode 100644 index 00000000000..e89c6201fb7 --- /dev/null +++ b/tests/queries/0_stateless/01560_optimize_on_insert_zookeeper.reference @@ -0,0 +1 @@ +Check creating empty parts diff --git a/tests/queries/0_stateless/01560_optimize_on_insert_zookeeper.sql b/tests/queries/0_stateless/01560_optimize_on_insert_zookeeper.sql new file mode 100644 index 00000000000..a98818b2195 --- /dev/null +++ b/tests/queries/0_stateless/01560_optimize_on_insert_zookeeper.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS empty1; +DROP TABLE IF EXISTS empty2; + +SELECT 'Check creating empty parts'; + +CREATE TABLE empty1 (key UInt32, val UInt32, date Datetime) +ENGINE=ReplicatedSummingMergeTree('/clickhouse/01560_optimize_on_insert', '1', val) +PARTITION BY date ORDER BY key; + +CREATE TABLE empty2 (key UInt32, val UInt32, date Datetime) 
+ENGINE=ReplicatedSummingMergeTree('/clickhouse/01560_optimize_on_insert', '2', val) +PARTITION BY date ORDER BY key; + +INSERT INTO empty2 VALUES (1, 1, '2020-01-01'), (1, 1, '2020-01-01'), (1, -2, '2020-01-01'); + +SYSTEM SYNC REPLICA empty1; + +SELECT * FROM empty1 ORDER BY key; +SELECT * FROM empty2 ORDER BY key; + +SELECT table, partition, active FROM system.parts where table = 'empty1' and database=currentDatabase() and active = 1; +SELECT table, partition, active FROM system.parts where table = 'empty2' and database=currentDatabase() and active = 1; + +DETACH table empty1; +DETACH table empty2; +ATTACH table empty1; +ATTACH table empty2; + +SELECT * FROM empty1 ORDER BY key; +SELECT * FROM empty2 ORDER BY key; + +SELECT table, partition, active FROM system.parts where table = 'empty1' and database=currentDatabase() and active = 1; +SELECT table, partition, active FROM system.parts where table = 'empty2' and database=currentDatabase() and active = 1; + +DROP TABLE IF EXISTS empty1; +DROP TABLE IF EXISTS empty2; From 1e495705ada1957c64437d0a0387675f01536a2d Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 15 Mar 2021 12:44:13 +0300 Subject: [PATCH 200/333] Update version_date.tsv after release 21.2.6.1 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 5edbc4bca1a..b13f651f10a 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,4 +1,5 @@ v21.3.2.5-lts 2021-03-12 +v21.2.6.1-stable 2021-03-15 v21.2.5.5-stable 2021-03-02 v21.2.4.6-stable 2021-02-20 v21.2.3.15-stable 2021-02-14 From 1ede69e27707998cf6ce39cb9ec5925e6717d6f4 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 15 Mar 2021 14:15:06 +0300 Subject: [PATCH 201/333] Update version_date.tsv after release 21.1.7.1 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git 
a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index b13f651f10a..687dbc24000 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -4,6 +4,7 @@ v21.2.5.5-stable 2021-03-02 v21.2.4.6-stable 2021-02-20 v21.2.3.15-stable 2021-02-14 v21.2.2.8-stable 2021-02-07 +v21.1.7.1-stable 2021-03-15 v21.1.6.13-stable 2021-03-02 v21.1.5.4-stable 2021-02-20 v21.1.4.46-stable 2021-02-14 From 35e0d0f672321d8ab2120dd873f562a39d6310e6 Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Mon, 15 Mar 2021 14:17:16 +0300 Subject: [PATCH 202/333] Reverted S3 connection pools. --- .../mergetree-family/mergetree.md | 2 -- .../mergetree-family/mergetree.md | 2 -- src/IO/S3/PocoHTTPClient.cpp | 26 +++++++++++-------- src/IO/S3/PocoHTTPClient.h | 3 +-- 4 files changed, 16 insertions(+), 17 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 52d9111dc90..70cf7f2212e 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -748,7 +748,6 @@ Configuration markup: 10000 5000 - 100 10 1000 /var/lib/clickhouse/disks/s3/ @@ -771,7 +770,6 @@ Optional parameters: - `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL. - `connect_timeout_ms` — Socket connect timeout in milliseconds. Default value is `10 seconds`. - `request_timeout_ms` — Request timeout in milliseconds. Default value is `5 seconds`. -- `max_connections` — S3 connections pool size. Default value is `100`. - `retry_attempts` — Number of retry attempts in case of failed request. Default value is `10`. - `min_bytes_for_seek` — Minimal number of bytes to use seek operation instead of sequential read. Default value is `1 Mb`. - `metadata_path` — Path on local FS to store metadata files for S3. 
Default value is `/var/lib/clickhouse/disks//`. diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index bc74b2592b9..b10087e0697 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -733,7 +733,6 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd' 10000 5000 - 100 10 1000 /var/lib/clickhouse/disks/s3/ @@ -758,7 +757,6 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd' - `proxy` — конфигурация прокси-сервера для конечной точки S3. Каждый элемент `uri` внутри блока `proxy` должен содержать URL прокси-сервера. - `connect_timeout_ms` — таймаут подключения к сокету в миллисекундах. Значение по умолчанию: 10 секунд. - `request_timeout_ms` — таймаут выполнения запроса в миллисекундах. Значение по умолчанию: 5 секунд. -- `max_connections` — размер пула соединений S3. Значение по умолчанию: `100`. - `retry_attempts` — число попыток выполнения запроса в случае возникновения ошибки. Значение по умолчанию: `10`. - `min_bytes_for_seek` — минимальное количество байтов, которые используются для операций поиска вместо последовательного чтения. Значение по умолчанию: 1 МБайт. - `metadata_path` — путь к локальному файловому хранилищу для хранения файлов с метаданными для S3. Значение по умолчанию: `/var/lib/clickhouse/disks//`. 
diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index 471044dd08c..c31c12a1899 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -86,7 +86,6 @@ PocoHTTPClient::PocoHTTPClient(const PocoHTTPClientConfiguration & clientConfigu )) , remote_host_filter(clientConfiguration.remote_host_filter) , s3_max_redirects(clientConfiguration.s3_max_redirects) - , max_connections(clientConfiguration.maxConnections) { } @@ -156,19 +155,24 @@ void PocoHTTPClient::makeRequestInternal( for (unsigned int attempt = 0; attempt <= s3_max_redirects; ++attempt) { Poco::URI target_uri(uri); - Poco::URI proxy_uri; - - auto request_configuration = per_request_configuration(request); - if (!request_configuration.proxyHost.empty()) - { - proxy_uri.setScheme(Aws::Http::SchemeMapper::ToString(request_configuration.proxyScheme)); - proxy_uri.setHost(request_configuration.proxyHost); - proxy_uri.setPort(request_configuration.proxyPort); - } /// Reverse proxy can replace host header with resolved ip address instead of host name. /// This can lead to request signature difference on S3 side. 
- auto session = makePooledHTTPSession(target_uri, proxy_uri, timeouts, max_connections, false); + auto session = makeHTTPSession(target_uri, timeouts, false); + + auto request_configuration = per_request_configuration(request); + + if (!request_configuration.proxyHost.empty()) + { + bool use_tunnel = request_configuration.proxyScheme == Aws::Http::Scheme::HTTP && target_uri.getScheme() == "https"; + + session->setProxy( + request_configuration.proxyHost, + request_configuration.proxyPort, + Aws::Http::SchemeMapper::ToString(request_configuration.proxyScheme), + use_tunnel + ); + } Poco::Net::HTTPRequest poco_request(Poco::Net::HTTPRequest::HTTP_1_1); diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index 918943a413c..da6c4dd5985 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -40,7 +40,7 @@ private: class PocoHTTPResponse : public Aws::Http::Standard::StandardHttpResponse { public: - using SessionPtr = PooledHTTPSessionPtr; + using SessionPtr = HTTPSessionPtr; PocoHTTPResponse(const std::shared_ptr request) : Aws::Http::Standard::StandardHttpResponse(request) @@ -91,7 +91,6 @@ private: ConnectionTimeouts timeouts; const RemoteHostFilter & remote_host_filter; unsigned int s3_max_redirects; - unsigned int max_connections; }; } From 307b89f6b0101268507a284ffa35f6a2682a569d Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 15 Mar 2021 14:20:11 +0300 Subject: [PATCH 203/333] fix skip list --- tests/queries/skip_list.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 974ef48ef3c..5dc0aef0081 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -111,6 +111,7 @@ "memory_tracking", "memory_usage", "live_view", + "01761_alter_decimal_zookeeper", "01720_type_map_and_casts", "01413_alter_update_supertype", "01149_zookeeper_mutation_stuck_after_replace_partition", @@ -753,6 +754,7 @@ "01700_system_zookeeper_path_in", 
"01715_background_checker_blather_zookeeper", "01747_alter_partition_key_enum_zookeeper", + "01761_alter_decimal_zookeeper", "attach", "ddl_dictionaries", "dictionary", From 31ec24255e74098aa8e39723cfd14781770d9ce9 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 15 Mar 2021 14:24:52 +0300 Subject: [PATCH 204/333] Fix skip list --- tests/queries/skip_list.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 974ef48ef3c..90564d354e5 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -111,6 +111,7 @@ "memory_tracking", "memory_usage", "live_view", + "01560_optimize_on_insert_zookeeper", "01720_type_map_and_casts", "01413_alter_update_supertype", "01149_zookeeper_mutation_stuck_after_replace_partition", @@ -736,6 +737,7 @@ "01530_drop_database_atomic_sync", "01541_max_memory_usage_for_user_long", "01542_dictionary_load_exception_race", + "01560_optimize_on_insert_zookeeper", "01575_disable_detach_table_of_dictionary", "01593_concurrent_alter_mutations_kill", "01593_concurrent_alter_mutations_kill_many_replicas", From c2b398b39a5bada456e2c8d0d2fd84b7c1290a8a Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Mon, 15 Mar 2021 13:25:22 +0200 Subject: [PATCH 205/333] Documnentation on OPTIMIZE DEDUPLICATE BY expression. 
--- docs/en/sql-reference/statements/optimize.md | 66 ++++++++++++++++++- .../sql-reference/statements/select/index.md | 3 + 2 files changed, 67 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/statements/optimize.md b/docs/en/sql-reference/statements/optimize.md index 9b16a12d2e2..ea1fac90466 100644 --- a/docs/en/sql-reference/statements/optimize.md +++ b/docs/en/sql-reference/statements/optimize.md @@ -6,7 +6,7 @@ toc_title: OPTIMIZE # OPTIMIZE Statement {#misc_operations-optimize} ``` sql -OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE] +OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]] ``` This query tries to initialize an unscheduled merge of data parts for tables with a table engine from the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family. @@ -18,7 +18,69 @@ When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engin - If `OPTIMIZE` doesn’t perform a merge for any reason, it doesn’t notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting. - If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](../../sql-reference/statements/alter/index.md#alter-how-to-specify-part-expr). - If you specify `FINAL`, optimization is performed even when all the data is already in one part. Also merge is forced even if concurrent merges are performed. -- If you specify `DEDUPLICATE`, then completely identical rows will be deduplicated (all columns are compared), it makes sense only for the MergeTree engine. +- If you specify `DEDUPLICATE`, then completely identical rows (unless by-clause is specified) will be deduplicated (all columns are compared), it makes sense only for the MergeTree engine. 
+ + +### BY expression {#by-expression} + +If you want to perform deduplication on custom set of columns rather than on all, you can specify list of columns explicitly or use any combination of [`*`](../../../sql-reference/statements/select/index.md#asterisk), [`COLUMNS`](../../../sql-reference/statements/select/index.md#columns-expression) or [`EXCEPT`](../../../sql-reference/statements/select/index.md#except-modifier) expressions. The explictly written or implicitly expanded list of columns must include all columns specified in row ordering expression (both primary and sorting keys) and partitioning expression (partitioning key). + +Note that `*` behaves just like in `SELECT`: `MATERIALIZED`, and `ALIAS` columns are not used for expansion. +Also, it is an error to specify empty list of columns, or write an expression that results in an empty list of columns, or deduplicate by an ALIAS column. + +``` sql +OPTIMIZE TABLE table DEDUPLICATE; -- the old one +OPTIMIZE TABLE table DEDUPLICATE BY *; -- not the same as the old one, excludes MATERIALIZED columns (see the note above) +OPTIMIZE TABLE table DEDUPLICATE BY * EXCEPT colX; +OPTIMIZE TABLE table DEDUPLICATE BY * EXCEPT (colX, colY); +OPTIMIZE TABLE table DEDUPLICATE BY col1,col2,col3; +OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex'); +OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex') EXCEPT colX; +OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex') EXCEPT (colX, colY); +``` + +**Example:** + +A silly synthetic table. +``` sql +CREATE TABLE example ( + primary_key Int32, + secondary_key Int32, + value UInt32, + partition_key UInt32, + materialized_value UInt32 MATERIALIZED 12345, + aliased_value UInt32 ALIAS 2, + PRIMARY KEY primary_key +) ENGINE=MergeTree +PARTITION BY partition_key +ORDER BY (primary_key, secondary_key); +``` + +``` sql +-- The 'old' deduplicate, all columns are taken into account, i.e. 
row is removed only if all values in all columns are equal to corresponding values in previous row. +OPTIMIZE TABLE example FINAL DEDUPLICATE; +``` + +``` sql +-- Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED`: `primary_key`, `secondary_key`, `value`, `partition_key`, and `materialized_value` columns. +OPTIMIZE TABLE example FINAL DEDUPLICATE BY *; +``` + +``` sql +-- Deduplicate by all columns that are not `ALIAS` or `MATERIALIZED` and explicitly not `materialized_value`: `primary_key`, `secondary_key`, `value`, and `partition_key` columns. +OPTIMIZE TABLE example FINAL DEDUPLICATE BY * EXCEPT materialized_value; +``` + +``` sql +-- Deduplicate explicitly by `primary_key`, `secondary_key`, and `partition_key` columns. +OPTIMIZE TABLE example FINAL DEDUPLICATE BY primary_key, secondary_key, partition_key; +``` + +``` sql +-- Deduplicate by any column matching a regex: `primary_key`, `secondary_key`, and `partition_key` columns. +OPTIMIZE TABLE example FINAL DEDUPLICATE BY COLUMNS('.*_key'); +``` + !!! warning "Warning" `OPTIMIZE` can’t fix the “Too many parts” error. diff --git a/docs/en/sql-reference/statements/select/index.md b/docs/en/sql-reference/statements/select/index.md index e99ebef838c..ada4699c224 100644 --- a/docs/en/sql-reference/statements/select/index.md +++ b/docs/en/sql-reference/statements/select/index.md @@ -57,6 +57,9 @@ Specifics of each optional clause are covered in separate sections, which are li If you want to include all columns in the result, use the asterisk (`*`) symbol. For example, `SELECT * FROM ...`. + +### COLUMNS expression {#columns-expression} + To match some columns in the result with a [re2](https://en.wikipedia.org/wiki/RE2_(software)) regular expression, you can use the `COLUMNS` expression. 
``` sql From 63bf066e2d5164671a5c6ff492ea008e86eaadc3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 14:27:49 +0300 Subject: [PATCH 206/333] Improve links matching --- docs/tools/single_page.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/docs/tools/single_page.py b/docs/tools/single_page.py index 27628962271..af38c3fb8d7 100644 --- a/docs/tools/single_page.py +++ b/docs/tools/single_page.py @@ -30,11 +30,17 @@ def generate_anchor_from_path(path): def replace_link(match, path): - link = match.group(1) + title = match.group(1) + link = match.group(2) + + # Not a relative link + if link.startswith('http'): + return match.group(0) + if link.endswith('/'): link = link[0:-1] + '.md' - return '(#{})'.format(generate_anchor_from_path(os.path.normpath(os.path.join(os.path.dirname(path), link)))) + return '[{}](#{})'.format(title, generate_anchor_from_path(os.path.normpath(os.path.join(os.path.dirname(path), link)))) # Concatenates Markdown files to a single file. @@ -52,8 +58,7 @@ def concatenate(lang, docs_path, single_page_file, nav): logging.debug('Concatenating: ' + ', '.join(files_to_concatenate)) assert files_count > 0, f'Empty single-page for {lang}' - # (../anything) or (../anything#anchor) or (xyz-abc.md) or (xyz-abc.md#anchor) - relative_link_regexp = re.compile(r'\((\.\./[^)#]+|[\w\-]+\.md)(?:#[^\)]*)?\)') + link_regexp = re.compile(r'(\[[^\]]+\])\(([^)#]+)(?:#[^\)]+)?\)') for path in files_to_concatenate: try: @@ -75,9 +80,9 @@ def concatenate(lang, docs_path, single_page_file, nav): # Replace links within the docs. 
- if re.search(relative_link_regexp, line): + if re.search(link_regexp, line): line = re.sub( - relative_link_regexp, + link_regexp, lambda match: replace_link(match, path), line) From 4ad6cc331374fe93332949a20ea1be777503194b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 14:28:37 +0300 Subject: [PATCH 207/333] Improve links matching --- docs/tools/single_page.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tools/single_page.py b/docs/tools/single_page.py index af38c3fb8d7..f885a84ec89 100644 --- a/docs/tools/single_page.py +++ b/docs/tools/single_page.py @@ -40,7 +40,7 @@ def replace_link(match, path): if link.endswith('/'): link = link[0:-1] + '.md' - return '[{}](#{})'.format(title, generate_anchor_from_path(os.path.normpath(os.path.join(os.path.dirname(path), link)))) + return '{}(#{})'.format(title, generate_anchor_from_path(os.path.normpath(os.path.join(os.path.dirname(path), link)))) # Concatenates Markdown files to a single file. 
From 0a6a80fa5293ee282e34c1cd326f96f0e122d65b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 14:30:56 +0300 Subject: [PATCH 208/333] Fix broken links --- docs/ru/getting-started/playground.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/ru/getting-started/playground.md b/docs/ru/getting-started/playground.md index 86a5cd5272c..b51a9b2b436 100644 --- a/docs/ru/getting-started/playground.md +++ b/docs/ru/getting-started/playground.md @@ -36,10 +36,10 @@ ClickHouse Playground дает возможность поработать с [ - запрещены INSERT запросы Также установлены следующие опции: -- [max_result_bytes=10485760](../operations/settings/query_complexity/#max-result-bytes) -- [max_result_rows=2000](../operations/settings/query_complexity/#setting-max_result_rows) -- [result_overflow_mode=break](../operations/settings/query_complexity/#result-overflow-mode) -- [max_execution_time=60000](../operations/settings/query_complexity/#max-execution-time) +- [max_result_bytes=10485760](../operations/settings/query-complexity.md#max-result-bytes) +- [max_result_rows=2000](../operations/settings/query-complexity.md#setting-max_result_rows) +- [result_overflow_mode=break](../operations/settings/query-complexity.md#result-overflow-mode) +- [max_execution_time=60000](../operations/settings/query-complexity.md#max-execution-time) ## Примеры {#examples} From 8850856276a42aaa2ed4810009cd4f63af06d4f7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 14:32:26 +0300 Subject: [PATCH 209/333] Remove "Original article" (must be automated instead) --- docs/ru/commercial/cloud.md | 1 - docs/ru/development/style.md | 1 - docs/ru/engines/database-engines/index.md | 1 - docs/ru/engines/database-engines/lazy.md | 1 - docs/ru/engines/database-engines/materialize-mysql.md | 1 - docs/ru/engines/table-engines/index.md | 1 - docs/ru/engines/table-engines/integrations/embedded-rocksdb.md | 1 - 
docs/ru/engines/table-engines/integrations/hdfs.md | 1 - docs/ru/engines/table-engines/integrations/index.md | 1 - docs/ru/engines/table-engines/integrations/jdbc.md | 1 - docs/ru/engines/table-engines/integrations/kafka.md | 1 - docs/ru/engines/table-engines/integrations/mysql.md | 1 - docs/ru/engines/table-engines/integrations/odbc.md | 1 - docs/ru/engines/table-engines/integrations/postgresql.md | 1 - docs/ru/engines/table-engines/integrations/rabbitmq.md | 1 - docs/ru/engines/table-engines/integrations/s3.md | 1 - docs/ru/engines/table-engines/log-family/index.md | 1 - docs/ru/engines/table-engines/log-family/log.md | 1 - docs/ru/engines/table-engines/log-family/stripelog.md | 1 - docs/ru/engines/table-engines/log-family/tinylog.md | 1 - .../table-engines/mergetree-family/aggregatingmergetree.md | 1 - .../table-engines/mergetree-family/collapsingmergetree.md | 1 - .../table-engines/mergetree-family/custom-partitioning-key.md | 1 - .../engines/table-engines/mergetree-family/graphitemergetree.md | 1 - docs/ru/engines/table-engines/mergetree-family/mergetree.md | 1 - .../engines/table-engines/mergetree-family/replacingmergetree.md | 1 - docs/ru/engines/table-engines/mergetree-family/replication.md | 1 - .../engines/table-engines/mergetree-family/summingmergetree.md | 1 - .../mergetree-family/versionedcollapsingmergetree.md | 1 - docs/ru/engines/table-engines/special/buffer.md | 1 - docs/ru/engines/table-engines/special/dictionary.md | 1 - docs/ru/engines/table-engines/special/distributed.md | 1 - docs/ru/engines/table-engines/special/external-data.md | 1 - docs/ru/engines/table-engines/special/file.md | 1 - docs/ru/engines/table-engines/special/index.md | 1 - docs/ru/engines/table-engines/special/join.md | 1 - docs/ru/engines/table-engines/special/materializedview.md | 1 - docs/ru/engines/table-engines/special/memory.md | 1 - docs/ru/engines/table-engines/special/merge.md | 1 - docs/ru/engines/table-engines/special/null.md | 1 - 
docs/ru/engines/table-engines/special/set.md | 1 - docs/ru/engines/table-engines/special/url.md | 1 - docs/ru/engines/table-engines/special/view.md | 1 - docs/ru/getting-started/example-datasets/amplab-benchmark.md | 1 - docs/ru/getting-started/example-datasets/brown-benchmark.md | 1 - docs/ru/getting-started/example-datasets/criteo.md | 1 - docs/ru/getting-started/example-datasets/index.md | 1 - docs/ru/getting-started/example-datasets/nyc-taxi.md | 1 - docs/ru/getting-started/example-datasets/ontime.md | 1 - docs/ru/getting-started/example-datasets/wikistat.md | 1 - docs/ru/getting-started/index.md | 1 - docs/ru/getting-started/install.md | 1 - docs/ru/index.md | 1 - docs/ru/interfaces/cli.md | 1 - docs/ru/interfaces/cpp.md | 1 - docs/ru/interfaces/formats.md | 1 - docs/ru/interfaces/http.md | 1 - docs/ru/interfaces/index.md | 1 - docs/ru/interfaces/jdbc.md | 1 - docs/ru/interfaces/odbc.md | 1 - docs/ru/interfaces/tcp.md | 1 - docs/ru/interfaces/third-party/client-libraries.md | 1 - docs/ru/interfaces/third-party/gui.md | 1 - docs/ru/interfaces/third-party/index.md | 1 - docs/ru/interfaces/third-party/integrations.md | 1 - docs/ru/interfaces/third-party/proxy.md | 1 - docs/ru/introduction/distinctive-features.md | 1 - docs/ru/introduction/history.md | 1 - docs/ru/introduction/info.md | 1 - docs/ru/introduction/performance.md | 1 - docs/ru/operations/access-rights.md | 1 - docs/ru/operations/backup.md | 1 - docs/ru/operations/caches.md | 1 - docs/ru/operations/configuration-files.md | 1 - docs/ru/operations/index.md | 1 - docs/ru/operations/monitoring.md | 1 - docs/ru/operations/opentelemetry.md | 1 - docs/ru/operations/quotas.md | 1 - docs/ru/operations/server-configuration-parameters/index.md | 1 - docs/ru/operations/server-configuration-parameters/settings.md | 1 - docs/ru/operations/settings/constraints-on-settings.md | 1 - docs/ru/operations/settings/index.md | 1 - docs/ru/operations/settings/permissions-for-queries.md | 1 - 
docs/ru/operations/settings/query-complexity.md | 1 - docs/ru/operations/settings/settings-profiles.md | 1 - docs/ru/operations/settings/settings-users.md | 1 - docs/ru/operations/settings/settings.md | 1 - docs/ru/operations/system-tables/asynchronous_metric_log.md | 1 - docs/ru/operations/system-tables/asynchronous_metrics.md | 1 - docs/ru/operations/system-tables/clusters.md | 1 - docs/ru/operations/system-tables/columns.md | 1 - docs/ru/operations/system-tables/contributors.md | 1 - docs/ru/operations/system-tables/current-roles.md | 1 - docs/ru/operations/system-tables/data_type_families.md | 1 - docs/ru/operations/system-tables/databases.md | 1 - docs/ru/operations/system-tables/detached_parts.md | 1 - docs/ru/operations/system-tables/dictionaries.md | 1 - docs/ru/operations/system-tables/disks.md | 1 - docs/ru/operations/system-tables/distributed_ddl_queue.md | 1 - docs/ru/operations/system-tables/distribution_queue.md | 1 - docs/ru/operations/system-tables/enabled-roles.md | 1 - docs/ru/operations/system-tables/events.md | 1 - docs/ru/operations/system-tables/functions.md | 1 - docs/ru/operations/system-tables/grants.md | 1 - docs/ru/operations/system-tables/graphite_retentions.md | 1 - docs/ru/operations/system-tables/index.md | 1 - docs/ru/operations/system-tables/licenses.md | 1 - docs/ru/operations/system-tables/merges.md | 1 - docs/ru/operations/system-tables/metric_log.md | 1 - docs/ru/operations/system-tables/metrics.md | 1 - docs/ru/operations/system-tables/mutations.md | 1 - docs/ru/operations/system-tables/numbers.md | 1 - docs/ru/operations/system-tables/numbers_mt.md | 1 - docs/ru/operations/system-tables/one.md | 1 - docs/ru/operations/system-tables/opentelemetry_span_log.md | 1 - docs/ru/operations/system-tables/part_log.md | 1 - docs/ru/operations/system-tables/parts.md | 1 - docs/ru/operations/system-tables/parts_columns.md | 1 - docs/ru/operations/system-tables/processes.md | 1 - docs/ru/operations/system-tables/query_log.md | 1 - 
docs/ru/operations/system-tables/query_thread_log.md | 1 - docs/ru/operations/system-tables/quota_limits.md | 1 - docs/ru/operations/system-tables/quota_usage.md | 1 - docs/ru/operations/system-tables/quotas.md | 1 - docs/ru/operations/system-tables/quotas_usage.md | 1 - docs/ru/operations/system-tables/replicas.md | 1 - docs/ru/operations/system-tables/replicated_fetches.md | 1 - docs/ru/operations/system-tables/replication_queue.md | 1 - docs/ru/operations/system-tables/role-grants.md | 1 - docs/ru/operations/system-tables/roles.md | 1 - docs/ru/operations/system-tables/row_policies.md | 1 - docs/ru/operations/system-tables/settings.md | 1 - docs/ru/operations/system-tables/settings_profile_elements.md | 1 - docs/ru/operations/system-tables/settings_profiles.md | 1 - docs/ru/operations/system-tables/stack_trace.md | 1 - docs/ru/operations/system-tables/storage_policies.md | 1 - docs/ru/operations/system-tables/table_engines.md | 1 - docs/ru/operations/system-tables/tables.md | 1 - docs/ru/operations/system-tables/text_log.md | 1 - docs/ru/operations/system-tables/trace_log.md | 1 - docs/ru/operations/system-tables/users.md | 1 - docs/ru/operations/system-tables/zookeeper.md | 1 - docs/ru/operations/tips.md | 1 - docs/ru/operations/utilities/clickhouse-benchmark.md | 1 - docs/ru/operations/utilities/clickhouse-copier.md | 1 - docs/ru/operations/utilities/clickhouse-local.md | 1 - docs/ru/operations/utilities/index.md | 1 - docs/ru/sql-reference/aggregate-functions/combinators.md | 1 - docs/ru/sql-reference/aggregate-functions/index.md | 1 - .../ru/sql-reference/aggregate-functions/parametric-functions.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/any.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/anylast.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/argmax.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/argmin.md | 1 - 
docs/ru/sql-reference/aggregate-functions/reference/avg.md | 1 - .../sql-reference/aggregate-functions/reference/avgweighted.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/corr.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/count.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/covarpop.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md | 1 - .../ru/sql-reference/aggregate-functions/reference/grouparray.md | 1 - .../aggregate-functions/reference/grouparrayinsertat.md | 1 - .../aggregate-functions/reference/grouparraymovingavg.md | 1 - .../aggregate-functions/reference/grouparraymovingsum.md | 1 - .../sql-reference/aggregate-functions/reference/groupbitand.md | 1 - .../sql-reference/aggregate-functions/reference/groupbitmap.md | 1 - .../ru/sql-reference/aggregate-functions/reference/groupbitor.md | 1 - .../sql-reference/aggregate-functions/reference/groupbitxor.md | 1 - .../aggregate-functions/reference/groupuniqarray.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/index.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md | 1 - .../aggregate-functions/reference/mannwhitneyutest.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/max.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/median.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/min.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/quantile.md | 1 - .../aggregate-functions/reference/quantiledeterministic.md | 1 - .../sql-reference/aggregate-functions/reference/quantileexact.md | 1 - .../aggregate-functions/reference/quantileexactweighted.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/quantiles.md | 1 - .../aggregate-functions/reference/quantiletdigest.md | 1 - .../aggregate-functions/reference/quantiletdigestweighted.md | 1 - .../aggregate-functions/reference/quantiletiming.md | 1 - 
.../aggregate-functions/reference/quantiletimingweighted.md | 1 - .../aggregate-functions/reference/simplelinearregression.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/skewpop.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md | 1 - .../ru/sql-reference/aggregate-functions/reference/stddevsamp.md | 1 - .../aggregate-functions/reference/stochasticlinearregression.md | 1 - .../reference/stochasticlogisticregression.md | 1 - .../sql-reference/aggregate-functions/reference/studentttest.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/sum.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/summap.md | 1 - .../aggregate-functions/reference/sumwithoverflow.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/topk.md | 1 - .../sql-reference/aggregate-functions/reference/topkweighted.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/uniq.md | 1 - .../sql-reference/aggregate-functions/reference/uniqcombined.md | 1 - .../aggregate-functions/reference/uniqcombined64.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/varpop.md | 1 - docs/ru/sql-reference/aggregate-functions/reference/varsamp.md | 1 - .../ru/sql-reference/aggregate-functions/reference/welchttest.md | 1 - docs/ru/sql-reference/data-types/aggregatefunction.md | 1 - docs/ru/sql-reference/data-types/array.md | 1 - docs/ru/sql-reference/data-types/boolean.md | 1 - docs/ru/sql-reference/data-types/date.md | 1 - docs/ru/sql-reference/data-types/datetime.md | 1 - docs/ru/sql-reference/data-types/decimal.md | 1 - docs/ru/sql-reference/data-types/domains/index.md | 1 - docs/ru/sql-reference/data-types/domains/ipv4.md | 1 - docs/ru/sql-reference/data-types/domains/ipv6.md | 1 - docs/ru/sql-reference/data-types/enum.md | 1 - 
docs/ru/sql-reference/data-types/fixedstring.md | 1 - docs/ru/sql-reference/data-types/float.md | 1 - docs/ru/sql-reference/data-types/geo.md | 1 - docs/ru/sql-reference/data-types/index.md | 1 - docs/ru/sql-reference/data-types/int-uint.md | 1 - docs/ru/sql-reference/data-types/lowcardinality.md | 1 - docs/ru/sql-reference/data-types/multiword-types.md | 1 - docs/ru/sql-reference/data-types/nested-data-structures/index.md | 1 - .../ru/sql-reference/data-types/nested-data-structures/nested.md | 1 - docs/ru/sql-reference/data-types/nullable.md | 1 - docs/ru/sql-reference/data-types/simpleaggregatefunction.md | 1 - .../ru/sql-reference/data-types/special-data-types/expression.md | 1 - docs/ru/sql-reference/data-types/special-data-types/index.md | 1 - docs/ru/sql-reference/data-types/special-data-types/nothing.md | 1 - docs/ru/sql-reference/data-types/special-data-types/set.md | 1 - docs/ru/sql-reference/data-types/string.md | 1 - docs/ru/sql-reference/data-types/tuple.md | 1 - .../external-dictionaries/external-dicts-dict-hierarchical.md | 1 - .../external-dictionaries/external-dicts-dict-layout.md | 1 - .../external-dictionaries/external-dicts-dict-lifetime.md | 1 - .../external-dictionaries/external-dicts-dict-sources.md | 1 - .../external-dictionaries/external-dicts-dict-structure.md | 1 - .../dictionaries/external-dictionaries/external-dicts-dict.md | 1 - .../dictionaries/external-dictionaries/external-dicts.md | 1 - docs/ru/sql-reference/dictionaries/index.md | 1 - docs/ru/sql-reference/dictionaries/internal-dicts.md | 1 - docs/ru/sql-reference/distributed-ddl.md | 1 - docs/ru/sql-reference/functions/arithmetic-functions.md | 1 - docs/ru/sql-reference/functions/array-functions.md | 1 - docs/ru/sql-reference/functions/array-join.md | 1 - docs/ru/sql-reference/functions/bit-functions.md | 1 - docs/ru/sql-reference/functions/bitmap-functions.md | 1 - docs/ru/sql-reference/functions/comparison-functions.md | 1 - 
docs/ru/sql-reference/functions/conditional-functions.md | 1 - docs/ru/sql-reference/functions/date-time-functions.md | 1 - docs/ru/sql-reference/functions/encoding-functions.md | 1 - docs/ru/sql-reference/functions/ext-dict-functions.md | 1 - docs/ru/sql-reference/functions/functions-for-nulls.md | 1 - docs/ru/sql-reference/functions/geo/coordinates.md | 1 - docs/ru/sql-reference/functions/geo/geohash.md | 1 - docs/ru/sql-reference/functions/geo/h3.md | 1 - docs/ru/sql-reference/functions/geo/index.md | 1 - docs/ru/sql-reference/functions/hash-functions.md | 1 - docs/ru/sql-reference/functions/in-functions.md | 1 - docs/ru/sql-reference/functions/index.md | 1 - docs/ru/sql-reference/functions/ip-address-functions.md | 1 - docs/ru/sql-reference/functions/json-functions.md | 1 - docs/ru/sql-reference/functions/logical-functions.md | 1 - docs/ru/sql-reference/functions/math-functions.md | 1 - docs/ru/sql-reference/functions/other-functions.md | 1 - docs/ru/sql-reference/functions/random-functions.md | 1 - docs/ru/sql-reference/functions/rounding-functions.md | 1 - docs/ru/sql-reference/functions/splitting-merging-functions.md | 1 - docs/ru/sql-reference/functions/string-functions.md | 1 - docs/ru/sql-reference/functions/string-replace-functions.md | 1 - docs/ru/sql-reference/functions/string-search-functions.md | 1 - docs/ru/sql-reference/functions/tuple-functions.md | 1 - docs/ru/sql-reference/functions/tuple-map-functions.md | 1 - docs/ru/sql-reference/functions/type-conversion-functions.md | 1 - docs/ru/sql-reference/functions/url-functions.md | 1 - docs/ru/sql-reference/functions/ym-dict-functions.md | 1 - docs/ru/sql-reference/index.md | 1 - docs/ru/sql-reference/operators/index.md | 1 - docs/ru/sql-reference/statements/alter/column.md | 1 - docs/ru/sql-reference/statements/alter/constraint.md | 1 - docs/ru/sql-reference/statements/alter/delete.md | 1 - docs/ru/sql-reference/statements/alter/index.md | 1 - docs/ru/sql-reference/statements/alter/index/index.md | 
1 - docs/ru/sql-reference/statements/alter/order-by.md | 1 - docs/ru/sql-reference/statements/alter/partition.md | 1 - docs/ru/sql-reference/statements/alter/quota.md | 1 - docs/ru/sql-reference/statements/alter/role.md | 1 - docs/ru/sql-reference/statements/alter/row-policy.md | 1 - docs/ru/sql-reference/statements/alter/settings-profile.md | 1 - docs/ru/sql-reference/statements/alter/ttl.md | 1 - docs/ru/sql-reference/statements/alter/update.md | 1 - docs/ru/sql-reference/statements/alter/user.md | 1 - docs/ru/sql-reference/statements/attach.md | 1 - docs/ru/sql-reference/statements/check-table.md | 1 - docs/ru/sql-reference/statements/create/database.md | 1 - docs/ru/sql-reference/statements/create/dictionary.md | 1 - docs/ru/sql-reference/statements/create/index.md | 1 - docs/ru/sql-reference/statements/create/quota.md | 1 - docs/ru/sql-reference/statements/create/role.md | 1 - docs/ru/sql-reference/statements/create/row-policy.md | 1 - docs/ru/sql-reference/statements/create/settings-profile.md | 1 - docs/ru/sql-reference/statements/create/table.md | 1 - docs/ru/sql-reference/statements/create/user.md | 1 - docs/ru/sql-reference/statements/create/view.md | 1 - docs/ru/sql-reference/statements/describe-table.md | 1 - docs/ru/sql-reference/statements/detach.md | 1 - docs/ru/sql-reference/statements/drop.md | 1 - docs/ru/sql-reference/statements/exists.md | 1 - docs/ru/sql-reference/statements/grant.md | 1 - docs/ru/sql-reference/statements/insert-into.md | 1 - docs/ru/sql-reference/statements/kill.md | 1 - docs/ru/sql-reference/statements/misc.md | 1 - docs/ru/sql-reference/statements/optimize.md | 1 - docs/ru/sql-reference/statements/rename.md | 1 - docs/ru/sql-reference/statements/revoke.md | 1 - docs/ru/sql-reference/statements/select/all.md | 1 - docs/ru/sql-reference/statements/select/index.md | 1 - docs/ru/sql-reference/statements/select/order-by.md | 1 - docs/ru/sql-reference/statements/select/union.md | 1 - docs/ru/sql-reference/statements/select/with.md 
| 1 - docs/ru/sql-reference/statements/set-role.md | 1 - docs/ru/sql-reference/statements/set.md | 1 - docs/ru/sql-reference/statements/show.md | 1 - docs/ru/sql-reference/statements/system.md | 1 - docs/ru/sql-reference/statements/truncate.md | 1 - docs/ru/sql-reference/statements/use.md | 1 - docs/ru/sql-reference/syntax.md | 1 - docs/ru/sql-reference/table-functions/file.md | 1 - docs/ru/sql-reference/table-functions/generate.md | 1 - docs/ru/sql-reference/table-functions/hdfs.md | 1 - docs/ru/sql-reference/table-functions/index.md | 1 - docs/ru/sql-reference/table-functions/input.md | 1 - docs/ru/sql-reference/table-functions/jdbc.md | 1 - docs/ru/sql-reference/table-functions/merge.md | 1 - docs/ru/sql-reference/table-functions/mysql.md | 1 - docs/ru/sql-reference/table-functions/numbers.md | 1 - docs/ru/sql-reference/table-functions/odbc.md | 1 - docs/ru/sql-reference/table-functions/remote.md | 1 - docs/ru/sql-reference/table-functions/url.md | 1 - docs/ru/sql-reference/table-functions/view.md | 1 - docs/ru/whats-new/extended-roadmap.md | 1 - docs/ru/whats-new/security-changelog.md | 1 - 344 files changed, 344 deletions(-) diff --git a/docs/ru/commercial/cloud.md b/docs/ru/commercial/cloud.md index 8023f738c70..610f0f00a99 100644 --- a/docs/ru/commercial/cloud.md +++ b/docs/ru/commercial/cloud.md @@ -29,4 +29,3 @@ toc_title: "Поставщики облачных услуг ClickHouse" - cross-az масштабирование для повышения производительности и обеспечения высокой доступности - встроенный мониторинг и редактор SQL-запросов -{## [Оригинальная статья](https://clickhouse.tech/docs/ru/commercial/cloud/) ##} diff --git a/docs/ru/development/style.md b/docs/ru/development/style.md index 72607ca6bad..f08ecc3c4c7 100644 --- a/docs/ru/development/style.md +++ b/docs/ru/development/style.md @@ -911,4 +911,3 @@ function( size_t limit) ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/development/style/) diff --git a/docs/ru/engines/database-engines/index.md 
b/docs/ru/engines/database-engines/index.md index e06c032a636..e56faad39ef 100644 --- a/docs/ru/engines/database-engines/index.md +++ b/docs/ru/engines/database-engines/index.md @@ -18,4 +18,3 @@ toc_title: "Введение" - [Lazy](../../engines/database-engines/lazy.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/database_engines/) diff --git a/docs/ru/engines/database-engines/lazy.md b/docs/ru/engines/database-engines/lazy.md index c01aae0284e..140a67be761 100644 --- a/docs/ru/engines/database-engines/lazy.md +++ b/docs/ru/engines/database-engines/lazy.md @@ -15,4 +15,3 @@ toc_title: Lazy CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds); ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/database_engines/lazy/) diff --git a/docs/ru/engines/database-engines/materialize-mysql.md b/docs/ru/engines/database-engines/materialize-mysql.md index 3022542e294..2067dfecca0 100644 --- a/docs/ru/engines/database-engines/materialize-mysql.md +++ b/docs/ru/engines/database-engines/materialize-mysql.md @@ -157,4 +157,3 @@ SELECT * FROM mysql.test; └───┴─────┴──────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/database-engines/materialize-mysql/) diff --git a/docs/ru/engines/table-engines/index.md b/docs/ru/engines/table-engines/index.md index 05236eb5b33..6c11011a307 100644 --- a/docs/ru/engines/table-engines/index.md +++ b/docs/ru/engines/table-engines/index.md @@ -80,4 +80,3 @@ toc_title: "Введение" При создании таблицы со столбцом, имя которого совпадает с именем одного из виртуальных столбцов таблицы, виртуальный столбец становится недоступным. Не делайте так. Чтобы помочь избежать конфликтов, имена виртуальных столбцов обычно предваряются подчеркиванием. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/) diff --git a/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md b/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md index 7bd1420dfab..f66e789a392 100644 --- a/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/ru/engines/table-engines/integrations/embedded-rocksdb.md @@ -41,4 +41,3 @@ ENGINE = EmbeddedRocksDB PRIMARY KEY key; ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/embedded-rocksdb/) \ No newline at end of file diff --git a/docs/ru/engines/table-engines/integrations/hdfs.md b/docs/ru/engines/table-engines/integrations/hdfs.md index 449d7c9a20c..3d9cb388a01 100644 --- a/docs/ru/engines/table-engines/integrations/hdfs.md +++ b/docs/ru/engines/table-engines/integrations/hdfs.md @@ -202,4 +202,3 @@ CREATE TABLE big_table (name String, value UInt32) ENGINE = HDFS('hdfs://hdfs1:9 - [Виртуальные колонки](../../../engines/table-engines/index.md#table_engines-virtual_columns) -[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/hdfs/) diff --git a/docs/ru/engines/table-engines/integrations/index.md b/docs/ru/engines/table-engines/integrations/index.md index c7004d104f8..cb217270129 100644 --- a/docs/ru/engines/table-engines/integrations/index.md +++ b/docs/ru/engines/table-engines/integrations/index.md @@ -20,4 +20,3 @@ toc_priority: 30 - [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) - [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/) diff --git a/docs/ru/engines/table-engines/integrations/jdbc.md b/docs/ru/engines/table-engines/integrations/jdbc.md index 8ead5abb277..e2db6fac0b2 100644 --- a/docs/ru/engines/table-engines/integrations/jdbc.md +++ b/docs/ru/engines/table-engines/integrations/jdbc.md @@ -89,4 +89,3 @@ FROM 
jdbc_table - [Табличная функция JDBC](../../../engines/table-engines/integrations/jdbc.md). -[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/jdbc/) diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md index 06a0d4df180..f053b80aebd 100644 --- a/docs/ru/engines/table-engines/integrations/kafka.md +++ b/docs/ru/engines/table-engines/integrations/kafka.md @@ -193,4 +193,3 @@ ClickHouse может поддерживать учетные данные Kerbe - [Виртуальные столбцы](index.md#table_engines-virtual_columns) - [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) -[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/kafka/) diff --git a/docs/ru/engines/table-engines/integrations/mysql.md b/docs/ru/engines/table-engines/integrations/mysql.md index bc53e0f1fbb..9152a57d122 100644 --- a/docs/ru/engines/table-engines/integrations/mysql.md +++ b/docs/ru/engines/table-engines/integrations/mysql.md @@ -101,4 +101,3 @@ SELECT * FROM mysql_table - [Табличная функция ‘mysql’](../../../engines/table-engines/integrations/mysql.md) - [Использование MySQL в качестве источника для внешнего словаря](../../../engines/table-engines/integrations/mysql.md#dicts-external_dicts_dict_sources-mysql) -[Оригинальная статья](https://clickhouse.tech/docs/engines/table-engines/integrations/mysql/) diff --git a/docs/ru/engines/table-engines/integrations/odbc.md b/docs/ru/engines/table-engines/integrations/odbc.md index ee34be302bc..b2faa9b1e9e 100644 --- a/docs/ru/engines/table-engines/integrations/odbc.md +++ b/docs/ru/engines/table-engines/integrations/odbc.md @@ -128,4 +128,3 @@ SELECT * FROM odbc_t - [Внешние словари ODBC](../../../engines/table-engines/integrations/odbc.md#dicts-external_dicts_dict_sources-odbc) - [Табличная функция odbc](../../../engines/table-engines/integrations/odbc.md) -[Оригинальная 
статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/odbc/) diff --git a/docs/ru/engines/table-engines/integrations/postgresql.md b/docs/ru/engines/table-engines/integrations/postgresql.md index bc26899f55b..ecf431830f8 100644 --- a/docs/ru/engines/table-engines/integrations/postgresql.md +++ b/docs/ru/engines/table-engines/integrations/postgresql.md @@ -103,4 +103,3 @@ SELECT * FROM postgresql_table WHERE str IN ('test') - [Табличная функция ‘postgresql’](../../../sql-reference/table-functions/postgresql.md) - [Использование PostgreSQL в качестве истояника для внешнего словаря](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql) -[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/postgresql/) diff --git a/docs/ru/engines/table-engines/integrations/rabbitmq.md b/docs/ru/engines/table-engines/integrations/rabbitmq.md index 1865cb16fcc..ef8a58c4c82 100644 --- a/docs/ru/engines/table-engines/integrations/rabbitmq.md +++ b/docs/ru/engines/table-engines/integrations/rabbitmq.md @@ -156,4 +156,3 @@ Example: - `_message_id` - значение поля `messageID` полученного сообщения. Данное поле непусто, если указано в параметрах при отправке сообщения. - `_timestamp` - значение поля `timestamp` полученного сообщения. Данное поле непусто, если указано в параметрах при отправке сообщения. -[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/rabbitmq/) diff --git a/docs/ru/engines/table-engines/integrations/s3.md b/docs/ru/engines/table-engines/integrations/s3.md index f1b2e78b0ba..4eaf2d5b05c 100644 --- a/docs/ru/engines/table-engines/integrations/s3.md +++ b/docs/ru/engines/table-engines/integrations/s3.md @@ -153,4 +153,3 @@ CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage. 
``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/s3/) diff --git a/docs/ru/engines/table-engines/log-family/index.md b/docs/ru/engines/table-engines/log-family/index.md index b2a56f650f4..7737eac2f43 100644 --- a/docs/ru/engines/table-engines/log-family/index.md +++ b/docs/ru/engines/table-engines/log-family/index.md @@ -42,4 +42,3 @@ toc_priority: 29 Движки `Log` и `StripeLog` поддерживают параллельное чтение. При чтении данных, ClickHouse использует множество потоков. Каждый поток обрабатывает отдельный блок данных. Движок `Log` сохраняет каждый столбец таблицы в отдельном файле. Движок `StripeLog` хранит все данные в одном файле. Таким образом, движок `StripeLog` использует меньше дескрипторов в операционной системе, а движок `Log` обеспечивает более эффективное считывание данных. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/log_family/) diff --git a/docs/ru/engines/table-engines/log-family/log.md b/docs/ru/engines/table-engines/log-family/log.md index fad331454c7..6c5bf2221f8 100644 --- a/docs/ru/engines/table-engines/log-family/log.md +++ b/docs/ru/engines/table-engines/log-family/log.md @@ -11,4 +11,3 @@ toc_title: Log При конкурентном доступе к данным, чтения могут выполняться одновременно, а записи блокируют чтения и друг друга. Движок Log не поддерживает индексы. Также, если при записи в таблицу произошёл сбой, то таблица станет битой, и чтения из неё будут возвращать ошибку. Движок Log подходит для временных данных, write-once таблиц, а также для тестовых и демонстрационных целей. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/log/) diff --git a/docs/ru/engines/table-engines/log-family/stripelog.md b/docs/ru/engines/table-engines/log-family/stripelog.md index e505aae4c52..2f4b228f894 100644 --- a/docs/ru/engines/table-engines/log-family/stripelog.md +++ b/docs/ru/engines/table-engines/log-family/stripelog.md @@ -90,4 +90,3 @@ SELECT * FROM stripe_log_table ORDER BY timestamp └─────────────────────┴──────────────┴────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/stripelog/) diff --git a/docs/ru/engines/table-engines/log-family/tinylog.md b/docs/ru/engines/table-engines/log-family/tinylog.md index d5c24d41ca4..721355d8702 100644 --- a/docs/ru/engines/table-engines/log-family/tinylog.md +++ b/docs/ru/engines/table-engines/log-family/tinylog.md @@ -11,4 +11,3 @@ toc_title: TinyLog Запросы выполняются в один поток. То есть, этот движок предназначен для сравнительно маленьких таблиц (до 1 000 000 строк). Этот движок таблиц имеет смысл использовать в том случае, когда у вас есть много маленьких таблиц, так как он проще, чем движок [Log](log.md) (требуется открывать меньше файлов). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/tinylog/) diff --git a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md index 99b4ec06765..6e01cc2bcac 100644 --- a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md @@ -97,4 +97,3 @@ GROUP BY StartDate ORDER BY StartDate; ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/aggregatingmergetree/) diff --git a/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md index 8ea3a5a7c92..424fcbb5873 100644 --- a/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md @@ -304,4 +304,3 @@ select * FROM UAct └─────────────────────┴───────────┴──────────┴──────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/collapsingmergetree/) diff --git a/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md index 00d850b01c3..9a09618e508 100644 --- a/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -129,4 +129,3 @@ drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached ClickHouse позволяет производить различные манипуляции с кусками: удалять, копировать из одной таблицы в другую или создавать их резервные копии. Подробнее см. в разделе [Манипуляции с партициями и кусками](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#alter_manipulations-with-partitions). 
-[Оригинальная статья:](https://clickhouse.tech/docs/ru/operations/table_engines/custom_partitioning_key/) diff --git a/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md b/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md index e47c9127711..f3e915a413b 100644 --- a/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md @@ -171,4 +171,3 @@ default ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/graphitemergetree/) diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index bc74b2592b9..0975544263b 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -807,4 +807,3 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd' Если диск сконфигурирован как `cold`, данные будут переноситься в S3 при срабатывании правил TTL или когда свободное место на локальном диске станет меньше порогового значения, которое определяется как `move_factor * disk_size`. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/mergetree-family/mergetree/) diff --git a/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md index a4e47b161ad..ec0b339e8c9 100644 --- a/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md @@ -66,4 +66,3 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/replacingmergetree/) diff --git a/docs/ru/engines/table-engines/mergetree-family/replication.md b/docs/ru/engines/table-engines/mergetree-family/replication.md index 1735a02cf4c..848adbee4da 100644 --- a/docs/ru/engines/table-engines/mergetree-family/replication.md +++ b/docs/ru/engines/table-engines/mergetree-family/replication.md @@ -251,4 +251,3 @@ $ sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data - [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) - [execute_merges_on_single_replica_time_threshold](../../../operations/settings/settings.md#execute-merges-on-single-replica-time-threshold) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/replication/) diff --git a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md index 7b9c11adc2e..adb40037319 100644 --- a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md @@ -136,4 +136,3 @@ ClickHouse может слить куски данных таким образо Для вложенной структуры данных не нужно указывать её столбцы в кортеже столбцов для суммирования. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/summingmergetree/) diff --git a/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md index 2adb8cc0d77..61688b1f00f 100644 --- a/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md @@ -233,4 +233,3 @@ SELECT * FROM UAct FINAL Это очень неэффективный способ выбора данных. Не используйте его для больших таблиц. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/versionedcollapsingmergetree/) diff --git a/docs/ru/engines/table-engines/special/buffer.md b/docs/ru/engines/table-engines/special/buffer.md index 75ce12f50fa..ba865b72b78 100644 --- a/docs/ru/engines/table-engines/special/buffer.md +++ b/docs/ru/engines/table-engines/special/buffer.md @@ -66,4 +66,3 @@ CREATE TABLE merge.hits_buffer AS merge.hits ENGINE = Buffer(merge, hits, 16, 10 Заметим, что даже для таблиц типа Buffer не имеет смысла вставлять данные по одной строке, так как таким образом будет достигнута скорость всего лишь в несколько тысяч строк в секунду, тогда как при вставке более крупными блоками, достижимо более миллиона строк в секунду (смотрите раздел [«Производительность»](../../../introduction/performance/). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/buffer/) diff --git a/docs/ru/engines/table-engines/special/dictionary.md b/docs/ru/engines/table-engines/special/dictionary.md index 048da157b2d..243fd5395c0 100644 --- a/docs/ru/engines/table-engines/special/dictionary.md +++ b/docs/ru/engines/table-engines/special/dictionary.md @@ -90,4 +90,3 @@ select * from products limit 1; └───────────────┴─────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/dictionary/) diff --git a/docs/ru/engines/table-engines/special/distributed.md b/docs/ru/engines/table-engines/special/distributed.md index 7ab0b916337..86eef35ebbc 100644 --- a/docs/ru/engines/table-engines/special/distributed.md +++ b/docs/ru/engines/table-engines/special/distributed.md @@ -136,4 +136,3 @@ logs - имя кластера в конфигурационном файле с При выставлении опции max_parallel_replicas выполнение запроса распараллеливается по всем репликам внутри одного шарда. Подробнее смотрите раздел [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas). -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/distributed/) diff --git a/docs/ru/engines/table-engines/special/external-data.md b/docs/ru/engines/table-engines/special/external-data.md index da9e132dd4f..29075837aba 100644 --- a/docs/ru/engines/table-engines/special/external-data.md +++ b/docs/ru/engines/table-engines/special/external-data.md @@ -65,4 +65,3 @@ $ curl -F 'passwd=@passwd.tsv;' 'http://localhost:8123/?query=SELECT+shell,+coun При распределённой обработке запроса, временные таблицы передаются на все удалённые серверы. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/external_data/) diff --git a/docs/ru/engines/table-engines/special/file.md b/docs/ru/engines/table-engines/special/file.md index 9be09fd33e6..6f1c723d2a7 100644 --- a/docs/ru/engines/table-engines/special/file.md +++ b/docs/ru/engines/table-engines/special/file.md @@ -81,4 +81,3 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64 - индексы; - репликация. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/file/) diff --git a/docs/ru/engines/table-engines/special/index.md b/docs/ru/engines/table-engines/special/index.md index 0300d3ad641..231bf2979ed 100644 --- a/docs/ru/engines/table-engines/special/index.md +++ b/docs/ru/engines/table-engines/special/index.md @@ -13,4 +13,3 @@ toc_priority: 31 Остальные движки таблиц уникальны по своему назначению и еще не сгруппированы в семейства, поэтому они помещены в эту специальную категорию. -[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/special/) diff --git a/docs/ru/engines/table-engines/special/join.md b/docs/ru/engines/table-engines/special/join.md index 8cb7acd91e1..ef27ac3f10f 100644 --- a/docs/ru/engines/table-engines/special/join.md +++ b/docs/ru/engines/table-engines/special/join.md @@ -107,4 +107,3 @@ SELECT joinGet('id_val_join', 'val', toUInt32(1)) При аварийном перезапуске сервера блок данных на диске может быть потерян или повреждён. В последнем случае, может потребоваться вручную удалить файл с повреждёнными данными. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/join/) diff --git a/docs/ru/engines/table-engines/special/materializedview.md b/docs/ru/engines/table-engines/special/materializedview.md index 1281d1db9ab..6b82f95df92 100644 --- a/docs/ru/engines/table-engines/special/materializedview.md +++ b/docs/ru/engines/table-engines/special/materializedview.md @@ -7,4 +7,3 @@ toc_title: MaterializedView Используется для реализации материализованных представлений (подробнее см. запрос [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query)). Для хранения данных, использует другой движок, который был указан при создании представления. При чтении из таблицы, просто использует этот движок. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/materializedview/) diff --git a/docs/ru/engines/table-engines/special/memory.md b/docs/ru/engines/table-engines/special/memory.md index 9ca189ef3b2..5a242238a02 100644 --- a/docs/ru/engines/table-engines/special/memory.md +++ b/docs/ru/engines/table-engines/special/memory.md @@ -14,4 +14,3 @@ toc_title: Memory Движок Memory используется системой для временных таблиц - внешних данных запроса (смотрите раздел «Внешние данные для обработки запроса»), для реализации `GLOBAL IN` (смотрите раздел «Операторы IN»). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/memory/) diff --git a/docs/ru/engines/table-engines/special/merge.md b/docs/ru/engines/table-engines/special/merge.md index 656aa7cfd6b..714b087c201 100644 --- a/docs/ru/engines/table-engines/special/merge.md +++ b/docs/ru/engines/table-engines/special/merge.md @@ -65,4 +65,3 @@ FROM WatchLog - [Виртуальные столбцы](index.md#table_engines-virtual_columns) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/merge/) diff --git a/docs/ru/engines/table-engines/special/null.md b/docs/ru/engines/table-engines/special/null.md index 2c3af1ce11e..05f5c88bacb 100644 --- a/docs/ru/engines/table-engines/special/null.md +++ b/docs/ru/engines/table-engines/special/null.md @@ -7,4 +7,3 @@ toc_title: 'Null' Тем не менее, есть возможность создать материализованное представление над таблицей типа Null. Тогда данные, записываемые в таблицу, будут попадать в представление. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/null/) diff --git a/docs/ru/engines/table-engines/special/set.md b/docs/ru/engines/table-engines/special/set.md index 14b7f123a34..ced9abf55dc 100644 --- a/docs/ru/engines/table-engines/special/set.md +++ b/docs/ru/engines/table-engines/special/set.md @@ -20,4 +20,3 @@ toc_title: Set - [persistent](../../../operations/settings/settings.md#persistent) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/set/) diff --git a/docs/ru/engines/table-engines/special/url.md b/docs/ru/engines/table-engines/special/url.md index cdb5afddf75..b8fcd27204f 100644 --- a/docs/ru/engines/table-engines/special/url.md +++ b/docs/ru/engines/table-engines/special/url.md @@ -77,4 +77,3 @@ SELECT * FROM url_engine_table - индексы; - репликация. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/url/) diff --git a/docs/ru/engines/table-engines/special/view.md b/docs/ru/engines/table-engines/special/view.md index 18813a55da2..45aeb55cd85 100644 --- a/docs/ru/engines/table-engines/special/view.md +++ b/docs/ru/engines/table-engines/special/view.md @@ -7,4 +7,3 @@ toc_title: View Используется для реализации представлений (подробнее см. запрос `CREATE VIEW`). Не хранит данные, а хранит только указанный запрос `SELECT`. При чтении из таблицы, выполняет его (с удалением из запроса всех ненужных столбцов). -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/view/) diff --git a/docs/ru/getting-started/example-datasets/amplab-benchmark.md b/docs/ru/getting-started/example-datasets/amplab-benchmark.md index bc59672ab26..8a75852aad9 100644 --- a/docs/ru/getting-started/example-datasets/amplab-benchmark.md +++ b/docs/ru/getting-started/example-datasets/amplab-benchmark.md @@ -125,4 +125,3 @@ ORDER BY totalRevenue DESC LIMIT 1 ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/getting_started/example_datasets/amplab_benchmark/) diff --git a/docs/ru/getting-started/example-datasets/brown-benchmark.md b/docs/ru/getting-started/example-datasets/brown-benchmark.md index 23702e07fcd..f1aad06b743 100644 --- a/docs/ru/getting-started/example-datasets/brown-benchmark.md +++ b/docs/ru/getting-started/example-datasets/brown-benchmark.md @@ -413,4 +413,3 @@ ORDER BY yr, Данные также доступны для работы с интерактивными запросами через [Playground](https://gh-api.clickhouse.tech/play?user=play), 
[пример](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIG1hY2hpbmVfbmFtZSwKICAgICAgIE1JTihjcHUpIEFTIGNwdV9taW4sCiAgICAgICBNQVgoY3B1KSBBUyBjcHVfbWF4LAogICAgICAgQVZHKGNwdSkgQVMgY3B1X2F2ZywKICAgICAgIE1JTihuZXRfaW4pIEFTIG5ldF9pbl9taW4sCiAgICAgICBNQVgobmV0X2luKSBBUyBuZXRfaW5fbWF4LAogICAgICAgQVZHKG5ldF9pbikgQVMgbmV0X2luX2F2ZywKICAgICAgIE1JTihuZXRfb3V0KSBBUyBuZXRfb3V0X21pbiwKICAgICAgIE1BWChuZXRfb3V0KSBBUyBuZXRfb3V0X21heCwKICAgICAgIEFWRyhuZXRfb3V0KSBBUyBuZXRfb3V0X2F2ZwpGUk9NICgKICBTRUxFQ1QgbWFjaGluZV9uYW1lLAogICAgICAgICBDT0FMRVNDRShjcHVfdXNlciwgMC4wKSBBUyBjcHUsCiAgICAgICAgIENPQUxFU0NFKGJ5dGVzX2luLCAwLjApIEFTIG5ldF9pbiwKICAgICAgICAgQ09BTEVTQ0UoYnl0ZXNfb3V0LCAwLjApIEFTIG5ldF9vdXQKICBGUk9NIG1nYmVuY2gubG9nczEKICBXSEVSRSBtYWNoaW5lX25hbWUgSU4gKCdhbmFuc2knLCdhcmFnb2cnLCd1cmQnKQogICAgQU5EIGxvZ190aW1lID49IFRJTUVTVEFNUCAnMjAxNy0wMS0xMSAwMDowMDowMCcKKSBBUyByCkdST1VQIEJZIG1hY2hpbmVfbmFtZQ==). -[Оригинальная статья](https://clickhouse.tech/docs/ru/getting_started/example_datasets/brown-benchmark/) diff --git a/docs/ru/getting-started/example-datasets/criteo.md b/docs/ru/getting-started/example-datasets/criteo.md index ecdc5f5fa41..bfa428a0e1c 100644 --- a/docs/ru/getting-started/example-datasets/criteo.md +++ b/docs/ru/getting-started/example-datasets/criteo.md @@ -76,4 +76,3 @@ INSERT INTO criteo SELECT date, clicked, int1, int2, int3, int4, int5, int6, int DROP TABLE criteo_log; ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/getting_started/example_datasets/criteo/) diff --git a/docs/ru/getting-started/example-datasets/index.md b/docs/ru/getting-started/example-datasets/index.md index fd89bb122e3..f590300adda 100644 --- a/docs/ru/getting-started/example-datasets/index.md +++ b/docs/ru/getting-started/example-datasets/index.md @@ -17,4 +17,3 @@ toc_title: "Введение" - [Данные о такси в Нью-Йорке](nyc-taxi.md) - [OnTime](ontime.md) -[Оригинальная статья](https://clickhouse.tech/docs/en/getting_started/example_datasets) diff --git 
a/docs/ru/getting-started/example-datasets/nyc-taxi.md b/docs/ru/getting-started/example-datasets/nyc-taxi.md index 891a92e2fa7..38a60ed1b2d 100644 --- a/docs/ru/getting-started/example-datasets/nyc-taxi.md +++ b/docs/ru/getting-started/example-datasets/nyc-taxi.md @@ -390,4 +390,3 @@ Q4: 0.072 sec. | 3 | 0.212 | 0.438 | 0.733 | 1.241 | | 140 | 0.028 | 0.043 | 0.051 | 0.072 | -[Оригинальная статья](https://clickhouse.tech/docs/ru/getting_started/example_datasets/nyc_taxi/) diff --git a/docs/ru/getting-started/example-datasets/ontime.md b/docs/ru/getting-started/example-datasets/ontime.md index 41a1c0d3142..be5b1cd1b70 100644 --- a/docs/ru/getting-started/example-datasets/ontime.md +++ b/docs/ru/getting-started/example-datasets/ontime.md @@ -407,4 +407,3 @@ LIMIT 10; - https://www.percona.com/blog/2016/01/07/apache-spark-with-air-ontime-performance-data/ - http://nickmakos.blogspot.ru/2012/08/analyzing-air-traffic-performance-with.html -[Оригинальная статья](https://clickhouse.tech/docs/ru/getting_started/example_datasets/ontime/) diff --git a/docs/ru/getting-started/example-datasets/wikistat.md b/docs/ru/getting-started/example-datasets/wikistat.md index c5a877ff8fd..f224c24e6ac 100644 --- a/docs/ru/getting-started/example-datasets/wikistat.md +++ b/docs/ru/getting-started/example-datasets/wikistat.md @@ -30,4 +30,3 @@ $ cat links.txt | while read link; do wget http://dumps.wikimedia.org/other/page $ ls -1 /opt/wikistat/ | grep gz | while read i; do echo $i; gzip -cd /opt/wikistat/$i | ./wikistat-loader --time="$(echo -n $i | sed -r 's/pagecounts-([0-9]{4})([0-9]{2})([0-9]{2})-([0-9]{2})([0-9]{2})([0-9]{2})\.gz/\1-\2-\3 \4-00-00/')" | clickhouse-client --query="INSERT INTO wikistat FORMAT TabSeparated"; done ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/getting_started/example_datasets/wikistat/) diff --git a/docs/ru/getting-started/index.md b/docs/ru/getting-started/index.md index 78b56092740..599cb8b9434 100644 --- a/docs/ru/getting-started/index.md 
+++ b/docs/ru/getting-started/index.md @@ -14,4 +14,3 @@ toc_title: hidden - [Пройти подробное руководство для начинающих](tutorial.md) - [Поэкспериментировать с тестовыми наборами данных](example-datasets/ontime.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/getting_started/) diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md index aa5e8d77512..4ae27a910ea 100644 --- a/docs/ru/getting-started/install.md +++ b/docs/ru/getting-started/install.md @@ -173,4 +173,3 @@ SELECT 1 Для дальнейших экспериментов можно попробовать загрузить один из тестовых наборов данных или пройти [пошаговое руководство для начинающих](https://clickhouse.tech/tutorial.html). -[Оригинальная статья](https://clickhouse.tech/docs/ru/getting_started/install/) diff --git a/docs/ru/index.md b/docs/ru/index.md index 26d7dc3bf21..e16f2afed82 100644 --- a/docs/ru/index.md +++ b/docs/ru/index.md @@ -97,4 +97,3 @@ ClickHouse - столбцовая система управления базам Стоит заметить, что для эффективности по CPU требуется, чтобы язык запросов был декларативным (SQL, MDX) или хотя бы векторным (J, K). То есть, чтобы запрос содержал циклы только в неявном виде, открывая возможности для оптимизации. -[Оригинальная статья](https://clickhouse.tech/docs/ru/) diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index 3f6b288fc2b..96ec36be79f 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -153,4 +153,3 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/cli/) diff --git a/docs/ru/interfaces/cpp.md b/docs/ru/interfaces/cpp.md index 018f4e22e34..f0691453fe6 100644 --- a/docs/ru/interfaces/cpp.md +++ b/docs/ru/interfaces/cpp.md @@ -7,4 +7,3 @@ toc_title: "C++ клиентская библиотека" См. README в репозитории [clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/cpp/) diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index edea533b642..3a61d789e75 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -1390,4 +1390,3 @@ $ clickhouse-client --query "SELECT * FROM {some_table} FORMAT RawBLOB" | md5sum f9725a22f9191e064120d718e26862a9 - ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/formats/) diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index 5cb50d8f168..9e553c12dc0 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -635,4 +635,3 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler' * Connection #0 to host localhost left intact ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/http_interface/) diff --git a/docs/ru/interfaces/index.md b/docs/ru/interfaces/index.md index fc8743b3c1e..12e8853823e 100644 --- a/docs/ru/interfaces/index.md +++ b/docs/ru/interfaces/index.md @@ -24,4 +24,3 @@ ClickHouse предоставляет два сетевых интерфейса - [Библиотеки для интеграции](third-party/integrations.md); - [Визуальные интерфейсы](third-party/gui.md). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/) diff --git a/docs/ru/interfaces/jdbc.md b/docs/ru/interfaces/jdbc.md index ac86375c74f..30270322f7a 100644 --- a/docs/ru/interfaces/jdbc.md +++ b/docs/ru/interfaces/jdbc.md @@ -10,4 +10,3 @@ toc_title: "JDBC-драйвер" - [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC) - [clickhouse4j](https://github.com/blynkkk/clickhouse4j) -[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/jdbc/) diff --git a/docs/ru/interfaces/odbc.md b/docs/ru/interfaces/odbc.md index 7843d3cb943..22153865298 100644 --- a/docs/ru/interfaces/odbc.md +++ b/docs/ru/interfaces/odbc.md @@ -8,4 +8,3 @@ toc_title: "ODBC-драйвер" - [Официальный драйвер](https://github.com/ClickHouse/clickhouse-odbc). -[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/odbc/) diff --git a/docs/ru/interfaces/tcp.md b/docs/ru/interfaces/tcp.md index ea8c170009d..5261e1eafef 100644 --- a/docs/ru/interfaces/tcp.md +++ b/docs/ru/interfaces/tcp.md @@ -7,4 +7,3 @@ toc_title: "Родной интерфейс (TCP)" Нативный протокол используется в [клиенте командной строки](cli.md), для взаимодействия между серверами во время обработки распределенных запросов, а также в других программах на C++. К сожалению, у родного протокола ClickHouse пока нет формальной спецификации, но в нем можно разобраться с использованием исходного кода ClickHouse (начиная с [примерно этого места](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)) и/или путем перехвата и анализа TCP трафика. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/tcp/) diff --git a/docs/ru/interfaces/third-party/client-libraries.md b/docs/ru/interfaces/third-party/client-libraries.md index 65e93731300..411475f0aaa 100644 --- a/docs/ru/interfaces/third-party/client-libraries.md +++ b/docs/ru/interfaces/third-party/client-libraries.md @@ -58,4 +58,3 @@ toc_title: "Клиентские библиотеки от сторонних р - Nim - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse) -[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/third-party/client_libraries/) diff --git a/docs/ru/interfaces/third-party/gui.md b/docs/ru/interfaces/third-party/gui.md index c02c32e08f4..ba455df312c 100644 --- a/docs/ru/interfaces/third-party/gui.md +++ b/docs/ru/interfaces/third-party/gui.md @@ -146,7 +146,6 @@ toc_title: "Визуальные интерфейсы от сторонних р - Подготовка данных и возможности ETL. - Моделирование данных с помощью SQL для их реляционного отображения. -[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/third-party/gui/) ### Looker {#looker} diff --git a/docs/ru/interfaces/third-party/index.md b/docs/ru/interfaces/third-party/index.md index 8b59bb5fd28..bbf5a237000 100644 --- a/docs/ru/interfaces/third-party/index.md +++ b/docs/ru/interfaces/third-party/index.md @@ -15,4 +15,3 @@ toc_priority: 24 !!! note "Примечание" С ClickHouse работают также универсальные инструменты, поддерживающие общий API, такие как [ODBC](../../interfaces/odbc.md) или [JDBC](../../interfaces/jdbc.md). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/third-party/) diff --git a/docs/ru/interfaces/third-party/integrations.md b/docs/ru/interfaces/third-party/integrations.md index 84d5b93f92f..6da1459c34b 100644 --- a/docs/ru/interfaces/third-party/integrations.md +++ b/docs/ru/interfaces/third-party/integrations.md @@ -105,4 +105,3 @@ toc_title: "Библиотеки для интеграции от сторонн - [GraphQL](https://github.com/graphql) - [activecube-graphql](https://github.com/bitquery/activecube-graphql) -[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/third-party/integrations/) diff --git a/docs/ru/interfaces/third-party/proxy.md b/docs/ru/interfaces/third-party/proxy.md index 48853cb352e..6d85c960c0e 100644 --- a/docs/ru/interfaces/third-party/proxy.md +++ b/docs/ru/interfaces/third-party/proxy.md @@ -41,4 +41,3 @@ toc_title: "Прокси-серверы от сторонних разработ Реализован на Go. -[Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/third-party/proxy/) diff --git a/docs/ru/introduction/distinctive-features.md b/docs/ru/introduction/distinctive-features.md index 852f5cecd5b..dedb1412dbf 100644 --- a/docs/ru/introduction/distinctive-features.md +++ b/docs/ru/introduction/distinctive-features.md @@ -73,4 +73,3 @@ ClickHouse предоставляет различные способы разм 3. Разреженный индекс делает ClickHouse плохо пригодным для точечных чтений одиночных строк по своим ключам. -[Оригинальная статья](https://clickhouse.tech/docs/ru/introduction/distinctive_features/) diff --git a/docs/ru/introduction/history.md b/docs/ru/introduction/history.md index ad17b2be27d..dc4aa935c27 100644 --- a/docs/ru/introduction/history.md +++ b/docs/ru/introduction/history.md @@ -52,4 +52,3 @@ OLAPServer хорошо подходил для неагрегированных Чтобы снять ограничения OLAPServer-а и решить задачу работы с неагрегированными данными для всех отчётов, разработана СУБД ClickHouse. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/introduction/ya_metrika_task/) diff --git a/docs/ru/introduction/info.md b/docs/ru/introduction/info.md index a9398b8c9cd..a5e7efffc7e 100644 --- a/docs/ru/introduction/info.md +++ b/docs/ru/introduction/info.md @@ -9,4 +9,3 @@ toc_priority: 100 - Адрес электронной почты: - Телефон: +7-495-780-6510 -[Оригинальная статья](https://clickhouse.tech/docs/ru/introduction/info/) diff --git a/docs/ru/introduction/performance.md b/docs/ru/introduction/performance.md index dd92d3df9f5..eec1dcf4d0a 100644 --- a/docs/ru/introduction/performance.md +++ b/docs/ru/introduction/performance.md @@ -27,4 +27,3 @@ toc_title: "Производительность" Данные рекомендуется вставлять пачками не менее 1000 строк или не более одного запроса в секунду. При вставке в таблицу типа MergeTree из tab-separated дампа, скорость вставки будет в районе 50-200 МБ/сек. Если вставляются строчки размером около 1 КБ, то скорость будет в районе 50 000 - 200 000 строчек в секунду. Если строчки маленькие - производительность в строчках в секунду будет выше (на данных БК - `>` 500 000 строк в секунду, на данных Graphite - `>` 1 000 000 строк в секунду). Для увеличения производительности, можно производить несколько запросов INSERT параллельно - при этом производительность растёт линейно. -[Оригинальная статья](https://clickhouse.tech/docs/ru/introduction/performance/) diff --git a/docs/ru/operations/access-rights.md b/docs/ru/operations/access-rights.md index 9aa4e5f2561..a0ad7664131 100644 --- a/docs/ru/operations/access-rights.md +++ b/docs/ru/operations/access-rights.md @@ -146,4 +146,3 @@ ClickHouse поддерживает управление доступом на По умолчанию управление доступом на основе SQL выключено для всех пользователей. Вам необходимо настроить хотя бы одного пользователя в файле конфигурации `users.xml` и присвоить значение 1 параметру [access_management](settings/settings-users.md#access_management-user-setting). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/access_rights/) diff --git a/docs/ru/operations/backup.md b/docs/ru/operations/backup.md index 703217e8547..ed0adeb5e6f 100644 --- a/docs/ru/operations/backup.md +++ b/docs/ru/operations/backup.md @@ -36,4 +36,3 @@ ClickHouse позволяет использовать запрос `ALTER TABLE Для автоматизации этого подхода доступен инструмент от сторонних разработчиков: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup). -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/backup/) diff --git a/docs/ru/operations/caches.md b/docs/ru/operations/caches.md index 7744c596cd9..a0b71d1782a 100644 --- a/docs/ru/operations/caches.md +++ b/docs/ru/operations/caches.md @@ -26,4 +26,3 @@ toc_title: Кеши Чтобы очистить кеш, используйте выражение [SYSTEM DROP ... CACHE](../sql-reference/statements/system.md). -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/caches/) diff --git a/docs/ru/operations/configuration-files.md b/docs/ru/operations/configuration-files.md index 84b26d0ba2a..11a01d1e6d2 100644 --- a/docs/ru/operations/configuration-files.md +++ b/docs/ru/operations/configuration-files.md @@ -52,4 +52,3 @@ $ cat /etc/clickhouse-server/users.d/alice.xml Сервер следит за изменениями конфигурационных файлов, а также файлов и ZooKeeper-узлов, которые были использованы при выполнении подстановок и переопределений, и перезагружает настройки пользователей и кластеров на лету. То есть, можно изменять кластера, пользователей и их настройки без перезапуска сервера. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/configuration_files/) diff --git a/docs/ru/operations/index.md b/docs/ru/operations/index.md index 99dcf652891..88212e6804f 100644 --- a/docs/ru/operations/index.md +++ b/docs/ru/operations/index.md @@ -23,4 +23,3 @@ toc_title: "Эксплуатация" - [Настройки](settings/index.md#settings) - [Утилиты](utilities/index.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/) diff --git a/docs/ru/operations/monitoring.md b/docs/ru/operations/monitoring.md index 7656b04d011..da51d27ded2 100644 --- a/docs/ru/operations/monitoring.md +++ b/docs/ru/operations/monitoring.md @@ -43,4 +43,3 @@ ClickHouse собирает: Для мониторинга серверов в кластерной конфигурации необходимо установить параметр [max_replica_delay_for_distributed_queries](settings/settings.md#settings-max_replica_delay_for_distributed_queries) и использовать HTTP ресурс `/replicas_status`. Если реплика доступна и не отстаёт от других реплик, то запрос к `/replicas_status` возвращает `200 OK`. Если реплика отстаёт, то запрос возвращает `503 HTTP_SERVICE_UNAVAILABLE`, включая информацию о размере отставания. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/monitoring) diff --git a/docs/ru/operations/opentelemetry.md b/docs/ru/operations/opentelemetry.md index a60f1b3e085..073e7c67e9c 100644 --- a/docs/ru/operations/opentelemetry.md +++ b/docs/ru/operations/opentelemetry.md @@ -34,4 +34,3 @@ ClickHouse создает `trace spans` для каждого запроса и Теги или атрибуты сохраняются в виде двух параллельных массивов, содержащих ключи и значения. Для работы с ними используйте [ARRAY JOIN](../sql-reference/statements/select/array-join.md). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/opentelemetry/) diff --git a/docs/ru/operations/quotas.md b/docs/ru/operations/quotas.md index 31f3a66a1c3..bf531c0b35a 100644 --- a/docs/ru/operations/quotas.md +++ b/docs/ru/operations/quotas.md @@ -107,4 +107,3 @@ toc_title: "Квоты" При перезапуске сервера, квоты сбрасываются. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/quotas/) diff --git a/docs/ru/operations/server-configuration-parameters/index.md b/docs/ru/operations/server-configuration-parameters/index.md index f511955ebc4..503c5d32163 100644 --- a/docs/ru/operations/server-configuration-parameters/index.md +++ b/docs/ru/operations/server-configuration-parameters/index.md @@ -14,4 +14,3 @@ toc_title: "Введение" Перед изучением настроек ознакомьтесь с разделом [Конфигурационные файлы](../configuration-files.md#configuration_files), обратите внимание на использование подстановок (атрибуты `incl` и `optional`). -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/server_configuration_parameters/) diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index f46d899a3b7..b50347f6196 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -1160,4 +1160,3 @@ ClickHouse использует ZooKeeper для хранения метадан ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/server_configuration_parameters/settings/) diff --git a/docs/ru/operations/settings/constraints-on-settings.md b/docs/ru/operations/settings/constraints-on-settings.md index a4c1876574d..754d6cbba8a 100644 --- a/docs/ru/operations/settings/constraints-on-settings.md +++ b/docs/ru/operations/settings/constraints-on-settings.md @@ -71,4 +71,3 @@ Code: 452, e.displayText() = DB::Exception: Setting force_index_by_date should n **Примечание:** профиль с именем `default` 
обрабатывается специальным образом: все ограничения на изменение настроек из этого профиля становятся дефолтными и влияют на всех пользователей, кроме тех, где эти ограничения явно переопределены. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/constraints_on_settings/) diff --git a/docs/ru/operations/settings/index.md b/docs/ru/operations/settings/index.md index 2ef1d4730a3..050df975b47 100644 --- a/docs/ru/operations/settings/index.md +++ b/docs/ru/operations/settings/index.md @@ -54,4 +54,3 @@ SELECT getSetting('custom_a'); - [Конфигурационные параметры сервера](../../operations/server-configuration-parameters/settings.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/) diff --git a/docs/ru/operations/settings/permissions-for-queries.md b/docs/ru/operations/settings/permissions-for-queries.md index 571f56fc3bd..8cd5a2570ca 100644 --- a/docs/ru/operations/settings/permissions-for-queries.md +++ b/docs/ru/operations/settings/permissions-for-queries.md @@ -59,4 +59,3 @@ toc_title: "Разрешения для запросов" 1 -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/permissions_for_queries/) diff --git a/docs/ru/operations/settings/query-complexity.md b/docs/ru/operations/settings/query-complexity.md index c6e580a2209..c2e00302d18 100644 --- a/docs/ru/operations/settings/query-complexity.md +++ b/docs/ru/operations/settings/query-complexity.md @@ -314,4 +314,3 @@ FORMAT Null; > «Too many partitions for single INSERT block (more than» + toString(max_parts) + «). The limit is controlled by ‘max_partitions_per_insert_block’ setting. Large number of partitions is a common misconception. It will lead to severe negative performance impact, including slow server startup, slow INSERT queries and slow SELECT queries. Recommended total number of partitions for a table is under 1000..10000. 
Please note, that partitioning is not intended to speed up SELECT queries (ORDER BY key is sufficient to make range queries fast). Partitions are intended for data manipulation (DROP PARTITION, etc).» -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/query_complexity/) diff --git a/docs/ru/operations/settings/settings-profiles.md b/docs/ru/operations/settings/settings-profiles.md index e8082919d89..d3b3d29db94 100644 --- a/docs/ru/operations/settings/settings-profiles.md +++ b/docs/ru/operations/settings/settings-profiles.md @@ -77,4 +77,3 @@ SET profile = 'web' Профиль `web` — обычный профиль, который может быть установлен с помощью запроса `SET` или параметра URL при запросе по HTTP. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings_profiles/) diff --git a/docs/ru/operations/settings/settings-users.md b/docs/ru/operations/settings/settings-users.md index 21cd78569df..6a10e518817 100644 --- a/docs/ru/operations/settings/settings-users.md +++ b/docs/ru/operations/settings/settings-users.md @@ -162,4 +162,3 @@ toc_title: "Настройки пользователей" Элемент `filter` содержать любое выражение, возвращающее значение типа [UInt8](../../sql-reference/data-types/int-uint.md). Обычно он содержит сравнения и логические операторы. Строки `database_name.table1`, для которых фильтр возвращает 0 не выдаются пользователю. Фильтрация несовместима с операциями `PREWHERE` и отключает оптимизацию `WHERE→PREWHERE`. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings_users/) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 663821158bd..ab24b7f3a44 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -2563,4 +2563,3 @@ SELECT * FROM test2; Значение по умолчанию: `0`. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/) diff --git a/docs/ru/operations/system-tables/asynchronous_metric_log.md b/docs/ru/operations/system-tables/asynchronous_metric_log.md index 2fe617e48af..979b63f0cc8 100644 --- a/docs/ru/operations/system-tables/asynchronous_metric_log.md +++ b/docs/ru/operations/system-tables/asynchronous_metric_log.md @@ -34,4 +34,3 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10 - [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — Содержит метрики, которые периодически вычисляются в фоновом режиме. - [system.metric_log](#system_tables-metric_log) — таблица фиксирующая историю значений метрик из `system.metrics` и `system.events`. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/asynchronous_metric_log) diff --git a/docs/ru/operations/system-tables/asynchronous_metrics.md b/docs/ru/operations/system-tables/asynchronous_metrics.md index 5ff010bc79f..9d12a119c43 100644 --- a/docs/ru/operations/system-tables/asynchronous_metrics.md +++ b/docs/ru/operations/system-tables/asynchronous_metrics.md @@ -35,5 +35,4 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10 - [system.events](#system_tables-events) — таблица с количеством произошедших событий. - [system.metric_log](#system_tables-metric_log) — таблица фиксирующая историю значений метрик из `system.metrics` и `system.events`. - [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/asynchronous_metrics) \ No newline at end of file diff --git a/docs/ru/operations/system-tables/clusters.md b/docs/ru/operations/system-tables/clusters.md index 9cf84ea5f02..ddc6849b44d 100644 --- a/docs/ru/operations/system-tables/clusters.md +++ b/docs/ru/operations/system-tables/clusters.md @@ -13,4 +13,3 @@ - `port` (UInt16) — порт, на который обращаться для соединения с сервером. - `user` (String) — имя пользователя, которого использовать для соединения с сервером. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/clusters) diff --git a/docs/ru/operations/system-tables/columns.md b/docs/ru/operations/system-tables/columns.md index 8cb9408e7d8..af4cff85439 100644 --- a/docs/ru/operations/system-tables/columns.md +++ b/docs/ru/operations/system-tables/columns.md @@ -23,4 +23,3 @@ - `is_in_sampling_key` ([UInt8](../../sql-reference/data-types/int-uint.md)) — флаг, показывающий включение столбца в ключ выборки. - `compression_codec` ([String](../../sql-reference/data-types/string.md)) — имя кодека сжатия. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/columns) diff --git a/docs/ru/operations/system-tables/contributors.md b/docs/ru/operations/system-tables/contributors.md index 64c9a863bc3..6e11219e044 100644 --- a/docs/ru/operations/system-tables/contributors.md +++ b/docs/ru/operations/system-tables/contributors.md @@ -39,4 +39,3 @@ SELECT * FROM system.contributors WHERE name='Olga Khvostikova' └──────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/contributors) diff --git a/docs/ru/operations/system-tables/current-roles.md b/docs/ru/operations/system-tables/current-roles.md index a948b7b1e97..42ed4260fde 100644 --- a/docs/ru/operations/system-tables/current-roles.md +++ b/docs/ru/operations/system-tables/current-roles.md @@ -8,4 +8,3 @@ - `with_admin_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, обладает ли `current_role` роль привилегией `ADMIN OPTION`. - `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, является ли `current_role` ролью по умолчанию. 
- [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/current-roles) diff --git a/docs/ru/operations/system-tables/data_type_families.md b/docs/ru/operations/system-tables/data_type_families.md index d8d0b5e1074..3a9a4a3413a 100644 --- a/docs/ru/operations/system-tables/data_type_families.md +++ b/docs/ru/operations/system-tables/data_type_families.md @@ -33,4 +33,3 @@ SELECT * FROM system.data_type_families WHERE alias_to = 'String' - [Синтаксис](../../sql-reference/syntax.md) — поддерживаемый SQL синтаксис. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/data_type_families) diff --git a/docs/ru/operations/system-tables/databases.md b/docs/ru/operations/system-tables/databases.md index 00a4b543717..026f49c0d5d 100644 --- a/docs/ru/operations/system-tables/databases.md +++ b/docs/ru/operations/system-tables/databases.md @@ -4,4 +4,3 @@ Для каждой базы данных, о которой знает сервер, будет присутствовать соответствующая запись в таблице. Эта системная таблица используется для реализации запроса `SHOW DATABASES`. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/databases) \ No newline at end of file diff --git a/docs/ru/operations/system-tables/detached_parts.md b/docs/ru/operations/system-tables/detached_parts.md index c59daa3985c..23fd4882c44 100644 --- a/docs/ru/operations/system-tables/detached_parts.md +++ b/docs/ru/operations/system-tables/detached_parts.md @@ -4,4 +4,3 @@ Такие куски могут быть присоединены с помощью [ALTER TABLE ATTACH PARTITION\|PART](../../sql_reference/alter/#alter_attach-partition). Остальные столбцы описаны в [system.parts](#system_tables-parts). Если имя куска некорректно, значения некоторых столбцов могут быть `NULL`. Такие куски могут быть удалены с помощью [ALTER TABLE DROP DETACHED PART](../../sql_reference/alter/#alter_drop-detached). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/detached_parts) diff --git a/docs/ru/operations/system-tables/dictionaries.md b/docs/ru/operations/system-tables/dictionaries.md index cd1a4acab72..6a49904aae9 100644 --- a/docs/ru/operations/system-tables/dictionaries.md +++ b/docs/ru/operations/system-tables/dictionaries.md @@ -59,4 +59,3 @@ SELECT * FROM system.dictionaries └──────────┴──────┴────────┴─────────────┴──────┴────────┴──────────────────────────────────────┴─────────────────────┴─────────────────┴─────────────┴──────────┴───────────────┴───────────────────────┴────────────────────────────┴──────────────┴──────────────┴─────────────────────┴──────────────────────────────┘───────────────────────┴────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/dictionaries) \ No newline at end of file diff --git a/docs/ru/operations/system-tables/disks.md b/docs/ru/operations/system-tables/disks.md index 2832e7a1a32..186dfbd7819 100644 --- a/docs/ru/operations/system-tables/disks.md +++ b/docs/ru/operations/system-tables/disks.md @@ -10,4 +10,3 @@ Cодержит информацию о дисках, заданных в [ко - `total_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — объём диска в байтах. - `keep_free_space` ([UInt64](../../sql-reference/data-types/int-uint.md)) — место, которое должно остаться свободным на диске в байтах. Задаётся значением параметра `keep_free_space_bytes` конфигурации дисков. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/disks) diff --git a/docs/ru/operations/system-tables/distributed_ddl_queue.md b/docs/ru/operations/system-tables/distributed_ddl_queue.md index 71be69e98d7..99d92574a0b 100644 --- a/docs/ru/operations/system-tables/distributed_ddl_queue.md +++ b/docs/ru/operations/system-tables/distributed_ddl_queue.md @@ -61,5 +61,4 @@ exception_code: ZOK 2 rows in set. Elapsed: 0.025 sec. 
``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/distributed_ddl_queuedistributed_ddl_queue.md) \ No newline at end of file diff --git a/docs/ru/operations/system-tables/distribution_queue.md b/docs/ru/operations/system-tables/distribution_queue.md index 18346b34e04..5b811ab2be8 100644 --- a/docs/ru/operations/system-tables/distribution_queue.md +++ b/docs/ru/operations/system-tables/distribution_queue.md @@ -43,4 +43,3 @@ last_exception: - [Движок таблиц Distributed](../../engines/table-engines/special/distributed.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/distribution_queue) diff --git a/docs/ru/operations/system-tables/enabled-roles.md b/docs/ru/operations/system-tables/enabled-roles.md index cd3b0846718..a3f5ba179b3 100644 --- a/docs/ru/operations/system-tables/enabled-roles.md +++ b/docs/ru/operations/system-tables/enabled-roles.md @@ -9,4 +9,3 @@ - `is_current` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, является ли `enabled_role` текущей ролью текущего пользователя. - `is_default` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Флаг, который показывает, является ли `enabled_role` ролью по умолчанию. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/enabled-roles) \ No newline at end of file diff --git a/docs/ru/operations/system-tables/events.md b/docs/ru/operations/system-tables/events.md index 0a48617bb5c..c05be74eea6 100644 --- a/docs/ru/operations/system-tables/events.md +++ b/docs/ru/operations/system-tables/events.md @@ -31,4 +31,3 @@ SELECT * FROM system.events LIMIT 5 - [system.metric_log](#system_tables-metric_log) — таблица фиксирующая историю значений метрик из `system.metrics` и `system.events`. - [Мониторинг](../../operations/monitoring.md) — основы мониторинга в ClickHouse. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/events) diff --git a/docs/ru/operations/system-tables/functions.md b/docs/ru/operations/system-tables/functions.md index c51adb2c109..de752e2018c 100644 --- a/docs/ru/operations/system-tables/functions.md +++ b/docs/ru/operations/system-tables/functions.md @@ -7,4 +7,3 @@ - `name` (`String`) – Имя функции. - `is_aggregate` (`UInt8`) – Признак, является ли функция агрегатной. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/functions) diff --git a/docs/ru/operations/system-tables/grants.md b/docs/ru/operations/system-tables/grants.md index 58d8a9e1e06..76a014f62dd 100644 --- a/docs/ru/operations/system-tables/grants.md +++ b/docs/ru/operations/system-tables/grants.md @@ -21,4 +21,3 @@ - `grant_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Разрешение предоставлено с опцией `WITH GRANT OPTION`, подробнее см. [GRANT](../../sql-reference/statements/grant.md#grant-privigele-syntax). -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/grants) diff --git a/docs/ru/operations/system-tables/graphite_retentions.md b/docs/ru/operations/system-tables/graphite_retentions.md index 66fca7ba299..1098a29aac6 100644 --- a/docs/ru/operations/system-tables/graphite_retentions.md +++ b/docs/ru/operations/system-tables/graphite_retentions.md @@ -14,4 +14,3 @@ - `Tables.database` (Array(String)) - Массив имён баз данных таблиц, использующих параметр `config_name`. - `Tables.table` (Array(String)) - Массив имён таблиц, использующих параметр `config_name`. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/graphite_retentions) diff --git a/docs/ru/operations/system-tables/index.md b/docs/ru/operations/system-tables/index.md index e4b6f5beb9d..2760938add2 100644 --- a/docs/ru/operations/system-tables/index.md +++ b/docs/ru/operations/system-tables/index.md @@ -70,4 +70,3 @@ toc_title: "Системные таблицы" - `OSReadBytes` - `OSWriteBytes` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system-tables/) diff --git a/docs/ru/operations/system-tables/licenses.md b/docs/ru/operations/system-tables/licenses.md index a6a49d5e0be..598da1e72ee 100644 --- a/docs/ru/operations/system-tables/licenses.md +++ b/docs/ru/operations/system-tables/licenses.md @@ -36,4 +36,3 @@ SELECT library_name, license_type, license_path FROM system.licenses LIMIT 15 ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/licenses) diff --git a/docs/ru/operations/system-tables/merges.md b/docs/ru/operations/system-tables/merges.md index 021a95981e6..f48f0d1ac27 100644 --- a/docs/ru/operations/system-tables/merges.md +++ b/docs/ru/operations/system-tables/merges.md @@ -18,4 +18,3 @@ - `bytes_written_uncompressed UInt64` — Количество записанных байт, несжатых. - `rows_written UInt64` — Количество записанных строк. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/merges) diff --git a/docs/ru/operations/system-tables/metric_log.md b/docs/ru/operations/system-tables/metric_log.md index 2458c93da59..5160b32927b 100644 --- a/docs/ru/operations/system-tables/metric_log.md +++ b/docs/ru/operations/system-tables/metric_log.md @@ -48,4 +48,3 @@ CurrentMetric_ReplicatedChecks: 0 - [system.metrics](#system_tables-metrics) — таблица с мгновенно вычисляемыми метриками. - [Мониторинг](../../operations/monitoring.md) — основы мониторинга в ClickHouse. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/metric_log) diff --git a/docs/ru/operations/system-tables/metrics.md b/docs/ru/operations/system-tables/metrics.md index db4016687d6..13d5fbc750a 100644 --- a/docs/ru/operations/system-tables/metrics.md +++ b/docs/ru/operations/system-tables/metrics.md @@ -38,4 +38,3 @@ SELECT * FROM system.metrics LIMIT 10 - [system.metric_log](#system_tables-metric_log) — таблица фиксирующая историю значений метрик из `system.metrics` и `system.events`. - [Мониторинг](../../operations/monitoring.md) — основы мониторинга в ClickHouse. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/metrics) diff --git a/docs/ru/operations/system-tables/mutations.md b/docs/ru/operations/system-tables/mutations.md index 044677030ba..4370ab593e7 100644 --- a/docs/ru/operations/system-tables/mutations.md +++ b/docs/ru/operations/system-tables/mutations.md @@ -45,4 +45,3 @@ - [Движок MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) - [Репликация данных](../../engines/table-engines/mergetree-family/replication.md) (семейство ReplicatedMergeTree) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/mutations) diff --git a/docs/ru/operations/system-tables/numbers.md b/docs/ru/operations/system-tables/numbers.md index 02192184aa1..0be4a4ce05d 100644 --- a/docs/ru/operations/system-tables/numbers.md +++ b/docs/ru/operations/system-tables/numbers.md @@ -4,4 +4,3 @@ Эту таблицу можно использовать для тестов, а также если вам нужно сделать перебор. Чтения из этой таблицы не распараллеливаются. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/numbers) diff --git a/docs/ru/operations/system-tables/numbers_mt.md b/docs/ru/operations/system-tables/numbers_mt.md index 12409d831a1..d66c4515ddb 100644 --- a/docs/ru/operations/system-tables/numbers_mt.md +++ b/docs/ru/operations/system-tables/numbers_mt.md @@ -3,4 +3,3 @@ То же самое, что и [system.numbers](../../operations/system-tables/numbers.md), но чтение распараллеливается. Числа могут возвращаться в произвольном порядке. Используется для тестов. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/numbers_mt) diff --git a/docs/ru/operations/system-tables/one.md b/docs/ru/operations/system-tables/one.md index 4231277ffe4..5cb297f06d4 100644 --- a/docs/ru/operations/system-tables/one.md +++ b/docs/ru/operations/system-tables/one.md @@ -4,4 +4,3 @@ Эта таблица используется, если в `SELECT` запросе не указана секция `FROM`. То есть, это - аналог таблицы `DUAL`, которую можно найти в других СУБД. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/one) diff --git a/docs/ru/operations/system-tables/opentelemetry_span_log.md b/docs/ru/operations/system-tables/opentelemetry_span_log.md index 96555064b0e..c421a602300 100644 --- a/docs/ru/operations/system-tables/opentelemetry_span_log.md +++ b/docs/ru/operations/system-tables/opentelemetry_span_log.md @@ -46,4 +46,3 @@ attribute.names: [] attribute.values: [] ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/opentelemetry_span_log) diff --git a/docs/ru/operations/system-tables/part_log.md b/docs/ru/operations/system-tables/part_log.md index 4157cd41bff..a8d892f3b67 100644 --- a/docs/ru/operations/system-tables/part_log.md +++ b/docs/ru/operations/system-tables/part_log.md @@ -66,4 +66,3 @@ error: 0 exception: ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/part_log) diff --git a/docs/ru/operations/system-tables/parts.md b/docs/ru/operations/system-tables/parts.md index 950e652332d..1c7f0ad2e9a 100644 --- a/docs/ru/operations/system-tables/parts.md +++ b/docs/ru/operations/system-tables/parts.md @@ -155,4 +155,3 @@ move_ttl_info.max: [] - [Движок MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) - [TTL для столбцов и таблиц](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/parts) diff --git a/docs/ru/operations/system-tables/parts_columns.md b/docs/ru/operations/system-tables/parts_columns.md index db4d453e8f1..5640929d810 100644 --- a/docs/ru/operations/system-tables/parts_columns.md +++ b/docs/ru/operations/system-tables/parts_columns.md @@ -145,4 +145,3 @@ column_marks_bytes: 48 - [Движок MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) -[Оригинальная статья](https://clickhouse.tech/docs/en/operations/system_tables/parts_columns) diff --git 
a/docs/ru/operations/system-tables/processes.md b/docs/ru/operations/system-tables/processes.md index c9216e162b3..682b174c483 100644 --- a/docs/ru/operations/system-tables/processes.md +++ b/docs/ru/operations/system-tables/processes.md @@ -14,4 +14,3 @@ - `query` (String) – текст запроса. Для запросов `INSERT` не содержит встаявляемые данные. - `query_id` (String) – идентификатор запроса, если был задан. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/processes) diff --git a/docs/ru/operations/system-tables/query_log.md b/docs/ru/operations/system-tables/query_log.md index 39f685288d8..2d9c5f3eaab 100644 --- a/docs/ru/operations/system-tables/query_log.md +++ b/docs/ru/operations/system-tables/query_log.md @@ -142,5 +142,4 @@ Settings.Values: ['0','random','1','10000000000','1'] - [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — в этой таблице содержится информация о цепочке каждого выполненного запроса. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/query_log) diff --git a/docs/ru/operations/system-tables/query_thread_log.md b/docs/ru/operations/system-tables/query_thread_log.md index 052baf98035..0292a321524 100644 --- a/docs/ru/operations/system-tables/query_thread_log.md +++ b/docs/ru/operations/system-tables/query_thread_log.md @@ -114,4 +114,3 @@ ProfileEvents.Values: [1,1,11,11,591,148,3,71,29,6533808,1,11,72,18,47, - [system.query_log](../../operations/system-tables/query_log.md#system_tables-query_log) — описание системной таблицы `query_log`, которая содержит общую информацию о выполненных запросах. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/query_thread_log) diff --git a/docs/ru/operations/system-tables/quota_limits.md b/docs/ru/operations/system-tables/quota_limits.md index a9ab87055d4..4327dd2f29d 100644 --- a/docs/ru/operations/system-tables/quota_limits.md +++ b/docs/ru/operations/system-tables/quota_limits.md @@ -17,4 +17,3 @@ - `max_read_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Максимальное количество байтов, считываемых из всех таблиц и табличных функций, участвующих в запросе. - `max_execution_time` ([Nullable](../../sql-reference/data-types/nullable.md)([Float64](../../sql-reference/data-types/float.md))) — Максимальное время выполнения запроса, в секундах. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/quota_limits) diff --git a/docs/ru/operations/system-tables/quota_usage.md b/docs/ru/operations/system-tables/quota_usage.md index cea3c4b2daa..f777b336f23 100644 --- a/docs/ru/operations/system-tables/quota_usage.md +++ b/docs/ru/operations/system-tables/quota_usage.md @@ -28,4 +28,3 @@ - [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/quota_usage) diff --git a/docs/ru/operations/system-tables/quotas.md b/docs/ru/operations/system-tables/quotas.md index 15bb41a85bf..fe6b78cc44b 100644 --- a/docs/ru/operations/system-tables/quotas.md +++ b/docs/ru/operations/system-tables/quotas.md @@ -25,5 +25,4 @@ - [SHOW QUOTAS](../../sql-reference/statements/show.md#show-quotas-statement) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/quotas) diff --git a/docs/ru/operations/system-tables/quotas_usage.md b/docs/ru/operations/system-tables/quotas_usage.md index 9d6d339c434..3baecee8ece 100644 --- a/docs/ru/operations/system-tables/quotas_usage.md +++ 
b/docs/ru/operations/system-tables/quotas_usage.md @@ -29,4 +29,3 @@ - [SHOW QUOTA](../../sql-reference/statements/show.md#show-quota-statement) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/quotas_usage) diff --git a/docs/ru/operations/system-tables/replicas.md b/docs/ru/operations/system-tables/replicas.md index 8d4eb60c56a..7879ee707a4 100644 --- a/docs/ru/operations/system-tables/replicas.md +++ b/docs/ru/operations/system-tables/replicas.md @@ -120,5 +120,4 @@ WHERE Если этот запрос ничего не возвращает - значит всё хорошо. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/replicas) diff --git a/docs/ru/operations/system-tables/replicated_fetches.md b/docs/ru/operations/system-tables/replicated_fetches.md index 94584f390ee..31d5a5cfe08 100644 --- a/docs/ru/operations/system-tables/replicated_fetches.md +++ b/docs/ru/operations/system-tables/replicated_fetches.md @@ -67,4 +67,3 @@ thread_id: 54 - [Управление таблицами ReplicatedMergeTree](../../sql-reference/statements/system/#query-language-system-replicated) -[Оригинальная статья](https://clickhouse.tech/docs/en/operations/system_tables/replicated_fetches) diff --git a/docs/ru/operations/system-tables/replication_queue.md b/docs/ru/operations/system-tables/replication_queue.md index 47f64aea55d..2851551955a 100644 --- a/docs/ru/operations/system-tables/replication_queue.md +++ b/docs/ru/operations/system-tables/replication_queue.md @@ -78,4 +78,3 @@ last_postpone_time: 1970-01-01 03:00:00 - [Управление таблицами ReplicatedMergeTree](../../sql-reference/statements/system.md/#query-language-system-replicated) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/replication_queue) diff --git a/docs/ru/operations/system-tables/role-grants.md b/docs/ru/operations/system-tables/role-grants.md index f014af1fe3d..2c80a597857 100644 --- a/docs/ru/operations/system-tables/role-grants.md +++ 
b/docs/ru/operations/system-tables/role-grants.md @@ -14,4 +14,3 @@ - 1 — Роль обладает привилегией `ADMIN OPTION`. - 0 — Роль не обладает привилегией `ADMIN OPTION`. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/role-grants) \ No newline at end of file diff --git a/docs/ru/operations/system-tables/roles.md b/docs/ru/operations/system-tables/roles.md index 1b548e85be2..c2b94214012 100644 --- a/docs/ru/operations/system-tables/roles.md +++ b/docs/ru/operations/system-tables/roles.md @@ -14,4 +14,3 @@ - [SHOW ROLES](../../sql-reference/statements/show.md#show-roles-statement) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/roles) diff --git a/docs/ru/operations/system-tables/row_policies.md b/docs/ru/operations/system-tables/row_policies.md index 7d0a490f01c..f1e84a201cb 100644 --- a/docs/ru/operations/system-tables/row_policies.md +++ b/docs/ru/operations/system-tables/row_policies.md @@ -31,4 +31,3 @@ - [SHOW POLICIES](../../sql-reference/statements/show.md#show-policies-statement) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/row_policies) diff --git a/docs/ru/operations/system-tables/settings.md b/docs/ru/operations/system-tables/settings.md index 50ccac684c4..c9d63d336b6 100644 --- a/docs/ru/operations/system-tables/settings.md +++ b/docs/ru/operations/system-tables/settings.md @@ -50,4 +50,3 @@ SELECT * FROM system.settings WHERE changed AND name='load_balancing' - [Ограничения для значений настроек](../settings/constraints-on-settings.md) - Выражение [SHOW SETTINGS](../../sql-reference/statements/show.md#show-settings) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/settings) diff --git a/docs/ru/operations/system-tables/settings_profile_elements.md b/docs/ru/operations/system-tables/settings_profile_elements.md index cd801468e21..8a1461c6bb0 100644 --- a/docs/ru/operations/system-tables/settings_profile_elements.md +++ 
b/docs/ru/operations/system-tables/settings_profile_elements.md @@ -27,4 +27,3 @@ - `inherit_profile` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Родительский профиль для данного профиля настроек. `NULL` если не задано. Профиль настроек может наследовать все значения и ограничения настроек (`min`, `max`, `readonly`) от своего родительского профиля. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/settings_profile_elements) diff --git a/docs/ru/operations/system-tables/settings_profiles.md b/docs/ru/operations/system-tables/settings_profiles.md index e1401553a4a..f8101fb0cb7 100644 --- a/docs/ru/operations/system-tables/settings_profiles.md +++ b/docs/ru/operations/system-tables/settings_profiles.md @@ -21,4 +21,3 @@ - [SHOW PROFILES](../../sql-reference/statements/show.md#show-profiles-statement) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/settings_profiles) diff --git a/docs/ru/operations/system-tables/stack_trace.md b/docs/ru/operations/system-tables/stack_trace.md index 0689e15c35c..58d0a1c4b6a 100644 --- a/docs/ru/operations/system-tables/stack_trace.md +++ b/docs/ru/operations/system-tables/stack_trace.md @@ -85,4 +85,3 @@ res: /lib/x86_64-linux-gnu/libc-2.27.so - [arrayMap](../../sql-reference/functions/array-functions.md#array-map) — Описание и пример использования функции `arrayMap`. - [arrayFilter](../../sql-reference/functions/array-functions.md#array-filter) — Описание и пример использования функции `arrayFilter`. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/stack_trace) diff --git a/docs/ru/operations/system-tables/storage_policies.md b/docs/ru/operations/system-tables/storage_policies.md index e62266af131..b2005d5f31e 100644 --- a/docs/ru/operations/system-tables/storage_policies.md +++ b/docs/ru/operations/system-tables/storage_policies.md @@ -14,4 +14,3 @@ Если политика хранения содержит несколько томов, то каждому тому соответствует отдельная запись в таблице. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/storage_policies) diff --git a/docs/ru/operations/system-tables/table_engines.md b/docs/ru/operations/system-tables/table_engines.md index eb198475e43..6af29753bbf 100644 --- a/docs/ru/operations/system-tables/table_engines.md +++ b/docs/ru/operations/system-tables/table_engines.md @@ -34,4 +34,3 @@ WHERE name in ('Kafka', 'MergeTree', 'ReplicatedCollapsingMergeTree') - [Настройки](../../engines/table-engines/integrations/kafka.md#table_engine-kafka-creating-a-table) Kafka - [Настройки](../../engines/table-engines/special/join.md#join-limitations-and-settings) Join -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/table_engines) diff --git a/docs/ru/operations/system-tables/tables.md b/docs/ru/operations/system-tables/tables.md index 52de10871b2..42e55b1f6b7 100644 --- a/docs/ru/operations/system-tables/tables.md +++ b/docs/ru/operations/system-tables/tables.md @@ -37,4 +37,3 @@ Таблица `system.tables` используется при выполнении запроса `SHOW TABLES`. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/tables) diff --git a/docs/ru/operations/system-tables/text_log.md b/docs/ru/operations/system-tables/text_log.md index 141c3680c07..97c6ef9e2cd 100644 --- a/docs/ru/operations/system-tables/text_log.md +++ b/docs/ru/operations/system-tables/text_log.md @@ -50,4 +50,3 @@ source_file: /ClickHouse/src/Interpreters/DNSCacheUpdater.cpp; void source_line: 45 ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/text_log) diff --git a/docs/ru/operations/system-tables/trace_log.md b/docs/ru/operations/system-tables/trace_log.md index 88f4b29651b..3d22e4eabfd 100644 --- a/docs/ru/operations/system-tables/trace_log.md +++ b/docs/ru/operations/system-tables/trace_log.md @@ -50,4 +50,3 @@ trace: [371912858,371912789,371798468,371799717,371801313,3717 size: 5244400 ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system-tables/trace_log) diff --git a/docs/ru/operations/system-tables/users.md b/docs/ru/operations/system-tables/users.md index c12b91f445f..2a523ae4a9a 100644 --- a/docs/ru/operations/system-tables/users.md +++ b/docs/ru/operations/system-tables/users.md @@ -31,4 +31,3 @@ - [SHOW USERS](../../sql-reference/statements/show.md#show-users-statement) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/users) diff --git a/docs/ru/operations/system-tables/zookeeper.md b/docs/ru/operations/system-tables/zookeeper.md index 9a2b781d8f3..a6ce62a9d4e 100644 --- a/docs/ru/operations/system-tables/zookeeper.md +++ b/docs/ru/operations/system-tables/zookeeper.md @@ -69,4 +69,3 @@ pzxid: 987021252247 path: /clickhouse/tables/01-08/visits/replicas ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/zookeeper) diff --git a/docs/ru/operations/tips.md b/docs/ru/operations/tips.md index 0a2ca5ecac1..4535767e8e0 100644 --- a/docs/ru/operations/tips.md +++ b/docs/ru/operations/tips.md @@ -246,4 
+246,3 @@ script end script ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/tips/) diff --git a/docs/ru/operations/utilities/clickhouse-benchmark.md b/docs/ru/operations/utilities/clickhouse-benchmark.md index 2a883cf3bb5..b4769b17818 100644 --- a/docs/ru/operations/utilities/clickhouse-benchmark.md +++ b/docs/ru/operations/utilities/clickhouse-benchmark.md @@ -160,4 +160,3 @@ localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, resu 99.990% 0.172 sec. ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/utilities/clickhouse-benchmark.md) diff --git a/docs/ru/operations/utilities/clickhouse-copier.md b/docs/ru/operations/utilities/clickhouse-copier.md index 243ad7f379b..aa4fd68f8e8 100644 --- a/docs/ru/operations/utilities/clickhouse-copier.md +++ b/docs/ru/operations/utilities/clickhouse-copier.md @@ -181,4 +181,3 @@ $ clickhouse-copier --daemon --config zookeeper.xml --task-path /task/path --bas `clickhouse-copier` отслеживает изменения `/task/path/description` и применяет их «на лету». Если вы поменяете, например, значение `max_workers`, то количество процессов, выполняющих задания, также изменится. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/utils/clickhouse-copier/) diff --git a/docs/ru/operations/utilities/clickhouse-local.md b/docs/ru/operations/utilities/clickhouse-local.md index 137472fa993..682dc0b5ace 100644 --- a/docs/ru/operations/utilities/clickhouse-local.md +++ b/docs/ru/operations/utilities/clickhouse-local.md @@ -110,4 +110,3 @@ Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec. ... 
``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/utils/clickhouse-local/) diff --git a/docs/ru/operations/utilities/index.md b/docs/ru/operations/utilities/index.md index 8b533c29ff5..fa257fb4b1a 100644 --- a/docs/ru/operations/utilities/index.md +++ b/docs/ru/operations/utilities/index.md @@ -9,4 +9,3 @@ toc_title: "Обзор" - [clickhouse-local](clickhouse-local.md) - [clickhouse-copier](clickhouse-copier.md) - копирует (и перешардирует) данные с одного кластера на другой. -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/utils/) diff --git a/docs/ru/sql-reference/aggregate-functions/combinators.md b/docs/ru/sql-reference/aggregate-functions/combinators.md index 3b35716ec27..aaf03428d7c 100644 --- a/docs/ru/sql-reference/aggregate-functions/combinators.md +++ b/docs/ru/sql-reference/aggregate-functions/combinators.md @@ -248,4 +248,3 @@ FROM people └────────┴───────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/agg_functions/combinators/) diff --git a/docs/ru/sql-reference/aggregate-functions/index.md b/docs/ru/sql-reference/aggregate-functions/index.md index 3c931222f58..7afb6a374a7 100644 --- a/docs/ru/sql-reference/aggregate-functions/index.md +++ b/docs/ru/sql-reference/aggregate-functions/index.md @@ -57,4 +57,3 @@ SELECT groupArray(y) FROM t_null_big `groupArray` не включает `NULL` в результирующий массив. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/) diff --git a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md index 61518cb6f02..806d0140a9d 100644 --- a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md @@ -481,4 +481,3 @@ FROM Решение: пишем в запросе GROUP BY SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5 ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/agg_functions/parametric_functions/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/any.md b/docs/ru/sql-reference/aggregate-functions/reference/any.md index 38c412813ab..6142b9a2092 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/any.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/any.md @@ -12,4 +12,3 @@ toc_priority: 6 При наличии в запросе `SELECT` секции `GROUP BY` или хотя бы одной агрегатной функции, ClickHouse (в отличие от, например, MySQL) требует, чтобы все выражения в секциях `SELECT`, `HAVING`, `ORDER BY` вычислялись из ключей или из агрегатных функций. То есть, каждый выбираемый из таблицы столбец, должен использоваться либо в ключах, либо внутри агрегатных функций. Чтобы получить поведение, как в MySQL, вы можете поместить остальные столбцы в агрегатную функцию `any`. 
-[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/any/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md b/docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md index 19fda7f64b7..bb7a01a47f3 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md @@ -29,4 +29,3 @@ FROM ontime └───────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/anyheavy/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/anylast.md b/docs/ru/sql-reference/aggregate-functions/reference/anylast.md index da68c926d43..7be380461f7 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/anylast.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/anylast.md @@ -7,4 +7,3 @@ toc_priority: 104 Выбирает последнее попавшееся значение. Результат так же недетерминирован, как и для функции [any](../../../sql-reference/aggregate-functions/reference/any.md). 
-[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/anylast/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/argmax.md b/docs/ru/sql-reference/aggregate-functions/reference/argmax.md index dd2df23e1cd..246bfcfba9d 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/argmax.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/argmax.md @@ -63,4 +63,3 @@ SELECT argMax(user, salary), argMax(tuple(user, salary), salary), argMax(tuple(u └──────────────────────┴─────────────────────────────────────┴─────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/reference/argmax/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/argmin.md b/docs/ru/sql-reference/aggregate-functions/reference/argmin.md index 8c25b79f92a..811f3706d2b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/argmin.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/argmin.md @@ -63,4 +63,3 @@ SELECT argMin(user, salary), argMin(tuple(user, salary)) FROM salary; └──────────────────────┴─────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/reference/argmin/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/avg.md b/docs/ru/sql-reference/aggregate-functions/reference/avg.md index c032199aa32..c5e1dec14e0 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/avg.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/avg.md @@ -61,4 +61,3 @@ SELECT avg(t) FROM test; └────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/reference/avg/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md index 72e6ca5c88c..5e63ed61c6d 100644 --- 
a/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md @@ -43,4 +43,3 @@ FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2)) └────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/avgweighted/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/corr.md b/docs/ru/sql-reference/aggregate-functions/reference/corr.md index 6d631241f6a..7522dcebd0b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/corr.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/corr.md @@ -11,4 +11,3 @@ toc_priority: 107 !!! note "Примечание" Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `corrStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку. -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/corr/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/count.md b/docs/ru/sql-reference/aggregate-functions/reference/count.md index d99c3b2aeb2..762acce5602 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/count.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/count.md @@ -69,4 +69,3 @@ SELECT count(DISTINCT num) FROM t Этот пример показывает, что `count(DISTINCT num)` выполняется с помощью функции `uniqExact` в соответствии со значением настройки `count_distinct_implementation`. 
-[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/count/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/covarpop.md b/docs/ru/sql-reference/aggregate-functions/reference/covarpop.md index e30b19924f9..1438fefbd8e 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/covarpop.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/covarpop.md @@ -11,4 +11,3 @@ toc_priority: 36 !!! note "Примечание" Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `covarPopStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку. -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/covarpop/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md index 7fa9a1d3f2c..b4cea16f4c0 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md @@ -13,4 +13,3 @@ toc_priority: 37 !!! note "Примечание" Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `covarSampStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку. 
-[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/covarsamp/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparray.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparray.md index 7640795fc51..370190dbb3c 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparray.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparray.md @@ -14,4 +14,3 @@ toc_priority: 110 В некоторых случаях, вы всё же можете рассчитывать на порядок выполнения запроса. Это — случаи, когда `SELECT` идёт из подзапроса, в котором используется `ORDER BY`. -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparray/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md index 5c73bccc2bb..6933f17aabf 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md @@ -90,4 +90,3 @@ SELECT groupArrayInsertAt(number, 0) FROM numbers_mt(10) SETTINGS max_block_size └───────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md index 6307189c440..5417cafa91f 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md @@ -75,4 +75,3 @@ FROM t └───────────┴──────────────────────────────────┴───────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg/) diff --git 
a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md index c95f1b0b0eb..97eae78bd5b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md @@ -75,4 +75,3 @@ FROM t └────────────┴─────────────────────────────────┴────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md index 03aff64fecf..8ec7a98c2ef 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md @@ -45,4 +45,3 @@ binary decimal 00000100 = 4 ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitand/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md index a4be18b75ec..d42960f8b80 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md @@ -43,4 +43,3 @@ num 3 ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitmap/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md index e1afced014f..12cff73f575 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md @@ -45,4 +45,3 @@ binary decimal 01111101 = 125 ``` -[Оригинальная 
статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitor/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md index a80f86b2a5f..a0ec75fa8db 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md @@ -45,4 +45,3 @@ binary decimal 01101000 = 104 ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitxor/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupuniqarray.md b/docs/ru/sql-reference/aggregate-functions/reference/groupuniqarray.md index cecc63aef22..7d64b13a203 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/groupuniqarray.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupuniqarray.md @@ -10,4 +10,3 @@ toc_priority: 111 Функция `groupUniqArray(max_size)(x)` ограничивает размер результирующего массива до `max_size` элементов. Например, `groupUniqArray(1)(x)` равнозначно `[any(x)]`. 
-[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/index.md b/docs/ru/sql-reference/aggregate-functions/reference/index.md index e496893a771..1af07623ade 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/index.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/index.md @@ -65,4 +65,3 @@ toc_hidden: true - [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md) - [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/reference) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md b/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md index a00dae51ed6..4220195dc2f 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md @@ -24,4 +24,3 @@ kurtPop(expr) SELECT kurtPop(value) FROM series_with_value_column ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/kurtpop/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md index 379d74ec0c3..41668d7ee17 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md @@ -26,4 +26,3 @@ kurtSamp(expr) SELECT kurtSamp(value) FROM series_with_value_column ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/kurtsamp/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md b/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md index 
a4647ecfb34..5e101b89c4e 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest.md @@ -69,4 +69,3 @@ SELECT mannWhitneyUTest('greater')(sample_data, sample_index) FROM mww_ttest; - [U-критерий Манна — Уитни](https://ru.wikipedia.org/wiki/U-%D0%BA%D1%80%D0%B8%D1%82%D0%B5%D1%80%D0%B8%D0%B9_%D0%9C%D0%B0%D0%BD%D0%BD%D0%B0_%E2%80%94_%D0%A3%D0%B8%D1%82%D0%BD%D0%B8) -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/reference/mannwhitneyutest/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/max.md b/docs/ru/sql-reference/aggregate-functions/reference/max.md index 4ee577471ea..4f61ecd051d 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/max.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/max.md @@ -6,4 +6,3 @@ toc_priority: 3 Вычисляет максимум. -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/max/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/median.md b/docs/ru/sql-reference/aggregate-functions/reference/median.md index 803b2309665..a208c21dd21 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/median.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/median.md @@ -40,4 +40,3 @@ SELECT medianDeterministic(val, 1) FROM t └─────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/median/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/min.md b/docs/ru/sql-reference/aggregate-functions/reference/min.md index 7b56de3aed4..16dd577e790 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/min.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/min.md @@ -6,4 +6,3 @@ toc_priority: 2 Вычисляет минимум. 
-[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/min/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantile.md b/docs/ru/sql-reference/aggregate-functions/reference/quantile.md index 10fec16ab94..05446856a8a 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantile.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantile.md @@ -65,4 +65,3 @@ SELECT quantile(val) FROM t - [median](../../../sql-reference/aggregate-functions/reference/median.md#median) - [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantile/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md index fdbcda821f6..02c50c58ae8 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md @@ -65,4 +65,3 @@ SELECT quantileDeterministic(val, 1) FROM t - [median](../../../sql-reference/aggregate-functions/reference/median.md#median) - [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/qurntiledeterministic/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md index 4ee815a94fb..7347318fae0 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md @@ -163,4 +163,3 @@ SELECT quantileExactHigh(number) FROM numbers(10) - [median](../../../sql-reference/aggregate-functions/reference/median.md#median) - 
[quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantileexact/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md index f6982d4566f..baef55c818d 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md @@ -66,4 +66,3 @@ SELECT quantileExactWeighted(n, val) FROM t - [median](../../../sql-reference/aggregate-functions/reference/median.md#median) - [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantileexactweited/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md index 82e806b67fa..671cbc1fc4d 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md @@ -8,4 +8,3 @@ Syntax: `quantiles(level1, level2, …)(x)` All the quantile functions also have corresponding quantiles functions: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. These functions calculate all the quantiles of the listed levels in one pass, and return an array of the resulting values. 
-[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantiles/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md index f372e308e73..fbd1abadb54 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md @@ -56,4 +56,3 @@ SELECT quantileTDigest(number) FROM numbers(10) - [median](../../../sql-reference/aggregate-functions/reference/median.md#median) - [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/qurntiledigest/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md index b6dd846967b..6d943f568fc 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md @@ -57,4 +57,3 @@ SELECT quantileTDigestWeighted(number, 1) FROM numbers(10) - [median](../../../sql-reference/aggregate-functions/reference/median.md#median) - [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantiledigestweighted/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md index 32e5e6ce31b..c71332b0471 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md @@ -85,4 +85,3 @@ SELECT quantileTiming(response_time) FROM t - 
[median](../../../sql-reference/aggregate-functions/reference/median.md#median) - [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantiletiming/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md index 4a7fcc666d5..c732e6a9bb8 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md @@ -84,4 +84,3 @@ SELECT quantileTimingWeighted(response_time, weight) FROM t - [median](../../../sql-reference/aggregate-functions/reference/median.md#median) - [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantiletiming weighted/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/simplelinearregression.md b/docs/ru/sql-reference/aggregate-functions/reference/simplelinearregression.md index 370b1bde8d2..f634e553738 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/simplelinearregression.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/simplelinearregression.md @@ -41,4 +41,3 @@ SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6]) └───────────────────────────────────────────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md b/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md index a6dee5dc5ef..298aa6b76fc 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md +++ 
b/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md @@ -24,4 +24,3 @@ skewPop(expr) SELECT skewPop(value) FROM series_with_value_column ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/skewpop/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md index 171eb5e304a..872f0fee875 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md @@ -26,4 +26,3 @@ skewSamp(expr) SELECT skewSamp(value) FROM series_with_value_column ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/skewsamp/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md b/docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md index ada8b8884cd..66d63147586 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md @@ -9,4 +9,3 @@ toc_priority: 30 !!! note "Примечание" Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `stddevPopStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку. -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/stddevpop/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stddevsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/stddevsamp.md index 952b6bcde68..5fbf438e894 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/stddevsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/stddevsamp.md @@ -9,4 +9,3 @@ toc_priority: 31 !!! 
note "Примечание" Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `stddevSampStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку. -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/stddevsamp/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md index 0b268e9ea1b..6da0f6caacd 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md @@ -86,4 +86,3 @@ evalMLMethod(model, param1, param2) FROM test_data - [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md#agg_functions-stochasticlogisticregression) - [Отличие линейной от логистической регрессии.](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md index 01d3a0797bd..67454aa2c1b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md @@ -54,4 +54,3 @@ stochasticLogisticRegression(1.0, 1.0, 10, 'SGD') - [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md#agg_functions-stochasticlinearregression) - [Отличие линейной от 
логистической регрессии](https://moredez.ru/q/51225972/) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md b/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md index 77378de95d1..a9ce46a51ba 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/studentttest.md @@ -63,4 +63,3 @@ SELECT studentTTest(sample_data, sample_index) FROM student_ttest; - [t-критерий Стьюдента](https://ru.wikipedia.org/wiki/T-%D0%BA%D1%80%D0%B8%D1%82%D0%B5%D1%80%D0%B8%D0%B9_%D0%A1%D1%82%D1%8C%D1%8E%D0%B4%D0%B5%D0%BD%D1%82%D0%B0) - [welchTTest](welchttest.md#welchttest) -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/reference/studentttest/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sum.md b/docs/ru/sql-reference/aggregate-functions/reference/sum.md index 5fa769f3479..487313c006b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/sum.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/sum.md @@ -7,4 +7,3 @@ toc_priority: 4 Вычисляет сумму. Работает только для чисел. 
-[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/sum/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/summap.md b/docs/ru/sql-reference/aggregate-functions/reference/summap.md index 460fc078893..3cfe4c26fcc 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/summap.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/summap.md @@ -42,4 +42,3 @@ GROUP BY timeslot └─────────────────────┴──────────────────────────────────────────────┴────────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/summap/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sumwithoverflow.md b/docs/ru/sql-reference/aggregate-functions/reference/sumwithoverflow.md index 845adc510f2..1e1962babbe 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/sumwithoverflow.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/sumwithoverflow.md @@ -8,4 +8,3 @@ toc_priority: 140 Работает только для чисел. 
-[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/topk.md b/docs/ru/sql-reference/aggregate-functions/reference/topk.md index 6aefd38bf34..929b49c35e9 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/topk.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/topk.md @@ -36,4 +36,3 @@ FROM ontime └─────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/topk/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md index 20bd3ee85ff..e8a41e7f05f 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md @@ -41,4 +41,3 @@ SELECT topKWeighted(10)(number, number) FROM numbers(1000) └───────────────────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/topkweighted/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniq.md b/docs/ru/sql-reference/aggregate-functions/reference/uniq.md index f5f3f198139..cb92545a7fe 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniq.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniq.md @@ -39,4 +39,3 @@ uniq(x[, ...]) - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12) - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/uniq/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md index 751dc1a8c98..44783b8aca4 
100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md @@ -50,4 +50,3 @@ uniqCombined(HLL_precision)(x[, ...]) - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12) - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/uniqcombined/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined64.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined64.md index 5db27fb301d..6fde16b4b0c 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined64.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined64.md @@ -6,4 +6,3 @@ toc_priority: 193 Использует 64-битный хэш для всех типов, в отличие от [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined). 
-[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md index 3dd22b2b4bc..3e8ef4480be 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md @@ -24,4 +24,3 @@ uniqExact(x[, ...]) - [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqcombined) - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqhll12) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/uniqexact/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md index 09e52ac6833..b220121e85f 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md @@ -38,4 +38,3 @@ uniqHLL12(x[, ...]) - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/uniqhll12/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/varpop.md b/docs/ru/sql-reference/aggregate-functions/reference/varpop.md index 9615e03673b..0a78b3cbb76 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/varpop.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/varpop.md @@ -11,4 +11,3 @@ toc_priority: 32 !!! note "Примечание" Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `varPopStable`. 
Она работает медленнее, но обеспечивает меньшую вычислительную ошибку. -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/varpop/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/varsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/varsamp.md index 31aaac68e7b..e18b858b7e2 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/varsamp.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/varsamp.md @@ -13,4 +13,3 @@ toc_priority: 33 !!! note "Примечание" Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `varSampStable`. Она работает медленнее, но обеспечиват меньшую вычислительную ошибку. -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/vasamp/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md b/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md index 16c122d1b49..a37d08fc671 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/welchttest.md @@ -63,4 +63,3 @@ SELECT welchTTest(sample_data, sample_index) FROM welch_ttest; - [t-критерий Уэлча](https://ru.wikipedia.org/wiki/T-%D0%BA%D1%80%D0%B8%D1%82%D0%B5%D1%80%D0%B8%D0%B9_%D0%A3%D1%8D%D0%BB%D1%87%D0%B0) - [studentTTest](studentttest.md#studentttest) -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/reference/welchTTest/) diff --git a/docs/ru/sql-reference/data-types/aggregatefunction.md b/docs/ru/sql-reference/data-types/aggregatefunction.md index 018d38d825e..6ca6879cf6c 100644 --- a/docs/ru/sql-reference/data-types/aggregatefunction.md +++ b/docs/ru/sql-reference/data-types/aggregatefunction.md @@ -65,4 +65,3 @@ SELECT uniqMerge(state) FROM (SELECT 
uniqState(UserID) AS state FROM table GROUP Смотрите в описании движка [AggregatingMergeTree](../../sql-reference/data-types/aggregatefunction.md). -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/nested_data_structures/aggregatefunction/) diff --git a/docs/ru/sql-reference/data-types/array.md b/docs/ru/sql-reference/data-types/array.md index 86a23ed041b..30952d6e126 100644 --- a/docs/ru/sql-reference/data-types/array.md +++ b/docs/ru/sql-reference/data-types/array.md @@ -76,4 +76,3 @@ Received exception from server (version 1.1.54388): Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not. ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/array/) diff --git a/docs/ru/sql-reference/data-types/boolean.md b/docs/ru/sql-reference/data-types/boolean.md index b0fad6d7446..dff35777ff9 100644 --- a/docs/ru/sql-reference/data-types/boolean.md +++ b/docs/ru/sql-reference/data-types/boolean.md @@ -7,4 +7,3 @@ toc_title: "Булевы значения" Отдельного типа для булевых значений нет. Для них используется тип UInt8, в котором используются только значения 0 и 1. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/boolean/) diff --git a/docs/ru/sql-reference/data-types/date.md b/docs/ru/sql-reference/data-types/date.md index 490bc5c28b4..50508de96a3 100644 --- a/docs/ru/sql-reference/data-types/date.md +++ b/docs/ru/sql-reference/data-types/date.md @@ -44,4 +44,3 @@ SELECT * FROM dt; - [Тип данных `DateTime`](../../sql-reference/data-types/datetime.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/date/) diff --git a/docs/ru/sql-reference/data-types/datetime.md b/docs/ru/sql-reference/data-types/datetime.md index ffdf83e5bd0..ebd780d0d7d 100644 --- a/docs/ru/sql-reference/data-types/datetime.md +++ b/docs/ru/sql-reference/data-types/datetime.md @@ -126,4 +126,3 @@ FROM dt - [Тип данных `Date`](date.md) - [Тип данных `DateTime64`](datetime64.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/datetime/) diff --git a/docs/ru/sql-reference/data-types/decimal.md b/docs/ru/sql-reference/data-types/decimal.md index bdcd3c767b9..8524e8ea132 100644 --- a/docs/ru/sql-reference/data-types/decimal.md +++ b/docs/ru/sql-reference/data-types/decimal.md @@ -112,4 +112,3 @@ DB::Exception: Can't compare. - [countDigits](../../sql-reference/functions/other-functions.md#count-digits) -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/decimal/) diff --git a/docs/ru/sql-reference/data-types/domains/index.md b/docs/ru/sql-reference/data-types/domains/index.md index 6a968a76ff6..35f8149112f 100644 --- a/docs/ru/sql-reference/data-types/domains/index.md +++ b/docs/ru/sql-reference/data-types/domains/index.md @@ -30,4 +30,3 @@ toc_priority: 56 - Невозможно неявно преобразовывать строковые значение в значения с доменным типом данных при вставке данных из другого столбца или таблицы. - Домен не добавляет ограничения на хранимые значения. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/domains/overview) diff --git a/docs/ru/sql-reference/data-types/domains/ipv4.md b/docs/ru/sql-reference/data-types/domains/ipv4.md index 57d6f12ab17..af5f8261fae 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv4.md +++ b/docs/ru/sql-reference/data-types/domains/ipv4.md @@ -81,4 +81,3 @@ SELECT toTypeName(i), CAST(from AS UInt32) AS i FROM hits LIMIT 1; └──────────────────────────────────┴────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/domains/ipv4) diff --git a/docs/ru/sql-reference/data-types/domains/ipv6.md b/docs/ru/sql-reference/data-types/domains/ipv6.md index 04c5fd0d491..5b3c17feceb 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv6.md +++ b/docs/ru/sql-reference/data-types/domains/ipv6.md @@ -81,4 +81,3 @@ SELECT toTypeName(i), CAST(from AS FixedString(16)) AS i FROM hits LIMIT 1; └───────────────────────────────────────────┴─────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/domains/ipv6) diff --git a/docs/ru/sql-reference/data-types/enum.md b/docs/ru/sql-reference/data-types/enum.md index b86d15c19a8..95c053bed2c 100644 --- a/docs/ru/sql-reference/data-types/enum.md +++ b/docs/ru/sql-reference/data-types/enum.md @@ -126,4 +126,3 @@ INSERT INTO t_enum_nullable Values('hello'),('world'),(NULL) При ALTER, есть возможность поменять Enum8 на Enum16 и обратно - так же, как можно поменять Int8 на Int16. -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/enum/) diff --git a/docs/ru/sql-reference/data-types/fixedstring.md b/docs/ru/sql-reference/data-types/fixedstring.md index 21115418e30..ef73dadaddf 100644 --- a/docs/ru/sql-reference/data-types/fixedstring.md +++ b/docs/ru/sql-reference/data-types/fixedstring.md @@ -58,4 +58,3 @@ WHERE a = 'b\0' Обратите внимание, что длина значения `FixedString(N)` постоянна. 
Функция [length](../../sql-reference/data-types/fixedstring.md#array_functions-length) возвращает `N` даже если значение `FixedString(N)` заполнено только нулевыми байтами, однако функция [empty](../../sql-reference/data-types/fixedstring.md#empty) в этом же случае возвращает `1`. -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/fixedstring/) diff --git a/docs/ru/sql-reference/data-types/float.md b/docs/ru/sql-reference/data-types/float.md index 0e861f170b7..89ac00ab62f 100644 --- a/docs/ru/sql-reference/data-types/float.md +++ b/docs/ru/sql-reference/data-types/float.md @@ -89,4 +89,3 @@ SELECT 0 / 0 Смотрите правила сортировки `NaN` в разделе [Секция ORDER BY ](../../sql-reference/statements/select/order-by.md). -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/float/) diff --git a/docs/ru/sql-reference/data-types/geo.md b/docs/ru/sql-reference/data-types/geo.md index 23293b30927..23b47f38d05 100644 --- a/docs/ru/sql-reference/data-types/geo.md +++ b/docs/ru/sql-reference/data-types/geo.md @@ -103,4 +103,3 @@ Result: └─────────────────────────────────────────────────────────────────────────────────────────────────┴─────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/data-types/geo/) diff --git a/docs/ru/sql-reference/data-types/index.md b/docs/ru/sql-reference/data-types/index.md index 53c983a147a..2b29ee1bc19 100644 --- a/docs/ru/sql-reference/data-types/index.md +++ b/docs/ru/sql-reference/data-types/index.md @@ -11,4 +11,3 @@ ClickHouse может сохранять в ячейках таблиц данн Зависимость имен типов данных от регистра можно проверить в системной таблице [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families). Раздел содержит описания поддерживаемых типов данных и специфику их использования и/или реализации, если таковые имеются. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/) diff --git a/docs/ru/sql-reference/data-types/int-uint.md b/docs/ru/sql-reference/data-types/int-uint.md index d3c342e467a..c026f5fc4a5 100644 --- a/docs/ru/sql-reference/data-types/int-uint.md +++ b/docs/ru/sql-reference/data-types/int-uint.md @@ -35,4 +35,3 @@ toc_title: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 `UInt128` пока не реализован. -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/int_uint/) diff --git a/docs/ru/sql-reference/data-types/lowcardinality.md b/docs/ru/sql-reference/data-types/lowcardinality.md index 52713e2d747..fe9118b1e14 100644 --- a/docs/ru/sql-reference/data-types/lowcardinality.md +++ b/docs/ru/sql-reference/data-types/lowcardinality.md @@ -58,4 +58,3 @@ ORDER BY id - [Reducing Clickhouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/). - [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/yandex/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/data-types/lowcardinality/) diff --git a/docs/ru/sql-reference/data-types/multiword-types.md b/docs/ru/sql-reference/data-types/multiword-types.md index 559755ef989..0a8afff448d 100644 --- a/docs/ru/sql-reference/data-types/multiword-types.md +++ b/docs/ru/sql-reference/data-types/multiword-types.md @@ -26,4 +26,3 @@ toc_title: Составные типы | BINARY LARGE OBJECT | [String](../../sql-reference/data-types/string.md) | | BINARY VARYING | [String](../../sql-reference/data-types/string.md) | -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/data-types/multiword-types/) diff --git a/docs/ru/sql-reference/data-types/nested-data-structures/index.md b/docs/ru/sql-reference/data-types/nested-data-structures/index.md index db214b90c03..78262347bac 100644 --- a/docs/ru/sql-reference/data-types/nested-data-structures/index.md +++ b/docs/ru/sql-reference/data-types/nested-data-structures/index.md @@ -7,4 +7,3 @@ toc_title: hidden # Вложенные структуры данных {#vlozhennye-struktury-dannykh} -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/nested_data_structures/) diff --git a/docs/ru/sql-reference/data-types/nested-data-structures/nested.md b/docs/ru/sql-reference/data-types/nested-data-structures/nested.md index 0e43383b283..199d141a191 100644 --- a/docs/ru/sql-reference/data-types/nested-data-structures/nested.md +++ b/docs/ru/sql-reference/data-types/nested-data-structures/nested.md @@ -96,4 +96,3 @@ LIMIT 10 Работоспособность запроса ALTER для элементов вложенных структур данных, является сильно ограниченной. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/nested_data_structures/nested/) diff --git a/docs/ru/sql-reference/data-types/nullable.md b/docs/ru/sql-reference/data-types/nullable.md index 71e1f7a37a0..3f33c4b2540 100644 --- a/docs/ru/sql-reference/data-types/nullable.md +++ b/docs/ru/sql-reference/data-types/nullable.md @@ -48,4 +48,3 @@ SELECT x + y from t_null └────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/nullable/) diff --git a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md index 668b579ff78..454add05e8a 100644 --- a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md @@ -36,4 +36,3 @@ CREATE TABLE simple (id UInt64, val SimpleAggregateFunction(sum, Double)) ENGINE=AggregatingMergeTree ORDER BY id; ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/data-types/simpleaggregatefunction/) diff --git a/docs/ru/sql-reference/data-types/special-data-types/expression.md b/docs/ru/sql-reference/data-types/special-data-types/expression.md index 718fcc886a6..f11f66a40c7 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/expression.md +++ b/docs/ru/sql-reference/data-types/special-data-types/expression.md @@ -7,4 +7,3 @@ toc_title: Expression Используется для представления лямбда-выражений в функциях высшего порядка. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/special_data_types/expression/) diff --git a/docs/ru/sql-reference/data-types/special-data-types/index.md b/docs/ru/sql-reference/data-types/special-data-types/index.md index e6d9fa8b011..823a84e2e43 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/index.md +++ b/docs/ru/sql-reference/data-types/special-data-types/index.md @@ -9,4 +9,3 @@ toc_title: hidden Значения служебных типов данных не могут сохраняться в таблицу и выводиться в качестве результата, а возникают как промежуточный результат выполнения запроса. -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/special_data_types/) diff --git a/docs/ru/sql-reference/data-types/special-data-types/nothing.md b/docs/ru/sql-reference/data-types/special-data-types/nothing.md index c6a9cb868d8..30d425461e1 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/nothing.md +++ b/docs/ru/sql-reference/data-types/special-data-types/nothing.md @@ -19,4 +19,3 @@ SELECT toTypeName(Array()) └─────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/special_data_types/nothing/) diff --git a/docs/ru/sql-reference/data-types/special-data-types/set.md b/docs/ru/sql-reference/data-types/special-data-types/set.md index 4c2f4ed2c66..5867df3c947 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/set.md +++ b/docs/ru/sql-reference/data-types/special-data-types/set.md @@ -7,4 +7,3 @@ toc_title: Set Используется для представления правой части выражения IN. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/special_data_types/set/) diff --git a/docs/ru/sql-reference/data-types/string.md b/docs/ru/sql-reference/data-types/string.md index 6a07f7e51de..9470f523629 100644 --- a/docs/ru/sql-reference/data-types/string.md +++ b/docs/ru/sql-reference/data-types/string.md @@ -17,4 +17,3 @@ toc_title: String Также, некоторые функции по работе со строками, имеют отдельные варианты, которые работают при допущении, что строка содержит набор байт, представляющий текст в кодировке UTF-8. Например, функция length вычисляет длину строки в байтах, а функция lengthUTF8 - длину строки в кодовых точках Unicode, при допущении, что значение в кодировке UTF-8. -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/string/) diff --git a/docs/ru/sql-reference/data-types/tuple.md b/docs/ru/sql-reference/data-types/tuple.md index e2a1450b47f..702b5962f7b 100644 --- a/docs/ru/sql-reference/data-types/tuple.md +++ b/docs/ru/sql-reference/data-types/tuple.md @@ -47,4 +47,3 @@ SELECT tuple(1,NULL) AS x, toTypeName(x) └──────────┴─────────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/tuple/) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md index 9c0b731bc7d..da8492e7cc0 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md @@ -65,4 +65,3 @@ ClickHouse поддерживает свойство [hierarchical](external-dic ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/external_dicts_dict_hierarchical/) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md 
b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index 0fd4a85c46f..1d1e46250e2 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -443,4 +443,3 @@ dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) Данные должны полностью помещаться в оперативной памяти. -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/external_dicts_dict_layout/) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index 48d891b2042..9589353649d 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -86,4 +86,3 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher ... ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/external_dicts_dict_lifetime/) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 9735e91c8b6..e3816e78547 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -725,4 +725,3 @@ Setting fields: - `invalidate_query` – Запрос для проверки условия загрузки словаря. Необязательный параметр. Читайте больше в разделе [Обновление словарей](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/external_dicts_dict_sources/) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index 6efbe706110..57f53390d1c 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -169,4 +169,3 @@ CREATE DICTIONARY somename ( - [Функции для работы с внешними словарями](../../../sql-reference/functions/ext-dict-functions.md). -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/external_dicts_dict_structure/) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md index 7e35f59609d..4dc74200093 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md @@ -48,4 +48,3 @@ LIFETIME(...) -- Lifetime of dictionary in memory - [structure](external-dicts-dict-structure.md) — Структура словаря. Ключ и атрибуты, которые можно получить по ключу. - [lifetime](external-dicts-dict-lifetime.md) — Периодичность обновления словарей. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/external_dicts_dict/) diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md index 6467b5f82e4..04ef24b68c5 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md @@ -61,4 +61,3 @@ ClickHouse: - [Ключ и поля словаря](external-dicts-dict-structure.md) - [Функции для работы с внешними словарями](../../../sql-reference/functions/ext-dict-functions.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/external_dicts/) diff --git a/docs/ru/sql-reference/dictionaries/index.md b/docs/ru/sql-reference/dictionaries/index.md index 238aa244967..bd432497be8 100644 --- a/docs/ru/sql-reference/dictionaries/index.md +++ b/docs/ru/sql-reference/dictionaries/index.md @@ -17,4 +17,3 @@ ClickHouse поддерживает: - [Встроенные словари](internal-dicts.md#internal_dicts) со специфическим [набором функций](../../sql-reference/dictionaries/external-dictionaries/index.md). - [Подключаемые (внешние) словари](external-dictionaries/external-dicts.md#dicts-external-dicts) с [набором функций](../../sql-reference/dictionaries/external-dictionaries/index.md). -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/) diff --git a/docs/ru/sql-reference/dictionaries/internal-dicts.md b/docs/ru/sql-reference/dictionaries/internal-dicts.md index af7f13f7133..34e407ceacd 100644 --- a/docs/ru/sql-reference/dictionaries/internal-dicts.md +++ b/docs/ru/sql-reference/dictionaries/internal-dicts.md @@ -50,4 +50,3 @@ ClickHouse содержит встроенную возможность рабо Также имеются функции для работы с идентификаторами операционных систем и поисковых систем Яндекс.Метрики, пользоваться которыми не нужно. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/dicts/internal_dicts/) diff --git a/docs/ru/sql-reference/distributed-ddl.md b/docs/ru/sql-reference/distributed-ddl.md index 17c38cfe820..e03ecb893bc 100644 --- a/docs/ru/sql-reference/distributed-ddl.md +++ b/docs/ru/sql-reference/distributed-ddl.md @@ -15,5 +15,4 @@ CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE Для корректного выполнения таких запросов необходимо на каждом хосте иметь одинаковое определение кластера (для упрощения синхронизации конфигов можете использовать подстановки из ZooKeeper). Также необходимо подключение к ZooKeeper серверам. Локальная версия запроса в конечном итоге будет выполнена на каждом хосте кластера, даже если некоторые хосты в данный момент не доступны. Гарантируется упорядоченность выполнения запросов в рамках одного хоста. -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/distributed-ddl) \ No newline at end of file diff --git a/docs/ru/sql-reference/functions/arithmetic-functions.md b/docs/ru/sql-reference/functions/arithmetic-functions.md index 779e0a9fe4a..f587b7b5b5d 100644 --- a/docs/ru/sql-reference/functions/arithmetic-functions.md +++ b/docs/ru/sql-reference/functions/arithmetic-functions.md @@ -83,4 +83,3 @@ SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0 Вычисляет наименьшее общее кратное чисел. При делении на ноль или при делении минимального отрицательного числа на минус единицу, кидается исключение. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/arithmetic_functions/) diff --git a/docs/ru/sql-reference/functions/array-functions.md b/docs/ru/sql-reference/functions/array-functions.md index dca645888a9..eb62fdd70cb 100644 --- a/docs/ru/sql-reference/functions/array-functions.md +++ b/docs/ru/sql-reference/functions/array-functions.md @@ -1455,4 +1455,3 @@ select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]) └────────────────────────────────────────---──┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/array_functions/) diff --git a/docs/ru/sql-reference/functions/array-join.md b/docs/ru/sql-reference/functions/array-join.md index ed67d30062b..3e3cf5c4011 100644 --- a/docs/ru/sql-reference/functions/array-join.md +++ b/docs/ru/sql-reference/functions/array-join.md @@ -32,4 +32,3 @@ SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src └─────┴───────────┴─────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/array_join/) diff --git a/docs/ru/sql-reference/functions/bit-functions.md b/docs/ru/sql-reference/functions/bit-functions.md index 79ea05f4bd7..da34c7f3aba 100644 --- a/docs/ru/sql-reference/functions/bit-functions.md +++ b/docs/ru/sql-reference/functions/bit-functions.md @@ -240,4 +240,3 @@ SELECT bitCount(333) └───────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/bit_functions/) diff --git a/docs/ru/sql-reference/functions/bitmap-functions.md b/docs/ru/sql-reference/functions/bitmap-functions.md index cd0ddee01a6..af28be67116 100644 --- a/docs/ru/sql-reference/functions/bitmap-functions.md +++ b/docs/ru/sql-reference/functions/bitmap-functions.md @@ -397,4 +397,3 @@ SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res └─────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/bitmap_functions/) diff --git 
a/docs/ru/sql-reference/functions/comparison-functions.md b/docs/ru/sql-reference/functions/comparison-functions.md index 179df5c2ed5..b7301bde275 100644 --- a/docs/ru/sql-reference/functions/comparison-functions.md +++ b/docs/ru/sql-reference/functions/comparison-functions.md @@ -34,4 +34,3 @@ toc_title: "Функции сравнения" ## greaterOrEquals, оператор `>=` {#function-greaterorequals} -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/comparison_functions/) diff --git a/docs/ru/sql-reference/functions/conditional-functions.md b/docs/ru/sql-reference/functions/conditional-functions.md index 888e9427a79..537ac9dc11e 100644 --- a/docs/ru/sql-reference/functions/conditional-functions.md +++ b/docs/ru/sql-reference/functions/conditional-functions.md @@ -111,4 +111,3 @@ SELECT if(0, plus(2, 2), plus(2, 6)) └────────────────────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/conditional_functions/) diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index 9f3df92922f..f2cc051e9bd 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -941,4 +941,3 @@ SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime; └─────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/date_time_functions/) diff --git a/docs/ru/sql-reference/functions/encoding-functions.md b/docs/ru/sql-reference/functions/encoding-functions.md index 951c6c60e38..f0f0a5a1aaf 100644 --- a/docs/ru/sql-reference/functions/encoding-functions.md +++ b/docs/ru/sql-reference/functions/encoding-functions.md @@ -172,4 +172,3 @@ If you want to convert the result to a number, you can use the ‘reverse’ and Принимает целое число. 
Возвращает массив чисел типа UInt64, содержащий степени двойки, в сумме дающих исходное число; числа в массиве идут по возрастанию. -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/encoding_functions/) diff --git a/docs/ru/sql-reference/functions/ext-dict-functions.md b/docs/ru/sql-reference/functions/ext-dict-functions.md index 8d018e8e9ac..0e7ee78e52c 100644 --- a/docs/ru/sql-reference/functions/ext-dict-functions.md +++ b/docs/ru/sql-reference/functions/ext-dict-functions.md @@ -198,4 +198,3 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr) Если значение атрибута не удалось обработать или оно не соответствует типу данных атрибута, то ClickHouse генерирует исключение. -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/ext_dict_functions/) diff --git a/docs/ru/sql-reference/functions/functions-for-nulls.md b/docs/ru/sql-reference/functions/functions-for-nulls.md index f0277a59699..34de3acf5ca 100644 --- a/docs/ru/sql-reference/functions/functions-for-nulls.md +++ b/docs/ru/sql-reference/functions/functions-for-nulls.md @@ -309,4 +309,3 @@ SELECT toTypeName(toNullable(10)) └────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/functions_for_nulls/) diff --git a/docs/ru/sql-reference/functions/geo/coordinates.md b/docs/ru/sql-reference/functions/geo/coordinates.md index 09e2d7d01bf..2605dc7a82f 100644 --- a/docs/ru/sql-reference/functions/geo/coordinates.md +++ b/docs/ru/sql-reference/functions/geo/coordinates.md @@ -133,4 +133,3 @@ SELECT pointInPolygon((3., 3.), [(6, 0), (8, 4), (5, 8), (0, 2)]) AS res └─────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/functions/geo/coordinates) diff --git a/docs/ru/sql-reference/functions/geo/geohash.md b/docs/ru/sql-reference/functions/geo/geohash.md index 2dd3f83ddf1..01193eab543 100644 --- a/docs/ru/sql-reference/functions/geo/geohash.md +++ 
b/docs/ru/sql-reference/functions/geo/geohash.md @@ -112,4 +112,3 @@ SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos └─────────────────────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/functions/geo/geohash) diff --git a/docs/ru/sql-reference/functions/geo/h3.md b/docs/ru/sql-reference/functions/geo/h3.md index 7046833f7ec..41ff9f1a4f9 100644 --- a/docs/ru/sql-reference/functions/geo/h3.md +++ b/docs/ru/sql-reference/functions/geo/h3.md @@ -520,4 +520,3 @@ SELECT h3GetResolution(617420388352917503) as res; └─────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/functions/geo/h3) diff --git a/docs/ru/sql-reference/functions/geo/index.md b/docs/ru/sql-reference/functions/geo/index.md index 6b9a14e4d02..4d3bdfcd468 100644 --- a/docs/ru/sql-reference/functions/geo/index.md +++ b/docs/ru/sql-reference/functions/geo/index.md @@ -5,4 +5,3 @@ toc_title: hidden --- -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/functions/geo/) diff --git a/docs/ru/sql-reference/functions/hash-functions.md b/docs/ru/sql-reference/functions/hash-functions.md index 1742abe5b56..5a6d82d2738 100644 --- a/docs/ru/sql-reference/functions/hash-functions.md +++ b/docs/ru/sql-reference/functions/hash-functions.md @@ -484,4 +484,3 @@ SELECT xxHash32('Hello, world!'); - [xxHash](http://cyan4973.github.io/xxHash/). -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/hash_functions/) diff --git a/docs/ru/sql-reference/functions/in-functions.md b/docs/ru/sql-reference/functions/in-functions.md index 7326d087610..2bdb71d5f93 100644 --- a/docs/ru/sql-reference/functions/in-functions.md +++ b/docs/ru/sql-reference/functions/in-functions.md @@ -9,4 +9,3 @@ toc_title: "Функции для реализации оператора IN" Смотрите раздел [Операторы IN](../operators/in.md#select-in-operators). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/in_functions/) diff --git a/docs/ru/sql-reference/functions/index.md b/docs/ru/sql-reference/functions/index.md index ae3879b6c96..1eefd4d9f73 100644 --- a/docs/ru/sql-reference/functions/index.md +++ b/docs/ru/sql-reference/functions/index.md @@ -82,4 +82,3 @@ str -> str != Referer Если функция в запросе выполняется на сервере-инициаторе запроса, а вам нужно, чтобы она выполнялась на удалённых серверах, вы можете обернуть её в агрегатную функцию any или добавить в ключ в `GROUP BY`. -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/) diff --git a/docs/ru/sql-reference/functions/ip-address-functions.md b/docs/ru/sql-reference/functions/ip-address-functions.md index a2a08b1938e..501e42d78db 100644 --- a/docs/ru/sql-reference/functions/ip-address-functions.md +++ b/docs/ru/sql-reference/functions/ip-address-functions.md @@ -395,4 +395,3 @@ SELECT addr, isIPv6String(addr) FROM ( SELECT ['::', '1111::ffff', '::ffff:127.0 └──────────────────┴────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/ip_address_functions/) diff --git a/docs/ru/sql-reference/functions/json-functions.md b/docs/ru/sql-reference/functions/json-functions.md index 69b8f8f98f5..704979b9dc6 100644 --- a/docs/ru/sql-reference/functions/json-functions.md +++ b/docs/ru/sql-reference/functions/json-functions.md @@ -294,4 +294,3 @@ SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello" └───────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/json_functions/) diff --git a/docs/ru/sql-reference/functions/logical-functions.md b/docs/ru/sql-reference/functions/logical-functions.md index 2d71c60a509..8566657d2eb 100644 --- a/docs/ru/sql-reference/functions/logical-functions.md +++ 
b/docs/ru/sql-reference/functions/logical-functions.md @@ -17,4 +17,3 @@ toc_title: "Логические функции" ## xor {#xor} -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/logical_functions/) diff --git a/docs/ru/sql-reference/functions/math-functions.md b/docs/ru/sql-reference/functions/math-functions.md index a5ba01f6282..eb369d476a0 100644 --- a/docs/ru/sql-reference/functions/math-functions.md +++ b/docs/ru/sql-reference/functions/math-functions.md @@ -468,4 +468,3 @@ SELECT sign(-1); └──────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/math_functions/) diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index 595d2458ca9..60c867a2158 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -1854,4 +1854,3 @@ SELECT tcpPort(); - [tcp_port](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port) -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/other_functions/) diff --git a/docs/ru/sql-reference/functions/random-functions.md b/docs/ru/sql-reference/functions/random-functions.md index a09f5159309..efe5259c44a 100644 --- a/docs/ru/sql-reference/functions/random-functions.md +++ b/docs/ru/sql-reference/functions/random-functions.md @@ -107,4 +107,3 @@ FROM numbers(3) └───────────────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/random_functions/) diff --git a/docs/ru/sql-reference/functions/rounding-functions.md b/docs/ru/sql-reference/functions/rounding-functions.md index 704e7f5dd52..9eb59d955fc 100644 --- a/docs/ru/sql-reference/functions/rounding-functions.md +++ b/docs/ru/sql-reference/functions/rounding-functions.md @@ -177,4 +177,3 @@ roundBankers(10.755, 2) = 11,76 Принимает число. Если число меньше 18 - возвращает 0. 
Иначе округляет число вниз до чисел из набора: 18, 25, 35, 45, 55. Эта функция специфична для Яндекс.Метрики и предназначена для реализации отчёта по возрасту посетителей. -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/rounding_functions/) diff --git a/docs/ru/sql-reference/functions/splitting-merging-functions.md b/docs/ru/sql-reference/functions/splitting-merging-functions.md index cacce5f4ba2..f393b220e1f 100644 --- a/docs/ru/sql-reference/functions/splitting-merging-functions.md +++ b/docs/ru/sql-reference/functions/splitting-merging-functions.md @@ -115,4 +115,3 @@ SELECT alphaTokens('abca1abc') └─────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/splitting_merging_functions/) diff --git a/docs/ru/sql-reference/functions/string-functions.md b/docs/ru/sql-reference/functions/string-functions.md index 65a1cd63563..c222a7bd4f9 100644 --- a/docs/ru/sql-reference/functions/string-functions.md +++ b/docs/ru/sql-reference/functions/string-functions.md @@ -645,4 +645,3 @@ SELECT decodeXMLComponent('< Σ >'); - [Мнемоники в HTML](https://ru.wikipedia.org/wiki/%D0%9C%D0%BD%D0%B5%D0%BC%D0%BE%D0%BD%D0%B8%D0%BA%D0%B8_%D0%B2_HTML) -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/string_functions/) diff --git a/docs/ru/sql-reference/functions/string-replace-functions.md b/docs/ru/sql-reference/functions/string-replace-functions.md index f00a06d1560..9426e8685b0 100644 --- a/docs/ru/sql-reference/functions/string-replace-functions.md +++ b/docs/ru/sql-reference/functions/string-replace-functions.md @@ -83,4 +83,3 @@ SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res └─────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/string_replace_functions/) diff --git a/docs/ru/sql-reference/functions/string-search-functions.md b/docs/ru/sql-reference/functions/string-search-functions.md index 
95ac922a4a8..51ec90bbeff 100644 --- a/docs/ru/sql-reference/functions/string-search-functions.md +++ b/docs/ru/sql-reference/functions/string-search-functions.md @@ -757,4 +757,3 @@ SELECT countSubstringsCaseInsensitiveUTF8('аБв__АбВ__абв', 'Абв'); └────────────────────────────────────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/string_search_functions/) diff --git a/docs/ru/sql-reference/functions/tuple-functions.md b/docs/ru/sql-reference/functions/tuple-functions.md index f88886ec6f1..eb7a873072b 100644 --- a/docs/ru/sql-reference/functions/tuple-functions.md +++ b/docs/ru/sql-reference/functions/tuple-functions.md @@ -111,4 +111,3 @@ SELECT untuple((* EXCEPT (v2, v3),)) FROM kv; - [Tuple](../../sql-reference/data-types/tuple.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/functions/tuple-functions/) diff --git a/docs/ru/sql-reference/functions/tuple-map-functions.md b/docs/ru/sql-reference/functions/tuple-map-functions.md index 696fdb9e5ae..23aca717c05 100644 --- a/docs/ru/sql-reference/functions/tuple-map-functions.md +++ b/docs/ru/sql-reference/functions/tuple-map-functions.md @@ -301,4 +301,3 @@ SELECT mapValues(a) FROM test; └──────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/functions/tuple-map-functions/) diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index f312f9f5847..c1aa982f051 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -1195,4 +1195,3 @@ FROM numbers(3); └───────────────────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/type_conversion_functions/) diff --git a/docs/ru/sql-reference/functions/url-functions.md b/docs/ru/sql-reference/functions/url-functions.md index 
83f7fd32f6c..0d2b3030f9b 100644 --- a/docs/ru/sql-reference/functions/url-functions.md +++ b/docs/ru/sql-reference/functions/url-functions.md @@ -405,4 +405,3 @@ SELECT netloc('http://paul@www.example.com:80/'); Удаляет параметр URL с именем name, если такой есть. Функция работает при допущении, что имя параметра закодировано в URL в точности таким же образом, что и в переданном аргументе. -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/url_functions/) diff --git a/docs/ru/sql-reference/functions/ym-dict-functions.md b/docs/ru/sql-reference/functions/ym-dict-functions.md index f6d02e553a0..aa2be3e1892 100644 --- a/docs/ru/sql-reference/functions/ym-dict-functions.md +++ b/docs/ru/sql-reference/functions/ym-dict-functions.md @@ -151,4 +151,3 @@ regionToTopContinent(id[, geobase]); `ua` и `uk` обозначают одно и то же - украинский язык. -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/ym_dict_functions/) diff --git a/docs/ru/sql-reference/index.md b/docs/ru/sql-reference/index.md index 7aea530c7ee..62d6a9cecde 100644 --- a/docs/ru/sql-reference/index.md +++ b/docs/ru/sql-reference/index.md @@ -13,4 +13,3 @@ toc_title: hidden - [ALTER](statements/alter/index.md#query_language_queries_alter) - [Прочие виды запросов](statements/misc.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/) diff --git a/docs/ru/sql-reference/operators/index.md b/docs/ru/sql-reference/operators/index.md index 691c398ce4c..b7cacaf7a03 100644 --- a/docs/ru/sql-reference/operators/index.md +++ b/docs/ru/sql-reference/operators/index.md @@ -297,4 +297,3 @@ SELECT * FROM t_null WHERE y IS NOT NULL └───┴───┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/operators/) diff --git a/docs/ru/sql-reference/statements/alter/column.md b/docs/ru/sql-reference/statements/alter/column.md index 35a1952d842..d1a117e4dd1 100644 --- a/docs/ru/sql-reference/statements/alter/column.md +++ 
b/docs/ru/sql-reference/statements/alter/column.md @@ -170,4 +170,3 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL; Для таблиц, которые не хранят данные самостоятельно (типа [Merge](../../../sql-reference/statements/alter/index.md) и [Distributed](../../../sql-reference/statements/alter/index.md)), `ALTER` всего лишь меняет структуру таблицы, но не меняет структуру подчинённых таблиц. Для примера, при ALTER-е таблицы типа `Distributed`, вам также потребуется выполнить запрос `ALTER` для таблиц на всех удалённых серверах. -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/alter/column/) diff --git a/docs/ru/sql-reference/statements/alter/constraint.md b/docs/ru/sql-reference/statements/alter/constraint.md index 13396f33621..452bf649415 100644 --- a/docs/ru/sql-reference/statements/alter/constraint.md +++ b/docs/ru/sql-reference/statements/alter/constraint.md @@ -20,4 +20,3 @@ ALTER TABLE [db].name DROP CONSTRAINT constraint_name; Запрос на изменение ограничений для Replicated таблиц реплицируется, сохраняя новые метаданные в ZooKeeper и применяя изменения на всех репликах. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/alter/constraint/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/alter/delete.md b/docs/ru/sql-reference/statements/alter/delete.md index ee5f03d9d95..70a411dab83 100644 --- a/docs/ru/sql-reference/statements/alter/delete.md +++ b/docs/ru/sql-reference/statements/alter/delete.md @@ -26,4 +26,3 @@ ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE WHERE filter_expr - [Синхронность запросов ALTER](../../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) - [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/alter/delete/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/alter/index.md b/docs/ru/sql-reference/statements/alter/index.md index 830c4a5745b..648fb7e7c5c 100644 --- a/docs/ru/sql-reference/statements/alter/index.md +++ b/docs/ru/sql-reference/statements/alter/index.md @@ -69,4 +69,3 @@ ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name Для запросов `ALTER TABLE ... UPDATE|DELETE` синхронность выполнения определяется настройкой [mutations_sync](../../../operations/settings/settings.md#mutations_sync). -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/alter/index/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/alter/index/index.md b/docs/ru/sql-reference/statements/alter/index/index.md index a42bccd7b47..862def5cc04 100644 --- a/docs/ru/sql-reference/statements/alter/index/index.md +++ b/docs/ru/sql-reference/statements/alter/index/index.md @@ -21,4 +21,3 @@ ALTER TABLE [db].name DROP INDEX name Запрос на изменение индексов реплицируется, сохраняя новые метаданные в ZooKeeper и применяя изменения на всех репликах. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/alter/index/index/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/alter/order-by.md b/docs/ru/sql-reference/statements/alter/order-by.md index 32c0e382445..f0a9bfe3730 100644 --- a/docs/ru/sql-reference/statements/alter/order-by.md +++ b/docs/ru/sql-reference/statements/alter/order-by.md @@ -19,4 +19,3 @@ MODIFY ORDER BY new_expression сортировки, разрешено добавлять в ключ только новые столбцы (т.е. столбцы, добавляемые командой `ADD COLUMN` в том же запросе `ALTER`), у которых нет выражения по умолчанию. -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/alter/order-by/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/alter/partition.md b/docs/ru/sql-reference/statements/alter/partition.md index 8776c70c89e..3e7b069b066 100644 --- a/docs/ru/sql-reference/statements/alter/partition.md +++ b/docs/ru/sql-reference/statements/alter/partition.md @@ -306,4 +306,3 @@ OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; Примеры запросов `ALTER ... PARTITION` можно посмотреть в тестах: [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) и [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/alter/partition/) diff --git a/docs/ru/sql-reference/statements/alter/quota.md b/docs/ru/sql-reference/statements/alter/quota.md index 0bdac1381da..4dadac1f473 100644 --- a/docs/ru/sql-reference/statements/alter/quota.md +++ b/docs/ru/sql-reference/statements/alter/quota.md @@ -39,4 +39,3 @@ ALTER QUOTA IF EXISTS qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER ALTER QUOTA IF EXISTS qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default; ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/alter/quota/) diff --git a/docs/ru/sql-reference/statements/alter/role.md b/docs/ru/sql-reference/statements/alter/role.md index 69f7c5828c5..e9ce62c58d5 100644 --- a/docs/ru/sql-reference/statements/alter/role.md +++ b/docs/ru/sql-reference/statements/alter/role.md @@ -15,4 +15,3 @@ ALTER ROLE [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1] [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/alter/role/) diff --git a/docs/ru/sql-reference/statements/alter/row-policy.md b/docs/ru/sql-reference/statements/alter/row-policy.md index e2d23cda3ff..cff4d4e497a 100644 --- a/docs/ru/sql-reference/statements/alter/row-policy.md +++ b/docs/ru/sql-reference/statements/alter/row-policy.md @@ -18,4 +18,3 @@ ALTER [ROW] POLICY [IF EXISTS] name1 [ON CLUSTER cluster_name1] ON [database1.]t [TO {role [,...] 
| ALL | ALL EXCEPT role [,...]}] ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/alter/row-policy/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/alter/settings-profile.md b/docs/ru/sql-reference/statements/alter/settings-profile.md index 54502901837..9b8646919ca 100644 --- a/docs/ru/sql-reference/statements/alter/settings-profile.md +++ b/docs/ru/sql-reference/statements/alter/settings-profile.md @@ -15,4 +15,3 @@ ALTER SETTINGS PROFILE [IF EXISTS] TO name1 [ON CLUSTER cluster_name1] [RENAME T [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...] ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/alter/settings-profile) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/alter/ttl.md b/docs/ru/sql-reference/statements/alter/ttl.md index 5721ec9cf27..32b35da8627 100644 --- a/docs/ru/sql-reference/statements/alter/ttl.md +++ b/docs/ru/sql-reference/statements/alter/ttl.md @@ -83,4 +83,3 @@ SELECT * FROM table_with_ttl; - Подробнее о [свойстве TTL](../../../engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-ttl). -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/alter/ttl/) diff --git a/docs/ru/sql-reference/statements/alter/update.md b/docs/ru/sql-reference/statements/alter/update.md index e3d6725419a..206412d4be9 100644 --- a/docs/ru/sql-reference/statements/alter/update.md +++ b/docs/ru/sql-reference/statements/alter/update.md @@ -26,4 +26,3 @@ ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] 
WHERE filter_expr - [Синхронность запросов ALTER](../../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) - [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/alter/update/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/alter/user.md b/docs/ru/sql-reference/statements/alter/user.md index 41574f74200..604eff9de15 100644 --- a/docs/ru/sql-reference/statements/alter/user.md +++ b/docs/ru/sql-reference/statements/alter/user.md @@ -44,4 +44,3 @@ ALTER USER user DEFAULT ROLE ALL ALTER USER user DEFAULT ROLE ALL EXCEPT role1, role2 ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/alter/user/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/attach.md b/docs/ru/sql-reference/statements/attach.md index 259ab893e63..b0d0a31ba4a 100644 --- a/docs/ru/sql-reference/statements/attach.md +++ b/docs/ru/sql-reference/statements/attach.md @@ -19,5 +19,4 @@ ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] Этот запрос используется при старте сервера. Сервер хранит метаданные таблиц в виде файлов с запросами `ATTACH`, которые он просто исполняет при запуске (за исключением системных таблиц, которые явно создаются на сервере). -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/attach/) diff --git a/docs/ru/sql-reference/statements/check-table.md b/docs/ru/sql-reference/statements/check-table.md index 3dc135d87c6..10336f821d0 100644 --- a/docs/ru/sql-reference/statements/check-table.md +++ b/docs/ru/sql-reference/statements/check-table.md @@ -41,4 +41,3 @@ CHECK TABLE [db.]name 4. Перезапустите `clickhouse-client`, чтобы вернуть предыдущее значение параметра `max_threads`. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/check-table/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/create/database.md b/docs/ru/sql-reference/statements/create/database.md index 0e880517134..7d19f3e8f17 100644 --- a/docs/ru/sql-reference/statements/create/database.md +++ b/docs/ru/sql-reference/statements/create/database.md @@ -31,5 +31,4 @@ CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster] [ENGINE = engine(.. По умолчанию ClickHouse использует собственный движок баз данных. -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/create/database) diff --git a/docs/ru/sql-reference/statements/create/dictionary.md b/docs/ru/sql-reference/statements/create/dictionary.md index dba2aa61ca1..a41b2cb9ad5 100644 --- a/docs/ru/sql-reference/statements/create/dictionary.md +++ b/docs/ru/sql-reference/statements/create/dictionary.md @@ -27,5 +27,4 @@ LIFETIME({MIN min_val MAX max_val | max_val}) Смотрите [Внешние словари](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/create/dictionary) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/create/index.md b/docs/ru/sql-reference/statements/create/index.md index 70961e4f404..dfa5c28fff7 100644 --- a/docs/ru/sql-reference/statements/create/index.md +++ b/docs/ru/sql-reference/statements/create/index.md @@ -18,4 +18,3 @@ toc_title: "Обзор" - [QUOTA](../../../sql-reference/statements/create/quota.md) - [SETTINGS PROFILE](../../../sql-reference/statements/create/settings-profile.md) -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/statements/create/) diff --git a/docs/ru/sql-reference/statements/create/quota.md b/docs/ru/sql-reference/statements/create/quota.md index f5ac0df010e..8b07b51337a 100644 --- a/docs/ru/sql-reference/statements/create/quota.md +++ b/docs/ru/sql-reference/statements/create/quota.md @@ -37,5 +37,4 @@ CREATE QUOTA qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER; CREATE QUOTA qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default; ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/create/quota) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/create/role.md b/docs/ru/sql-reference/statements/create/role.md index 8592f263156..16450b41126 100644 --- a/docs/ru/sql-reference/statements/create/role.md +++ b/docs/ru/sql-reference/statements/create/role.md @@ -46,5 +46,4 @@ SET ROLE accountant; SELECT * FROM db.*; ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/create/role) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/create/row-policy.md b/docs/ru/sql-reference/statements/create/row-policy.md index 75f6fdfd2e1..88709598906 100644 --- a/docs/ru/sql-reference/statements/create/row-policy.md +++ b/docs/ru/sql-reference/statements/create/row-policy.md @@ -42,5 
+42,4 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste `CREATE ROW POLICY filter ON mydb.mytable FOR SELECT USING a<1000 TO ALL EXCEPT mira` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/create/row-policy) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/create/settings-profile.md b/docs/ru/sql-reference/statements/create/settings-profile.md index 5838ddc9153..522caf04c80 100644 --- a/docs/ru/sql-reference/statements/create/settings-profile.md +++ b/docs/ru/sql-reference/statements/create/settings-profile.md @@ -25,5 +25,4 @@ CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] TO name1 [ON CLUSTER cluste CREATE SETTINGS PROFILE max_memory_usage_profile SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/create/settings-profile) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/create/table.md b/docs/ru/sql-reference/statements/create/table.md index 8e2c471e548..a210c3687ef 100644 --- a/docs/ru/sql-reference/statements/create/table.md +++ b/docs/ru/sql-reference/statements/create/table.md @@ -243,5 +243,4 @@ CREATE TEMPORARY TABLE [IF NOT EXISTS] table_name -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/create/table) diff --git a/docs/ru/sql-reference/statements/create/user.md b/docs/ru/sql-reference/statements/create/user.md index ac9547691e6..68277d67052 100644 --- a/docs/ru/sql-reference/statements/create/user.md +++ b/docs/ru/sql-reference/statements/create/user.md @@ -81,5 +81,4 @@ CREATE USER user DEFAULT ROLE ALL CREATE USER john DEFAULT ROLE ALL EXCEPT role1, role2 ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/create/user) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/create/view.md b/docs/ru/sql-reference/statements/create/view.md index 
da021059a8e..f867fc18de2 100644 --- a/docs/ru/sql-reference/statements/create/view.md +++ b/docs/ru/sql-reference/statements/create/view.md @@ -62,4 +62,3 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]na Отсутствует отдельный запрос для удаления представлений. Чтобы удалить представление, следует использовать `DROP TABLE`. -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/create/view) diff --git a/docs/ru/sql-reference/statements/describe-table.md b/docs/ru/sql-reference/statements/describe-table.md index 64ed61de232..c66dbb66521 100644 --- a/docs/ru/sql-reference/statements/describe-table.md +++ b/docs/ru/sql-reference/statements/describe-table.md @@ -21,4 +21,3 @@ DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format] Вложенные структуры данных выводятся в «развёрнутом» виде. То есть, каждый столбец - по отдельности, с именем через точку. -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/describe-table/) diff --git a/docs/ru/sql-reference/statements/detach.md b/docs/ru/sql-reference/statements/detach.md index 00d0a4b20c6..1dd5cd97643 100644 --- a/docs/ru/sql-reference/statements/detach.md +++ b/docs/ru/sql-reference/statements/detach.md @@ -16,4 +16,3 @@ DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] Запроса `DETACH DATABASE` нет. -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/detach/) diff --git a/docs/ru/sql-reference/statements/drop.md b/docs/ru/sql-reference/statements/drop.md index 514a92db91f..118f8eb923a 100644 --- a/docs/ru/sql-reference/statements/drop.md +++ b/docs/ru/sql-reference/statements/drop.md @@ -97,4 +97,3 @@ DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] 
[ON CLUSTER cluster_name] DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster] ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/drop/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/exists.md b/docs/ru/sql-reference/statements/exists.md index 0b2fd69273c..d4f1f707e79 100644 --- a/docs/ru/sql-reference/statements/exists.md +++ b/docs/ru/sql-reference/statements/exists.md @@ -12,4 +12,3 @@ EXISTS [TEMPORARY] TABLE [db.]name [INTO OUTFILE filename] [FORMAT format] Возвращает один столбец типа `UInt8`, содержащий одно значение - `0`, если таблицы или БД не существует и `1`, если таблица в указанной БД существует. -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/exists/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/grant.md b/docs/ru/sql-reference/statements/grant.md index d38e2ea38a0..7b2d26902ef 100644 --- a/docs/ru/sql-reference/statements/grant.md +++ b/docs/ru/sql-reference/statements/grant.md @@ -483,4 +483,3 @@ GRANT INSERT(x,y) ON db.table TO john Привилегия `ADMIN OPTION` разрешает пользователю назначать свои роли другому пользователю. -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/grant/) diff --git a/docs/ru/sql-reference/statements/insert-into.md b/docs/ru/sql-reference/statements/insert-into.md index 0ad85ed0166..bbd330962cf 100644 --- a/docs/ru/sql-reference/statements/insert-into.md +++ b/docs/ru/sql-reference/statements/insert-into.md @@ -119,4 +119,3 @@ INSERT INTO [db.]table [(c1, c2, c3)] SELECT ... - Данные поступают в режиме реального времени. - Вы загружаете данные, которые как правило отсортированы по времени. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/insert_into/) diff --git a/docs/ru/sql-reference/statements/kill.md b/docs/ru/sql-reference/statements/kill.md index e2556a7f782..6981d630dd8 100644 --- a/docs/ru/sql-reference/statements/kill.md +++ b/docs/ru/sql-reference/statements/kill.md @@ -70,4 +70,3 @@ KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = ' Данные, уже изменённые мутацией, остаются в таблице (отката на старую версию данных не происходит). -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/kill/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/misc.md b/docs/ru/sql-reference/statements/misc.md index e9ceece8b2c..cedf52b7a34 100644 --- a/docs/ru/sql-reference/statements/misc.md +++ b/docs/ru/sql-reference/statements/misc.md @@ -19,4 +19,3 @@ toc_priority: 41 - [TRUNCATE](../../sql-reference/statements/truncate.md) - [USE](../../sql-reference/statements/use.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/misc/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/optimize.md b/docs/ru/sql-reference/statements/optimize.md index 8b1d72fed80..44101910a6c 100644 --- a/docs/ru/sql-reference/statements/optimize.md +++ b/docs/ru/sql-reference/statements/optimize.md @@ -21,4 +21,3 @@ OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION I !!! warning "Внимание" Запрос `OPTIMIZE` не может устранить причину появления ошибки «Too many parts». -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/optimize/) diff --git a/docs/ru/sql-reference/statements/rename.md b/docs/ru/sql-reference/statements/rename.md index 94bf3c682a1..104918c1a73 100644 --- a/docs/ru/sql-reference/statements/rename.md +++ b/docs/ru/sql-reference/statements/rename.md @@ -14,4 +14,3 @@ RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... 
Переименовывание таблицы является лёгкой операцией. Если вы указали после `TO` другую базу данных, то таблица будет перенесена в эту базу данных. При этом, директории с базами данных должны быть расположены в одной файловой системе (иначе возвращается ошибка). В случае переименования нескольких таблиц в одном запросе — это неатомарная операция, может выполнится частично, запросы в других сессиях могут получить ошибку `Table ... doesn't exist...`. -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/rename/) diff --git a/docs/ru/sql-reference/statements/revoke.md b/docs/ru/sql-reference/statements/revoke.md index 339746b8591..a3a282d6e5c 100644 --- a/docs/ru/sql-reference/statements/revoke.md +++ b/docs/ru/sql-reference/statements/revoke.md @@ -45,4 +45,3 @@ GRANT SELECT ON accounts.staff TO mira; REVOKE SELECT(wage) ON accounts.staff FROM mira; ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/operations/settings/settings/) diff --git a/docs/ru/sql-reference/statements/select/all.md b/docs/ru/sql-reference/statements/select/all.md index 4049d77a173..d36a23ca54e 100644 --- a/docs/ru/sql-reference/statements/select/all.md +++ b/docs/ru/sql-reference/statements/select/all.md @@ -19,4 +19,3 @@ SELECT sum(ALL number) FROM numbers(10); SELECT sum(number) FROM numbers(10); ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/select/all) diff --git a/docs/ru/sql-reference/statements/select/index.md b/docs/ru/sql-reference/statements/select/index.md index a548a988a89..886952ea5cf 100644 --- a/docs/ru/sql-reference/statements/select/index.md +++ b/docs/ru/sql-reference/statements/select/index.md @@ -280,4 +280,3 @@ SELECT * REPLACE(i + 1 AS i) EXCEPT (j) APPLY(sum) from columns_transformers; SELECT * FROM some_table SETTINGS optimize_read_in_order=1, cast_keep_nullable=1; ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/select/) diff --git 
a/docs/ru/sql-reference/statements/select/order-by.md b/docs/ru/sql-reference/statements/select/order-by.md index f8b838cbd15..9ddec923701 100644 --- a/docs/ru/sql-reference/statements/select/order-by.md +++ b/docs/ru/sql-reference/statements/select/order-by.md @@ -473,4 +473,3 @@ SELECT * FROM test_fetch ORDER BY a OFFSET 3 ROW FETCH FIRST 3 ROWS WITH TIES; └───┴───┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/select/order-by/) diff --git a/docs/ru/sql-reference/statements/select/union.md b/docs/ru/sql-reference/statements/select/union.md index 8f1dc11c802..de8a9b0e4ea 100644 --- a/docs/ru/sql-reference/statements/select/union.md +++ b/docs/ru/sql-reference/statements/select/union.md @@ -78,4 +78,3 @@ SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 2; Запросы, которые являются частью `UNION/UNION ALL/UNION DISTINCT`, выполняются параллельно, и их результаты могут быть смешаны вместе. -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/select/union/) diff --git a/docs/ru/sql-reference/statements/select/with.md b/docs/ru/sql-reference/statements/select/with.md index 328b28c27ef..7e09d94770a 100644 --- a/docs/ru/sql-reference/statements/select/with.md +++ b/docs/ru/sql-reference/statements/select/with.md @@ -67,4 +67,3 @@ WITH test1 AS (SELECT i + 1, j + 1 FROM test1) SELECT * FROM test1; ``` -[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/statements/select/with/) diff --git a/docs/ru/sql-reference/statements/set-role.md b/docs/ru/sql-reference/statements/set-role.md index ccbef41aa9b..b21a9ec8319 100644 --- a/docs/ru/sql-reference/statements/set-role.md +++ b/docs/ru/sql-reference/statements/set-role.md @@ -54,4 +54,3 @@ SET DEFAULT ROLE ALL EXCEPT role1, role2 TO user ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/set-role/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/set.md 
b/docs/ru/sql-reference/statements/set.md index b60dfcf8324..fa96c3c2a1b 100644 --- a/docs/ru/sql-reference/statements/set.md +++ b/docs/ru/sql-reference/statements/set.md @@ -19,4 +19,3 @@ SET profile = 'profile-name-from-the-settings-file' Подробности смотрите в разделе [Настройки](../../operations/settings/settings.md). -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/set/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/show.md b/docs/ru/sql-reference/statements/show.md index b214f0072e3..6d39bab4990 100644 --- a/docs/ru/sql-reference/statements/show.md +++ b/docs/ru/sql-reference/statements/show.md @@ -427,4 +427,3 @@ SHOW CHANGED SETTINGS ILIKE '%MEMORY%' - Таблица [system.settings](../../operations/system-tables/settings.md) -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/show/) diff --git a/docs/ru/sql-reference/statements/system.md b/docs/ru/sql-reference/statements/system.md index a6a6c5047af..ab68033d4f3 100644 --- a/docs/ru/sql-reference/statements/system.md +++ b/docs/ru/sql-reference/statements/system.md @@ -265,4 +265,3 @@ SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name ### RESTART REPLICAS {#query_language-system-restart-replicas} Реинициализация состояния Zookeeper сессий для всех `ReplicatedMergeTree` таблиц, сравнивает текущее состояние с тем что хранится в Zookeeper как источник правды и добавляет задачи Zookeeper очередь если необходимо -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/system/) diff --git a/docs/ru/sql-reference/statements/truncate.md b/docs/ru/sql-reference/statements/truncate.md index 4909d349658..b23d96d5b08 100644 --- a/docs/ru/sql-reference/statements/truncate.md +++ b/docs/ru/sql-reference/statements/truncate.md @@ -14,4 +14,3 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] Запрос `TRUNCATE` не поддерживается для следующих движков: 
[View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md) и [Null](../../engines/table-engines/special/null.md). -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/truncate/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/use.md b/docs/ru/sql-reference/statements/use.md index c84329ea5ff..0d40870c23a 100644 --- a/docs/ru/sql-reference/statements/use.md +++ b/docs/ru/sql-reference/statements/use.md @@ -13,4 +13,3 @@ USE db Текущая база данных используется для поиска таблиц, если база данных не указана в запросе явно через точку перед именем таблицы. При использовании HTTP протокола запрос не может быть выполнен, так как понятия сессии не существует. -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/use/) \ No newline at end of file diff --git a/docs/ru/sql-reference/syntax.md b/docs/ru/sql-reference/syntax.md index d8eaa4f1731..6a923fd6b58 100644 --- a/docs/ru/sql-reference/syntax.md +++ b/docs/ru/sql-reference/syntax.md @@ -181,4 +181,3 @@ Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception Список выражений - одно выражение или несколько выражений через запятую. Функции и операторы, в свою очередь, в качестве аргументов, могут иметь произвольные выражения. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/syntax/) diff --git a/docs/ru/sql-reference/table-functions/file.md b/docs/ru/sql-reference/table-functions/file.md index f9bdf902ad8..1d8604528be 100644 --- a/docs/ru/sql-reference/table-functions/file.md +++ b/docs/ru/sql-reference/table-functions/file.md @@ -126,4 +126,3 @@ SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, - [Виртуальные столбцы](index.md#table_engines-virtual_columns) -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/table-functions/file/) diff --git a/docs/ru/sql-reference/table-functions/generate.md b/docs/ru/sql-reference/table-functions/generate.md index 47b7e43bc86..91b8847be8f 100644 --- a/docs/ru/sql-reference/table-functions/generate.md +++ b/docs/ru/sql-reference/table-functions/generate.md @@ -38,4 +38,3 @@ SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64( └──────────┴──────────────┴────────────────────────────────────────────────────────────────────┘ ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/generate/) diff --git a/docs/ru/sql-reference/table-functions/hdfs.md b/docs/ru/sql-reference/table-functions/hdfs.md index 6edd70b7b1b..56aaeae487c 100644 --- a/docs/ru/sql-reference/table-functions/hdfs.md +++ b/docs/ru/sql-reference/table-functions/hdfs.md @@ -61,4 +61,3 @@ LIMIT 2 - [Виртуальные столбцы](index.md#table_engines-virtual_columns) -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/hdfs/) diff --git a/docs/ru/sql-reference/table-functions/index.md b/docs/ru/sql-reference/table-functions/index.md index ca90306bbd5..52eaad13507 100644 --- a/docs/ru/sql-reference/table-functions/index.md +++ b/docs/ru/sql-reference/table-functions/index.md @@ -35,4 +35,3 @@ toc_title: "Введение" | [hdfs](../../sql-reference/table-functions/hdfs.md) | Создаёт таблицу с движком 
[HDFS](../../engines/table-engines/integrations/hdfs.md). | | [s3](../../sql-reference/table-functions/s3.md) | Создаёт таблицу с движком [S3](../../engines/table-engines/integrations/s3.md). | -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/) diff --git a/docs/ru/sql-reference/table-functions/input.md b/docs/ru/sql-reference/table-functions/input.md index 96cf7515d52..0f5f621a247 100644 --- a/docs/ru/sql-reference/table-functions/input.md +++ b/docs/ru/sql-reference/table-functions/input.md @@ -43,4 +43,3 @@ $ cat data.csv | clickhouse-client --query="INSERT INTO test FORMAT CSV" $ cat data.csv | clickhouse-client --query="INSERT INTO test SELECT * FROM input('test_structure') FORMAT CSV" ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/input/) diff --git a/docs/ru/sql-reference/table-functions/jdbc.md b/docs/ru/sql-reference/table-functions/jdbc.md index d388262606f..4fc237f940d 100644 --- a/docs/ru/sql-reference/table-functions/jdbc.md +++ b/docs/ru/sql-reference/table-functions/jdbc.md @@ -24,4 +24,3 @@ SELECT * FROM jdbc('mysql://localhost:3306/?user=root&password=root', 'schema', SELECT * FROM jdbc('datasource://mysql-local', 'schema', 'table') ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/jdbc/) diff --git a/docs/ru/sql-reference/table-functions/merge.md b/docs/ru/sql-reference/table-functions/merge.md index 0822fdfe535..5b33f458468 100644 --- a/docs/ru/sql-reference/table-functions/merge.md +++ b/docs/ru/sql-reference/table-functions/merge.md @@ -9,4 +9,3 @@ toc_title: merge Структура таблицы берётся из первой попавшейся таблицы, подходящей под регулярное выражение. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/merge/) diff --git a/docs/ru/sql-reference/table-functions/mysql.md b/docs/ru/sql-reference/table-functions/mysql.md index 18b34d0bf6c..b90aad22fdf 100644 --- a/docs/ru/sql-reference/table-functions/mysql.md +++ b/docs/ru/sql-reference/table-functions/mysql.md @@ -96,4 +96,3 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123'); - [Движок таблиц ‘MySQL’](../../sql-reference/table-functions/mysql.md) - [Использование MySQL как источника данных для внешнего словаря](../../sql-reference/table-functions/mysql.md#dicts-external_dicts_dict_sources-mysql) -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/table_functions/mysql/) diff --git a/docs/ru/sql-reference/table-functions/numbers.md b/docs/ru/sql-reference/table-functions/numbers.md index 005f400e082..71f63078415 100644 --- a/docs/ru/sql-reference/table-functions/numbers.md +++ b/docs/ru/sql-reference/table-functions/numbers.md @@ -25,4 +25,3 @@ SELECT * FROM system.numbers LIMIT 10; select toDate('2010-01-01') + number as d FROM numbers(365); ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/numbers/) diff --git a/docs/ru/sql-reference/table-functions/odbc.md b/docs/ru/sql-reference/table-functions/odbc.md index 19203123840..557e7d2a15b 100644 --- a/docs/ru/sql-reference/table-functions/odbc.md +++ b/docs/ru/sql-reference/table-functions/odbc.md @@ -103,4 +103,3 @@ SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test') - [Внешние словари ODBC](../../sql-reference/table-functions/odbc.md#dicts-external_dicts_dict_sources-odbc) - [Движок таблиц ODBC](../../sql-reference/table-functions/odbc.md). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/jdbc/) diff --git a/docs/ru/sql-reference/table-functions/remote.md b/docs/ru/sql-reference/table-functions/remote.md index 83b3687f61d..00179abb207 100644 --- a/docs/ru/sql-reference/table-functions/remote.md +++ b/docs/ru/sql-reference/table-functions/remote.md @@ -106,4 +106,3 @@ INSERT INTO FUNCTION remote('127.0.0.1', currentDatabase(), 'remote_table') VALU SELECT * FROM remote_table; ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/table-functions/remote/) diff --git a/docs/ru/sql-reference/table-functions/url.md b/docs/ru/sql-reference/table-functions/url.md index 043a9231e75..0552aa8e076 100644 --- a/docs/ru/sql-reference/table-functions/url.md +++ b/docs/ru/sql-reference/table-functions/url.md @@ -41,4 +41,3 @@ INSERT INTO FUNCTION url('http://127.0.0.1:8123/?query=INSERT+INTO+test_table+FO SELECT * FROM test_table; ``` -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/table-functions/url/) diff --git a/docs/ru/sql-reference/table-functions/view.md b/docs/ru/sql-reference/table-functions/view.md index 8a97253d048..c081ec12747 100644 --- a/docs/ru/sql-reference/table-functions/view.md +++ b/docs/ru/sql-reference/table-functions/view.md @@ -59,4 +59,3 @@ SELECT * FROM cluster(`cluster_name`, view(SELECT a, b, c FROM table_name)) **Смотрите также** - [view](https://clickhouse.tech/docs/ru/engines/table-engines/special/view/#table_engines-view) -[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/table_functions/view/) \ No newline at end of file diff --git a/docs/ru/whats-new/extended-roadmap.md b/docs/ru/whats-new/extended-roadmap.md index 16c7709ec28..7b317d424f1 100644 --- a/docs/ru/whats-new/extended-roadmap.md +++ b/docs/ru/whats-new/extended-roadmap.md @@ -7,4 +7,3 @@ toc_title: Roadmap Планы развития на 2021 год опубликованы для обсуждения [здесь](https://github.com/ClickHouse/ClickHouse/issues/17623). 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/roadmap/) diff --git a/docs/ru/whats-new/security-changelog.md b/docs/ru/whats-new/security-changelog.md index 1f46535833d..e3d26e772c4 100644 --- a/docs/ru/whats-new/security-changelog.md +++ b/docs/ru/whats-new/security-changelog.md @@ -73,4 +73,3 @@ unixODBC позволял указать путь для подключения Обнаружено благодаря: the UK’s National Cyber Security Centre (NCSC) -{## [Оригинальная статья](https://clickhouse.tech/docs/ru/security_changelog/) ##} From 10a885c9b2e90f37a896d02b74e5d8f9474ea8a7 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 15 Mar 2021 14:36:01 +0300 Subject: [PATCH 210/333] return back formatting in client --- programs/client/Client.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index c878a3071c4..3c27908741c 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -2096,10 +2096,10 @@ private: current_format = "Vertical"; /// It is not clear how to write progress with parallel formatting. It may increase code complexity significantly. 
- // if (!need_render_progress) - // block_out_stream = context.getOutputStreamParallelIfPossible(current_format, *out_buf, block); - // else - block_out_stream = context.getOutputStream(current_format, *out_buf, block); + if (!need_render_progress) + block_out_stream = context.getOutputStreamParallelIfPossible(current_format, *out_buf, block); + else + block_out_stream = context.getOutputStream(current_format, *out_buf, block); block_out_stream->writePrefix(); } From 6d6633eca442a6364e9cc6a61f1af3bed2da3dd4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 14:38:28 +0300 Subject: [PATCH 211/333] Fix garbage --- docs/ru/interfaces/formats.md | 16 ++++++++-------- .../data-types/simpleaggregatefunction.md | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 3a61d789e75..67cc80f5cd8 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -1173,7 +1173,7 @@ ClickHouse поддерживает настраиваемую точность Неподдержанные типы данных Parquet: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. -Типы данных столбцов в ClickHouse могут отличаться от типов данных соответствующих полей файла в формате Parquet. При вставке данных, ClickHouse интерпретирует типы данных в соответствии с таблицей выше, а затем [приводит](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) данные к тому типу, который установлен для столбца таблицы. +Типы данных столбцов в ClickHouse могут отличаться от типов данных соответствующих полей файла в формате Parquet. При вставке данных, ClickHouse интерпретирует типы данных в соответствии с таблицей выше, а затем [приводит](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) данные к тому типу, который установлен для столбца таблицы. 
### Вставка и выборка данных {#vstavka-i-vyborka-dannykh} @@ -1230,7 +1230,7 @@ ClickHouse поддерживает настраиваемую точность Неподдержанные типы данных ORC: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`. -Типы данных столбцов в таблицах ClickHouse могут отличаться от типов данных для соответствующих полей ORC. При вставке данных, ClickHouse интерпретирует типы данных ORC согласно таблице соответствия, а затем [приводит](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) данные к типу, установленному для столбца таблицы ClickHouse. +Типы данных столбцов в таблицах ClickHouse могут отличаться от типов данных для соответствующих полей ORC. При вставке данных, ClickHouse интерпретирует типы данных ORC согласно таблице соответствия, а затем [приводит](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) данные к типу, установленному для столбца таблицы ClickHouse. ### Вставка данных {#vstavka-dannykh-1} @@ -1268,7 +1268,7 @@ SELECT * FROM line_as_string; ## Regexp {#data-format-regexp} -Каждая строка импортируемых данных разбирается в соответствии с регулярным выражением. +Каждая строка импортируемых данных разбирается в соответствии с регулярным выражением. При работе с форматом `Regexp` можно использовать следующие параметры: @@ -1279,15 +1279,15 @@ SELECT * FROM line_as_string; - Escaped (как в [TSV](#tabseparated)) - Quoted (как в [Values](#data-format-values)) - Raw (данные импортируются как есть, без сериализации) -- `format_regexp_skip_unmatched` — [UInt8](../sql-reference/data-types/int-uint.md). Признак, будет ли генерироваться исключение в случае, если импортируемые данные не соответствуют регулярному выражению `format_regexp`. Может принимать значение `0` или `1`. +- `format_regexp_skip_unmatched` — [UInt8](../sql-reference/data-types/int-uint.md). 
Признак, будет ли генерироваться исключение в случае, если импортируемые данные не соответствуют регулярному выражению `format_regexp`. Может принимать значение `0` или `1`. -**Использование** +**Использование** -Регулярное выражение (шаблон) из параметра `format_regexp` применяется к каждой строке импортируемых данных. Количество частей в шаблоне (подшаблонов) должно соответствовать количеству колонок в импортируемых данных. +Регулярное выражение (шаблон) из параметра `format_regexp` применяется к каждой строке импортируемых данных. Количество частей в шаблоне (подшаблонов) должно соответствовать количеству колонок в импортируемых данных. -Строки импортируемых данных должны разделяться символом новой строки `'\n'` или символами `"\r\n"` (перенос строки в формате DOS). +Строки импортируемых данных должны разделяться символом новой строки `'\n'` или символами `"\r\n"` (перенос строки в формате DOS). -Данные, выделенные по подшаблонам, интерпретируются в соответствии с типом, указанным в параметре `format_regexp_escaping_rule`. +Данные, выделенные по подшаблонам, интерпретируются в соответствии с типом, указанным в параметре `format_regexp_escaping_rule`. Если строка импортируемых данных не соответствует регулярному выражению и параметр `format_regexp_skip_unmatched` равен 1, строка просто игнорируется. Если же параметр `format_regexp_skip_unmatched` равен 0, генерируется исключение. diff --git a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md index 454add05e8a..0948153362b 100644 --- a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md @@ -21,8 +21,8 @@ - [`maxMap`](../../sql-reference/aggregate-functions/reference/maxmap.md#agg_functions-maxmap) !!! 
note "Примечание" - Значения `SimpleAggregateFunction(func, Type)` отображаются и хранятся так же, как и `Type`, поэтому комбинаторы [-Merge](../../sql-reference/aggregate-functions/combinators.md#aggregate_functions_combinators-merge) и [-State]((../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state) не требуются. - + Значения `SimpleAggregateFunction(func, Type)` отображаются и хранятся так же, как и `Type`, поэтому комбинаторы [-Merge](../../sql-reference/aggregate-functions/combinators.md#aggregate_functions_combinators-merge) и [-State](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-state) не требуются. + `SimpleAggregateFunction` имеет лучшую производительность, чем `AggregateFunction` с той же агрегатной функцией. **Параметры** From c506356cc6e5d93bc67b263ae5b394932a7159e4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 14:41:20 +0300 Subject: [PATCH 212/333] Fix trash links --- docs/ru/operations/system-tables/replication_queue.md | 4 ++-- docs/ru/sql-reference/statements/alter/ttl.md | 6 +++--- docs/ru/sql-reference/table-functions/url.md | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/ru/operations/system-tables/replication_queue.md b/docs/ru/operations/system-tables/replication_queue.md index 2851551955a..56e8c695a21 100644 --- a/docs/ru/operations/system-tables/replication_queue.md +++ b/docs/ru/operations/system-tables/replication_queue.md @@ -70,11 +70,11 @@ num_tries: 36 last_exception: Code: 226, e.displayText() = DB::Exception: Marks file '/opt/clickhouse/data/merge/visits_v2/tmp_fetch_20201130_121373_121384_2/CounterID.mrk' doesn't exist (version 20.8.7.15 (official build)) last_attempt_time: 2020-12-08 17:35:54 num_postponed: 0 -postpone_reason: +postpone_reason: last_postpone_time: 1970-01-01 03:00:00 ``` **Смотрите также** -- [Управление таблицами 
ReplicatedMergeTree](../../sql-reference/statements/system.md/#query-language-system-replicated) +- [Управление таблицами ReplicatedMergeTree](../../sql-reference/statements/system.md#query-language-system-replicated) diff --git a/docs/ru/sql-reference/statements/alter/ttl.md b/docs/ru/sql-reference/statements/alter/ttl.md index 32b35da8627..e949c992bbe 100644 --- a/docs/ru/sql-reference/statements/alter/ttl.md +++ b/docs/ru/sql-reference/statements/alter/ttl.md @@ -18,7 +18,7 @@ ALTER TABLE table-name MODIFY TTL ttl-expression Удалить табличный TTL можно запросом следующего вида: ```sql -ALTER TABLE table_name REMOVE TTL +ALTER TABLE table_name REMOVE TTL ``` **Пример** @@ -64,7 +64,7 @@ ALTER TABLE table_with_ttl REMOVE TTL; Заново вставляем удаленную строку и снова принудительно запускаем очистку по `TTL` с помощью `OPTIMIZE`: -```sql +```sql INSERT INTO table_with_ttl VALUES (now() - INTERVAL 4 MONTH, 2, 'username2'); OPTIMIZE TABLE table_with_ttl FINAL; SELECT * FROM table_with_ttl; @@ -81,5 +81,5 @@ SELECT * FROM table_with_ttl; ### Смотрите также -- Подробнее о [свойстве TTL](../../../engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-ttl). +- Подробнее о [свойстве TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl). diff --git a/docs/ru/sql-reference/table-functions/url.md b/docs/ru/sql-reference/table-functions/url.md index 0552aa8e076..a41a1f53cde 100644 --- a/docs/ru/sql-reference/table-functions/url.md +++ b/docs/ru/sql-reference/table-functions/url.md @@ -5,7 +5,7 @@ toc_title: url # url {#url} -Функция `url` берет данные по указанному адресу `URL` и создает из них таблицу указанной структуры со столбцами указанного формата. +Функция `url` берет данные по указанному адресу `URL` и создает из них таблицу указанной структуры со столбцами указанного формата. Функция `url` может быть использована в запросах `SELECT` и `INSERT` с таблицами на движке [URL](../../engines/table-engines/special/url.md). 
@@ -27,7 +27,7 @@ url(URL, format, structure) **Примеры** -Получение с HTTP-сервера первых 3 строк таблицы с данными в формате [CSV](../../interfaces/formats.md/#csv), содержащей столбцы типа [String](../../sql-reference/data-types/string.md) и [UInt32](../../sql-reference/data-types/int-uint.md). +Получение с HTTP-сервера первых 3 строк таблицы с данными в формате [CSV](../../interfaces/formats.md#csv), содержащей столбцы типа [String](../../sql-reference/data-types/string.md) и [UInt32](../../sql-reference/data-types/int-uint.md). ``` sql SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3; From 588f3ee11ec5cd3f753ceda50fca06650b04a1e9 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 15 Mar 2021 14:43:57 +0300 Subject: [PATCH 213/333] better[2] --- src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h | 2 -- src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp | 2 -- 2 files changed, 4 deletions(-) diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h index 3fcd1f0aadf..584aa364d27 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h @@ -6,8 +6,6 @@ #include #include #include -#include "IO/ReadBuffer.h" -#include "IO/ReadBufferFromString.h" #include "IO/WriteBufferFromString.h" #include #include diff --git a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp index 7ded716b34e..355af038da9 100644 --- a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp +++ b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp @@ -149,8 +149,6 @@ void WriteBufferFromHTTPServerResponse::onProgress(const Progress & progress) { std::lock_guard lock(mutex); - std::cout << StackTrace().toString() << std::endl; - /// Cannot add new headers if body was started to send. 
if (headers_finished_sending) return; From 7978320cc2013c5f82284d8e51bad96a83d7cc44 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 14:49:38 +0300 Subject: [PATCH 214/333] Fix tons of trash --- docs/ru/operations/settings/settings.md | 36 +++++++++---------- .../system-tables/data_type_families.md | 2 +- .../system-tables/detached_parts.md | 4 +-- .../operations/system-tables/table_engines.md | 4 +-- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index ab24b7f3a44..d35b40e6556 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -134,7 +134,7 @@ ClickHouse применяет настройку в тех случаях, ко ## max_http_get_redirects {#setting-max_http_get_redirects} -Ограничивает максимальное количество переходов по редиректам в таблицах с движком [URL](../../engines/table-engines/special/url.md) при выполнении HTTP запросов методом GET. Настройка применяется для обоих типов таблиц: созданных запросом [CREATE TABLE](../../sql_reference/create/#create-table-query) и с помощью табличной функции [url](../../sql-reference/table-functions/url.md). +Ограничивает максимальное количество переходов по редиректам в таблицах с движком [URL](../../engines/table-engines/special/url.md) при выполнении HTTP запросов методом GET. Настройка применяется для обоих типов таблиц: созданных запросом [CREATE TABLE](../../sql_reference/statements/create/table.md#create-table-query) и с помощью табличной функции [url](../../sql-reference/table-functions/url.md). 
Возможные значения: @@ -306,7 +306,7 @@ INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), ( CREATE TABLE table_with_enum_column_for_tsv_insert (Id Int32,Value Enum('first' = 1, 'second' = 2)) ENGINE=Memory(); ``` -При включенной настройке `input_format_tsv_enum_as_number`: +При включенной настройке `input_format_tsv_enum_as_number`: ```sql SET input_format_tsv_enum_as_number = 1; @@ -556,7 +556,7 @@ ClickHouse может парсить только базовый формат `Y Возможные значения: -- 0 — Устаревшее поведение отключено. +- 0 — Устаревшее поведение отключено. - 1 — Устаревшее поведение включено. Значение по умолчанию: 0. @@ -1236,7 +1236,7 @@ SELECT area/period FROM account_orders FORMAT JSON; CREATE TABLE table_with_enum_column_for_csv_insert (Id Int32,Value Enum('first' = 1, 'second' = 2)) ENGINE=Memory(); ``` -При включенной настройке `input_format_csv_enum_as_number`: +При включенной настройке `input_format_csv_enum_as_number`: ```sql SET input_format_csv_enum_as_number = 1; @@ -1731,7 +1731,7 @@ ClickHouse генерирует исключение Включает или отключает режим синхронного добавления данных в распределенные таблицы (таблицы с движком [Distributed](../../engines/table-engines/special/distributed.md#distributed)). -По умолчанию ClickHouse вставляет данные в распределённую таблицу в асинхронном режиме. Если `insert_distributed_sync=1`, то данные вставляются сихронно, а запрос `INSERT` считается выполненным успешно, когда данные записаны на все шарды (по крайней мере на одну реплику для каждого шарда, если `internal_replication = true`). +По умолчанию ClickHouse вставляет данные в распределённую таблицу в асинхронном режиме. Если `insert_distributed_sync=1`, то данные вставляются сихронно, а запрос `INSERT` считается выполненным успешно, когда данные записаны на все шарды (по крайней мере на одну реплику для каждого шарда, если `internal_replication = true`). 
Возможные значения: @@ -2067,11 +2067,11 @@ SELECT * FROM a; ## ttl_only_drop_parts {#ttl_only_drop_parts} -Для таблиц [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) включает или отключает возможность полного удаления кусков данных, в которых все записи устарели. +Для таблиц [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) включает или отключает возможность полного удаления кусков данных, в которых все записи устарели. -Когда настройка `ttl_only_drop_parts` отключена (т.е. по умолчанию), сервер лишь удаляет устаревшие записи в соответствии с их временем жизни (TTL). +Когда настройка `ttl_only_drop_parts` отключена (т.е. по умолчанию), сервер лишь удаляет устаревшие записи в соответствии с их временем жизни (TTL). -Когда настройка `ttl_only_drop_parts` включена, сервер целиком удаляет куски данных, в которых все записи устарели. +Когда настройка `ttl_only_drop_parts` включена, сервер целиком удаляет куски данных, в которых все записи устарели. Удаление целых кусков данных вместо удаления отдельных записей позволяет устанавливать меньший таймаут `merge_with_ttl_timeout` и уменьшает нагрузку на сервер, что способствует росту производительности. @@ -2082,18 +2082,18 @@ SELECT * FROM a; Значение по умолчанию: `0`. -**См. также** +**См. также** - [Секции и настройки запроса CREATE TABLE](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-query-clauses) (настройка `merge_with_ttl_timeout`) - [Table TTL](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) ## output_format_pretty_max_value_width {#output_format_pretty_max_value_width} -Ограничивает длину значения, выводимого в формате [Pretty](../../interfaces/formats.md#pretty). Если значение длиннее указанного количества символов, оно обрезается. +Ограничивает длину значения, выводимого в формате [Pretty](../../interfaces/formats.md#pretty). Если значение длиннее указанного количества символов, оно обрезается. 
Возможные значения: -- Положительное целое число. +- Положительное целое число. - 0 — значение обрезается полностью. Значение по умолчанию: `10000` символов. @@ -2242,17 +2242,17 @@ SELECT * FROM system.events WHERE event='QueryMemoryLimitExceeded'; Включает или отключает сохранение типа `Nullable` для аргумента функции [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast). -Если настройка включена, то когда в функцию `CAST` передается аргумент с типом `Nullable`, функция возвращает результат, также преобразованный к типу `Nullable`. -Если настройка отключена, то функция `CAST` всегда возвращает результат строго указанного типа. +Если настройка включена, то когда в функцию `CAST` передается аргумент с типом `Nullable`, функция возвращает результат, также преобразованный к типу `Nullable`. +Если настройка отключена, то функция `CAST` всегда возвращает результат строго указанного типа. Возможные значения: - 0 — функция `CAST` преобразует аргумент строго к указанному типу. -- 1 — если аргумент имеет тип `Nullable`, то функция `CAST` преобразует его к типу `Nullable` для указанного типа. +- 1 — если аргумент имеет тип `Nullable`, то функция `CAST` преобразует его к типу `Nullable` для указанного типа. Значение по умолчанию: `0`. -**Примеры** +**Примеры** Запрос возвращает аргумент, преобразованный строго к указанному типу: @@ -2284,9 +2284,9 @@ SELECT CAST(toNullable(toInt32(0)) AS Int32) as x, toTypeName(x); └───┴───────────────────────────────────────────────────┘ ``` -**См. также** +**См. 
также** -- Функция [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) +- Функция [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) ## persistent {#persistent} @@ -2364,7 +2364,7 @@ SELECT number FROM numbers(3) FORMAT JSONEachRow; [ {"number":"0"}, {"number":"1"}, -{"number":"2"} +{"number":"2"} ] ``` diff --git a/docs/ru/operations/system-tables/data_type_families.md b/docs/ru/operations/system-tables/data_type_families.md index 3a9a4a3413a..ba4e5e64ec3 100644 --- a/docs/ru/operations/system-tables/data_type_families.md +++ b/docs/ru/operations/system-tables/data_type_families.md @@ -1,6 +1,6 @@ # system.data_type_families {#system_tables-data_type_families} -Содержит информацию о поддерживаемых [типах данных](../../sql-reference/data-types/). +Содержит информацию о поддерживаемых [типах данных](../../sql-reference/data-types/index.md). Столбцы: diff --git a/docs/ru/operations/system-tables/detached_parts.md b/docs/ru/operations/system-tables/detached_parts.md index 23fd4882c44..5e0aa29001f 100644 --- a/docs/ru/operations/system-tables/detached_parts.md +++ b/docs/ru/operations/system-tables/detached_parts.md @@ -1,6 +1,6 @@ # system.detached_parts {#system_tables-detached_parts} Содержит информацию об отсоединённых кусках таблиц семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). Столбец `reason` содержит причину, по которой кусок был отсоединён. Для кусов, отсоединённых пользователем, `reason` содержит пустую строку. -Такие куски могут быть присоединены с помощью [ALTER TABLE ATTACH PARTITION\|PART](../../sql_reference/alter/#alter_attach-partition). Остальные столбцы описаны в [system.parts](#system_tables-parts). -Если имя куска некорректно, значения некоторых столбцов могут быть `NULL`. Такие куски могут быть удалены с помощью [ALTER TABLE DROP DETACHED PART](../../sql_reference/alter/#alter_drop-detached). 
+Такие куски могут быть присоединены с помощью [ALTER TABLE ATTACH PARTITION|PART](../../sql_reference/statements/alter/index.md#alter_attach-partition). Остальные столбцы описаны в [system.parts](#system_tables-parts). +Если имя куска некорректно, значения некоторых столбцов могут быть `NULL`. Такие куски могут быть удалены с помощью [ALTER TABLE DROP DETACHED PART](../../sql_reference/statements/alter/index.md#alter_drop-detached). diff --git a/docs/ru/operations/system-tables/table_engines.md b/docs/ru/operations/system-tables/table_engines.md index 6af29753bbf..b6f6d3decc2 100644 --- a/docs/ru/operations/system-tables/table_engines.md +++ b/docs/ru/operations/system-tables/table_engines.md @@ -6,8 +6,8 @@ - `name` (String) — имя движка. - `supports_settings` (UInt8) — флаг, показывающий поддержку секции `SETTINGS`. -- `supports_skipping_indices` (UInt8) — флаг, показывающий поддержку [индексов пропуска данных](table_engines/mergetree/#table_engine-mergetree-data_skipping-indexes). -- `supports_ttl` (UInt8) — флаг, показывающий поддержку [TTL](table_engines/mergetree/#table_engine-mergetree-ttl). +- `supports_skipping_indices` (UInt8) — флаг, показывающий поддержку [индексов пропуска данных](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-data_skipping-indexes). +- `supports_ttl` (UInt8) — флаг, показывающий поддержку [TTL](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl). - `supports_sort_order` (UInt8) — флаг, показывающий поддержку секций `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` и `SAMPLE_BY`. - `supports_replication` (UInt8) — флаг, показывающий поддержку [репликации](../../engines/table-engines/mergetree-family/replication.md). - `supports_deduplication` (UInt8) — флаг, показывающий наличие в движке дедупликации данных. 
From 1f92c8ce581bfb8c820a7220a63ae005ed13316c Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 15 Mar 2021 14:51:24 +0300 Subject: [PATCH 215/333] parallel formatting everywhere --- programs/obfuscator/Obfuscator.cpp | 2 +- programs/odbc-bridge/MainHandler.cpp | 2 +- src/Dictionaries/HTTPDictionarySource.cpp | 4 ++-- src/Storages/HDFS/StorageHDFS.cpp | 2 +- src/Storages/StorageS3.cpp | 2 +- src/Storages/StorageURL.cpp | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index 950db4e4f05..3ccbfd44357 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -1180,7 +1180,7 @@ try file_in.seek(0, SEEK_SET); BlockInputStreamPtr input = context.getInputFormat(input_format, file_in, header, max_block_size); - BlockOutputStreamPtr output = context.getOutputStream(output_format, file_out, header); + BlockOutputStreamPtr output = context.getOutputStreamParallelIfPossible(output_format, file_out, header); if (processed_rows + source_rows > limit) input = std::make_shared(input, limit - processed_rows, 0); diff --git a/programs/odbc-bridge/MainHandler.cpp b/programs/odbc-bridge/MainHandler.cpp index 4fcc9deea6a..079fc371ab4 100644 --- a/programs/odbc-bridge/MainHandler.cpp +++ b/programs/odbc-bridge/MainHandler.cpp @@ -176,7 +176,7 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse std::string query = params.get("query"); LOG_TRACE(log, "Query: {}", query); - BlockOutputStreamPtr writer = FormatFactory::instance().getOutputStream(format, out, *sample_block, context); + BlockOutputStreamPtr writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, out, *sample_block, context); auto pool = getPool(connection_string); ODBCBlockInputStream inp(pool->get(), query, *sample_block, max_block_size); copyData(inp, *writer); diff --git a/src/Dictionaries/HTTPDictionarySource.cpp 
b/src/Dictionaries/HTTPDictionarySource.cpp index ddcac117e58..62bf478afc4 100644 --- a/src/Dictionaries/HTTPDictionarySource.cpp +++ b/src/Dictionaries/HTTPDictionarySource.cpp @@ -136,7 +136,7 @@ BlockInputStreamPtr HTTPDictionarySource::loadIds(const std::vector & id ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback = [block, this](std::ostream & ostr) { WriteBufferFromOStream out_buffer(ostr); - auto output_stream = context.getOutputStream(format, out_buffer, sample_block); + auto output_stream = context.getOutputStreamParallelIfPossible(format, out_buffer, sample_block); formatBlock(output_stream, block); }; @@ -157,7 +157,7 @@ BlockInputStreamPtr HTTPDictionarySource::loadKeys(const Columns & key_columns, ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback = [block, this](std::ostream & ostr) { WriteBufferFromOStream out_buffer(ostr); - auto output_stream = context.getOutputStream(format, out_buffer, sample_block); + auto output_stream = context.getOutputStreamParallelIfPossible(format, out_buffer, sample_block); formatBlock(output_stream, block); }; diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index f7afd4a497d..e26d3375c33 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -183,7 +183,7 @@ public: : sample_block(sample_block_) { write_buf = wrapWriteBufferWithCompressionMethod(std::make_unique(uri, context.getGlobalContext().getConfigRef()), compression_method, 3); - writer = FormatFactory::instance().getOutputStream(format, *write_buf, sample_block, context); + writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, *write_buf, sample_block, context); } Block getHeader() const override diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index a31a7fa0944..1cbbe14d09f 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -155,7 +155,7 @@ namespace { write_buf = wrapWriteBufferWithCompressionMethod( 
std::make_unique(client, bucket, key, min_upload_part_size, max_single_part_upload_size), compression_method, 3); - writer = FormatFactory::instance().getOutputStream(format, *write_buf, sample_block, context); + writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, *write_buf, sample_block, context); } Block getHeader() const override diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index ca984f9ece9..8b16a08b957 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -155,7 +155,7 @@ StorageURLBlockOutputStream::StorageURLBlockOutputStream(const Poco::URI & uri, write_buf = wrapWriteBufferWithCompressionMethod( std::make_unique(uri, Poco::Net::HTTPRequest::HTTP_POST, timeouts), compression_method, 3); - writer = FormatFactory::instance().getOutputStream(format, *write_buf, sample_block, + writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, *write_buf, sample_block, context, {} /* write callback */, format_settings); } From da08d299552b73d5d07b3dbc704bbb7fdac79457 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 14:56:04 +0300 Subject: [PATCH 216/333] Remove trash and fix broken links --- docs/ru/operations/settings/settings.md | 2 +- docs/ru/operations/system-tables/detached_parts.md | 4 ++-- docs/tools/single_page.py | 4 +++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index d35b40e6556..c937a5f7112 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -134,7 +134,7 @@ ClickHouse применяет настройку в тех случаях, ко ## max_http_get_redirects {#setting-max_http_get_redirects} -Ограничивает максимальное количество переходов по редиректам в таблицах с движком [URL](../../engines/table-engines/special/url.md) при выполнении HTTP запросов методом GET. 
Настройка применяется для обоих типов таблиц: созданных запросом [CREATE TABLE](../../sql_reference/statements/create/table.md#create-table-query) и с помощью табличной функции [url](../../sql-reference/table-functions/url.md). +Ограничивает максимальное количество переходов по редиректам в таблицах с движком [URL](../../engines/table-engines/special/url.md) при выполнении HTTP запросов методом GET. Настройка применяется для обоих типов таблиц: созданных запросом [CREATE TABLE](../../sql-reference/statements/create/table.md#create-table-query) и с помощью табличной функции [url](../../sql-reference/table-functions/url.md). Возможные значения: diff --git a/docs/ru/operations/system-tables/detached_parts.md b/docs/ru/operations/system-tables/detached_parts.md index 5e0aa29001f..7abed6500aa 100644 --- a/docs/ru/operations/system-tables/detached_parts.md +++ b/docs/ru/operations/system-tables/detached_parts.md @@ -1,6 +1,6 @@ # system.detached_parts {#system_tables-detached_parts} Содержит информацию об отсоединённых кусках таблиц семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). Столбец `reason` содержит причину, по которой кусок был отсоединён. Для кусов, отсоединённых пользователем, `reason` содержит пустую строку. -Такие куски могут быть присоединены с помощью [ALTER TABLE ATTACH PARTITION|PART](../../sql_reference/statements/alter/index.md#alter_attach-partition). Остальные столбцы описаны в [system.parts](#system_tables-parts). -Если имя куска некорректно, значения некоторых столбцов могут быть `NULL`. Такие куски могут быть удалены с помощью [ALTER TABLE DROP DETACHED PART](../../sql_reference/statements/alter/index.md#alter_drop-detached). +Такие куски могут быть присоединены с помощью [ALTER TABLE ATTACH PARTITION|PART](../../sql-reference/statements/alter/index.md#alter_attach-partition). Остальные столбцы описаны в [system.parts](#system_tables-parts). +Если имя куска некорректно, значения некоторых столбцов могут быть `NULL`. 
Такие куски могут быть удалены с помощью [ALTER TABLE DROP DETACHED PART](../../sql-reference/statements/alter/index.md#alter_drop-detached). diff --git a/docs/tools/single_page.py b/docs/tools/single_page.py index f885a84ec89..b88df5a03cb 100644 --- a/docs/tools/single_page.py +++ b/docs/tools/single_page.py @@ -28,13 +28,15 @@ anchor_not_allowed_chars = re.compile(r'[^\w\-]') def generate_anchor_from_path(path): return re.sub(anchor_not_allowed_chars, '-', path) +absolute_link = re.compile(r'^https?://') + def replace_link(match, path): title = match.group(1) link = match.group(2) # Not a relative link - if link.startswith('http'): + if re.search(absolute_link, link): return match.group(0) if link.endswith('/'): From c92d8c776cb650f96d3303e1982d04cdfae7ee56 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 15 Mar 2021 15:14:58 +0300 Subject: [PATCH 217/333] try to avoid race in grpc server --- src/Server/GRPCServer.cpp | 6 ++++-- tests/integration/test_grpc_protocol/configs/users.xml | 8 -------- tests/integration/test_grpc_protocol/test.py | 2 +- 3 files changed, 5 insertions(+), 11 deletions(-) delete mode 100644 tests/integration/test_grpc_protocol/configs/users.xml diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index ede9bbff063..52a2c106488 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -783,8 +783,6 @@ namespace if (!io.out) return; - initializeBlockInputStream(io.out->getHeader()); - bool has_data_to_insert = (insert_query && insert_query->data) || !query_info.input_data().empty() || query_info.next_query_info(); if (!has_data_to_insert) @@ -795,6 +793,10 @@ namespace throw Exception("No data to insert", ErrorCodes::NO_DATA_TO_INSERT); } + /// This is significant, because parallel parsing may be used. + /// So we mustn't touch the input stream from other thread. 
+ initializeBlockInputStream(io.out->getHeader()); + block_input_stream->readPrefix(); io.out->writePrefix(); diff --git a/tests/integration/test_grpc_protocol/configs/users.xml b/tests/integration/test_grpc_protocol/configs/users.xml deleted file mode 100644 index 2ae1a397fe5..00000000000 --- a/tests/integration/test_grpc_protocol/configs/users.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - 0 - - - diff --git a/tests/integration/test_grpc_protocol/test.py b/tests/integration/test_grpc_protocol/test.py index 594879427ca..d8604276281 100644 --- a/tests/integration/test_grpc_protocol/test.py +++ b/tests/integration/test_grpc_protocol/test.py @@ -27,7 +27,7 @@ import clickhouse_grpc_pb2_grpc config_dir = os.path.join(SCRIPT_DIR, './configs') cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/grpc_config.xml'], user_configs=["configs/users.xml"]) +node = cluster.add_instance('node', main_configs=['configs/grpc_config.xml']) grpc_port = 9100 main_channel = None From 9820ab3558bf896f54c2232e102f84f674753ea7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 15:46:19 +0300 Subject: [PATCH 218/333] Fix not working links on single-page docs --- website/js/base.js | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/website/js/base.js b/website/js/base.js index 6cec8313bd4..aca6f407d24 100644 --- a/website/js/base.js +++ b/website/js/base.js @@ -16,23 +16,6 @@ if (target_id && target_id.startsWith('logo-')) { selector = '#'; } - if (selector && selector.startsWith('#') && !is_tab && !is_collapse && !is_rating) { - event.preventDefault(); - var dst = window.location.href.replace(window.location.hash, ''); - var offset = 0; - - if (selector !== '#') { - var destination = $(selector); - if (destination.length) { - offset = destination.offset().top - $('#top-nav').height() * 1.5; - dst += selector; - } - } - $('html, body').animate({ - scrollTop: offset - }, 500); - window.history.replaceState('', 
document.title, dst); - } }); var top_nav = $('#top-nav.sticky-top'); From 4028461c77067c509950145f2a31c55001d778e3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 15:54:44 +0300 Subject: [PATCH 219/333] Whitespaces --- docs/ru/introduction/index.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/ru/introduction/index.md b/docs/ru/introduction/index.md index c37cde09060..99f8aad0531 100644 --- a/docs/ru/introduction/index.md +++ b/docs/ru/introduction/index.md @@ -2,5 +2,3 @@ toc_folder_title: "Введение" toc_priority: 1 --- - - From 67a24de661ec0c95cf5e506c433cd37331f83081 Mon Sep 17 00:00:00 2001 From: Vladimir Date: Mon, 15 Mar 2021 15:54:51 +0300 Subject: [PATCH 220/333] Apply suggestions from code review --- docs/en/sql-reference/functions/bitmap-functions.md | 2 +- docs/ru/sql-reference/aggregate-functions/combinators.md | 4 ++-- .../sql-reference/aggregate-functions/reference/count.md | 2 +- .../aggregate-functions/reference/quantile.md | 2 +- .../reference/quantiledeterministic.md | 4 ++-- .../aggregate-functions/reference/quantileexact.md | 9 ++++++--- .../reference/quantileexactweighted.md | 2 +- .../aggregate-functions/reference/quantiletdigest.md | 2 +- .../reference/quantiletdigestweighted.md | 2 +- .../aggregate-functions/reference/quantiletiming.md | 2 +- .../reference/quantiletimingweighted.md | 2 +- docs/ru/sql-reference/functions/array-functions.md | 6 +++--- docs/ru/sql-reference/functions/conditional-functions.md | 2 +- 13 files changed, 22 insertions(+), 19 deletions(-) diff --git a/docs/en/sql-reference/functions/bitmap-functions.md b/docs/en/sql-reference/functions/bitmap-functions.md index c809aee85fe..36675a37e61 100644 --- a/docs/en/sql-reference/functions/bitmap-functions.md +++ b/docs/en/sql-reference/functions/bitmap-functions.md @@ -73,7 +73,7 @@ bitmapSubsetInRange(bitmap, range_start, range_end) - `bitmap` – [Bitmap object](#bitmap_functions-bitmapbuild). - `range_start` – Range start point. 
Type: [UInt32](../../sql-reference/data-types/int-uint.md). -- `range_end` – Range end point(excluded). Type: [UInt32](../../sql-reference/data-types/int-uint.md). +- `range_end` – Range end point (excluded). Type: [UInt32](../../sql-reference/data-types/int-uint.md). **Example** diff --git a/docs/ru/sql-reference/aggregate-functions/combinators.md b/docs/ru/sql-reference/aggregate-functions/combinators.md index 7d56fe7bafc..4310009ad09 100644 --- a/docs/ru/sql-reference/aggregate-functions/combinators.md +++ b/docs/ru/sql-reference/aggregate-functions/combinators.md @@ -72,7 +72,7 @@ toc_title: "Комбинаторы агрегатных функций" **Аргументы** -- `x` — параметры агрегатной функции. +- `x` — аргументы агрегатной функции. **Возращаемые зачения** @@ -133,7 +133,7 @@ FROM **Аргументы** -- `x` — параметры агрегатной функции. +- `x` — аргументы агрегатной функции. **Возвращаемые значения** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/count.md b/docs/ru/sql-reference/aggregate-functions/reference/count.md index a9135a35f20..ab6de9379b2 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/count.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/count.md @@ -21,7 +21,7 @@ ClickHouse поддерживает следующие виды синтакси **Возвращаемое значение** - Если функция вызывается без параметров, она вычисляет количество строк. -- Если передаётся [выражение](../../syntax.md#syntax-expressions), то функция вычисляет количество раз, когда выражение возвращает не NULL. Если выражение возвращает значение типа [Nullable](../../../sql-reference/data-types/nullable.md), то результат `count` не становится `Nullable`. Функция возвращает 0, если выражение возвращает `NULL` для всех строк. +- Если передаётся [выражение](../../syntax.md#syntax-expressions), то функция подсчитывает количество раз, когда выражение не равно NULL. Если выражение имеет тип [Nullable](../../../sql-reference/data-types/nullable.md), то результат `count` не становится `Nullable`. 
Функция возвращает 0, если выражение равно `NULL` для всех строк. В обоих случаях тип возвращаемого значения [UInt64](../../../sql-reference/data-types/int-uint.md). diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantile.md b/docs/ru/sql-reference/aggregate-functions/reference/quantile.md index 7cc4f8c7aef..a73e8c3abad 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantile.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantile.md @@ -21,7 +21,7 @@ quantile(level)(expr) **Аргументы** - `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md index 3c03c356ab9..b7ab7429fb3 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md @@ -21,8 +21,8 @@ quantileDeterministic(level)(expr, determinator) **Аргументы** - `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. 
Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). -- `determinator` — число, хэш которого используется при сэмплировании в алгоритме reservoir sampling, чтобы сделать результат детерминированным. В качестве детерминатора можно использовать любое определённое положительное число, например, идентификатор пользователя или события. Если одно и то же значение детерминатора попадается в выборке слишком часто, то функция выдаёт некорректный результат. +- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `determinator` — число, хэш которого используется при сэмплировании в алгоритме «Reservoir sampling», чтобы сделать результат детерминированным. В качестве значения можно использовать любое определённое положительное число, например, идентификатор пользователя или события. Если одно и то же значение попадается в выборке слишком часто, то функция выдаёт некорректный результат. **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md index 452b810cf03..82cf687a55c 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md @@ -21,7 +21,8 @@ quantileExact(level)(expr) **Аргументы** - `level` — уровень квантили. Опционально. 
Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). + **Возвращаемое значение** @@ -80,7 +81,8 @@ quantileExact(level)(expr) **Аргументы** - `level` — уровень квантили. Опциональный параметр. Константное занчение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://en.wikipedia.org/wiki/Median). -- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). +- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). + **Возвращаемое значение** @@ -130,7 +132,8 @@ quantileExactHigh(level)(expr) **Аргументы** - `level` — уровень квантили. Опциональный параметр. Константное занчение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. 
При `level=0.5` функция вычисляет [медиану](https://en.wikipedia.org/wiki/Median). -- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). +- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types), [Date](../../../sql-reference/data-types/date.md) или [DateTime](../../../sql-reference/data-types/datetime.md). + **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md index ee55aaec121..7a7a1f1d412 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md @@ -21,7 +21,7 @@ quantileExactWeighted(level)(expr, weight) **Аргументы** - `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). - `weight` — столбец с весам элементов последовательности. 
Вес — это количество повторений элемента в последовательности. **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md index a119ca940b6..63593c4f468 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md @@ -23,7 +23,7 @@ quantileTDigest(level)(expr) **Аргументы** - `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md index a00c1f8af58..3dee2ffdc27 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md @@ -23,7 +23,7 @@ quantileTDigestWeighted(level)(expr, weight) **Аргументы** - `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. 
Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). +- `expr` — выражение, зависящее от значений столбцов, возвращающее данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md). - `weight` — столбец с весам элементов последовательности. Вес — это количество повторений элемента в последовательности. **Возвращаемое значение** diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md index 6131f2035cb..8921fc7766b 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md @@ -22,7 +22,7 @@ quantileTiming(level)(expr) - `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — [выражение](../../syntax.md#syntax-expressions) над значения столбца, которые возвращают данные типа [Float\*](../../../sql-reference/data-types/float.md). +- `expr` — [выражение](../../syntax.md#syntax-expressions), зависящее от значений столбцов, возвращающее данные типа [Float\*](../../../sql-reference/data-types/float.md). - Если в функцию передать отрицательные значения, то её поведение не определено. 
- Если значение больше, чем 30 000 (например, время загрузки страницы превышает 30 секунд), то оно приравнивается к 30 000. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md index 58a0a4599f9..79a709181fe 100644 --- a/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md @@ -22,7 +22,7 @@ quantileTimingWeighted(level)(expr, weight) - `level` — уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — [выражение](../../syntax.md#syntax-expressions) над значения столбца, которые возвращают данные типа [Float\*](../../../sql-reference/data-types/float.md). +- `expr` — [выражение](../../syntax.md#syntax-expressions), зависящее от значений столбцов, возвращающее данные типа [Float\*](../../../sql-reference/data-types/float.md). - Если в функцию передать отрицательные значения, то её поведение не определено. - Если значение больше, чем 30 000 (например, время загрузки страницы превышает 30 секунд), то оно приравнивается к 30 000. diff --git a/docs/ru/sql-reference/functions/array-functions.md b/docs/ru/sql-reference/functions/array-functions.md index 965f8f725d7..51a34f9700f 100644 --- a/docs/ru/sql-reference/functions/array-functions.md +++ b/docs/ru/sql-reference/functions/array-functions.md @@ -371,7 +371,7 @@ arrayPushBack(array, single_value) **Аргументы** - `array` – массив. -- `single_value` – одиночное значение. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. 
Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`. Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. +- `single_value` – значение добавляемого элемента. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`, в этом случае функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. **Пример** @@ -396,7 +396,7 @@ arrayPushFront(array, single_value) **Аргументы** - `array` – массив. -- `single_value` – одиночное значение. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`. Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. +- `single_value` – значение добавляемого элемента. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе «[Типы данных](../../sql-reference/functions/array-functions.md#data_types)». Может быть равно `NULL`, в этом случае функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. 
**Пример** @@ -890,7 +890,7 @@ arrayReduceInRanges(agg_func, ranges, arr1, arr2, ..., arrN) **Аргументы** - `agg_func` — имя агрегатной функции, которая должна быть [строковой](../../sql-reference/data-types/string.md) константой. -- `ranges` — диапазоны для агрегирования, которые должны быть [массивом](../../sql-reference/data-types/array.md) of [кортежей](../../sql-reference/data-types/tuple.md) который содержит индекс и длину каждого диапазона. +- `ranges` — диапазоны для агрегирования, которые должны быть [массивом](../../sql-reference/data-types/array.md) of [кортежей](../../sql-reference/data-types/tuple.md) содержащих индекс и длину каждого диапазона. - `arr` — любое количество столбцов типа [Array](../../sql-reference/data-types/array.md) в качестве параметров агрегатной функции. **Возвращаемое значение** diff --git a/docs/ru/sql-reference/functions/conditional-functions.md b/docs/ru/sql-reference/functions/conditional-functions.md index 1ca57ea8eb1..e70e4e2c02d 100644 --- a/docs/ru/sql-reference/functions/conditional-functions.md +++ b/docs/ru/sql-reference/functions/conditional-functions.md @@ -19,7 +19,7 @@ SELECT if(cond, then, else) **Аргументы** -- `cond` – условие, которое может быть равно 0 или нет. Может быть [UInt8](../../sql-reference/functions/conditional-functions.md) или `NULL`. +- `cond` – проверяемое условие. Может быть [UInt8](../../sql-reference/functions/conditional-functions.md) или `NULL`. - `then` – возвращается результат выражения, если условие `cond` истинно. - `else` – возвращается результат выражения, если условие `cond` ложно. 
From d7c43291b14092c7a393b5d3bfc2f9c8dd636c0b Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Mon, 15 Mar 2021 17:23:04 +0300 Subject: [PATCH 221/333] improve hung check in stress test --- docker/test/stress/run.sh | 1 + docker/test/stress/stress | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 9ddf7421934..3594eead992 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -130,6 +130,7 @@ zgrep -Fa "########################################" /test_output/* > /dev/null pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||: mv /var/log/clickhouse-server/stderr.log /test_output/ +tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||: # Write check result into check_status.tsv clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by (lower(test) like '%hung%') LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv diff --git a/docker/test/stress/stress b/docker/test/stress/stress index a6f1ae19303..25a705ecbd1 100755 --- a/docker/test/stress/stress +++ b/docker/test/stress/stress @@ -67,6 +67,10 @@ def prepare_for_hung_check(): logging.info("Will terminate gdb (if any)") call("kill -TERM $(pidof gdb)", shell=True, stderr=STDOUT) + # Some tests set too low memory limit for default user and forget to reset in back. + # It may cause SYSTEM queries to fail, let's disable memory limit. + call("clickhouse client --max_memory_usage_for_user=0 -q 'SELECT 1 FORMAT Null'", shell=True, stderr=STDOUT) + # Some tests execute SYSTEM STOP MERGES or similar queries. # It may cause some ALTERs to hang. # Possibly we should fix tests and forbid to use such queries without specifying table. 
@@ -78,7 +82,13 @@ def prepare_for_hung_check(): call("clickhouse client -q 'SYSTEM START REPLICATED SENDS'", shell=True, stderr=STDOUT) call("clickhouse client -q 'SYSTEM START REPLICATION QUEUES'", shell=True, stderr=STDOUT) - time.sleep(30) + # Issue #21004, live views are experimental, so let's just suppress it + call("""clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" """, shell=True, stderr=STDOUT) + + # Wait for last queries to finish if any, not longer than 120 seconds + call("""clickhouse client -q "select sleepEachRow(( + select maxOrDefault(120 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 120 + ) / 120) from numbers(120) format Null" """, shell=True, stderr=STDOUT) if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') From 23a1c634602c94fa010c238088c0c0706d4d2b80 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 15 Mar 2021 17:40:51 +0300 Subject: [PATCH 222/333] Update StorageURL.cpp --- src/Storages/StorageURL.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 8b16a08b957..ca984f9ece9 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -155,7 +155,7 @@ StorageURLBlockOutputStream::StorageURLBlockOutputStream(const Poco::URI & uri, write_buf = wrapWriteBufferWithCompressionMethod( std::make_unique(uri, Poco::Net::HTTPRequest::HTTP_POST, timeouts), compression_method, 3); - writer = FormatFactory::instance().getOutputStreamParallelIfPossible(format, *write_buf, sample_block, + writer = FormatFactory::instance().getOutputStream(format, *write_buf, sample_block, context, {} /* write callback */, format_settings); } From 4e35cf55098ea8df3f68542f46eaed4420f76838 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 15 Mar 2021 19:26:44 +0300 Subject: [PATCH 223/333] Update 
src/Storages/StoragePostgreSQL.cpp Co-authored-by: tavplubix --- src/Storages/StoragePostgreSQL.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp index 3e122ed2fc7..ac1a5569293 100644 --- a/src/Storages/StoragePostgreSQL.cpp +++ b/src/Storages/StoragePostgreSQL.cpp @@ -298,7 +298,7 @@ void registerStoragePostgreSQL(StorageFactory & factory) ASTs & engine_args = args.engine_args; if (engine_args.size() < 5 || engine_args.size() > 6) - throw Exception("Storage PostgreSQL requires 5-6 parameters: " + throw Exception("Storage PostgreSQL requires from 5 to 6 parameters: " "PostgreSQL('host:port', 'database', 'table', 'username', 'password' [, 'schema']", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); From dec1e9eb6fb5c9402bd41ffd5db6f691cdb5dc9f Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 15 Mar 2021 19:40:48 +0300 Subject: [PATCH 224/333] make the fuzzer use sources from the CI --- docker/test/fuzzer/run-fuzzer.sh | 53 ++++++++++++++------------------ 1 file changed, 23 insertions(+), 30 deletions(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 9af401238a3..22fe4220aaf 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -14,35 +14,31 @@ BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-11_debug_none_bundled_unsplitted function clone { -( + # The download() function is dependent on CI binaries anyway, so we can take + # the repo from the CI as well. For local runs, start directly from the "fuzz" + # stage. rm -rf ch ||: mkdir ch cd ch - - git init - git remote add origin https://github.com/ClickHouse/ClickHouse - - # Network is unreliable. GitHub neither. 
- for _ in {1..100}; do git fetch --depth=100 origin "$SHA_TO_TEST" && break; sleep 1; done - # Used to obtain the list of modified or added tests - for _ in {1..100}; do git fetch --depth=100 origin master && break; sleep 1; done - - # If not master, try to fetch pull/.../{head,merge} - if [ "$PR_TO_TEST" != "0" ] - then - for _ in {1..100}; do git fetch --depth=100 origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*" && break; sleep 1; done - fi - - git checkout "$SHA_TO_TEST" -) + wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz" + tar -xvf clickhouse_no_subs.tar.gz + tree ||: + ls -lath ||: } function download { - wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse" + wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse" & + wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/ci-changed-files.txt" & + wait + chmod +x clickhouse ln -s ./clickhouse ./clickhouse-server ln -s ./clickhouse ./clickhouse-client + + + # clickhouse-server is in the current dir + export PATH="$PWD:$PATH" } function configure @@ -77,22 +73,19 @@ function watchdog function fuzz { # Obtain the list of newly added tests. They will be fuzzed in more extreme way than other tests. - cd ch - NEW_TESTS=$(git diff --name-only "$(git merge-base origin/master "$SHA_TO_TEST"~)" "$SHA_TO_TEST" | grep -P 'tests/queries/0_stateless/.*\.sql' | sed -r -e 's!^!ch/!' | sort -R) - cd .. + # Don't overwrite the NEW_TESTS_OPT so that it can be set from the environment. + NEW_TESTS="$(grep -P 'tests/queries/0_stateless/.*\.sql' ci-changed-files.txt | sed -r -e 's!^!ch/!' 
| sort -R)" if [[ -n "$NEW_TESTS" ]] then - NEW_TESTS_OPT="--interleave-queries-file ${NEW_TESTS}" - else - NEW_TESTS_OPT="" + NEW_TESTS_OPT="${NEW_TESTS_OPT:---interleave-queries-file ${NEW_TESTS}}" fi - ./clickhouse-server --config-file db/config.xml -- --path db 2>&1 | tail -100000 > server.log & + clickhouse-server --config-file db/config.xml -- --path db 2>&1 | tail -100000 > server.log & server_pid=$! kill -0 $server_pid - while ! ./clickhouse-client --query "select 1" && kill -0 $server_pid ; do echo . ; sleep 1 ; done - ./clickhouse-client --query "select 1" + while ! clickhouse-client --query "select 1" && kill -0 $server_pid ; do echo . ; sleep 1 ; done + clickhouse-client --query "select 1" kill -0 $server_pid echo Server started @@ -111,14 +104,14 @@ continue # SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric. # SC2046: Quote this to prevent word splitting. Actually I need word splitting. # shellcheck disable=SC2012,SC2046 - ./clickhouse-client --query-fuzzer-runs=1000 --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) $NEW_TESTS_OPT \ + clickhouse-client --query-fuzzer-runs=1000 --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) $NEW_TESTS_OPT \ > >(tail -n 100000 > fuzzer.log) \ 2>&1 \ || fuzzer_exit_code=$? 
echo "Fuzzer exit code is $fuzzer_exit_code" - ./clickhouse-client --query "select elapsed, query from system.processes" ||: + clickhouse-client --query "select elapsed, query from system.processes" ||: killall clickhouse-server ||: for _ in {1..10} do From 9e8ebb5e2bbf9a740d269c4e251280aec4578125 Mon Sep 17 00:00:00 2001 From: tavplubix Date: Mon, 15 Mar 2021 20:07:36 +0300 Subject: [PATCH 225/333] Update TableFunctionPostgreSQL.cpp --- src/TableFunctions/TableFunctionPostgreSQL.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TableFunctions/TableFunctionPostgreSQL.cpp b/src/TableFunctions/TableFunctionPostgreSQL.cpp index 0e3f1c5da24..0cf7c36b5ae 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.cpp +++ b/src/TableFunctions/TableFunctionPostgreSQL.cpp @@ -58,7 +58,7 @@ void TableFunctionPostgreSQL::parseArguments(const ASTPtr & ast_function, const ASTs & args = func_args.arguments->children; if (args.size() < 5 || args.size() > 6) - throw Exception("Table function 'PostgreSQL' requires 5-6 parameters: " + throw Exception("Table function 'PostgreSQL' requires from 5 to 6 parameters: " "PostgreSQL('host:port', 'database', 'table', 'user', 'password', [, 'schema']).", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); From badd5165dac3e864efeb863da3324a57ebabc6ee Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 20:36:49 +0300 Subject: [PATCH 226/333] Fix UBSan report in rounding to years intervals --- base/common/DateLUTImpl.h | 9 ++++++++- .../0_stateless/01761_round_year_bounds.reference | 0 tests/queries/0_stateless/01761_round_year_bounds.sql | 1 + 3 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01761_round_year_bounds.reference create mode 100644 tests/queries/0_stateless/01761_round_year_bounds.sql diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 6e968a0cd50..fc30ee9c6a0 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ 
-807,7 +807,14 @@ public: return toFirstDayNumOfYear(v); const LUTIndex i = toLUTIndex(v); - return toDayNum(years_lut[lut[i].year / years * years - DATE_LUT_MIN_YEAR]); + + UInt16 year = lut[i].year / years * years; + + /// For example, rounding down 1925 to 100 years will be 1900, but it's less than min supported year. + if (unlikely(year < DATE_LUT_MIN_YEAR)) + year = DATE_LUT_MIN_YEAR; + + return toDayNum(years_lut[year - DATE_LUT_MIN_YEAR]); } inline ExtendedDayNum toStartOfQuarterInterval(ExtendedDayNum d, UInt64 quarters) const diff --git a/tests/queries/0_stateless/01761_round_year_bounds.reference b/tests/queries/0_stateless/01761_round_year_bounds.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01761_round_year_bounds.sql b/tests/queries/0_stateless/01761_round_year_bounds.sql new file mode 100644 index 00000000000..fed12c55568 --- /dev/null +++ b/tests/queries/0_stateless/01761_round_year_bounds.sql @@ -0,0 +1 @@ +SELECT toStartOfInterval(toDateTime(-9223372036854775808), toIntervalYear(100), 'Europe/Moscow') FORMAT Null; From 845f4afbf4c7287a586b42874150f7b6ef039b80 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 21:09:05 +0300 Subject: [PATCH 227/333] Fix parsing of pre-epoch time --- base/common/DateLUTImpl.h | 7 +------ .../01762_datetime64_extended_parsing.reference | 1 + .../0_stateless/01762_datetime64_extended_parsing.sql | 1 + 3 files changed, 3 insertions(+), 6 deletions(-) create mode 100644 tests/queries/0_stateless/01762_datetime64_extended_parsing.reference create mode 100644 tests/queries/0_stateless/01762_datetime64_extended_parsing.sql diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index fc30ee9c6a0..867862ad51e 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -913,12 +913,7 @@ public: if (time_offset >= lut[index].time_at_offset_change()) time_offset -= lut[index].amount_of_offset_change(); - UInt32 res = 
lut[index].date + time_offset; - - if (unlikely(res > DATE_LUT_MAX)) - return 0; - - return res; + return lut[index].date + time_offset; } template diff --git a/tests/queries/0_stateless/01762_datetime64_extended_parsing.reference b/tests/queries/0_stateless/01762_datetime64_extended_parsing.reference new file mode 100644 index 00000000000..531b6f8bf13 --- /dev/null +++ b/tests/queries/0_stateless/01762_datetime64_extended_parsing.reference @@ -0,0 +1 @@ +1925-01-02 03:04:05.678901 diff --git a/tests/queries/0_stateless/01762_datetime64_extended_parsing.sql b/tests/queries/0_stateless/01762_datetime64_extended_parsing.sql new file mode 100644 index 00000000000..a7ad447b215 --- /dev/null +++ b/tests/queries/0_stateless/01762_datetime64_extended_parsing.sql @@ -0,0 +1 @@ +SELECT toDateTime64('1925-01-02 03:04:05.678901', 6); From 60f125c2b5b9c002e12238ff73f80f6814448699 Mon Sep 17 00:00:00 2001 From: George Date: Mon, 15 Mar 2021 21:36:42 +0300 Subject: [PATCH 228/333] translation draft --- docs/ru/sql-reference/statements/attach.md | 5 +- docs/ru/sql-reference/statements/detach.md | 61 ++++++++++++++++++++-- 2 files changed, 59 insertions(+), 7 deletions(-) diff --git a/docs/ru/sql-reference/statements/attach.md b/docs/ru/sql-reference/statements/attach.md index 259ab893e63..be5b0b6d44a 100644 --- a/docs/ru/sql-reference/statements/attach.md +++ b/docs/ru/sql-reference/statements/attach.md @@ -10,14 +10,15 @@ toc_title: ATTACH - вместо слова `CREATE` используется слово `ATTACH`; - запрос не создаёт данные на диске, а предполагает, что данные уже лежат в соответствующих местах, и всего лишь добавляет информацию о таблице на сервер. После выполнения запроса `ATTACH` сервер будет знать о существовании таблицы. -Если таблица перед этим была отсоединена (`DETACH`), т.е. её структура известна, можно использовать сокращенную форму записи без определения структуры. +Если таблица перед этим была откреплена ([DETACH](../../sql-reference/statements/detach.md)), т.е. 
её структура известна, можно использовать сокращенную форму записи без определения структуры. ``` sql ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] ``` -Этот запрос используется при старте сервера. Сервер хранит метаданные таблиц в виде файлов с запросами `ATTACH`, которые он просто исполняет при запуске (за исключением системных таблиц, которые явно создаются на сервере). +Этот запрос используется при старте сервера. Сервер хранит метаданные таблиц в виде файлов с запросами `ATTACH`, которые он просто исполняет при запуске (за исключением некоторых системных таблиц, которые явно создаются на сервере). +Если таблица была откреплена перманентно, она не будет прикреплена обратно во время старта сервера, так что нужно явно использовать запрос `ATTACH`, чтобы прикрепить ее. [Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/attach/) diff --git a/docs/ru/sql-reference/statements/detach.md b/docs/ru/sql-reference/statements/detach.md index 00d0a4b20c6..bec8f4c5ff7 100644 --- a/docs/ru/sql-reference/statements/detach.md +++ b/docs/ru/sql-reference/statements/detach.md @@ -5,15 +5,66 @@ toc_title: DETACH # DETACH {#detach-statement} -Удаляет из сервера информацию о таблице name. Сервер перестаёт знать о существовании таблицы. +Удаляет из сервера информацию о таблице или материализованном представлении. Сервер перестаёт знать о существовании таблицы. + +Синтаксис: ``` sql -DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] +DETACH TABLE|VIEW [IF EXISTS] [db.]name [PERMANENTLY] [ON CLUSTER cluster] ``` -Но ни данные, ни метаданные таблицы не удаляются. При следующем запуске сервера, сервер прочитает метаданные и снова узнает о таблице. -Также, «отцепленную» таблицу можно прицепить заново запросом `ATTACH` (за исключением системных таблиц, для которых метаданные не хранятся). +Но ни данные, ни метаданные таблицы или материализованного представления не удаляются. 
При следующем запуске сервера, если не было использовано `PERMANENTLY`, сервер прочитает метаданные и снова узнает о таблице/представлении. Если таблица или представление были откреплено перманентно, сервер не прикрепит их обратно автоматически. -Запроса `DETACH DATABASE` нет. +Независимо от того, каким способом таблица была откреплена, ее можно прикрепить обратно с помощью запроса [ATTACH](../../sql-reference/statements/attach.md). Системные log таблицы также могут быть прикреплены обратно (к примеру `query_log`, `text_log` и др.) Другие системные таблицы не могут быть прикреплены обратно, но на следующем запуске сервер снова вспомнит об этих таблицах. + +`ATTACH MATERIALIZED VIEW` не может быть использован с кратким синтаксисом (без `SELECT`), но можно прикрепить представление с помощью запроса `ATTACH TABLE`. + +Обратите внимание, что нельзя перманентно открепить таблицу, которая уже временно откреплена. Для этого ее сначала надо прикрепить обратно, а затем снова открепить перманентно. + +Также нельзя использовать [DROP](../../sql-reference/statements/drop.md#drop-table) с открепленной таблицей или создавать таблицу с помощью [CREATE TABLE](../../sql-reference/statements/create/table.md) с таким же именем, как уже открепленная таблица. Еще нельзя заменить открепленную таблицу другой с помощью запроса [RENAME TABLE](../../sql-reference/statements/rename.md). + +**Пример** + +Создание таблицы: + +Запрос: + +``` sql +CREATE TABLE test ENGINE = Log AS SELECT * FROM numbers(10); +SELECT * FROM test; +``` + +Результат: + +``` text +┌─number─┐ +│ 0 │ +│ 1 │ +│ 2 │ +│ 3 │ +│ 4 │ +│ 5 │ +│ 6 │ +│ 7 │ +│ 8 │ +│ 9 │ +└────────┘ +``` + +Открепление таблицы: + +Запрос: + +``` sql +DETACH TABLE test; +SELECT * FROM test; +``` + +Результат: + +``` text +Received exception from server (version 21.4.1): +Code: 60. DB::Exception: Received from localhost:9000. DB::Exception: Table default.test doesn't exist. 
+``` [Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/detach/) From a78b234a6d62412edbdc21f187495a240c13ea63 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 15 Mar 2021 21:45:57 +0300 Subject: [PATCH 229/333] fixes --- docker/test/fuzzer/run-fuzzer.sh | 10 +++++++--- docker/test/performance-comparison/compare.sh | 4 +++- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 22fe4220aaf..6858e838850 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -4,7 +4,9 @@ set -eux set -o pipefail trap "exit" INT TERM -trap 'kill $(jobs -pr) ||:' EXIT +# The watchdog is in the separate process group, so we have to kill it separately +# if the script terminates earlier. +trap 'kill $(jobs -pr) ${watchdog_pid:-} ||:' EXIT stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" @@ -20,7 +22,7 @@ function clone rm -rf ch ||: mkdir ch cd ch - wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz" + wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz" tar -xvf clickhouse_no_subs.tar.gz tree ||: ls -lath ||: @@ -29,7 +31,7 @@ function clone function download { wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse" & - wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/ci-changed-files.txt" & + wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/ci-changed-files.txt" & wait chmod +x clickhouse @@ -78,6 +80,8 @@ function fuzz if [[ -n "$NEW_TESTS" ]] then NEW_TESTS_OPT="${NEW_TESTS_OPT:---interleave-queries-file ${NEW_TESTS}}" + else + NEW_TESTS_OPT="${NEW_TESTS_OPT:-}" fi clickhouse-server --config-file 
db/config.xml -- --path db 2>&1 | tail -100000 > server.log & diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 9a0d8093a55..1505fc77691 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -2,7 +2,9 @@ set -exu set -o pipefail trap "exit" INT TERM -trap 'kill $(jobs -pr) ||:' EXIT +# The watchdog is in the separate process group, so we have to kill it separately +# if the script terminates earlier. +trap 'kill $(jobs -pr) ${watchdog_pid:-} ||:' EXIT stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" From 671395e8c81e79f079334650112737eca6e3f6c7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 22:23:27 +0300 Subject: [PATCH 230/333] Most likely improve performance --- base/common/DateLUTImpl.h | 145 +++++++++++++----- base/common/LocalDate.h | 20 --- base/common/LocalDateTime.h | 57 ++----- programs/git-import/git-import.cpp | 2 +- .../PostgreSQLBlockInputStream.cpp | 15 +- .../ClickHouseDictionarySource.cpp | 5 +- src/Dictionaries/MySQLDictionarySource.cpp | 7 +- .../PostgreSQLDictionarySource.cpp | 5 +- src/Dictionaries/RedisBlockInputStream.cpp | 7 +- src/Dictionaries/XDBCDictionarySource.cpp | 5 +- src/Formats/MySQLBlockInputStream.cpp | 8 +- src/IO/WriteHelpers.h | 12 +- .../MergeTree/MergeTreeMutationEntry.cpp | 4 +- .../MergeTree/ReplicatedMergeTreeLogEntry.cpp | 4 +- .../ReplicatedMergeTreeMutationEntry.cpp | 4 +- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- 16 files changed, 163 insertions(+), 139 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 867862ad51e..6bac9bd5126 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -944,16 +944,70 @@ public: } + struct DateComponents + { + uint16_t year; + uint8_t month; + uint8_t day; + }; + + struct TimeComponents + { + uint8_t hour; + uint8_t minute; + uint8_t 
second; + }; + + struct DateTimeComponents + { + DateComponents date; + TimeComponents time; + }; + + inline DateComponents toDateComponents(time_t t) const + { + const Values & values = getValues(t); + return { values.year, values.month, values.day_of_month }; + } + + inline DateTimeComponents toDateTimeComponents(time_t t) const + { + const LUTIndex index = findIndex(t); + const Values & values = lut[index]; + + DateTimeComponents res; + + res.date.year = values.year; + res.date.month = values.month; + res.date.day = values.day_of_month; + + time_t time = t - values.date; + if (time >= values.time_at_offset_change()) + time += values.amount_of_offset_change(); + + res.time.second = time % 60; + res.time.minute = time / 60 % 60; + res.time.hour = time / 3600; + + /// In case time was changed backwards at the start of next day, we will repeat the hour 23. + if (unlikely(res.time.hour > 23)) + res.time.hour = 23; + + return res; + } + + inline UInt64 toNumYYYYMMDDhhmmss(time_t t) const { - const Values & values = find(t); + DateTimeComponents components = toDateTimeComponents(t); + return - toSecond(t) - + toMinute(t) * 100 - + toHour(t) * 10000 - + UInt64(values.day_of_month) * 1000000 - + UInt64(values.month) * 100000000 - + UInt64(values.year) * 10000000000; + components.time.second + + components.time.minute * 100 + + components.time.hour * 10000 + + UInt64(components.date.day) * 1000000 + + UInt64(components.date.month) * 100000000 + + UInt64(components.date.year) * 10000000000; } inline time_t YYYYMMDDhhmmssToTime(UInt64 num) const @@ -972,16 +1026,19 @@ public: inline NO_SANITIZE_UNDEFINED time_t addDays(time_t t, Int64 delta) const { - LUTIndex index = findIndex(t); - time_t time_offset = toHour(t) * 3600 + toMinute(t) * 60 + toSecond(t); + const LUTIndex index = findIndex(t); + const Values & values = lut[index]; - index += delta; - index &= date_lut_mask; + time_t time = t - values.date; + if (time >= values.time_at_offset_change()) + time += 
values.amount_of_offset_change(); - if (time_offset >= lut[index].time_at_offset_change()) - time_offset -= lut[index].amount_of_offset_change(); + const LUTIndex new_index = index + delta; - return lut[index].date + time_offset; + if (time >= lut[new_index].time_at_offset_change()) + time -= lut[new_index].amount_of_offset_change(); + + return lut[new_index].date + time; } inline NO_SANITIZE_UNDEFINED time_t addWeeks(time_t t, Int64 delta) const @@ -1033,12 +1090,17 @@ public: { const auto result_day = addMonthsIndex(t, delta); - time_t time_offset = toHour(t) * 3600 + toMinute(t) * 60 + toSecond(t); + const LUTIndex index = findIndex(t); + const Values & values = lut[index]; - if (time_offset >= lut[result_day].time_at_offset_change()) - time_offset -= lut[result_day].amount_of_offset_change(); + time_t time = t - values.date; + if (time >= values.time_at_offset_change()) + time += values.amount_of_offset_change(); - return lut[result_day].date + time_offset; + if (time >= lut[result_day].time_at_offset_change()) + time -= lut[result_day].amount_of_offset_change(); + + return lut[result_day].date + time; } inline ExtendedDayNum NO_SANITIZE_UNDEFINED addMonths(ExtendedDayNum d, Int64 delta) const @@ -1077,12 +1139,17 @@ public: { auto result_day = addYearsIndex(t, delta); - time_t time_offset = toHour(t) * 3600 + toMinute(t) * 60 + toSecond(t); + const LUTIndex index = findIndex(t); + const Values & values = lut[index]; - if (time_offset >= lut[result_day].time_at_offset_change()) - time_offset -= lut[result_day].amount_of_offset_change(); + time_t time = t - values.date; + if (time >= values.time_at_offset_change()) + time += values.amount_of_offset_change(); - return lut[result_day].date + time_offset; + if (time >= lut[result_day].time_at_offset_change()) + time -= lut[result_day].amount_of_offset_change(); + + return lut[result_day].date + time; } inline ExtendedDayNum addYears(ExtendedDayNum d, Int64 delta) const @@ -1093,29 +1160,25 @@ public: inline 
std::string timeToString(time_t t) const { - const Values & values = getValues(t); + DateTimeComponents components = toDateTimeComponents(t); std::string s {"0000-00-00 00:00:00"}; - s[0] += values.year / 1000; - s[1] += (values.year / 100) % 10; - s[2] += (values.year / 10) % 10; - s[3] += values.year % 10; - s[5] += values.month / 10; - s[6] += values.month % 10; - s[8] += values.day_of_month / 10; - s[9] += values.day_of_month % 10; + s[0] += components.date.year / 1000; + s[1] += (components.date.year / 100) % 10; + s[2] += (components.date.year / 10) % 10; + s[3] += components.date.year % 10; + s[5] += components.date.month / 10; + s[6] += components.date.month % 10; + s[8] += components.date.day / 10; + s[9] += components.date.day % 10; - auto hour = toHour(t); - auto minute = toMinute(t); - auto second = toSecond(t); - - s[11] += hour / 10; - s[12] += hour % 10; - s[14] += minute / 10; - s[15] += minute % 10; - s[17] += second / 10; - s[18] += second % 10; + s[11] += components.time.hour / 10; + s[12] += components.time.hour % 10; + s[14] += components.time.minute / 10; + s[15] += components.time.minute % 10; + s[17] += components.time.second / 10; + s[18] += components.time.second % 10; return s; } diff --git a/base/common/LocalDate.h b/base/common/LocalDate.h index 7e1260c1385..b1e6eeb907c 100644 --- a/base/common/LocalDate.h +++ b/base/common/LocalDate.h @@ -92,17 +92,6 @@ public: LocalDate(const LocalDate &) noexcept = default; LocalDate & operator= (const LocalDate &) noexcept = default; - LocalDate & operator= (time_t time) - { - init(time); - return *this; - } - - operator time_t() const - { - return DateLUT::instance().makeDate(m_year, m_month, m_day); - } - DayNum getDayNum() const { const auto & lut = DateLUT::instance(); @@ -167,12 +156,3 @@ public: }; static_assert(sizeof(LocalDate) == 4); - - -namespace std -{ -inline string to_string(const LocalDate & date) -{ - return date.toString(); -} -} diff --git a/base/common/LocalDateTime.h 
b/base/common/LocalDateTime.h index 0e237789bd1..4c2cf0e637d 100644 --- a/base/common/LocalDateTime.h +++ b/base/common/LocalDateTime.h @@ -29,29 +29,16 @@ private: /// NOTE We may use attribute packed instead, but it is less portable. unsigned char pad = 0; - void init(time_t time) + void init(time_t time, const DateLUTImpl & time_zone) { - if (unlikely(time > DATE_LUT_MAX || time == 0)) - { - m_year = 0; - m_month = 0; - m_day = 0; - m_hour = 0; - m_minute = 0; - m_second = 0; + DateLUTImpl::DateTimeComponents components = time_zone.toDateTimeComponents(time); - return; - } - - const auto & date_lut = DateLUT::instance(); - const auto & values = date_lut.getValues(time); - - m_year = values.year; - m_month = values.month; - m_day = values.day_of_month; - m_hour = date_lut.toHour(time); - m_minute = date_lut.toMinute(time); - m_second = date_lut.toSecond(time); + m_year = components.date.year; + m_month = components.date.month; + m_day = components.date.day; + m_hour = components.time.hour; + m_minute = components.time.minute; + m_second = components.time.second; (void)pad; /// Suppress unused private field warning. } @@ -73,9 +60,9 @@ private: } public: - explicit LocalDateTime(time_t time) + explicit LocalDateTime(time_t time, const DateLUTImpl & time_zone = DateLUT::instance()) { - init(time); + init(time, time_zone); } LocalDateTime(unsigned short year_, unsigned char month_, unsigned char day_, @@ -104,19 +91,6 @@ public: LocalDateTime(const LocalDateTime &) noexcept = default; LocalDateTime & operator= (const LocalDateTime &) noexcept = default; - LocalDateTime & operator= (time_t time) - { - init(time); - return *this; - } - - operator time_t() const - { - return m_year == 0 - ? 
0 - : DateLUT::instance().makeDateTime(m_year, m_month, m_day, m_hour, m_minute, m_second); - } - unsigned short year() const { return m_year; } unsigned char month() const { return m_month; } unsigned char day() const { return m_day; } @@ -167,14 +141,3 @@ public: }; static_assert(sizeof(LocalDateTime) == 8); - - -namespace std -{ -inline string to_string(const LocalDateTime & datetime) -{ - stringstream str; - str << datetime; - return str.str(); -} -} diff --git a/programs/git-import/git-import.cpp b/programs/git-import/git-import.cpp index ae8b55e2aff..b07435dcf78 100644 --- a/programs/git-import/git-import.cpp +++ b/programs/git-import/git-import.cpp @@ -1064,7 +1064,7 @@ void processCommit( time_t commit_time; readText(commit_time, in); - commit.time = commit_time; + commit.time = LocalDateTime(commit_time); assertChar('\0', in); readNullTerminated(commit.author, in); std::string parent_hash; diff --git a/src/DataStreams/PostgreSQLBlockInputStream.cpp b/src/DataStreams/PostgreSQLBlockInputStream.cpp index da6a83fb930..4cf2d942885 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.cpp +++ b/src/DataStreams/PostgreSQLBlockInputStream.cpp @@ -160,8 +160,13 @@ void PostgreSQLBlockInputStream::insertValue(IColumn & column, std::string_view assert_cast(column).insertValue(UInt16{LocalDate{std::string(value)}.getDayNum()}); break; case ValueType::vtDateTime: - assert_cast(column).insertValue(time_t{LocalDateTime{std::string(value)}}); + { + ReadBufferFromString in(value); + time_t time = 0; + readDateTimeText(time, in); + assert_cast(column).insertValue(time); break; + } case ValueType::vtDateTime64:[[fallthrough]]; case ValueType::vtDecimal32: [[fallthrough]]; case ValueType::vtDecimal64: [[fallthrough]]; @@ -257,7 +262,13 @@ void PostgreSQLBlockInputStream::prepareArrayInfo(size_t column_idx, const DataT else if (which.isDate()) parser = [](std::string & field) -> Field { return UInt16{LocalDate{field}.getDayNum()}; }; else if (which.isDateTime()) - parser = 
[](std::string & field) -> Field { return time_t{LocalDateTime{field}}; }; + parser = [](std::string & field) -> Field + { + ReadBufferFromString in(field); + time_t time = 0; + readDateTimeText(time, in); + return time; + }; else if (which.isDecimal32()) parser = [nested](std::string & field) -> Field { diff --git a/src/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp index f4c17884afa..5e69df2c8a0 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.cpp +++ b/src/Dictionaries/ClickHouseDictionarySource.cpp @@ -118,10 +118,9 @@ std::string ClickHouseDictionarySource::getUpdateFieldAndDate() { if (update_time != std::chrono::system_clock::from_time_t(0)) { - auto tmp_time = update_time; + time_t hr_time = std::chrono::system_clock::to_time_t(update_time) - 1; + std::string str_time = DateLUT::instance().timeToString(hr_time); update_time = std::chrono::system_clock::now(); - time_t hr_time = std::chrono::system_clock::to_time_t(tmp_time) - 1; - std::string str_time = std::to_string(LocalDateTime(hr_time)); return query_builder.composeUpdateQuery(update_field, str_time); } else diff --git a/src/Dictionaries/MySQLDictionarySource.cpp b/src/Dictionaries/MySQLDictionarySource.cpp index 572080aee1e..a1d0cbe7ffc 100644 --- a/src/Dictionaries/MySQLDictionarySource.cpp +++ b/src/Dictionaries/MySQLDictionarySource.cpp @@ -107,10 +107,9 @@ std::string MySQLDictionarySource::getUpdateFieldAndDate() { if (update_time != std::chrono::system_clock::from_time_t(0)) { - auto tmp_time = update_time; + time_t hr_time = std::chrono::system_clock::to_time_t(update_time) - 1; + std::string str_time = DateLUT::instance().timeToString(hr_time); update_time = std::chrono::system_clock::now(); - time_t hr_time = std::chrono::system_clock::to_time_t(tmp_time) - 1; - std::string str_time = std::to_string(LocalDateTime(hr_time)); return query_builder.composeUpdateQuery(update_field, str_time); } else @@ -262,7 +261,7 @@ LocalDateTime 
MySQLDictionarySource::getLastModification(mysqlxx::Pool::Entry & if (!update_time_value.isNull()) { modification_time = update_time_value.getDateTime(); - LOG_TRACE(log, "Got modification time: {}", modification_time); + LOG_TRACE(log, "Got modification time: {}", update_time_value.getString()); } /// fetch remaining rows to avoid "commands out of sync" error diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index aa852404750..93a57383380 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -127,10 +127,9 @@ std::string PostgreSQLDictionarySource::getUpdateFieldAndDate() { if (update_time != std::chrono::system_clock::from_time_t(0)) { - auto tmp_time = update_time; + time_t hr_time = std::chrono::system_clock::to_time_t(update_time) - 1; + std::string str_time = DateLUT::instance().timeToString(hr_time); update_time = std::chrono::system_clock::now(); - time_t hr_time = std::chrono::system_clock::to_time_t(tmp_time) - 1; - std::string str_time = std::to_string(LocalDateTime(hr_time)); return query_builder.composeUpdateQuery(update_field, str_time); } else diff --git a/src/Dictionaries/RedisBlockInputStream.cpp b/src/Dictionaries/RedisBlockInputStream.cpp index a5514d14155..6a2072f7f6a 100644 --- a/src/Dictionaries/RedisBlockInputStream.cpp +++ b/src/Dictionaries/RedisBlockInputStream.cpp @@ -99,8 +99,13 @@ namespace DB assert_cast(column).insertValue(parse(string_value).getDayNum()); break; case ValueType::vtDateTime: - assert_cast(column).insertValue(static_cast(parse(string_value))); + { + ReadBufferFromString in(string_value); + time_t time = 0; + readDateTimeText(time, in); + assert_cast(column).insertValue(time); break; + } case ValueType::vtUUID: assert_cast(column).insertValue(parse(string_value)); break; diff --git a/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp index 3615f72605f..37d54f3549c 
100644 --- a/src/Dictionaries/XDBCDictionarySource.cpp +++ b/src/Dictionaries/XDBCDictionarySource.cpp @@ -156,10 +156,9 @@ std::string XDBCDictionarySource::getUpdateFieldAndDate() { if (update_time != std::chrono::system_clock::from_time_t(0)) { - auto tmp_time = update_time; + time_t hr_time = std::chrono::system_clock::to_time_t(update_time) - 1; + std::string str_time = DateLUT::instance().timeToString(hr_time); update_time = std::chrono::system_clock::now(); - time_t hr_time = std::chrono::system_clock::to_time_t(tmp_time) - 1; - std::string str_time = std::to_string(LocalDateTime(hr_time)); return query_builder.composeUpdateQuery(update_field, str_time); } else diff --git a/src/Formats/MySQLBlockInputStream.cpp b/src/Formats/MySQLBlockInputStream.cpp index 87df0c1f4b1..bf7cf266e91 100644 --- a/src/Formats/MySQLBlockInputStream.cpp +++ b/src/Formats/MySQLBlockInputStream.cpp @@ -11,6 +11,7 @@ # include # include # include +# include # include # include # include @@ -97,8 +98,13 @@ namespace assert_cast(column).insertValue(UInt16(value.getDate().getDayNum())); break; case ValueType::vtDateTime: - assert_cast(column).insertValue(UInt32(value.getDateTime())); + { + ReadBufferFromString in(value); + time_t time = 0; + readDateTimeText(time, in); + assert_cast(column).insertValue(time); break; + } case ValueType::vtUUID: assert_cast(column).insert(parse(value.data(), value.size())); break; diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index 72c6b69114f..b9497b6f87e 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -821,24 +821,18 @@ inline void writeDateTimeText(const LocalDateTime & datetime, WriteBuffer & buf) template inline void writeDateTimeText(time_t datetime, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { - const auto & values = time_zone.getValues(datetime); - writeDateTimeText( - LocalDateTime(values.year, values.month, values.day_of_month, - time_zone.toHour(datetime), time_zone.toMinute(datetime), 
time_zone.toSecond(datetime)), buf); + writeDateTimeText(LocalDateTime(datetime, time_zone), buf); } /// In the format YYYY-MM-DD HH:MM:SS.NNNNNNNNN, according to the specified time zone. template -inline void writeDateTimeText(DateTime64 datetime64, UInt32 scale, WriteBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance()) +inline void writeDateTimeText(DateTime64 datetime64, UInt32 scale, WriteBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) { static constexpr UInt32 MaxScale = DecimalUtils::max_precision; scale = scale > MaxScale ? MaxScale : scale; auto components = DecimalUtils::split(datetime64, scale); - const auto & values = date_lut.getValues(components.whole); - writeDateTimeText( - LocalDateTime(values.year, values.month, values.day_of_month, - date_lut.toHour(components.whole), date_lut.toMinute(components.whole), date_lut.toSecond(components.whole)), buf); + writeDateTimeText(LocalDateTime(components.whole, time_zone), buf); if (scale > 0) { diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp index 44c4b3c4d10..49c4e93eb1d 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp @@ -75,7 +75,9 @@ MergeTreeMutationEntry::MergeTreeMutationEntry(DiskPtr disk_, const String & pat LocalDateTime create_time_dt; *buf >> "create time: " >> create_time_dt >> "\n"; - create_time = create_time_dt; + create_time = DateLUT::instance().makeDateTime( + create_time_dt.year(), create_time_dt.month(), create_time_dt.day(), + create_time_dt.hour(), create_time_dt.minute(), create_time_dt.second()); *buf >> "commands: "; commands.readText(*buf); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp index 9a9f25fd470..07c64d9c95c 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp +++ 
b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp @@ -162,7 +162,9 @@ void ReplicatedMergeTreeLogEntryData::readText(ReadBuffer & in) { LocalDateTime create_time_dt; in >> "create_time: " >> create_time_dt >> "\n"; - create_time = create_time_dt; + create_time = DateLUT::instance().makeDateTime( + create_time_dt.year(), create_time_dt.month(), create_time_dt.day(), + create_time_dt.hour(), create_time_dt.minute(), create_time_dt.second()); } in >> "source replica: " >> source_replica >> "\n"; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp index b2299b2cbbd..c617befe9c4 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeMutationEntry.cpp @@ -37,7 +37,9 @@ void ReplicatedMergeTreeMutationEntry::readText(ReadBuffer & in) LocalDateTime create_time_dt; in >> "create time: " >> create_time_dt >> "\n"; - create_time = create_time_dt; + create_time = DateLUT::instance().makeDateTime( + create_time_dt.year(), create_time_dt.month(), create_time_dt.day(), + create_time_dt.hour(), create_time_dt.minute(), create_time_dt.second()); in >> "source replica: " >> source_replica >> "\n"; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 3490e9918c5..a8de89a8bb0 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1450,7 +1450,7 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry) { LOG_INFO(log, "Will try to fetch part {} until '{}' because this part assigned to recompression merge. 
" "Source replica {} will try to merge this part first", entry.new_part_name, - LocalDateTime(entry.create_time + storage_settings_ptr->try_fetch_recompressed_part_timeout.totalSeconds()), entry.source_replica); + DateLUT::instance().timeToString(entry.create_time + storage_settings_ptr->try_fetch_recompressed_part_timeout.totalSeconds()), entry.source_replica); return false; } From e485a27dc1b86077481e7b74b08272070ddcfe40 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 15 Mar 2021 22:45:45 +0300 Subject: [PATCH 231/333] some fixes --- programs/client/Client.cpp | 13 +++++++++---- programs/client/QueryFuzzer.cpp | 9 +++++++++ src/Common/FieldVisitors.cpp | 12 +++++++++++- 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 91fa36e9e92..527fec17c63 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1413,13 +1413,18 @@ private: if (formatted_twice != fuzzed_text) { + fmt::print(stderr, "The query formatting is broken.\n"); + printChangedSettings(); - fmt::print(stderr, "The query formatting is broken. 
Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", + fmt::print(stderr, "Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", formatted_twice, fuzzed_text); - fmt::print(stderr, "AST parsed back:\n'{}'\nSource AST:\n'{}'\n", - parsed_formatted_query->dumpTree(), - ast_to_process->dumpTree()); + fmt::print(stderr, "In more detail:\n"); + fmt::print(stderr, "AST-1:\n'{}'\n", ast_to_process->dumpTree()); + fmt::print(stderr, "Text-1 (AST-1 formatted):\n'{}'\n", fuzzed_text); + fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", parsed_formatted_query->dumpTree()); + fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", formatted_twice); + fmt::print(stderr, "Text-1 must be equal to Text-2, but it is not.\n"); exit(1); } diff --git a/programs/client/QueryFuzzer.cpp b/programs/client/QueryFuzzer.cpp index 8d8d8daaf39..0c8dc0731f9 100644 --- a/programs/client/QueryFuzzer.cpp +++ b/programs/client/QueryFuzzer.cpp @@ -570,6 +570,15 @@ void QueryFuzzer::addColumnLike(const ASTPtr ast) } const auto name = ast->formatForErrorMessage(); + if (name == "Null") + { + // The `Null` identifier from FORMAT Null clause. We don't quote it + // properly when formatting the AST, and while the resulting query + // technically works, it has non-standard case for Null (the standard + // is NULL), so it breaks the query formatting idempotence check. + // Just plug this particular case for now. 
+ return; + } if (name.size() < 200) { column_like_map.insert({name, ast}); diff --git a/src/Common/FieldVisitors.cpp b/src/Common/FieldVisitors.cpp index dae153bd8d2..62f04816032 100644 --- a/src/Common/FieldVisitors.cpp +++ b/src/Common/FieldVisitors.cpp @@ -180,7 +180,17 @@ String FieldVisitorToString::operator() (const Tuple & x) const { WriteBufferFromOwnString wb; - wb << '('; + // For single-element tuples we must use the explicit tuple() function, + // or they will be parsed back as plain literals. + if (x.size() > 1) + { + wb << '('; + } + else + { + wb << "tuple("; + } + for (auto it = x.begin(); it != x.end(); ++it) { if (it != x.begin()) From f48bf2aaba9f38a02a830991922e1cd97fe9c0f5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 23:29:35 +0300 Subject: [PATCH 232/333] Fix issue --- base/common/DateLUTImpl.h | 15 ++++++++++++--- ...1702_toDateTime_from_string_clamping.reference | 2 +- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 6bac9bd5126..98b767ccbcc 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -985,9 +985,18 @@ public: if (time >= values.time_at_offset_change()) time += values.amount_of_offset_change(); - res.time.second = time % 60; - res.time.minute = time / 60 % 60; - res.time.hour = time / 3600; + if (unlikely(time < 0)) + { + res.time.second = 0; + res.time.minute = 0; + res.time.hour = 0; + } + else + { + res.time.second = time % 60; + res.time.minute = time / 60 % 60; + res.time.hour = time / 3600; + } /// In case time was changed backwards at the start of next day, we will repeat the hour 23. 
if (unlikely(res.time.hour > 23)) diff --git a/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference b/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference index 77da114be68..7e8307d66a6 100644 --- a/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference +++ b/tests/queries/0_stateless/01702_toDateTime_from_string_clamping.reference @@ -6,4 +6,4 @@ SELECT toString(toDateTime('9922337203.6854775808', 1)); SELECT toDateTime64(CAST('10000000000.1' AS Decimal64(1)), 1); 2283-11-11 23:46:40.1 SELECT toDateTime64(CAST('-10000000000.1' AS Decimal64(1)), 1); -1925-01-01 23:09:20.1 +1925-01-01 00:00:00.1 From 31d7f10a94c5c9505c8ca11b3cee311a313e9948 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 23:34:42 +0300 Subject: [PATCH 233/333] Remove bad code --- src/DataTypes/DataTypeDateTime.cpp | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/src/DataTypes/DataTypeDateTime.cpp b/src/DataTypes/DataTypeDateTime.cpp index d2bbb4a1efa..d8ae7f688ae 100644 --- a/src/DataTypes/DataTypeDateTime.cpp +++ b/src/DataTypes/DataTypeDateTime.cpp @@ -12,10 +12,14 @@ #include #include +namespace DB +{ + namespace { -using namespace DB; -inline void readText(time_t & x, ReadBuffer & istr, const FormatSettings & settings, const DateLUTImpl & time_zone, const DateLUTImpl & utc_time_zone) + +inline void readTextHelper( + time_t & x, ReadBuffer & istr, const FormatSettings & settings, const DateLUTImpl & time_zone, const DateLUTImpl & utc_time_zone) { switch (settings.date_time_input_format) { @@ -27,16 +31,16 @@ inline void readText(time_t & x, ReadBuffer & istr, const FormatSettings & setti return; } } + } -namespace DB -{ TimezoneMixin::TimezoneMixin(const String & time_zone_name) : has_explicit_time_zone(!time_zone_name.empty()), time_zone(DateLUT::instance(time_zone_name)), utc_time_zone(DateLUT::instance("UTC")) -{} +{ +} DataTypeDateTime::DataTypeDateTime(const String 
& time_zone_name) : TimezoneMixin(time_zone_name) @@ -45,7 +49,8 @@ DataTypeDateTime::DataTypeDateTime(const String & time_zone_name) DataTypeDateTime::DataTypeDateTime(const TimezoneMixin & time_zone_) : TimezoneMixin(time_zone_) -{} +{ +} String DataTypeDateTime::doGetName() const { @@ -87,7 +92,7 @@ void DataTypeDateTime::deserializeWholeText(IColumn & column, ReadBuffer & istr, void DataTypeDateTime::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { time_t x; - ::readText(x, istr, settings, time_zone, utc_time_zone); + readTextHelper(x, istr, settings, time_zone, utc_time_zone); assert_cast(column).getData().push_back(x); } @@ -103,7 +108,7 @@ void DataTypeDateTime::deserializeTextQuoted(IColumn & column, ReadBuffer & istr time_t x; if (checkChar('\'', istr)) /// Cases: '2017-08-31 18:36:48' or '1504193808' { - ::readText(x, istr, settings, time_zone, utc_time_zone); + readTextHelper(x, istr, settings, time_zone, utc_time_zone); assertChar('\'', istr); } else /// Just 1504193808 or 01504193808 @@ -125,7 +130,7 @@ void DataTypeDateTime::deserializeTextJSON(IColumn & column, ReadBuffer & istr, time_t x; if (checkChar('"', istr)) { - ::readText(x, istr, settings, time_zone, utc_time_zone); + readTextHelper(x, istr, settings, time_zone, utc_time_zone); assertChar('"', istr); } else @@ -154,7 +159,7 @@ void DataTypeDateTime::deserializeTextCSV(IColumn & column, ReadBuffer & istr, c if (maybe_quote == '\'' || maybe_quote == '\"') ++istr.position(); - ::readText(x, istr, settings, time_zone, utc_time_zone); + readTextHelper(x, istr, settings, time_zone, utc_time_zone); if (maybe_quote == '\'' || maybe_quote == '\"') assertChar(maybe_quote, istr); From 3f67f4f47b79e101b8fc70e7501a5bc07c0a3c03 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Mar 2021 23:40:33 +0300 Subject: [PATCH 234/333] Saturation for DateTime --- src/DataStreams/PostgreSQLBlockInputStream.cpp | 2 ++ src/DataTypes/DataTypeDateTime.cpp | 
11 +++++++++++ src/Dictionaries/RedisBlockInputStream.cpp | 2 ++ src/Formats/MySQLBlockInputStream.cpp | 2 ++ src/Formats/ProtobufSerializer.cpp | 2 ++ src/Functions/FunctionsConversion.h | 8 +++++--- 6 files changed, 24 insertions(+), 3 deletions(-) diff --git a/src/DataStreams/PostgreSQLBlockInputStream.cpp b/src/DataStreams/PostgreSQLBlockInputStream.cpp index 4cf2d942885..8350dc86849 100644 --- a/src/DataStreams/PostgreSQLBlockInputStream.cpp +++ b/src/DataStreams/PostgreSQLBlockInputStream.cpp @@ -164,6 +164,8 @@ void PostgreSQLBlockInputStream::insertValue(IColumn & column, std::string_view ReadBufferFromString in(value); time_t time = 0; readDateTimeText(time, in); + if (time < 0) + time = 0; assert_cast(column).insertValue(time); break; } diff --git a/src/DataTypes/DataTypeDateTime.cpp b/src/DataTypes/DataTypeDateTime.cpp index d8ae7f688ae..09dcb5f3e2e 100644 --- a/src/DataTypes/DataTypeDateTime.cpp +++ b/src/DataTypes/DataTypeDateTime.cpp @@ -93,6 +93,8 @@ void DataTypeDateTime::deserializeTextEscaped(IColumn & column, ReadBuffer & ist { time_t x; readTextHelper(x, istr, settings, time_zone, utc_time_zone); + if (x < 0) + x = 0; assert_cast(column).getData().push_back(x); } @@ -115,6 +117,8 @@ void DataTypeDateTime::deserializeTextQuoted(IColumn & column, ReadBuffer & istr { readIntText(x, istr); } + if (x < 0) + x = 0; assert_cast(column).getData().push_back(x); /// It's important to do this at the end - for exception safety. 
} @@ -137,6 +141,10 @@ void DataTypeDateTime::deserializeTextJSON(IColumn & column, ReadBuffer & istr, { readIntText(x, istr); } + + if (x < 0) + x = 0; + assert_cast(column).getData().push_back(x); } @@ -164,6 +172,9 @@ void DataTypeDateTime::deserializeTextCSV(IColumn & column, ReadBuffer & istr, c if (maybe_quote == '\'' || maybe_quote == '\"') assertChar(maybe_quote, istr); + if (x < 0) + x = 0; + assert_cast(column).getData().push_back(x); } diff --git a/src/Dictionaries/RedisBlockInputStream.cpp b/src/Dictionaries/RedisBlockInputStream.cpp index 6a2072f7f6a..39cc2d610df 100644 --- a/src/Dictionaries/RedisBlockInputStream.cpp +++ b/src/Dictionaries/RedisBlockInputStream.cpp @@ -103,6 +103,8 @@ namespace DB ReadBufferFromString in(string_value); time_t time = 0; readDateTimeText(time, in); + if (time < 0) + time = 0; assert_cast(column).insertValue(time); break; } diff --git a/src/Formats/MySQLBlockInputStream.cpp b/src/Formats/MySQLBlockInputStream.cpp index bf7cf266e91..29cf749de3b 100644 --- a/src/Formats/MySQLBlockInputStream.cpp +++ b/src/Formats/MySQLBlockInputStream.cpp @@ -102,6 +102,8 @@ namespace ReadBufferFromString in(value); time_t time = 0; readDateTimeText(time, in); + if (time < 0) + time = 0; assert_cast(column).insertValue(time); break; } diff --git a/src/Formats/ProtobufSerializer.cpp b/src/Formats/ProtobufSerializer.cpp index 2ab8e11a854..f99888975bc 100644 --- a/src/Formats/ProtobufSerializer.cpp +++ b/src/Formats/ProtobufSerializer.cpp @@ -1486,6 +1486,8 @@ namespace ReadBufferFromString buf{str}; time_t tm = 0; readDateTimeText(tm, buf); + if (tm < 0) + tm = 0; return tm; } diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index fb3c2787c96..ef74f7778b1 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -769,9 +769,11 @@ inline void parseImpl(DataTypeDate::FieldType & x, ReadBuffer & rb template <> inline void parseImpl(DataTypeDateTime::FieldType & x, 
ReadBuffer & rb, const DateLUTImpl * time_zone) { - time_t tmp = 0; - readDateTimeText(tmp, rb, *time_zone); - x = tmp; + time_t time = 0; + readDateTimeText(time, rb, *time_zone); + if (time < 0) + time = 0; + x = time; } From c1a2745bfbdf54906ede3411c02b6f320107d052 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Mar 2021 00:04:03 +0300 Subject: [PATCH 235/333] Add one more variant to memcpy benchmark --- base/glibc-compatibility/memcpy/memcpy.h | 2 +- utils/memcpy-bench/memcpy-bench.cpp | 172 ++++++++++++++++++++++- 2 files changed, 172 insertions(+), 2 deletions(-) diff --git a/base/glibc-compatibility/memcpy/memcpy.h b/base/glibc-compatibility/memcpy/memcpy.h index f9f81bcb0fe..211d144cecb 100644 --- a/base/glibc-compatibility/memcpy/memcpy.h +++ b/base/glibc-compatibility/memcpy/memcpy.h @@ -178,7 +178,7 @@ tail: size -= padding; } - /// Aligned unrolled copy. We will use all available SSE registers. + /// Aligned unrolled copy. We will use half of available SSE registers. /// It's not possible to have both src and dst aligned. /// So, we will use aligned stores and unaligned loads. __m128i c0, c1, c2, c3, c4, c5, c6, c7; diff --git a/utils/memcpy-bench/memcpy-bench.cpp b/utils/memcpy-bench/memcpy-bench.cpp index cd769640017..5c664a76fe2 100644 --- a/utils/memcpy-bench/memcpy-bench.cpp +++ b/utils/memcpy-bench/memcpy-bench.cpp @@ -35,7 +35,7 @@ void NO_INLINE loop(uint8_t * dst, uint8_t * src, size_t size, F && chunk_size_d size -= bytes_to_copy; /// Execute at least one SSE instruction as a penalty after running AVX code. 
- __asm__ volatile ("pxor %%xmm7, %%xmm7" ::: "xmm7"); + __asm__ __volatile__ ("pxor %%xmm15, %%xmm15" ::: "xmm15"); } } @@ -385,6 +385,7 @@ void memcpy_my_medium_avx(uint8_t * __restrict & __restrict dst, const uint8_t * bool have_avx = true; + static uint8_t * memcpy_my(uint8_t * __restrict dst, const uint8_t * __restrict src, size_t size) { uint8_t * ret = dst; @@ -560,6 +561,174 @@ tail: return ret; } + +static uint8_t * memcpy_my2(uint8_t * __restrict dst, const uint8_t * __restrict src, size_t size) +{ + uint8_t * ret = dst; + + if (size <= 16) + { + if (size >= 8) + { + __builtin_memcpy(dst + size - 8, src + size - 8, 8); + __builtin_memcpy(dst, src, 8); + } + else if (size >= 4) + { + __builtin_memcpy(dst + size - 4, src + size - 4, 4); + __builtin_memcpy(dst, src, 4); + } + else if (size >= 2) + { + __builtin_memcpy(dst + size - 2, src + size - 2, 2); + __builtin_memcpy(dst, src, 2); + } + else if (size >= 1) + { + *dst = *src; + } + } + else + { + if (size <= 128) + { + _mm_storeu_si128(reinterpret_cast<__m128i *>(dst + size - 16), _mm_loadu_si128(reinterpret_cast(src + size - 16))); + + while (size > 16) + { + _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), _mm_loadu_si128(reinterpret_cast(src))); + dst += 16; + src += 16; + size -= 16; + } + } + else if (size < 30000 || !have_avx) + { + /// Align destination to 16 bytes boundary. + size_t padding = (16 - (reinterpret_cast(dst) & 15)) & 15; + + if (padding > 0) + { + __m128i head = _mm_loadu_si128(reinterpret_cast(src)); + _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), head); + dst += padding; + src += padding; + size -= padding; + } + + /// Aligned unrolled copy. 
+ __m128i c0, c1, c2, c3, c4, c5, c6, c7; + + while (size >= 128) + { + c0 = _mm_loadu_si128(reinterpret_cast(src) + 0); + c1 = _mm_loadu_si128(reinterpret_cast(src) + 1); + c2 = _mm_loadu_si128(reinterpret_cast(src) + 2); + c3 = _mm_loadu_si128(reinterpret_cast(src) + 3); + c4 = _mm_loadu_si128(reinterpret_cast(src) + 4); + c5 = _mm_loadu_si128(reinterpret_cast(src) + 5); + c6 = _mm_loadu_si128(reinterpret_cast(src) + 6); + c7 = _mm_loadu_si128(reinterpret_cast(src) + 7); + src += 128; + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 0), c0); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 1), c1); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 2), c2); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 3), c3); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 4), c4); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 5), c5); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 6), c6); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 7), c7); + dst += 128; + + size -= 128; + } + + _mm_storeu_si128(reinterpret_cast<__m128i *>(dst + size - 16), _mm_loadu_si128(reinterpret_cast(src + size - 16))); + + while (size > 16) + { + _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), _mm_loadu_si128(reinterpret_cast(src))); + dst += 16; + src += 16; + size -= 16; + } + } + else + { + size_t padding = (32 - (reinterpret_cast(dst) & 31)) & 31; + + if (padding > 0) + { + __asm__( + "vmovups (%[s]), %%ymm0\n" + "vmovups %%ymm0, (%[d])\n" + : [d]"+r"(dst), [s]"+r"(src) + : + : "ymm0", "memory"); + + dst += padding; + src += padding; + size -= padding; + } + + while (size >= 256) + { + __asm__( + "vmovups (%[s]), %%ymm0\n" + "vmovups 0x20(%[s]), %%ymm1\n" + "vmovups 0x40(%[s]), %%ymm2\n" + "vmovups 0x60(%[s]), %%ymm3\n" + "vmovups 0x80(%[s]), %%ymm4\n" + "vmovups 0xa0(%[s]), %%ymm5\n" + "vmovups 0xc0(%[s]), %%ymm6\n" + "vmovups 0xe0(%[s]), %%ymm7\n" + "add $0x100,%[s]\n" + "vmovaps %%ymm0, (%[d])\n" + "vmovaps %%ymm1, 0x20(%[d])\n" + "vmovaps %%ymm2, 
0x40(%[d])\n" + "vmovaps %%ymm3, 0x60(%[d])\n" + "vmovaps %%ymm4, 0x80(%[d])\n" + "vmovaps %%ymm5, 0xa0(%[d])\n" + "vmovaps %%ymm6, 0xc0(%[d])\n" + "vmovaps %%ymm7, 0xe0(%[d])\n" + "add $0x100, %[d]\n" + : [d]"+r"(dst), [s]"+r"(src) + : + : "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "memory"); + + size -= 256; + } + + __asm__( + "vmovups -0x20(%[s],%[size],1), %%ymm0\n" + "vmovups %%ymm0, -0x20(%[d],%[size],1)\n" + : [d]"+r"(dst), [s]"+r"(src) + : [size]"r"(size) + : "ymm0", "memory"); + + while (size > 32) + { + __asm__( + "vmovups (%[s]), %%ymm0\n" + "vmovups %%ymm0, (%[d])\n" + : [d]"+r"(dst), [s]"+r"(src) + : + : "ymm0", "memory"); + + dst += 32; + src += 32; + size -= 32; + } + + __asm__ __volatile__ ("vzeroupper" + ::: "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", + "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15"); + } + } + + return ret; +} + extern "C" void * __memcpy_erms(void * __restrict destination, const void * __restrict source, size_t size); extern "C" void * __memcpy_sse2_unaligned(void * __restrict destination, const void * __restrict source, size_t size); extern "C" void * __memcpy_ssse3(void * __restrict destination, const void * __restrict source, size_t size); @@ -592,6 +761,7 @@ uint64_t dispatchMemcpyVariants(size_t memcpy_variant, uint8_t * dst, uint8_t * VARIANT(10, memcpy_fast_sse) VARIANT(11, memcpy_fast_avx) VARIANT(12, memcpy_my) + VARIANT(13, memcpy_my2) VARIANT(21, __memcpy_erms) VARIANT(22, __memcpy_sse2_unaligned) From b9ae9c9cd2f55e494a2dd0dfa181e6406a2be936 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Mar 2021 00:05:30 +0300 Subject: [PATCH 236/333] Add one more variant to memcpy benchmark --- utils/memcpy-bench/memcpy-bench.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/memcpy-bench/memcpy-bench.cpp b/utils/memcpy-bench/memcpy-bench.cpp index 5c664a76fe2..704ea04a184 100644 --- a/utils/memcpy-bench/memcpy-bench.cpp +++ 
b/utils/memcpy-bench/memcpy-bench.cpp @@ -816,7 +816,7 @@ int main(int argc, char ** argv) for size in 4096 16384 50000 65536 100000 1000000 10000000 100000000; do for threads in 1 2 4 $(($(nproc) / 2)) $(nproc); do for distribution in 1 2 3 4 5; do - for variant in {1..12} {21..29}; do + for variant in {1..13} {21..29}; do for i in {1..10}; do ./memcpy-bench --tsv --size $size --variant $variant --threads $threads --distribution $distribution; done; From fe371d9cfceb5b97f324565351d6f75ced34fbc1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Mar 2021 01:13:27 +0300 Subject: [PATCH 237/333] Minor modifications --- utils/memcpy-bench/memcpy-bench.cpp | 320 +++++++++++++++++----------- 1 file changed, 191 insertions(+), 129 deletions(-) diff --git a/utils/memcpy-bench/memcpy-bench.cpp b/utils/memcpy-bench/memcpy-bench.cpp index 704ea04a184..b607c45370d 100644 --- a/utils/memcpy-bench/memcpy-bench.cpp +++ b/utils/memcpy-bench/memcpy-bench.cpp @@ -588,142 +588,203 @@ static uint8_t * memcpy_my2(uint8_t * __restrict dst, const uint8_t * __restrict *dst = *src; } } + else if (size <= 128) + { + _mm_storeu_si128(reinterpret_cast<__m128i *>(dst + size - 16), _mm_loadu_si128(reinterpret_cast(src + size - 16))); + + while (size > 16) + { + _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), _mm_loadu_si128(reinterpret_cast(src))); + dst += 16; + src += 16; + size -= 16; + } + } + else if (size < 30000 || !have_avx) + { + /// Align destination to 16 bytes boundary. + size_t padding = (16 - (reinterpret_cast(dst) & 15)) & 15; + + if (padding > 0) + { + __m128i head = _mm_loadu_si128(reinterpret_cast(src)); + _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), head); + dst += padding; + src += padding; + size -= padding; + } + + /// Aligned unrolled copy. 
+ __m128i c0, c1, c2, c3, c4, c5, c6, c7; + + while (size >= 128) + { + c0 = _mm_loadu_si128(reinterpret_cast(src) + 0); + c1 = _mm_loadu_si128(reinterpret_cast(src) + 1); + c2 = _mm_loadu_si128(reinterpret_cast(src) + 2); + c3 = _mm_loadu_si128(reinterpret_cast(src) + 3); + c4 = _mm_loadu_si128(reinterpret_cast(src) + 4); + c5 = _mm_loadu_si128(reinterpret_cast(src) + 5); + c6 = _mm_loadu_si128(reinterpret_cast(src) + 6); + c7 = _mm_loadu_si128(reinterpret_cast(src) + 7); + src += 128; + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 0), c0); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 1), c1); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 2), c2); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 3), c3); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 4), c4); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 5), c5); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 6), c6); + _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 7), c7); + dst += 128; + + size -= 128; + } + + _mm_storeu_si128(reinterpret_cast<__m128i *>(dst + size - 16), _mm_loadu_si128(reinterpret_cast(src + size - 16))); + + while (size > 16) + { + _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), _mm_loadu_si128(reinterpret_cast(src))); + dst += 16; + src += 16; + size -= 16; + } + } else { - if (size <= 128) + size_t padding = (32 - (reinterpret_cast(dst) & 31)) & 31; + + if (padding > 0) { - _mm_storeu_si128(reinterpret_cast<__m128i *>(dst + size - 16), _mm_loadu_si128(reinterpret_cast(src + size - 16))); - - while (size > 16) - { - _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), _mm_loadu_si128(reinterpret_cast(src))); - dst += 16; - src += 16; - size -= 16; - } - } - else if (size < 30000 || !have_avx) - { - /// Align destination to 16 bytes boundary. 
- size_t padding = (16 - (reinterpret_cast(dst) & 15)) & 15; - - if (padding > 0) - { - __m128i head = _mm_loadu_si128(reinterpret_cast(src)); - _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), head); - dst += padding; - src += padding; - size -= padding; - } - - /// Aligned unrolled copy. - __m128i c0, c1, c2, c3, c4, c5, c6, c7; - - while (size >= 128) - { - c0 = _mm_loadu_si128(reinterpret_cast(src) + 0); - c1 = _mm_loadu_si128(reinterpret_cast(src) + 1); - c2 = _mm_loadu_si128(reinterpret_cast(src) + 2); - c3 = _mm_loadu_si128(reinterpret_cast(src) + 3); - c4 = _mm_loadu_si128(reinterpret_cast(src) + 4); - c5 = _mm_loadu_si128(reinterpret_cast(src) + 5); - c6 = _mm_loadu_si128(reinterpret_cast(src) + 6); - c7 = _mm_loadu_si128(reinterpret_cast(src) + 7); - src += 128; - _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 0), c0); - _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 1), c1); - _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 2), c2); - _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 3), c3); - _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 4), c4); - _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 5), c5); - _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 6), c6); - _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 7), c7); - dst += 128; - - size -= 128; - } - - _mm_storeu_si128(reinterpret_cast<__m128i *>(dst + size - 16), _mm_loadu_si128(reinterpret_cast(src + size - 16))); - - while (size > 16) - { - _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), _mm_loadu_si128(reinterpret_cast(src))); - dst += 16; - src += 16; - size -= 16; - } - } - else - { - size_t padding = (32 - (reinterpret_cast(dst) & 31)) & 31; - - if (padding > 0) - { - __asm__( - "vmovups (%[s]), %%ymm0\n" - "vmovups %%ymm0, (%[d])\n" - : [d]"+r"(dst), [s]"+r"(src) - : - : "ymm0", "memory"); - - dst += padding; - src += padding; - size -= padding; - } - - while (size >= 256) - { - __asm__( - "vmovups (%[s]), %%ymm0\n" - "vmovups 0x20(%[s]), %%ymm1\n" - "vmovups 
0x40(%[s]), %%ymm2\n" - "vmovups 0x60(%[s]), %%ymm3\n" - "vmovups 0x80(%[s]), %%ymm4\n" - "vmovups 0xa0(%[s]), %%ymm5\n" - "vmovups 0xc0(%[s]), %%ymm6\n" - "vmovups 0xe0(%[s]), %%ymm7\n" - "add $0x100,%[s]\n" - "vmovaps %%ymm0, (%[d])\n" - "vmovaps %%ymm1, 0x20(%[d])\n" - "vmovaps %%ymm2, 0x40(%[d])\n" - "vmovaps %%ymm3, 0x60(%[d])\n" - "vmovaps %%ymm4, 0x80(%[d])\n" - "vmovaps %%ymm5, 0xa0(%[d])\n" - "vmovaps %%ymm6, 0xc0(%[d])\n" - "vmovaps %%ymm7, 0xe0(%[d])\n" - "add $0x100, %[d]\n" - : [d]"+r"(dst), [s]"+r"(src) - : - : "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "memory"); - - size -= 256; - } - __asm__( - "vmovups -0x20(%[s],%[size],1), %%ymm0\n" - "vmovups %%ymm0, -0x20(%[d],%[size],1)\n" + "vmovups (%[s]), %%ymm0\n" + "vmovups %%ymm0, (%[d])\n" : [d]"+r"(dst), [s]"+r"(src) - : [size]"r"(size) + : : "ymm0", "memory"); - while (size > 32) - { - __asm__( - "vmovups (%[s]), %%ymm0\n" - "vmovups %%ymm0, (%[d])\n" - : [d]"+r"(dst), [s]"+r"(src) - : - : "ymm0", "memory"); - - dst += 32; - src += 32; - size -= 32; - } - - __asm__ __volatile__ ("vzeroupper" - ::: "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", - "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15"); + dst += padding; + src += padding; + size -= padding; } + + while (size >= 512) + { + __asm__( + "vmovups (%[s]), %%ymm0\n" + "vmovups 0x20(%[s]), %%ymm1\n" + "vmovups 0x40(%[s]), %%ymm2\n" + "vmovups 0x60(%[s]), %%ymm3\n" + "vmovups 0x80(%[s]), %%ymm4\n" + "vmovups 0xa0(%[s]), %%ymm5\n" + "vmovups 0xc0(%[s]), %%ymm6\n" + "vmovups 0xe0(%[s]), %%ymm7\n" + "vmovups 0x100(%[s]), %%ymm8\n" + "vmovups 0x120(%[s]), %%ymm9\n" + "vmovups 0x140(%[s]), %%ymm10\n" + "vmovups 0x160(%[s]), %%ymm11\n" + "vmovups 0x180(%[s]), %%ymm12\n" + "vmovups 0x1a0(%[s]), %%ymm13\n" + "vmovups 0x1c0(%[s]), %%ymm14\n" + "vmovups 0x1e0(%[s]), %%ymm15\n" + "add $0x200, %[s]\n" + "sub $0x200, %[size]\n" + "vmovaps %%ymm0, (%[d])\n" + "vmovaps %%ymm1, 0x20(%[d])\n" + "vmovaps 
%%ymm2, 0x40(%[d])\n" + "vmovaps %%ymm3, 0x60(%[d])\n" + "vmovaps %%ymm4, 0x80(%[d])\n" + "vmovaps %%ymm5, 0xa0(%[d])\n" + "vmovaps %%ymm6, 0xc0(%[d])\n" + "vmovaps %%ymm7, 0xe0(%[d])\n" + "vmovaps %%ymm8, 0x100(%[d])\n" + "vmovaps %%ymm9, 0x120(%[d])\n" + "vmovaps %%ymm10, 0x140(%[d])\n" + "vmovaps %%ymm11, 0x160(%[d])\n" + "vmovaps %%ymm12, 0x180(%[d])\n" + "vmovaps %%ymm13, 0x1a0(%[d])\n" + "vmovaps %%ymm14, 0x1c0(%[d])\n" + "vmovaps %%ymm15, 0x1e0(%[d])\n" + "add $0x200, %[d]\n" + : [d]"+r"(dst), [s]"+r"(src), [size]"+r"(size) + : + : "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", + "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15", + "memory"); + } + + /*while (size >= 256) + { + __asm__( + "vmovups (%[s]), %%ymm0\n" + "vmovups 0x20(%[s]), %%ymm1\n" + "vmovups 0x40(%[s]), %%ymm2\n" + "vmovups 0x60(%[s]), %%ymm3\n" + "vmovups 0x80(%[s]), %%ymm4\n" + "vmovups 0xa0(%[s]), %%ymm5\n" + "vmovups 0xc0(%[s]), %%ymm6\n" + "vmovups 0xe0(%[s]), %%ymm7\n" + "add $0x100,%[s]\n" + "vmovaps %%ymm0, (%[d])\n" + "vmovaps %%ymm1, 0x20(%[d])\n" + "vmovaps %%ymm2, 0x40(%[d])\n" + "vmovaps %%ymm3, 0x60(%[d])\n" + "vmovaps %%ymm4, 0x80(%[d])\n" + "vmovaps %%ymm5, 0xa0(%[d])\n" + "vmovaps %%ymm6, 0xc0(%[d])\n" + "vmovaps %%ymm7, 0xe0(%[d])\n" + "add $0x100, %[d]\n" + : [d]"+r"(dst), [s]"+r"(src) + : + : "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "memory"); + + size -= 256; + }*/ + + /*while (size > 128) + { + __asm__( + "vmovups (%[s]), %%ymm0\n" + "vmovups 0x20(%[s]), %%ymm1\n" + "vmovups 0x40(%[s]), %%ymm2\n" + "vmovups 0x60(%[s]), %%ymm3\n" + "add $0x80, %[s]\n" + "sub $0x80, %[size]\n" + "vmovaps %%ymm0, (%[d])\n" + "vmovaps %%ymm1, 0x20(%[d])\n" + "vmovaps %%ymm2, 0x40(%[d])\n" + "vmovaps %%ymm3, 0x60(%[d])\n" + "add $0x80, %[d]\n" + : [d]"+r"(dst), [s]"+r"(src), [size]"+r"(size) + : + : "ymm0", "ymm1", "ymm2", "ymm3", "memory"); + }*/ + + __asm__( + "vmovups -0x20(%[s],%[size],1), %%ymm0\n" + "vmovups %%ymm0, 
-0x20(%[d],%[size],1)\n" + : [d]"+r"(dst), [s]"+r"(src) + : [size]"r"(size) + : "ymm0", "memory"); + + while (size > 32) + { + __asm__( + "vmovups (%[s]), %%ymm0\n" + "vmovups %%ymm0, (%[d])\n" + : [d]"+r"(dst), [s]"+r"(src) + : + : "ymm0", "memory"); + + dst += 32; + src += 32; + size -= 32; + } + + __asm__ __volatile__ ("vzeroupper" + ::: "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", + "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15"); } return ret; @@ -893,7 +954,8 @@ clickhouse-local --structure ' } else { - std::cout << ": processed in " << (elapsed_ns / 1e9) << " sec, " << (size * iterations * 1.0 / elapsed_ns) << " GB/sec\n"; + std::cout << ": " << num_threads << " threads, " << "size: " << size << ", distribution " << generator_variant + << ", processed in " << (elapsed_ns / 1e9) << " sec, " << (size * iterations * 1.0 / elapsed_ns) << " GB/sec\n"; } return 0; From 3c16ea0bc80153400a0d24b881ec5951bc021d80 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 16 Mar 2021 01:56:27 +0300 Subject: [PATCH 238/333] fix incorrect number of rows for Chunks with no columns in PartialSortingTransform --- .../Transforms/PartialSortingTransform.cpp | 7 ++++--- src/Processors/Transforms/WindowTransform.cpp | 17 +++++++++++------ .../01591_window_functions.reference | 5 +++++ .../0_stateless/01591_window_functions.sql | 4 ++++ 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/src/Processors/Transforms/PartialSortingTransform.cpp b/src/Processors/Transforms/PartialSortingTransform.cpp index 2fd0a64ee92..10ce2c4b773 100644 --- a/src/Processors/Transforms/PartialSortingTransform.cpp +++ b/src/Processors/Transforms/PartialSortingTransform.cpp @@ -91,8 +91,10 @@ size_t getFilterMask(const ColumnRawPtrs & lhs, const ColumnRawPtrs & rhs, size_ void PartialSortingTransform::transform(Chunk & chunk) { + const auto rows_num = chunk.getNumRows(); + if (read_rows) - read_rows->add(chunk.getNumRows()); + 
read_rows->add(rows_num); auto block = getInputPort().getHeader().cloneWithColumns(chunk.detachColumns()); @@ -101,7 +103,6 @@ void PartialSortingTransform::transform(Chunk & chunk) */ if (!threshold_block_columns.empty()) { - UInt64 rows_num = block.rows(); auto block_columns = extractColumns(block, description); size_t result_size_hint = getFilterMask( @@ -134,7 +135,7 @@ void PartialSortingTransform::transform(Chunk & chunk) } } - chunk.setColumns(block.getColumns(), block.rows()); + chunk.setColumns(block.getColumns(), rows_num); } } diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 0013e0061e2..1fc51bd4112 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -881,12 +881,13 @@ void WindowTransform::appendChunk(Chunk & chunk) assert(chunk.hasRows()); blocks.push_back({}); auto & block = blocks.back(); + // Use the number of rows from the Chunk, because it is correct even in + // the case where the Chunk has no columns. Not sure if this actually + // happens, because even in the case of `count() over ()` we have a dummy + // input column. + block.rows = chunk.getNumRows(); block.input_columns = chunk.detachColumns(); - // Even in case of `count() over ()` we should have a dummy input column. - // Not sure how reliable this is... - block.rows = block.input_columns[0]->size(); - for (auto & ws : workspaces) { // Aggregate functions can't work with constant columns, so we have to @@ -1109,9 +1110,7 @@ IProcessor::Status WindowTransform::prepare() if (output.canPush()) { // Output the ready block. 
-// fmt::print(stderr, "output block {}\n", next_output_block_number); const auto i = next_output_block_number - first_block_number; - ++next_output_block_number; auto & block = blocks[i]; auto columns = block.input_columns; for (auto & res : block.output_columns) @@ -1120,6 +1119,12 @@ IProcessor::Status WindowTransform::prepare() } output_data.chunk.setColumns(columns, block.rows); +// fmt::print(stderr, "output block {} as chunk '{}'\n", +// next_output_block_number, +// output_data.chunk.dumpStructure()); + + ++next_output_block_number; + output.pushData(std::move(output_data)); } diff --git a/tests/queries/0_stateless/01591_window_functions.reference b/tests/queries/0_stateless/01591_window_functions.reference index d2543f0db75..d83808ce37d 100644 --- a/tests/queries/0_stateless/01591_window_functions.reference +++ b/tests/queries/0_stateless/01591_window_functions.reference @@ -993,3 +993,8 @@ order by number 7 6 8 8 7 9 9 8 9 +-- In this case, we had a problem with PartialSortingTransform returning zero-row +-- chunks for input chunks w/o columns. +select count() over () from numbers(4) where number < 2; +2 +2 diff --git a/tests/queries/0_stateless/01591_window_functions.sql b/tests/queries/0_stateless/01591_window_functions.sql index 03bd8371e23..14b08ad2875 100644 --- a/tests/queries/0_stateless/01591_window_functions.sql +++ b/tests/queries/0_stateless/01591_window_functions.sql @@ -345,3 +345,7 @@ from numbers(10) window w as (order by number range between 1 preceding and 1 following) order by number ; + +-- In this case, we had a problem with PartialSortingTransform returning zero-row +-- chunks for input chunks w/o columns. 
+select count() over () from numbers(4) where number < 2; From 6403198c8491c5c4cba88f93327f62159df506ff Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 16 Mar 2021 02:18:50 +0300 Subject: [PATCH 239/333] check formatting only for the queries we can execute --- programs/client/Client.cpp | 120 +++++++++++++++++++------------------ 1 file changed, 61 insertions(+), 59 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 527fec17c63..4a61662c238 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1372,65 +1372,6 @@ private: continue; } - // Check that the query is formatted properly and we can parse - // it back and format again and get the same result. Unfortunately - // we can't compare the ASTs, which would be more sensitive to - // errors. This double formatting check doesn't catch all errors, - // e.g. we can format query incorrectly, but to a valid SQL that - // we can then parse and format into the same SQL. - { - ASTPtr parsed_formatted_query; - try - { - const auto * tmp_pos = fuzzed_text.c_str(); - parsed_formatted_query = parseQuery(tmp_pos, - tmp_pos + fuzzed_text.size(), - false /* allow_multi_statements */); - } - catch (Exception & e) - { - // Some complicated cases where we can generate the SQL - // which we can't parse: - // * first argument of lambda() replaced by fuzzer with - // something else, leading to constructs such as - // arrayMap((min(x) + 3) -> x + 1, ....) - // * internals of Enum replaced, leading to: - // Enum(equals(someFunction(y), 3)). - // We could filter them on case-by-case basis, but they - // are probably also helpful in that they test the parsing - // errors, so let's just ignore them in this check and - // send them to the server normally. 
- if (e.code() != ErrorCodes::SYNTAX_ERROR) - { - throw; - } - } - - if (parsed_formatted_query) - { - const auto formatted_twice - = parsed_formatted_query->formatForErrorMessage(); - - if (formatted_twice != fuzzed_text) - { - fmt::print(stderr, "The query formatting is broken.\n"); - - printChangedSettings(); - - fmt::print(stderr, "Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", - formatted_twice, fuzzed_text); - fmt::print(stderr, "In more detail:\n"); - fmt::print(stderr, "AST-1:\n'{}'\n", ast_to_process->dumpTree()); - fmt::print(stderr, "Text-1 (AST-1 formatted):\n'{}'\n", fuzzed_text); - fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", parsed_formatted_query->dumpTree()); - fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", formatted_twice); - fmt::print(stderr, "Text-1 must be equal to Text-2, but it is not.\n"); - - exit(1); - } - } - } - parsed_query = ast_to_process; query_to_send = parsed_query->formatForErrorMessage(); @@ -1470,6 +1411,67 @@ private: return false; } + // Check that after the query is formatted, we can parse it back, + // format again and get the same result. Unfortunately, we can't + // compare the ASTs, which would be more sensitive to errors. This + // double formatting check doesn't catch all errors, e.g. we can + // format query incorrectly, but to a valid SQL that we can then + // parse and format into the same SQL. + // There are some complicated cases where we can generate the SQL + // which we can't parse: + // * first argument of lambda() replaced by fuzzer with + // something else, leading to constructs such as + // arrayMap((min(x) + 3) -> x + 1, ....) + // * internals of Enum replaced, leading to: + // Enum(equals(someFunction(y), 3)). + // And there are even the cases when we can parse the query, but + // it's logically incorrect and its formatting is a mess, such as + // when `lambda()` function gets substituted into a wrong place. 
+ // To avoid dealing with these cases, run the check only for the + // queries we were able to successfully execute. + if (!have_error) + { + ASTPtr parsed_formatted_query; + try + { + const auto * tmp_pos = query_to_send.c_str(); + parsed_formatted_query = parseQuery(tmp_pos, + tmp_pos + query_to_send.size(), + false /* allow_multi_statements */); + } + catch (Exception & e) + { + if (e.code() != ErrorCodes::SYNTAX_ERROR) + { + throw; + } + } + + if (parsed_formatted_query) + { + const auto formatted_twice + = parsed_formatted_query->formatForErrorMessage(); + + if (formatted_twice != query_to_send) + { + fmt::print(stderr, "The query formatting is broken.\n"); + + printChangedSettings(); + + fmt::print(stderr, "Got the following (different) text after formatting the fuzzed query and parsing it back:\n'{}'\n, expected:\n'{}'\n", + formatted_twice, query_to_send); + fmt::print(stderr, "In more detail:\n"); + fmt::print(stderr, "AST-1:\n'{}'\n", parsed_query->dumpTree()); + fmt::print(stderr, "Text-1 (AST-1 formatted):\n'{}'\n", query_to_send); + fmt::print(stderr, "AST-2 (Text-1 parsed):\n'{}'\n", parsed_formatted_query->dumpTree()); + fmt::print(stderr, "Text-2 (AST-2 formatted):\n'{}'\n", formatted_twice); + fmt::print(stderr, "Text-1 must be equal to Text-2, but it is not.\n"); + + exit(1); + } + } + } + // The server is still alive so we're going to continue fuzzing. // Determine what we're going to use as the starting AST. 
if (have_error) From 2328d568278113505fc4ce5e4ef42163059d4d2c Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 16 Mar 2021 04:19:33 +0300 Subject: [PATCH 240/333] fix --- src/Processors/Transforms/PartialSortingTransform.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Processors/Transforms/PartialSortingTransform.cpp b/src/Processors/Transforms/PartialSortingTransform.cpp index 10ce2c4b773..33ff639f10d 100644 --- a/src/Processors/Transforms/PartialSortingTransform.cpp +++ b/src/Processors/Transforms/PartialSortingTransform.cpp @@ -91,7 +91,7 @@ size_t getFilterMask(const ColumnRawPtrs & lhs, const ColumnRawPtrs & rhs, size_ void PartialSortingTransform::transform(Chunk & chunk) { - const auto rows_num = chunk.getNumRows(); + auto rows_num = chunk.getNumRows(); if (read_rows) read_rows->add(rows_num); @@ -117,13 +117,15 @@ void PartialSortingTransform::transform(Chunk & chunk) { for (auto & column : block) column.column = column.column->filter(filter, result_size_hint); + + rows_num = block.rows(); } } sortBlock(block, description, limit); /// Check if we can use this block for optimization. 
- if (min_limit_for_partial_sort_optimization <= limit && limit <= block.rows()) + if (min_limit_for_partial_sort_optimization <= limit && limit <= rows_num) { auto block_columns = extractColumns(block, description); @@ -135,6 +137,8 @@ void PartialSortingTransform::transform(Chunk & chunk) } } + assert(block.columns() == 0 || block.rows() == rows_num); + chunk.setColumns(block.getColumns(), rows_num); } From 54f124aae499d55779632d0f0a2641a4ec41a0c0 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 15 Mar 2021 07:55:59 +0300 Subject: [PATCH 241/333] Cleanup 00626_replace_partition_from_table_zookeeper --- .../00626_replace_partition_from_table_zookeeper.sh | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh index 5aa445858db..f5f667d084b 100755 --- a/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh +++ b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh @@ -12,19 +12,22 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function query_with_retry { - retry=0 + local query="$1" && shift + + local retry=0 until [ $retry -ge 5 ] do - result=$($CLICKHOUSE_CLIENT $2 --query="$1" 2>&1) + local result + result="$($CLICKHOUSE_CLIENT "$@" --query="$query" 2>&1)" if [ "$?" 
== 0 ]; then echo -n "$result" return else - retry=$(($retry + 1)) + retry=$((retry + 1)) sleep 3 fi done - echo "Query '$1' failed with '$result'" + echo "Query '$query' failed with '$result'" } $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS src;" @@ -139,7 +142,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE src;" $CLICKHOUSE_CLIENT --query="SELECT count(), sum(d), uniqExact(_part) FROM dst_r1;" $CLICKHOUSE_CLIENT --query="SYSTEM SYNC REPLICA dst_r1;" -query_with_retry "OPTIMIZE TABLE dst_r1 PARTITION 1;" "--replication_alter_partitions_sync=0 --optimize_throw_if_noop=1" +query_with_retry "OPTIMIZE TABLE dst_r1 PARTITION 1;" --replication_alter_partitions_sync=0 --optimize_throw_if_noop=1 $CLICKHOUSE_CLIENT --query="SYSTEM SYNC REPLICA dst_r1;" $CLICKHOUSE_CLIENT --query="SELECT count(), sum(d), uniqExact(_part) FROM dst_r1;" From e6b9740a6957ecf94ce92090e233d8441147c7d7 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 15 Mar 2021 08:29:05 +0300 Subject: [PATCH 242/333] Add $CLICKHOUSE_TEST_ZOOKEEPER_PREFIX (for *.sh stateless tests) --- tests/queries/shell_config.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/queries/shell_config.sh b/tests/queries/shell_config.sh index d20b5669cc5..5b942a95d02 100644 --- a/tests/queries/shell_config.sh +++ b/tests/queries/shell_config.sh @@ -5,6 +5,13 @@ export ASAN_OPTIONS=detect_odr_violation=0 export CLICKHOUSE_DATABASE=${CLICKHOUSE_DATABASE:="test"} export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL:="warning"} + +# Unique zookeeper path (based on test name and current database) to avoid overlaps +export CLICKHOUSE_TEST_PATH="${BASH_SOURCE[1]}" +CLICKHOUSE_TEST_NAME="$(basename "$CLICKHOUSE_TEST_PATH" .sh)" +export CLICKHOUSE_TEST_NAME +export CLICKHOUSE_TEST_ZOOKEEPER_PREFIX="${CLICKHOUSE_TEST_NAME}_${CLICKHOUSE_DATABASE}" + [ -v CLICKHOUSE_CONFIG_CLIENT ] && CLICKHOUSE_CLIENT_OPT0+=" --config-file=${CLICKHOUSE_CONFIG_CLIENT} " [ -v CLICKHOUSE_HOST ] && 
CLICKHOUSE_CLIENT_OPT0+=" --host=${CLICKHOUSE_HOST} " [ -v CLICKHOUSE_PORT_TCP ] && CLICKHOUSE_CLIENT_OPT0+=" --port=${CLICKHOUSE_PORT_TCP} " From 6c2622d4519fee3207426582c8cdafd85b204b1b Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 15 Mar 2021 08:14:28 +0300 Subject: [PATCH 243/333] Add style check for ReplicatedMergeTree path --- utils/check-style/check-style | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/utils/check-style/check-style b/utils/check-style/check-style index f8926a9af2f..db6b33a569b 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -97,6 +97,36 @@ for test_case in "${tests_with_query_log[@]}"; do grep -qE current_database.*currentDatabase "$test_case" || echo "Queries to system.query_log/system.query_thread_log does not have current_database = currentDatabase() condition in $test_case" done +# Queries with ReplicatedMergeTree +# NOTE: it is not that accurate, but at least something. +tests_with_replicated_merge_tree=( $( + find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' | + grep -vP $EXCLUDE_DIRS | + xargs grep --with-filename -e ReplicatedMergeTree | cut -d: -f1 | sort -u +) ) +for test_case in "${tests_with_replicated_merge_tree[@]}"; do + case "$test_case" in + *.sh) + test_case_zk_prefix="\$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX" + grep -q -e "ReplicatedMergeTree.*$test_case_zk_prefix" "$test_case" || echo "ReplicatedMergeTree should contain '$test_case_zk_prefix' in zookeeper path to avoid overlaps ($test_case)" + ;; + *.sql) + # NOTE: *.sql is not supported because it is not possible right now, because: + # - ReplicatedMergeTree supports only ASTLiteral for zookeeper path + # (and adding support of other nodes, and evaluating them is not that easy, since zk_prefix is "optional") + # - Hence concat(currentDatabase(), 'foo') + # - Also params cannot be used, because they are wrapped with CAST() + # + # But hopefully they will not be a problem
+ # (since they do not do any "stressing" and overlap probability should be lower). + ;; + *.py) + # Right now there are no such tests anyway + echo "No ReplicatedMergeTree style check for *.py ($test_case)" + ;; + esac +done + # All the submodules should be from https://github.com/ find $ROOT_PATH -name '.gitmodules' | while read i; do grep -F 'url = ' $i | grep -v -F 'https://github.com/' && echo 'All the submodules should be from https://github.com/'; done From 05a8c73eb948ad2e225af3e69ba7c493d2ae4b8a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 15 Mar 2021 07:51:28 +0300 Subject: [PATCH 244/333] Do not overlap zookeeper path for ReplicatedMergeTree in stateless *.sh tests Found with stress tests for 00626_replace_partition_from_table_zookeeper [1]: 2021.03.15 00:59:48.200106 [ 27417 ] {0f47dbeb-938a-4560-8408-a7cc7b3bafb8} ContextAccess (default): Access granted: CREATE TABLE ON test_31.dst_r1 ... 2021.03.15 00:59:48.403227 [ 27417 ] {0f47dbeb-938a-4560-8408-a7cc7b3bafb8} test_31.dst_r1: This table /clickhouse/test_00626/dst_1 is already created, will add new replica 2021.03.15 00:59:48.736450 [ 83006 ] {b2db1355-3ec3-4e3a-9c79-f93f27c6e658} ContextAccess (default): Access granted: CREATE TABLE ON test_31.dst_r2 ... 2021.03.15 00:59:48.851768 [ 83006 ] {b2db1355-3ec3-4e3a-9c79-f93f27c6e658} test_31.dst_r2: This table /clickhouse/test_00626/dst_1 is already created, will add new replica ... 2021.03.15 00:59:48.919059 [ 366 ] {} test_31.dst_r2 (ReplicatedMergeTreeQueue): Loading queue from /clickhouse/test_00626/dst_1/replicas/2/queue 2021.03.15 00:59:48.919948 [ 366 ] {} test_31.dst_r2 (ReplicatedMergeTreeQueue): Having 3 queue entries to load, 0 entries already loaded. 2021.03.15 00:59:48.921833 [ 366 ] {} test_31.dst_r2 (ReplicatedMergeTreeQueue): Loaded queue ... 2021.03.15 00:59:51.904230 [ 246952 ] {59753eea-3896-45ca-8625-fdaa094ee9ef} ContextAccess (default): Access granted: SYSTEM SYNC REPLICA ON test_31.dst_r2 ...
2021.03.15 01:04:51.913683 [ 246952 ] {59753eea-3896-45ca-8625-fdaa094ee9ef} InterpreterSystemQuery: SYNC REPLICA test_31.dst_r2: Timed out! [1]: https://clickhouse-test-reports.s3.yandex.net/21716/402bf77783cbda48a9ee1b748bfce3c52ef8fe11/stress_test_(memory)/test_run.txt.out.log But the problem is more generic, so fix all tests. --- .../00029_test_zookeeper_optimize_exception.sh | 3 +-- ...ear_column_in_partition_concurrent_zookeeper.sh | 4 ++-- ...00626_replace_partition_from_table_zookeeper.sh | 4 ++-- ...terialized_view_and_too_many_parts_zookeeper.sh | 10 +++++----- ...licated_mutations_default_database_zookeeper.sh | 2 +- .../00652_replicated_mutations_zookeeper.sh | 8 ++++---- ...00715_fetch_merged_or_mutated_part_zookeeper.sh | 4 ++-- .../00834_kill_mutation_replicated_zookeeper.sh | 4 ++-- .../00953_zookeeper_suetin_deduplication_bug.sh | 14 +++++++------- .../00975_indices_mutation_replicated_zookeeper.sh | 4 ++-- .../00992_system_parts_race_condition_zookeeper.sh | 4 ++-- ...3_system_parts_race_condition_drop_zookeeper.sh | 2 +- .../01013_sync_replica_timeout_zookeeper.sh | 5 ++--- ...ns_with_nondeterministic_functions_zookeeper.sh | 5 ++--- .../01034_move_partition_from_table_zookeeper.sh | 12 ++++++------ ...ncurrent_move_partition_from_table_zookeeper.sh | 4 ++-- ..._zookeeper_system_mutations_with_parts_names.sh | 2 +- .../01076_parallel_alter_replicated_zookeeper.sh | 2 +- .../01079_bad_alters_zookeeper.reference | 4 ++-- .../0_stateless/01079_bad_alters_zookeeper.sh | 2 +- ...079_parallel_alter_add_drop_column_zookeeper.sh | 2 +- .../01079_parallel_alter_detach_table_zookeeper.sh | 2 +- .../01079_parallel_alter_modify_zookeeper.sh | 2 +- .../01103_optimize_drop_race_zookeeper.sh | 2 +- ...8_restart_replicas_rename_deadlock_zookeeper.sh | 2 +- .../0_stateless/01192_rename_database_zookeeper.sh | 2 +- .../01213_alter_rename_column_zookeeper.reference | 4 ++-- .../01213_alter_rename_column_zookeeper.sh | 2 +- .../01305_replica_create_drop_zookeeper.sh 
| 4 ++-- .../01307_multiple_leaders_zookeeper.sh | 2 +- .../01318_long_unsuccessful_mutation_zookeeper.sh | 2 +- .../01320_create_sync_race_condition_zookeeper.sh | 2 +- ...01338_long_select_and_alter_zookeeper.reference | 2 +- .../01338_long_select_and_alter_zookeeper.sh | 2 +- ...396_inactive_replica_cleanup_nodes_zookeeper.sh | 12 ++++++------ .../01414_mutations_and_errors_zookeeper.sh | 2 +- .../01417_freeze_partition_verbose_zookeeper.sh | 2 +- .../0_stateless/01459_manual_write_to_replicas.sh | 2 +- .../01459_manual_write_to_replicas_quorum.sh | 2 +- .../01508_race_condition_rename_clear_zookeeper.sh | 2 +- .../01509_check_many_parallel_quorum_inserts.sh | 2 +- .../01509_check_parallel_quorum_inserts.sh | 2 +- .../0_stateless/01509_parallel_quorum_and_merge.sh | 4 ++-- ...01593_concurrent_alter_mutations_kill.reference | 2 +- .../01593_concurrent_alter_mutations_kill.sh | 2 +- ...nt_alter_mutations_kill_many_replicas.reference | 10 +++++----- ...oncurrent_alter_mutations_kill_many_replicas.sh | 6 +++--- .../queries/0_stateless/01671_ddl_hang_timeout.sh | 2 +- .../01753_system_zookeeper_query_param_path.sh | 4 ++-- 49 files changed, 92 insertions(+), 95 deletions(-) diff --git a/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh b/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh index 86f1d1f161c..3360b8da83d 100755 --- a/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh +++ b/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh @@ -4,12 +4,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh - ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test_optimize_exception" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test_optimize_exception_replicated" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_optimize_exception (date Date) ENGINE=MergeTree() PARTITION BY toYYYYMM(date) ORDER BY date" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_optimize_exception_replicated (date Date) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_00029/optimize', 'r1') PARTITION BY toYYYYMM(date) ORDER BY date" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_optimize_exception_replicated (date Date) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/optimize', 'r1') PARTITION BY toYYYYMM(date) ORDER BY date" ${CLICKHOUSE_CLIENT} --query="INSERT INTO test_optimize_exception VALUES (toDate('2017-09-09')), (toDate('2017-09-10'))" ${CLICKHOUSE_CLIENT} --query="INSERT INTO test_optimize_exception VALUES (toDate('2017-09-09')), (toDate('2017-09-10'))" diff --git a/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh b/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh index 60de1822318..5c5ecd4564b 100755 --- a/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh +++ b/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh @@ -8,8 +8,8 @@ ch="$CLICKHOUSE_CLIENT --stacktrace -q" $ch "DROP TABLE IF EXISTS clear_column1" $ch "DROP TABLE IF EXISTS clear_column2" -$ch "CREATE TABLE clear_column1 (d Date, i Int64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column_concurrent', '1', d, d, 8192)" -$ch "CREATE TABLE clear_column2 (d Date, i Int64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column_concurrent', '2', d, d, 8192)" +$ch "CREATE TABLE clear_column1 (d Date, i Int64, s String) ENGINE = 
ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/tables/clear_column_concurrent', '1', d, d, 8192)" +$ch "CREATE TABLE clear_column2 (d Date, i Int64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/tables/clear_column_concurrent', '2', d, d, 8192)" $ch "ALTER TABLE clear_column1 CLEAR COLUMN VasyaUnexistingColumn IN PARTITION '200001'" --replication_alter_partitions_sync=2 1>/dev/null 2>/dev/null rc=$? diff --git a/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh index f5f667d084b..443f2856c88 100755 --- a/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh +++ b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh @@ -35,8 +35,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst_r1;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst_r2;" $CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY k;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r1 (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00626/dst_1', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r2 (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00626/dst_1', '2') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r1 (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst_1', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r2 (p UInt64, k String, d UInt64) ENGINE = 
ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst_1', '2') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (0, '0', 1);" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (1, '0', 1);" diff --git a/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh b/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh index 817da08bfa0..def8e8f4cfe 100755 --- a/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh +++ b/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh @@ -10,10 +10,10 @@ ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS a" ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS b" ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS c" -${CLICKHOUSE_CLIENT} --query "CREATE TABLE root (d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00633/root', '1') ORDER BY d" -${CLICKHOUSE_CLIENT} --query "CREATE MATERIALIZED VIEW a (d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00633/a', '1') ORDER BY d AS SELECT * FROM root" -${CLICKHOUSE_CLIENT} --query "CREATE MATERIALIZED VIEW b (d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00633/b', '1') ORDER BY d SETTINGS parts_to_delay_insert=1, parts_to_throw_insert=1 AS SELECT * FROM root" -${CLICKHOUSE_CLIENT} --query "CREATE MATERIALIZED VIEW c (d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00633/c', '1') ORDER BY d AS SELECT * FROM root" +${CLICKHOUSE_CLIENT} --query "CREATE TABLE root (d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root', '1') ORDER BY d" +${CLICKHOUSE_CLIENT} --query "CREATE MATERIALIZED VIEW a (d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/a', '1') ORDER BY d AS SELECT * FROM root" +${CLICKHOUSE_CLIENT} --query "CREATE MATERIALIZED VIEW b 
(d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/b', '1') ORDER BY d SETTINGS parts_to_delay_insert=1, parts_to_throw_insert=1 AS SELECT * FROM root" +${CLICKHOUSE_CLIENT} --query "CREATE MATERIALIZED VIEW c (d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/c', '1') ORDER BY d AS SELECT * FROM root" ${CLICKHOUSE_CLIENT} --query "INSERT INTO root VALUES (1)"; ${CLICKHOUSE_CLIENT} --query "SELECT _table, d FROM merge('${CLICKHOUSE_DATABASE}', '^[abc]\$') ORDER BY _table" @@ -33,7 +33,7 @@ ${CLICKHOUSE_CLIENT} --query "DROP TABLE c" # Deduplication check for non-replicated root table echo ${CLICKHOUSE_CLIENT} --query "CREATE TABLE root (d UInt64) ENGINE = Null" -${CLICKHOUSE_CLIENT} --query "CREATE MATERIALIZED VIEW d (d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00633/d', '1') ORDER BY d AS SELECT * FROM root" +${CLICKHOUSE_CLIENT} --query "CREATE MATERIALIZED VIEW d (d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/d', '1') ORDER BY d AS SELECT * FROM root" ${CLICKHOUSE_CLIENT} --query "INSERT INTO root VALUES (1)"; ${CLICKHOUSE_CLIENT} --query "INSERT INTO root VALUES (1)"; ${CLICKHOUSE_CLIENT} --query "SELECT * FROM d"; diff --git a/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh index 02f552c250d..58295e17790 100755 --- a/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh +++ b/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh @@ -11,7 +11,7 @@ ${CLICKHOUSE_CLIENT} --multiquery << EOF DROP TABLE IF EXISTS mutations_r1; DROP TABLE IF EXISTS for_subquery; -CREATE TABLE mutations_r1(x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/${CLICKHOUSE_DATABASE}/mutations', 'r1') ORDER BY x; +CREATE TABLE mutations_r1(x UInt32, y UInt32) ENGINE 
ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/mutations', 'r1') ORDER BY x; INSERT INTO mutations_r1 VALUES (123, 1), (234, 2), (345, 3); CREATE TABLE for_subquery(x UInt32) ENGINE TinyLog; diff --git a/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh index 08a39c58c3e..3ec6e4e3e90 100755 --- a/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh +++ b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh @@ -10,8 +10,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS mutations_r1" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS mutations_r2" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_r1(d Date, x UInt32, s String, m MATERIALIZED x + 2) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00652/mutations', 'r1', d, intDiv(x, 10), 8192)" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_r2(d Date, x UInt32, s String, m MATERIALIZED x + 2) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00652/mutations', 'r2', d, intDiv(x, 10), 8192)" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_r1(d Date, x UInt32, s String, m MATERIALIZED x + 2) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/mutations', 'r1', d, intDiv(x, 10), 8192)" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_r2(d Date, x UInt32, s String, m MATERIALIZED x + 2) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/mutations', 'r2', d, intDiv(x, 10), 8192)" # Test a mutation on empty table ${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_r1 DELETE WHERE x = 1 SETTINGS mutations_sync = 2" @@ -51,11 +51,11 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS mutations_cleaner_r1" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS mutations_cleaner_r2" # Create 2 replicas with finished_mutations_to_keep = 2 
-${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_cleaner_r1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00652/mutations_cleaner', 'r1') ORDER BY x SETTINGS \ +${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_cleaner_r1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/mutations_cleaner', 'r1') ORDER BY x SETTINGS \ finished_mutations_to_keep = 2, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_cleaner_r2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00652/mutations_cleaner', 'r2') ORDER BY x SETTINGS \ +${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_cleaner_r2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/mutations_cleaner', 'r2') ORDER BY x SETTINGS \ finished_mutations_to_keep = 2, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0" diff --git a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh index 54b6c80f2ac..48833d2643c 100755 --- a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh +++ b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh @@ -11,8 +11,8 @@ ${CLICKHOUSE_CLIENT} -n --query=" DROP TABLE IF EXISTS fetches_r1; DROP TABLE IF EXISTS fetches_r2" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE fetches_r1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00715/fetches', 'r1') ORDER BY x" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE fetches_r2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00715/fetches', 'r2') ORDER BY x \ +${CLICKHOUSE_CLIENT} --query="CREATE TABLE fetches_r1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/fetches', 'r1') ORDER BY x" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE fetches_r2(x UInt32) ENGINE 
ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/fetches', 'r2') ORDER BY x \ SETTINGS prefer_fetch_merged_part_time_threshold=0, \ prefer_fetch_merged_part_size_threshold=0" diff --git a/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh index d1f938f73fe..92ab6814235 100755 --- a/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh +++ b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh @@ -10,8 +10,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS kill_mutation_r1" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS kill_mutation_r2" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE kill_mutation_r1(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00834/kill_mutation', '1') ORDER BY x PARTITION BY d" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE kill_mutation_r2(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00834/kill_mutation', '2') ORDER BY x PARTITION BY d" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE kill_mutation_r1(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/kill_mutation', '1') ORDER BY x PARTITION BY d" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE kill_mutation_r2(d Date, x UInt32, s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/kill_mutation', '2') ORDER BY x PARTITION BY d" ${CLICKHOUSE_CLIENT} --query="INSERT INTO kill_mutation_r1 VALUES ('2000-01-01', 1, 'a')" ${CLICKHOUSE_CLIENT} --query="INSERT INTO kill_mutation_r1 VALUES ('2001-01-01', 2, 'b')" diff --git a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh index bbc2d957937..d7a27693e98 100755 --- 
a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh +++ b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh @@ -15,7 +15,7 @@ CREATE TABLE elog ( engine_id UInt32, referrer String ) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00953/elog', 'test') +ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog', 'test') PARTITION BY date ORDER BY (engine_id) SETTINGS replicated_deduplication_window = 2, cleanup_delay_period=4, cleanup_delay_period_random_add=0;" @@ -28,35 +28,35 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 3, 'h $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 3 rows -count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/test_00953/elog/blocks'") +count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/blocks'") while [[ $count != 2 ]] do sleep 1 - count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/test_00953/elog/blocks'") + count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/blocks'") done $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 1, 'hello')" $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 4 rows -count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/test_00953/elog/blocks'") +count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/blocks'") while [[ $count != 2 ]] do sleep 1 - count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/test_00953/elog/blocks'") + count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) 
FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/blocks'") done $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 2, 'hello')" $CLICKHOUSE_CLIENT --query="SELECT count(*) from elog" # 5 rows -count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/test_00953/elog/blocks'") +count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/blocks'") while [[ $count != 2 ]] do sleep 1 - count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/test_00953/elog/blocks'") + count=$($CLICKHOUSE_CLIENT --query="SELECT COUNT(*) FROM system.zookeeper where path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/blocks'") done $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 2, 'hello')" diff --git a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh index 81c0c563db1..a3ac5692caa 100755 --- a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh +++ b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh @@ -17,7 +17,7 @@ CREATE TABLE indices_mutaions1 i64 Int64, i32 Int32, INDEX idx (i64, u64 * i64) TYPE minmax GRANULARITY 1 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00975/indices_mutaions', 'r1') +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/indices_mutaions', 'r1') PARTITION BY i32 ORDER BY u64 SETTINGS index_granularity = 2; @@ -27,7 +27,7 @@ CREATE TABLE indices_mutaions2 i64 Int64, i32 Int32, INDEX idx (i64, u64 * i64) TYPE minmax GRANULARITY 1 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00975/indices_mutaions', 'r2') +) ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/indices_mutaions', 'r2') PARTITION BY i32 ORDER BY u64 SETTINGS index_granularity = 2;" diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh index 613e032f42a..1e61c8d64f3 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh @@ -10,8 +10,8 @@ $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS alter_table; DROP TABLE IF EXISTS alter_table2; - CREATE TABLE alter_table (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_DATABASE.alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0; - CREATE TABLE alter_table2 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_DATABASE.alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0 + CREATE TABLE alter_table (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0; + CREATE TABLE alter_table2 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, 
cleanup_delay_period_random_add = 0 " function thread1() diff --git a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh index 1731148f71f..d960d8ff91d 100755 --- a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh +++ b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh @@ -52,7 +52,7 @@ function thread6() while true; do REPLICA=$(($RANDOM % 10)) $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS alter_table_$REPLICA; - CREATE TABLE alter_table_$REPLICA (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00993/alter_table', 'r_$REPLICA') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0;"; + CREATE TABLE alter_table_$REPLICA (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$REPLICA') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0;"; sleep 0.$RANDOM; done } diff --git a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh index 724caa7f414..89b178a38ea 100755 --- a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh +++ b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh @@ -4,7 +4,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh - R1=table_1013_1 R2=table_1013_2 @@ -12,8 +11,8 @@ ${CLICKHOUSE_CLIENT} -n -q " DROP TABLE IF EXISTS $R1; DROP TABLE IF EXISTS $R2; - CREATE TABLE $R1 (x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/${CLICKHOUSE_DATABASE}.table_1013', 'r1') ORDER BY x; - CREATE TABLE $R2 (x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/${CLICKHOUSE_DATABASE}.table_1013', 'r2') ORDER BY x; + CREATE TABLE $R1 (x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table_1013', 'r1') ORDER BY x; + CREATE TABLE $R2 (x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table_1013', 'r2') ORDER BY x; SYSTEM STOP FETCHES $R2; INSERT INTO $R1 VALUES (1) diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh index d7d0dab71b9..a10e5fb2788 100755 --- a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh +++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh @@ -4,7 +4,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh - R1=table_1017_1 R2=table_1017_2 T1=table_1017_merge @@ -29,8 +28,8 @@ ${CLICKHOUSE_CLIENT} -n -q " CREATE TABLE lookup_table (y UInt32, y_new UInt32) ENGINE = Join(ANY, LEFT, y); INSERT INTO lookup_table VALUES(1,1001),(2,1002); - CREATE TABLE $R1 (x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/${CLICKHOUSE_DATABASE}.table_1017', 'r1') ORDER BY x; - CREATE TABLE $R2 (x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/${CLICKHOUSE_DATABASE}.table_1017', 'r2') ORDER BY x; + CREATE TABLE $R1 (x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table_1017', 'r1') ORDER BY x; + CREATE TABLE $R2 (x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table_1017', 'r2') ORDER BY x; CREATE TABLE $T1 (x UInt32, y UInt32) ENGINE MergeTree() ORDER BY x; INSERT INTO $R1 VALUES (0, 1)(1, 2)(2, 3)(3, 4); diff --git a/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh index 11c7296932c..ae3dd7851c8 100755 --- a/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh +++ b/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh @@ -26,8 +26,8 @@ function query_with_retry $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS src;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_DATABASE/src1', '1') PARTITION BY p ORDER BY k;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_DATABASE/dst1', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE 
= ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/src1', '1') PARTITION BY p ORDER BY k;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst1', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (0, '0', 1);" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (1, '0', 1);" @@ -56,8 +56,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE dst;" $CLICKHOUSE_CLIENT --query="SELECT 'MOVE incompatible schema missing column';" -$CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_DATABASE/src2', '1') PARTITION BY p ORDER BY (d, p);" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_DATABASE/dst2', '1') PARTITION BY p ORDER BY (d, p) SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/src2', '1') PARTITION BY p ORDER BY (d, p);" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst2', '1') PARTITION BY p ORDER BY (d, p) SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (0, '0', 1);" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (1, '0', 1);" @@ -75,8 +75,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE dst;" $CLICKHOUSE_CLIENT --query="SELECT 'MOVE incompatible schema different order by';" -$CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = 
ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_DATABASE/src3', '1') PARTITION BY p ORDER BY (p, k, d);" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_DATABASE/dst3', '1') PARTITION BY p ORDER BY (d, k, p);" +$CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/src3', '1') PARTITION BY p ORDER BY (p, k, d);" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst3', '1') PARTITION BY p ORDER BY (d, k, p);" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (0, '0', 1);" diff --git a/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh index 4eb3cb9a7bd..7c15b795c36 100755 --- a/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh +++ b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh @@ -9,8 +9,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS src;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_DATABASE/src', '1') PARTITION BY p ORDER BY k;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_DATABASE/dst', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/src', '1') PARTITION BY p ORDER BY k;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String) ENGINE = 
ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" function thread1() { diff --git a/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh b/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh index c035a692d12..6510fcf408d 100755 --- a/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh +++ b/tests/queries/0_stateless/01045_zookeeper_system_mutations_with_parts_names.sh @@ -47,7 +47,7 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS table_for_mutations" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS replicated_table_for_mutations" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE replicated_table_for_mutations(k UInt32, v1 UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_01045/replicated_table_for_mutations', '1') ORDER BY k PARTITION BY modulo(k, 2)" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE replicated_table_for_mutations(k UInt32, v1 UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/replicated_table_for_mutations', '1') ORDER BY k PARTITION BY modulo(k, 2)" ${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES replicated_table_for_mutations" diff --git a/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh index ca453ee8f0d..efe518046a1 100755 --- a/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh +++ b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh @@ -20,7 +20,7 @@ for i in $(seq $REPLICAS); do done for i in $(seq $REPLICAS); do - $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_mutate_mt_$i (key UInt64, value1 UInt64, value2 String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01076/concurrent_mutate_mt', '$i') ORDER BY key SETTINGS 
max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000,temporary_directories_lifetime=10,cleanup_delay_period=3,cleanup_delay_period_random_add=0" + $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_mutate_mt_$i (key UInt64, value1 UInt64, value2 String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_mutate_mt', '$i') ORDER BY key SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000,temporary_directories_lifetime=10,cleanup_delay_period=3,cleanup_delay_period_random_add=0" done $CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_mutate_mt_1 SELECT number, number + 10, toString(number) from numbers(10)" diff --git a/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference b/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference index ebefe4b2a29..67510b28a34 100644 --- a/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference +++ b/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference @@ -1,6 +1,6 @@ Wrong column name. 
-CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64,\n `value1` UInt8,\n `value2` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01079/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64,\n `value1` UInt8,\n `value2` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01079/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64,\n `value1` UInt8,\n `value2` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01079_bad_alters_zookeeper_default/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64,\n `value1` UInt8,\n `value2` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01079_bad_alters_zookeeper_default/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 syntax error at begin of string. 
7 Hello diff --git a/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh b/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh index 1c0206453b7..6452b830f38 100755 --- a/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh +++ b/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh @@ -10,7 +10,7 @@ $CLICKHOUSE_CLIENT -n --query "CREATE TABLE table_for_bad_alters ( key UInt64, value1 UInt8, value2 String -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01079/table_for_bad_alters', '1') +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table_for_bad_alters', '1') ORDER BY key;" $CLICKHOUSE_CLIENT --query "INSERT INTO table_for_bad_alters VALUES(1, 1, 'Hello');" diff --git a/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh index b3a5de8f9bc..7a3e3cf155f 100755 --- a/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh +++ b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh @@ -12,7 +12,7 @@ done for i in $(seq $REPLICAS); do - $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_alter_add_drop_$i (key UInt64, value0 UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01079/concurrent_alter_add_drop_column', '$i') ORDER BY key SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000" + $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_alter_add_drop_$i (key UInt64, value0 UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_alter_add_drop_column', '$i') ORDER BY key SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000" done $CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_alter_add_drop_1 SELECT number, number + 10 from numbers(100000)" 
diff --git a/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh index d5f0c987e5d..83f3196253a 100755 --- a/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh +++ b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh @@ -11,7 +11,7 @@ for i in $(seq $REPLICAS); do done for i in $(seq $REPLICAS); do - $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_alter_detach_$i (key UInt64, value1 UInt8, value2 UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01079/concurrent_alter_detach', '$i') ORDER BY key SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000,temporary_directories_lifetime=10,cleanup_delay_period=3,cleanup_delay_period_random_add=0" + $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_alter_detach_$i (key UInt64, value1 UInt8, value2 UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_alter_detach', '$i') ORDER BY key SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000,temporary_directories_lifetime=10,cleanup_delay_period=3,cleanup_delay_period_random_add=0" done $CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_alter_detach_1 SELECT number, number + 10, number from numbers(10)" diff --git a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh index 5b14c5a8543..9cca73b5eef 100755 --- a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh +++ b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh @@ -11,7 +11,7 @@ for i in $(seq $REPLICAS); do done for i in $(seq $REPLICAS); do - $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_alter_mt_$i (key UInt64, value1 UInt64, value2 
Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01079/concurrent_alter_mt', '$i') ORDER BY key SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000" + $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_alter_mt_$i (key UInt64, value1 UInt64, value2 Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_alter_mt', '$i') ORDER BY key SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000" done $CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_alter_mt_1 SELECT number, number + 10, number from numbers(10)" diff --git a/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh index 287a63f858b..72459e21b69 100755 --- a/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh +++ b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh @@ -26,7 +26,7 @@ function thread3() { while true; do $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS concurrent_optimize_table; - CREATE TABLE concurrent_optimize_table (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01103/concurrent_optimize_table', '1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0;"; + CREATE TABLE concurrent_optimize_table (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_optimize_table', '1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0;"; sleep 0.$RANDOM; sleep 0.$RANDOM; sleep 0.$RANDOM; diff --git 
a/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh b/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh index cddf1ebcda6..d564594291a 100755 --- a/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh +++ b/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) for i in $(seq 4); do $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS replica_01108_$i" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS replica_01108_${i}_tmp" - $CLICKHOUSE_CLIENT -q "CREATE TABLE replica_01108_$i (n int) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01108/replica_01108_$i', 'replica') ORDER BY tuple()" + $CLICKHOUSE_CLIENT -q "CREATE TABLE replica_01108_$i (n int) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/replica_01108_$i', 'replica') ORDER BY tuple()" $CLICKHOUSE_CLIENT -q "INSERT INTO replica_01108_$i SELECT * FROM system.numbers LIMIT $i * 10, 10" done diff --git a/tests/queries/0_stateless/01192_rename_database_zookeeper.sh b/tests/queries/0_stateless/01192_rename_database_zookeeper.sh index 90b9baf4ebf..58bdfbf71ad 100755 --- a/tests/queries/0_stateless/01192_rename_database_zookeeper.sh +++ b/tests/queries/0_stateless/01192_rename_database_zookeeper.sh @@ -36,7 +36,7 @@ $CLICKHOUSE_CLIENT -q "SELECT count(n), sum(n) FROM test_01192_renamed.mt" # 5. 
check moving tables from Ordinary to Atomic (can be used to "alter" database engine) $CLICKHOUSE_CLIENT --default_database_engine=Ordinary -q "CREATE DATABASE test_01192" $CLICKHOUSE_CLIENT -q "CREATE TABLE test_01192.mt AS test_01192_renamed.mt ENGINE=MergeTree ORDER BY n" -$CLICKHOUSE_CLIENT -q "CREATE TABLE test_01192.rmt AS test_01192_renamed.mt ENGINE=ReplicatedMergeTree('/test/01192/', '1') ORDER BY n" +$CLICKHOUSE_CLIENT -q "CREATE TABLE test_01192.rmt AS test_01192_renamed.mt ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/', '1') ORDER BY n" $CLICKHOUSE_CLIENT -q "CREATE MATERIALIZED VIEW test_01192.mv TO test_01192.rmt AS SELECT * FROM test_01192.mt" $CLICKHOUSE_CLIENT -q "INSERT INTO test_01192.mt SELECT number FROM numbers(10)" && echo "inserted" diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference index fc2a74d1a93..35385731ad3 100644 --- a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference +++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference @@ -1,7 +1,7 @@ 1 -CREATE TABLE default.table_for_rename_replicated\n(\n `date` Date,\n `key` UInt64,\n `value1` String,\n `value2` String,\n `value3` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01213/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_replicated\n(\n `date` Date,\n `key` UInt64,\n `value1` String,\n `value2` String,\n `value3` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01213_alter_rename_column_zookeeper_default/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 renamed_value1 -CREATE TABLE default.table_for_rename_replicated\n(\n `date` Date,\n `key` UInt64,\n `renamed_value1` String,\n `value2` String,\n `value3` String\n)\nENGINE = 
ReplicatedMergeTree(\'/clickhouse/tables/test_01213/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_replicated\n(\n `date` Date,\n `key` UInt64,\n `renamed_value1` String,\n `value2` String,\n `value3` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01213_alter_rename_column_zookeeper_default/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 1 date key renamed_value1 value2 value3 2019-10-02 1 1 1 1 diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh index 5ab0e800d39..5da8de70c46 100755 --- a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh +++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh @@ -15,7 +15,7 @@ CREATE TABLE table_for_rename_replicated value2 String, value3 String ) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01213/table_for_rename_replicated', '1') +ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table_for_rename_replicated', '1') PARTITION BY date ORDER BY key; " diff --git a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh index 5dd3d2b38d6..01bb9af461c 100755 --- a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh +++ b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh @@ -13,13 +13,13 @@ function thread() # Ignore "Replica already exists" exception while true; do $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS test_table_$1 NO DELAY; - CREATE TABLE test_table_$1 (a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01305/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 | + CREATE TABLE test_table_$1 (a UInt8) ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 | grep -vP '(^$)|(^Received exception from server)|(^\d+\. )|because the last replica of the table was dropped right now|is already started to be removing by another replica right now|is already finished removing by another replica right now|Removing leftovers from table|Another replica was suddenly created|was successfully removed from ZooKeeper|was created by another server at the same moment|was suddenly removed|some other replicas were created at the same time|already exists' done else while true; do $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS test_table_$1; - CREATE TABLE test_table_$1 (a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01305/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 | + CREATE TABLE test_table_$1 (a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 | grep -vP '(^$)|(^Received exception from server)|(^\d+\. 
)|because the last replica of the table was dropped right now|is already started to be removing by another replica right now|is already finished removing by another replica right now|Removing leftovers from table|Another replica was suddenly created|was successfully removed from ZooKeeper|was created by another server at the same moment|was suddenly removed|some other replicas were created at the same time' done fi diff --git a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh index 24c6199a94a..21fc88d7c2d 100755 --- a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh +++ b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh @@ -12,7 +12,7 @@ DATA_SIZE=200 SEQ=$(seq 0 $(($NUM_REPLICAS - 1))) for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "DROP TABLE IF EXISTS r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE r$REPLICA (x UInt64) ENGINE = ReplicatedMergeTree('/test_01307/table', 'r$REPLICA') ORDER BY x SETTINGS min_bytes_for_wide_part = '10M';"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE r$REPLICA (x UInt64) ENGINE = ReplicatedMergeTree('/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r$REPLICA') ORDER BY x SETTINGS min_bytes_for_wide_part = '10M';"; done function thread() { diff --git a/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh b/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh index ced668e9849..a05304c670c 100755 --- a/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh +++ b/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT --query " key UInt64, value String ) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01318/mutation_table', '1') + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/mutation_table', '1') ORDER BY key 
PARTITION BY key % 10 " diff --git a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh index a15d8c8d2cd..97c200c651f 100755 --- a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh +++ b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT --query "CREATE DATABASE test_01320 ENGINE=Ordinary" # Diff function thread1() { - while true; do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test_01320/table', 'r') ORDER BY x; DROP TABLE test_01320.r;"; done + while true; do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x; DROP TABLE test_01320.r;"; done } function thread2() diff --git a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference index e7db2788824..b4ed8efab63 100644 --- a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference +++ b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference @@ -1,3 +1,3 @@ 10 5 -CREATE TABLE default.alter_mt\n(\n `key` UInt64,\n `value` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01338/alter_mt\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_mt\n(\n `key` UInt64,\n `value` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01338_long_select_and_alter_zookeeper_default/alter_mt\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh index d990a8a1c08..4aeecc7343d 100755 --- a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh +++ 
b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS alter_mt" -$CLICKHOUSE_CLIENT --query "CREATE TABLE alter_mt (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01338/alter_mt', '1') ORDER BY key" +$CLICKHOUSE_CLIENT --query "CREATE TABLE alter_mt (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_mt', '1') ORDER BY key" $CLICKHOUSE_CLIENT --query "INSERT INTO alter_mt SELECT number, toString(number) FROM numbers(5)" diff --git a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh index 30b2b665658..b604ace85cc 100755 --- a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh +++ b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh @@ -12,8 +12,8 @@ SCALE=5000 $CLICKHOUSE_CLIENT -n --query " DROP TABLE IF EXISTS r1; DROP TABLE IF EXISTS r2; - CREATE TABLE r1 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01396/r', '1') ORDER BY x SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 1, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; - CREATE TABLE r2 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01396/r', '2') ORDER BY x SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 1, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; + CREATE TABLE r1 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', '1') ORDER BY x SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 1, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; + CREATE TABLE r2 (x UInt64) 
ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', '2') ORDER BY x SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 1, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; DETACH TABLE r2; " @@ -29,16 +29,16 @@ for _ in {1..60}; do done -$CLICKHOUSE_CLIENT --query "SELECT numChildren < $((SCALE / 4)) FROM system.zookeeper WHERE path = '/clickhouse/tables/test_01396/r' AND name = 'log'"; +$CLICKHOUSE_CLIENT --query "SELECT numChildren < $((SCALE / 4)) FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r' AND name = 'log'"; echo -e '\n---\n'; -$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/test_01396/r/replicas/1' AND name = 'is_lost'"; -$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/test_01396/r/replicas/2' AND name = 'is_lost'"; +$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r/replicas/1' AND name = 'is_lost'"; +$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r/replicas/2' AND name = 'is_lost'"; echo -e '\n---\n'; $CLICKHOUSE_CLIENT --query "ATTACH TABLE r2" $CLICKHOUSE_CLIENT --receive_timeout 600 --query "SYSTEM SYNC REPLICA r2" # Need to increase timeout, otherwise it timed out in debug build -$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/test_01396/r/replicas/2' AND name = 'is_lost'"; +$CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r/replicas/2' AND name = 'is_lost'"; $CLICKHOUSE_CLIENT -n --query " DROP TABLE IF EXISTS r1; diff --git a/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh 
b/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh index ceeeed41049..6e1a6e01757 100755 --- a/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh +++ b/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh @@ -12,7 +12,7 @@ $CLICKHOUSE_CLIENT --query " key UInt64, value String ) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01414/mutation_table', '1') + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/mutation_table', '1') ORDER BY tuple() PARTITION BY date " diff --git a/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh b/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh index 480daeefa46..bb935a950ff 100755 --- a/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh +++ b/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh @@ -11,7 +11,7 @@ FREEZE_OUT_STRUCTURE='backup_name String, backup_path String , part_backup_path # setup ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS table_for_freeze_replicated;" -${CLICKHOUSE_CLIENT} --query "CREATE TABLE table_for_freeze_replicated (key UInt64, value String) ENGINE = ReplicatedMergeTree('/test_01417/table_for_freeze_replicated', '1') ORDER BY key PARTITION BY key % 10;" +${CLICKHOUSE_CLIENT} --query "CREATE TABLE table_for_freeze_replicated (key UInt64, value String) ENGINE = ReplicatedMergeTree('/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table_for_freeze_replicated', '1') ORDER BY key PARTITION BY key % 10;" ${CLICKHOUSE_CLIENT} --query "INSERT INTO table_for_freeze_replicated SELECT number, toString(number) from numbers(10);" ${CLICKHOUSE_CLIENT} --query "ALTER TABLE table_for_freeze_replicated FREEZE WITH NAME 'test_01417' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1;" \ diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh index 467c29d3d33..cf239fd7032 100755 --- 
a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh @@ -11,7 +11,7 @@ NUM_REPLICAS=10 for i in $(seq 1 $NUM_REPLICAS); do $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS r$i; - CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/01459_manual_write_ro_replicas/r', 'r$i') ORDER BY x; + CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " done diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh index 376ee58859e..8c322798173 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh @@ -11,7 +11,7 @@ NUM_REPLICAS=10 for i in $(seq 1 $NUM_REPLICAS); do $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS r$i; - CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/01459_manual_write_ro_replicas_quorum/r', 'r$i') ORDER BY x; + CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " done diff --git a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.sh b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.sh index 4cb4734b448..156deb60ff9 100755 --- a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.sh +++ b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.sh @@ -8,7 +8,7 @@ $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_renames0" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_renames50" -$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_renames0 (value UInt64, data String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_01508/concurrent_rename', '1') ORDER BY tuple() SETTINGS cleanup_delay_period = 
1, cleanup_delay_period_random_add = 0, min_rows_for_compact_part = 100000, min_rows_for_compact_part = 10000000, write_ahead_log_max_bytes = 1" +$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_renames0 (value UInt64, data String) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_rename', '1') ORDER BY tuple() SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, min_rows_for_compact_part = 100000, min_rows_for_compact_part = 10000000, write_ahead_log_max_bytes = 1" $CLICKHOUSE_CLIENT --query "INSERT INTO table_for_renames0 SELECT number, toString(number) FROM numbers(1000)" diff --git a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.sh b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.sh index c5ffad1c4ca..b71654e7e6c 100755 --- a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.sh +++ b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.sh @@ -11,7 +11,7 @@ NUM_REPLICAS=10 for i in $(seq 1 $NUM_REPLICAS); do $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS r$i; - CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01509/parallel_quorum_many', 'r$i') ORDER BY x; + CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_quorum_many', 'r$i') ORDER BY x; " done diff --git a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts.sh b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts.sh index 898a68d9c77..78336ea073b 100755 --- a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts.sh +++ b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts.sh @@ -12,7 +12,7 @@ NUM_INSERTS=5 for i in $(seq 1 $NUM_REPLICAS); do $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS r$i; - CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01509/parallel_quorum', 'r$i') ORDER BY x; + CREATE TABLE r$i 
(x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_quorum', 'r$i') ORDER BY x; " done diff --git a/tests/queries/0_stateless/01509_parallel_quorum_and_merge.sh b/tests/queries/0_stateless/01509_parallel_quorum_and_merge.sh index ca5f58512a3..fbeb65419ce 100755 --- a/tests/queries/0_stateless/01509_parallel_quorum_and_merge.sh +++ b/tests/queries/0_stateless/01509_parallel_quorum_and_merge.sh @@ -10,9 +10,9 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parallel_q1" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parallel_q2" -$CLICKHOUSE_CLIENT -q "CREATE TABLE parallel_q1 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01509/parallel_q', 'r1') ORDER BY tuple() SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE parallel_q1 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_q', 'r1') ORDER BY tuple() SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0" -$CLICKHOUSE_CLIENT -q "CREATE TABLE parallel_q2 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01509/parallel_q', 'r2') ORDER BY tuple() SETTINGS always_fetch_merged_part = 1" +$CLICKHOUSE_CLIENT -q "CREATE TABLE parallel_q2 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_q', 'r2') ORDER BY tuple() SETTINGS always_fetch_merged_part = 1" $CLICKHOUSE_CLIENT -q "SYSTEM STOP REPLICATION QUEUES parallel_q2" diff --git a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.reference b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.reference index 94e15c09768..4b07f533f5a 100644 --- a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.reference +++ b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.reference @@ -1,2 +1,2 @@ -CREATE TABLE 
default.concurrent_mutate_kill\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01593/concurrent_mutate_kill\', \'1\')\nPARTITION BY key % 100\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 +CREATE TABLE default.concurrent_mutate_kill\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01593_concurrent_alter_mutations_kill_default/concurrent_mutate_kill\', \'1\')\nPARTITION BY key % 100\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 499999500000 diff --git a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.sh b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.sh index 6ae103bdf6e..d40406222c2 100755 --- a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.sh +++ b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS concurrent_mutate_kill" -$CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_mutate_kill (key UInt64, value String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01593/concurrent_mutate_kill', '1') ORDER BY key PARTITION BY key % 100 SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000" +$CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_mutate_kill (key UInt64, value String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_mutate_kill', '1') ORDER BY key PARTITION BY key % 100 SETTINGS max_replicated_mutations_in_queue=1000, 
number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000" $CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_mutate_kill SELECT number, toString(number) FROM numbers(1000000)" diff --git a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.reference b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.reference index cb1eace24a2..80b9977aac7 100644 --- a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.reference +++ b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.reference @@ -4,13 +4,13 @@ 499999500000 499999500000 Metadata version on replica 1 equal with first replica, OK -CREATE TABLE default.concurrent_kill_1\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01593_concurrent_kill\', \'1\')\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 +CREATE TABLE default.concurrent_kill_1\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01593_concurrent_alter_mutations_kill_many_replicas_default\', \'1\')\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 Metadata version on replica 2 equal with first replica, OK -CREATE TABLE default.concurrent_kill_2\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01593_concurrent_kill\', \'2\')\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 +CREATE TABLE default.concurrent_kill_2\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = 
ReplicatedMergeTree(\'/clickhouse/tables/01593_concurrent_alter_mutations_kill_many_replicas_default\', \'2\')\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 Metadata version on replica 3 equal with first replica, OK -CREATE TABLE default.concurrent_kill_3\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01593_concurrent_kill\', \'3\')\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 +CREATE TABLE default.concurrent_kill_3\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01593_concurrent_alter_mutations_kill_many_replicas_default\', \'3\')\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 Metadata version on replica 4 equal with first replica, OK -CREATE TABLE default.concurrent_kill_4\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01593_concurrent_kill\', \'4\')\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 +CREATE TABLE default.concurrent_kill_4\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01593_concurrent_alter_mutations_kill_many_replicas_default\', \'4\')\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 Metadata version on replica 5 equal with first replica, OK -CREATE TABLE default.concurrent_kill_5\n(\n `key` 
UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01593_concurrent_kill\', \'5\')\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 +CREATE TABLE default.concurrent_kill_5\n(\n `key` UInt64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01593_concurrent_alter_mutations_kill_many_replicas_default\', \'5\')\nORDER BY key\nSETTINGS max_replicated_mutations_in_queue = 1000, number_of_free_entries_in_pool_to_execute_mutation = 0, max_replicated_merges_in_queue = 1000, index_granularity = 8192 499999500000 diff --git a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.sh b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.sh index bfa68328c06..4cb3fd35294 100755 --- a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.sh +++ b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.sh @@ -11,7 +11,7 @@ for i in $(seq $REPLICAS); do done for i in $(seq $REPLICAS); do - $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_kill_$i (key UInt64, value String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01593_concurrent_kill', '$i') ORDER BY key SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000" + $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_kill_$i (key UInt64, value String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX', '$i') ORDER BY key SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000" done $CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_kill_1 SELECT number, toString(number) FROM numbers(1000000)" @@ -77,9 +77,9 @@ while true; do done 
-metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/test_01593_concurrent_kill/replicas/$i/' and name = 'metadata_version'") +metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/replicas/$i/' and name = 'metadata_version'") for i in $(seq $REPLICAS); do - replica_metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/test_01593_concurrent_kill/replicas/$i/' and name = 'metadata_version'") + replica_metadata_version=$($CLICKHOUSE_CLIENT --query "SELECT value FROM system.zookeeper WHERE path = '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/replicas/$i/' and name = 'metadata_version'") if [ "$metadata_version" != "$replica_metadata_version" ]; then echo "Metadata version on replica $i differs from the first replica, FAIL" else diff --git a/tests/queries/0_stateless/01671_ddl_hang_timeout.sh b/tests/queries/0_stateless/01671_ddl_hang_timeout.sh index 2ca97e3978b..641eba2d8fa 100755 --- a/tests/queries/0_stateless/01671_ddl_hang_timeout.sh +++ b/tests/queries/0_stateless/01671_ddl_hang_timeout.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function thread_create_drop_table { while true; do REPLICA=$(($RANDOM % 10)) - $CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS t1 (x UInt64, s Array(Nullable(String))) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01671/test_01671', 'r_$REPLICA') order by x" 2>/dev/null + $CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS t1 (x UInt64, s Array(Nullable(String))) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_01671', 'r_$REPLICA') order by x" 2>/dev/null sleep 0.0$RANDOM $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS t1" done diff --git a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh 
b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh index 1f4ba412a19..d3046e73b93 100755 --- a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh +++ b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh @@ -6,9 +6,9 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test_01753"; -${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_01753 (n Int8) ENGINE=ReplicatedMergeTree('/$CLICKHOUSE_DATABASE/test_01753/test', '1') ORDER BY n" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_01753 (n Int8) ENGINE=ReplicatedMergeTree('/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_01753/test', '1') ORDER BY n" -${CLICKHOUSE_CLIENT} --query="SELECT name FROM system.zookeeper WHERE path = {path:String}" --param_path "$CLICKHOUSE_DATABASE/test_01753" +${CLICKHOUSE_CLIENT} --query="SELECT name FROM system.zookeeper WHERE path = {path:String}" --param_path "$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_01753" ${CLICKHOUSE_CLIENT} --query="DROP TABLE test_01753 SYNC"; From a03a9051e1882adbf43b8ea23c6ef7a38c92cd98 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 15 Mar 2021 21:03:24 +0300 Subject: [PATCH 245/333] Mark some tests as long From [1] and [2]: [1]: https://clickhouse-test-reports.s3.yandex.net/21724/dacb6066237b78511ad5f07cc65045b8cf4e5e7b/functional_stateless_tests_flaky_check_(address).html#fail1 [2]: https://clickhouse-test-reports.s3.yandex.net/21724/ee8ece157987f3cd8b68ca4a5b7df709f4688208/functional_stateless_tests_flaky_check_(address)/test_run.txt.out.log [3]: https://clickhouse-test-reports.s3.yandex.net/21724/e4485ce0b1d1b21c4360bbdecacd2485d1634a91/functional_stateless_tests_flaky_check_(address)/test_run.txt.out.log --- ...75_indices_mutation_replicated_zookeeper_long.reference} | 0 ... => 00975_indices_mutation_replicated_zookeeper_long.sh} | 0 ...92_system_parts_race_condition_zookeeper_long.reference} | 0 ... 
=> 00992_system_parts_race_condition_zookeeper_long.sh} | 0 ....reference => 01079_bad_alters_zookeeper_long.reference} | 4 ++-- ...ters_zookeeper.sh => 01079_bad_alters_zookeeper_long.sh} | 0 ...=> 01079_parallel_alter_modify_zookeeper_long.reference} | 0 ...per.sh => 01079_parallel_alter_modify_zookeeper_long.sh} | 0 ...08_race_condition_rename_clear_zookeeper_long.reference} | 0 ... => 01508_race_condition_rename_clear_zookeeper_long.sh} | 0 ...01509_check_many_parallel_quorum_inserts_long.reference} | 0 ....sh => 01509_check_many_parallel_quorum_inserts_long.sh} | 0 ...e => 01509_check_parallel_quorum_inserts_long.reference} | 0 ...serts.sh => 01509_check_parallel_quorum_inserts_long.sh} | 0 ...rence => 01509_parallel_quorum_and_merge_long.reference} | 0 ...and_merge.sh => 01509_parallel_quorum_and_merge_long.sh} | 0 ...eout.reference => 01671_ddl_hang_timeout_long.reference} | 0 ...1_ddl_hang_timeout.sh => 01671_ddl_hang_timeout_long.sh} | 0 ... 01753_system_zookeeper_query_param_path_long.reference} | 0 ...h.sh => 01753_system_zookeeper_query_param_path_long.sh} | 0 tests/queries/skip_list.json | 6 +++--- 21 files changed, 5 insertions(+), 5 deletions(-) rename tests/queries/0_stateless/{00975_indices_mutation_replicated_zookeeper.reference => 00975_indices_mutation_replicated_zookeeper_long.reference} (100%) rename tests/queries/0_stateless/{00975_indices_mutation_replicated_zookeeper.sh => 00975_indices_mutation_replicated_zookeeper_long.sh} (100%) rename tests/queries/0_stateless/{00992_system_parts_race_condition_zookeeper.reference => 00992_system_parts_race_condition_zookeeper_long.reference} (100%) rename tests/queries/0_stateless/{00992_system_parts_race_condition_zookeeper.sh => 00992_system_parts_race_condition_zookeeper_long.sh} (100%) rename tests/queries/0_stateless/{01079_bad_alters_zookeeper.reference => 01079_bad_alters_zookeeper_long.reference} (52%) rename tests/queries/0_stateless/{01079_bad_alters_zookeeper.sh => 
01079_bad_alters_zookeeper_long.sh} (100%) rename tests/queries/0_stateless/{01079_parallel_alter_modify_zookeeper.reference => 01079_parallel_alter_modify_zookeeper_long.reference} (100%) rename tests/queries/0_stateless/{01079_parallel_alter_modify_zookeeper.sh => 01079_parallel_alter_modify_zookeeper_long.sh} (100%) rename tests/queries/0_stateless/{01508_race_condition_rename_clear_zookeeper.reference => 01508_race_condition_rename_clear_zookeeper_long.reference} (100%) rename tests/queries/0_stateless/{01508_race_condition_rename_clear_zookeeper.sh => 01508_race_condition_rename_clear_zookeeper_long.sh} (100%) rename tests/queries/0_stateless/{01509_check_many_parallel_quorum_inserts.reference => 01509_check_many_parallel_quorum_inserts_long.reference} (100%) rename tests/queries/0_stateless/{01509_check_many_parallel_quorum_inserts.sh => 01509_check_many_parallel_quorum_inserts_long.sh} (100%) rename tests/queries/0_stateless/{01509_check_parallel_quorum_inserts.reference => 01509_check_parallel_quorum_inserts_long.reference} (100%) rename tests/queries/0_stateless/{01509_check_parallel_quorum_inserts.sh => 01509_check_parallel_quorum_inserts_long.sh} (100%) rename tests/queries/0_stateless/{01509_parallel_quorum_and_merge.reference => 01509_parallel_quorum_and_merge_long.reference} (100%) rename tests/queries/0_stateless/{01509_parallel_quorum_and_merge.sh => 01509_parallel_quorum_and_merge_long.sh} (100%) rename tests/queries/0_stateless/{01671_ddl_hang_timeout.reference => 01671_ddl_hang_timeout_long.reference} (100%) rename tests/queries/0_stateless/{01671_ddl_hang_timeout.sh => 01671_ddl_hang_timeout_long.sh} (100%) rename tests/queries/0_stateless/{01753_system_zookeeper_query_param_path.reference => 01753_system_zookeeper_query_param_path_long.reference} (100%) rename tests/queries/0_stateless/{01753_system_zookeeper_query_param_path.sh => 01753_system_zookeeper_query_param_path_long.sh} (100%) diff --git 
a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.reference b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.reference rename to tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh similarity index 100% rename from tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh rename to tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.reference b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.reference rename to tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh similarity index 100% rename from tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh rename to tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh diff --git a/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference b/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.reference similarity index 52% rename from tests/queries/0_stateless/01079_bad_alters_zookeeper.reference rename to tests/queries/0_stateless/01079_bad_alters_zookeeper_long.reference index 67510b28a34..731cd871b3b 100644 --- a/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference +++ b/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.reference @@ -1,6 
+1,6 @@ Wrong column name. -CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64,\n `value1` UInt8,\n `value2` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01079_bad_alters_zookeeper_default/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64,\n `value1` UInt8,\n `value2` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01079_bad_alters_zookeeper_default/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64,\n `value1` UInt8,\n `value2` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01079_bad_alters_zookeeper_long_default/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64,\n `value1` UInt8,\n `value2` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01079_bad_alters_zookeeper_long_default/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 syntax error at begin of string. 
7 Hello diff --git a/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh b/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh similarity index 100% rename from tests/queries/0_stateless/01079_bad_alters_zookeeper.sh rename to tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh diff --git a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.reference b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.reference rename to tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.sh similarity index 100% rename from tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh rename to tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.sh diff --git a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.reference b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.reference rename to tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.sh b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh similarity index 100% rename from tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.sh rename to tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh diff --git a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.reference b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.reference similarity index 100% rename from 
tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.reference rename to tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.reference diff --git a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.sh b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh similarity index 100% rename from tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.sh rename to tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh diff --git a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts.reference b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.reference similarity index 100% rename from tests/queries/0_stateless/01509_check_parallel_quorum_inserts.reference rename to tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.reference diff --git a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts.sh b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh similarity index 100% rename from tests/queries/0_stateless/01509_check_parallel_quorum_inserts.sh rename to tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh diff --git a/tests/queries/0_stateless/01509_parallel_quorum_and_merge.reference b/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.reference similarity index 100% rename from tests/queries/0_stateless/01509_parallel_quorum_and_merge.reference rename to tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.reference diff --git a/tests/queries/0_stateless/01509_parallel_quorum_and_merge.sh b/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh similarity index 100% rename from tests/queries/0_stateless/01509_parallel_quorum_and_merge.sh rename to tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh diff --git a/tests/queries/0_stateless/01671_ddl_hang_timeout.reference b/tests/queries/0_stateless/01671_ddl_hang_timeout_long.reference 
similarity index 100% rename from tests/queries/0_stateless/01671_ddl_hang_timeout.reference rename to tests/queries/0_stateless/01671_ddl_hang_timeout_long.reference diff --git a/tests/queries/0_stateless/01671_ddl_hang_timeout.sh b/tests/queries/0_stateless/01671_ddl_hang_timeout_long.sh similarity index 100% rename from tests/queries/0_stateless/01671_ddl_hang_timeout.sh rename to tests/queries/0_stateless/01671_ddl_hang_timeout_long.sh diff --git a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.reference b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path_long.reference similarity index 100% rename from tests/queries/0_stateless/01753_system_zookeeper_query_param_path.reference rename to tests/queries/0_stateless/01753_system_zookeeper_query_param_path_long.reference diff --git a/tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh b/tests/queries/0_stateless/01753_system_zookeeper_query_param_path_long.sh similarity index 100% rename from tests/queries/0_stateless/01753_system_zookeeper_query_param_path.sh rename to tests/queries/0_stateless/01753_system_zookeeper_query_param_path_long.sh diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 974ef48ef3c..9501343754c 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -307,7 +307,7 @@ "00954_client_prepared_statements", "00956_sensitive_data_masking", "00969_columns_clause", - "00975_indices_mutation_replicated_zookeeper", + "00975_indices_mutation_replicated_zookeeper_long", "00975_values_list", "00976_system_stop_ttl_merges", "00977_int_div", @@ -442,8 +442,8 @@ "01504_compression_multiple_streams", "01508_explain_header", "01508_partition_pruning_long", - "01509_check_parallel_quorum_inserts", - "01509_parallel_quorum_and_merge", + "01509_check_parallel_quorum_inserts_long", + "01509_parallel_quorum_and_merge_long", "01515_mv_and_array_join_optimisation_bag", "01516_create_table_primary_key", 
"01517_drop_mv_with_inner_table", From 0bb665428d166d1e589886b62a3f645d6a5dc5d6 Mon Sep 17 00:00:00 2001 From: Vitaliy Fedorchenko Date: Tue, 16 Mar 2021 09:05:08 +0200 Subject: [PATCH 246/333] Update gui.md: add SeekTable --- docs/en/interfaces/third-party/gui.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/en/interfaces/third-party/gui.md b/docs/en/interfaces/third-party/gui.md index fa123d8b23d..5d14b3aa3cc 100644 --- a/docs/en/interfaces/third-party/gui.md +++ b/docs/en/interfaces/third-party/gui.md @@ -167,4 +167,21 @@ Features: [How to configure ClickHouse in Looker.](https://docs.looker.com/setup-and-management/database-config/clickhouse) +### SeekTable {#seektable} + +[SeekTable](https://www.seektable.com) is a self-service BI tool for data exploration and operational reporting. SeekTable is available both as a cloud service and a self-hosted version. SeekTable reports may be embedded into any web-app. + +Features: + +- Business users-friendly reports builder. +- Powerful report parameters for SQL filtering and report-specific query customizations. +- Can connect to ClickHouse both with a native TCP/IP endpoint and a HTTP(S) interface (2 different drivers). +- It is possible to use all power of CH SQL dialect in dimensions/measures definitions +- [Web API](https://www.seektable.com/help/web-api-integration) for automated reports generation. +- Supports reports development flow with account data [backup/restore](https://www.seektable.com/help/self-hosted-backup-restore), data models (cubes) / reports configuration is a human-readable XML and can be stored under version control. + +SeekTable is [free](https://www.seektable.com/help/cloud-pricing) for personal/individual usage. 
+ +[How to configure ClickHouse connection in SeekTable.](https://www.seektable.com/help/clickhouse-pivot-table) + [Original article](https://clickhouse.tech/docs/en/interfaces/third-party/gui/) From 27b8d10bd8edcd0778ae06c2f480bf5762db4cd6 Mon Sep 17 00:00:00 2001 From: Ali Demirci Date: Tue, 16 Mar 2021 13:30:05 +0300 Subject: [PATCH 247/333] docs(fix): typo --- docs/en/sql-reference/functions/machine-learning-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/machine-learning-functions.md b/docs/en/sql-reference/functions/machine-learning-functions.md index c9abd130ef3..60dabd73781 100644 --- a/docs/en/sql-reference/functions/machine-learning-functions.md +++ b/docs/en/sql-reference/functions/machine-learning-functions.md @@ -9,7 +9,7 @@ toc_title: Machine Learning Prediction using fitted regression models uses `evalMLMethod` function. See link in `linearRegression`. -## stochasticLinearRegressionn {#stochastic-linear-regression} +## stochasticLinearRegression {#stochastic-linear-regression} The [stochasticLinearRegression](../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md#agg_functions-stochasticlinearregression) aggregate function implements stochastic gradient descent method using linear model and MSE loss function. Uses `evalMLMethod` to predict on new data. 
From eadf0248d0e9cf57be70f5707277e2cda3c5855e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Mar 2021 14:07:53 +0300 Subject: [PATCH 248/333] Build fixes --- base/common/LocalDateTime.h | 24 +++++- base/mysqlxx/tests/CMakeLists.txt | 3 - base/mysqlxx/tests/failover.xml | 21 ----- base/mysqlxx/tests/mysqlxx_test.cpp | 77 ------------------- programs/odbc-bridge/ODBCBlockInputStream.cpp | 4 +- .../odbc-bridge/ODBCBlockOutputStream.cpp | 2 +- src/Storages/tests/CMakeLists.txt | 3 - src/Storages/tests/part_name.cpp | 21 ----- utils/wikistat-loader/main.cpp | 2 +- 9 files changed, 27 insertions(+), 130 deletions(-) delete mode 100644 base/mysqlxx/tests/failover.xml delete mode 100644 base/mysqlxx/tests/mysqlxx_test.cpp delete mode 100644 src/Storages/tests/part_name.cpp diff --git a/base/common/LocalDateTime.h b/base/common/LocalDateTime.h index 4c2cf0e637d..dde283e5ebb 100644 --- a/base/common/LocalDateTime.h +++ b/base/common/LocalDateTime.h @@ -106,8 +106,30 @@ public: void second(unsigned char x) { m_second = x; } LocalDate toDate() const { return LocalDate(m_year, m_month, m_day); } + LocalDateTime toStartOfDate() const { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); } - LocalDateTime toStartOfDate() { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); } + std::string toString() const + { + std::string s{"0000-00-00 00:00:00"}; + + s[0] += m_year / 1000; + s[1] += (m_year / 100) % 10; + s[2] += (m_year / 10) % 10; + s[3] += m_year % 10; + s[5] += m_month / 10; + s[6] += m_month % 10; + s[8] += m_day / 10; + s[9] += m_day % 10; + + s[11] += m_hour / 10; + s[12] += m_hour % 10; + s[14] += m_minute / 10; + s[15] += m_minute % 10; + s[17] += m_second / 10; + s[18] += m_second % 10; + + return s; + } bool operator< (const LocalDateTime & other) const { diff --git a/base/mysqlxx/tests/CMakeLists.txt b/base/mysqlxx/tests/CMakeLists.txt index 2cf19d78418..6473a927308 100644 --- a/base/mysqlxx/tests/CMakeLists.txt +++ 
b/base/mysqlxx/tests/CMakeLists.txt @@ -1,5 +1,2 @@ -add_executable (mysqlxx_test mysqlxx_test.cpp) -target_link_libraries (mysqlxx_test PRIVATE mysqlxx) - add_executable (mysqlxx_pool_test mysqlxx_pool_test.cpp) target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx) diff --git a/base/mysqlxx/tests/failover.xml b/base/mysqlxx/tests/failover.xml deleted file mode 100644 index 73702eabb29..00000000000 --- a/base/mysqlxx/tests/failover.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - 3306 - root - Metrica - qwerty - - example02t - 0 - - - example02t - 3306 - root - qwerty - Metrica - 1 - - - diff --git a/base/mysqlxx/tests/mysqlxx_test.cpp b/base/mysqlxx/tests/mysqlxx_test.cpp deleted file mode 100644 index c505d34a58d..00000000000 --- a/base/mysqlxx/tests/mysqlxx_test.cpp +++ /dev/null @@ -1,77 +0,0 @@ -#include -#include - - -int main(int, char **) -{ - try - { - mysqlxx::Connection connection("test", "127.0.0.1", "root", "qwerty", 3306); - std::cerr << "Connected." << std::endl; - - { - mysqlxx::Query query = connection.query(); - query << "SELECT 1 x, '2010-01-01 01:01:01' d"; - mysqlxx::UseQueryResult result = query.use(); - std::cerr << "use() called." << std::endl; - - while (mysqlxx::Row row = result.fetch()) - { - std::cerr << "Fetched row." 
<< std::endl; - std::cerr << row[0] << ", " << row["x"] << std::endl; - std::cerr << row[1] << ", " << row["d"] - << ", " << row[1].getDate() - << ", " << row[1].getDateTime() - << ", " << row[1].getDate() - << ", " << row[1].getDateTime() - << std::endl - << row[1].getDate() << ", " << row[1].getDateTime() << std::endl - << row[1].getDate() << ", " << row[1].getDateTime() << std::endl - << row[1].getDate() << ", " << row[1].getDateTime() << std::endl - << row[1].getDate() << ", " << row[1].getDateTime() << std::endl - ; - - time_t t1 = row[0]; - time_t t2 = row[1]; - std::cerr << t1 << ", " << LocalDateTime(t1) << std::endl; - std::cerr << t2 << ", " << LocalDateTime(t2) << std::endl; - } - } - - { - mysqlxx::UseQueryResult result = connection.query("SELECT 'abc\\\\def' x").use(); - mysqlxx::Row row = result.fetch(); - std::cerr << row << std::endl; - std::cerr << row << std::endl; - } - - { - /// Копирование Query - mysqlxx::Query query1 = connection.query("SELECT"); - mysqlxx::Query query2 = query1; - query2 << " 1"; - - std::cerr << query1.str() << ", " << query2.str() << std::endl; - } - - { - /// NULL - mysqlxx::Null x = mysqlxx::null; - std::cerr << (x == mysqlxx::null ? "Ok" : "Fail") << std::endl; - std::cerr << (x == 0 ? "Fail" : "Ok") << std::endl; - std::cerr << (x.isNull() ? "Ok" : "Fail") << std::endl; - x = 1; - std::cerr << (x == mysqlxx::null ? "Fail" : "Ok") << std::endl; - std::cerr << (x == 0 ? "Fail" : "Ok") << std::endl; - std::cerr << (x == 1 ? "Ok" : "Fail") << std::endl; - std::cerr << (x.isNull() ? 
"Fail" : "Ok") << std::endl; - } - } - catch (const mysqlxx::Exception & e) - { - std::cerr << e.code() << ", " << e.message() << std::endl; - throw; - } - - return 0; -} diff --git a/programs/odbc-bridge/ODBCBlockInputStream.cpp b/programs/odbc-bridge/ODBCBlockInputStream.cpp index 3e2a2d0c7d4..b8a4209ac94 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockInputStream.cpp @@ -87,8 +87,8 @@ namespace case ValueType::vtDateTime: { Poco::DateTime datetime = value.convert(); - assert_cast(column).insertValue(time_t{LocalDateTime( - datetime.year(), datetime.month(), datetime.day(), datetime.hour(), datetime.minute(), datetime.second())}); + assert_cast(column).insertValue(DateLUT::instance().makeDateTime( + datetime.year(), datetime.month(), datetime.day(), datetime.hour(), datetime.minute(), datetime.second())); break; } case ValueType::vtUUID: diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.cpp b/programs/odbc-bridge/ODBCBlockOutputStream.cpp index 4d8b9fa6bdf..db3c9441419 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockOutputStream.cpp @@ -81,7 +81,7 @@ namespace case ValueType::vtDate: return Poco::Dynamic::Var(LocalDate(DayNum(field.get())).toString()).convert(); case ValueType::vtDateTime: - return Poco::Dynamic::Var(std::to_string(LocalDateTime(time_t(field.get())))).convert(); + return Poco::Dynamic::Var(DateLUT::instance().timeToString(time_t(field.get()))).convert(); case ValueType::vtUUID: return Poco::Dynamic::Var(UUID(field.get()).toUnderType().toHexString()).convert(); default: diff --git a/src/Storages/tests/CMakeLists.txt b/src/Storages/tests/CMakeLists.txt index b58fed9edf5..59d44829363 100644 --- a/src/Storages/tests/CMakeLists.txt +++ b/src/Storages/tests/CMakeLists.txt @@ -1,6 +1,3 @@ -add_executable (part_name part_name.cpp) -target_link_libraries (part_name PRIVATE dbms) - add_executable (remove_symlink_directory remove_symlink_directory.cpp) 
target_link_libraries (remove_symlink_directory PRIVATE dbms) diff --git a/src/Storages/tests/part_name.cpp b/src/Storages/tests/part_name.cpp deleted file mode 100644 index 227e19cf17c..00000000000 --- a/src/Storages/tests/part_name.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include -#include -#include - - -int main(int, char **) -{ - const DayNum today(DateLUT::instance().toDayNum(time(nullptr)).toUnderType()); - - for (DayNum date = today; DayNum(date + 10) > today; --date) - { - DB::MergeTreePartInfo part_info("partition", 0, 0, 0); - std::string name = part_info.getPartNameV0(date, date); - std::cerr << name << '\n'; - - time_t time = DateLUT::instance().YYYYMMDDToDate(DB::parse(name)); - std::cerr << LocalDateTime(time) << '\n'; - } - - return 0; -} diff --git a/utils/wikistat-loader/main.cpp b/utils/wikistat-loader/main.cpp index f2adcc43a3a..31ade014c74 100644 --- a/utils/wikistat-loader/main.cpp +++ b/utils/wikistat-loader/main.cpp @@ -151,7 +151,7 @@ try std::string time_str = options.at("time").as(); LocalDateTime time(time_str); - LocalDate date(time); + LocalDate date(time_str); DB::ReadBufferFromFileDescriptor in(STDIN_FILENO); DB::WriteBufferFromFileDescriptor out(STDOUT_FILENO); From f8a99804c178487ab2e7ca3e391cc7b823877054 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 16 Mar 2021 10:20:31 +0000 Subject: [PATCH 249/333] Add double quotes --- src/Databases/PostgreSQL/DatabasePostgreSQL.cpp | 3 ++- src/TableFunctions/TableFunctionPostgreSQL.cpp | 4 +++- tests/integration/test_storage_postgresql/test.py | 11 +++++++++++ 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index 722b9c64edb..d2eb6797c84 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -16,6 +16,7 @@ #include #include #include +#include namespace DB @@ -162,7 +163,7 @@ StoragePtr DatabasePostgreSQL::fetchTable(const 
String & table_name, const Conte return StoragePtr{}; auto use_nulls = context.getSettingsRef().external_table_functions_use_nulls; - auto columns = fetchPostgreSQLTableStructure(connection->conn(), table_name, use_nulls); + auto columns = fetchPostgreSQLTableStructure(connection->conn(), doubleQuoteString(table_name), use_nulls); if (!columns) return StoragePtr{}; diff --git a/src/TableFunctions/TableFunctionPostgreSQL.cpp b/src/TableFunctions/TableFunctionPostgreSQL.cpp index 0e3f1c5da24..ae34ce41e47 100644 --- a/src/TableFunctions/TableFunctionPostgreSQL.cpp +++ b/src/TableFunctions/TableFunctionPostgreSQL.cpp @@ -11,6 +11,7 @@ #include "registerTableFunctions.h" #include #include +#include namespace DB @@ -41,7 +42,8 @@ ColumnsDescription TableFunctionPostgreSQL::getActualTableStructure(const Contex const bool use_nulls = context.getSettingsRef().external_table_functions_use_nulls; auto columns = fetchPostgreSQLTableStructure( connection->conn(), - remote_table_schema.empty() ? remote_table_name : remote_table_schema + '.' + remote_table_name, + remote_table_schema.empty() ? doubleQuoteString(remote_table_name) + : doubleQuoteString(remote_table_schema) + '.' 
+ doubleQuoteString(remote_table_name), use_nulls); return ColumnsDescription{*columns}; diff --git a/tests/integration/test_storage_postgresql/test.py b/tests/integration/test_storage_postgresql/test.py index 58f3233bacc..8be72f5a7c7 100644 --- a/tests/integration/test_storage_postgresql/test.py +++ b/tests/integration/test_storage_postgresql/test.py @@ -152,6 +152,17 @@ def test_non_default_scema(started_cluster): result = node1.query('SELECT * FROM {}'.format(table_function)) assert(result == expected) + cursor.execute('''CREATE SCHEMA "test.nice.schema"''') + cursor.execute('''CREATE TABLE "test.nice.schema"."test.nice.table" (a integer)''') + cursor.execute('INSERT INTO "test.nice.schema"."test.nice.table" SELECT i FROM generate_series(0, 99) as t(i)') + + node1.query(''' + CREATE TABLE test_pg_table_schema_with_dots (a UInt32) + ENGINE PostgreSQL('postgres1:5432', 'clickhouse', 'test.nice.table', 'postgres', 'mysecretpassword', 'test.nice.schema'); + ''') + result = node1.query('SELECT * FROM test_pg_table_schema_with_dots') + assert(result == expected) + if __name__ == '__main__': cluster.start() From de091114f3cee6ac5a775a9811855295ff29a34e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Mar 2021 15:22:52 +0300 Subject: [PATCH 250/333] Fix UBSan report --- base/common/DateLUTImpl.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h index 98b767ccbcc..43fc1b8befd 100644 --- a/base/common/DateLUTImpl.h +++ b/base/common/DateLUTImpl.h @@ -63,7 +63,7 @@ private: template friend inline LUTIndex operator+(const LUTIndex & index, const T v) { - return LUTIndex{(index.toUnderType() + v) & date_lut_mask}; + return LUTIndex{(index.toUnderType() + UInt32(v)) & date_lut_mask}; } template @@ -80,7 +80,7 @@ private: template friend inline LUTIndex operator-(const LUTIndex & index, const T v) { - return LUTIndex{(index.toUnderType() - v) & date_lut_mask}; + return 
LUTIndex{(index.toUnderType() - UInt32(v)) & date_lut_mask}; } template @@ -97,7 +97,7 @@ private: template friend inline LUTIndex operator*(const LUTIndex & index, const T v) { - return LUTIndex{(index.toUnderType() * v) & date_lut_mask}; + return LUTIndex{(index.toUnderType() * UInt32(v)) & date_lut_mask}; } template @@ -109,13 +109,13 @@ private: template friend inline LUTIndex operator/(const LUTIndex & index, const T v) { - return LUTIndex{(index.toUnderType() / v) & date_lut_mask}; + return LUTIndex{(index.toUnderType() / UInt32(v)) & date_lut_mask}; } template friend inline LUTIndex operator/(const T v, const LUTIndex & index) { - return LUTIndex{(v / index.toUnderType()) & date_lut_mask}; + return LUTIndex{(UInt32(v) / index.toUnderType()) & date_lut_mask}; } public: From 172f668fa3fe78e49fec00f0eaa2faf0a124eff6 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 16 Mar 2021 15:45:08 +0300 Subject: [PATCH 251/333] fix doc --- docs/en/engines/table-engines/mergetree-family/mergetree.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 70cf7f2212e..0b551040ba1 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -529,7 +529,7 @@ CREATE TABLE table_for_aggregation y Int ) ENGINE = MergeTree -ORDER BY k1, k2 +ORDER BY (k1, k2) TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y); ``` From 1dd730d2d6a48fb03d8f9f692888bee9f5969743 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Mar 2021 15:59:14 +0300 Subject: [PATCH 252/333] Fix perf test --- tests/performance/date_time_long.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/performance/date_time_long.xml b/tests/performance/date_time_long.xml index 3a61a5992e5..ae41602e825 100644 --- a/tests/performance/date_time_long.xml +++ 
b/tests/performance/date_time_long.xml @@ -77,7 +77,6 @@ toYYYYMMDDhhmmss toRelativeQuarterNum - toUnixTimestamp
From 637b683ae9afedbad922ba6a7216ac8095891d12 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Mar 2021 18:35:14 +0300 Subject: [PATCH 253/333] Fix clang-tidy --- src/DataTypes/DataTypeDateTime.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/DataTypes/DataTypeDateTime.cpp b/src/DataTypes/DataTypeDateTime.cpp index 09dcb5f3e2e..510747f6ef9 100644 --- a/src/DataTypes/DataTypeDateTime.cpp +++ b/src/DataTypes/DataTypeDateTime.cpp @@ -91,7 +91,7 @@ void DataTypeDateTime::deserializeWholeText(IColumn & column, ReadBuffer & istr, void DataTypeDateTime::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { - time_t x; + time_t x = 0; readTextHelper(x, istr, settings, time_zone, utc_time_zone); if (x < 0) x = 0; @@ -107,7 +107,8 @@ void DataTypeDateTime::serializeTextQuoted(const IColumn & column, size_t row_nu void DataTypeDateTime::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { - time_t x; + time_t x = 0; + if (checkChar('\'', istr)) /// Cases: '2017-08-31 18:36:48' or '1504193808' { readTextHelper(x, istr, settings, time_zone, utc_time_zone); @@ -131,7 +132,8 @@ void DataTypeDateTime::serializeTextJSON(const IColumn & column, size_t row_num, void DataTypeDateTime::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { - time_t x; + time_t x = 0; + if (checkChar('"', istr)) { readTextHelper(x, istr, settings, time_zone, utc_time_zone); @@ -157,7 +159,7 @@ void DataTypeDateTime::serializeTextCSV(const IColumn & column, size_t row_num, void DataTypeDateTime::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { - time_t x; + time_t x = 0; if (istr.eof()) throwReadAfterEOF(); From 02a06eb448d63668c1973c361feb7276cc14388a Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Tue, 16 Mar 2021 18:39:39 +0300 Subject: [PATCH 254/333] Update 
mergetree.md --- .../table-engines/mergetree-family/mergetree.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 0b551040ba1..a24b7229d17 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -353,7 +353,7 @@ The `set` index can be used with all functions. Function subsets for other index | Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | |------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------| | [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, \<\>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | | [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ | | [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | | [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | @@ -361,10 +361,10 @@ The `set` index can be used with all functions. 
Function subsets for other index | [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | | [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | | [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [less (<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greater (>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [lessOrEquals (<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greaterOrEquals (>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | | [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | | [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | | hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | From e5bef75728ed16f3c23146a9c1986461695b6f5f Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 16 Mar 2021 19:15:31 +0300 Subject: [PATCH 255/333] fix --- .../Transforms/PartialSortingTransform.cpp | 24 ++++++++++++------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/src/Processors/Transforms/PartialSortingTransform.cpp b/src/Processors/Transforms/PartialSortingTransform.cpp index 33ff639f10d..7c29f506617 100644 --- 
a/src/Processors/Transforms/PartialSortingTransform.cpp +++ b/src/Processors/Transforms/PartialSortingTransform.cpp @@ -91,10 +91,19 @@ size_t getFilterMask(const ColumnRawPtrs & lhs, const ColumnRawPtrs & rhs, size_ void PartialSortingTransform::transform(Chunk & chunk) { - auto rows_num = chunk.getNumRows(); - + if (chunk.getColumns().empty()) + { + // Sometimes we can have Chunks w/o columns, e.g. in case of + // `select count() over () from numbers(4) where number < 2`. + // We don't have to modify this Chunk, but we have to preserve the input + // number of rows. The following code uses Block for sorting, and Block + // is incapable of recording the number of rows when there is no columns. + // The simplest solution is to specifically check for Chunk with no + // columns and not modify it, which is what we do here. + return; + } if (read_rows) - read_rows->add(rows_num); + read_rows->add(chunk.getNumRows()); auto block = getInputPort().getHeader().cloneWithColumns(chunk.detachColumns()); @@ -103,6 +112,7 @@ void PartialSortingTransform::transform(Chunk & chunk) */ if (!threshold_block_columns.empty()) { + UInt64 rows_num = block.rows(); auto block_columns = extractColumns(block, description); size_t result_size_hint = getFilterMask( @@ -117,15 +127,13 @@ void PartialSortingTransform::transform(Chunk & chunk) { for (auto & column : block) column.column = column.column->filter(filter, result_size_hint); - - rows_num = block.rows(); } } sortBlock(block, description, limit); /// Check if we can use this block for optimization. 
- if (min_limit_for_partial_sort_optimization <= limit && limit <= rows_num) + if (min_limit_for_partial_sort_optimization <= limit && limit <= block.rows()) { auto block_columns = extractColumns(block, description); @@ -137,9 +145,7 @@ void PartialSortingTransform::transform(Chunk & chunk) } } - assert(block.columns() == 0 || block.rows() == rows_num); - - chunk.setColumns(block.getColumns(), rows_num); + chunk.setColumns(block.getColumns(), block.rows()); } } From 9f05fc22d062391dec8b704987a3d269f4f13741 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Mar 2021 19:31:25 +0300 Subject: [PATCH 256/333] Modern JavaScript --- programs/server/play.html | 64 +++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/programs/server/play.html b/programs/server/play.html index 81fc13f1f86..7d0ceeeaeb1 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -306,10 +306,10 @@ /// Incremental request number. When response is received, /// if it's request number does not equal to the current request number, response will be ignored. /// This is to avoid race conditions. - var request_num = 0; + let request_num = 0; /// Save query in history only if it is different. - var previous_query = ''; + let previous_query = ''; /// Substitute the address of the server where the page is served. if (location.protocol != 'file:') { @@ -317,7 +317,7 @@ } /// Substitute user name if it's specified in the query string - var user_from_url = (new URL(window.location)).searchParams.get('user'); + let user_from_url = (new URL(window.location)).searchParams.get('user'); if (user_from_url) { document.getElementById('user').value = user_from_url; } @@ -326,10 +326,10 @@ { /// TODO: Check if URL already contains query string (append parameters). 
- var user = document.getElementById('user').value; - var password = document.getElementById('password').value; + let user = document.getElementById('user').value; + let password = document.getElementById('password').value; - var url = document.getElementById('url').value + + let url = document.getElementById('url').value + /// Ask server to allow cross-domain requests. '?add_http_cors_header=1' + '&user=' + encodeURIComponent(user) + @@ -338,7 +338,7 @@ /// Safety settings to prevent results that browser cannot display. '&max_result_rows=1000&max_result_bytes=10000000&result_overflow_mode=break'; - var xhr = new XMLHttpRequest; + let xhr = new XMLHttpRequest; xhr.open('POST', url, true); @@ -352,13 +352,13 @@ /// The query is saved in browser history (in state JSON object) /// as well as in URL fragment identifier. if (query != previous_query) { - var state = { + let state = { query: query, status: this.status, response: this.response.length > 100000 ? null : this.response /// Lower than the browser's limit. 
}; - var title = "ClickHouse Query: " + query; - var url = window.location.pathname + '?user=' + encodeURIComponent(user) + '#' + window.btoa(query); + let title = "ClickHouse Query: " + query; + let url = window.location.pathname + '?user=' + encodeURIComponent(user) + '#' + window.btoa(query); if (previous_query == '') { history.replaceState(state, title, url); } else { @@ -382,7 +382,7 @@ document.getElementById('hourglass').style.display = 'none'; if (status === 200) { - var json; + let json; try { json = JSON.parse(response); } catch (e) {} if (json !== undefined && json.statistics !== undefined) { renderResult(json); @@ -415,7 +415,7 @@ function post() { ++request_num; - var query = document.getElementById('query').value; + let query = document.getElementById('query').value; postImpl(request_num, query); } @@ -434,7 +434,7 @@ function clear() { - var table = document.getElementById('data-table'); + let table = document.getElementById('data-table'); while (table.firstChild) { table.removeChild(table.lastChild); } @@ -456,30 +456,30 @@ //console.log(response); clear(); - var stats = document.getElementById('stats'); + let stats = document.getElementById('stats'); stats.innerText = 'Elapsed: ' + response.statistics.elapsed.toFixed(3) + " sec, read " + response.statistics.rows_read + " rows."; - var thead = document.createElement('thead'); - for (var idx in response.meta) { - var th = document.createElement('th'); - var name = document.createTextNode(response.meta[idx].name); + let thead = document.createElement('thead'); + for (let idx in response.meta) { + let th = document.createElement('th'); + let name = document.createTextNode(response.meta[idx].name); th.appendChild(name); thead.appendChild(th); } /// To prevent hanging the browser, limit the number of cells in a table. /// It's important to have the limit on number of cells, not just rows, because tables may be wide or narrow. 
- var max_rows = 10000 / response.meta.length; - var row_num = 0; + let max_rows = 10000 / response.meta.length; + let row_num = 0; - var tbody = document.createElement('tbody'); - for (var row_idx in response.data) { - var tr = document.createElement('tr'); - for (var col_idx in response.data[row_idx]) { - var td = document.createElement('td'); - var cell = response.data[row_idx][col_idx]; - var is_null = (cell === null); - var content = document.createTextNode(is_null ? 'ᴺᵁᴸᴸ' : cell); + let tbody = document.createElement('tbody'); + for (let row_idx in response.data) { + let tr = document.createElement('tr'); + for (let col_idx in response.data[row_idx]) { + let td = document.createElement('td'); + let cell = response.data[row_idx][col_idx]; + let is_null = (cell === null); + let content = document.createTextNode(is_null ? 'ᴺᵁᴸᴸ' : cell); td.appendChild(content); /// TODO: Execute regexp only once for each column. td.className = response.meta[col_idx].type.match(/^(U?Int|Decimal|Float)/) ? 'right' : 'left'; @@ -496,7 +496,7 @@ } } - var table = document.getElementById('data-table'); + let table = document.getElementById('data-table'); table.appendChild(thead); table.appendChild(tbody); } @@ -505,7 +505,7 @@ function renderUnparsedResult(response) { clear(); - var data = document.getElementById('data-unparsed') + let data = document.getElementById('data-unparsed') if (response === '') { /// TODO: Fade or remove previous result when new request will be performed. @@ -531,12 +531,12 @@ } /// The choice of color theme is saved in browser. 
- var theme = window.localStorage.getItem('theme'); + let theme = window.localStorage.getItem('theme'); if (theme) { setColorTheme(theme); } else { /// Obtain system-level user preference - var media_query_list = window.matchMedia('prefers-color-scheme: dark') + let media_query_list = window.matchMedia('prefers-color-scheme: dark') if (media_query_list.matches) { /// Set without saving to localstorage From bc25624b885bf74ad0898c16f18229fa32d09c48 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Mar 2021 19:54:16 +0300 Subject: [PATCH 257/333] Better formatting for Array and Map in Web UI --- programs/server/play.html | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/programs/server/play.html b/programs/server/play.html index 7d0ceeeaeb1..e9404d13acb 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -472,17 +472,30 @@ let max_rows = 10000 / response.meta.length; let row_num = 0; + let column_classes = response.meta.map(elem => elem.type.match(/^(U?Int|Decimal|Float)/) ? 'right' : 'left'); + let tbody = document.createElement('tbody'); for (let row_idx in response.data) { let tr = document.createElement('tr'); for (let col_idx in response.data[row_idx]) { let td = document.createElement('td'); let cell = response.data[row_idx][col_idx]; + let is_null = (cell === null); - let content = document.createTextNode(is_null ? 'ᴺᵁᴸᴸ' : cell); - td.appendChild(content); + + /// Test: SELECT number, toString(number) AS str, number % 2 ? number : NULL AS nullable, range(number) AS arr, CAST((['hello', 'world'], [number, number % 2]) AS Map(String, UInt64)) AS map FROM numbers(10) + let text; + if (is_null) { + text = 'ᴺᵁᴸᴸ'; + } else if (typeof(cell) === 'object') { + text = JSON.stringify(cell); + } else { + text = cell; + } + + td.appendChild(document.createTextNode(text)); /// TODO: Execute regexp only once for each column. - td.className = response.meta[col_idx].type.match(/^(U?Int|Decimal|Float)/) ? 
'right' : 'left'; + td.className = column_classes[col_idx]; if (is_null) { td.className += ' null'; } From 174bce9ca4966b56ee60ee876a5f2337fddbfa1e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Mar 2021 19:55:56 +0300 Subject: [PATCH 258/333] TODO is resolved --- programs/server/play.html | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/server/play.html b/programs/server/play.html index e9404d13acb..0c039097ce1 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -494,7 +494,6 @@ } td.appendChild(document.createTextNode(text)); - /// TODO: Execute regexp only once for each column. td.className = column_classes[col_idx]; if (is_null) { td.className += ' null'; From 94da7f422c3ca954d422741edb76ed4f9a735b4a Mon Sep 17 00:00:00 2001 From: George Date: Tue, 16 Mar 2021 20:57:17 +0300 Subject: [PATCH 259/333] updated translation --- docs/ru/sql-reference/statements/detach.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ru/sql-reference/statements/detach.md b/docs/ru/sql-reference/statements/detach.md index bec8f4c5ff7..90e9663def9 100644 --- a/docs/ru/sql-reference/statements/detach.md +++ b/docs/ru/sql-reference/statements/detach.md @@ -15,13 +15,13 @@ DETACH TABLE|VIEW [IF EXISTS] [db.]name [PERMANENTLY] [ON CLUSTER cluster] Но ни данные, ни метаданные таблицы или материализованного представления не удаляются. При следующем запуске сервера, если не было использовано `PERMANENTLY`, сервер прочитает метаданные и снова узнает о таблице/представлении. Если таблица или представление были откреплено перманентно, сервер не прикрепит их обратно автоматически. -Независимо от того, каким способом таблица была откреплена, ее можно прикрепить обратно с помощью запроса [ATTACH](../../sql-reference/statements/attach.md). Системные log таблицы также могут быть прикреплены обратно (к примеру `query_log`, `text_log` и др.) 
Другие системные таблицы не могут быть прикреплены обратно, но на следующем запуске сервер снова вспомнит об этих таблицах. +Независимо от того, каким способом таблица была откреплена, ее можно прикрепить обратно с помощью запроса [ATTACH](../../sql-reference/statements/attach.md). Системные log таблицы также могут быть прикреплены обратно (к примеру `query_log`, `text_log` и др.) Другие системные таблицы не могут быть прикреплены обратно, но на следующем запуске сервер снова "вспомнит" об этих таблицах. `ATTACH MATERIALIZED VIEW` не может быть использован с кратким синтаксисом (без `SELECT`), но можно прикрепить представление с помощью запроса `ATTACH TABLE`. Обратите внимание, что нельзя перманентно открепить таблицу, которая уже временно откреплена. Для этого ее сначала надо прикрепить обратно, а затем снова открепить перманентно. -Также нельзя использовать [DROP](../../sql-reference/statements/drop.md#drop-table) с открепленной таблицей или создавать таблицу с помощью [CREATE TABLE](../../sql-reference/statements/create/table.md) с таким же именем, как уже открепленная таблица. Еще нельзя заменить открепленную таблицу другой с помощью запроса [RENAME TABLE](../../sql-reference/statements/rename.md). +Также нельзя использовать [DROP](../../sql-reference/statements/drop.md#drop-table) с открепленной таблицей или создавать таблицу с помощью [CREATE TABLE](../../sql-reference/statements/create/table.md) с таким же именем, как у уже открепленной таблицы. Еще нельзя заменить открепленную таблицу другой с помощью запроса [RENAME TABLE](../../sql-reference/statements/rename.md). 
**Пример** From 3c61e7d3f97c34325dfb09de03d4b1559ace193e Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 20:59:28 +0300 Subject: [PATCH 260/333] Update docs/ru/operations/external-authenticators/index.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/index.md b/docs/ru/operations/external-authenticators/index.md index 6b75e864fb8..c8ac7459cfa 100644 --- a/docs/ru/operations/external-authenticators/index.md +++ b/docs/ru/operations/external-authenticators/index.md @@ -6,7 +6,7 @@ toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435" # Внешние аутентификаторы пользователей и каталоги {#external-authenticators} -ClickHouse поддерживает аунтетификацию и управление пользователями внешними сервисами. +ClickHouse поддерживает аутентификацию и управление пользователями при помощи внешних сервисов. Поддерживаются следующие внешние аутентификаторы и каталоги: From dc18ad6359940d13e429569d03e22315916f0265 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:00:31 +0300 Subject: [PATCH 261/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 7f901898a99..e3c9f17d2af 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -1,6 +1,6 @@ # LDAP {#external-authenticators-ldap} -Для аутентификации пользователей ClickHouse можно использовать сервер LDAP. 
Существует два подхода: +Для аутентификации пользователей ClickHouse можно использовать сервер LDAP. Существуют два подхода: - Использовать LDAP как внешний аутентификатор для существующих пользователей, которые определены в `users.xml` или в локальных путях управления контролем. - Использовать LDAP как внешний пользовательский каталог и разрешить аутентификацию локально неопределенных пользователей, если они есть на LDAP сервере. From 19d989c6871e12596d29f5c8669a6a6421021d81 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:00:47 +0300 Subject: [PATCH 262/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index e3c9f17d2af..86b096c7d32 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -2,7 +2,7 @@ Для аутентификации пользователей ClickHouse можно использовать сервер LDAP. Существуют два подхода: -- Использовать LDAP как внешний аутентификатор для существующих пользователей, которые определены в `users.xml` или в локальных путях управления контролем. +- Использовать LDAP как внешний аутентификатор для существующих пользователей, которые определены в `users.xml` или в локальных параметрах управления доступом. - Использовать LDAP как внешний пользовательский каталог и разрешить аутентификацию локально неопределенных пользователей, если они есть на LDAP сервере. Для обоих подходов необходимо определить в конфиге ClickHouse LDAP сервер с внутренним именем, чтобы другие части конфига могли ссылаться на него. 
From 225aa5c3fbb666a43634031e45c3271a3832e2bf Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:01:04 +0300 Subject: [PATCH 263/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 86b096c7d32..6a436c775ec 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -5,7 +5,7 @@ - Использовать LDAP как внешний аутентификатор для существующих пользователей, которые определены в `users.xml` или в локальных параметрах управления доступом. - Использовать LDAP как внешний пользовательский каталог и разрешить аутентификацию локально неопределенных пользователей, если они есть на LDAP сервере. -Для обоих подходов необходимо определить в конфиге ClickHouse LDAP сервер с внутренним именем, чтобы другие части конфига могли ссылаться на него. +Для обоих подходов необходимо определить внутреннее имя LDAP сервера в конфигурации ClickHouse, чтобы другие параметры конфигурации могли ссылаться на это имя. 
## Определение LDAP сервера {#ldap-server-definition} From 916c50017aaab06ce04cc5a4c4955bf142a8e37b Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:01:10 +0300 Subject: [PATCH 264/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 6a436c775ec..d07c9dae3cd 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -39,7 +39,7 @@ **Параметры** -- `host` — имя хоста сервера LDAP или его IP. Этот параметр обязательный и не может быть оставлен пустым. +- `host` — имя хоста сервера LDAP или его IP. Этот параметр обязательный и не может быть пустым. - `port` — порт сервера LDAP. По-умолчанию: `636` при значении `true` настройки `enable_tls`, иначе `389`. - `bind_dn` — шаблон для создания DN для привязки. - Конечный DN будет создан заменой всех подстрок `{user_name}` шаблона на фактическое имя пользователя при каждой попытке аутентификации. 
From ad92537251f7c8ebc50e33c3af53b01d7d8d571a Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:01:15 +0300 Subject: [PATCH 265/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index d07c9dae3cd..bc3a647bda7 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -40,7 +40,7 @@ **Параметры** - `host` — имя хоста сервера LDAP или его IP. Этот параметр обязательный и не может быть пустым. -- `port` — порт сервера LDAP. По-умолчанию: `636` при значении `true` настройки `enable_tls`, иначе `389`. +- `port` — порт сервера LDAP. Если настройка `enable_tls` равна `true`, то по умолчанию используется порт `636`, иначе — порт `389`. - `bind_dn` — шаблон для создания DN для привязки. - Конечный DN будет создан заменой всех подстрок `{user_name}` шаблона на фактическое имя пользователя при каждой попытке аутентификации. - `verification_cooldown` — промежуток времени (в секундах) после успешной попытки привязки, в течение которого пользователь будет считаться успешно аутентифицированным, и сможет совершать запросы без контакта с серверов LDAP. 
From 06b01bed1720fb0eb43242c1d46aedbe0ff7ed74 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:01:34 +0300 Subject: [PATCH 266/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index bc3a647bda7..d1c13f7534a 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -42,7 +42,7 @@ - `host` — имя хоста сервера LDAP или его IP. Этот параметр обязательный и не может быть пустым. - `port` — порт сервера LDAP. Если настройка `enable_tls` равна `true`, то по умолчанию используется порт `636`, иначе — порт `389`. - `bind_dn` — шаблон для создания DN для привязки. - - Конечный DN будет создан заменой всех подстрок `{user_name}` шаблона на фактическое имя пользователя при каждой попытке аутентификации. + - При формировании DN все подстроки `{user_name}` в шаблоне будут заменяться на фактическое имя пользователя при каждой попытке аутентификации. - `verification_cooldown` — промежуток времени (в секундах) после успешной попытки привязки, в течение которого пользователь будет считаться успешно аутентифицированным, и сможет совершать запросы без контакта с серверов LDAP. - Укажите `0` (по-умолчанию), чтобы отключить кеширование и заставить связываться с сервером LDAP для каждого запроса аутентификации. - `enable_tls` — флаг, включающий использование защищенного соединения с сервером LDAP. 
From d5a0f58c0fbf6452d95d7a53e58f26d07605a3b9 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:01:48 +0300 Subject: [PATCH 267/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index d1c13f7534a..ae1d3bb935b 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -43,7 +43,7 @@ - `port` — порт сервера LDAP. Если настройка `enable_tls` равна `true`, то по умолчанию используется порт `636`, иначе — порт `389`. - `bind_dn` — шаблон для создания DN для привязки. - При формировании DN все подстроки `{user_name}` в шаблоне будут заменяться на фактическое имя пользователя при каждой попытке аутентификации. -- `verification_cooldown` — промежуток времени (в секундах) после успешной попытки привязки, в течение которого пользователь будет считаться успешно аутентифицированным, и сможет совершать запросы без контакта с серверов LDAP. +- `verification_cooldown` — промежуток времени (в секундах) после успешной попытки привязки, в течение которого пользователь будет считаться аутентифицированным и сможет выполнять запросы без повторного обращения к серверам LDAP. - Укажите `0` (по-умолчанию), чтобы отключить кеширование и заставить связываться с сервером LDAP для каждого запроса аутентификации. - `enable_tls` — флаг, включающий использование защищенного соединения с сервером LDAP. - Укажите `no` для текстового `ldap://` протокола (не рекомендовано). 
From 09ee74f1972c3af8277c97b22eb4f1584ef237bb Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:01:55 +0300 Subject: [PATCH 268/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index ae1d3bb935b..9e8727cfa76 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -44,7 +44,7 @@ - `bind_dn` — шаблон для создания DN для привязки. - При формировании DN все подстроки `{user_name}` в шаблоне будут заменяться на фактическое имя пользователя при каждой попытке аутентификации. - `verification_cooldown` — промежуток времени (в секундах) после успешной попытки привязки, в течение которого пользователь будет считаться аутентифицированным и сможет выполнять запросы без повторного обращения к серверам LDAP. - - Укажите `0` (по-умолчанию), чтобы отключить кеширование и заставить связываться с сервером LDAP для каждого запроса аутентификации. + - Чтобы отключить кеширование и заставить обращаться к серверу LDAP для каждого запроса аутентификации, укажите `0` (значение по умолчанию). - `enable_tls` — флаг, включающий использование защищенного соединения с сервером LDAP. - Укажите `no` для текстового `ldap://` протокола (не рекомендовано). - Укажите `yes` для LDAP через SSL/TLS `ldaps://` протокола (рекомендовано, используется по-умолчанию). 
From 9eee949e4a735cf534ff6dae5fdfaeb402d0a4a1 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:02:19 +0300 Subject: [PATCH 269/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 9e8727cfa76..44474502f46 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -46,9 +46,9 @@ - `verification_cooldown` — промежуток времени (в секундах) после успешной попытки привязки, в течение которого пользователь будет считаться аутентифицированным и сможет выполнять запросы без повторного обращения к серверам LDAP. - Чтобы отключить кеширование и заставить обращаться к серверу LDAP для каждого запроса аутентификации, укажите `0` (значение по умолчанию). - `enable_tls` — флаг, включающий использование защищенного соединения с сервером LDAP. - - Укажите `no` для текстового `ldap://` протокола (не рекомендовано). - - Укажите `yes` для LDAP через SSL/TLS `ldaps://` протокола (рекомендовано, используется по-умолчанию). - - Укажите `starttls` для устаревшего StartTLS протокола (текстовый `ldap://` протокол, модернизированный до TLS). + - Укажите `no` для использования текстового протокола `ldap://` (не рекомендовано). + - Укажите `yes` для обращения к LDAP по протоколу SSL/TLS `ldaps://` (рекомендовано, используется по умолчанию). + - Укажите `starttls` для использования устаревшего протокола StartTLS (текстовый `ldap://` протокол, модернизированный до TLS). - `tls_minimum_protocol_version` — минимальная версия протокола SSL/TLS. - Принимаемые значения: `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, `tls1.2` (по-умолчанию). 
- `tls_require_cert` — поведение при проверке сертификата SSL/TLS. From a38264a78d191567adca9be5fef080aa1c9d3d9c Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:02:27 +0300 Subject: [PATCH 270/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 44474502f46..86fa6322b76 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -50,9 +50,9 @@ - Укажите `yes` для обращения к LDAP по протоколу SSL/TLS `ldaps://` (рекомендовано, используется по умолчанию). - Укажите `starttls` для использования устаревшего протокола StartTLS (текстовый `ldap://` протокол, модернизированный до TLS). - `tls_minimum_protocol_version` — минимальная версия протокола SSL/TLS. - - Принимаемые значения: `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, `tls1.2` (по-умолчанию). + - Возможные значения: `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, `tls1.2` (по-умолчанию). - `tls_require_cert` — поведение при проверке сертификата SSL/TLS. - - Принимаемые значения: `never`, `allow`, `try`, `demand` (по-умолчанию). + - Возможные значения: `never`, `allow`, `try`, `demand` (по-умолчанию). - `tls_cert_file` — путь к файлу сертификата. - `tls_key_file` — путь к файлу ключа сертификата. - `tls_ca_cert_file` — путь к файлу ЦС сертификата. 
From 38df6717c827f9b0ea5dd81ad16cf6510694018b Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:02:34 +0300 Subject: [PATCH 271/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 86fa6322b76..3ca3369f4df 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -56,7 +56,7 @@ - `tls_cert_file` — путь к файлу сертификата. - `tls_key_file` — путь к файлу ключа сертификата. - `tls_ca_cert_file` — путь к файлу ЦС сертификата. -- `tls_ca_cert_dir` — путь к каталогу, содержащая сертификаты ЦС. +- `tls_ca_cert_dir` — путь к каталогу, содержащему сертификаты ЦС. - `tls_cipher_suite` — разрешенный набор шифров (в нотации OpenSSL). 
## Внешний аутентификатор LDAP {#ldap-external-authenticator} From eab348f139c0ba2f58e905253cab3083750d4640 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:02:46 +0300 Subject: [PATCH 272/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 3ca3369f4df..83c92af0130 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -61,7 +61,7 @@ ## Внешний аутентификатор LDAP {#ldap-external-authenticator} -Удаленный сервер LDAP можно использовать как метод верификации паролей локально определенных пользователей (пользователей, которые определены в `users.xml` или в локальных путях управления контролем). Для этого укажите имя определенного до этого сервера LDAP вместо `password` или другой похожей секции в определении пользователя. +Удаленный сервер LDAP можно использовать для верификации паролей локально определенных пользователей (пользователей, которые определены в `users.xml` или в локальных параметрах управления доступом). Для этого укажите имя определенного ранее сервера LDAP вместо `password` или другой аналогичной секции в настройках пользователя. При каждой попытке авторизации, ClickHouse пытается "привязаться" к DN, указанному в [определении LDAP сервера](#ldap-server-definition) параметром `bind_dn`, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается аутентифицированным. Обычно это называют методом "простой привязки". 
From e0d4487528759574c1f6f167688990941eafc2ea Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:03:03 +0300 Subject: [PATCH 273/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 83c92af0130..441cb519917 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -63,7 +63,7 @@ Удаленный сервер LDAP можно использовать для верификации паролей локально определенных пользователей (пользователей, которые определены в `users.xml` или в локальных параметрах управления доступом). Для этого укажите имя определенного ранее сервера LDAP вместо `password` или другой аналогичной секции в настройках пользователя. -При каждой попытке авторизации, ClickHouse пытается "привязаться" к DN, указанному в [определении LDAP сервера](#ldap-server-definition) параметром `bind_dn`, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается аутентифицированным. Обычно это называют методом "простой привязки". +При каждой попытке авторизации ClickHouse пытается "привязаться" к DN, указанному в [определении LDAP сервера](#ldap-server-definition), используя параметр `bind_dn` и предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается аутентифицированным. Обычно это называют методом "простой привязки". 
**Пример** From d64cef7990c90502cc1db650f0eff65fad9ae941 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:03:21 +0300 Subject: [PATCH 274/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 441cb519917..1cdc3e4e6ea 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -94,7 +94,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; ## Внешний пользовательский каталог LDAP {#ldap-external-user-directory} -В добавок к локально определенным пользователям, удаленный LDAP сервер может быть использован как источник определения пользователей. Для этого укажите имя определенного до этого сервера LDAP (см. [Определение LDAP сервера](#ldap-server-definition)) в секции `ldap` внутри секции `users_directories` файла `config.xml`. +В дополнение к локально определенным пользователям, удаленный LDAP сервер может служить источником определения пользователей. Для этого укажите имя определенного ранее сервера LDAP (см. [Определение LDAP сервера](#ldap-server-definition)) в секции `ldap` внутри секции `users_directories` файла `config.xml`. При каждой попытке авторизации ClicHouse пытается локально найти определение пользователя и авторизовать его как обычно. Если определение не будет найдено, ClickHouse предполагает, что оно находится во внешнем LDAP каталоге, и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю будут присвоены роли из списка, указанного в секции `roles`. 
Кроме того, может быть выполнен LDAP поиск, а его результаты могут быть преобразованы в имена ролей и присвоены пользователям, если была настроена секция `role_mapping`. Все это работает при условии, что SQL-ориентированное [Управлением доступом](../access-rights.md#access-control) включено, а роли созданы выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). From 53b005ade334394859cf2d82b17055b3b7ba452f Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:03:52 +0300 Subject: [PATCH 275/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 1cdc3e4e6ea..691de8004ff 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -96,7 +96,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; В дополнение к локально определенным пользователям, удаленный LDAP сервер может служить источником определения пользователей. Для этого укажите имя определенного ранее сервера LDAP (см. [Определение LDAP сервера](#ldap-server-definition)) в секции `ldap` внутри секции `users_directories` файла `config.xml`. -При каждой попытке авторизации ClicHouse пытается локально найти определение пользователя и авторизовать его как обычно. Если определение не будет найдено, ClickHouse предполагает, что оно находится во внешнем LDAP каталоге, и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю будут присвоены роли из списка, указанного в секции `roles`. 
Кроме того, может быть выполнен LDAP поиск, а его результаты могут быть преобразованы в имена ролей и присвоены пользователям, если была настроена секция `role_mapping`. Все это работает при условии, что SQL-ориентированное [Управлением доступом](../access-rights.md#access-control) включено, а роли созданы выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). +При каждой попытке авторизации ClickHouse пытается локально найти определение пользователя и аутентифицировать его как обычно. Если пользователь не находится локально, ClickHouse предполагает, что он определяется во внешнем LDAP каталоге и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю присваиваются роли из списка, указанного в секции `roles`. Кроме того, если настроена секция `role_mapping`, то выполняется LDAP поиск, а его результаты преобразуются в имена ролей и присваиваются пользователям. Все это работает при условии, что SQL-ориентированное [управлением доступом](../access-rights.md#access-control) включено, а роли созданы выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). 
**Пример** From 44a3b9dd6530957e41fd2f1bf3adff6a0f080e7b Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:04:09 +0300 Subject: [PATCH 276/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 691de8004ff..d10cd0cfe3d 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -131,7 +131,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; - `server` — одно из имен сервера LDAP, определенного в секции конфига `ldap_servers` выше. Этот параметр обязательный и не может быть оставлен пустым. - `roles` — секция со списком локально определенных ролей, которые будут присвоены каждому пользователю, полученному от сервера LDAP. - - Если роли не указаны здесь или в секции `role_mapping` (ниже), пользователь не сможет выполнять никаких операций после аутентификации. + - Если роли не указаны ни здесь, ни в секции `role_mapping` (см. ниже), пользователь после аутентификации не сможет выполнять никаких действий. - `role_mapping` — секция c параметрами LDAP поиска и правилами отображения. - При аутентификации пользователя, пока еще связанного с LDAP, производится LDAP поиск с помощью `search_filter` и имени этого пользователя. Для каждой записи, найденной в ходе поиска, выделяется значение указанного атрибута. У каждого атрибута, имеющего указанный префикс, удаляется этот префикс, а остальная часть значения становится именем локальной роли, определенной в ClickHouse, причем предполагается, что эта роль была создана выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) до этого. 
- Внутри одной секции `ldap` может быть несколько секций `role_mapping`. Все они будут применены. From 82dff38fe673c7415445a064faa26a8b6bd1a516 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:04:18 +0300 Subject: [PATCH 277/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index d10cd0cfe3d..95cbb2952f5 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -133,7 +133,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; - `roles` — секция со списком локально определенных ролей, которые будут присвоены каждому пользователю, полученному от сервера LDAP. - Если роли не указаны ни здесь, ни в секции `role_mapping` (см. ниже), пользователь после аутентификации не сможет выполнять никаких действий. - `role_mapping` — секция c параметрами LDAP поиска и правилами отображения. - - При аутентификации пользователя, пока еще связанного с LDAP, производится LDAP поиск с помощью `search_filter` и имени этого пользователя. Для каждой записи, найденной в ходе поиска, выделяется значение указанного атрибута. У каждого атрибута, имеющего указанный префикс, удаляется этот префикс, а остальная часть значения становится именем локальной роли, определенной в ClickHouse, причем предполагается, что эта роль была создана выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) до этого. + - При аутентификации пользователя, пока еще связанного с LDAP, производится LDAP поиск с помощью `search_filter` и имени этого пользователя. 
Для каждой записи, найденной в ходе поиска, выделяется значение указанного атрибута. У каждого атрибута, имеющего указанный префикс, этот префикс удаляется, а остальная часть значения становится именем локальной роли, определенной в ClickHouse, причем предполагается, что эта роль была ранее создана выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) до этого. - Внутри одной секции `ldap` может быть несколько секций `role_mapping`. Все они будут применены. - `base_dn` — шаблон, который используется для создания базового DN для LDAP поиска. - конечный DN будет создан заменой всех подстрок `{user_name}` и `{bind_dn}` шаблона на фактическое имя пользователя и DN привязки соответственно при каждом LDAP поиске. From 6e17c26fe3620bdc22fbdbb68d374b8cfba72a4c Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:04:25 +0300 Subject: [PATCH 278/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 95cbb2952f5..b2e088aa721 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -136,7 +136,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; - При аутентификации пользователя, пока еще связанного с LDAP, производится LDAP поиск с помощью `search_filter` и имени этого пользователя. Для каждой записи, найденной в ходе поиска, выделяется значение указанного атрибута. 
У каждого атрибута, имеющего указанный префикс, этот префикс удаляется, а остальная часть значения становится именем локальной роли, определенной в ClickHouse, причем предполагается, что эта роль была ранее создана выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) до этого. - Внутри одной секции `ldap` может быть несколько секций `role_mapping`. Все они будут применены. - `base_dn` — шаблон, который используется для создания базового DN для LDAP поиска. - - конечный DN будет создан заменой всех подстрок `{user_name}` и `{bind_dn}` шаблона на фактическое имя пользователя и DN привязки соответственно при каждом LDAP поиске. + - При формировании DN все подстроки `{user_name}` и `{bind_dn}` в шаблоне будут заменяться на фактическое имя пользователя и DN привязки соответственно при каждом LDAP поиске. - `scope` — Область LDAP поиска. - Принимаемые значения: `base`, `one_level`, `children`, `subtree` (по-умолчанию). - `search_filter` — шаблон, который используется для создания фильтра для каждого LDAP поиска. From ce5f88a14e45f711d9fed731f27aae422434d66c Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:04:33 +0300 Subject: [PATCH 279/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index b2e088aa721..88b804c10a2 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -138,7 +138,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; - `base_dn` — шаблон, который используется для создания базового DN для LDAP поиска. 
- При формировании DN все подстроки `{user_name}` и `{bind_dn}` в шаблоне будут заменяться на фактическое имя пользователя и DN привязки соответственно при каждом LDAP поиске. - `scope` — Область LDAP поиска. - - Принимаемые значения: `base`, `one_level`, `children`, `subtree` (по-умолчанию). + - Возможные значения: `base`, `one_level`, `children`, `subtree` (по умолчанию). - `search_filter` — шаблон, который используется для создания фильтра для каждого LDAP поиска. - Конечный фильтр будет создан заменой всех подстрок `{user_name}`, `{bind_dn}` и `{base_dn}` шаблона на фактическое имя пользователя, DN привязи и базовый DN при соответственно каждом LDAP поиске. - Обратите внимание, что специальные символы должны быть правильно экранированы в XML. From 8563b3b822c93ad4f42eedbf69f1357139e757f9 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:05:01 +0300 Subject: [PATCH 280/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 88b804c10a2..96e598a5eb2 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -143,6 +143,6 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; - Конечный фильтр будет создан заменой всех подстрок `{user_name}`, `{bind_dn}` и `{base_dn}` шаблона на фактическое имя пользователя, DN привязи и базовый DN при соответственно каждом LDAP поиске. - Обратите внимание, что специальные символы должны быть правильно экранированы в XML. - `attribute` — имя атрибута, значение которого будет возвращаться LDAP поиском. 
- - `prefix` — префикс, который, как предполагается, будет находиться перед началом каждой строки в исходном списке строк, возвращаемых LDAP поиском. Префикс будет удален из исходных строк, а сами они будут рассматриваться как имена локальных ролей. По-умолчанию пусто. + - `prefix` — префикс, который, как предполагается, будет находиться перед началом каждой строки в исходном списке строк, возвращаемых LDAP поиском. Префикс будет удален из исходных строк, а сами они будут рассматриваться как имена локальных ролей. По умолчанию: пустая строка. [Оригинальная статья](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap.md) From 37e5578a03aadb98888605994e882da986a20c5c Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:12:53 +0300 Subject: [PATCH 281/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 96e598a5eb2..86ccb14896d 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -129,7 +129,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; **Параметры** -- `server` — одно из имен сервера LDAP, определенного в секции конфига `ldap_servers` выше. Этот параметр обязательный и не может быть оставлен пустым. +- `server` — имя одного из серверов LDAP, определенных в секции `ldap_servers` в файле конфигурации (см. выше). Этот параметр обязательный и не может быть пустым. - `roles` — секция со списком локально определенных ролей, которые будут присвоены каждому пользователю, полученному от сервера LDAP. - Если роли не указаны ни здесь, ни в секции `role_mapping` (см.
ниже), пользователь после аутентификации не сможет выполнять никаких действий. - `role_mapping` — секция c параметрами LDAP поиска и правилами отображения. From 5f9a854617caea599e35fc1e7b12bf00d384572c Mon Sep 17 00:00:00 2001 From: George Date: Tue, 16 Mar 2021 21:13:22 +0300 Subject: [PATCH 282/333] Some updates --- docs/en/operations/external-authenticators/ldap.md | 2 +- docs/ru/operations/external-authenticators/ldap.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md index e528e2a7c07..fc3dd466ea9 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -84,7 +84,7 @@ At each login attempt, ClickHouse tries to "bind" to the specified DN defined by Note, that user `my_user` refers to `my_ldap_server`. This LDAP server must be configured in the main `config.xml` file as described previously. -When SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled, users that are authenticated by LDAP servers can also be created using the [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement. +When SQL-driven [Access Control and Account Management](../access-rights.md) is enabled, users that are authenticated by LDAP servers can also be created using the [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement. Query: diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 7f901898a99..6fcb20b8b3b 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -84,7 +84,7 @@ Обратите внимание, что пользователь `my_user` ссылается на `my_ldap_server`. Этот LDAP сервер должен быть настроен в основном файле `config.xml`, как это было описано ранее. 
-При включенном SQL-ориентированным [Управлением доступом](../access-rights.md#access-control) пользователи, аутентифицированные LDAP серверами, могут также быть созданы выражением [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement). +При включенном SQL-ориентированным [Управлении доступом](../access-rights.md) пользователи, аутентифицированные LDAP серверами, могут также быть созданы выражением [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement). Запрос: From 1130fd0654d8ff71d6d76fdbf0f3a77d7658a9f3 Mon Sep 17 00:00:00 2001 From: George Date: Tue, 16 Mar 2021 21:14:45 +0300 Subject: [PATCH 283/333] minor fix --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 60640030fd1..49b4d13ccb2 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -84,7 +84,7 @@ Обратите внимание, что пользователь `my_user` ссылается на `my_ldap_server`. Этот LDAP сервер должен быть настроен в основном файле `config.xml`, как это было описано ранее. -При включенном SQL-ориентированным [Управлении доступом](../access-rights.md) пользователи, аутентифицированные LDAP серверами, могут также быть созданы выражением [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement). +При включенном SQL-ориентированном [управлении доступом](../access-rights.md) пользователи, аутентифицированные LDAP серверами, могут также быть созданы выражением [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement). 
Запрос: From 2c48ea6f59fe2e3c8f0230b498996e81dab49e96 Mon Sep 17 00:00:00 2001 From: George Date: Tue, 16 Mar 2021 21:18:08 +0300 Subject: [PATCH 284/333] fixed description --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 49b4d13ccb2..63e0c73c63e 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -140,7 +140,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; - `scope` — Область LDAP поиска. - Возможные значения: `base`, `one_level`, `children`, `subtree` (по умолчанию). - `search_filter` — шаблон, который используется для создания фильтра для каждого LDAP поиска. - - Конечный фильтр будет создан заменой всех подстрок `{user_name}`, `{bind_dn}` и `{base_dn}` шаблона на фактическое имя пользователя, DN привязи и базовый DN при соответственно каждом LDAP поиске. + - при формировании фильтра все подстроки `{user_name}`, `{bind_dn}` и `{base_dn}` в шаблоне будут заменяться на фактическое имя пользователя, DN привязки и базовый DN соответственно при каждом LDAP поиске. - Обратите внимание, что специальные символы должны быть правильно экранированы в XML. - `attribute` — имя атрибута, значение которого будет возвращаться LDAP поиском. - `prefix` — префикс, который, как предполагается, будет находиться перед началом каждой строки в исходном списке строк, возвращаемых LDAP поиском. Префикс будет удален из исходных строк, а сами они будут рассматриваться как имена локальных ролей. По умолчанию: пустая строка. 
From 9801241760aace052c36090d908726e25f42ede8 Mon Sep 17 00:00:00 2001 From: George Date: Tue, 16 Mar 2021 21:31:59 +0300 Subject: [PATCH 285/333] fixed links --- docs/en/operations/external-authenticators/ldap.md | 2 +- docs/ru/operations/external-authenticators/ldap.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md index fc3dd466ea9..158a44a7492 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -97,7 +97,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; In addition to the locally defined users, a remote LDAP server can be used as a source of user definitions. To achieve this, specify previously defined LDAP server name (see [LDAP Server Definition](#ldap-server-definition)) in the `ldap` section inside the `users_directories` section of the `config.xml` file. -At each login attempt, ClickHouse tries to find the user definition locally and authenticate it as usual. If the user is not defined, ClickHouse will assume the definition exists in the external LDAP directory and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. All this implies that the SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled and roles are created using the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. +At each login attempt, ClickHouse tries to find the user definition locally and authenticate it as usual. 
If the user is not defined, ClickHouse will assume the definition exists in the external LDAP directory and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. All this implies that the SQL-driven [Access Control and Account Management](../access-rights.md) is enabled and roles are created using the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. **Example** diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 63e0c73c63e..f93581123fd 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -96,7 +96,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; В дополнение к локально определенным пользователям, удаленный LDAP сервер может служить источником определения пользователей. Для этого укажите имя определенного ранее сервера LDAP (см. [Определение LDAP сервера](#ldap-server-definition)) в секции `ldap` внутри секции `users_directories` файла `config.xml`. -При каждой попытке авторизации ClickHouse пытается локально найти определение пользователя и аутентифицировать его как обычно. Если пользователь не находится локально, ClickHouse предполагает, что он определяется во внешнем LDAP каталоге и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю присваиваются роли из списка, указанного в секции `roles`. 
Кроме того, если настроена секция `role_mapping`, то выполняется LDAP поиск, а его результаты преобразуются в имена ролей и присваиваются пользователям. Все это работает при условии, что SQL-ориентированное [управлением доступом](../access-rights.md#access-control) включено, а роли созданы выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). +При каждой попытке авторизации ClickHouse пытается локально найти определение пользователя и аутентифицировать его как обычно. Если пользователь не находится локально, ClickHouse предполагает, что он определяется во внешнем LDAP каталоге и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю присваиваются роли из списка, указанного в секции `roles`. Кроме того, если настроена секция `role_mapping`, то выполняется LDAP поиск, а его результаты преобразуются в имена ролей и присваиваются пользователям. Все это работает при условии, что SQL-ориентированное [управлением доступом](../access-rights.md) включено, а роли созданы выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). **Пример** From 4d24cef69c6ed2bf3a32a1dcae41411b0baced5c Mon Sep 17 00:00:00 2001 From: George Date: Tue, 16 Mar 2021 21:46:05 +0300 Subject: [PATCH 286/333] fixed links --- docs/en/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md index 158a44a7492..1cd38ba8430 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -146,4 +146,4 @@ Note that `my_ldap_server` referred in the `ldap` section inside the `user_direc - `attribute` — Attribute name whose values will be returned by the LDAP search. 
- `prefix` — Prefix, that will be expected to be in front of each string in the original list of strings returned by the LDAP search. The prefix will be removed from the original strings and the resulting strings will be treated as local role names. Empty by default. -[Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap.md) +[Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap/) From b39f8cc6ac2e470b01826458f3f0f286367da64e Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 8 Mar 2021 22:05:51 +0300 Subject: [PATCH 287/333] Move ErrorCodes::increment() into module part --- src/Common/ErrorCodes.cpp | 11 +++++++++++ src/Common/ErrorCodes.h | 11 +---------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 40ce23fffb2..f6c15848553 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -584,6 +584,17 @@ namespace ErrorCodes } ErrorCode end() { return END + 1; } + + void increment(ErrorCode error_code) + { + if (error_code >= end()) + { + /// For everything outside the range, use END. + /// (end() is the pointer pass the end, while END is the last value that has an element in values array). + error_code = end() - 1; + } + values[error_code].fetch_add(1, std::memory_order_relaxed); + } } } diff --git a/src/Common/ErrorCodes.h b/src/Common/ErrorCodes.h index cc610c5d927..919a4afdabf 100644 --- a/src/Common/ErrorCodes.h +++ b/src/Common/ErrorCodes.h @@ -31,16 +31,7 @@ namespace ErrorCodes ErrorCode end(); /// Add value for specified error_code. - inline void increment(ErrorCode error_code) - { - if (error_code >= end()) - { - /// For everything outside the range, use END. - /// (end() is the pointer pass the end, while END is the last value that has an element in values array). 
- error_code = end() - 1; - } - values[error_code].fetch_add(1, std::memory_order_relaxed); - } + void increment(ErrorCode error_code); } } From 259e5ba88e35546279bb46511a5e6ad18b4457d6 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 8 Mar 2021 22:05:51 +0300 Subject: [PATCH 288/333] Separate accounting of remote exceptions in system.errors --- docs/en/operations/system-tables/errors.md | 3 ++- src/Common/ErrorCodes.cpp | 9 ++++--- src/Common/ErrorCodes.h | 9 +++++-- src/Common/Exception.cpp | 12 +++++----- src/Storages/System/StorageSystemErrors.cpp | 24 ++++++++++++------- .../0_stateless/01545_system_errors.reference | 3 ++- .../0_stateless/01545_system_errors.sh | 13 +++++++--- 7 files changed, 49 insertions(+), 24 deletions(-) diff --git a/docs/en/operations/system-tables/errors.md b/docs/en/operations/system-tables/errors.md index ec874efd711..bf3e33f5275 100644 --- a/docs/en/operations/system-tables/errors.md +++ b/docs/en/operations/system-tables/errors.md @@ -7,11 +7,12 @@ Columns: - `name` ([String](../../sql-reference/data-types/string.md)) — name of the error (`errorCodeToName`). - `code` ([Int32](../../sql-reference/data-types/int-uint.md)) — code number of the error. - `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of times this error has been happened. +- `remote` ([UInt8](../../sql-reference/data-types/int-uint.md)) — remote exception (i.e. received during one of the distributed query). 
**Example** ``` sql -SELECT * +SELECT name, code, value FROM system.errors WHERE value > 0 ORDER BY code ASC diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index f6c15848553..ec3bf9c8917 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -563,7 +563,7 @@ namespace ErrorCodes #undef M constexpr Value END = 3000; - std::atomic values[END + 1]{}; + ValuePair values[END + 1]{}; struct ErrorCodesNames { @@ -585,7 +585,7 @@ namespace ErrorCodes ErrorCode end() { return END + 1; } - void increment(ErrorCode error_code) + void increment(ErrorCode error_code, bool remote) { if (error_code >= end()) { @@ -593,7 +593,10 @@ namespace ErrorCodes /// (end() is the pointer pass the end, while END is the last value that has an element in values array). error_code = end() - 1; } - values[error_code].fetch_add(1, std::memory_order_relaxed); + if (remote) + values[error_code].remote.fetch_add(1, std::memory_order_relaxed); + else + values[error_code].local.fetch_add(1, std::memory_order_relaxed); } } diff --git a/src/Common/ErrorCodes.h b/src/Common/ErrorCodes.h index 919a4afdabf..c4a9ae2907b 100644 --- a/src/Common/ErrorCodes.h +++ b/src/Common/ErrorCodes.h @@ -25,13 +25,18 @@ namespace ErrorCodes std::string_view getName(ErrorCode error_code); /// ErrorCode identifier -> current value of error_code. - extern std::atomic values[]; + struct ValuePair + { + std::atomic local; + std::atomic remote; + }; + extern ValuePair values[]; /// Get index just after last error_code identifier. ErrorCode end(); /// Add value for specified error_code. - void increment(ErrorCode error_code); + void increment(ErrorCode error_code, bool remote); } } diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index 16f15d4e6f2..1963c1513b9 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -34,9 +34,9 @@ namespace ErrorCodes extern const int CANNOT_MREMAP; } -/// Aborts the process if error code is LOGICAL_ERROR. 
-/// Increments error codes statistics. -void handle_error_code([[maybe_unused]] const std::string & msg, int code) +/// - Aborts the process if error code is LOGICAL_ERROR. +/// - Increments error codes statistics. +void handle_error_code([[maybe_unused]] const std::string & msg, int code, bool remote) { // In debug builds and builds with sanitizers, treat LOGICAL_ERROR as an assertion failure. // Log the message before we fail. @@ -47,20 +47,20 @@ void handle_error_code([[maybe_unused]] const std::string & msg, int code) abort(); } #endif - ErrorCodes::increment(code); + ErrorCodes::increment(code, remote); } Exception::Exception(const std::string & msg, int code, bool remote_) : Poco::Exception(msg, code) , remote(remote_) { - handle_error_code(msg, code); + handle_error_code(msg, code, remote); } Exception::Exception(const std::string & msg, const Exception & nested, int code) : Poco::Exception(msg, nested, code) { - handle_error_code(msg, code); + handle_error_code(msg, code, remote); } Exception::Exception(CreateFromPocoTag, const Poco::Exception & exc) diff --git a/src/Storages/System/StorageSystemErrors.cpp b/src/Storages/System/StorageSystemErrors.cpp index 89df058900b..1a29484e169 100644 --- a/src/Storages/System/StorageSystemErrors.cpp +++ b/src/Storages/System/StorageSystemErrors.cpp @@ -13,27 +13,35 @@ NamesAndTypesList StorageSystemErrors::getNamesAndTypes() { "name", std::make_shared() }, { "code", std::make_shared() }, { "value", std::make_shared() }, + { "remote", std::make_shared() }, }; } void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { + auto add_row = [&](std::string_view name, size_t code, size_t value, bool remote) + { + if (value || context.getSettingsRef().system_events_show_zero_values) + { + size_t col_num = 0; + res_columns[col_num++]->insert(name); + res_columns[col_num++]->insert(code); + res_columns[col_num++]->insert(value); + 
res_columns[col_num++]->insert(remote); + } + }; + for (size_t i = 0, end = ErrorCodes::end(); i < end; ++i) { - UInt64 value = ErrorCodes::values[i]; + const auto & error = ErrorCodes::values[i]; std::string_view name = ErrorCodes::getName(i); if (name.empty()) continue; - if (value || context.getSettingsRef().system_events_show_zero_values) - { - size_t col_num = 0; - res_columns[col_num++]->insert(name); - res_columns[col_num++]->insert(i); - res_columns[col_num++]->insert(value); - } + add_row(name, i, error.local, 0 /* remote=0 */); + add_row(name, i, error.remote, 1 /* remote=1 */); } } diff --git a/tests/queries/0_stateless/01545_system_errors.reference b/tests/queries/0_stateless/01545_system_errors.reference index d00491fd7e5..0e7f2447090 100644 --- a/tests/queries/0_stateless/01545_system_errors.reference +++ b/tests/queries/0_stateless/01545_system_errors.reference @@ -1 +1,2 @@ -1 +local=1 +remote=1 diff --git a/tests/queries/0_stateless/01545_system_errors.sh b/tests/queries/0_stateless/01545_system_errors.sh index 63af6bb8d43..970fd403866 100755 --- a/tests/queries/0_stateless/01545_system_errors.sh +++ b/tests/queries/0_stateless/01545_system_errors.sh @@ -4,7 +4,14 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -prev="$(${CLICKHOUSE_CLIENT} -q "SELECT value FROM system.errors WHERE name = 'FUNCTION_THROW_IF_VALUE_IS_NON_ZERO'")" +# local +prev="$(${CLICKHOUSE_CLIENT} -q "SELECT value FROM system.errors WHERE name = 'FUNCTION_THROW_IF_VALUE_IS_NON_ZERO' AND NOT remote")" $CLICKHOUSE_CLIENT -q 'SELECT throwIf(1)' >& /dev/null -cur="$(${CLICKHOUSE_CLIENT} -q "SELECT value FROM system.errors WHERE name = 'FUNCTION_THROW_IF_VALUE_IS_NON_ZERO'")" -echo $((cur - prev)) +cur="$(${CLICKHOUSE_CLIENT} -q "SELECT value FROM system.errors WHERE name = 'FUNCTION_THROW_IF_VALUE_IS_NON_ZERO' AND NOT remote")" +echo local=$((cur - prev)) + +# remote +prev="$(${CLICKHOUSE_CLIENT} -q "SELECT value FROM system.errors WHERE name = 'FUNCTION_THROW_IF_VALUE_IS_NON_ZERO' AND remote")" +${CLICKHOUSE_CLIENT} -q "SELECT * FROM remote('127.2', system.one) where throwIf(not dummy)" >& /dev/null +cur="$(${CLICKHOUSE_CLIENT} -q "SELECT value FROM system.errors WHERE name = 'FUNCTION_THROW_IF_VALUE_IS_NON_ZERO' AND remote")" +echo remote=$((cur - prev)) From 7f73ac2b7a94ed809f4a2eaa59c13d7585756a6c Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 8 Mar 2021 22:05:51 +0300 Subject: [PATCH 289/333] Fix ErrorCodes::Value/ErrorCode types (sigh) Note, that system.errors already uses correct types --- src/Common/ErrorCodes.cpp | 2 +- src/Common/ErrorCodes.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index ec3bf9c8917..6f1ff8e4f3a 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -562,7 +562,7 @@ namespace ErrorCodes APPLY_FOR_ERROR_CODES(M) #undef M - constexpr Value END = 3000; + constexpr ErrorCode END = 3000; ValuePair values[END + 1]{}; struct ErrorCodesNames diff --git a/src/Common/ErrorCodes.h b/src/Common/ErrorCodes.h index c4a9ae2907b..6373ad6d0f9 100644 --- a/src/Common/ErrorCodes.h +++ b/src/Common/ErrorCodes.h @@ -17,8 +17,8 @@ namespace DB namespace 
ErrorCodes { /// ErrorCode identifier (index in array). - using ErrorCode = size_t; - using Value = int; + using ErrorCode = int; + using Value = size_t; /// Get name of error_code by identifier. /// Returns statically allocated string. From 0d01eaf94fd122213b3d31593248fe3f3e9c6a40 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 8 Mar 2021 23:08:04 +0300 Subject: [PATCH 290/333] Guard ErrorCodes with mutex over atomic --- src/Common/ErrorCodes.cpp | 30 +++++++++++++++++---- src/Common/ErrorCodes.h | 25 +++++++++++++---- src/Storages/System/StorageSystemErrors.cpp | 2 +- 3 files changed, 46 insertions(+), 11 deletions(-) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 6f1ff8e4f3a..32c9c4a452b 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -563,7 +563,7 @@ namespace ErrorCodes #undef M constexpr ErrorCode END = 3000; - ValuePair values[END + 1]{}; + ValuePairHolder values[END + 1]{}; struct ErrorCodesNames { @@ -593,10 +593,30 @@ namespace ErrorCodes /// (end() is the pointer pass the end, while END is the last value that has an element in values array). 
error_code = end() - 1; } - if (remote) - values[error_code].remote.fetch_add(1, std::memory_order_relaxed); - else - values[error_code].local.fetch_add(1, std::memory_order_relaxed); + + ValuePair inc_value{ + !remote, /* local */ + remote, /* remote */ + }; + values[error_code].increment(inc_value); + } + + ValuePair & ValuePair::operator+=(const ValuePair & value) + { + local += value.local; + remote += value.remote; + return *this; + } + + void ValuePairHolder::increment(const ValuePair & value_) + { + std::lock_guard lock(mutex); + value += value_; + } + ValuePair ValuePairHolder::get() + { + std::lock_guard lock(mutex); + return value; } } diff --git a/src/Common/ErrorCodes.h b/src/Common/ErrorCodes.h index 6373ad6d0f9..0db877db205 100644 --- a/src/Common/ErrorCodes.h +++ b/src/Common/ErrorCodes.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include @@ -24,13 +24,28 @@ namespace ErrorCodes /// Returns statically allocated string. std::string_view getName(ErrorCode error_code); - /// ErrorCode identifier -> current value of error_code. struct ValuePair { - std::atomic local; - std::atomic remote; + Value local = 0; + Value remote = 0; + + ValuePair & operator+=(const ValuePair & value); }; - extern ValuePair values[]; + + /// Thread-safe + struct ValuePairHolder + { + public: + void increment(const ValuePair & value_); + ValuePair get(); + + private: + ValuePair value; + std::mutex mutex; + }; + + /// ErrorCode identifier -> current value of error_code. + extern ValuePairHolder values[]; /// Get index just after last error_code identifier. 
ErrorCode end(); diff --git a/src/Storages/System/StorageSystemErrors.cpp b/src/Storages/System/StorageSystemErrors.cpp index 1a29484e169..d57e8a0a670 100644 --- a/src/Storages/System/StorageSystemErrors.cpp +++ b/src/Storages/System/StorageSystemErrors.cpp @@ -34,7 +34,7 @@ void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & for (size_t i = 0, end = ErrorCodes::end(); i < end; ++i) { - const auto & error = ErrorCodes::values[i]; + const auto & error = ErrorCodes::values[i].get(); std::string_view name = ErrorCodes::getName(i); if (name.empty()) From c8852331a2f3356069155d6ab45ba79f1ecada57 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 8 Mar 2021 23:16:46 +0300 Subject: [PATCH 291/333] Add system.errors.last_error_time column --- docs/en/operations/system-tables/errors.md | 1 + src/Common/ErrorCodes.cpp | 5 +++++ src/Common/ErrorCodes.h | 1 + src/Storages/System/StorageSystemErrors.cpp | 9 ++++++--- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/docs/en/operations/system-tables/errors.md b/docs/en/operations/system-tables/errors.md index bf3e33f5275..b440ae4d787 100644 --- a/docs/en/operations/system-tables/errors.md +++ b/docs/en/operations/system-tables/errors.md @@ -7,6 +7,7 @@ Columns: - `name` ([String](../../sql-reference/data-types/string.md)) — name of the error (`errorCodeToName`). - `code` ([Int32](../../sql-reference/data-types/int-uint.md)) — code number of the error. - `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of times this error has been happened. +- `last_error_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — time when the last error happened. - `remote` ([UInt8](../../sql-reference/data-types/int-uint.md)) — remote exception (i.e. received during one of the distributed query). 
**Example** diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 32c9c4a452b..3532c063651 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -1,4 +1,5 @@ #include +#include /** Previously, these constants were located in one enum. * But in this case there is a problem: when you add a new constant, you need to recompile @@ -605,6 +606,10 @@ namespace ErrorCodes { local += value.local; remote += value.remote; + + const auto now = std::chrono::system_clock::now(); + last_error_time_ms = std::chrono::duration_cast(now.time_since_epoch()).count(); + return *this; } diff --git a/src/Common/ErrorCodes.h b/src/Common/ErrorCodes.h index 0db877db205..c8c454b51a7 100644 --- a/src/Common/ErrorCodes.h +++ b/src/Common/ErrorCodes.h @@ -28,6 +28,7 @@ namespace ErrorCodes { Value local = 0; Value remote = 0; + UInt64 last_error_time_ms = 0; ValuePair & operator+=(const ValuePair & value); }; diff --git a/src/Storages/System/StorageSystemErrors.cpp b/src/Storages/System/StorageSystemErrors.cpp index d57e8a0a670..a3d68ff5d86 100644 --- a/src/Storages/System/StorageSystemErrors.cpp +++ b/src/Storages/System/StorageSystemErrors.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -13,6 +14,7 @@ NamesAndTypesList StorageSystemErrors::getNamesAndTypes() { "name", std::make_shared() }, { "code", std::make_shared() }, { "value", std::make_shared() }, + { "last_error_time", std::make_shared() }, { "remote", std::make_shared() }, }; } @@ -20,7 +22,7 @@ NamesAndTypesList StorageSystemErrors::getNamesAndTypes() void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - auto add_row = [&](std::string_view name, size_t code, size_t value, bool remote) + auto add_row = [&](std::string_view name, size_t code, size_t value, UInt64 last_error_time_ms, bool remote) { if (value || context.getSettingsRef().system_events_show_zero_values) { @@ -28,6 +30,7 @@ void 
StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & res_columns[col_num++]->insert(name); res_columns[col_num++]->insert(code); res_columns[col_num++]->insert(value); + res_columns[col_num++]->insert(last_error_time_ms / 1000); res_columns[col_num++]->insert(remote); } }; @@ -40,8 +43,8 @@ void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & if (name.empty()) continue; - add_row(name, i, error.local, 0 /* remote=0 */); - add_row(name, i, error.remote, 1 /* remote=1 */); + add_row(name, i, error.local, error.last_error_time_ms, 0 /* remote=0 */); + add_row(name, i, error.remote, error.last_error_time_ms, 1 /* remote=1 */); } } From 775f8f76827378b98d0b679eaa564ac7697ee81f Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 8 Mar 2021 23:31:51 +0300 Subject: [PATCH 292/333] Add system.errors.last_error_message column --- docs/en/operations/system-tables/errors.md | 1 + src/Common/ErrorCodes.cpp | 5 ++++- src/Common/ErrorCodes.h | 3 ++- src/Common/Exception.cpp | 2 +- src/Storages/System/StorageSystemErrors.cpp | 8 +++++--- 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/en/operations/system-tables/errors.md b/docs/en/operations/system-tables/errors.md index b440ae4d787..f8ac1de29a8 100644 --- a/docs/en/operations/system-tables/errors.md +++ b/docs/en/operations/system-tables/errors.md @@ -8,6 +8,7 @@ Columns: - `code` ([Int32](../../sql-reference/data-types/int-uint.md)) — code number of the error. - `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of times this error has been happened. - `last_error_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — time when the last error happened. +- `last_error_message` ([String](../../sql-reference/data-types/string.md)) — message for the last error. - `remote` ([UInt8](../../sql-reference/data-types/int-uint.md)) — remote exception (i.e. received during one of the distributed query). 
**Example** diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 3532c063651..6c9de122a26 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -586,7 +586,7 @@ namespace ErrorCodes ErrorCode end() { return END + 1; } - void increment(ErrorCode error_code, bool remote) + void increment(ErrorCode error_code, bool remote, const std::string & message) { if (error_code >= end()) { @@ -598,6 +598,8 @@ namespace ErrorCodes ValuePair inc_value{ !remote, /* local */ remote, /* remote */ + 0, /* last_error_time_ms */ + message, /* message */ }; values[error_code].increment(inc_value); } @@ -606,6 +608,7 @@ namespace ErrorCodes { local += value.local; remote += value.remote; + message = value.message; const auto now = std::chrono::system_clock::now(); last_error_time_ms = std::chrono::duration_cast(now.time_since_epoch()).count(); diff --git a/src/Common/ErrorCodes.h b/src/Common/ErrorCodes.h index c8c454b51a7..962b1f8a20a 100644 --- a/src/Common/ErrorCodes.h +++ b/src/Common/ErrorCodes.h @@ -29,6 +29,7 @@ namespace ErrorCodes Value local = 0; Value remote = 0; UInt64 last_error_time_ms = 0; + std::string message; ValuePair & operator+=(const ValuePair & value); }; @@ -52,7 +53,7 @@ namespace ErrorCodes ErrorCode end(); /// Add value for specified error_code. 
- void increment(ErrorCode error_code, bool remote); + void increment(ErrorCode error_code, bool remote, const std::string & message); } } diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index 1963c1513b9..1fe224edc6e 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -47,7 +47,7 @@ void handle_error_code([[maybe_unused]] const std::string & msg, int code, bool abort(); } #endif - ErrorCodes::increment(code, remote); + ErrorCodes::increment(code, remote, msg); } Exception::Exception(const std::string & msg, int code, bool remote_) diff --git a/src/Storages/System/StorageSystemErrors.cpp b/src/Storages/System/StorageSystemErrors.cpp index a3d68ff5d86..c06bb13beb6 100644 --- a/src/Storages/System/StorageSystemErrors.cpp +++ b/src/Storages/System/StorageSystemErrors.cpp @@ -15,6 +15,7 @@ NamesAndTypesList StorageSystemErrors::getNamesAndTypes() { "code", std::make_shared() }, { "value", std::make_shared() }, { "last_error_time", std::make_shared() }, + { "last_error_message",std::make_shared() }, { "remote", std::make_shared() }, }; } @@ -22,7 +23,7 @@ NamesAndTypesList StorageSystemErrors::getNamesAndTypes() void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - auto add_row = [&](std::string_view name, size_t code, size_t value, UInt64 last_error_time_ms, bool remote) + auto add_row = [&](std::string_view name, size_t code, size_t value, UInt64 last_error_time_ms, const std::string & message, bool remote) { if (value || context.getSettingsRef().system_events_show_zero_values) { @@ -31,6 +32,7 @@ void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & res_columns[col_num++]->insert(code); res_columns[col_num++]->insert(value); res_columns[col_num++]->insert(last_error_time_ms / 1000); + res_columns[col_num++]->insert(message); res_columns[col_num++]->insert(remote); } }; @@ -43,8 +45,8 @@ void 
StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & if (name.empty()) continue; - add_row(name, i, error.local, error.last_error_time_ms, 0 /* remote=0 */); - add_row(name, i, error.remote, error.last_error_time_ms, 1 /* remote=1 */); + add_row(name, i, error.local, error.last_error_time_ms, error.message, 0 /* remote=0 */); + add_row(name, i, error.remote, error.last_error_time_ms, error.message, 1 /* remote=1 */); } } From 44c9dc753da4add80dd0a92ce14152790138e85a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 8 Mar 2021 23:39:19 +0300 Subject: [PATCH 293/333] Add system.errors.last_error_stacktrace column --- docs/en/operations/system-tables/errors.md | 1 + src/Common/ErrorCodes.cpp | 12 +++++++----- src/Common/ErrorCodes.h | 3 ++- src/Common/Exception.cpp | 8 ++++---- src/Storages/System/StorageSystemErrors.cpp | 20 +++++++++++--------- 5 files changed, 25 insertions(+), 19 deletions(-) diff --git a/docs/en/operations/system-tables/errors.md b/docs/en/operations/system-tables/errors.md index f8ac1de29a8..72a537f15b9 100644 --- a/docs/en/operations/system-tables/errors.md +++ b/docs/en/operations/system-tables/errors.md @@ -9,6 +9,7 @@ Columns: - `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of times this error has been happened. - `last_error_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — time when the last error happened. - `last_error_message` ([String](../../sql-reference/data-types/string.md)) — message for the last error. +- `last_error_stacktrace` ([String](../../sql-reference/data-types/string.md)) — stacktrace for the last error. - `remote` ([UInt8](../../sql-reference/data-types/int-uint.md)) — remote exception (i.e. received during one of the distributed query). 
**Example** diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 6c9de122a26..14182467351 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -586,7 +586,7 @@ namespace ErrorCodes ErrorCode end() { return END + 1; } - void increment(ErrorCode error_code, bool remote, const std::string & message) + void increment(ErrorCode error_code, bool remote, const std::string & message, const std::string & stacktrace) { if (error_code >= end()) { @@ -596,10 +596,11 @@ namespace ErrorCodes } ValuePair inc_value{ - !remote, /* local */ - remote, /* remote */ - 0, /* last_error_time_ms */ - message, /* message */ + !remote, /* local */ + remote, /* remote */ + 0, /* last_error_time_ms */ + message, /* message */ + stacktrace, /* stacktrace */ }; values[error_code].increment(inc_value); } @@ -609,6 +610,7 @@ namespace ErrorCodes local += value.local; remote += value.remote; message = value.message; + stacktrace = value.stacktrace; const auto now = std::chrono::system_clock::now(); last_error_time_ms = std::chrono::duration_cast(now.time_since_epoch()).count(); diff --git a/src/Common/ErrorCodes.h b/src/Common/ErrorCodes.h index 962b1f8a20a..4c79614d55d 100644 --- a/src/Common/ErrorCodes.h +++ b/src/Common/ErrorCodes.h @@ -30,6 +30,7 @@ namespace ErrorCodes Value remote = 0; UInt64 last_error_time_ms = 0; std::string message; + std::string stacktrace; ValuePair & operator+=(const ValuePair & value); }; @@ -53,7 +54,7 @@ namespace ErrorCodes ErrorCode end(); /// Add value for specified error_code. 
- void increment(ErrorCode error_code, bool remote, const std::string & message); + void increment(ErrorCode error_code, bool remote, const std::string & message, const std::string & stacktrace); } } diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index 1fe224edc6e..08afd0397f5 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -36,7 +36,7 @@ namespace ErrorCodes /// - Aborts the process if error code is LOGICAL_ERROR. /// - Increments error codes statistics. -void handle_error_code([[maybe_unused]] const std::string & msg, int code, bool remote) +void handle_error_code([[maybe_unused]] const std::string & msg, const std::string & stacktrace, int code, bool remote) { // In debug builds and builds with sanitizers, treat LOGICAL_ERROR as an assertion failure. // Log the message before we fail. @@ -47,20 +47,20 @@ void handle_error_code([[maybe_unused]] const std::string & msg, int code, bool abort(); } #endif - ErrorCodes::increment(code, remote, msg); + ErrorCodes::increment(code, remote, msg, stacktrace); } Exception::Exception(const std::string & msg, int code, bool remote_) : Poco::Exception(msg, code) , remote(remote_) { - handle_error_code(msg, code, remote); + handle_error_code(msg, getStackTraceString(), code, remote); } Exception::Exception(const std::string & msg, const Exception & nested, int code) : Poco::Exception(msg, nested, code) { - handle_error_code(msg, code, remote); + handle_error_code(msg, getStackTraceString(), code, remote); } Exception::Exception(CreateFromPocoTag, const Poco::Exception & exc) diff --git a/src/Storages/System/StorageSystemErrors.cpp b/src/Storages/System/StorageSystemErrors.cpp index c06bb13beb6..87cf3f2f603 100644 --- a/src/Storages/System/StorageSystemErrors.cpp +++ b/src/Storages/System/StorageSystemErrors.cpp @@ -11,19 +11,20 @@ namespace DB NamesAndTypesList StorageSystemErrors::getNamesAndTypes() { return { - { "name", std::make_shared() }, - { "code", std::make_shared() }, - { 
"value", std::make_shared() }, - { "last_error_time", std::make_shared() }, - { "last_error_message",std::make_shared() }, - { "remote", std::make_shared() }, + { "name", std::make_shared() }, + { "code", std::make_shared() }, + { "value", std::make_shared() }, + { "last_error_time", std::make_shared() }, + { "last_error_message", std::make_shared() }, + { "last_error_stacktrace", std::make_shared() }, + { "remote", std::make_shared() }, }; } void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - auto add_row = [&](std::string_view name, size_t code, size_t value, UInt64 last_error_time_ms, const std::string & message, bool remote) + auto add_row = [&](std::string_view name, size_t code, size_t value, UInt64 last_error_time_ms, const std::string & message, const std::string & stacktrace, bool remote) { if (value || context.getSettingsRef().system_events_show_zero_values) { @@ -33,6 +34,7 @@ void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & res_columns[col_num++]->insert(value); res_columns[col_num++]->insert(last_error_time_ms / 1000); res_columns[col_num++]->insert(message); + res_columns[col_num++]->insert(stacktrace); res_columns[col_num++]->insert(remote); } }; @@ -45,8 +47,8 @@ void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & if (name.empty()) continue; - add_row(name, i, error.local, error.last_error_time_ms, error.message, 0 /* remote=0 */); - add_row(name, i, error.remote, error.last_error_time_ms, error.message, 1 /* remote=1 */); + add_row(name, i, error.local, error.last_error_time_ms, error.message, error.stacktrace, 0 /* remote=0 */); + add_row(name, i, error.remote, error.last_error_time_ms, error.message, error.stacktrace, 1 /* remote=1 */); } } From efdd04c958960233fad73f480366d3cfcbffba0d Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 8 Mar 2021 23:43:58 +0300 Subject: [PATCH 294/333] Drop last_ prefix for 
ErrorCodes::ValuePair::error_time_ms --- src/Common/ErrorCodes.cpp | 4 ++-- src/Common/ErrorCodes.h | 2 +- src/Storages/System/StorageSystemErrors.cpp | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 14182467351..d7e0d5fb16a 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -598,7 +598,7 @@ namespace ErrorCodes ValuePair inc_value{ !remote, /* local */ remote, /* remote */ - 0, /* last_error_time_ms */ + 0, /* error_time_ms */ message, /* message */ stacktrace, /* stacktrace */ }; @@ -613,7 +613,7 @@ namespace ErrorCodes stacktrace = value.stacktrace; const auto now = std::chrono::system_clock::now(); - last_error_time_ms = std::chrono::duration_cast(now.time_since_epoch()).count(); + error_time_ms = std::chrono::duration_cast(now.time_since_epoch()).count(); return *this; } diff --git a/src/Common/ErrorCodes.h b/src/Common/ErrorCodes.h index 4c79614d55d..1c8f0a58884 100644 --- a/src/Common/ErrorCodes.h +++ b/src/Common/ErrorCodes.h @@ -28,7 +28,7 @@ namespace ErrorCodes { Value local = 0; Value remote = 0; - UInt64 last_error_time_ms = 0; + UInt64 error_time_ms = 0; std::string message; std::string stacktrace; diff --git a/src/Storages/System/StorageSystemErrors.cpp b/src/Storages/System/StorageSystemErrors.cpp index 87cf3f2f603..c16eba6754b 100644 --- a/src/Storages/System/StorageSystemErrors.cpp +++ b/src/Storages/System/StorageSystemErrors.cpp @@ -24,7 +24,7 @@ NamesAndTypesList StorageSystemErrors::getNamesAndTypes() void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - auto add_row = [&](std::string_view name, size_t code, size_t value, UInt64 last_error_time_ms, const std::string & message, const std::string & stacktrace, bool remote) + auto add_row = [&](std::string_view name, size_t code, size_t value, UInt64 error_time_ms, const std::string & message, const std::string & 
stacktrace, bool remote) { if (value || context.getSettingsRef().system_events_show_zero_values) { @@ -32,7 +32,7 @@ void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & res_columns[col_num++]->insert(name); res_columns[col_num++]->insert(code); res_columns[col_num++]->insert(value); - res_columns[col_num++]->insert(last_error_time_ms / 1000); + res_columns[col_num++]->insert(error_time_ms / 1000); res_columns[col_num++]->insert(message); res_columns[col_num++]->insert(stacktrace); res_columns[col_num++]->insert(remote); @@ -47,8 +47,8 @@ void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & if (name.empty()) continue; - add_row(name, i, error.local, error.last_error_time_ms, error.message, error.stacktrace, 0 /* remote=0 */); - add_row(name, i, error.remote, error.last_error_time_ms, error.message, error.stacktrace, 1 /* remote=1 */); + add_row(name, i, error.local, error.error_time_ms, error.message, error.stacktrace, 0 /* remote=0 */); + add_row(name, i, error.remote, error.error_time_ms, error.message, error.stacktrace, 1 /* remote=1 */); } } From cc87bcfb63fbca0a9024364156d60dc703521c63 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 9 Mar 2021 09:09:28 +0300 Subject: [PATCH 295/333] Fix errorCodeToName() for signed integers - https://clickhouse-test-reports.s3.yandex.net/21529/2ce2772d35eb3d81628f4d294d5799e9f05333fd/functional_stateless_tests_(address).html#fail1 - https://clickhouse-test-reports.s3.yandex.net/21529/2ce2772d35eb3d81628f4d294d5799e9f05333fd/functional_stateless_tests_(ubsan).html#fail1 - https://clickhouse-test-reports.s3.yandex.net/21529/2ce2772d35eb3d81628f4d294d5799e9f05333fd/stress_test_(address).html#fail1 --- src/Common/ErrorCodes.cpp | 2 +- tests/queries/0_stateless/01544_errorCodeToName.reference | 1 + tests/queries/0_stateless/01544_errorCodeToName.sql | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp 
index d7e0d5fb16a..879784bb43a 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -579,7 +579,7 @@ namespace ErrorCodes std::string_view getName(ErrorCode error_code) { - if (error_code >= END) + if (error_code < 0 || error_code >= END) return std::string_view(); return error_codes_names.names[error_code]; } diff --git a/tests/queries/0_stateless/01544_errorCodeToName.reference b/tests/queries/0_stateless/01544_errorCodeToName.reference index ace588644e1..fefccf984be 100644 --- a/tests/queries/0_stateless/01544_errorCodeToName.reference +++ b/tests/queries/0_stateless/01544_errorCodeToName.reference @@ -1,4 +1,5 @@ + OK UNSUPPORTED_METHOD diff --git a/tests/queries/0_stateless/01544_errorCodeToName.sql b/tests/queries/0_stateless/01544_errorCodeToName.sql index 9e28ed1116c..aa32270f00b 100644 --- a/tests/queries/0_stateless/01544_errorCodeToName.sql +++ b/tests/queries/0_stateless/01544_errorCodeToName.sql @@ -1,4 +1,5 @@ SELECT errorCodeToName(toUInt32(-1)); +SELECT errorCodeToName(-1); SELECT errorCodeToName(600); /* gap in error codes */ SELECT errorCodeToName(0); SELECT errorCodeToName(1); From 9921e7ca284b7d274af540d13812881dd6a0e578 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 9 Mar 2021 09:12:08 +0300 Subject: [PATCH 296/333] Add 01545_system_errors into skip_list.parallel https://clickhouse-test-reports.s3.yandex.net/21529/2ce2772d35eb3d81628f4d294d5799e9f05333fd/functional_stateless_tests_flaky_check_(address).html#fail1 --- tests/queries/skip_list.json | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index bded0807db9..caab92636b3 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -739,6 +739,7 @@ "01541_max_memory_usage_for_user_long", "01542_dictionary_load_exception_race", "01560_optimize_on_insert_zookeeper", + "01545_system_errors", // looks at the difference of values in system.errors "01575_disable_detach_table_of_dictionary", 
"01593_concurrent_alter_mutations_kill", "01593_concurrent_alter_mutations_kill_many_replicas", From a337691b060e9ad2c2cf53e9762691352f732837 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 9 Mar 2021 10:05:56 +0300 Subject: [PATCH 297/333] Fix modernize-use-bool-literals clang-tidy warning in StorageSystemErrors --- src/Storages/System/StorageSystemErrors.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/System/StorageSystemErrors.cpp b/src/Storages/System/StorageSystemErrors.cpp index c16eba6754b..c9aac9ce007 100644 --- a/src/Storages/System/StorageSystemErrors.cpp +++ b/src/Storages/System/StorageSystemErrors.cpp @@ -47,8 +47,8 @@ void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & if (name.empty()) continue; - add_row(name, i, error.local, error.error_time_ms, error.message, error.stacktrace, 0 /* remote=0 */); - add_row(name, i, error.remote, error.error_time_ms, error.message, error.stacktrace, 1 /* remote=1 */); + add_row(name, i, error.local, error.error_time_ms, error.message, error.stacktrace, false /* remote=0 */); + add_row(name, i, error.remote, error.error_time_ms, error.message, error.stacktrace, true /* remote=1 */); } } From 9dee842b6082a3e90c65a8a45e5a357de30106a6 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 16 Mar 2021 21:31:14 +0300 Subject: [PATCH 298/333] Distinguish remote and local error info --- src/Common/ErrorCodes.cpp | 34 +++++++-------------- src/Common/ErrorCodes.h | 26 ++++++++++------ src/Storages/System/StorageSystemErrors.cpp | 16 +++++----- 3 files changed, 35 insertions(+), 41 deletions(-) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 879784bb43a..a4e2f8742ca 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -564,7 +564,7 @@ namespace ErrorCodes #undef M constexpr ErrorCode END = 3000; - ValuePairHolder values[END + 1]{}; + ErrorPairHolder values[END + 1]{}; struct ErrorCodesNames { @@ -595,35 
+595,23 @@ namespace ErrorCodes error_code = end() - 1; } - ValuePair inc_value{ - !remote, /* local */ - remote, /* remote */ - 0, /* error_time_ms */ - message, /* message */ - stacktrace, /* stacktrace */ - }; - values[error_code].increment(inc_value); + values[error_code].increment(remote, message, stacktrace); } - ValuePair & ValuePair::operator+=(const ValuePair & value) + void ErrorPairHolder::increment(bool remote, const std::string & message, const std::string & stacktrace) { - local += value.local; - remote += value.remote; - message = value.message; - stacktrace = value.stacktrace; - const auto now = std::chrono::system_clock::now(); - error_time_ms = std::chrono::duration_cast(now.time_since_epoch()).count(); - return *this; - } - - void ValuePairHolder::increment(const ValuePair & value_) - { std::lock_guard lock(mutex); - value += value_; + + auto & error = remote ? value.remote : value.local; + + ++error.count; + error.message = message; + error.stacktrace = stacktrace; + error.error_time_ms = std::chrono::duration_cast(now.time_since_epoch()).count(); } - ValuePair ValuePairHolder::get() + ErrorPair ErrorPairHolder::get() { std::lock_guard lock(mutex); return value; diff --git a/src/Common/ErrorCodes.h b/src/Common/ErrorCodes.h index 1c8f0a58884..edb9be9e0c0 100644 --- a/src/Common/ErrorCodes.h +++ b/src/Common/ErrorCodes.h @@ -24,31 +24,37 @@ namespace ErrorCodes /// Returns statically allocated string. std::string_view getName(ErrorCode error_code); - struct ValuePair + struct Error { - Value local = 0; - Value remote = 0; + /// Number of times Exception with this ErrorCode had been throw. + Value count; + /// Time of the last error. UInt64 error_time_ms = 0; + /// Message for the last error. std::string message; + /// Stacktrace for the last error. 
std::string stacktrace; - - ValuePair & operator+=(const ValuePair & value); + }; + struct ErrorPair + { + Error local; + Error remote; }; /// Thread-safe - struct ValuePairHolder + struct ErrorPairHolder { public: - void increment(const ValuePair & value_); - ValuePair get(); + ErrorPair get(); + void increment(bool remote, const std::string & message, const std::string & stacktrace); private: - ValuePair value; + ErrorPair value; std::mutex mutex; }; /// ErrorCode identifier -> current value of error_code. - extern ValuePairHolder values[]; + extern ErrorPairHolder values[]; /// Get index just after last error_code identifier. ErrorCode end(); diff --git a/src/Storages/System/StorageSystemErrors.cpp b/src/Storages/System/StorageSystemErrors.cpp index c9aac9ce007..5243cb11aa3 100644 --- a/src/Storages/System/StorageSystemErrors.cpp +++ b/src/Storages/System/StorageSystemErrors.cpp @@ -24,17 +24,17 @@ NamesAndTypesList StorageSystemErrors::getNamesAndTypes() void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - auto add_row = [&](std::string_view name, size_t code, size_t value, UInt64 error_time_ms, const std::string & message, const std::string & stacktrace, bool remote) + auto add_row = [&](std::string_view name, size_t code, const auto & error, bool remote) { - if (value || context.getSettingsRef().system_events_show_zero_values) + if (error.count || context.getSettingsRef().system_events_show_zero_values) { size_t col_num = 0; res_columns[col_num++]->insert(name); res_columns[col_num++]->insert(code); - res_columns[col_num++]->insert(value); - res_columns[col_num++]->insert(error_time_ms / 1000); - res_columns[col_num++]->insert(message); - res_columns[col_num++]->insert(stacktrace); + res_columns[col_num++]->insert(error.count); + res_columns[col_num++]->insert(error.error_time_ms / 1000); + res_columns[col_num++]->insert(error.message); + res_columns[col_num++]->insert(error.stacktrace); 
res_columns[col_num++]->insert(remote); } }; @@ -47,8 +47,8 @@ void StorageSystemErrors::fillData(MutableColumns & res_columns, const Context & if (name.empty()) continue; - add_row(name, i, error.local, error.error_time_ms, error.message, error.stacktrace, false /* remote=0 */); - add_row(name, i, error.remote, error.error_time_ms, error.message, error.stacktrace, true /* remote=1 */); + add_row(name, i, error.local, /* remote= */ false); + add_row(name, i, error.remote, /* remote= */ true); } } From 37a17749ea57b9d1958b3f0dba70eaff0e61883a Mon Sep 17 00:00:00 2001 From: 3ldar-nasyrov <80788015+3ldar-nasyrov@users.noreply.github.com> Date: Tue, 16 Mar 2021 23:17:07 +0300 Subject: [PATCH 299/333] fixed exceeded amount of tries typo 'retires' -> 'retries' --- src/Interpreters/DDLWorker.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 0ecb27ee3aa..eceb48ae773 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -872,7 +872,7 @@ bool DDLWorker::tryExecuteQueryOnLeaderReplica( else /// If we exceeded amount of tries { LOG_WARNING(log, "Task {} was not executed by anyone, maximum number of retries exceeded", task.entry_name); - task.execution_status = ExecutionStatus(ErrorCodes::UNFINISHED, "Cannot execute replicated DDL query, maximum retires exceeded"); + task.execution_status = ExecutionStatus(ErrorCodes::UNFINISHED, "Cannot execute replicated DDL query, maximum retries exceeded"); } return false; } From 0855f5fb183d9b8ef8caaef395e8fa792205096e Mon Sep 17 00:00:00 2001 From: George Date: Wed, 17 Mar 2021 01:34:40 +0300 Subject: [PATCH 300/333] Troubleshooting --- docs/en/operations/external-authenticators/ldap.md | 4 ++-- docs/ru/operations/external-authenticators/ldap.md | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/en/operations/external-authenticators/ldap.md b/docs/en/operations/external-authenticators/ldap.md 
index 1cd38ba8430..1b65ecc968b 100644 --- a/docs/en/operations/external-authenticators/ldap.md +++ b/docs/en/operations/external-authenticators/ldap.md @@ -84,7 +84,7 @@ At each login attempt, ClickHouse tries to "bind" to the specified DN defined by Note, that user `my_user` refers to `my_ldap_server`. This LDAP server must be configured in the main `config.xml` file as described previously. -When SQL-driven [Access Control and Account Management](../access-rights.md) is enabled, users that are authenticated by LDAP servers can also be created using the [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement. +When SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled, users that are authenticated by LDAP servers can also be created using the [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement. Query: @@ -97,7 +97,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; In addition to the locally defined users, a remote LDAP server can be used as a source of user definitions. To achieve this, specify previously defined LDAP server name (see [LDAP Server Definition](#ldap-server-definition)) in the `ldap` section inside the `users_directories` section of the `config.xml` file. -At each login attempt, ClickHouse tries to find the user definition locally and authenticate it as usual. If the user is not defined, ClickHouse will assume the definition exists in the external LDAP directory and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. 
All this implies that the SQL-driven [Access Control and Account Management](../access-rights.md) is enabled and roles are created using the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. +At each login attempt, ClickHouse tries to find the user definition locally and authenticate it as usual. If the user is not defined, ClickHouse will assume the definition exists in the external LDAP directory and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. All this implies that the SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled and roles are created using the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement. **Example** diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index f93581123fd..102cc36eaa2 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -84,7 +84,7 @@ Обратите внимание, что пользователь `my_user` ссылается на `my_ldap_server`. Этот LDAP сервер должен быть настроен в основном файле `config.xml`, как это было описано ранее. -При включенном SQL-ориентированном [управлении доступом](../access-rights.md) пользователи, аутентифицированные LDAP серверами, могут также быть созданы выражением [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement). 
+При включенном SQL-ориентированном [управлении доступом](../access-rights.md#access-control) пользователи, аутентифицированные LDAP серверами, могут также быть созданы выражением [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement). Запрос: @@ -96,7 +96,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; В дополнение к локально определенным пользователям, удаленный LDAP сервер может служить источником определения пользователей. Для этого укажите имя определенного ранее сервера LDAP (см. [Определение LDAP сервера](#ldap-server-definition)) в секции `ldap` внутри секции `users_directories` файла `config.xml`. -При каждой попытке авторизации ClickHouse пытается локально найти определение пользователя и аутентифицировать его как обычно. Если пользователь не находится локально, ClickHouse предполагает, что он определяется во внешнем LDAP каталоге и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю присваиваются роли из списка, указанного в секции `roles`. Кроме того, если настроена секция `role_mapping`, то выполняется LDAP поиск, а его результаты преобразуются в имена ролей и присваиваются пользователям. Все это работает при условии, что SQL-ориентированное [управлением доступом](../access-rights.md) включено, а роли созданы выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). +При каждой попытке авторизации ClickHouse пытается локально найти определение пользователя и аутентифицировать его как обычно. Если пользователь не находится локально, ClickHouse предполагает, что он определяется во внешнем LDAP каталоге и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. 
Пользователю присваиваются роли из списка, указанного в секции `roles`. Кроме того, если настроена секция `role_mapping`, то выполняется LDAP поиск, а его результаты преобразуются в имена ролей и присваиваются пользователям. Все это работает при условии, что SQL-ориентированное [управлением доступом](../access-rights.md#access-control) включено, а роли созданы выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). **Пример** @@ -145,4 +145,4 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; - `attribute` — имя атрибута, значение которого будет возвращаться LDAP поиском. - `prefix` — префикс, который, как предполагается, будет находиться перед началом каждой строки в исходном списке строк, возвращаемых LDAP поиском. Префикс будет удален из исходных строк, а сами они будут рассматриваться как имена локальных ролей. По умолчанию: пустая строка. -[Оригинальная статья](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap.md) +[Оригинальная статья](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap) From 3060d9bb3be814ba8429b1e39d3eee8051078caa Mon Sep 17 00:00:00 2001 From: George Date: Wed, 17 Mar 2021 01:49:02 +0300 Subject: [PATCH 301/333] fixed links --- docs/en/operations/external-authenticators/index.md | 2 +- docs/ru/operations/external-authenticators/index.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/operations/external-authenticators/index.md b/docs/en/operations/external-authenticators/index.md index fe4e6a42974..f4b64865910 100644 --- a/docs/en/operations/external-authenticators/index.md +++ b/docs/en/operations/external-authenticators/index.md @@ -12,4 +12,4 @@ The following external authenticators and directories are supported: - [LDAP](./ldap.md#external-authenticators-ldap) [Authenticator](./ldap.md#ldap-external-authenticator) and [Directory](./ldap.md#ldap-external-user-directory) -[Original 
article](https://clickhouse.tech/docs/en/operations/external-authenticators/index.md) +[Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/index/) diff --git a/docs/ru/operations/external-authenticators/index.md b/docs/ru/operations/external-authenticators/index.md index c8ac7459cfa..beb9bb5742c 100644 --- a/docs/ru/operations/external-authenticators/index.md +++ b/docs/ru/operations/external-authenticators/index.md @@ -12,4 +12,4 @@ ClickHouse поддерживает аутентификацию и управл - [LDAP](./ldap.md#external-authenticators-ldap) [аутентификатор](./ldap.md#ldap-external-authenticator) и [каталог](./ldap.md#ldap-external-user-directory) -[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/external-authenticators/index.md) +[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/external-authenticators/index/) From 9ca0566132016abeb25282f2a14127da1ee463f6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Mar 2021 02:08:23 +0300 Subject: [PATCH 302/333] Minor modification --- src/Compression/LZ4_decompress_faster.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Compression/LZ4_decompress_faster.h b/src/Compression/LZ4_decompress_faster.h index dd923279ebf..30a0d7acb22 100644 --- a/src/Compression/LZ4_decompress_faster.h +++ b/src/Compression/LZ4_decompress_faster.h @@ -95,7 +95,7 @@ struct PerformanceStatistics /// How to select method to run. /// -1 - automatically, based on statistics (default); - /// 0..3 - always choose specified method (for performance testing); + /// >= 0 - always choose specified method (for performance testing); /// -2 - choose methods in round robin fashion (for performance testing). 
ssize_t choose_method = -1; From 9969124cc43555369f7ba36a3b3e911f3d441f8a Mon Sep 17 00:00:00 2001 From: George Date: Wed, 17 Mar 2021 03:09:42 +0300 Subject: [PATCH 303/333] Change wording --- docs/en/sql-reference/statements/detach.md | 2 +- docs/ru/sql-reference/statements/attach.md | 4 ++-- docs/ru/sql-reference/statements/detach.md | 14 +++++++------- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index cb0d7cf7b66..e9c9ed3693c 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -5,7 +5,7 @@ toc_title: DETACH # DETACH Statement {#detach} -Deletes information about the table or materialized view from the server. The server stops knowing about their existence. +Makes the server "forget" about the existence of the table or materialized view. Syntax: diff --git a/docs/ru/sql-reference/statements/attach.md b/docs/ru/sql-reference/statements/attach.md index be5b0b6d44a..55d4db80099 100644 --- a/docs/ru/sql-reference/statements/attach.md +++ b/docs/ru/sql-reference/statements/attach.md @@ -10,7 +10,7 @@ toc_title: ATTACH - вместо слова `CREATE` используется слово `ATTACH`; - запрос не создаёт данные на диске, а предполагает, что данные уже лежат в соответствующих местах, и всего лишь добавляет информацию о таблице на сервер. После выполнения запроса `ATTACH` сервер будет знать о существовании таблицы. -Если таблица перед этим была откреплена ([DETACH](../../sql-reference/statements/detach.md)), т.е. её структура известна, можно использовать сокращенную форму записи без определения структуры. +Если таблица перед этим была отключена ([DETACH](../../sql-reference/statements/detach.md)), т.е. её структура известна, можно использовать сокращенную форму записи без определения структуры. 
``` sql ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] @@ -18,7 +18,7 @@ ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] Этот запрос используется при старте сервера. Сервер хранит метаданные таблиц в виде файлов с запросами `ATTACH`, которые он просто исполняет при запуске (за исключением некоторых системных таблиц, которые явно создаются на сервере). -Если таблица была откреплена перманентно, она не будет прикреплена обратно во время старта сервера, так что нужно явно использовать запрос `ATTACH`, чтобы прикрепить ее. +Если таблица была отключена перманентно, она не будет подключена обратно во время старта сервера, так что нужно явно использовать запрос `ATTACH`, чтобы подключить ее. [Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/attach/) diff --git a/docs/ru/sql-reference/statements/detach.md b/docs/ru/sql-reference/statements/detach.md index 90e9663def9..1809d85b958 100644 --- a/docs/ru/sql-reference/statements/detach.md +++ b/docs/ru/sql-reference/statements/detach.md @@ -5,7 +5,7 @@ toc_title: DETACH # DETACH {#detach-statement} -Удаляет из сервера информацию о таблице или материализованном представлении. Сервер перестаёт знать о существовании таблицы. +Заставляет сервер "забыть" о существовании таблицы или материализованного представления. Синтаксис: @@ -13,15 +13,15 @@ toc_title: DETACH DETACH TABLE|VIEW [IF EXISTS] [db.]name [PERMANENTLY] [ON CLUSTER cluster] ``` -Но ни данные, ни метаданные таблицы или материализованного представления не удаляются. При следующем запуске сервера, если не было использовано `PERMANENTLY`, сервер прочитает метаданные и снова узнает о таблице/представлении. Если таблица или представление были откреплено перманентно, сервер не прикрепит их обратно автоматически. +Но ни данные, ни метаданные таблицы или материализованного представления не удаляются. 
При следующем запуске сервера, если не было использовано `PERMANENTLY`, сервер прочитает метаданные и снова узнает о таблице/представлении. Если таблица или представление были отключены перманентно, сервер не подключит их обратно автоматически. -Независимо от того, каким способом таблица была откреплена, ее можно прикрепить обратно с помощью запроса [ATTACH](../../sql-reference/statements/attach.md). Системные log таблицы также могут быть прикреплены обратно (к примеру `query_log`, `text_log` и др.) Другие системные таблицы не могут быть прикреплены обратно, но на следующем запуске сервер снова "вспомнит" об этих таблицах. +Независимо от того, каким способом таблица была отключена, ее можно подключить обратно с помощью запроса [ATTACH](../../sql-reference/statements/attach.md). Системные log таблицы также могут быть подключены обратно (к примеру `query_log`, `text_log` и др.) Другие системные таблицы не могут быть подключены обратно, но на следующем запуске сервер снова "вспомнит" об этих таблицах. -`ATTACH MATERIALIZED VIEW` не может быть использован с кратким синтаксисом (без `SELECT`), но можно прикрепить представление с помощью запроса `ATTACH TABLE`. +`ATTACH MATERIALIZED VIEW` не может быть использован с кратким синтаксисом (без `SELECT`), но можно подключить представление с помощью запроса `ATTACH TABLE`. -Обратите внимание, что нельзя перманентно открепить таблицу, которая уже временно откреплена. Для этого ее сначала надо прикрепить обратно, а затем снова открепить перманентно. +Обратите внимание, что нельзя перманентно отключить таблицу, которая уже временно отключена. Для этого ее сначала надо подключить обратно, а затем снова отключить перманентно. -Также нельзя использовать [DROP](../../sql-reference/statements/drop.md#drop-table) с открепленной таблицей или создавать таблицу с помощью [CREATE TABLE](../../sql-reference/statements/create/table.md) с таким же именем, как у уже открепленной таблицы. 
Еще нельзя заменить открепленную таблицу другой с помощью запроса [RENAME TABLE](../../sql-reference/statements/rename.md). +Также нельзя использовать [DROP](../../sql-reference/statements/drop.md#drop-table) с отключенной таблицей или создавать таблицу с помощью [CREATE TABLE](../../sql-reference/statements/create/table.md) с таким же именем, как у отключенной таблицы. Еще нельзя заменить отключенную таблицу другой с помощью запроса [RENAME TABLE](../../sql-reference/statements/rename.md). **Пример** @@ -51,7 +51,7 @@ SELECT * FROM test; └────────┘ ``` -Открепление таблицы: +Отключение таблицы: Запрос: From 0640bb4cbaadafed46c74fc1f528200a99851145 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Wed, 17 Mar 2021 14:33:57 +0800 Subject: [PATCH 304/333] Fix arena data race in two level merge --- src/Interpreters/Aggregator.cpp | 39 +++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index ea81155e26a..ee8132cd40c 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1389,35 +1389,46 @@ BlocksList Aggregator::prepareBlocksAndFillTwoLevelImpl( for (size_t i = data_variants.aggregates_pools.size(); i < max_threads; ++i) data_variants.aggregates_pools.push_back(std::make_shared()); - auto converter = [&](size_t bucket, ThreadGroupStatusPtr thread_group) + std::atomic next_bucket_to_merge = 0; + + auto converter = [&](size_t thread_id, ThreadGroupStatusPtr thread_group) { if (thread_group) CurrentThread::attachToIfDetached(thread_group); - /// Select Arena to avoid race conditions - size_t thread_number = static_cast(bucket) % max_threads; - Arena * arena = data_variants.aggregates_pools.at(thread_number).get(); + BlocksList blocks; + while (true) + { + UInt32 bucket = next_bucket_to_merge.fetch_add(1); - return convertOneBucketToBlock(data_variants, method, arena, final, bucket); + if (bucket >= Method::Data::NUM_BUCKETS) + break; 
+ + if (method.data.impls[bucket].empty()) + continue; + + /// Select Arena to avoid race conditions + Arena * arena = data_variants.aggregates_pools.at(thread_id).get(); + blocks.emplace_back(convertOneBucketToBlock(data_variants, method, arena, final, bucket)); + } + return blocks; }; /// packaged_task is used to ensure that exceptions are automatically thrown into the main stream. - std::vector> tasks(Method::Data::NUM_BUCKETS); + std::vector> tasks(max_threads); try { - for (size_t bucket = 0; bucket < Method::Data::NUM_BUCKETS; ++bucket) + for (size_t thread_id = 0; thread_id < max_threads; ++thread_id) { - if (method.data.impls[bucket].empty()) - continue; - - tasks[bucket] = std::packaged_task([group = CurrentThread::getGroup(), bucket, &converter]{ return converter(bucket, group); }); + tasks[thread_id] = std::packaged_task( + [group = CurrentThread::getGroup(), thread_id, &converter] { return converter(thread_id, group); }); if (thread_pool) - thread_pool->scheduleOrThrowOnError([bucket, &tasks] { tasks[bucket](); }); + thread_pool->scheduleOrThrowOnError([thread_id, &tasks] { tasks[thread_id](); }); else - tasks[bucket](); + tasks[thread_id](); } } catch (...) 
@@ -1439,7 +1450,7 @@ BlocksList Aggregator::prepareBlocksAndFillTwoLevelImpl( if (!task.valid()) continue; - blocks.emplace_back(task.get_future().get()); + blocks.splice(blocks.end(), task.get_future().get()); } return blocks; From d8fe02ad910c3c6f9a63f78ba97ee5543207dacd Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Wed, 17 Mar 2021 09:58:51 +0300 Subject: [PATCH 305/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 102cc36eaa2..b5892b2aa97 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -133,7 +133,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; - `roles` — секция со списком локально определенных ролей, которые будут присвоены каждому пользователю, полученному от сервера LDAP. - Если роли не указаны ни здесь, ни в секции `role_mapping` (см. ниже), пользователь после аутентификации не сможет выполнять никаких действий. - `role_mapping` — секция c параметрами LDAP поиска и правилами отображения. - - При аутентификации пользователя, пока еще связанного с LDAP, производится LDAP поиск с помощью `search_filter` и имени этого пользователя. Для каждой записи, найденной в ходе поиска, выделяется значение указанного атрибута. У каждого атрибута, имеющего указанный префикс, этот префикс удаляется, а остальная часть значения становится именем локальной роли, определенной в ClickHouse, причем предполагается, что эта роль была ранее создана выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) до этого. 
+ - При аутентификации пользователя, пока еще связанного с LDAP, производится LDAP поиск с помощью `search_filter` и имени этого пользователя. Для каждой записи, найденной в ходе поиска, выделяется значение указанного атрибута. У каждого атрибута, имеющего указанный префикс, этот префикс удаляется, а остальная часть значения становится именем локальной роли, определенной в ClickHouse, причем предполагается, что эта роль была ранее создана запросом [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) до этого. - Внутри одной секции `ldap` может быть несколько секций `role_mapping`. Все они будут применены. - `base_dn` — шаблон, который используется для создания базового DN для LDAP поиска. - При формировании DN все подстроки `{user_name}` и `{bind_dn}` в шаблоне будут заменяться на фактическое имя пользователя и DN привязки соответственно при каждом LDAP поиске. From 349c7bf0d6445d0113fe317e78cf7d4528d4661a Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Wed, 17 Mar 2021 09:58:55 +0300 Subject: [PATCH 306/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index b5892b2aa97..4afaa210cfe 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -96,7 +96,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; В дополнение к локально определенным пользователям, удаленный LDAP сервер может служить источником определения пользователей. Для этого укажите имя определенного ранее сервера LDAP (см. [Определение LDAP сервера](#ldap-server-definition)) в секции `ldap` внутри секции `users_directories` файла `config.xml`. 
-При каждой попытке авторизации ClickHouse пытается локально найти определение пользователя и аутентифицировать его как обычно. Если пользователь не находится локально, ClickHouse предполагает, что он определяется во внешнем LDAP каталоге и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю присваиваются роли из списка, указанного в секции `roles`. Кроме того, если настроена секция `role_mapping`, то выполняется LDAP поиск, а его результаты преобразуются в имена ролей и присваиваются пользователям. Все это работает при условии, что SQL-ориентированное [управлением доступом](../access-rights.md#access-control) включено, а роли созданы выражением [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). +При каждой попытке авторизации ClickHouse пытается локально найти определение пользователя и аутентифицировать его как обычно. Если пользователь не находится локально, ClickHouse предполагает, что он определяется во внешнем LDAP каталоге и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю присваиваются роли из списка, указанного в секции `roles`. Кроме того, если настроена секция `role_mapping`, то выполняется LDAP поиск, а его результаты преобразуются в имена ролей и присваиваются пользователям. Все это работает при условии, что SQL-ориентированное [управлением доступом](../access-rights.md#access-control) включено, а роли созданы запросом [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). 
**Пример** From 8272d69f7495ddb1657bf24437742069781776db Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Wed, 17 Mar 2021 09:59:01 +0300 Subject: [PATCH 307/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 4afaa210cfe..0c941cee0d5 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -84,7 +84,7 @@ Обратите внимание, что пользователь `my_user` ссылается на `my_ldap_server`. Этот LDAP сервер должен быть настроен в основном файле `config.xml`, как это было описано ранее. -При включенном SQL-ориентированном [управлении доступом](../access-rights.md#access-control) пользователи, аутентифицированные LDAP серверами, могут также быть созданы выражением [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement). +При включенном SQL-ориентированном [управлении доступом](../access-rights.md#access-control) пользователи, аутентифицированные LDAP серверами, могут также быть созданы запросом [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement). 
Запрос: From 4878ad5b6cc094a42b0a3f98194f30ab1dce6e03 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Wed, 17 Mar 2021 09:59:09 +0300 Subject: [PATCH 308/333] Update docs/ru/operations/external-authenticators/ldap.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/operations/external-authenticators/ldap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index 0c941cee0d5..b53c4cba121 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -2,7 +2,7 @@ Для аутентификации пользователей ClickHouse можно использовать сервер LDAP. Существуют два подхода: -- Использовать LDAP как внешний аутентификатор для существующих пользователей, которые определены в `users.xml` или в локальных параметрах управления доступом. +- Использовать LDAP как внешний аутентификатор для существующих пользователей, которые определены в `users.xml`, или в локальных параметрах управления доступом. - Использовать LDAP как внешний пользовательский каталог и разрешить аутентификацию локально неопределенных пользователей, если они есть на LDAP сервере. Для обоих подходов необходимо определить внутреннее имя LDAP сервера в конфигурации ClickHouse, чтобы другие параметры конфигурации могли ссылаться на это имя. 
From f69b6ecf0e030716574fae31b69b06db6ff9eae1 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Wed, 17 Mar 2021 13:16:41 +0200 Subject: [PATCH 309/333] Fixed cross-links to other pages --- docs/en/sql-reference/statements/optimize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/optimize.md b/docs/en/sql-reference/statements/optimize.md index ea1fac90466..49a7404d76e 100644 --- a/docs/en/sql-reference/statements/optimize.md +++ b/docs/en/sql-reference/statements/optimize.md @@ -23,7 +23,7 @@ When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engin ### BY expression {#by-expression} -If you want to perform deduplication on custom set of columns rather than on all, you can specify list of columns explicitly or use any combination of [`*`](../../../sql-reference/statements/select/index.md#asterisk), [`COLUMNS`](../../../sql-reference/statements/select/index.md#columns-expression) or [`EXCEPT`](../../../sql-reference/statements/select/index.md#except-modifier) expressions. The explictly written or implicitly expanded list of columns must include all columns specified in row ordering expression (both primary and sorting keys) and partitioning expression (partitioning key). +If you want to perform deduplication on custom set of columns rather than on all, you can specify list of columns explicitly or use any combination of [`*`](../../sql-reference/statements/select/index.md#asterisk), [`COLUMNS`](../../sql-reference/statements/select/index.md#columns-expression) or [`EXCEPT`](../../sql-reference/statements/select/index.md#except-modifier) expressions. The explictly written or implicitly expanded list of columns must include all columns specified in row ordering expression (both primary and sorting keys) and partitioning expression (partitioning key). Note that `*` behaves just like in `SELECT`: `MATERIALIZED`, and `ALIAS` columns are not used for expansion. 
Also, it is an error to specify empty list of columns, or write an expression that results in an empty list of columns, or deduplicate by an ALIAS column. From e6158be4dac1f19dfad279035dc721618c75bb16 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 17 Mar 2021 14:54:14 +0300 Subject: [PATCH 310/333] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 928991dc937..43531b60267 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## ClickHouse release 21.3 +## ClickHouse release 21.3 (LTS) ### ClickHouse release v21.3, 2021-03-12 From f8fc4281f2c540128cdb689dd6c909b63b037ef3 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Wed, 17 Mar 2021 15:56:47 +0300 Subject: [PATCH 311/333] Update PostgreSQLReplicaConnection.h --- src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h b/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h index 9465d4a119b..e58d4bc8100 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicaConnection.h @@ -25,6 +25,7 @@ public: private: + /// Highest priority is 0, the bigger the number in map, the less the priority using ReplicasByPriority = std::map; Poco::Logger * log; From 1cd9f28bd4f8d1cda7347da8ad81a93975a4c840 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 17 Mar 2021 18:31:30 +0300 Subject: [PATCH 312/333] another way --- src/Interpreters/InterpreterSelectQuery.cpp | 8 +++++++- .../Transforms/PartialSortingTransform.cpp | 17 ++++++++--------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index d0c8966cf07..6be43408997 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ 
b/src/Interpreters/InterpreterSelectQuery.cpp @@ -2038,7 +2038,13 @@ void InterpreterSelectQuery::executeWindow(QueryPlan & query_plan) for (size_t i = 0; i < windows_sorted.size(); ++i) { const auto & w = *windows_sorted[i]; - if (i == 0 || !sortIsPrefix(w, *windows_sorted[i - 1])) + + // We don't need to sort again if the input from previous window already + // has suitable sorting. Also don't create sort steps when there are no + // columns to sort by, because the sort nodes are confused by this. It + // happens in case of `over ()`. + if (!w.full_sort_description.empty() + && (i == 0 || !sortIsPrefix(w, *windows_sorted[i - 1]))) { auto partial_sorting = std::make_unique( query_plan.getCurrentDataStream(), diff --git a/src/Processors/Transforms/PartialSortingTransform.cpp b/src/Processors/Transforms/PartialSortingTransform.cpp index 7c29f506617..3a75571872f 100644 --- a/src/Processors/Transforms/PartialSortingTransform.cpp +++ b/src/Processors/Transforms/PartialSortingTransform.cpp @@ -10,6 +10,8 @@ PartialSortingTransform::PartialSortingTransform( : ISimpleTransform(header_, header_, false) , description(description_), limit(limit_) { + // Sorting by no columns doesn't make sense. + assert(!description.empty()); } static ColumnRawPtrs extractColumns(const Block & block, const SortDescription & description) @@ -91,17 +93,14 @@ size_t getFilterMask(const ColumnRawPtrs & lhs, const ColumnRawPtrs & rhs, size_ void PartialSortingTransform::transform(Chunk & chunk) { - if (chunk.getColumns().empty()) + if (chunk.getNumRows()) { - // Sometimes we can have Chunks w/o columns, e.g. in case of - // `select count() over () from numbers(4) where number < 2`. - // We don't have to modify this Chunk, but we have to preserve the input - // number of rows. The following code uses Block for sorting, and Block - // is incapable of recording the number of rows when there is no columns. 
- // The simplest solution is to specifically check for Chunk with no - // columns and not modify it, which is what we do here. - return; + // The following code works with Blocks and will lose the number of + // rows when there are no columns. We shouldn't get such block, because + // we have to sort by at least one column. + assert(chunk.getNumColumns()); } + if (read_rows) read_rows->add(chunk.getNumRows()); From 738cb1af62a9f65846bba21757c9032583c9d00f Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 11 Mar 2021 13:34:15 +0300 Subject: [PATCH 313/333] Fix filter push down columns order. --- src/Interpreters/ActionsDAG.cpp | 30 ++++++++++++++----- src/Interpreters/ActionsDAG.h | 4 +-- .../Optimizations/filterPushDown.cpp | 3 +- 3 files changed, 27 insertions(+), 10 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 83844176f3b..31abbc89634 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1360,7 +1360,8 @@ ColumnsWithTypeAndName prepareFunctionArguments(const std::vector conjunction) + +ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::vector conjunction, const ColumnsWithTypeAndName & all_inputs) { if (conjunction.empty()) return nullptr; @@ -1374,6 +1375,7 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::vector conjunc std::make_shared())); std::unordered_map nodes_mapping; + std::unordered_map> added_inputs; struct Frame { @@ -1416,16 +1418,30 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::vector conjunc child = nodes_mapping[child]; if (node.type == ActionType::INPUT) - { - actions->inputs.emplace_back(&node); - actions->index.insert(&node); - } + added_inputs[node.result_name].push_back(&node); stack.pop(); } } } + + for (const auto & col : all_inputs) + { + Node * input; + auto & list = added_inputs[col.name]; + if (list.empty()) + input = &const_cast(actions->addInput(col)); + else + { + input = list.front(); + list.pop_front(); 
+ actions->inputs.push_back(input); + } + + actions->index.insert(input); + } + Node * result_predicate = nodes_mapping[*conjunction.begin()]; if (conjunction.size() > 1) @@ -1442,7 +1458,7 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::vector conjunc return actions; } -ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, bool can_remove_filter, const Names & available_inputs) +ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, bool can_remove_filter, const Names & available_inputs, const ColumnsWithTypeAndName & all_inputs) { Node * predicate; @@ -1480,7 +1496,7 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, } auto conjunction = getConjunctionNodes(predicate, allowed_nodes); - auto actions = cloneActionsForConjunction(conjunction.allowed); + auto actions = cloneActionsForConjunction(conjunction.allowed, all_inputs); if (!actions) return nullptr; diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 165f712a627..a3cfd9cb776 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -292,7 +292,7 @@ public: /// Otherwise, return actions which inputs are from available_inputs. /// Returned actions add single column which may be used for filter. /// Also, replace some nodes of current inputs to constant 1 in case they are filtered. 
- ActionsDAGPtr splitActionsForFilter(const std::string & filter_name, bool can_remove_filter, const Names & available_inputs); + ActionsDAGPtr splitActionsForFilter(const std::string & filter_name, bool can_remove_filter, const Names & available_inputs, const ColumnsWithTypeAndName & all_inputs); private: Node & addNode(Node node, bool can_replace = false, bool add_to_index = true); @@ -323,7 +323,7 @@ private: void compileFunctions(); - ActionsDAGPtr cloneActionsForConjunction(std::vector conjunction); + ActionsDAGPtr cloneActionsForConjunction(std::vector conjunction, const ColumnsWithTypeAndName & all_inputs); }; diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index d64f082b7ee..f6a4eecbad1 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -43,7 +43,8 @@ static size_t tryAddNewFilterStep( // std::cerr << "Filter: \n" << expression->dumpDAG() << std::endl; - auto split_filter = expression->splitActionsForFilter(filter_column_name, removes_filter, allowed_inputs); + const auto & all_inputs = child->getInputStreams().front().header.getColumnsWithTypeAndName(); + auto split_filter = expression->splitActionsForFilter(filter_column_name, removes_filter, allowed_inputs, all_inputs); if (!split_filter) return 0; From bb17f14d837f0aabf47aafbcd6161f9d09c06e49 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 17 Mar 2021 18:55:53 +0300 Subject: [PATCH 314/333] fix --- programs/client/Client.cpp | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 4a61662c238..4ba96280939 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -65,6 +65,7 @@ #include #include #include +#include #include #include #include @@ -116,6 +117,31 @@ namespace ErrorCodes } +static bool 
queryHasWithClause(const IAST * ast) +{ + if (const auto * select = dynamic_cast(ast); + select && select->with()) + { + return true; + } + + // This is a bit too much, because most of the children are not queries, + // but on the other hand it will let us to avoid breakage when the AST + // structure changes and some new variant of query nesting is added. This + // function is used in fuzzer, so it's better to be defensive and avoid + // weird unexpected errors. + for (const auto & child : ast->children) + { + if (queryHasWithClause(child.get())) + { + return true; + } + } + + return false; +} + + class Client : public Poco::Util::Application { public: @@ -1429,7 +1455,11 @@ private: // when `lambda()` function gets substituted into a wrong place. // To avoid dealing with these cases, run the check only for the // queries we were able to successfully execute. - if (!have_error) + // The final caveat is that sometimes WITH queries are not executed, + // if they are not referenced by the main SELECT, so they can still + // have the abovementioned problems. Disable this check for such + // queries, for lack of a better solution. + if (!have_error && queryHasWithClause(parsed_query.get())) { ASTPtr parsed_formatted_query; try From cb92d578e1913107f9e25536fea91025aa2d75df Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 17 Mar 2021 18:57:13 +0300 Subject: [PATCH 315/333] Update programs/client/Client.cpp --- programs/client/Client.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 4ba96280939..b879fb0a0ee 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1457,7 +1457,7 @@ private: // queries we were able to successfully execute. // The final caveat is that sometimes WITH queries are not executed, // if they are not referenced by the main SELECT, so they can still - // have the abovementioned problems. 
Disable this check for such + // have the aforementioned problems. Disable this check for such // queries, for lack of a better solution. if (!have_error && queryHasWithClause(parsed_query.get())) { From 3eba817a688f95744679779775358dc88310a56d Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Mar 2021 19:08:46 +0300 Subject: [PATCH 316/333] Add comments. --- src/Interpreters/ActionsDAG.cpp | 13 ++++++++----- src/Interpreters/ActionsDAG.h | 18 +++++++++++++++++- .../QueryPlan/Optimizations/filterPushDown.cpp | 2 +- 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 31abbc89634..e67ab82a5ca 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1360,7 +1360,6 @@ ColumnsWithTypeAndName prepareFunctionArguments(const std::vector conjunction, const ColumnsWithTypeAndName & all_inputs) { if (conjunction.empty()) @@ -1375,7 +1374,7 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::vector conjunc std::make_shared())); std::unordered_map nodes_mapping; - std::unordered_map> added_inputs; + std::unordered_map> required_inputs; struct Frame { @@ -1418,7 +1417,7 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::vector conjunc child = nodes_mapping[child]; if (node.type == ActionType::INPUT) - added_inputs[node.result_name].push_back(&node); + required_inputs[node.result_name].push_back(&node); stack.pop(); } @@ -1429,7 +1428,7 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::vector conjunc for (const auto & col : all_inputs) { Node * input; - auto & list = added_inputs[col.name]; + auto & list = required_inputs[col.name]; if (list.empty()) input = &const_cast(actions->addInput(col)); else @@ -1458,7 +1457,11 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::vector conjunc return actions; } -ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, bool can_remove_filter, const Names 
& available_inputs, const ColumnsWithTypeAndName & all_inputs) +ActionsDAGPtr ActionsDAG::cloneActionsForFilterPushDown( + const std::string & filter_name, + bool can_remove_filter, + const Names & available_inputs, + const ColumnsWithTypeAndName & all_inputs) { Node * predicate; diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index a3cfd9cb776..e0e0e9c8957 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -292,7 +292,23 @@ public: /// Otherwise, return actions which inputs are from available_inputs. /// Returned actions add single column which may be used for filter. /// Also, replace some nodes of current inputs to constant 1 in case they are filtered. - ActionsDAGPtr splitActionsForFilter(const std::string & filter_name, bool can_remove_filter, const Names & available_inputs, const ColumnsWithTypeAndName & all_inputs); + /// + /// @param all_inputs should containt inputs from previous step, which will be used for result actions. + /// It is expected that all_inputs contain columns from available_inputs. + /// This parameter is needed to enforce result actions save columns order in block. + /// Otherwise for some queries, e.g. with GROUP BY, columns colum be swapped. + /// Example: SELECT sum(x), y, z FROM tab WHERE z > 0 and sum(x) > 0 + /// Pushed condition: z > 0 + /// GROUP BY step will transform columns `x, y, z` -> `sum(x), y, z` + /// If we just add filter step with actions `z -> z > 0` before GROUP BY, + /// columns will be transformed like `x, y, z` -> `z, z > 0, x, y` -(remove filter)-> `z, z, y`. + /// To avoid it, add inputs from `all_inputs` list, + /// so actions `x, y, z -> x, y, z, z > 0` -(remove filter)-> `x, y, z` will not change columns order. 
+ ActionsDAGPtr cloneActionsForFilterPushDown( + const std::string & filter_name, + bool can_remove_filter, + const Names & available_inputs, + const ColumnsWithTypeAndName & all_inputs); private: Node & addNode(Node node, bool can_replace = false, bool add_to_index = true); diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index f6a4eecbad1..0b988f9803f 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -44,7 +44,7 @@ static size_t tryAddNewFilterStep( // std::cerr << "Filter: \n" << expression->dumpDAG() << std::endl; const auto & all_inputs = child->getInputStreams().front().header.getColumnsWithTypeAndName(); - auto split_filter = expression->splitActionsForFilter(filter_column_name, removes_filter, allowed_inputs, all_inputs); + auto split_filter = expression->cloneActionsForFilterPushDown(filter_column_name, removes_filter, allowed_inputs, all_inputs); if (!split_filter) return 0; From 5dcddbd4681a2182788e62c433a7c37357796e52 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 17 Mar 2021 19:52:50 +0300 Subject: [PATCH 317/333] boop --- docker/test/fuzzer/run-fuzzer.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 6858e838850..611fb411d6c 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -38,7 +38,6 @@ function download ln -s ./clickhouse ./clickhouse-server ln -s ./clickhouse ./clickhouse-client - # clickhouse-server is in the current dir export PATH="$PWD:$PATH" } From 2c1539f641aaac5267700a26e410c30fd54c2dd1 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Mar 2021 20:08:30 +0300 Subject: [PATCH 318/333] Fix added input. 
--- src/Interpreters/ActionsDAG.cpp | 6 +++--- src/Interpreters/ActionsDAG.h | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index e67ab82a5ca..e5ae2dcfcf9 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -91,7 +91,7 @@ const ActionsDAG::Node & ActionsDAG::addInput(std::string name, DataTypePtr type return addNode(std::move(node), can_replace, add_to_index); } -const ActionsDAG::Node & ActionsDAG::addInput(ColumnWithTypeAndName column, bool can_replace) +const ActionsDAG::Node & ActionsDAG::addInput(ColumnWithTypeAndName column, bool can_replace, bool add_to_index) { Node node; node.type = ActionType::INPUT; @@ -99,7 +99,7 @@ const ActionsDAG::Node & ActionsDAG::addInput(ColumnWithTypeAndName column, bool node.result_name = std::move(column.name); node.column = std::move(column.column); - return addNode(std::move(node), can_replace); + return addNode(std::move(node), can_replace, add_to_index); } const ActionsDAG::Node & ActionsDAG::addColumn(ColumnWithTypeAndName column, bool can_replace, bool materialize) @@ -1430,7 +1430,7 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::vector conjunc Node * input; auto & list = required_inputs[col.name]; if (list.empty()) - input = &const_cast(actions->addInput(col)); + input = &const_cast(actions->addInput(col, true, false)); else { input = list.front(); diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index e0e0e9c8957..fc6a0545ebd 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -200,7 +200,7 @@ public: std::string dumpDAG() const; const Node & addInput(std::string name, DataTypePtr type, bool can_replace = false, bool add_to_index = true); - const Node & addInput(ColumnWithTypeAndName column, bool can_replace = false); + const Node & addInput(ColumnWithTypeAndName column, bool can_replace = false, bool add_to_index = true); const 
Node & addColumn(ColumnWithTypeAndName column, bool can_replace = false, bool materialize = false); const Node & addAlias(const std::string & name, std::string alias, bool can_replace = false); const Node & addArrayJoin(const std::string & source_name, std::string result_name); @@ -301,7 +301,7 @@ public: /// Pushed condition: z > 0 /// GROUP BY step will transform columns `x, y, z` -> `sum(x), y, z` /// If we just add filter step with actions `z -> z > 0` before GROUP BY, - /// columns will be transformed like `x, y, z` -> `z, z > 0, x, y` -(remove filter)-> `z, z, y`. + /// columns will be transformed like `x, y, z` -> `z, z > 0, x, y` -(remove filter)-> `z, x, y`. /// To avoid it, add inputs from `all_inputs` list, /// so actions `x, y, z -> x, y, z, z > 0` -(remove filter)-> `x, y, z` will not change columns order. ActionsDAGPtr cloneActionsForFilterPushDown( From 6397099d3cf51b6721f1d3df372fe77e6401b329 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Mar 2021 20:08:49 +0300 Subject: [PATCH 319/333] Added test. 
--- .../01763_filter_push_down_bugs.reference | 6 +++ .../01763_filter_push_down_bugs.sql | 37 +++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 tests/queries/0_stateless/01763_filter_push_down_bugs.reference create mode 100644 tests/queries/0_stateless/01763_filter_push_down_bugs.sql diff --git a/tests/queries/0_stateless/01763_filter_push_down_bugs.reference b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference new file mode 100644 index 00000000000..66ea84a07c1 --- /dev/null +++ b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference @@ -0,0 +1,6 @@ +1 2 +1 2 +[1] 2 +[[1]] 2 +String1_0 String2_0 String3_0 String4_0 1 +String1_0 String2_0 String3_0 String4_0 1 diff --git a/tests/queries/0_stateless/01763_filter_push_down_bugs.sql b/tests/queries/0_stateless/01763_filter_push_down_bugs.sql new file mode 100644 index 00000000000..5000eb38878 --- /dev/null +++ b/tests/queries/0_stateless/01763_filter_push_down_bugs.sql @@ -0,0 +1,37 @@ +SELECT * FROM (SELECT col1, col2 FROM (select '1' as col1, '2' as col2) GROUP by col1, col2) AS expr_qry WHERE col2 != ''; +SELECT * FROM (SELECT materialize('1') AS s1, materialize('2') AS s2 GROUP BY s1, s2) WHERE s2 = '2'; +SELECT * FROM (SELECT materialize([1]) AS s1, materialize('2') AS s2 GROUP BY s1, s2) WHERE s2 = '2'; +SELECT * FROM (SELECT materialize([[1]]) AS s1, materialize('2') AS s2 GROUP BY s1, s2) WHERE s2 = '2'; + +DROP TABLE IF EXISTS Test; + +CREATE TABLE Test +ENGINE = MergeTree() +PRIMARY KEY (String1,String2) +ORDER BY (String1,String2) +AS +SELECT + 'String1_' || toString(number) as String1, + 'String2_' || toString(number) as String2, + 'String3_' || toString(number) as String3, + 'String4_' || toString(number%4) as String4 +FROM numbers(1); + +SELECT * +FROM + ( + SELECT String1,String2,String3,String4,COUNT(*) + FROM Test + GROUP by String1,String2,String3,String4 + ) AS expr_qry; + +SELECT * +FROM + ( + SELECT String1,String2,String3,String4,COUNT(*) + FROM Test 
+ GROUP by String1,String2,String3,String4 + ) AS expr_qry +WHERE String4 ='String4_0'; + +DROP TABLE IF EXISTS Test; From 89e79185a0f764e1009a061ba01ea4cb93704c55 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 17 Mar 2021 20:09:29 +0300 Subject: [PATCH 320/333] Update tests.md --- docs/en/development/tests.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index fb453e55417..7547497b9af 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -233,7 +233,7 @@ Google OSS-Fuzz can be found at `docker/fuzz`. We also use simple fuzz test to generate random SQL queries and to check that the server doesn’t die executing them. You can find it in `00746_sql_fuzzy.pl`. This test should be run continuously (overnight and longer). -We also use sophisticated AST-based query fuzzer that is able to find huge amount of corner cases. It does random permutations and substitutions in queries AST. It remembers AST nodes from previous tests to use them for fuzzing of subsequent tests while processing them in random order. +We also use sophisticated AST-based query fuzzer that is able to find huge amount of corner cases. It does random permutations and substitutions in queries AST. It remembers AST nodes from previous tests to use them for fuzzing of subsequent tests while processing them in random order. You can learn more about this fuzzer in [this blog article](https://clickhouse.tech/blog/en/2021/fuzzing-clickhouse/). 
## Stress test From 8cb19d63aef4777ee8ebfa3fe94f229dbbd6dc4b Mon Sep 17 00:00:00 2001 From: robert Date: Wed, 17 Mar 2021 20:38:49 +0300 Subject: [PATCH 321/333] zookeeper-dump-tree: added ctime option to dump node ctime --- utils/zookeeper-dump-tree/main.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/utils/zookeeper-dump-tree/main.cpp b/utils/zookeeper-dump-tree/main.cpp index 5ab7bc2d536..5dcc260762f 100644 --- a/utils/zookeeper-dump-tree/main.cpp +++ b/utils/zookeeper-dump-tree/main.cpp @@ -17,6 +17,7 @@ int main(int argc, char ** argv) "addresses of ZooKeeper instances, comma separated. Example: example01e.yandex.ru:2181") ("path,p", boost::program_options::value()->default_value("/"), "where to start") + ("ctime,c", "print node ctime") ; boost::program_options::variables_map options; @@ -79,7 +80,11 @@ int main(int argc, char ** argv) throw; } - std::cout << it->first << '\t' << response.stat.numChildren << '\t' << response.stat.dataLength << '\n'; + std::cout << it->first << '\t' << response.stat.numChildren << '\t' << response.stat.dataLength; + if (options.count("ctime")) { + std::cout << '\t' << response.stat.ctime; + } + std::cout << '\n'; for (const auto & name : response.names) { From e1b11c786bf23844b4132d5560e4fac0248f741c Mon Sep 17 00:00:00 2001 From: robert Date: Wed, 17 Mar 2021 21:10:51 +0300 Subject: [PATCH 322/333] zookeeper-dump-tree: move ctime option to bool --- utils/zookeeper-dump-tree/main.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/zookeeper-dump-tree/main.cpp b/utils/zookeeper-dump-tree/main.cpp index 5dcc260762f..47e23c5af71 100644 --- a/utils/zookeeper-dump-tree/main.cpp +++ b/utils/zookeeper-dump-tree/main.cpp @@ -31,6 +31,8 @@ int main(int argc, char ** argv) return 1; } + bool dump_ctime = (options.count("ctime")) ? 
true : false; + zkutil::ZooKeeperPtr zookeeper = std::make_shared(options.at("address").as()); std::string initial_path = options.at("path").as(); @@ -81,7 +83,7 @@ int main(int argc, char ** argv) } std::cout << it->first << '\t' << response.stat.numChildren << '\t' << response.stat.dataLength; - if (options.count("ctime")) { + if (dump_ctime) { std::cout << '\t' << response.stat.ctime; } std::cout << '\n'; From 9fdb0e667fbeef9e13750afc4be90a29618c292e Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Mar 2021 21:40:26 +0300 Subject: [PATCH 323/333] Add comment. --- src/Interpreters/ActionsDAG.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index e5ae2dcfcf9..94d9b72b8e9 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1424,7 +1424,8 @@ ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::vector conjunc } } - + /// Actions must have the same inputs as in all_inputs list. + /// See comment to cloneActionsForFilterPushDown. for (const auto & col : all_inputs) { Node * input; From 45fead90621dfd2cf19bbd350aca703f4604b77f Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Mar 2021 21:42:42 +0300 Subject: [PATCH 324/333] Fix typos --- src/Interpreters/ActionsDAG.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index fc6a0545ebd..d5a15324280 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -293,10 +293,10 @@ public: /// Returned actions add single column which may be used for filter. /// Also, replace some nodes of current inputs to constant 1 in case they are filtered. /// - /// @param all_inputs should containt inputs from previous step, which will be used for result actions. + /// @param all_inputs should contain inputs from previous step, which will be used for result actions. 
/// It is expected that all_inputs contain columns from available_inputs. /// This parameter is needed to enforce result actions save columns order in block. - /// Otherwise for some queries, e.g. with GROUP BY, columns colum be swapped. + /// Otherwise for some queries, e.g. with GROUP BY, columns will be mixed. /// Example: SELECT sum(x), y, z FROM tab WHERE z > 0 and sum(x) > 0 /// Pushed condition: z > 0 /// GROUP BY step will transform columns `x, y, z` -> `sum(x), y, z` From 9b1d256f5460bdda9387c5656e1b5d4f43fbf3f4 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 17 Mar 2021 21:44:43 +0300 Subject: [PATCH 325/333] clang-tidy...... --- programs/client/Client.cpp | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 4ba96280939..2528357565c 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -130,11 +130,17 @@ static bool queryHasWithClause(const IAST * ast) // structure changes and some new variant of query nesting is added. This // function is used in fuzzer, so it's better to be defensive and avoid // weird unexpected errors. - for (const auto & child : ast->children) + // clang-tidy is confused by this function: it thinks that if `select` is + // nullptr, `ast` is also nullptr, and complains about nullptr dereference. 
+ // NOLINTNEXTLINE + if (ast->children) { - if (queryHasWithClause(child.get())) + for (const auto & child : ast->children) /* NOLINT */ { - return true; + if (queryHasWithClause(child.get())) + { + return true; + } } } From bb460dd7f4e6da81a06920234b5e8cdcb15c9f91 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 17 Mar 2021 21:55:06 +0300 Subject: [PATCH 326/333] fix --- programs/client/Client.cpp | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index d77b196dff1..c2450c9e48f 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -125,22 +125,19 @@ static bool queryHasWithClause(const IAST * ast) return true; } - // This is a bit too much, because most of the children are not queries, - // but on the other hand it will let us to avoid breakage when the AST - // structure changes and some new variant of query nesting is added. This - // function is used in fuzzer, so it's better to be defensive and avoid - // weird unexpected errors. + // This full recursive walk is somewhat excessive, because most of the + // children are not queries, but on the other hand it will let us to avoid + // breakage when the AST structure changes and some new variant of query + // nesting is added. This function is used in fuzzer, so it's better to be + // defensive and avoid weird unexpected errors. // clang-tidy is confused by this function: it thinks that if `select` is // nullptr, `ast` is also nullptr, and complains about nullptr dereference. 
// NOLINTNEXTLINE - if (ast->children) + for (const auto & child : ast->children) { - for (const auto & child : ast->children) /* NOLINT */ + if (queryHasWithClause(child.get())) { - if (queryHasWithClause(child.get())) - { - return true; - } + return true; } } From a3c1096fdbb59cdc52666e321dea93cbf17a0085 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 17 Mar 2021 21:59:03 +0300 Subject: [PATCH 327/333] Update run-fuzzer.sh --- docker/test/fuzzer/run-fuzzer.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 611fb411d6c..2a6cf7cb0d2 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -20,11 +20,8 @@ function clone # the repo from the CI as well. For local runs, start directly from the "fuzz" # stage. rm -rf ch ||: - mkdir ch - cd ch wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz" - tar -xvf clickhouse_no_subs.tar.gz - tree ||: + tar -xf -C ch --strip-components=1 clickhouse_no_subs.tar.gz ls -lath ||: } From 3a8d7e9a48d254eb9adb6f5c9623834b65658df6 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 17 Mar 2021 22:12:10 +0300 Subject: [PATCH 328/333] Update main.cpp --- utils/zookeeper-dump-tree/main.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/zookeeper-dump-tree/main.cpp b/utils/zookeeper-dump-tree/main.cpp index 47e23c5af71..893056564bb 100644 --- a/utils/zookeeper-dump-tree/main.cpp +++ b/utils/zookeeper-dump-tree/main.cpp @@ -31,7 +31,7 @@ int main(int argc, char ** argv) return 1; } - bool dump_ctime = (options.count("ctime")) ? 
true : false; + bool dump_ctime = options.count("ctime"); zkutil::ZooKeeperPtr zookeeper = std::make_shared(options.at("address").as()); @@ -83,9 +83,8 @@ int main(int argc, char ** argv) } std::cout << it->first << '\t' << response.stat.numChildren << '\t' << response.stat.dataLength; - if (dump_ctime) { + if (dump_ctime) std::cout << '\t' << response.stat.ctime; - } std::cout << '\n'; for (const auto & name : response.names) From 51cd10c8eb440421ce7b7d7f94c4a4a471ccc1d9 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 18 Mar 2021 00:22:36 +0300 Subject: [PATCH 329/333] Update run-fuzzer.sh --- docker/test/fuzzer/run-fuzzer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 2a6cf7cb0d2..2f6aac5c183 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -21,7 +21,7 @@ function clone # stage. rm -rf ch ||: wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz" - tar -xf -C ch --strip-components=1 clickhouse_no_subs.tar.gz + tar -C ch --strip-components=1 -xf clickhouse_no_subs.tar.gz ls -lath ||: } From ebd20e6052888474ba1ef9309a07ab1ea9ca8649 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 18 Mar 2021 05:09:38 +0300 Subject: [PATCH 330/333] Update run-fuzzer.sh --- docker/test/fuzzer/run-fuzzer.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 2f6aac5c183..7707d7e0459 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -20,6 +20,7 @@ function clone # the repo from the CI as well. For local runs, start directly from the "fuzz" # stage. 
rm -rf ch ||: + mkdir ch ||: wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz" tar -C ch --strip-components=1 -xf clickhouse_no_subs.tar.gz ls -lath ||: From 1f5904fea9f332a9986634c5614b08b4b2362c42 Mon Sep 17 00:00:00 2001 From: TCeason Date: Wed, 17 Mar 2021 15:54:55 +0800 Subject: [PATCH 331/333] fix integration MaterializeMySQL test https://github.com/ClickHouse/ClickHouse/pull/21759 --- tests/integration/README.md | 5 +++-- .../materialize_with_ddl.py | 22 +++++++++++++------ 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/tests/integration/README.md b/tests/integration/README.md index cdfb6b1a70a..e4073f96449 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -12,7 +12,7 @@ You must install latest Docker from https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#set-up-the-repository Don't use Docker from your system repository. -* [pip](https://pypi.python.org/pypi/pip) and `libpq-dev`. To install: `sudo apt-get install python3-pip libpq-dev zlib1g-dev libcrypto++-dev libssl-dev` +* [pip](https://pypi.python.org/pypi/pip) and `libpq-dev`. To install: `sudo apt-get install python3-pip libpq-dev zlib1g-dev libcrypto++-dev libssl-dev libkrb5-dev` * [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest` * [docker-compose](https://docs.docker.com/compose/) and additional python libraries. 
To install: @@ -39,7 +39,8 @@ sudo -H pip install \ redis \ tzlocal \ urllib3 \ - requests-kerberos + requests-kerberos \ + dict2xml ``` (highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python3-pytest python3-dicttoxml python3-docker python3-pymysql python3-pymongo python3-tzlocal python3-kazoo python3-psycopg2 kafka-python python3-pytest-timeout python3-minio` diff --git a/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py b/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py index f906c309443..1675b72e0c4 100644 --- a/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py +++ b/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py @@ -653,10 +653,17 @@ def mysql_kill_sync_thread_restore_test(clickhouse_node, mysql_node, service_nam check_query(clickhouse_node, "SELECT * FROM test_database.test_table FORMAT TSV", '1\n') check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table FORMAT TSV", '11\n') + + # When ClickHouse dump all history data we can query it on ClickHouse + # but it don't mean that the sync thread is already to connect to MySQL. + # So After ClickHouse can query data, insert some rows to MySQL. Use this to re-check sync successed. 
+ mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (22)") + mysql_node.query("INSERT INTO test_database.test_table VALUES (2)") + check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n') + check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n') + get_sync_id_query = "select id from information_schema.processlist where STATE='Master has sent all binlog to slave; waiting for more updates'" result = mysql_node.query_and_get_data(get_sync_id_query) - assert len(result) == 2 - for row in result: row_result = {} query = "kill " + str(row[0]) + ";" @@ -671,13 +678,13 @@ def mysql_kill_sync_thread_restore_test(clickhouse_node, mysql_node, service_nam clickhouse_node.query("DETACH DATABASE test_database") clickhouse_node.query("ATTACH DATABASE test_database") - check_query(clickhouse_node, "SELECT * FROM test_database.test_table FORMAT TSV", '1\n') - - mysql_node.query("INSERT INTO test_database.test_table VALUES (2)") check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n') - mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (12)") - check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n12\n') + mysql_node.query("INSERT INTO test_database.test_table VALUES (3)") + check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n3\n') + + mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (33)") + check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n33\n') clickhouse_node.query("DROP DATABASE test_database") clickhouse_node.query("DROP DATABASE test_database_auto") @@ -756,6 +763,7 @@ def utf8mb4_test(clickhouse_node, mysql_node, service_name): mysql_node.query("CREATE TABLE utf8mb4_test.test (id INT(11) NOT NULL PRIMARY KEY, name 
VARCHAR(255)) ENGINE=InnoDB DEFAULT CHARACTER SET utf8mb4") mysql_node.query("INSERT INTO utf8mb4_test.test VALUES(1, '🦄'),(2, '\u2601')") clickhouse_node.query("CREATE DATABASE utf8mb4_test ENGINE = MaterializeMySQL('{}:3306', 'utf8mb4_test', 'root', 'clickhouse')".format(service_name)) + check_query(clickhouse_node, "SHOW TABLES FROM utf8mb4_test FORMAT TSV", "test\n") check_query(clickhouse_node, "SELECT id, name FROM utf8mb4_test.test ORDER BY id", "1\t\U0001F984\n2\t\u2601\n") def system_parts_test(clickhouse_node, mysql_node, service_name): From c4dfd97709972c271b3cc9e306f7be80cbd3af9f Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 18 Mar 2021 10:59:48 +0300 Subject: [PATCH 332/333] Fix ActionsDAG::Index::insert --- src/Interpreters/ActionsDAG.h | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index d5a15324280..4e334bd1be8 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -120,8 +120,31 @@ public: /// Insert method doesn't check if map already have node with the same name. /// If node with the same name exists, it is removed from map, but not list. /// It is expected and used for project(), when result may have several columns with the same name. 
- void insert(Node * node) { map[node->result_name] = list.emplace(list.end(), node); } - void prepend(Node * node) { map[node->result_name] = list.emplace(list.begin(), node); } + void insert(Node * node) + { + auto it = list.emplace(list.end(), node); + if (auto handle = map.extract(node->result_name)) + { + handle.key() = node->result_name; /// Change string_view + handle.mapped() = it; + map.insert(std::move(handle)); + } + else + map[node->result_name] = it; + } + + void prepend(Node * node) + { + auto it = list.emplace(list.begin(), node); + if (auto handle = map.extract(node->result_name)) + { + handle.key() = node->result_name; /// Change string_view + handle.mapped() = it; + map.insert(std::move(handle)); + } + else + map[node->result_name] = it; + } /// If node with same name exists in index, replace it. Otherwise insert new node to index. void replace(Node * node) From c068538a8e2388f8096c8fedd6ff38eed7aae9aa Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Thu, 18 Mar 2021 17:04:52 +0300 Subject: [PATCH 333/333] Update ldap.md --- docs/ru/operations/external-authenticators/ldap.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ru/operations/external-authenticators/ldap.md b/docs/ru/operations/external-authenticators/ldap.md index b53c4cba121..312020000ea 100644 --- a/docs/ru/operations/external-authenticators/ldap.md +++ b/docs/ru/operations/external-authenticators/ldap.md @@ -55,7 +55,7 @@ - Возможные значения: `never`, `allow`, `try`, `demand` (по-умолчанию). - `tls_cert_file` — путь к файлу сертификата. - `tls_key_file` — путь к файлу ключа сертификата. -- `tls_ca_cert_file` — путь к файлу ЦС сертификата. +- `tls_ca_cert_file` — путь к файлу ЦС (certification authority) сертификата. - `tls_ca_cert_dir` — путь к каталогу, содержащему сертификаты ЦС. - `tls_cipher_suite` — разрешенный набор шифров (в нотации OpenSSL). 
@@ -96,7 +96,7 @@ CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'; В дополнение к локально определенным пользователям, удаленный LDAP сервер может служить источником определения пользователей. Для этого укажите имя определенного ранее сервера LDAP (см. [Определение LDAP сервера](#ldap-server-definition)) в секции `ldap` внутри секции `users_directories` файла `config.xml`. -При каждой попытке авторизации ClickHouse пытается локально найти определение пользователя и аутентифицировать его как обычно. Если пользователь не находится локально, ClickHouse предполагает, что он определяется во внешнем LDAP каталоге и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю присваиваются роли из списка, указанного в секции `roles`. Кроме того, если настроена секция `role_mapping`, то выполняется LDAP поиск, а его результаты преобразуются в имена ролей и присваиваются пользователям. Все это работает при условии, что SQL-ориентированное [управлением доступом](../access-rights.md#access-control) включено, а роли созданы запросом [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). +При каждой попытке аутентификации ClickHouse пытается локально найти определение пользователя и аутентифицировать его как обычно. Если пользователь не находится локально, ClickHouse предполагает, что он определяется во внешнем LDAP каталоге и пытается "привязаться" к DN, указанному на LDAP сервере, используя предоставленные реквизиты для входа. Если попытка оказалась успешной, пользователь считается существующим и аутентифицированным. Пользователю присваиваются роли из списка, указанного в секции `roles`. Кроме того, если настроена секция `role_mapping`, то выполняется LDAP поиск, а его результаты преобразуются в имена ролей и присваиваются пользователям. 
Все это работает при условии, что SQL-ориентированное [управление доступом](../access-rights.md#access-control) включено, а роли созданы запросом [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement). **Пример**