diff --git a/README.md b/README.md
index abaf27abf11..3270cd19671 100644
--- a/README.md
+++ b/README.md
@@ -42,7 +42,6 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else

 Upcoming meetups

-* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
 * [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
 * [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
 * [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
@@ -53,6 +52,7 @@ Upcoming meetups

 Recently completed meetups

+* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
 * [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
 * [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
 * [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
diff --git a/base/base/defines.h b/base/base/defines.h
index 5685a6d9833..a0c3c0d1de5 100644
--- a/base/base/defines.h
+++ b/base/base/defines.h
@@ -145,6 +145,7 @@
 #define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure
 #define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability
 #define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability
+#define TSA_RETURN_CAPABILITY(...) __attribute__((lock_returned(__VA_ARGS__))) /// function returns a reference to the given capability

 /// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function)
 /// They use a lambda function to apply function attribute to a single statement. This enable us to suppress warnings locally instead of
diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu
index 0fe9a409ee4..e6bde845c4e 100644
--- a/docker/server/Dockerfile.ubuntu
+++ b/docker/server/Dockerfile.ubuntu
@@ -113,7 +113,9 @@ RUN clickhouse local -q 'SELECT 1' >/dev/null 2>&1 && exit 0 || : \
         /var/lib/apt/lists/* \
         /var/cache/debconf \
         /tmp/* \
-    && apt-get autoremove --purge -yq dirmngr gnupg2
+    && apt-get autoremove --purge -yq dirmngr gnupg2 \
+    && chmod ugo+Xrw -R /etc/clickhouse-server /etc/clickhouse-client
+# The last chmod is here to make the next one a no-op in the docker official library Dockerfile

 # post install
 # we need to allow "others" access to clickhouse folder, because docker container
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index c5f92ccdf68..ca4938b1a47 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -597,6 +597,30 @@ If number of tables is greater than this value, server will throw an exception.
 <max_table_num_to_throw>400</max_table_num_to_throw>
 ```

+## max\_replicated\_table\_num\_to\_throw {#max-replicated-table-num-to-throw}
+If the number of replicated tables is greater than this value, the server will throw an exception. 0 means no limitation. Only tables in Atomic/Ordinary/Replicated/Lazy databases are counted.
+
+**Example**
+```xml
+<max_replicated_table_num_to_throw>400</max_replicated_table_num_to_throw>
+```
+
+## max\_dictionary\_num\_to\_throw {#max-dictionary-num-to-throw}
+If the number of dictionaries is greater than this value, the server will throw an exception. 0 means no limitation. Only dictionaries in Atomic/Ordinary/Replicated/Lazy databases are counted.
+
+**Example**
+```xml
+<max_dictionary_num_to_throw>400</max_dictionary_num_to_throw>
+```
+
+## max\_view\_num\_to\_throw {#max-view-num-to-throw}
+If the number of views is greater than this value, the server will throw an exception. 0 means no limitation. Only views in Atomic/Ordinary/Replicated/Lazy databases are counted.
+
+**Example**
+```xml
+<max_view_num_to_throw>400</max_view_num_to_throw>
+```
+
 ## max\_database\_num\_to\_throw {#max-table-num-to-throw}
 If number of _database is greater than this value, server will throw an exception. 0 means no limitation.
 Default value: 0
diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp
index bf67db8ff2e..e73f61dde83 100644
--- a/programs/compressor/Compressor.cpp
+++ b/programs/compressor/Compressor.cpp
@@ -12,9 +12,12 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -43,29 +46,24 @@ namespace CurrentMetrics

 namespace
 {

-/// Outputs sizes of uncompressed and compressed blocks for compressed file.
+/// Outputs the compression method and the sizes of uncompressed and compressed blocks for a compressed file.
 void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out)
 {
     while (!in.eof())
     {
-        in.ignore(16);    /// checksum
-
-        char header[COMPRESSED_BLOCK_HEADER_SIZE];
-        in.readStrict(header, COMPRESSED_BLOCK_HEADER_SIZE);
-
-        UInt32 size_compressed = unalignedLoad<UInt32>(&header[1]);
+        UInt32 size_compressed;
+        UInt32 size_decompressed;
+        auto codec = DB::getCompressionCodecForFile(in, size_compressed, size_decompressed, true /* skip_to_next_block */);

         if (size_compressed > DBMS_MAX_COMPRESSED_SIZE)
             throw DB::Exception(DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED, "Too large size_compressed. Most likely corrupted data.");

-        UInt32 size_decompressed = unalignedLoad<UInt32>(&header[5]);
-
+        DB::writeText(queryToString(codec->getFullCodecDesc()), out);
+        DB::writeChar('\t', out);
         DB::writeText(size_decompressed, out);
         DB::writeChar('\t', out);
         DB::writeText(size_compressed, out);
         DB::writeChar('\n', out);
-
-        in.ignore(size_compressed - COMPRESSED_BLOCK_HEADER_SIZE);
     }
 }
diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp
index 5819c533fd9..ad1fecac784 100644
--- a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp
+++ b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp
@@ -22,6 +22,13 @@ namespace ErrorCodes
 namespace
 {

+/** Due to a lack of proper code review, this code was contributed with a multiplication of template instantiations
+  * over all pairs of data types, and we deeply regret that.
+  *
+  * We cannot remove all combinations, because the binary representation of serialized data has to remain the same,
+  * but we can partially heal the wound by treating unsigned and signed data types in the same way.
+ */ + template struct AggregationFunctionDeltaSumTimestampData { @@ -37,23 +44,22 @@ template class AggregationFunctionDeltaSumTimestamp final : public IAggregateFunctionDataHelper< AggregationFunctionDeltaSumTimestampData, - AggregationFunctionDeltaSumTimestamp - > + AggregationFunctionDeltaSumTimestamp> { public: AggregationFunctionDeltaSumTimestamp(const DataTypes & arguments, const Array & params) : IAggregateFunctionDataHelper< AggregationFunctionDeltaSumTimestampData, - AggregationFunctionDeltaSumTimestamp - >{arguments, params, createResultType()} - {} + AggregationFunctionDeltaSumTimestamp>{arguments, params, createResultType()} + { + } AggregationFunctionDeltaSumTimestamp() : IAggregateFunctionDataHelper< AggregationFunctionDeltaSumTimestampData, - AggregationFunctionDeltaSumTimestamp - >{} - {} + AggregationFunctionDeltaSumTimestamp>{} + { + } bool allocatesMemoryInArena() const override { return false; } @@ -63,8 +69,8 @@ public: void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { - auto value = assert_cast &>(*columns[0]).getData()[row_num]; - auto ts = assert_cast &>(*columns[1]).getData()[row_num]; + auto value = unalignedLoad(columns[0]->getRawData().data() + row_num * sizeof(ValueType)); + auto ts = unalignedLoad(columns[1]->getRawData().data() + row_num * sizeof(TimestampType)); auto & data = this->data(place); @@ -172,10 +178,48 @@ public: void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - assert_cast &>(to).getData().push_back(this->data(place).sum); + static_cast(to).template insertRawData( + reinterpret_cast(&this->data(place).sum)); } }; + +template class AggregateFunctionTemplate, typename... TArgs> +IAggregateFunction * createWithTwoTypesSecond(const IDataType & second_type, TArgs && ... args) +{ + WhichDataType which(second_type); + + if (which.idx == TypeIndex::UInt32) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::UInt64) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::Int32) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::Int64) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::Float32) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::Float64) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate(args...); + + return nullptr; +} + +template