mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-22 07:31:57 +00:00

Merge branch 'ClickHouse:master' into zvonand-globs-small-fix
commit 6195390c77

contrib/CMakeLists.txt vendored (2 changes)
@@ -136,9 +136,7 @@ add_contrib (aws-cmake
 )

 add_contrib (base64-cmake base64)
-if (NOT ARCH_S390X)
-    add_contrib (simdjson-cmake simdjson)
-endif()
+add_contrib (simdjson-cmake simdjson)
 add_contrib (rapidjson-cmake rapidjson)
 add_contrib (fastops-cmake fastops)
 add_contrib (libuv-cmake libuv)
contrib/openssl vendored (2 changes)
@@ -1 +1 @@
-Subproject commit 19cc035b6c6f2283573d29c7ea7f7d675cf750ce
+Subproject commit 245cb0291e0db99d9ccf3692fa76f440b2b054c2
@@ -1,8 +1,8 @@
 /*
  * WARNING: do not edit!
- * Generated by Makefile from ../include/openssl/cmp.h.in
+ * Generated by Makefile from include/openssl/cmp.h.in
  *
- * Copyright 2007-2021 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 2007-2023 The OpenSSL Project Authors. All Rights Reserved.
  * Copyright Nokia 2007-2019
  * Copyright Siemens AG 2015-2019
  *

@@ -193,6 +193,9 @@ typedef ASN1_BIT_STRING OSSL_CMP_PKIFAILUREINFO;
  * -- CertReqMsg
  * }
  */
+# define OSSL_CMP_PKISTATUS_request -3
+# define OSSL_CMP_PKISTATUS_trans -2
+# define OSSL_CMP_PKISTATUS_unspecified -1
 # define OSSL_CMP_PKISTATUS_accepted 0
 # define OSSL_CMP_PKISTATUS_grantedWithMods 1
 # define OSSL_CMP_PKISTATUS_rejection 2

@@ -439,11 +442,12 @@ int OSSL_CMP_CTX_build_cert_chain(OSSL_CMP_CTX *ctx, X509_STORE *own_trusted,
 int OSSL_CMP_CTX_set1_pkey(OSSL_CMP_CTX *ctx, EVP_PKEY *pkey);
 int OSSL_CMP_CTX_set1_referenceValue(OSSL_CMP_CTX *ctx,
                                      const unsigned char *ref, int len);
-int OSSL_CMP_CTX_set1_secretValue(OSSL_CMP_CTX *ctx, const unsigned char *sec,
-                                  const int len);
+int OSSL_CMP_CTX_set1_secretValue(OSSL_CMP_CTX *ctx,
+                                  const unsigned char *sec, int len);
 /* CMP message header and extra certificates: */
 int OSSL_CMP_CTX_set1_recipient(OSSL_CMP_CTX *ctx, const X509_NAME *name);
 int OSSL_CMP_CTX_push0_geninfo_ITAV(OSSL_CMP_CTX *ctx, OSSL_CMP_ITAV *itav);
+int OSSL_CMP_CTX_reset_geninfo_ITAVs(OSSL_CMP_CTX *ctx);
 int OSSL_CMP_CTX_set1_extraCertsOut(OSSL_CMP_CTX *ctx,
                                     STACK_OF(X509) *extraCertsOut);
 /* certificate template: */

@@ -499,6 +503,7 @@ ASN1_OCTET_STRING *OSSL_CMP_HDR_get0_recipNonce(const OSSL_CMP_PKIHEADER *hdr);
 OSSL_CMP_PKIHEADER *OSSL_CMP_MSG_get0_header(const OSSL_CMP_MSG *msg);
 int OSSL_CMP_MSG_get_bodytype(const OSSL_CMP_MSG *msg);
 int OSSL_CMP_MSG_update_transactionID(OSSL_CMP_CTX *ctx, OSSL_CMP_MSG *msg);
+int OSSL_CMP_MSG_update_recipNonce(OSSL_CMP_CTX *ctx, OSSL_CMP_MSG *msg);
 OSSL_CRMF_MSG *OSSL_CMP_CTX_setup_CRM(OSSL_CMP_CTX *ctx, int for_KUR, int rid);
 OSSL_CMP_MSG *OSSL_CMP_MSG_read(const char *file, OSSL_LIB_CTX *libctx,
                                 const char *propq);

@@ -1,6 +1,6 @@
 /*
  * WARNING: do not edit!
- * Generated by Makefile from ../include/openssl/opensslv.h.in
+ * Generated by Makefile from include/openssl/opensslv.h.in
  *
  * Copyright 1999-2020 The OpenSSL Project Authors. All Rights Reserved.
  *

@@ -29,7 +29,7 @@ extern "C" {
  */
 # define OPENSSL_VERSION_MAJOR 3
 # define OPENSSL_VERSION_MINOR 0
-# define OPENSSL_VERSION_PATCH 7
+# define OPENSSL_VERSION_PATCH 10

 /*
  * Additional version information

@@ -74,21 +74,21 @@ extern "C" {
  * longer variant with OPENSSL_VERSION_PRE_RELEASE_STR and
  * OPENSSL_VERSION_BUILD_METADATA_STR appended.
  */
-# define OPENSSL_VERSION_STR "3.0.7"
-# define OPENSSL_FULL_VERSION_STR "3.0.7"
+# define OPENSSL_VERSION_STR "3.0.10"
+# define OPENSSL_FULL_VERSION_STR "3.0.10"

 /*
  * SECTION 3: ADDITIONAL METADATA
  *
  * These strings are defined separately to allow them to be parsable.
  */
-# define OPENSSL_RELEASE_DATE "1 Nov 2022"
+# define OPENSSL_RELEASE_DATE "1 Aug 2023"

 /*
  * SECTION 4: BACKWARD COMPATIBILITY
  */

-# define OPENSSL_VERSION_TEXT "OpenSSL 3.0.7 1 Nov 2022"
+# define OPENSSL_VERSION_TEXT "OpenSSL 3.0.10 1 Aug 2023"

 /* Synthesize OPENSSL_VERSION_NUMBER with the layout 0xMNN00PPSL */
 # ifdef OPENSSL_VERSION_PRE_RELEASE

@@ -1,8 +1,8 @@
 /*
  * WARNING: do not edit!
- * Generated by Makefile from ../include/openssl/x509v3.h.in
+ * Generated by Makefile from include/openssl/x509v3.h.in
  *
- * Copyright 1999-2021 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 1999-2023 The OpenSSL Project Authors. All Rights Reserved.
  *
  * Licensed under the Apache License 2.0 (the "License"). You may not use
  * this file except in compliance with the License. You can obtain a copy

@@ -177,7 +177,7 @@ typedef struct GENERAL_NAME_st {
     OTHERNAME *otherName;   /* otherName */
     ASN1_IA5STRING *rfc822Name;
     ASN1_IA5STRING *dNSName;
-    ASN1_TYPE *x400Address;
+    ASN1_STRING *x400Address;
     X509_NAME *directoryName;
     EDIPARTYNAME *ediPartyName;
     ASN1_IA5STRING *uniformResourceIdentifier;
contrib/usearch vendored (2 changes)
@@ -1 +1 @@
-Subproject commit 387b78b28b17b8954024ffc81e97cbcfa10d1f30
+Subproject commit f942b6f334b31716f9bdb02eb6a25fa6b222f5ba
docs/changelogs/v22.8.21.38-lts.md (new file, 36 additions)
@@ -0,0 +1,36 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.8.21.38-lts (70872e9859e) FIXME as compared to v22.8.20.11-lts (c9ca79e24e8)

#### Build/Testing/Packaging Improvement
* Backported in [#53017](https://github.com/ClickHouse/ClickHouse/issues/53017): Packing inline cache into docker images sometimes causes strange special effects. Since we don't use it at all, it's good to go. [#53008](https://github.com/ClickHouse/ClickHouse/pull/53008) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53459](https://github.com/ClickHouse/ClickHouse/issues/53459): Preserve environment parameters in `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix Block structure mismatch in Pipe::unitePipes for FINAL [#51492](https://github.com/ClickHouse/ClickHouse/pull/51492) ([Nikita Taranov](https://github.com/nickitat)).
* Fix ORDER BY tuple of WINDOW functions [#52145](https://github.com/ClickHouse/ClickHouse/pull/52145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `countSubstrings()` hang with empty needle and a column haystack [#52409](https://github.com/ClickHouse/ClickHouse/pull/52409) ([Sergei Trifonov](https://github.com/serxa)).
* The implementation of AnyHash was non-conformant. [#52448](https://github.com/ClickHouse/ClickHouse/pull/52448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* init and destroy ares channel on demand.. [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
* clickhouse-keeper: fix implementation of server with poll() [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
* Not-ready Set [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix crash in comparison functions due to incorrect query analysis [#52172](https://github.com/ClickHouse/ClickHouse/pull/52172) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix deadlocks in StorageTableFunctionProxy [#52626](https://github.com/ClickHouse/ClickHouse/pull/52626) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix broken `02862_sorted_distinct_sparse_fix` [#53738](https://github.com/ClickHouse/ClickHouse/pull/53738) ([Antonio Andelic](https://github.com/antonio2368)).
* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v23.3.11.5-lts.md (new file, 17 additions)
@@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.3.11.5-lts (5762a23a76d) FIXME as compared to v23.3.10.5-lts (d8737007f9e)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v23.5.5.92-stable.md (new file, 62 additions)
@@ -0,0 +1,62 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.5.5.92-stable (557edaddace) FIXME as compared to v23.5.4.25-stable (190f962abcf)

#### Performance Improvement
* Backported in [#52749](https://github.com/ClickHouse/ClickHouse/issues/52749): Fix incorrect projection analysis which invalidates primary keys. This issue only exists when `query_plan_optimize_primary_key = 1, query_plan_optimize_projection = 1` . This fixes [#48823](https://github.com/ClickHouse/ClickHouse/issues/48823) . This fixes [#51173](https://github.com/ClickHouse/ClickHouse/issues/51173) . [#52308](https://github.com/ClickHouse/ClickHouse/pull/52308) ([Amos Bird](https://github.com/amosbird)).

#### Build/Testing/Packaging Improvement
* Backported in [#51886](https://github.com/ClickHouse/ClickHouse/issues/51886): Update cargo dependencies. [#51721](https://github.com/ClickHouse/ClickHouse/pull/51721) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#52909](https://github.com/ClickHouse/ClickHouse/issues/52909): Add `clickhouse-keeper-client` symlink to the clickhouse-server package. [#51882](https://github.com/ClickHouse/ClickHouse/pull/51882) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53021](https://github.com/ClickHouse/ClickHouse/issues/53021): Packing inline cache into docker images sometimes causes strange special effects. Since we don't use it at all, it's good to go. [#53008](https://github.com/ClickHouse/ClickHouse/pull/53008) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53289](https://github.com/ClickHouse/ClickHouse/issues/53289): The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud., the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#53463](https://github.com/ClickHouse/ClickHouse/issues/53463): Preserve environment parameters in `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
* Fix binary arithmetic for Nullable(IPv4) [#51642](https://github.com/ClickHouse/ClickHouse/pull/51642) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Support IPv4 and IPv6 as dictionary attributes [#51756](https://github.com/ClickHouse/ClickHouse/pull/51756) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
* Fix async connect to hosts with multiple ips [#51934](https://github.com/ClickHouse/ClickHouse/pull/51934) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix ORDER BY tuple of WINDOW functions [#52145](https://github.com/ClickHouse/ClickHouse/pull/52145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix error in `groupArrayMoving` functions [#52161](https://github.com/ClickHouse/ClickHouse/pull/52161) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable expression templates for time intervals [#52335](https://github.com/ClickHouse/ClickHouse/pull/52335) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `countSubstrings()` hang with empty needle and a column haystack [#52409](https://github.com/ClickHouse/ClickHouse/pull/52409) ([Sergei Trifonov](https://github.com/serxa)).
* Fixed inserting into Buffer engine [#52440](https://github.com/ClickHouse/ClickHouse/pull/52440) ([Vasily Nemkov](https://github.com/Enmk)).
* The implementation of AnyHash was non-conformant. [#52448](https://github.com/ClickHouse/ClickHouse/pull/52448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix possible error "Cannot drain connections: cancel first" [#52585](https://github.com/ClickHouse/ClickHouse/pull/52585) ([Kruglov Pavel](https://github.com/Avogar)).
* init and destroy ares channel on demand.. [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix crash in function `tuple` with one sparse column argument [#52659](https://github.com/ClickHouse/ClickHouse/pull/52659) ([Anton Popov](https://github.com/CurtizJ)).
* clickhouse-keeper: fix implementation of server with poll() [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
* Fix password leak in show create mysql table [#52962](https://github.com/ClickHouse/ClickHouse/pull/52962) ([Duc Canh Le](https://github.com/canhld94)).
* Not-ready Set [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Fix loading lazy database during system.table select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix fuzzer crash in parseDateTime() [#53764](https://github.com/ClickHouse/ClickHouse/pull/53764) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Pin rust nightly (to make it stable) [#51903](https://github.com/ClickHouse/ClickHouse/pull/51903) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash in comparison functions due to incorrect query analysis [#52172](https://github.com/ClickHouse/ClickHouse/pull/52172) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Less replication errors [#52382](https://github.com/ClickHouse/ClickHouse/pull/52382) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Improve logging macros [#52519](https://github.com/ClickHouse/ClickHouse/pull/52519) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix deadlocks in StorageTableFunctionProxy [#52626](https://github.com/ClickHouse/ClickHouse/pull/52626) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v23.6.3.87-stable.md (new file, 58 additions)
@@ -0,0 +1,58 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.6.3.87-stable (36911c17d0f) FIXME as compared to v23.6.2.18-stable (89f39a7ccfe)

#### Performance Improvement
* Backported in [#52751](https://github.com/ClickHouse/ClickHouse/issues/52751): Fix incorrect projection analysis which invalidates primary keys. This issue only exists when `query_plan_optimize_primary_key = 1, query_plan_optimize_projection = 1` . This fixes [#48823](https://github.com/ClickHouse/ClickHouse/issues/48823) . This fixes [#51173](https://github.com/ClickHouse/ClickHouse/issues/51173) . [#52308](https://github.com/ClickHouse/ClickHouse/pull/52308) ([Amos Bird](https://github.com/amosbird)).

#### Build/Testing/Packaging Improvement
* Backported in [#52911](https://github.com/ClickHouse/ClickHouse/issues/52911): Add `clickhouse-keeper-client` symlink to the clickhouse-server package. [#51882](https://github.com/ClickHouse/ClickHouse/pull/51882) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53023](https://github.com/ClickHouse/ClickHouse/issues/53023): Packing inline cache into docker images sometimes causes strange special effects. Since we don't use it at all, it's good to go. [#53008](https://github.com/ClickHouse/ClickHouse/pull/53008) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53290](https://github.com/ClickHouse/ClickHouse/issues/53290): The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud., the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#53465](https://github.com/ClickHouse/ClickHouse/issues/53465): Preserve environment parameters in `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
* Fix binary arithmetic for Nullable(IPv4) [#51642](https://github.com/ClickHouse/ClickHouse/pull/51642) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Support IPv4 and IPv6 as dictionary attributes [#51756](https://github.com/ClickHouse/ClickHouse/pull/51756) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Check refcount in `RemoveManyObjectStorageOperation::finalize` instead of `execute` [#51954](https://github.com/ClickHouse/ClickHouse/pull/51954) ([vdimir](https://github.com/vdimir)).
* Fix ORDER BY tuple of WINDOW functions [#52145](https://github.com/ClickHouse/ClickHouse/pull/52145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix error in `groupArrayMoving` functions [#52161](https://github.com/ClickHouse/ClickHouse/pull/52161) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable expression templates for time intervals [#52335](https://github.com/ClickHouse/ClickHouse/pull/52335) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `countSubstrings()` hang with empty needle and a column haystack [#52409](https://github.com/ClickHouse/ClickHouse/pull/52409) ([Sergei Trifonov](https://github.com/serxa)).
* Fixed inserting into Buffer engine [#52440](https://github.com/ClickHouse/ClickHouse/pull/52440) ([Vasily Nemkov](https://github.com/Enmk)).
* The implementation of AnyHash was non-conformant. [#52448](https://github.com/ClickHouse/ClickHouse/pull/52448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix abort in function `transform` [#52513](https://github.com/ClickHouse/ClickHouse/pull/52513) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix possible error "Cannot drain connections: cancel first" [#52585](https://github.com/ClickHouse/ClickHouse/pull/52585) ([Kruglov Pavel](https://github.com/Avogar)).
* init and destroy ares channel on demand.. [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix crash in function `tuple` with one sparse column argument [#52659](https://github.com/ClickHouse/ClickHouse/pull/52659) ([Anton Popov](https://github.com/CurtizJ)).
* clickhouse-keeper: fix implementation of server with poll() [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
* Fix password leak in show create mysql table [#52962](https://github.com/ClickHouse/ClickHouse/pull/52962) ([Duc Canh Le](https://github.com/canhld94)).
* Not-ready Set [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Fix loading lazy database during system.table select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix fuzzer crash in parseDateTime() [#53764](https://github.com/ClickHouse/ClickHouse/pull/53764) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix: logical error in grace hash join [#51737](https://github.com/ClickHouse/ClickHouse/pull/51737) ([Igor Nikonov](https://github.com/devcrafter)).
* Pin rust nightly (to make it stable) [#51903](https://github.com/ClickHouse/ClickHouse/pull/51903) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash in comparison functions due to incorrect query analysis [#52172](https://github.com/ClickHouse/ClickHouse/pull/52172) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Less replication errors [#52382](https://github.com/ClickHouse/ClickHouse/pull/52382) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Improve logging macros [#52519](https://github.com/ClickHouse/ClickHouse/pull/52519) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix deadlocks in StorageTableFunctionProxy [#52626](https://github.com/ClickHouse/ClickHouse/pull/52626) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Increase min protocol version for sparse serialization [#52835](https://github.com/ClickHouse/ClickHouse/pull/52835) ([Anton Popov](https://github.com/CurtizJ)).
* Docker improvements [#52869](https://github.com/ClickHouse/ClickHouse/pull/52869) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@@ -2,6 +2,7 @@
 #include "Commands.h"
 #include <Client/ReplxxLineReader.h>
 #include <Client/ClientBase.h>
+#include <Common/Config/ConfigProcessor.h>
 #include <Common/EventNotifier.h>
 #include <Common/filesystemHelpers.h>
 #include <Common/ZooKeeper/ZooKeeper.h>

@@ -155,6 +156,11 @@ void KeeperClient::defineOptions(Poco::Util::OptionSet & options)
             .argument("<seconds>")
             .binding("operation-timeout"));

+    options.addOption(
+        Poco::Util::Option("config-file", "c", "if set, will try to get a connection string from clickhouse config. default `config.xml`")
+            .argument("<file>")
+            .binding("config-file"));
+
     options.addOption(
         Poco::Util::Option("history-file", "", "set path of history file. default `~/.keeper-client-history`")
             .argument("<file>")

@@ -211,7 +217,14 @@ void KeeperClient::initialize(Poco::Util::Application & /* self */)
         }
     }

-    Poco::Logger::root().setLevel(config().getString("log-level", "error"));
+    String default_log_level;
+    if (config().has("query"))
+        /// We don't want to see any information log in query mode, unless it was set explicitly
+        default_log_level = "error";
+    else
+        default_log_level = "information";
+
+    Poco::Logger::root().setLevel(config().getString("log-level", default_log_level));

     EventNotifier::init();
 }

@@ -311,9 +324,39 @@ int KeeperClient::main(const std::vector<String> & /* args */)
         return 0;
     }

-    auto host = config().getString("host", "localhost");
-    auto port = config().getString("port", "9181");
-    zk_args.hosts = {host + ":" + port};
+    DB::ConfigProcessor config_processor(config().getString("config-file", "config.xml"));
+
+    /// This will handle a situation when clickhouse is running on the embedded config, but config.d folder is also present.
+    config_processor.registerEmbeddedConfig("config.xml", "<clickhouse/>");
+    auto clickhouse_config = config_processor.loadConfig();
+
+    Poco::Util::AbstractConfiguration::Keys keys;
+    clickhouse_config.configuration->keys("zookeeper", keys);
+
+    if (!config().has("host") && !config().has("port") && !keys.empty())
+    {
+        LOG_INFO(&Poco::Logger::get("KeeperClient"), "Found keeper node in the config.xml, will use it for connection");
+
+        for (const auto & key : keys)
+        {
+            String prefix = "zookeeper." + key;
+            String host = clickhouse_config.configuration->getString(prefix + ".host");
+            String port = clickhouse_config.configuration->getString(prefix + ".port");
+
+            if (clickhouse_config.configuration->has(prefix + ".secure"))
+                host = "secure://" + host;
+
+            zk_args.hosts.push_back(host + ":" + port);
+        }
+    }
+    else
+    {
+        String host = config().getString("host", "localhost");
+        String port = config().getString("port", "9181");
+
+        zk_args.hosts.push_back(host + ":" + port);
+    }
+
     zk_args.connection_timeout_ms = config().getInt("connection-timeout", 10) * 1000;
     zk_args.session_timeout_ms = config().getInt("session-timeout", 10) * 1000;
     zk_args.operation_timeout_ms = config().getInt("operation-timeout", 10) * 1000;
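Note on the hunk above: the new host/port discovery walks the `<zookeeper>` section of the server config through Poco's configuration API. The standalone sketch below illustrates that enumeration behavior; the XML snippet, host names, and ports are illustrative assumptions, not part of this commit. Poco reports repeated child elements as `node`, `node[1]`, and so on, which is why the code builds a per-key prefix rather than reading a single fixed path.

```cpp
// Sketch: how Poco::Util::XMLConfiguration enumerates repeated <node> elements.
// The XML content and hosts below are made up for illustration.
#include <Poco/AutoPtr.h>
#include <Poco/Util/XMLConfiguration.h>
#include <iostream>
#include <sstream>

int main()
{
    std::istringstream xml(
        "<clickhouse><zookeeper>"
        "<node><host>keeper1.example.com</host><port>9181</port></node>"
        "<node><host>keeper2.example.com</host><port>9181</port><secure>1</secure></node>"
        "</zookeeper></clickhouse>");

    Poco::AutoPtr<Poco::Util::XMLConfiguration> config = new Poco::Util::XMLConfiguration(xml);

    Poco::Util::AbstractConfiguration::Keys keys;
    config->keys("zookeeper", keys);  // yields "node", "node[1]", ...

    for (const auto & key : keys)
    {
        std::string prefix = "zookeeper." + key;
        std::string host = config->getString(prefix + ".host");
        std::string port = config->getString(prefix + ".port");
        if (config->has(prefix + ".secure"))
            host = "secure://" + host;  // same scheme-prefix trick as the diff above
        std::cout << host << ":" << port << "\n";  // e.g. keeper1.example.com:9181
    }
}
```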
@@ -169,6 +169,10 @@ public:
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "merge() with thread pool parameter isn't implemented for {} ", getName());
     }

+    /// Merges states (on which src places points to) with other states (on which dst places points to) of current aggregation function
+    /// then destroy states (on which src places points to).
+    virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, Arena * arena) const = 0;
+
     /// Serializes state (to transmit it over the network, for example).
     virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version = std::nullopt) const = 0; /// NOLINT

@@ -506,6 +510,15 @@ public:
             static_cast<const Derived *>(this)->merge(places[i] + place_offset, rhs[i], arena);
     }

+    void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, Arena * arena) const override
+    {
+        for (size_t i = 0; i < size; ++i)
+        {
+            static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena);
+            static_cast<const Derived *>(this)->destroy(rhs_places[i] + offset);
+        }
+    }
+
     void addBatchSinglePlace( /// NOLINT
         size_t row_begin,
         size_t row_end,
@@ -402,7 +402,7 @@ struct UInt128HashCRC32 : public UInt128Hash {};

 struct UInt128TrivialHash
 {
-    size_t operator()(UInt128 x) const { return x.items[0]; }
+    size_t operator()(UInt128 x) const { return x.items[UInt128::_impl::little(0)]; }
 };

 struct UUIDTrivialHash
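The one-line change above is an endianness fix: wide integers store their 64-bit limbs in host order, so `items[0]` is the low half only on little-endian machines, and the "trivial" hash of the same logical value would differ across architectures. `UInt128::_impl::little(0)` maps the logical low limb to the correct physical index on either layout. A minimal standalone sketch of the idea (names and the two-limb layout are illustrative, not ClickHouse's exact code):

```cpp
// Sketch of an endianness-stable limb lookup for a 128-bit integer.
#include <bit>
#include <cstddef>
#include <cstdint>

constexpr std::size_t item_count = 2;  // a UInt128 holds two 64-bit limbs

// Maps a "little-endian logical index" to the physical array index:
// identity on little-endian hosts, mirrored on big-endian hosts.
constexpr std::size_t little(std::size_t idx)
{
    if constexpr (std::endian::native == std::endian::little)
        return idx;
    else
        return item_count - 1 - idx;
}

struct UInt128Sketch
{
    std::uint64_t items[item_count];

    // Trivial hash: always returns the low 64 bits of the logical value,
    // regardless of which end of the array the host stores them in.
    std::uint64_t trivial_hash() const { return items[little(0)]; }
};
```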
@@ -7,6 +7,7 @@
 #include <IO/ReadHelpers.h>
 #include <IO/WriteHelpers.h>

+#include <ranges>

 namespace DB
 {

@@ -29,21 +30,13 @@ template <typename T>
 void SerializationDecimalBase<T>::serializeBinaryBulk(const IColumn & column, WriteBuffer & ostr, size_t offset, size_t limit) const
 {
     const typename ColumnType::Container & x = typeid_cast<const ColumnType &>(column).getData();
-
-    size_t size = x.size();
-
-    if (limit == 0 || offset + limit > size)
+    if (const size_t size = x.size(); limit == 0 || offset + limit > size)
         limit = size - offset;
-    if constexpr (std::endian::native == std::endian::big && sizeof(T) >= 2)
+
+    if constexpr (std::endian::native == std::endian::big)
     {
-        for (size_t i = 0; i < limit; i++)
-        {
-            auto tmp(x[offset+i]);
-            char *start = reinterpret_cast<char*>(&tmp);
-            char *end = start + sizeof(FieldType);
-            std::reverse(start, end);
-            ostr.write(reinterpret_cast<const char *>(&tmp), sizeof(FieldType));
-        }
+        std::ranges::for_each(
+            x | std::views::drop(offset) | std::views::take(limit), [&ostr](const auto & d) { writeBinaryLittleEndian(d, ostr); });
     }
     else
         ostr.write(reinterpret_cast<const char *>(&x[offset]), sizeof(FieldType) * limit);

@@ -69,20 +62,14 @@ template <typename T>
 void SerializationDecimalBase<T>::deserializeBinaryBulk(IColumn & column, ReadBuffer & istr, size_t limit, double) const
 {
     typename ColumnType::Container & x = typeid_cast<ColumnType &>(column).getData();
-    size_t initial_size = x.size();
+    const size_t initial_size = x.size();
     x.resize(initial_size + limit);
-    size_t size = istr.readBig(reinterpret_cast<char*>(&x[initial_size]), sizeof(FieldType) * limit);
-    if constexpr (std::endian::native == std::endian::big && sizeof(T) >= 2)
-    {
-        for (size_t i = 0; i < limit; i++)
-        {
-            char *start = reinterpret_cast<char*>(&x[initial_size + i]);
-            char *end = start + sizeof(FieldType);
-            std::reverse(start, end);
-        }
-    }
+
+    const size_t size = istr.readBig(reinterpret_cast<char *>(&x[initial_size]), sizeof(FieldType) * limit);
     x.resize(initial_size + size / sizeof(FieldType));
+
+    if constexpr (std::endian::native == std::endian::big)
+        std::ranges::for_each(
+            x | std::views::drop(initial_size), [](auto & d) { transformEndianness<std::endian::big, std::endian::little>(d); });
 }

 template class SerializationDecimalBase<Decimal32>;
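Both hunks above replace hand-rolled `std::reverse` byte swapping with `writeBinaryLittleEndian` / `transformEndianness`, pinning the serialized format to little-endian regardless of host byte order (a no-op on little-endian hosts, a swap on big-endian ones). A rough standalone sketch of what such a write helper does; this is an assumption for illustration, not ClickHouse's actual implementation:

```cpp
// Sketch: write a trivially copyable value in little-endian byte order,
// independent of the host's native endianness.
#include <algorithm>
#include <bit>
#include <ostream>

template <typename T>
void write_binary_little_endian(T value, std::ostream & out)
{
    if constexpr (std::endian::native == std::endian::big)
    {
        // On big-endian hosts, reverse the bytes of a local copy first.
        char * bytes = reinterpret_cast<char *>(&value);
        std::reverse(bytes, bytes + sizeof(T));
    }
    // On little-endian hosts the value is already in the on-disk layout.
    out.write(reinterpret_cast<const char *>(&value), sizeof(T));
}
```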
@@ -145,15 +145,8 @@ void SerializationNumber<T>::serializeBinaryBulk(const IColumn & column, WriteBuffer & ostr, size_t offset, size_t limit) const

     if constexpr (std::endian::native == std::endian::big && sizeof(T) >= 2)
     {
-        static constexpr auto to_little_endian = [](auto i)
-        {
-            transformEndianness<std::endian::little>(i);
-            return i;
-        };
-
         std::ranges::for_each(
-            x | std::views::drop(offset) | std::views::take(limit) | std::views::transform(to_little_endian),
-            [&ostr](const auto & i) { ostr.write(reinterpret_cast<const char *>(&i), sizeof(typename ColumnVector<T>::ValueType)); });
+            x | std::views::drop(offset) | std::views::take(limit), [&ostr](const auto & i) { writeBinaryLittleEndian(i, ostr); });
     }
     else
         ostr.write(reinterpret_cast<const char *>(&x[offset]), sizeof(typename ColumnVector<T>::ValueType) * limit);
@@ -7,6 +7,7 @@
 #include <IO/WriteHelpers.h>
 #include <Common/assert_cast.h>

+#include <ranges>

 namespace DB
 {

@@ -136,23 +137,31 @@ void SerializationUUID::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const
 void SerializationUUID::serializeBinaryBulk(const IColumn & column, WriteBuffer & ostr, size_t offset, size_t limit) const
 {
     const typename ColumnVector<UUID>::Container & x = typeid_cast<const ColumnVector<UUID> &>(column).getData();
-
-    size_t size = x.size();
-
-    if (limit == 0 || offset + limit > size)
+    if (const size_t size = x.size(); limit == 0 || offset + limit > size)
         limit = size - offset;

-    if (limit)
+    if (limit == 0)
+        return;
+
+    if constexpr (std::endian::native == std::endian::big)
+    {
+        std::ranges::for_each(
+            x | std::views::drop(offset) | std::views::take(limit), [&ostr](const auto & uuid) { writeBinaryLittleEndian(uuid, ostr); });
+    }
+    else
         ostr.write(reinterpret_cast<const char *>(&x[offset]), sizeof(UUID) * limit);
 }

 void SerializationUUID::deserializeBinaryBulk(IColumn & column, ReadBuffer & istr, size_t limit, double /*avg_value_size_hint*/) const
 {
     typename ColumnVector<UUID>::Container & x = typeid_cast<ColumnVector<UUID> &>(column).getData();
-    size_t initial_size = x.size();
+    const size_t initial_size = x.size();
     x.resize(initial_size + limit);
-    size_t size = istr.readBig(reinterpret_cast<char*>(&x[initial_size]), sizeof(UUID) * limit);
+    const size_t size = istr.readBig(reinterpret_cast<char *>(&x[initial_size]), sizeof(UUID) * limit);
     x.resize(initial_size + size / sizeof(UUID));
+
+    if constexpr (std::endian::native == std::endian::big)
+        std::ranges::for_each(
+            x | std::views::drop(initial_size), [](auto & uuid) { transformEndianness<std::endian::big, std::endian::little>(uuid); });
 }
@@ -25,11 +25,6 @@
 namespace DB
 {

-namespace ErrorCodes
-{
-    extern const int BAD_ARGUMENTS;
-}
-
 std::unique_ptr<S3ObjectStorageSettings> getSettings(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, ContextPtr context)
 {
     const Settings & settings = context->getSettingsRef();

@@ -50,6 +45,8 @@ std::unique_ptr<S3::Client> getClient(
 {
     String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
     S3::URI uri(endpoint);
+    if (!uri.key.ends_with('/'))
+        uri.key.push_back('/');

     S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
         config.getString(config_prefix + ".region", ""),

@@ -61,9 +58,6 @@ std::unique_ptr<S3::Client> getClient(
         settings.request_settings.put_request_throttler,
         uri.uri.getScheme());

-    if (uri.key.back() != '/')
-        throw Exception(ErrorCodes::BAD_ARGUMENTS, "S3 path must ends with '/', but '{}' doesn't.", uri.key);
-
 client_configuration.connectTimeoutMs = config.getUInt(config_prefix + ".connect_timeout_ms", 1000);
 client_configuration.requestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 3000);
 client_configuration.maxConnections = config.getUInt(config_prefix + ".max_connections", 100);

@@ -104,12 +104,8 @@ void registerDiskS3(DiskFactory & factory, bool global_skip_access_check)
     {
         String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
         S3::URI uri(endpoint);
-
-        if (uri.key.empty())
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "No key in S3 uri: {}", uri.uri.toString());
-
-        if (uri.key.back() != '/')
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "S3 path must ends with '/', but '{}' doesn't.", uri.key);
+        if (!uri.key.ends_with('/'))
+            uri.key.push_back('/');

         S3Capabilities s3_capabilities = getCapabilitiesFromConfig(config, config_prefix);
         std::shared_ptr<S3ObjectStorage> s3_storage;
@@ -20,8 +20,8 @@ void IndexOfBlockForNativeFormat::read(ReadBuffer & istr)
         auto & column = columns.emplace_back();
         readBinary(column.name, istr);
         readBinary(column.type, istr);
-        readBinary(column.location.offset_in_compressed_file, istr);
-        readBinary(column.location.offset_in_decompressed_block, istr);
+        readBinaryLittleEndian(column.location.offset_in_compressed_file, istr);
+        readBinaryLittleEndian(column.location.offset_in_decompressed_block, istr);
     }
 }

@@ -34,8 +34,8 @@ void IndexOfBlockForNativeFormat::write(WriteBuffer & ostr) const
         const auto & column = columns[i];
         writeBinary(column.name, ostr);
         writeBinary(column.type, ostr);
-        writeBinary(column.location.offset_in_compressed_file, ostr);
-        writeBinary(column.location.offset_in_decompressed_block, ostr);
+        writeBinaryLittleEndian(column.location.offset_in_compressed_file, ostr);
+        writeBinaryLittleEndian(column.location.offset_in_decompressed_block, ostr);
     }
 }
@ -2479,48 +2479,21 @@ void NO_INLINE Aggregator::mergeDataNullKey(
    }
}


template <typename Method, bool use_compiled_functions, bool prefetch, typename Table>
void NO_INLINE Aggregator::mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena) const
{
    if constexpr (Method::low_cardinality_optimization || Method::one_key_nullable_optimization)
        mergeDataNullKey<Method, Table>(table_dst, table_src, arena);

    PaddedPODArray<AggregateDataPtr> dst_places;
    PaddedPODArray<AggregateDataPtr> src_places;

    auto merge = [&](AggregateDataPtr & __restrict dst, AggregateDataPtr & __restrict src, bool inserted)
    {
        if (!inserted)
        {
#if USE_EMBEDDED_COMPILER
            if constexpr (use_compiled_functions)
            {
                const auto & compiled_functions = compiled_aggregate_functions_holder->compiled_aggregate_functions;
                compiled_functions.merge_aggregate_states_function(dst, src);

                if (compiled_aggregate_functions_holder->compiled_aggregate_functions.functions_count != params.aggregates_size)
                {
                    for (size_t i = 0; i < params.aggregates_size; ++i)
                    {
                        if (!is_aggregate_function_compiled[i])
                            aggregate_functions[i]->merge(
                                dst + offsets_of_aggregate_states[i], src + offsets_of_aggregate_states[i], arena);
                    }

                    for (size_t i = 0; i < params.aggregates_size; ++i)
                    {
                        if (!is_aggregate_function_compiled[i])
                            aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]);
                    }
                }
            }
            else
#endif
            {
                for (size_t i = 0; i < params.aggregates_size; ++i)
                    aggregate_functions[i]->merge(dst + offsets_of_aggregate_states[i], src + offsets_of_aggregate_states[i], arena);

                for (size_t i = 0; i < params.aggregates_size; ++i)
                    aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]);
            }
            dst_places.push_back(dst);
            src_places.push_back(src);
        }
        else
        {
@ -2531,8 +2504,30 @@ void NO_INLINE Aggregator::mergeDataImpl(Table & table_dst, Table & table_src, A
    };

    table_src.template mergeToViaEmplace<decltype(merge), prefetch>(table_dst, std::move(merge));

    table_src.clearAndShrink();

#if USE_EMBEDDED_COMPILER
    if constexpr (use_compiled_functions)
    {
        const auto & compiled_functions = compiled_aggregate_functions_holder->compiled_aggregate_functions;
        compiled_functions.merge_aggregate_states_function(dst_places.data(), src_places.data(), dst_places.size());

        for (size_t i = 0; i < params.aggregates_size; ++i)
        {
            if (!is_aggregate_function_compiled[i])
                aggregate_functions[i]->mergeAndDestroyBatch(
                    dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena);
        }

        return;
    }
#endif

    for (size_t i = 0; i < params.aggregates_size; ++i)
    {
        aggregate_functions[i]->mergeAndDestroyBatch(
            dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena);
    }
}
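The rewrite above changes mergeDataImpl from merging each pair of aggregate states inside the hash-table visitation callback to collecting the matching (dst, src) pointers first and merging them in one batch per aggregate function afterwards. A minimal sketch of that collect-then-batch pattern (simplified types, not the real Aggregator interfaces):

    #include <cstddef>
    #include <vector>

    using StatePtr = char *;

    template <typename VisitPairs, typename MergeBatch>
    void mergeCollectThenBatch(VisitPairs && visit_pairs, MergeBatch && merge_batch)
    {
        std::vector<StatePtr> dst_places;
        std::vector<StatePtr> src_places;

        /// Phase 1: the hash-table visitation only records which states match.
        visit_pairs([&](StatePtr dst, StatePtr src)
        {
            dst_places.push_back(dst);
            src_places.push_back(src);
        });

        /// Phase 2: one tight loop per aggregate function over the whole batch,
        /// mirroring what mergeAndDestroyBatch does above.
        merge_batch(dst_places.data(), src_places.data(), dst_places.size());
    }

This keeps the virtual call (or JIT entry point) out of the per-key loop, which is the point of the change.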
@ -44,10 +44,7 @@ public:
        if (database)
        {
            for (auto table_it = database->getTablesIterator(context); table_it->isValid(); table_it->next())
            {
                const auto & storage_id = table_it->table()->getStorageID();
                result.emplace_back(storage_id.getTableName());
            }
                result.emplace_back(table_it->name());
        }
        return result;
    }
@ -357,27 +357,60 @@ static void compileMergeAggregatesStates(llvm::Module & module, const std::vecto
    llvm::IRBuilder<> b(module.getContext());

    auto * aggregate_data_place_type = b.getInt8Ty()->getPointerTo();
    auto * merge_aggregates_states_func_declaration = llvm::FunctionType::get(b.getVoidTy(), { aggregate_data_place_type, aggregate_data_place_type }, false);
    auto * merge_aggregates_states_func = llvm::Function::Create(merge_aggregates_states_func_declaration, llvm::Function::ExternalLinkage, name, module);
    auto * aggregate_data_places_type = aggregate_data_place_type->getPointerTo();
    auto * size_type = b.getInt64Ty();

    auto * merge_aggregates_states_func_declaration
        = llvm::FunctionType::get(b.getVoidTy(), {aggregate_data_places_type, aggregate_data_places_type, size_type}, false);
    auto * merge_aggregates_states_func
        = llvm::Function::Create(merge_aggregates_states_func_declaration, llvm::Function::ExternalLinkage, name, module);

    auto * arguments = merge_aggregates_states_func->args().begin();
    llvm::Value * aggregate_data_place_dst_arg = arguments++;
    llvm::Value * aggregate_data_place_src_arg = arguments++;
    llvm::Value * aggregate_data_places_dst_arg = arguments++;
    llvm::Value * aggregate_data_places_src_arg = arguments++;
    llvm::Value * aggregate_places_size_arg = arguments++;

    auto * entry = llvm::BasicBlock::Create(b.getContext(), "entry", merge_aggregates_states_func);
    b.SetInsertPoint(entry);

    /// Initialize loop

    auto * end = llvm::BasicBlock::Create(b.getContext(), "end", merge_aggregates_states_func);
    auto * loop = llvm::BasicBlock::Create(b.getContext(), "loop", merge_aggregates_states_func);
    b.CreateCondBr(b.CreateICmpEQ(aggregate_places_size_arg, llvm::ConstantInt::get(size_type, 0)), end, loop);

    b.SetInsertPoint(loop);

    /// Loop

    auto * counter_phi = b.CreatePHI(size_type, 2);
    counter_phi->addIncoming(llvm::ConstantInt::get(size_type, 0), entry);

    for (const auto & function_to_compile : functions)
    {
        auto * aggregate_data_place_dst = b.CreateLoad(aggregate_data_place_type,
            b.CreateInBoundsGEP(aggregate_data_place_type->getPointerTo(), aggregate_data_places_dst_arg, counter_phi));
        auto * aggregate_data_place_src = b.CreateLoad(aggregate_data_place_type,
            b.CreateInBoundsGEP(aggregate_data_place_type->getPointerTo(), aggregate_data_places_src_arg, counter_phi));

        size_t aggregate_function_offset = function_to_compile.aggregate_data_offset;

        auto * aggregate_data_place_merge_dst_with_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_place_dst_arg, aggregate_function_offset);
        auto * aggregate_data_place_merge_src_with_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_place_src_arg, aggregate_function_offset);
        auto * aggregate_data_place_merge_dst_with_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_place_dst, aggregate_function_offset);
        auto * aggregate_data_place_merge_src_with_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_place_src, aggregate_function_offset);

        const auto * aggregate_function_ptr = function_to_compile.function;
        aggregate_function_ptr->compileMerge(b, aggregate_data_place_merge_dst_with_offset, aggregate_data_place_merge_src_with_offset);
    }

    /// End of loop

    auto * current_block = b.GetInsertBlock();
    auto * incremeted_counter = b.CreateAdd(counter_phi, llvm::ConstantInt::get(size_type, 1));
    counter_phi->addIncoming(incremeted_counter, current_block);

    b.CreateCondBr(b.CreateICmpEQ(incremeted_counter, aggregate_places_size_arg), end, loop);

    b.SetInsertPoint(end);
    b.CreateRetVoid();
}
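The generated IR above amounts to the following loop in C++ terms: the compiled merge entry point now takes two arrays of aggregate-state pointers plus a count instead of a single (dst, src) pair. A sketch, where merge_compiled_states is a stand-in for the per-function compileMerge bodies emitted inside the loop:

    #include <cstddef>

    /// Stand-in for the concatenated compiled merge bodies, one per aggregate
    /// function, each reading/writing at its own offset inside the state blob.
    void merge_compiled_states(char * dst, char * src)
    {
        (void)dst;
        (void)src; /// placeholder: the real body is generated by compileMerge
    }

    void merge_aggregate_states(char ** dst_places, char ** src_places, size_t size)
    {
        for (size_t i = 0; i < size; ++i)
            merge_compiled_states(dst_places[i], src_places[i]);
    }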
@ -56,7 +56,7 @@ struct AggregateFunctionWithOffset
using JITCreateAggregateStatesFunction = void (*)(AggregateDataPtr);
using JITAddIntoAggregateStatesFunction = void (*)(ColumnDataRowsOffset, ColumnDataRowsOffset, ColumnData *, AggregateDataPtr *);
using JITAddIntoAggregateStatesFunctionSinglePlace = void (*)(ColumnDataRowsOffset, ColumnDataRowsOffset, ColumnData *, AggregateDataPtr);
using JITMergeAggregateStatesFunction = void (*)(AggregateDataPtr, AggregateDataPtr);
using JITMergeAggregateStatesFunction = void (*)(AggregateDataPtr *, AggregateDataPtr *, size_t);
using JITInsertAggregateStatesIntoColumnsFunction = void (*)(ColumnDataRowsOffset, ColumnDataRowsOffset, ColumnData *, AggregateDataPtr *);

struct CompiledAggregateFunctions
26
src/Parsers/ASTForeignKeyDeclaration.h
Normal file
@ -0,0 +1,26 @@
#pragma once

#include <Parsers/IAST.h>

namespace DB
{

/*
 * Currently ignore the foreign key node, flesh it out when needed
 */
class ASTForeignKeyDeclaration : public IAST
{
public:
    String name;

    String getID(char) const override { return "Foreign Key"; }

    ASTPtr clone() const override
    {
        auto res = std::make_shared<ASTForeignKeyDeclaration>();
        res->name = name;
        return res;
    }
};

}
@ -2,6 +2,7 @@
#include <Parsers/ASTConstraintDeclaration.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTExpressionList.h>
#include <Parsers/ASTForeignKeyDeclaration.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTIndexDeclaration.h>
@ -224,17 +225,69 @@ bool ParserProjectionDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected &
    return true;
}

bool ParserForeignKeyDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
    ParserKeyword s_references("REFERENCES");
    ParserCompoundIdentifier table_name_p(true, true);
    ParserExpression expression_p;

    ASTPtr name;
    ASTPtr expr;

    if (!expression_p.parse(pos, expr, expected))
        return false;

    if (!s_references.ignore(pos, expected))
        return false;

    if (!table_name_p.parse(pos, name, expected))
        return false;

    if (!expression_p.parse(pos, expr, expected))
        return false;

    ParserKeyword s_on("ON");
    while (s_on.ignore(pos, expected))
    {
        ParserKeyword s_delete("DELETE");
        ParserKeyword s_update("UPDATE");

        if (!s_delete.ignore(pos, expected) && !s_update.ignore(pos, expected))
            return false;

        ParserKeyword s_restrict("RESTRICT");
        ParserKeyword s_cascade("CASCADE");
        ParserKeyword s_set_null("SET NULL");
        ParserKeyword s_no_action("NO ACTION");
        ParserKeyword s_set_default("SET DEFAULT");

        if (!s_restrict.ignore(pos, expected) && !s_cascade.ignore(pos, expected) &&
            !s_set_null.ignore(pos, expected) && !s_no_action.ignore(pos, expected) &&
            !s_set_default.ignore(pos, expected))
        {
            return false;
        }
    }

    auto foreign_key = std::make_shared<ASTForeignKeyDeclaration>();
    foreign_key->name = "Foreign Key";
    node = foreign_key;

    return true;
}

bool ParserTablePropertyDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
    ParserKeyword s_index("INDEX");
    ParserKeyword s_constraint("CONSTRAINT");
    ParserKeyword s_projection("PROJECTION");
    ParserKeyword s_foreign_key("FOREIGN KEY");
    ParserKeyword s_primary_key("PRIMARY KEY");

    ParserIndexDeclaration index_p;
    ParserConstraintDeclaration constraint_p;
    ParserProjectionDeclaration projection_p;
    ParserForeignKeyDeclaration foreign_key_p;
    ParserColumnDeclaration column_p{true, true};
    ParserExpression primary_key_p;

@ -260,6 +313,11 @@ bool ParserTablePropertyDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expecte
        if (!primary_key_p.parse(pos, new_node, expected))
            return false;
    }
    else if (s_foreign_key.ignore(pos, expected))
    {
        if (!foreign_key_p.parse(pos, new_node, expected))
            return false;
    }
    else
    {
        if (!column_p.parse(pos, new_node, expected))
@ -323,6 +381,11 @@ bool ParserTablePropertiesDeclarationList::parseImpl(Pos & pos, ASTPtr & node, E
            constraints->children.push_back(elem);
        else if (elem->as<ASTProjectionDeclaration>())
            projections->children.push_back(elem);
        else if (elem->as<ASTForeignKeyDeclaration>())
        {
            /// Ignore the foreign key node
            continue;
        }
        else if (elem->as<ASTIdentifier>() || elem->as<ASTFunction>())
        {
            if (primary_key)
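A sketch of what the new grammar accepts, using the parseQuery helper as it existed around this change (exact signature may differ across versions). Since the parser only validates the clause and then drops the node, the resulting AST, and therefore SHOW CREATE TABLE, contains no FOREIGN KEY:

    #include <Parsers/ParserCreateQuery.h>
    #include <Parsers/parseQuery.h>

    DB::ASTPtr parseChildTable()
    {
        DB::ParserCreateQuery parser;
        return DB::parseQuery(
            parser,
            "CREATE TABLE child (id int, pid int, PRIMARY KEY(id), "
            "FOREIGN KEY(pid) REFERENCES parent(pid) ON DELETE CASCADE) ENGINE = MergeTree",
            /* max_query_size = */ 0,
            /* max_parser_depth = */ 1000);
    }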
@ -403,6 +403,13 @@ protected:
    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};

class ParserForeignKeyDeclaration : public IParserBase
{
protected:
    const char * getName() const override { return "foreign key declaration"; }
    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};

class ParserTablePropertyDeclaration : public IParserBase
{
protected:
@ -1038,7 +1038,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal(
    /// MergeTreeReadPool and MergeTreeThreadSelectProcessor for parallel select.
    if (num_streams > 1 && settings.do_not_merge_across_partitions_select_final &&
        std::distance(parts_to_merge_ranges[range_index], parts_to_merge_ranges[range_index + 1]) == 1 &&
        parts_to_merge_ranges[range_index]->data_part->info.level > 0)
        parts_to_merge_ranges[range_index]->data_part->info.level > 0
        && data.merging_params.is_deleted_column.empty())
    {
        sum_marks_in_lonely_parts += parts_to_merge_ranges[range_index]->getMarksCount();
        lonely_parts.push_back(std::move(*parts_to_merge_ranges[range_index]));
@ -1094,7 +1095,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal(
    /// with level > 0 then we won't postprocess this part
    if (settings.do_not_merge_across_partitions_select_final &&
        std::distance(parts_to_merge_ranges[range_index], parts_to_merge_ranges[range_index + 1]) == 1 &&
        parts_to_merge_ranges[range_index]->data_part->info.level > 0)
        parts_to_merge_ranges[range_index]->data_part->info.level > 0 &&
        data.merging_params.is_deleted_column.empty())
    {
        partition_pipes.emplace_back(Pipe::unitePipes(std::move(pipes)));
        continue;
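Condensed, the new guard reads as the hypothetical helper below (field names follow the hunk, types simplified): a "lonely" part may skip the FINAL merge only if it is already merged (level > 0) and the table has no is_deleted column, because rows marked by a lightweight delete still need the FINAL step to drop them.

    #include <string>

    struct PartInfo { unsigned level = 0; };
    struct MergingParams { std::string is_deleted_column; };

    /// Illustrative only; the real check is written inline in the conditions above.
    bool canSkipFinalForLonelyPart(const PartInfo & info, const MergingParams & params)
    {
        return info.level > 0 && params.is_deleted_column.empty();
    }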
@ -542,15 +542,22 @@ void KafkaConsumer::storeLastReadMessageOffset()
    }
}

void KafkaConsumer::setExceptionInfo(const cppkafka::Error & err)
void KafkaConsumer::setExceptionInfo(const cppkafka::Error & err, bool with_stacktrace)
{
    setExceptionInfo(err.to_string());
    setExceptionInfo(err.to_string(), with_stacktrace);
}

void KafkaConsumer::setExceptionInfo(const String & text)
void KafkaConsumer::setExceptionInfo(const std::string & text, bool with_stacktrace)
{
    std::string enriched_text = text;

    if (with_stacktrace)
    {
        enriched_text.append(StackTrace().toString());
    }

    std::lock_guard<std::mutex> lock(exception_mutex);
    exceptions_buffer.push_back({text, static_cast<UInt64>(Poco::Timestamp().epochTime())});
    exceptions_buffer.push_back({enriched_text, static_cast<UInt64>(Poco::Timestamp().epochTime())});
}

/*
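A condensed sketch of the new behaviour (simplified types; the trace argument stands in for StackTrace().toString()): the text is enriched before the lock is taken, and callers that already embedded a stack trace pass with_stacktrace = false to avoid recording it twice.

    #include <mutex>
    #include <string>
    #include <vector>

    struct ExceptionLog
    {
        void add(std::string text, bool with_stacktrace, const std::string & trace)
        {
            if (with_stacktrace)
                text.append(trace);

            std::lock_guard<std::mutex> lock(mutex);
            buffer.push_back(std::move(text));
        }

        std::mutex mutex;
        std::vector<std::string> buffer;
    };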
@ -105,8 +105,8 @@ public:
    auto currentTimestamp() const { return current[-1].get_timestamp(); }
    const auto & currentHeaderList() const { return current[-1].get_header_list(); }
    String currentPayload() const { return current[-1].get_payload(); }
    void setExceptionInfo(const cppkafka::Error & err);
    void setExceptionInfo(const String & text);
    void setExceptionInfo(const cppkafka::Error & err, bool with_stacktrace = true);
    void setExceptionInfo(const std::string & text, bool with_stacktrace = true);
    void setRDKafkaStat(const std::string & stat_json_string)
    {
        std::lock_guard<std::mutex> lock(rdkafka_stat_mutex);
@ -732,6 +732,8 @@ void StorageKafka::threadFunc(size_t idx)
{
    assert(idx < tasks.size());
    auto task = tasks[idx];
    std::string exception_str;

    try
    {
        auto table_id = getStorageID();
@ -771,7 +773,24 @@ void StorageKafka::threadFunc(size_t idx)
    }
    catch (...)
    {
        tryLogCurrentException(__PRETTY_FUNCTION__);
        /// do bare minimum in catch block
        LockMemoryExceptionInThread lock_memory_tracker(VariableContext::Global);
        exception_str = getCurrentExceptionMessage(true /* with_stacktrace */);
    }

    if (!exception_str.empty())
    {
        LOG_ERROR(log, "{} {}", __PRETTY_FUNCTION__, exception_str);

        auto safe_consumers = getSafeConsumers();
        for (auto const & consumer_ptr_weak : safe_consumers.consumers)
        {
            /// propagate materialized view exception to all consumers
            if (auto consumer_ptr = consumer_ptr_weak.lock())
            {
                consumer_ptr->setExceptionInfo(exception_str, false /* no stacktrace, reuse passed one */);
            }
        }
    }

    mv_attached.store(false);
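The propagation loop above works through weak pointers, so a consumer that was already destroyed is simply skipped rather than kept alive just to receive the error. Sketched standalone (simplified types):

    #include <memory>
    #include <string>
    #include <vector>

    struct Consumer
    {
        void setExceptionInfo(const std::string & /*text*/, bool /*with_stacktrace*/) {}
    };

    void propagateToConsumers(const std::vector<std::weak_ptr<Consumer>> & consumers, const std::string & text)
    {
        for (const auto & weak : consumers)
            if (auto consumer = weak.lock())
                consumer->setExceptionInfo(text, /*with_stacktrace=*/ false);
    }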
@ -25,6 +25,7 @@ public:
template <typename Distance>
using AnnoyIndexWithSerializationPtr = std::shared_ptr<AnnoyIndexWithSerialization<Distance>>;


template <typename Distance>
struct MergeTreeIndexGranuleAnnoy final : public IMergeTreeIndexGranule
{
@ -43,6 +44,7 @@ struct MergeTreeIndexGranuleAnnoy final : public IMergeTreeIndexGranule
    AnnoyIndexWithSerializationPtr<Distance> index;
};


template <typename Distance>
struct MergeTreeIndexAggregatorAnnoy final : IMergeTreeIndexAggregator
{
@ -104,7 +106,6 @@ private:
    const String distance_function;
};


}

#endif
@ -35,7 +35,7 @@ USearchIndexWithSerialization<Metric>::USearchIndexWithSerialization(size_t dime
}

template <unum::usearch::metric_kind_t Metric>
void USearchIndexWithSerialization<Metric>::serialize([[maybe_unused]] WriteBuffer & ostr) const
void USearchIndexWithSerialization<Metric>::serialize(WriteBuffer & ostr) const
{
    auto callback = [&ostr](void * from, size_t n)
    {
@ -43,21 +43,19 @@ void USearchIndexWithSerialization<Metric>::serialize([[maybe_unused]] WriteBuff
        return true;
    };

    Base::stream(callback);
    Base::save_to_stream(callback);
}

template <unum::usearch::metric_kind_t Metric>
void USearchIndexWithSerialization<Metric>::deserialize([[maybe_unused]] ReadBuffer & istr)
void USearchIndexWithSerialization<Metric>::deserialize(ReadBuffer & istr)
{
    BufferBase::Position & pos = istr.position();
    unum::usearch::memory_mapped_file_t memory_map(pos, istr.buffer().size() - istr.count());
    Base::view(std::move(memory_map));
    pos += Base::stream_length();
    auto callback = [&istr](void * from, size_t n)
    {
        istr.readStrict(reinterpret_cast<char *>(from), n);
        return true;
    };

    auto copy = Base::copy();
    if (!copy)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Could not copy usearch index");
    Base::swap(copy.index);
    Base::load_from_stream(callback);
}

template <unum::usearch::metric_kind_t Metric>
@ -246,18 +244,17 @@ std::vector<size_t> MergeTreeIndexConditionUSearch::getUsefulRangesImpl(MergeTre
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to optimize query with where without distance");

    const std::vector<float> reference_vector = ann_condition.getReferenceVector();

    const auto granule = std::dynamic_pointer_cast<MergeTreeIndexGranuleUSearch<Metric>>(idx_granule);
    if (granule == nullptr)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Granule has the wrong type");

    const USearchIndexWithSerializationPtr<Metric> index = granule->index;

    if (ann_condition.getDimensions() != index->dimensions())
        throw Exception(
            ErrorCodes::INCORRECT_QUERY,
            "The dimension of the space in the request ({}) "
        throw Exception(ErrorCodes::INCORRECT_QUERY, "The dimension of the space in the request ({}) "
            "does not match the dimension in the index ({})",
            ann_condition.getDimensions(),
            index->dimensions());
            ann_condition.getDimensions(), index->dimensions());

    auto result = index->search(reference_vector.data(), limit);
    std::vector<UInt64> neighbors(result.size()); /// indexes of dots which were closest to the reference vector
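The serialization change above replaces the memory-mapped view()/stream() pair with usearch's callback-based save_to_stream()/load_from_stream(), so the granule copies and owns its bytes instead of aliasing the read buffer. The callback shapes, sketched against stdio instead of ClickHouse buffers (illustrative only, not the usearch API itself):

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        /// Writer: the index drains itself chunk by chunk through the callback.
        if (FILE * out = std::fopen("index.bin", "wb"))
        {
            auto write_cb = [&](void * from, std::size_t n) { return std::fwrite(from, 1, n, out) == n; };
            (void)write_cb; /// would be handed to save_to_stream(write_cb)
            std::fclose(out);
        }

        /// Reader: the index refills itself and owns the memory afterwards,
        /// unlike the old memory-mapped view.
        if (FILE * in = std::fopen("index.bin", "rb"))
        {
            auto read_cb = [&](void * to, std::size_t n) { return std::fread(to, 1, n, in) == n; };
            (void)read_cb; /// would be handed to load_from_stream(read_cb)
            std::fclose(in);
        }
        return 0;
    }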
@ -27,6 +27,7 @@ public:
template <unum::usearch::metric_kind_t Metric>
using USearchIndexWithSerializationPtr = std::shared_ptr<USearchIndexWithSerialization<Metric>>;


template <unum::usearch::metric_kind_t Metric>
struct MergeTreeIndexGranuleUSearch final : public IMergeTreeIndexGranule
{
@ -45,6 +46,7 @@ struct MergeTreeIndexGranuleUSearch final : public IMergeTreeIndexGranule
    USearchIndexWithSerializationPtr<Metric> index;
};


template <unum::usearch::metric_kind_t Metric>
struct MergeTreeIndexAggregatorUSearch final : IMergeTreeIndexAggregator
{
@ -64,7 +66,11 @@ struct MergeTreeIndexAggregatorUSearch final : IMergeTreeIndexAggregator
class MergeTreeIndexConditionUSearch final : public IMergeTreeIndexConditionApproximateNearestNeighbor
{
public:
    MergeTreeIndexConditionUSearch(const IndexDescription & index_description, const SelectQueryInfo & query, const String & distance_function, ContextPtr context);
    MergeTreeIndexConditionUSearch(
        const IndexDescription & index_description,
        const SelectQueryInfo & query,
        const String & distance_function,
        ContextPtr context);

    ~MergeTreeIndexConditionUSearch() override = default;

@ -98,7 +104,6 @@ private:
    const String distance_function;
};


}
@ -320,23 +320,19 @@ def test_bad_messages_parsing_exception(kafka_cluster, max_retries=20):
    )

    expected_result = """avro::Exception: Invalid data file. Magic does not match: : while parsing Kafka message (topic: Avro_err, partition: 0, offset: 0)\\'|1|1|1|default|kafka_Avro
Cannot parse input: expected \\'{\\' before: \\'qwertyuiop\\': while parsing Kafka message (topic: JSONEachRow_err, partition: 0, offset: 0)\\'|1|1|1|default|kafka_JSONEachRow
Cannot parse input: expected \\'{\\' before: \\'qwertyuiop\\': while parsing Kafka message (topic: JSONEachRow_err, partition: 0, offset: 0|1|1|1|default|kafka_JSONEachRow
"""
    retries = 0
    result_system_kafka_consumers = ""
    while True:
        result_system_kafka_consumers = instance.query(
            """
            SELECT exceptions.text[1], length(exceptions.text) > 1 AND length(exceptions.text) < 15, length(exceptions.time) > 1 AND length(exceptions.time) < 15, abs(dateDiff('second', exceptions.time[1], now())) < 40, database, table FROM system.kafka_consumers ORDER BY table, assignments.partition_id[1]
    # filter out stacktrace in exceptions.text[1] because it is hardly stable enough
    result_system_kafka_consumers = instance.query_with_retry(
        """
        SELECT substr(exceptions.text[1], 1, 131), length(exceptions.text) > 1 AND length(exceptions.text) < 15, length(exceptions.time) > 1 AND length(exceptions.time) < 15, abs(dateDiff('second', exceptions.time[1], now())) < 40, database, table FROM system.kafka_consumers WHERE table in('kafka_Avro', 'kafka_JSONEachRow') ORDER BY table, assignments.partition_id[1]
        """,
        retry_count=max_retries,
        sleep_time=1,
        check_callback=lambda res: res.replace("\t", "|") == expected_result,
    )
        result_system_kafka_consumers = result_system_kafka_consumers.replace("\t", "|")
        if result_system_kafka_consumers == expected_result or retries > max_retries:
            break
        retries += 1
        time.sleep(1)

    assert result_system_kafka_consumers == expected_result
    assert result_system_kafka_consumers.replace("\t", "|") == expected_result

    for format_name in [
        "Avro",
@ -345,6 +341,54 @@ Cannot parse input: expected \\'{\\' before: \\'qwertyuiop\\': while parsing Kaf
        kafka_delete_topic(admin_client, f"{format_name}_err")


def test_bad_messages_to_mv(kafka_cluster, max_retries=20):
    admin_client = KafkaAdminClient(
        bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)
    )

    kafka_create_topic(admin_client, "tomv")

    instance.query(
        f"""
        DROP TABLE IF EXISTS kafka_materialized;
        DROP TABLE IF EXISTS kafka_consumer;
        DROP TABLE IF EXISTS kafka1;

        CREATE TABLE kafka1 (key UInt64, value String)
        ENGINE = Kafka
        SETTINGS kafka_broker_list = 'kafka1:19092',
                 kafka_topic_list = 'tomv',
                 kafka_group_name = 'tomv',
                 kafka_format = 'JSONEachRow',
                 kafka_num_consumers = 1;

        CREATE TABLE kafka_materialized(`key` UInt64, `value` UInt64) ENGINE = Log;

        CREATE MATERIALIZED VIEW kafka_consumer TO kafka_materialized
        (`key` UInt64, `value` UInt64) AS
        SELECT key, CAST(value, 'UInt64') AS value
        FROM kafka1;
        """
    )

    kafka_produce(kafka_cluster, "tomv", ['{"key":10, "value":"aaa"}'])

    expected_result = """Code: 6. DB::Exception: Cannot parse string \\'aaa\\' as UInt64: syntax error at begin of string. Note: there are toUInt64OrZero and to|1|1|1|default|kafka1
"""
    result_system_kafka_consumers = instance.query_with_retry(
        """
        SELECT substr(exceptions.text[1], 1, 131), length(exceptions.text) > 1 AND length(exceptions.text) < 15, length(exceptions.time) > 1 AND length(exceptions.time) < 15, abs(dateDiff('second', exceptions.time[1], now())) < 40, database, table FROM system.kafka_consumers WHERE table='kafka1' ORDER BY table, assignments.partition_id[1]
        """,
        retry_count=max_retries,
        sleep_time=1,
        check_callback=lambda res: res.replace("\t", "|") == expected_result,
    )

    assert result_system_kafka_consumers.replace("\t", "|") == expected_result

    kafka_delete_topic(admin_client, "tomv")


if __name__ == "__main__":
    cluster.start()
    input("Cluster created, press any key to destroy...")
@ -141,3 +141,8 @@ Expression (Projection)
            Description: usearch GRANULARITY 4
            Parts: 1/1
            Granules: 1/4
--- Test correctness of Usearch index with > 1 mark
1 [1,0,0,0]
9000 [9000,0,0,0]
1 (1,0,0,0)
9000 (9000,0,0,0)
@ -228,3 +228,35 @@ ORDER BY L2Distance(vector, [10.0, 0.0, 10.0, 0.0])
LIMIT 3;

DROP TABLE tab;

SELECT '--- Test correctness of Usearch index with > 1 mark';

CREATE TABLE tab(id Int32, vector Array(Float32), INDEX usearch_index vector TYPE usearch()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes=0, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0; -- disable adaptive granularity due to bug
INSERT INTO tab SELECT number, [toFloat32(number), 0., 0., 0.] from numbers(10000);

SELECT *
FROM tab
ORDER BY L2Distance(vector, [1.0, 0.0, 0.0, 0.0])
LIMIT 1;

SELECT *
FROM tab
ORDER BY L2Distance(vector, [9000.0, 0.0, 0.0, 0.0])
LIMIT 1;

DROP TABLE tab;

CREATE TABLE tab(id Int32, vector Tuple(Float32, Float32, Float32, Float32), INDEX usearch_index vector TYPE usearch()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes=0, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0; -- disable adaptive granularity due to bug
INSERT INTO tab SELECT number, (toFloat32(number), 0., 0., 0.) from numbers(10000);

SELECT *
FROM tab
ORDER BY L2Distance(vector, (1.0, 0.0, 0.0, 0.0))
LIMIT 1;

SELECT *
FROM tab
ORDER BY L2Distance(vector, (9000.0, 0.0, 0.0, 0.0))
LIMIT 1;

DROP TABLE tab;
@ -0,0 +1,31 @@
--- Based on https://github.com/ClickHouse/ClickHouse/issues/49685
--- Verify that ReplacingMergeTree properly handles _is_deleted:
--- SELECT FINAL should take `_is_deleted` into consideration when there is only one partition.
-- { echoOn }

DROP TABLE IF EXISTS t;
CREATE TABLE t
(
    `account_id` UInt64,
    `_is_deleted` UInt8,
    `_version` UInt64
)
ENGINE = ReplacingMergeTree(_version, _is_deleted)
ORDER BY (account_id);
INSERT INTO t SELECT number, 0, 1 FROM numbers(1e3);
-- Mark the first 100 rows as deleted.
INSERT INTO t SELECT number, 1, 1 FROM numbers(1e2);
-- Put everything in one partition
OPTIMIZE TABLE t FINAL;
SELECT count() FROM t;
1000
SELECT count() FROM t FINAL;
900
-- Both should produce the same number of rows.
-- Previously, `do_not_merge_across_partitions_select_final = 1` showed more rows,
-- as if no rows were deleted.
SELECT count() FROM t FINAL SETTINGS do_not_merge_across_partitions_select_final = 1;
900
SELECT count() FROM t FINAL SETTINGS do_not_merge_across_partitions_select_final = 0;
900
DROP TABLE t;
@ -0,0 +1,32 @@
--- Based on https://github.com/ClickHouse/ClickHouse/issues/49685
--- Verify that ReplacingMergeTree properly handles _is_deleted:
--- SELECT FINAL should take `_is_deleted` into consideration when there is only one partition.
-- { echoOn }

DROP TABLE IF EXISTS t;
CREATE TABLE t
(
    `account_id` UInt64,
    `_is_deleted` UInt8,
    `_version` UInt64
)
ENGINE = ReplacingMergeTree(_version, _is_deleted)
ORDER BY (account_id);

INSERT INTO t SELECT number, 0, 1 FROM numbers(1e3);
-- Mark the first 100 rows as deleted.
INSERT INTO t SELECT number, 1, 1 FROM numbers(1e2);

-- Put everything in one partition
OPTIMIZE TABLE t FINAL;

SELECT count() FROM t;
SELECT count() FROM t FINAL;

-- Both should produce the same number of rows.
-- Previously, `do_not_merge_across_partitions_select_final = 1` showed more rows,
-- as if no rows were deleted.
SELECT count() FROM t FINAL SETTINGS do_not_merge_across_partitions_select_final = 1;
SELECT count() FROM t FINAL SETTINGS do_not_merge_across_partitions_select_final = 0;

DROP TABLE t;
@ -0,0 +1,3 @@
CREATE TABLE default.child\n(\n `id` Int32,\n `pid` Int32\n)\nENGINE = MergeTree\nPRIMARY KEY id\nORDER BY id\nSETTINGS index_granularity = 8192
CREATE TABLE default.child2\n(\n `id` Int32,\n `pid` Int32\n)\nENGINE = MergeTree\nPRIMARY KEY id\nORDER BY id\nSETTINGS index_granularity = 8192
CREATE TABLE default.child3\n(\n `id` Int32,\n `pid` Int32\n)\nENGINE = MergeTree\nPRIMARY KEY id\nORDER BY id\nSETTINGS index_granularity = 8192
|
||||
-- https://github.com/ClickHouse/ClickHouse/issues/53380
|
||||
|
||||
|
||||
drop table if exists parent;
|
||||
drop table if exists child;
|
||||
|
||||
create table parent (id int, primary key(id)) engine MergeTree;
|
||||
create table child (id int, pid int, primary key(id), foreign key(pid)) engine MergeTree; -- { clientError SYNTAX_ERROR }
|
||||
create table child (id int, pid int, primary key(id), foreign key(pid) references) engine MergeTree; -- { clientError SYNTAX_ERROR }
|
||||
create table child (id int, pid int, primary key(id), foreign key(pid) references parent(pid)) engine MergeTree;
|
||||
|
||||
show create table child;
|
||||
|
||||
create table child2 (id int, pid int, primary key(id),
|
||||
foreign key(pid) references parent(pid) on delete) engine MergeTree; -- { clientError SYNTAX_ERROR }
|
||||
create table child2 (id int, pid int, primary key(id),
|
||||
foreign key(pid) references parent(pid) on delete cascade) engine MergeTree;
|
||||
|
||||
show create table child2;
|
||||
|
||||
create table child3 (id int, pid int, primary key(id),
|
||||
foreign key(pid) references parent(pid) on delete cascade on update restrict) engine MergeTree;
|
||||
|
||||
show create table child3;
|
||||
|
||||
drop table child3;
|
||||
drop table child2;
|
||||
drop table child;
|
||||
drop table parent;
|
@ -3,8 +3,10 @@ v23.7.4.5-stable 2023-08-08
v23.7.3.14-stable 2023-08-05
v23.7.2.25-stable 2023-08-03
v23.7.1.2470-stable 2023-07-27
v23.6.3.87-stable 2023-08-28
v23.6.2.18-stable 2023-07-09
v23.6.1.1524-stable 2023-06-30
v23.5.5.92-stable 2023-08-28
v23.5.4.25-stable 2023-06-29
v23.5.3.24-stable 2023-06-17
v23.5.2.7-stable 2023-06-10
@ -15,6 +17,7 @@ v23.4.4.16-stable 2023-06-17
v23.4.3.48-stable 2023-06-12
v23.4.2.11-stable 2023-05-02
v23.4.1.1943-stable 2023-04-27
v23.3.11.5-lts 2023-08-28
v23.3.10.5-lts 2023-08-23
v23.3.9.55-lts 2023-08-21
v23.3.8.21-lts 2023-07-13
@ -65,6 +68,7 @@ v22.9.4.32-stable 2022-10-26
v22.9.3.18-stable 2022-09-30
v22.9.2.7-stable 2022-09-23
v22.9.1.2603-stable 2022-09-22
v22.8.21.38-lts 2023-08-28
v22.8.20.11-lts 2023-07-09
v22.8.19.10-lts 2023-06-17
v22.8.18.31-lts 2023-06-12