mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-21 15:12:02 +00:00

Merge remote-tracking branch 'origin' into mongodb_refactoring

This commit is contained in: commit 8173c4fcff

contrib/postgres (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit cfd77000af28469fcb650485bad65a35e7649e41
+Subproject commit 2e51f82e27f4be389cc239d1b8784bbf2f01d33a
@@ -38,12 +38,14 @@ set(SRCS
    "${POSTGRES_SOURCE_DIR}/src/common/fe_memutils.c"
    "${POSTGRES_SOURCE_DIR}/src/common/string.c"
    "${POSTGRES_SOURCE_DIR}/src/common/pg_get_line.c"
    "${POSTGRES_SOURCE_DIR}/src/common/pg_prng.c"
    "${POSTGRES_SOURCE_DIR}/src/common/stringinfo.c"
    "${POSTGRES_SOURCE_DIR}/src/common/psprintf.c"
    "${POSTGRES_SOURCE_DIR}/src/common/encnames.c"
    "${POSTGRES_SOURCE_DIR}/src/common/logging.c"

    "${POSTGRES_SOURCE_DIR}/src/port/snprintf.c"
    "${POSTGRES_SOURCE_DIR}/src/port/strlcat.c"
    "${POSTGRES_SOURCE_DIR}/src/port/strlcpy.c"
    "${POSTGRES_SOURCE_DIR}/src/port/strerror.c"
    "${POSTGRES_SOURCE_DIR}/src/port/inet_net_ntop.c"
@@ -52,6 +54,7 @@ set(SRCS
    "${POSTGRES_SOURCE_DIR}/src/port/noblock.c"
    "${POSTGRES_SOURCE_DIR}/src/port/pg_strong_random.c"
    "${POSTGRES_SOURCE_DIR}/src/port/pgstrcasecmp.c"
    "${POSTGRES_SOURCE_DIR}/src/port/pg_bitutils.c"
    "${POSTGRES_SOURCE_DIR}/src/port/thread.c"
    "${POSTGRES_SOURCE_DIR}/src/port/path.c"
)
471 contrib/postgres-cmake/nodes/nodetags.h Normal file
@@ -0,0 +1,471 @@
/*-------------------------------------------------------------------------
 *
 * nodetags.h
 *    Generated node infrastructure code
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * NOTES
 *  ******************************
 *  *** DO NOT EDIT THIS FILE! ***
 *  ******************************
 *
 * It has been GENERATED by src/backend/nodes/gen_node_support.pl
 *
 *-------------------------------------------------------------------------
 */
T_List = 1,
T_Alias = 2,
T_RangeVar = 3,
T_TableFunc = 4,
T_IntoClause = 5,
T_Var = 6,
T_Const = 7,
T_Param = 8,
T_Aggref = 9,
T_GroupingFunc = 10,
T_WindowFunc = 11,
T_SubscriptingRef = 12,
T_FuncExpr = 13,
T_NamedArgExpr = 14,
T_OpExpr = 15,
T_DistinctExpr = 16,
T_NullIfExpr = 17,
T_ScalarArrayOpExpr = 18,
T_BoolExpr = 19,
T_SubLink = 20,
T_SubPlan = 21,
T_AlternativeSubPlan = 22,
T_FieldSelect = 23,
T_FieldStore = 24,
T_RelabelType = 25,
T_CoerceViaIO = 26,
T_ArrayCoerceExpr = 27,
T_ConvertRowtypeExpr = 28,
T_CollateExpr = 29,
T_CaseExpr = 30,
T_CaseWhen = 31,
T_CaseTestExpr = 32,
T_ArrayExpr = 33,
T_RowExpr = 34,
T_RowCompareExpr = 35,
T_CoalesceExpr = 36,
T_MinMaxExpr = 37,
T_SQLValueFunction = 38,
T_XmlExpr = 39,
T_JsonFormat = 40,
T_JsonReturning = 41,
T_JsonValueExpr = 42,
T_JsonConstructorExpr = 43,
T_JsonIsPredicate = 44,
T_NullTest = 45,
T_BooleanTest = 46,
T_CoerceToDomain = 47,
T_CoerceToDomainValue = 48,
T_SetToDefault = 49,
T_CurrentOfExpr = 50,
T_NextValueExpr = 51,
T_InferenceElem = 52,
T_TargetEntry = 53,
T_RangeTblRef = 54,
T_JoinExpr = 55,
T_FromExpr = 56,
T_OnConflictExpr = 57,
T_Query = 58,
T_TypeName = 59,
T_ColumnRef = 60,
T_ParamRef = 61,
T_A_Expr = 62,
T_A_Const = 63,
T_TypeCast = 64,
T_CollateClause = 65,
T_RoleSpec = 66,
T_FuncCall = 67,
T_A_Star = 68,
T_A_Indices = 69,
T_A_Indirection = 70,
T_A_ArrayExpr = 71,
T_ResTarget = 72,
T_MultiAssignRef = 73,
T_SortBy = 74,
T_WindowDef = 75,
T_RangeSubselect = 76,
T_RangeFunction = 77,
T_RangeTableFunc = 78,
T_RangeTableFuncCol = 79,
T_RangeTableSample = 80,
T_ColumnDef = 81,
T_TableLikeClause = 82,
T_IndexElem = 83,
T_DefElem = 84,
T_LockingClause = 85,
T_XmlSerialize = 86,
T_PartitionElem = 87,
T_PartitionSpec = 88,
T_PartitionBoundSpec = 89,
T_PartitionRangeDatum = 90,
T_PartitionCmd = 91,
T_RangeTblEntry = 92,
T_RTEPermissionInfo = 93,
T_RangeTblFunction = 94,
T_TableSampleClause = 95,
T_WithCheckOption = 96,
T_SortGroupClause = 97,
T_GroupingSet = 98,
T_WindowClause = 99,
T_RowMarkClause = 100,
T_WithClause = 101,
T_InferClause = 102,
T_OnConflictClause = 103,
T_CTESearchClause = 104,
T_CTECycleClause = 105,
T_CommonTableExpr = 106,
T_MergeWhenClause = 107,
T_MergeAction = 108,
T_TriggerTransition = 109,
T_JsonOutput = 110,
T_JsonKeyValue = 111,
T_JsonObjectConstructor = 112,
T_JsonArrayConstructor = 113,
T_JsonArrayQueryConstructor = 114,
T_JsonAggConstructor = 115,
T_JsonObjectAgg = 116,
T_JsonArrayAgg = 117,
T_RawStmt = 118,
T_InsertStmt = 119,
T_DeleteStmt = 120,
T_UpdateStmt = 121,
T_MergeStmt = 122,
T_SelectStmt = 123,
T_SetOperationStmt = 124,
T_ReturnStmt = 125,
T_PLAssignStmt = 126,
T_CreateSchemaStmt = 127,
T_AlterTableStmt = 128,
T_ReplicaIdentityStmt = 129,
T_AlterTableCmd = 130,
T_AlterCollationStmt = 131,
T_AlterDomainStmt = 132,
T_GrantStmt = 133,
T_ObjectWithArgs = 134,
T_AccessPriv = 135,
T_GrantRoleStmt = 136,
T_AlterDefaultPrivilegesStmt = 137,
T_CopyStmt = 138,
T_VariableSetStmt = 139,
T_VariableShowStmt = 140,
T_CreateStmt = 141,
T_Constraint = 142,
T_CreateTableSpaceStmt = 143,
T_DropTableSpaceStmt = 144,
T_AlterTableSpaceOptionsStmt = 145,
T_AlterTableMoveAllStmt = 146,
T_CreateExtensionStmt = 147,
T_AlterExtensionStmt = 148,
T_AlterExtensionContentsStmt = 149,
T_CreateFdwStmt = 150,
T_AlterFdwStmt = 151,
T_CreateForeignServerStmt = 152,
T_AlterForeignServerStmt = 153,
T_CreateForeignTableStmt = 154,
T_CreateUserMappingStmt = 155,
T_AlterUserMappingStmt = 156,
T_DropUserMappingStmt = 157,
T_ImportForeignSchemaStmt = 158,
T_CreatePolicyStmt = 159,
T_AlterPolicyStmt = 160,
T_CreateAmStmt = 161,
T_CreateTrigStmt = 162,
T_CreateEventTrigStmt = 163,
T_AlterEventTrigStmt = 164,
T_CreatePLangStmt = 165,
T_CreateRoleStmt = 166,
T_AlterRoleStmt = 167,
T_AlterRoleSetStmt = 168,
T_DropRoleStmt = 169,
T_CreateSeqStmt = 170,
T_AlterSeqStmt = 171,
T_DefineStmt = 172,
T_CreateDomainStmt = 173,
T_CreateOpClassStmt = 174,
T_CreateOpClassItem = 175,
T_CreateOpFamilyStmt = 176,
T_AlterOpFamilyStmt = 177,
T_DropStmt = 178,
T_TruncateStmt = 179,
T_CommentStmt = 180,
T_SecLabelStmt = 181,
T_DeclareCursorStmt = 182,
T_ClosePortalStmt = 183,
T_FetchStmt = 184,
T_IndexStmt = 185,
T_CreateStatsStmt = 186,
T_StatsElem = 187,
T_AlterStatsStmt = 188,
T_CreateFunctionStmt = 189,
T_FunctionParameter = 190,
T_AlterFunctionStmt = 191,
T_DoStmt = 192,
T_InlineCodeBlock = 193,
T_CallStmt = 194,
T_CallContext = 195,
T_RenameStmt = 196,
T_AlterObjectDependsStmt = 197,
T_AlterObjectSchemaStmt = 198,
T_AlterOwnerStmt = 199,
T_AlterOperatorStmt = 200,
T_AlterTypeStmt = 201,
T_RuleStmt = 202,
T_NotifyStmt = 203,
T_ListenStmt = 204,
T_UnlistenStmt = 205,
T_TransactionStmt = 206,
T_CompositeTypeStmt = 207,
T_CreateEnumStmt = 208,
T_CreateRangeStmt = 209,
T_AlterEnumStmt = 210,
T_ViewStmt = 211,
T_LoadStmt = 212,
T_CreatedbStmt = 213,
T_AlterDatabaseStmt = 214,
T_AlterDatabaseRefreshCollStmt = 215,
T_AlterDatabaseSetStmt = 216,
T_DropdbStmt = 217,
T_AlterSystemStmt = 218,
T_ClusterStmt = 219,
T_VacuumStmt = 220,
T_VacuumRelation = 221,
T_ExplainStmt = 222,
T_CreateTableAsStmt = 223,
T_RefreshMatViewStmt = 224,
T_CheckPointStmt = 225,
T_DiscardStmt = 226,
T_LockStmt = 227,
T_ConstraintsSetStmt = 228,
T_ReindexStmt = 229,
T_CreateConversionStmt = 230,
T_CreateCastStmt = 231,
T_CreateTransformStmt = 232,
T_PrepareStmt = 233,
T_ExecuteStmt = 234,
T_DeallocateStmt = 235,
T_DropOwnedStmt = 236,
T_ReassignOwnedStmt = 237,
T_AlterTSDictionaryStmt = 238,
T_AlterTSConfigurationStmt = 239,
T_PublicationTable = 240,
T_PublicationObjSpec = 241,
T_CreatePublicationStmt = 242,
T_AlterPublicationStmt = 243,
T_CreateSubscriptionStmt = 244,
T_AlterSubscriptionStmt = 245,
T_DropSubscriptionStmt = 246,
T_PlannerGlobal = 247,
T_PlannerInfo = 248,
T_RelOptInfo = 249,
T_IndexOptInfo = 250,
T_ForeignKeyOptInfo = 251,
T_StatisticExtInfo = 252,
T_JoinDomain = 253,
T_EquivalenceClass = 254,
T_EquivalenceMember = 255,
T_PathKey = 256,
T_PathTarget = 257,
T_ParamPathInfo = 258,
T_Path = 259,
T_IndexPath = 260,
T_IndexClause = 261,
T_BitmapHeapPath = 262,
T_BitmapAndPath = 263,
T_BitmapOrPath = 264,
T_TidPath = 265,
T_TidRangePath = 266,
T_SubqueryScanPath = 267,
T_ForeignPath = 268,
T_CustomPath = 269,
T_AppendPath = 270,
T_MergeAppendPath = 271,
T_GroupResultPath = 272,
T_MaterialPath = 273,
T_MemoizePath = 274,
T_UniquePath = 275,
T_GatherPath = 276,
T_GatherMergePath = 277,
T_NestPath = 278,
T_MergePath = 279,
T_HashPath = 280,
T_ProjectionPath = 281,
T_ProjectSetPath = 282,
T_SortPath = 283,
T_IncrementalSortPath = 284,
T_GroupPath = 285,
T_UpperUniquePath = 286,
T_AggPath = 287,
T_GroupingSetData = 288,
T_RollupData = 289,
T_GroupingSetsPath = 290,
T_MinMaxAggPath = 291,
T_WindowAggPath = 292,
T_SetOpPath = 293,
T_RecursiveUnionPath = 294,
T_LockRowsPath = 295,
T_ModifyTablePath = 296,
T_LimitPath = 297,
T_RestrictInfo = 298,
T_PlaceHolderVar = 299,
T_SpecialJoinInfo = 300,
T_OuterJoinClauseInfo = 301,
T_AppendRelInfo = 302,
T_RowIdentityVarInfo = 303,
T_PlaceHolderInfo = 304,
T_MinMaxAggInfo = 305,
T_PlannerParamItem = 306,
T_AggInfo = 307,
T_AggTransInfo = 308,
T_PlannedStmt = 309,
T_Result = 310,
T_ProjectSet = 311,
T_ModifyTable = 312,
T_Append = 313,
T_MergeAppend = 314,
T_RecursiveUnion = 315,
T_BitmapAnd = 316,
T_BitmapOr = 317,
T_SeqScan = 318,
T_SampleScan = 319,
T_IndexScan = 320,
T_IndexOnlyScan = 321,
T_BitmapIndexScan = 322,
T_BitmapHeapScan = 323,
T_TidScan = 324,
T_TidRangeScan = 325,
T_SubqueryScan = 326,
T_FunctionScan = 327,
T_ValuesScan = 328,
T_TableFuncScan = 329,
T_CteScan = 330,
T_NamedTuplestoreScan = 331,
T_WorkTableScan = 332,
T_ForeignScan = 333,
T_CustomScan = 334,
T_NestLoop = 335,
T_NestLoopParam = 336,
T_MergeJoin = 337,
T_HashJoin = 338,
T_Material = 339,
T_Memoize = 340,
T_Sort = 341,
T_IncrementalSort = 342,
T_Group = 343,
T_Agg = 344,
T_WindowAgg = 345,
T_Unique = 346,
T_Gather = 347,
T_GatherMerge = 348,
T_Hash = 349,
T_SetOp = 350,
T_LockRows = 351,
T_Limit = 352,
T_PlanRowMark = 353,
T_PartitionPruneInfo = 354,
T_PartitionedRelPruneInfo = 355,
T_PartitionPruneStepOp = 356,
T_PartitionPruneStepCombine = 357,
T_PlanInvalItem = 358,
T_ExprState = 359,
T_IndexInfo = 360,
T_ExprContext = 361,
T_ReturnSetInfo = 362,
T_ProjectionInfo = 363,
T_JunkFilter = 364,
T_OnConflictSetState = 365,
T_MergeActionState = 366,
T_ResultRelInfo = 367,
T_EState = 368,
T_WindowFuncExprState = 369,
T_SetExprState = 370,
T_SubPlanState = 371,
T_DomainConstraintState = 372,
T_ResultState = 373,
T_ProjectSetState = 374,
T_ModifyTableState = 375,
T_AppendState = 376,
T_MergeAppendState = 377,
T_RecursiveUnionState = 378,
T_BitmapAndState = 379,
T_BitmapOrState = 380,
T_ScanState = 381,
T_SeqScanState = 382,
T_SampleScanState = 383,
T_IndexScanState = 384,
T_IndexOnlyScanState = 385,
T_BitmapIndexScanState = 386,
T_BitmapHeapScanState = 387,
T_TidScanState = 388,
T_TidRangeScanState = 389,
T_SubqueryScanState = 390,
T_FunctionScanState = 391,
T_ValuesScanState = 392,
T_TableFuncScanState = 393,
T_CteScanState = 394,
T_NamedTuplestoreScanState = 395,
T_WorkTableScanState = 396,
T_ForeignScanState = 397,
T_CustomScanState = 398,
T_JoinState = 399,
T_NestLoopState = 400,
T_MergeJoinState = 401,
T_HashJoinState = 402,
T_MaterialState = 403,
T_MemoizeState = 404,
T_SortState = 405,
T_IncrementalSortState = 406,
T_GroupState = 407,
T_AggState = 408,
T_WindowAggState = 409,
T_UniqueState = 410,
T_GatherState = 411,
T_GatherMergeState = 412,
T_HashState = 413,
T_SetOpState = 414,
T_LockRowsState = 415,
T_LimitState = 416,
T_IndexAmRoutine = 417,
T_TableAmRoutine = 418,
T_TsmRoutine = 419,
T_EventTriggerData = 420,
T_TriggerData = 421,
T_TupleTableSlot = 422,
T_FdwRoutine = 423,
T_Bitmapset = 424,
T_ExtensibleNode = 425,
T_ErrorSaveContext = 426,
T_IdentifySystemCmd = 427,
T_BaseBackupCmd = 428,
T_CreateReplicationSlotCmd = 429,
T_DropReplicationSlotCmd = 430,
T_StartReplicationCmd = 431,
T_ReadReplicationSlotCmd = 432,
T_TimeLineHistoryCmd = 433,
T_SupportRequestSimplify = 434,
T_SupportRequestSelectivity = 435,
T_SupportRequestCost = 436,
T_SupportRequestRows = 437,
T_SupportRequestIndexCondition = 438,
T_SupportRequestWFuncMonotonic = 439,
T_SupportRequestOptimizeWindowClause = 440,
T_Integer = 441,
T_Float = 442,
T_Boolean = 443,
T_String = 444,
T_BitString = 445,
T_ForeignKeyCacheInfo = 446,
T_IntList = 447,
T_OidList = 448,
T_XidList = 449,
T_AllocSetContext = 450,
T_GenerationContext = 451,
T_SlabContext = 452,
T_TIDBitmap = 453,
T_WindowObjectData = 454,
@@ -66,13 +66,6 @@
   reference if 'false' */
#define FLOAT8PASSBYVAL false

/* Define to 1 if gettimeofday() takes only 1 argument. */
/* #undef GETTIMEOFDAY_1ARG */

#ifdef GETTIMEOFDAY_1ARG
# define gettimeofday(a,b) gettimeofday(a)
#endif

/* Define to 1 if you have the `append_history' function. */
/* #undef HAVE_APPEND_HISTORY */

@@ -113,9 +106,6 @@
   don't. */
#define HAVE_DECL_SNPRINTF 1

/* Define to 1 if you have the declaration of `sigwait', and to 0 if you don't. */
#define HAVE_DECL_SIGWAIT 1

/* Define to 1 if you have the declaration of `strlcat', and to 0 if you
   don't. */
#if OS_DARWIN
@@ -139,21 +129,12 @@
/* Define to 1 if you have the <dld.h> header file. */
/* #undef HAVE_DLD_H */

/* Define to 1 if you have the `dlopen' function. */
#define HAVE_DLOPEN 1

/* Define to 1 if you have the <editline/history.h> header file. */
/* #undef HAVE_EDITLINE_HISTORY_H */

/* Define to 1 if you have the <editline/readline.h> header file. */
#define HAVE_EDITLINE_READLINE_H 1

/* Define to 1 if you have the `fdatasync' function. */
#define HAVE_FDATASYNC 1

/* Define to 1 if you have the `fls' function. */
/* #undef HAVE_FLS */

/* Define to 1 if you have the `fpclass' function. */
/* #undef HAVE_FPCLASS */

@@ -169,12 +150,6 @@
/* Define to 1 if fseeko (and presumably ftello) exists and is declared. */
#define HAVE_FSEEKO 1

/* Define to 1 if your compiler understands __func__. */
#define HAVE_FUNCNAME__FUNC 1

/* Define to 1 if your compiler understands __FUNCTION__. */
/* #undef HAVE_FUNCNAME__FUNCTION */

/* Define to 1 if you have __atomic_compare_exchange_n(int *, int *, int). */
/* #undef HAVE_GCC__ATOMIC_INT32_CAS */

@@ -194,12 +169,6 @@
/* Define to 1 if you have __sync_compare_and_swap(int64 *, int64, int64). */
/* #undef HAVE_GCC__SYNC_INT64_CAS */

/* Define to 1 if you have the `getaddrinfo' function. */
#define HAVE_GETADDRINFO 1

/* Define to 1 if you have the `gethostbyname_r' function. */
#define HAVE_GETHOSTBYNAME_R 1

/* Define to 1 if you have the `getifaddrs' function. */
#define HAVE_GETIFADDRS 1

@@ -218,17 +187,11 @@
/* Define to 1 if you have the `getpeerucred' function. */
/* #undef HAVE_GETPEERUCRED */

/* Define to 1 if you have the `getpwuid_r' function. */
#define HAVE_GETPWUID_R 1
/* Define to 1 if you have the <gssapi_ext.h> header file. */
/* #undef HAVE_GSSAPI_EXT_H */

/* Define to 1 if you have the `getrlimit' function. */
#define HAVE_GETRLIMIT 1

/* Define to 1 if you have the `getrusage' function. */
#define HAVE_GETRUSAGE 1

/* Define to 1 if you have the `gettimeofday' function. */
/* #undef HAVE_GETTIMEOFDAY */
/* Define to 1 if you have the <gssapi/gssapi_ext.h> header file. */
/* #undef HAVE_GSSAPI_GSSAPI_EXT_H */

/* Define to 1 if you have the <gssapi/gssapi.h> header file. */
//#define HAVE_GSSAPI_GSSAPI_H 0
@@ -275,18 +238,12 @@
/* Define to 1 if you have the global variable 'int timezone'. */
#define HAVE_INT_TIMEZONE 1

/* Define to 1 if you have support for IPv6. */
#define HAVE_IPV6 1

/* Define to 1 if you have isinf(). */
#define HAVE_ISINF 1

/* Define to 1 if you have the <langinfo.h> header file. */
#define HAVE_LANGINFO_H 1

/* Define to 1 if you have the <ldap.h> header file. */
//#define HAVE_LDAP_H 0

/* Define to 1 if you have the `crypto' library (-lcrypto). */
#define HAVE_LIBCRYPTO 1

@@ -351,18 +308,9 @@
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1

/* Define to 1 if the system has the type `MINIDUMP_TYPE'. */
/* #undef HAVE_MINIDUMP_TYPE */

/* Define to 1 if you have the `mkdtemp' function. */
#define HAVE_MKDTEMP 1

/* Define to 1 if you have the <netinet/in.h> header file. */
#define HAVE_NETINET_IN_H 1

/* Define to 1 if you have the <netinet/tcp.h> header file. */
#define HAVE_NETINET_TCP_H 1

/* Define to 1 if you have the <net/if.h> header file. */
#define HAVE_NET_IF_H 1

@@ -372,15 +320,6 @@
/* Define to 1 if you have the <pam/pam_appl.h> header file. */
/* #undef HAVE_PAM_PAM_APPL_H */

/* Define to 1 if you have the `poll' function. */
#define HAVE_POLL 1

/* Define to 1 if you have the <poll.h> header file. */
#define HAVE_POLL_H 1

/* Define to 1 if you have a POSIX-conforming sigwait declaration. */
/* #undef HAVE_POSIX_DECL_SIGWAIT */

/* Define to 1 if you have the `posix_fadvise' function. */
#define HAVE_POSIX_FADVISE 1

@@ -399,12 +338,6 @@
/* Define to 1 if the assembler supports PPC's LWARX mutex hint bit. */
/* #undef HAVE_PPC_LWARX_MUTEX_HINT */

/* Define to 1 if you have the `pstat' function. */
/* #undef HAVE_PSTAT */

/* Define to 1 if the PS_STRINGS thing exists. */
/* #undef HAVE_PS_STRINGS */

/* Define to 1 if you have the `pthread_is_threaded_np' function. */
/* #undef HAVE_PTHREAD_IS_THREADED_NP */

@@ -420,9 +353,6 @@
/* Define to 1 if you have the <readline/readline.h> header file. */
/* #undef HAVE_READLINE_READLINE_H */

/* Define to 1 if you have the `readlink' function. */
#define HAVE_READLINK 1

/* Define to 1 if you have the `rint' function. */
#define HAVE_RINT 1

@@ -444,12 +374,6 @@
/* Define to 1 if you have the `setproctitle' function. */
/* #undef HAVE_SETPROCTITLE */

/* Define to 1 if you have the `setsid' function. */
#define HAVE_SETSID 1

/* Define to 1 if you have the `shm_open' function. */
#define HAVE_SHM_OPEN 1

/* Define to 1 if the system has the type `socklen_t'. */
#define HAVE_SOCKLEN_T 1

@@ -468,6 +392,9 @@
/* Define to 1 if you have spinlocks. */
#define HAVE_SPINLOCKS 1

/* Define to 1 if you have the `SSL_CTX_set_cert_cb' function. */
#define HAVE_SSL_CTX_SET_CERT_CB 1

/* Define to 1 if you have the `SSL_CTX_set_num_tickets' function. */
/* #define HAVE_SSL_CTX_SET_NUM_TICKETS */

@@ -498,55 +425,19 @@
/* Define to 1 if you have the `strlcpy' function. */
/* #undef HAVE_STRLCPY */

/* Define to 1 if you have the `strtoll' function. */
#define HAVE_STRTOLL 1

#if (!OS_DARWIN)
#define HAVE_STRCHRNUL 1
#endif

/* Define to 1 if you have the `strtoq' function. */
/* #undef HAVE_STRTOQ */

/* Define to 1 if you have the `strtoull' function. */
#define HAVE_STRTOULL 1

/* Define to 1 if you have the `strtouq' function. */
/* #undef HAVE_STRTOUQ */

/* Define to 1 if the system has the type `struct addrinfo'. */
#define HAVE_STRUCT_ADDRINFO 1

/* Define to 1 if the system has the type `struct cmsgcred'. */
/* #undef HAVE_STRUCT_CMSGCRED */

/* Define to 1 if the system has the type `struct option'. */
#define HAVE_STRUCT_OPTION 1

/* Define to 1 if `sa_len' is a member of `struct sockaddr'. */
/* #undef HAVE_STRUCT_SOCKADDR_SA_LEN */

/* Define to 1 if the system has the type `struct sockaddr_storage'. */
#define HAVE_STRUCT_SOCKADDR_STORAGE 1

/* Define to 1 if `ss_family' is a member of `struct sockaddr_storage'. */
#define HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY 1

/* Define to 1 if `ss_len' is a member of `struct sockaddr_storage'. */
/* #undef HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN */

/* Define to 1 if `__ss_family' is a member of `struct sockaddr_storage'. */
/* #undef HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY */

/* Define to 1 if `__ss_len' is a member of `struct sockaddr_storage'. */
/* #undef HAVE_STRUCT_SOCKADDR_STORAGE___SS_LEN */

/* Define to 1 if `tm_zone' is a member of `struct tm'. */
#define HAVE_STRUCT_TM_TM_ZONE 1

/* Define to 1 if you have the `symlink' function. */
#define HAVE_SYMLINK 1

/* Define to 1 if you have the `sync_file_range' function. */
/* #undef HAVE_SYNC_FILE_RANGE */

@@ -556,45 +447,21 @@
/* Define to 1 if you have the <sys/ioctl.h> header file. */
#define HAVE_SYS_IOCTL_H 1

/* Define to 1 if you have the <sys/ipc.h> header file. */
#define HAVE_SYS_IPC_H 1

/* Define to 1 if you have the <sys/personality.h> header file. */
/* #undef HAVE_SYS_PERSONALITY_H */

/* Define to 1 if you have the <sys/poll.h> header file. */
#define HAVE_SYS_POLL_H 1

/* Define to 1 if you have the <sys/pstat.h> header file. */
/* #undef HAVE_SYS_PSTAT_H */

/* Define to 1 if you have the <sys/resource.h> header file. */
#define HAVE_SYS_RESOURCE_H 1

/* Define to 1 if you have the <sys/select.h> header file. */
#define HAVE_SYS_SELECT_H 1

/* Define to 1 if you have the <sys/sem.h> header file. */
#define HAVE_SYS_SEM_H 1

/* Define to 1 if you have the <sys/shm.h> header file. */
#define HAVE_SYS_SHM_H 1

/* Define to 1 if you have the <sys/signalfd.h> header file. */
/* #undef HAVE_SYS_SIGNALFD_H */

/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1

/* Define to 1 if you have the <sys/sockio.h> header file. */
/* #undef HAVE_SYS_SOCKIO_H */

/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1

/* Define to 1 if you have the <sys/tas.h> header file. */
/* #undef HAVE_SYS_TAS_H */

/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1

@@ -607,7 +474,6 @@
#endif

/* Define to 1 if you have the <sys/un.h> header file. */
#define HAVE_SYS_UN_H 1
#define _GNU_SOURCE 1 /* Needed for glibc struct ucred */

/* Define to 1 if you have the <termios.h> header file. */
@@ -644,9 +510,6 @@
/* Define to 1 if you have unix sockets. */
#define HAVE_UNIX_SOCKETS 1

/* Define to 1 if you have the `unsetenv' function. */
#define HAVE_UNSETENV 1

/* Define to 1 if the system has the type `unsigned long long int'. */
#define HAVE_UNSIGNED_LONG_LONG_INT 1

@@ -674,6 +537,9 @@
/* Define to 1 if you have the <uuid/uuid.h> header file. */
/* #undef HAVE_UUID_UUID_H */

/* Define to 1 if your compiler knows the visibility("hidden") attribute. */
/* #undef HAVE_VISIBILITY_ATTRIBUTE */

/* Define to 1 if you have the `vsnprintf' function. */
#define HAVE_VSNPRINTF 1

@@ -686,12 +552,6 @@
/* Define to 1 if you have the `wcstombs_l' function. */
/* #undef HAVE_WCSTOMBS_L */

/* Define to 1 if you have the <wctype.h> header file. */
#define HAVE_WCTYPE_H 1

/* Define to 1 if you have the <winldap.h> header file. */
/* #undef HAVE_WINLDAP_H */

/* Define to 1 if your compiler understands __builtin_bswap32. */
/* #undef HAVE__BUILTIN_BSWAP32 */
@@ -14,5 +14,6 @@ git config submodule."contrib/icu".update '!../sparse-checkout/update-icu.sh'
git config submodule."contrib/boost".update '!../sparse-checkout/update-boost.sh'
git config submodule."contrib/aws-s2n-tls".update '!../sparse-checkout/update-aws-s2n-tls.sh'
git config submodule."contrib/protobuf".update '!../sparse-checkout/update-protobuf.sh'
git config submodule."contrib/postgres".update '!../sparse-checkout/update-postgres.sh'
git config submodule."contrib/libxml2".update '!../sparse-checkout/update-libxml2.sh'
git config submodule."contrib/brotli".update '!../sparse-checkout/update-brotli.sh'
16 contrib/sparse-checkout/update-postgres.sh Executable file
@@ -0,0 +1,16 @@
#!/bin/sh

echo "Using sparse checkout for postgres"

FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
echo '!/*' > $FILES_TO_CHECKOUT
echo '/src/interfaces/libpq/*' >> $FILES_TO_CHECKOUT
echo '!/src/interfaces/libpq/*/*' >> $FILES_TO_CHECKOUT
echo '/src/common/*' >> $FILES_TO_CHECKOUT
echo '!/src/port/*/*' >> $FILES_TO_CHECKOUT
echo '/src/port/*' >> $FILES_TO_CHECKOUT
echo '/src/include/*' >> $FILES_TO_CHECKOUT

git config core.sparsecheckout true
git checkout $1
git read-tree -mu HEAD
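Note on how these two pieces fit together: the `submodule."contrib/postgres".update '!../sparse-checkout/update-postgres.sh'` line above tells `git submodule update` to run this script instead of doing a plain checkout; git invokes the configured command with the target commit hash as its argument, which is the `$1` the script checks out after writing the sparse-checkout patterns. The net effect is that only `src/interfaces/libpq`, `src/common`, `src/port`, and `src/include` are materialized from the postgres tree.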
@ -2088,13 +2088,14 @@ Calculate AUC (Area Under the Curve, which is a concept in machine learning, see
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
arrayAUC(arr_scores, arr_labels)
|
||||
arrayAUC(arr_scores, arr_labels[, scale])
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `arr_scores` — scores prediction model gives.
|
||||
- `arr_labels` — labels of samples, usually 1 for positive sample and 0 for negative sample.
|
||||
- `scale` - Optional. Wether to return the normalized area. Default value: true. [Bool]
|
||||
|
||||
**Returned value**
|
||||
|
||||
|
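For illustration, a hypothetical call with and without the new optional argument (result values omitted):

``` sql
SELECT arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]);        -- normalized AUC (scale = true, the default)
SELECT arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], false); -- raw (unscaled) area under the curve
```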
@@ -19,6 +19,7 @@
#include <Common/Exception.h>
#include <Common/randomSeed.h>
#include <Common/clearPasswordFromCommandLine.h>
#include <Core/Settings.h>
#include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/ReadHelpers.h>
@@ -36,7 +37,6 @@
#include <Common/StudentTTest.h>
#include <Common/CurrentMetrics.h>
#include <Common/ErrorCodes.h>
#include <Core/BaseSettingsProgramOptions.h>


/** A tool for evaluating ClickHouse performance.
@@ -58,8 +58,9 @@ static constexpr std::string_view DEFAULT_CLIENT_NAME = "benchmark";

namespace ErrorCodes
{
    extern const int CANNOT_BLOCK_SIGNAL;
    extern const int EMPTY_DATA_PASSED;
    extern const int BAD_ARGUMENTS;
    extern const int CANNOT_BLOCK_SIGNAL;
    extern const int EMPTY_DATA_PASSED;
}

class Benchmark : public Poco::Util::Application
@@ -637,7 +638,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
    ;

    Settings settings;
    addProgramOptions(settings, desc);
    settings.addToProgramOptions(desc);

    boost::program_options::variables_map options;
    boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options);
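For context, a minimal self-contained sketch of what a settings-to-command-line bridge looks like with boost::program_options; `settings.addToProgramOptions(desc)` does the equivalent for every known setting inside ClickHouse (the option name below is purely illustrative):

```cpp
#include <boost/program_options.hpp>
#include <iostream>

namespace po = boost::program_options;

int main(int argc, char ** argv)
{
    po::options_description desc("Options");
    // Each known setting is registered as a named option with its type.
    desc.add_options()("max_threads", po::value<unsigned>(), "illustrative setting max_threads");

    po::variables_map options;
    po::store(po::parse_command_line(argc, argv, desc), options);

    if (options.count("max_threads"))
        std::cout << "max_threads=" << options["max_threads"].as<unsigned>() << '\n';
}
```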
@@ -56,6 +56,12 @@ using namespace std::literals;

namespace DB
{
namespace Setting
{
    extern const SettingsDialect dialect;
    extern const SettingsBool use_client_time_zone;
}

namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
@@ -307,9 +313,9 @@ void Client::initialize(Poco::Util::Application & self)
        config().setString("password", env_password);

    /// Settings and limits could be specified in the config file, but settings passed on the command line have higher priority.
    for (const auto & setting : global_context->getSettingsRef().allUnchanged())
    for (const auto & setting : global_context->getSettingsRef().getUnchangedNames())
    {
        const auto & name = setting.getName();
        String name{setting};
        if (config().has(name))
            global_context->setSetting(name, config().getString(name));
    }
@@ -525,7 +531,7 @@ void Client::connect()
    }
}

    if (!client_context->getSettingsRef().use_client_time_zone)
    if (!client_context->getSettingsRef()[Setting::use_client_time_zone])
    {
        const auto & time_zone = connection->getServerTimezone(connection_parameters.timeouts);
        if (!time_zone.empty())
@@ -730,7 +736,7 @@ bool Client::processWithFuzzing(const String & full_query)
    }

    // Kusto is not a subject for fuzzing (yet).
    if (client_context->getSettingsRef().dialect == DB::Dialect::kusto)
    if (client_context->getSettingsRef()[Setting::dialect] == DB::Dialect::kusto)
    {
        return true;
    }
@@ -1073,17 +1079,7 @@ void Client::processOptions(const OptionsDescription & options_description,

    /// Copy settings-related program options to config.
    /// TODO: Is this code necessary?
    for (const auto & setting : global_context->getSettingsRef().all())
    {
        const auto & name = setting.getName();
        if (options.count(name))
        {
            if (allow_repeated_settings)
                config().setString(name, options[name].as<Strings>().back());
            else
                config().setString(name, options[name].as<String>());
        }
    }
    global_context->getSettingsRef().addToClientOptions(config(), options, allow_repeated_settings);

    if (options.count("config-file") && options.count("config"))
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Two or more configuration files referenced in arguments");
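A hedged sketch of the shape of the new API used above: instead of iterating setting field objects and calling `getName()`, the caller receives plain names and filters them. Types here are stand-ins, not ClickHouse's:

```cpp
#include <set>
#include <string>
#include <vector>

// Return the names of all settings the user has not explicitly changed,
// so their values can be taken from the config file instead.
std::vector<std::string> getUnchangedNames(const std::set<std::string> & changed,
                                           const std::vector<std::string> & all_names)
{
    std::vector<std::string> result;
    for (const auto & name : all_names)
        if (!changed.contains(name))
            result.push_back(name);
    return result;
}
```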
@@ -3,11 +3,12 @@
#include <string_view>
#include <boost/program_options.hpp>

#include <IO/copyData.h>
#include <Core/Settings.h>
#include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromFileDescriptor.h>
#include <IO/WriteBufferFromOStream.h>
#include <IO/copyData.h>
#include <Interpreters/registerInterpreters.h>
#include <Parsers/ASTInsertQuery.h>
#include <Parsers/ParserQuery.h>
@@ -17,7 +18,6 @@
#include <Common/ErrorCodes.h>
#include <Common/StringUtils.h>
#include <Common/TerminalSize.h>
#include <Core/BaseSettingsProgramOptions.h>

#include <Interpreters/Context.h>
#include <Functions/FunctionFactory.h>
@@ -35,6 +35,15 @@
#include <Formats/registerFormats.h>
#include <Processors/Transforms/getSourceFromASTInsertQuery.h>

namespace DB
{
namespace Setting
{
    extern const SettingsUInt64 max_parser_backtracks;
    extern const SettingsUInt64 max_parser_depth;
    extern const SettingsUInt64 max_query_size;
}
}

namespace DB::ErrorCodes
{
@@ -99,12 +108,8 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
    ;

    Settings cmd_settings;
    for (const auto & field : cmd_settings.all())
    {
        std::string_view name = field.getName();
        if (name == "max_parser_depth" || name == "max_query_size")
            addProgramOption(cmd_settings, desc, name, field);
    }
    cmd_settings.addToProgramOptions("max_parser_depth", desc);
    cmd_settings.addToProgramOptions("max_query_size", desc);

    boost::program_options::variables_map options;
    boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options);
@@ -240,7 +245,14 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
    size_t approx_query_length = multiple ? find_first_symbols<';'>(pos, end) - pos : end - pos;

    ASTPtr res = parseQueryAndMovePosition(
        parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth, cmd_settings.max_parser_backtracks);
        parser,
        pos,
        end,
        "query",
        multiple,
        cmd_settings[Setting::max_query_size],
        cmd_settings[Setting::max_parser_depth],
        cmd_settings[Setting::max_parser_backtracks]);

    std::unique_ptr<ReadBuffer> insert_query_payload;
    /// If the query is INSERT ... VALUES, then we will try to parse the data.
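The three values being threaded through here are parser guard-rails. A generic sketch of how a depth limit works in a recursive-descent parser (hypothetical code, not ClickHouse's `parseQueryAndMovePosition`):

```cpp
#include <cstddef>
#include <stdexcept>
#include <string_view>

// Parse nested parenthesized expressions, refusing to recurse past max_depth.
void parseExpr(std::string_view & s, size_t depth, size_t max_depth)
{
    if (depth > max_depth)
        throw std::runtime_error("max_parser_depth exceeded");
    if (!s.empty() && s.front() == '(')
    {
        s.remove_prefix(1);
        parseExpr(s, depth + 1, max_depth);   // nested subexpression
        if (s.empty() || s.front() != ')')
            throw std::runtime_error("expected ')'");
        s.remove_prefix(1);
    }
}
```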
@@ -71,6 +71,11 @@ namespace CurrentMetrics

namespace DB
{
namespace Setting
{
    extern const SettingsBool allow_introspection_functions;
    extern const SettingsLocalFSReadMethod storage_file_read_method;
}

namespace ErrorCodes
{
@@ -83,8 +88,8 @@ void applySettingsOverridesForLocal(ContextMutablePtr context)
{
    Settings settings = context->getSettingsCopy();

    settings.allow_introspection_functions = true;
    settings.storage_file_read_method = LocalFSReadMethod::mmap;
    settings[Setting::allow_introspection_functions] = true;
    settings[Setting::storage_file_read_method] = LocalFSReadMethod::mmap;

    context->setSettings(settings);
}
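This hunk shows the access-pattern change the whole PR applies: settings move from direct member access (`settings.readonly`) to an index operator keyed by a constant declared in `namespace Setting` (`settings[Setting::readonly]`). A minimal self-contained sketch of the idea, with illustrative types that are not ClickHouse's:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct SettingName { std::string name; };   // stands in for SettingsBool, SettingsUInt64, ...

namespace Setting { const SettingName readonly{"readonly"}; }

struct Settings
{
    std::map<std::string, uint64_t> values;
    // New style: look a setting up through a declared constant instead of a member.
    uint64_t & operator[](const SettingName & s) { return values[s.name]; }
};

int main()
{
    Settings settings;
    settings[Setting::readonly] = 2;
    std::cout << settings[Setting::readonly] << '\n';
}
```

One plausible motivation for such a layout is that translation units only declare (`extern`) the constants they use, instead of including a header defining every settings member.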
@@ -4,7 +4,6 @@
#include <Client/LocalConnection.h>

#include <Core/ServerSettings.h>
#include <Core/Settings.h>
#include <Interpreters/Context.h>
#include <Loggers/Loggers.h>
#include <Common/InterruptListener.h>
@@ -2,6 +2,7 @@

#if USE_ODBC

#include <Core/NamesAndTypes.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeNullable.h>
@@ -27,6 +28,10 @@

namespace DB
{
namespace Setting
{
    extern const SettingsUInt64 odbc_bridge_connection_pool_size;
}

namespace ErrorCodes
{
@@ -129,8 +134,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
    const bool external_table_functions_use_nulls = Poco::NumberParser::parseBool(params.get("external_table_functions_use_nulls", "false"));

    auto connection_holder = ODBCPooledConnectionFactory::instance().get(
        validateODBCConnectionString(connection_string),
        getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
        validateODBCConnectionString(connection_string), getContext()->getSettingsRef()[Setting::odbc_bridge_connection_pool_size]);

    /// In XDBC tables it is allowed to pass either database_name or schema_name in the table definition, but not both of them.
    /// They are both passed as the 'schema' parameter in the request URL, so it is not clear whether database_name or schema_name was passed.
@@ -19,6 +19,11 @@

namespace DB
{
namespace Setting
{
    extern const SettingsUInt64 odbc_bridge_connection_pool_size;
}

void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/)
{
    HTMLForm params(getContext()->getSettingsRef(), request, request.getStream());
@@ -68,7 +73,7 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ
    nanodbc::ConnectionHolderPtr connection;
    if (use_connection_pooling)
        connection = ODBCPooledConnectionFactory::instance().get(
            validateODBCConnectionString(connection_string), getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
            validateODBCConnectionString(connection_string), getContext()->getSettingsRef()[Setting::odbc_bridge_connection_pool_size]);
    else
        connection = std::make_shared<nanodbc::ConnectionHolder>(validateODBCConnectionString(connection_string));
@@ -1,27 +1,28 @@
#include "MainHandler.h"

#include "validateODBCConnectionString.h"
#include "ODBCSource.h"
#include "ODBCSink.h"
#include "getIdentifierQuote.h"
#include <Core/Settings.h>
#include <DataTypes/DataTypeFactory.h>
#include <Formats/FormatFactory.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <Core/Settings.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromIStream.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Poco/Net/HTMLForm.h>
#include <Poco/ThreadPool.h>
#include <QueryPipeline/QueryPipeline.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Processors/Executors/CompletedPipelineExecutor.h>
#include <Processors/Formats/IInputFormat.h>
#include <QueryPipeline/QueryPipeline.h>
#include <Server/HTTP/HTMLForm.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <Poco/Net/HTMLForm.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Poco/ThreadPool.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/logger_useful.h>
#include <Server/HTTP/HTMLForm.h>
#include "ODBCSink.h"
#include "ODBCSource.h"
#include "config.h"
#include "getIdentifierQuote.h"
#include "validateODBCConnectionString.h"

#include <mutex>
#include <memory>
@@ -29,6 +30,10 @@

namespace DB
{
namespace Setting
{
    extern const SettingsUInt64 odbc_bridge_connection_pool_size;
}

namespace
{
@@ -139,7 +144,7 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
    nanodbc::ConnectionHolderPtr connection_handler;
    if (use_connection_pooling)
        connection_handler = ODBCPooledConnectionFactory::instance().get(
            validateODBCConnectionString(connection_string), getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
            validateODBCConnectionString(connection_string), getContext()->getSettingsRef()[Setting::odbc_bridge_connection_pool_size]);
    else
        connection_handler = std::make_shared<nanodbc::ConnectionHolder>(validateODBCConnectionString(connection_string));
@@ -20,6 +20,11 @@

namespace DB
{
namespace Setting
{
    extern const SettingsUInt64 odbc_bridge_connection_pool_size;
}

namespace
{
    bool isSchemaAllowed(nanodbc::ConnectionHolderPtr connection_holder)
@@ -82,7 +87,7 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer

    if (use_connection_pooling)
        connection = ODBCPooledConnectionFactory::instance().get(
            validateODBCConnectionString(connection_string), getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
            validateODBCConnectionString(connection_string), getContext()->getSettingsRef()[Setting::odbc_bridge_connection_pool_size]);
    else
        connection = std::make_shared<nanodbc::ConnectionHolder>(validateODBCConnectionString(connection_string));
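The `ODBCPooledConnectionFactory::instance().get(connection_string, pool_size)` calls in these handlers share one idea: a connection pool per connection string, capped by the `odbc_bridge_connection_pool_size` setting. A generic illustration of such a keyed, bounded pool (not the bridge's real classes, which also handle connection health and blocking):

```cpp
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

struct Connection { std::string connection_string; };   // toy stand-in for a pooled resource

class PooledConnectionFactory
{
public:
    static PooledConnectionFactory & instance()
    {
        static PooledConnectionFactory factory;   // process-wide singleton, like the bridge's
        return factory;
    }

    std::shared_ptr<Connection> get(const std::string & connection_string, size_t pool_size)
    {
        std::lock_guard lock(mutex);
        auto & pool = pools[connection_string];
        if (pool.size() < pool_size)
            pool.push_back(std::make_shared<Connection>(Connection{connection_string}));
        return pool[next++ % pool.size()];   // hand out pooled connections round-robin
    }

private:
    std::mutex mutex;
    size_t next = 0;
    std::map<std::string, std::vector<std::shared_ptr<Connection>>> pools;
};
```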
@@ -148,6 +148,18 @@
/// A minimal file used when the server is run without installation
INCBIN(resource_embedded_xml, SOURCE_DIR "/programs/server/embedded.xml");

namespace DB
{
namespace Setting
{
    extern const SettingsSeconds http_receive_timeout;
    extern const SettingsSeconds http_send_timeout;
    extern const SettingsSeconds receive_timeout;
    extern const SettingsSeconds send_timeout;
}

}

namespace CurrentMetrics
{
    extern const Metric Revision;
@@ -1831,10 +1843,13 @@ try
                "Keeper (tcp): " + address.toString(),
                std::make_unique<TCPServer>(
                    new KeeperTCPHandlerFactory(
                        config_getter, global_context->getKeeperDispatcher(),
                        global_context->getSettingsRef().receive_timeout.totalSeconds(),
                        global_context->getSettingsRef().send_timeout.totalSeconds(),
                        false), server_pool, socket));
                        config_getter,
                        global_context->getKeeperDispatcher(),
                        global_context->getSettingsRef()[Setting::receive_timeout].totalSeconds(),
                        global_context->getSettingsRef()[Setting::send_timeout].totalSeconds(),
                        false),
                    server_pool,
                    socket));
        });

        const char * secure_port_name = "keeper_server.tcp_port_secure";
@@ -1854,9 +1869,13 @@ try
                "Keeper with secure protocol (tcp_secure): " + address.toString(),
                std::make_unique<TCPServer>(
                    new KeeperTCPHandlerFactory(
                        config_getter, global_context->getKeeperDispatcher(),
                        global_context->getSettingsRef().receive_timeout.totalSeconds(),
                        global_context->getSettingsRef().send_timeout.totalSeconds(), true), server_pool, socket));
                        config_getter,
                        global_context->getKeeperDispatcher(),
                        global_context->getSettingsRef()[Setting::receive_timeout].totalSeconds(),
                        global_context->getSettingsRef()[Setting::send_timeout].totalSeconds(),
                        true),
                    server_pool,
                    socket));
#else
            UNUSED(port);
            throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
@@ -2430,7 +2449,7 @@ void Server::createServers(
    const Settings & settings = global_context->getSettingsRef();

    Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
    http_params->setTimeout(settings.http_receive_timeout);
    http_params->setTimeout(settings[Setting::http_receive_timeout]);
    http_params->setKeepAliveTimeout(global_context->getServerSettings().keep_alive_timeout);
    http_params->setMaxKeepAliveRequests(static_cast<int>(global_context->getServerSettings().max_keep_alive_requests));

@@ -2469,8 +2488,8 @@ void Server::createServers(
        {
            Poco::Net::ServerSocket socket;
            auto address = socketBindListen(config, socket, host, port, is_secure);
            socket.setReceiveTimeout(settings.receive_timeout);
            socket.setSendTimeout(settings.send_timeout);
            socket.setReceiveTimeout(settings[Setting::receive_timeout]);
            socket.setSendTimeout(settings[Setting::send_timeout]);

            return ProtocolServerAdapter(
                host,
@@ -2497,8 +2516,8 @@ void Server::createServers(
        {
            Poco::Net::ServerSocket socket;
            auto address = socketBindListen(config, socket, listen_host, port);
            socket.setReceiveTimeout(settings.http_receive_timeout);
            socket.setSendTimeout(settings.http_send_timeout);
            socket.setReceiveTimeout(settings[Setting::http_receive_timeout]);
            socket.setSendTimeout(settings[Setting::http_send_timeout]);

            return ProtocolServerAdapter(
                listen_host,
@@ -2518,8 +2537,8 @@
#if USE_SSL
            Poco::Net::SecureServerSocket socket;
            auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
            socket.setReceiveTimeout(settings.http_receive_timeout);
            socket.setSendTimeout(settings.http_send_timeout);
            socket.setReceiveTimeout(settings[Setting::http_receive_timeout]);
            socket.setSendTimeout(settings[Setting::http_send_timeout]);
            return ProtocolServerAdapter(
                listen_host,
                port_name,
@@ -2541,8 +2560,8 @@
        {
            Poco::Net::ServerSocket socket;
            auto address = socketBindListen(config, socket, listen_host, port);
            socket.setReceiveTimeout(settings.receive_timeout);
            socket.setSendTimeout(settings.send_timeout);
            socket.setReceiveTimeout(settings[Setting::receive_timeout]);
            socket.setSendTimeout(settings[Setting::send_timeout]);
            return ProtocolServerAdapter(
                listen_host,
                port_name,
@@ -2563,8 +2582,8 @@
        {
            Poco::Net::ServerSocket socket;
            auto address = socketBindListen(config, socket, listen_host, port);
            socket.setReceiveTimeout(settings.receive_timeout);
            socket.setSendTimeout(settings.send_timeout);
            socket.setReceiveTimeout(settings[Setting::receive_timeout]);
            socket.setSendTimeout(settings[Setting::send_timeout]);
            return ProtocolServerAdapter(
                listen_host,
                port_name,
@@ -2586,8 +2605,8 @@
#if USE_SSL
            Poco::Net::SecureServerSocket socket;
            auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
            socket.setReceiveTimeout(settings.receive_timeout);
            socket.setSendTimeout(settings.send_timeout);
            socket.setReceiveTimeout(settings[Setting::receive_timeout]);
            socket.setSendTimeout(settings[Setting::send_timeout]);
            return ProtocolServerAdapter(
                listen_host,
                port_name,
@@ -2612,7 +2631,7 @@
            Poco::Net::ServerSocket socket;
            auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
            socket.setReceiveTimeout(Poco::Timespan());
            socket.setSendTimeout(settings.send_timeout);
            socket.setSendTimeout(settings[Setting::send_timeout]);
            return ProtocolServerAdapter(
                listen_host,
                port_name,
@@ -2629,7 +2648,7 @@
            Poco::Net::ServerSocket socket;
            auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
            socket.setReceiveTimeout(Poco::Timespan());
            socket.setSendTimeout(settings.send_timeout);
            socket.setSendTimeout(settings[Setting::send_timeout]);
            return ProtocolServerAdapter(
                listen_host,
                port_name,
@@ -2661,8 +2680,8 @@
        {
            Poco::Net::ServerSocket socket;
            auto address = socketBindListen(config, socket, listen_host, port);
            socket.setReceiveTimeout(settings.http_receive_timeout);
            socket.setSendTimeout(settings.http_send_timeout);
            socket.setReceiveTimeout(settings[Setting::http_receive_timeout]);
            socket.setSendTimeout(settings[Setting::http_send_timeout]);
            return ProtocolServerAdapter(
                listen_host,
                port_name,
@@ -2687,7 +2706,7 @@ void Server::createInterserverServers(
    const Settings & settings = global_context->getSettingsRef();

    Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
    http_params->setTimeout(settings.http_receive_timeout);
    http_params->setTimeout(settings[Setting::http_receive_timeout]);
    http_params->setKeepAliveTimeout(global_context->getServerSettings().keep_alive_timeout);

    /// Now iterate over interserver_listen_hosts
@@ -2703,8 +2722,8 @@
        {
            Poco::Net::ServerSocket socket;
            auto address = socketBindListen(config, socket, interserver_listen_host, port);
            socket.setReceiveTimeout(settings.http_receive_timeout);
            socket.setSendTimeout(settings.http_send_timeout);
            socket.setReceiveTimeout(settings[Setting::http_receive_timeout]);
            socket.setSendTimeout(settings[Setting::http_send_timeout]);
            return ProtocolServerAdapter(
                interserver_listen_host,
                port_name,
@@ -2728,8 +2747,8 @@
#if USE_SSL
            Poco::Net::SecureServerSocket socket;
            auto address = socketBindListen(config, socket, interserver_listen_host, port, /* secure = */ true);
            socket.setReceiveTimeout(settings.http_receive_timeout);
            socket.setSendTimeout(settings.http_send_timeout);
            socket.setReceiveTimeout(settings[Setting::http_receive_timeout]);
            socket.setSendTimeout(settings[Setting::http_send_timeout]);
            return ProtocolServerAdapter(
                interserver_listen_host,
                port_name,
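The same two timeout calls recur for every listener above; factored out, the pattern is just the following (requires Poco; a sketch, not a helper that exists in the codebase):

```cpp
#include <Poco/Net/ServerSocket.h>
#include <Poco/Timespan.h>

// Apply the configured receive/send timeouts to a freshly bound listening socket.
void applyTimeouts(Poco::Net::ServerSocket & socket, Poco::Timespan receive, Poco::Timespan send)
{
    socket.setReceiveTimeout(receive);   // e.g. settings[Setting::receive_timeout]
    socket.setSendTimeout(send);         // e.g. settings[Setting::send_timeout]
}
```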
@@ -1,10 +1,17 @@
#include <Access/ContextAccessParams.h>
#include <Core/Settings.h>
#include <IO/Operators.h>
#include <Common/typeid_cast.h>


namespace DB
{
namespace Setting
{
    extern const SettingsBool allow_ddl;
    extern const SettingsBool allow_introspection_functions;
    extern const SettingsUInt64 readonly;
}

ContextAccessParams::ContextAccessParams(
    std::optional<UUID> user_id_,
@@ -18,9 +25,9 @@ ContextAccessParams::ContextAccessParams(
    , full_access(full_access_)
    , use_default_roles(use_default_roles_)
    , current_roles(current_roles_)
    , readonly(settings_.readonly)
    , allow_ddl(settings_.allow_ddl)
    , allow_introspection(settings_.allow_introspection_functions)
    , readonly(settings_[Setting::readonly])
    , allow_ddl(settings_[Setting::allow_ddl])
    , allow_introspection(settings_[Setting::allow_introspection_functions])
    , current_database(current_database_)
    , interface(client_info_.interface)
    , http_method(client_info_.http_method)
@@ -10,10 +10,15 @@
#include <Common/SettingSource.h>
#include <IO/WriteHelpers.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <boost/range/algorithm_ext/erase.hpp>

namespace DB
{
namespace Setting
{
    extern const SettingsBool allow_ddl;
    extern const SettingsUInt64 readonly;
}

namespace ErrorCodes
{
    extern const int READONLY;
@@ -164,7 +169,7 @@ void SettingsConstraints::check(const Settings & current_settings, const Setting
    if (element.writability)
        new_value = *element.writability;

    auto setting_name = Settings::Traits::resolveName(element.setting_name);
    auto setting_name = Settings::resolveName(element.setting_name);
    auto it = constraints.find(setting_name);
    if (it != constraints.end())
        old_value = it->second.writability;
@@ -255,7 +260,7 @@ bool SettingsConstraints::checkImpl(const Settings & current_settings,
    ReactionOnViolation reaction,
    SettingSource source) const
{
    std::string_view setting_name = Settings::Traits::resolveName(change.name);
    std::string_view setting_name = Settings::resolveName(change.name);

    if (setting_name == "profile")
        return true;
@@ -393,7 +398,7 @@ std::string_view SettingsConstraints::resolveSettingNameWithCache(std::string_vi
SettingsConstraints::Checker SettingsConstraints::getChecker(const Settings & current_settings, std::string_view setting_name) const
{
    auto resolved_name = resolveSettingNameWithCache(setting_name);
    if (!current_settings.allow_ddl && resolved_name == "allow_ddl")
    if (!current_settings[Setting::allow_ddl] && resolved_name == "allow_ddl")
        return Checker(PreformattedMessage::create("Cannot modify 'allow_ddl' setting when DDL queries are prohibited for the user"),
            ErrorCodes::QUERY_IS_PROHIBITED);

@@ -403,11 +408,11 @@ SettingsConstraints::Checker SettingsConstraints::getChecker(const Settings & cu
     * 2 - only read requests, as well as changing settings, except for the `readonly` setting.
     */

    if (current_settings.readonly > 1 && resolved_name == "readonly")
    if (current_settings[Setting::readonly] > 1 && resolved_name == "readonly")
        return Checker(PreformattedMessage::create("Cannot modify 'readonly' setting in readonly mode"), ErrorCodes::READONLY);

    auto it = constraints.find(resolved_name);
    if (current_settings.readonly == 1)
    if (current_settings[Setting::readonly] == 1)
    {
        if (it == constraints.end() || it->second.writability != SettingConstraintWritability::CHANGEABLE_IN_READONLY)
            return Checker(PreformattedMessage::create("Cannot modify '{}' setting in readonly mode", setting_name),
@@ -416,9 +421,9 @@ SettingsConstraints::Checker SettingsConstraints::getChecker(const Settings & cu
    else // For both readonly=0 and readonly=2
    {
        if (it == constraints.end())
            return Checker(Settings::Traits::resolveName); // Allowed
            return Checker(Settings::resolveName); // Allowed
    }
    return Checker(it->second, Settings::Traits::resolveName);
    return Checker(it->second, Settings::resolveName);
}

SettingsConstraints::Checker SettingsConstraints::getMergeTreeChecker(std::string_view short_name) const
@ -249,7 +249,7 @@ bool SettingsProfileElements::isBackupAllowed() const
|
||||
bool SettingsProfileElements::isAllowBackupSetting(const String & setting_name)
|
||||
{
|
||||
static constexpr std::string_view ALLOW_BACKUP_SETTING_NAME = "allow_backup";
|
||||
return Settings::Traits::resolveName(setting_name) == ALLOW_BACKUP_SETTING_NAME;
|
||||
return Settings::resolveName(setting_name) == ALLOW_BACKUP_SETTING_NAME;
|
||||
}
|
||||
|
||||
}
|
||||
|
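The hunks above apply one mechanical refactor: direct field access on Settings (settings.readonly) becomes indexed access through a typed handle (settings[Setting::readonly]), and each translation unit forward-declares only the handles it uses in namespace Setting. A minimal self-contained sketch of that access pattern, using stand-in types rather than the real ClickHouse classes:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    // Stand-ins for the typed setting handles; the real ones carry more metadata.
    struct SettingsUInt64 { size_t index; };
    struct SettingsBool   { size_t index; };

    namespace Setting
    {
        // In the real code these are extern-declared per translation unit;
        // defined inline here so the sketch compiles on its own.
        inline constexpr SettingsUInt64 readonly{0};
        inline constexpr SettingsBool   allow_ddl{1};
    }

    class Settings
    {
    public:
        // operator[] keyed by a typed handle replaces direct member access.
        uint64_t operator[](SettingsUInt64 s) const { return values[s.index]; }
        bool     operator[](SettingsBool s)   const { return values[s.index] != 0; }
        uint64_t values[2] = {2, 0};
    };

    int main()
    {
        Settings settings;
        if (settings[Setting::readonly] > 1 && !settings[Setting::allow_ddl])
            std::cout << "readonly mode, DDL prohibited\n";
    }

One plausible payoff of this shape is that a file no longer needs the full Settings definition to touch one setting, which shrinks rebuilds when the settings list changes.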
@ -1,4 +1,4 @@
if (TARGET ch_contrib::krb5)
clickhouse_add_executable (kerberos_init kerberos_init.cpp)
target_link_libraries (kerberos_init PRIVATE dbms ch_contrib::krb5)
target_link_libraries (kerberos_init PRIVATE dbms clickhouse_functions ch_contrib::krb5)
endif()
@ -46,14 +46,6 @@ inline Field settingCastValueUtil(std::string_view full_name, const Field & valu
});
}

inline String settingValueToStringUtil(std::string_view full_name, const Field & value)
{
return resolveSetting(full_name, [&] <typename T> (std::string_view short_name, SettingsType<T>)
{
return T::valueToStringUtil(short_name, value);
});
}

inline Field settingStringToValueUtil(std::string_view full_name, const String & str)
{
return resolveSetting(full_name, [&] <typename T> (std::string_view short_name, SettingsType<T>)
@ -89,10 +81,9 @@ inline String settingFullName<MergeTreeSettings>(std::string_view short_name)

inline std::string resolveSettingName(std::string_view full_name)
{
return resolveSetting(full_name, [&] <typename T> (std::string_view short_name, SettingsType<T>)
{
return settingFullName<T>(T::Traits::resolveName(short_name));
});
return resolveSetting(
full_name,
[&]<typename T>(std::string_view short_name, SettingsType<T>) { return settingFullName<T>(T::resolveName(short_name)); });
}

}
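The resolveSettingName hunk above reformats a call to resolveSetting, which dispatches a full setting name to the settings family that owns it via a C++20 templated lambda, and drops the Traits:: level from resolveName. A compilable sketch of that dispatch shape; the merge_tree_ prefix rule and all types here are illustrative stand-ins, not the real API:

    #include <iostream>
    #include <string>
    #include <string_view>

    struct Settings          { static std::string_view resolveName(std::string_view n) { return n; } };
    struct MergeTreeSettings { static std::string_view resolveName(std::string_view n) { return n; } };

    // Tag type so the callback knows which family it was handed.
    template <typename T> struct SettingsType {};

    // Stand-in for the per-family full-name mapping from the diff.
    template <typename T>
    std::string settingFullName(std::string_view short_name) { return std::string(short_name); }

    // Routes a full name to its owning family and invokes the callback with the
    // short name plus a family tag.
    template <typename Callback>
    auto resolveSetting(std::string_view full_name, Callback && callback)
    {
        constexpr std::string_view prefix = "merge_tree_";   // assumed routing rule
        if (full_name.starts_with(prefix))
            return callback(full_name.substr(prefix.size()), SettingsType<MergeTreeSettings>{});
        return callback(full_name, SettingsType<Settings>{});
    }

    int main()
    {
        // Explicit-template-parameter lambda: the C++20 feature the hunk reflows.
        auto name = resolveSetting("merge_tree_parts_to_throw_insert",
            [&]<typename T>(std::string_view short_name, SettingsType<T>) { return settingFullName<T>(T::resolveName(short_name)); });
        std::cout << name << '\n';
    }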
@ -14,6 +14,10 @@ static constexpr size_t MAX_AGGREGATE_FUNCTION_NAME_LENGTH = 1000;
namespace DB
{
struct Settings;
namespace Setting
{
extern const SettingsBool log_queries;
}

namespace ErrorCodes
{
@ -199,7 +203,7 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(
found = *opt;

out_properties = found.properties;
if (query_context && query_context->getSettingsRef().log_queries)
if (query_context && query_context->getSettingsRef()[Setting::log_queries])
query_context->addQueryFactoriesInfo(
Context::QueryLogFactories::AggregateFunction, is_case_insensitive ? case_insensitive_name : name);

@ -224,7 +228,7 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(
"Aggregate function combinator '{}' is only for internal usage",
combinator_name);

if (query_context && query_context->getSettingsRef().log_queries)
if (query_context && query_context->getSettingsRef()[Setting::log_queries])
query_context->addQueryFactoriesInfo(Context::QueryLogFactories::AggregateFunctionCombinator, combinator_name);

String nested_name = name.substr(0, name.size() - combinator_name.size());
@ -17,6 +17,12 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool allow_introspection_functions;
}

namespace ErrorCodes
{
extern const int FUNCTION_NOT_ALLOWED;
@ -628,7 +634,7 @@ static void check(const std::string & name, const DataTypes & argument_types, co

AggregateFunctionPtr createAggregateFunctionFlameGraph(const std::string & name, const DataTypes & argument_types, const Array & params, const Settings * settings)
{
if (!settings->allow_introspection_functions)
if (!(*settings)[Setting::allow_introspection_functions])
throw Exception(ErrorCodes::FUNCTION_NOT_ALLOWED,
"Introspection functions are disabled, because setting 'allow_introspection_functions' is set to 0");

@ -18,15 +18,19 @@
#include <Common/assert_cast.h>

#include <AggregateFunctions/IAggregateFunction.h>
#include <base/range.h>

#include <bitset>

namespace DB
{
namespace Setting
{
extern const SettingsBool allow_experimental_funnel_functions;
}

constexpr size_t max_events_size = 64;

constexpr size_t min_required_args = 3;

namespace ErrorCodes
@ -448,7 +452,7 @@ inline AggregateFunctionPtr createAggregateFunctionSequenceNodeImpl(
AggregateFunctionPtr
createAggregateFunctionSequenceNode(const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings)
{
if (settings == nullptr || !settings->allow_experimental_funnel_functions)
if (settings == nullptr || !(*settings)[Setting::allow_experimental_funnel_functions])
{
throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION, "Aggregate function {} is experimental. "
"Set `allow_experimental_funnel_functions` setting to enable it", name);
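Both factory functions above share a guard shape: a nullable Settings pointer is checked before the handle is dereferenced, and a disabled feature turns into an exception rather than a silent fallback. A self-contained sketch of that guard under stand-in types (not the real factory signature):

    #include <stdexcept>
    #include <string>

    namespace Setting { inline constexpr int allow_experimental_funnel_functions = 0; }

    struct Settings
    {
        bool flags[1] = {false};
        bool operator[](int index) const { return flags[index]; }
    };

    // Reject early when the flag is absent or off, before any construction work.
    void createSequenceNodeFunction(const std::string & name, const Settings * settings)
    {
        if (settings == nullptr || !(*settings)[Setting::allow_experimental_funnel_functions])
            throw std::runtime_error(
                "Aggregate function " + name + " is experimental. "
                "Set `allow_experimental_funnel_functions` setting to enable it");
        // ... build and return the aggregate function here ...
    }

    int main()
    {
        Settings settings;
        try { createSequenceNodeFunction("sequenceNextNode", &settings); }
        catch (const std::exception &) { /* expected: feature disabled */ }
    }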
@ -15,7 +15,10 @@
namespace DB
{

struct Settings;
namespace Setting
{
extern const SettingsMaxThreads max_threads;
}

namespace ErrorCodes
{
@ -149,7 +152,7 @@ void registerAggregateFunctionsUniq(AggregateFunctionFactory & factory)
auto assign_bool_param = [](const std::string & name, const DataTypes & argument_types, const Array & params, const Settings * settings)
{
/// Using two level hash set if we wouldn't be able to merge in parallel can cause ~10% slowdown.
if (settings && settings->max_threads > 1)
if (settings && (*settings)[Setting::max_threads] > 1)
return createAggregateFunctionUniq<
true, AggregateFunctionUniqExactData, AggregateFunctionUniqExactDataForVariadic, true /* is_able_to_parallelize_merge */>(name, argument_types, params, settings);
else
@ -3,6 +3,7 @@
#include <Core/Settings.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDateTime.h>
#include <base/range.h>

#include <unordered_set>
#include <Columns/ColumnsNumber.h>
@ -1,5 +1,5 @@
clickhouse_add_executable (quantile-t-digest quantile-t-digest.cpp)
target_link_libraries (quantile-t-digest PRIVATE dbms clickhouse_aggregate_functions)
target_link_libraries (quantile-t-digest PRIVATE dbms clickhouse_functions clickhouse_aggregate_functions)

clickhouse_add_executable (group_array_sorted group_array_sorted.cpp)
target_link_libraries (group_array_sorted PRIVATE dbms clickhouse_aggregate_functions)
target_link_libraries (group_array_sorted PRIVATE dbms clickhouse_functions clickhouse_aggregate_functions)
@ -14,6 +14,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_aggregators_of_group_by_keys;
}

namespace ErrorCodes
{
@ -34,7 +38,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_aggregators_of_group_by_keys)
if (!getSettings()[Setting::optimize_aggregators_of_group_by_keys])
return;

/// Collect group by keys.
@ -79,7 +83,7 @@ public:
/// Now we visit all nodes in QueryNode, we should remove group_by_keys from stack.
void leaveImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_aggregators_of_group_by_keys)
if (!getSettings()[Setting::optimize_aggregators_of_group_by_keys])
return;

if (node->getNodeType() == QueryTreeNodeType::FUNCTION)
@ -15,6 +15,11 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_arithmetic_operations_in_aggregate_functions;
}

namespace ErrorCodes
{
@ -56,7 +61,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_arithmetic_operations_in_aggregate_functions)
if (!getSettings()[Setting::optimize_arithmetic_operations_in_aggregate_functions])
return;

auto * aggregate_function_node = node->as<FunctionNode>();
@ -15,6 +15,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_rewrite_array_exists_to_has;
}

namespace
{
@ -27,7 +31,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_rewrite_array_exists_to_has)
if (!getSettings()[Setting::optimize_rewrite_array_exists_to_has])
return;

auto * array_exists_function_node = node->as<FunctionNode>();
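Every analyzer pass in this stretch of the diff opens enterImpl with the same gate: read one boolean setting through the new indexed syntax and return early when the optimization is off. A reduced sketch of that visitor shape; the node and visitor types are stand-ins, not the real InDepthQueryTreeVisitorWithContext:

    #include <vector>

    struct QueryTreeNode { std::vector<QueryTreeNode> children; };

    struct Settings
    {
        bool optimize_rewrite_array_exists_to_has = true;
    };

    // Reduced visitor: the real passes derive from a CRTP base and pull the
    // settings from the query context instead of holding a reference.
    class RewriteVisitor
    {
    public:
        explicit RewriteVisitor(const Settings & settings_) : settings(settings_) {}

        void visit(QueryTreeNode & node)
        {
            enterImpl(node);
            for (auto & child : node.children)
                visit(child);
        }

    private:
        void enterImpl(QueryTreeNode & node)
        {
            // The gate every pass in the diff starts with: do nothing when disabled.
            if (!settings.optimize_rewrite_array_exists_to_has)
                return;
            // ... match and rewrite the node here ...
            (void)node;
        }

        const Settings & settings;
    };

    int main()
    {
        Settings settings;
        QueryTreeNode root;
        RewriteVisitor(settings).visit(root);
    }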
@ -12,6 +12,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool final;
}

namespace
{
@ -24,7 +28,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().final)
if (!getSettings()[Setting::final])
return;

const auto * query_node = node->as<QueryNode>();
@ -26,6 +26,14 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool allow_hyperscan;
extern const SettingsUInt64 max_hyperscan_regexp_length;
extern const SettingsUInt64 max_hyperscan_regexp_total_length;
extern const SettingsBool reject_expensive_hyperscan_regexps;
extern const SettingsBool optimize_or_like_chain;
}

namespace
{
@ -48,10 +56,8 @@ public:
{
const auto & settings = getSettings();

return settings.optimize_or_like_chain
&& settings.allow_hyperscan
&& settings.max_hyperscan_regexp_length == 0
&& settings.max_hyperscan_regexp_total_length == 0;
return settings[Setting::optimize_or_like_chain] && settings[Setting::allow_hyperscan] && settings[Setting::max_hyperscan_regexp_length] == 0
&& settings[Setting::max_hyperscan_regexp_total_length] == 0;
}

void enterImpl(QueryTreeNodePtr & node)
@ -139,7 +145,11 @@ private:
void ConvertOrLikeChainPass::run(QueryTreeNodePtr & query_tree_node, ContextPtr context)
{
const auto & settings = context->getSettingsRef();
auto match_function_resolver = createInternalMultiMatchAnyOverloadResolver(settings.allow_hyperscan, settings.max_hyperscan_regexp_length, settings.max_hyperscan_regexp_total_length, settings.reject_expensive_hyperscan_regexps);
auto match_function_resolver = createInternalMultiMatchAnyOverloadResolver(
settings[Setting::allow_hyperscan],
settings[Setting::max_hyperscan_regexp_length],
settings[Setting::max_hyperscan_regexp_total_length],
settings[Setting::reject_expensive_hyperscan_regexps]);
auto or_function_resolver = createInternalFunctionOrOverloadResolver();

ConvertOrLikeChainVisitor visitor(std::move(or_function_resolver), std::move(match_function_resolver), std::move(context));
@ -19,6 +19,13 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool convert_query_to_cnf;
extern const SettingsBool optimize_append_index;
extern const SettingsBool optimize_substitute_columns;
extern const SettingsBool optimize_using_constraints;
}

namespace
{
@ -681,7 +688,7 @@ void optimizeWithConstraints(Analyzer::CNF & cnf, const QueryTreeNodes & table_e
cnf.pushNotIntoFunctions(context);

const auto & settings = context->getSettingsRef();
if (settings.optimize_append_index)
if (settings[Setting::optimize_append_index])
addIndexConstraint(cnf, table_expressions, context);
}

@ -693,7 +700,7 @@ void optimizeNode(QueryTreeNodePtr & node, const QueryTreeNodes & table_expressi
if (!cnf)
return;

if (settings.optimize_using_constraints)
if (settings[Setting::optimize_using_constraints])
optimizeWithConstraints(*cnf, table_expressions, context);

auto new_node = cnf->toQueryTree();
@ -731,7 +738,7 @@ public:
optimize_filter(query_node->getPrewhere());
optimize_filter(query_node->getHaving());

if (has_filter && settings.optimize_substitute_columns)
if (has_filter && settings[Setting::optimize_substitute_columns])
substituteColumns(*query_node, table_expressions, context);
}
};
@ -741,7 +748,7 @@ public:
void ConvertLogicalExpressionToCNFPass::run(QueryTreeNodePtr & query_tree_node, ContextPtr context)
{
const auto & settings = context->getSettingsRef();
if (!settings.convert_query_to_cnf)
if (!settings[Setting::convert_query_to_cnf])
return;

ConvertQueryToCNFVisitor visitor(std::move(context));
@ -15,6 +15,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool count_distinct_optimization;
}

namespace
{
@ -27,7 +31,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().count_distinct_optimization)
if (!getSettings()[Setting::count_distinct_optimization])
return;

auto * query_node = node->as<QueryNode>();
@ -21,6 +21,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsUInt64 cross_to_inner_join_rewrite;
}

namespace ErrorCodes
{
@ -193,17 +197,14 @@ public:
}

private:
bool isEnabled() const
{
return getSettings().cross_to_inner_join_rewrite;
}
bool isEnabled() const { return getSettings()[Setting::cross_to_inner_join_rewrite]; }

bool forceRewrite(JoinKind kind) const
{
if (kind == JoinKind::Cross)
return false;
/// Comma join can be forced to rewrite
return getSettings().cross_to_inner_join_rewrite >= 2;
return getSettings()[Setting::cross_to_inner_join_rewrite] >= 2;
}

QueryTreeNodePtr makeConjunction(const QueryTreeNodes & nodes)
@ -25,6 +25,12 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool group_by_use_nulls;
extern const SettingsBool join_use_nulls;
extern const SettingsBool optimize_functions_to_subcolumns;
}

namespace
{
@ -257,7 +263,7 @@ public:

void enterImpl(const QueryTreeNodePtr & node)
{
if (!getSettings().optimize_functions_to_subcolumns)
if (!getSettings()[Setting::optimize_functions_to_subcolumns])
return;

if (auto * table_node = node->as<TableNode>())
@ -281,14 +287,14 @@ public:

if (const auto * join_node = node->as<JoinNode>())
{
can_wrap_result_columns_with_nullable |= getContext()->getSettingsRef().join_use_nulls;
can_wrap_result_columns_with_nullable |= getContext()->getSettingsRef()[Setting::join_use_nulls];
return;
}

if (const auto * query_node = node->as<QueryNode>())
{
if (query_node->isGroupByWithCube() || query_node->isGroupByWithRollup() || query_node->isGroupByWithGroupingSets())
can_wrap_result_columns_with_nullable |= getContext()->getSettingsRef().group_by_use_nulls;
can_wrap_result_columns_with_nullable |= getContext()->getSettingsRef()[Setting::group_by_use_nulls];
return;
}
}
@ -419,7 +425,7 @@ public:

void enterImpl(QueryTreeNodePtr & node) const
{
if (!getSettings().optimize_functions_to_subcolumns)
if (!getSettings()[Setting::optimize_functions_to_subcolumns])
return;

auto [function_node, first_argument_column_node, table_node] = getTypedNodesForOptimization(node, getContext());
@ -21,6 +21,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_syntax_fuse_functions;
}

namespace ErrorCodes
{
@ -44,7 +48,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_syntax_fuse_functions)
if (!getSettings()[Setting::optimize_syntax_fuse_functions])
return;

auto * function_node = node->as<FunctionNode>();
@ -15,6 +15,11 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool force_grouping_standard_compatibility;
extern const SettingsBool group_by_use_nulls;
}

namespace ErrorCodes
{
@ -72,41 +77,38 @@ public:
FunctionOverloadResolverPtr grouping_function_resolver;
bool add_grouping_set_column = false;

bool force_grouping_standard_compatibility = getSettings().force_grouping_standard_compatibility;
bool force_grouping_standard_compatibility = getSettings()[Setting::force_grouping_standard_compatibility];
size_t aggregation_keys_size = aggregation_key_to_index.size();

switch (group_by_kind)
{
case GroupByKind::ORDINARY:
{
auto grouping_ordinary_function = std::make_shared<FunctionGroupingOrdinary>(arguments_indexes,
force_grouping_standard_compatibility);
auto grouping_ordinary_function
= std::make_shared<FunctionGroupingOrdinary>(arguments_indexes, force_grouping_standard_compatibility);
grouping_function_resolver = std::make_shared<FunctionToOverloadResolverAdaptor>(std::move(grouping_ordinary_function));
break;
}
case GroupByKind::ROLLUP:
{
auto grouping_rollup_function = std::make_shared<FunctionGroupingForRollup>(arguments_indexes,
aggregation_keys_size,
force_grouping_standard_compatibility);
auto grouping_rollup_function = std::make_shared<FunctionGroupingForRollup>(
arguments_indexes, aggregation_keys_size, force_grouping_standard_compatibility);
grouping_function_resolver = std::make_shared<FunctionToOverloadResolverAdaptor>(std::move(grouping_rollup_function));
add_grouping_set_column = true;
break;
}
case GroupByKind::CUBE:
{
auto grouping_cube_function = std::make_shared<FunctionGroupingForCube>(arguments_indexes,
aggregation_keys_size,
force_grouping_standard_compatibility);
auto grouping_cube_function = std::make_shared<FunctionGroupingForCube>(
arguments_indexes, aggregation_keys_size, force_grouping_standard_compatibility);
grouping_function_resolver = std::make_shared<FunctionToOverloadResolverAdaptor>(std::move(grouping_cube_function));
add_grouping_set_column = true;
break;
}
case GroupByKind::GROUPING_SETS:
{
auto grouping_grouping_sets_function = std::make_shared<FunctionGroupingForGroupingSets>(arguments_indexes,
grouping_sets_keys_indexes,
force_grouping_standard_compatibility);
auto grouping_grouping_sets_function = std::make_shared<FunctionGroupingForGroupingSets>(
arguments_indexes, grouping_sets_keys_indexes, force_grouping_standard_compatibility);
grouping_function_resolver = std::make_shared<FunctionToOverloadResolverAdaptor>(std::move(grouping_grouping_sets_function));
add_grouping_set_column = true;
break;
@ -147,7 +149,8 @@ void resolveGroupingFunctions(QueryTreeNodePtr & query_node, ContextPtr context)
if (query_node_typed.hasGroupBy())
{
/// It is expected by execution layer that if there are only 1 grouping set it will be removed
if (query_node_typed.isGroupByWithGroupingSets() && query_node_typed.getGroupBy().getNodes().size() == 1 && !context->getSettingsRef().group_by_use_nulls)
if (query_node_typed.isGroupByWithGroupingSets() && query_node_typed.getGroupBy().getNodes().size() == 1
&& !context->getSettingsRef()[Setting::group_by_use_nulls])
{
auto grouping_set_list_node = query_node_typed.getGroupBy().getNodes().front();
auto & grouping_set_list_node_typed = grouping_set_list_node->as<ListNode &>();
@ -10,6 +10,13 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool allow_execute_multiif_columnar;
extern const SettingsBool allow_experimental_variant_type;
extern const SettingsBool optimize_if_chain_to_multiif;
extern const SettingsBool use_variant_as_common_type;
}

namespace
{
@ -27,7 +34,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_if_chain_to_multiif)
if (!getSettings()[Setting::optimize_if_chain_to_multiif])
return;

auto * function_node = node->as<FunctionNode>();
@ -84,7 +91,8 @@ private:
void IfChainToMultiIfPass::run(QueryTreeNodePtr & query_tree_node, ContextPtr context)
{
const auto & settings = context->getSettingsRef();
auto multi_if_function_ptr = createInternalMultiIfOverloadResolver(settings.allow_execute_multiif_columnar, settings.allow_experimental_variant_type, settings.use_variant_as_common_type);
auto multi_if_function_ptr = createInternalMultiIfOverloadResolver(
settings[Setting::allow_execute_multiif_columnar], settings[Setting::allow_experimental_variant_type], settings[Setting::use_variant_as_common_type]);
IfChainToMultiIfPassVisitor visitor(std::move(multi_if_function_ptr), std::move(context));
visitor.visit(query_tree_node);
}
@ -16,6 +16,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_if_transform_strings_to_enum;
}

namespace
{
@ -101,7 +105,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_if_transform_strings_to_enum)
if (!getSettings()[Setting::optimize_if_transform_strings_to_enum])
return;

auto * function_node = node->as<FunctionNode>();
@ -16,6 +16,11 @@

namespace DB
{
namespace Setting
{
extern const SettingsUInt64 optimize_min_equality_disjunction_chain_length;
extern const SettingsUInt64 optimize_min_inequality_conjunction_chain_length;
}

namespace ErrorCodes
{
@ -531,7 +536,8 @@ private:
for (auto & [expression, not_equals_functions] : node_to_not_equals_functions)
{
const auto & settings = getSettings();
if (not_equals_functions.size() < settings.optimize_min_inequality_conjunction_chain_length && !expression.node->getResultType()->lowCardinality())
if (not_equals_functions.size() < settings[Setting::optimize_min_inequality_conjunction_chain_length]
&& !expression.node->getResultType()->lowCardinality())
{
std::move(not_equals_functions.begin(), not_equals_functions.end(), std::back_inserter(and_operands));
continue;
@ -653,7 +659,8 @@ private:
for (auto & [expression, equals_functions] : node_to_equals_functions)
{
const auto & settings = getSettings();
if (equals_functions.size() < settings.optimize_min_equality_disjunction_chain_length && !expression.node->getResultType()->lowCardinality())
if (equals_functions.size() < settings[Setting::optimize_min_equality_disjunction_chain_length]
&& !expression.node->getResultType()->lowCardinality())
{
std::move(equals_functions.begin(), equals_functions.end(), std::back_inserter(or_operands));
continue;
@ -8,6 +8,12 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool allow_experimental_variant_type;
extern const SettingsBool optimize_multiif_to_if;
extern const SettingsBool use_variant_as_common_type;
}

namespace
{
@ -25,7 +31,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_multiif_to_if)
if (!getSettings()[Setting::optimize_multiif_to_if])
return;

auto * function_node = node->as<FunctionNode>();
@ -57,7 +63,8 @@ private:
void MultiIfToIfPass::run(QueryTreeNodePtr & query_tree_node, ContextPtr context)
{
const auto & settings = context->getSettingsRef();
auto if_function_ptr = createInternalFunctionIfOverloadResolver(settings.allow_experimental_variant_type, settings.use_variant_as_common_type);
auto if_function_ptr
= createInternalFunctionIfOverloadResolver(settings[Setting::allow_experimental_variant_type], settings[Setting::use_variant_as_common_type]);
MultiIfToIfVisitor visitor(std::move(if_function_ptr), std::move(context));
visitor.visit(query_tree_node);
}
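The *Pass::run bodies above show a second recurring shape: settings are read once, used to construct a function overload resolver, and the resolver is then moved into the visitor that walks the whole tree, so per-node work never touches settings again. A sketch of that construction order; the resolver and visitor types are illustrative stand-ins:

    #include <memory>
    #include <utility>

    struct Settings
    {
        bool allow_experimental_variant_type = false;
        bool use_variant_as_common_type = false;
    };

    struct FunctionResolver
    {
        bool variant_type;
        bool variant_as_common;
    };

    // Stand-in for createInternalFunctionIfOverloadResolver: settings are
    // consumed once, at pass-construction time.
    std::shared_ptr<FunctionResolver> createIfResolver(bool variant_type, bool variant_as_common)
    {
        return std::make_shared<FunctionResolver>(FunctionResolver{variant_type, variant_as_common});
    }

    struct MultiIfToIfVisitor
    {
        explicit MultiIfToIfVisitor(std::shared_ptr<FunctionResolver> resolver_) : resolver(std::move(resolver_)) {}
        void visit() { /* rewrite multiIf(c, a, b) -> if(c, a, b) using resolver */ }
        std::shared_ptr<FunctionResolver> resolver;
    };

    int main()
    {
        Settings settings;
        auto resolver = createIfResolver(settings.allow_experimental_variant_type, settings.use_variant_as_common_type);
        MultiIfToIfVisitor visitor(std::move(resolver));
        visitor.visit();
    }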
@ -13,6 +13,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_normalize_count_variants;
}

namespace
{
@ -25,7 +29,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_normalize_count_variants)
if (!getSettings()[Setting::optimize_normalize_count_variants])
return;

auto * function_node = node->as<FunctionNode>();
@ -13,6 +13,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_time_filter_with_preimage;
}

namespace ErrorCodes
{
@ -58,7 +62,7 @@ public:
{"greaterOrEquals", "lessOrEquals"},
};

if (!getSettings().optimize_time_filter_with_preimage)
if (!getSettings()[Setting::optimize_time_filter_with_preimage])
return;

const auto * function = node->as<FunctionNode>();
@ -12,6 +12,11 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool group_by_use_nulls;
extern const SettingsBool optimize_group_by_function_keys;
}

class OptimizeGroupByFunctionKeysVisitor : public InDepthQueryTreeVisitorWithContext<OptimizeGroupByFunctionKeysVisitor>
{
@ -29,13 +34,13 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_group_by_function_keys)
if (!getSettings()[Setting::optimize_group_by_function_keys])
return;

/// When group_by_use_nulls = 1 removing keys from GROUP BY can lead
/// to unexpected types in some functions.
/// See example in https://github.com/ClickHouse/ClickHouse/pull/61567#issuecomment-2018007887
if (getSettings().group_by_use_nulls)
if (getSettings()[Setting::group_by_use_nulls])
return;

auto * query = node->as<QueryNode>();
@ -9,6 +9,11 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool group_by_use_nulls;
extern const SettingsBool optimize_injective_functions_in_group_by;
}

namespace
{
@ -23,14 +28,14 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_injective_functions_in_group_by)
if (!getSettings()[Setting::optimize_injective_functions_in_group_by])
return;

/// Don't optimize injective functions when group_by_use_nulls=true,
/// because in this case we make initial group by keys Nullable
/// and eliminating some functions can cause issues with arguments Nullability
/// during their execution. See examples in https://github.com/ClickHouse/ClickHouse/pull/61567#issuecomment-2008181143
if (getSettings().group_by_use_nulls)
if (getSettings()[Setting::group_by_use_nulls])
return;

auto * query = node->as<QueryNode>();
@ -12,6 +12,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_redundant_functions_in_order_by;
}

namespace
{
@ -31,7 +35,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_redundant_functions_in_order_by)
if (!getSettings()[Setting::optimize_redundant_functions_in_order_by])
return;

auto * query = node->as<QueryNode>();
@ -20,6 +20,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_rewrite_aggregate_function_with_if;
}

namespace
{
@ -32,7 +36,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_rewrite_aggregate_function_with_if)
if (!getSettings()[Setting::optimize_rewrite_aggregate_function_with_if])
return;

auto * function_node = node->as<FunctionNode>();
@ -11,6 +11,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_arithmetic_operations_in_aggregate_functions;
}

namespace
{
@ -23,7 +27,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_arithmetic_operations_in_aggregate_functions)
if (!getSettings()[Setting::optimize_arithmetic_operations_in_aggregate_functions])
return;

static const std::unordered_set<String> func_supported = {
@ -14,6 +14,11 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool aggregate_functions_null_for_empty;
extern const SettingsBool optimize_rewrite_sum_if_to_count_if;
}

namespace
{
@ -26,7 +31,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_rewrite_sum_if_to_count_if)
if (!getSettings()[Setting::optimize_rewrite_sum_if_to_count_if])
return;

auto * function_node = node->as<FunctionNode>();
@ -56,7 +61,7 @@ public:
return;

const auto & constant_value_literal = constant_node->getValue();
if (getSettings().aggregate_functions_null_for_empty)
if (getSettings()[Setting::aggregate_functions_null_for_empty])
return;

/// Rewrite `sumIf(1, cond)` into `countIf(cond)`
@ -14,6 +14,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_injective_functions_inside_uniq;
}

namespace
{
@ -36,7 +40,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_injective_functions_inside_uniq)
if (!getSettings()[Setting::optimize_injective_functions_inside_uniq])
return;

auto * function_node = node->as<FunctionNode>();
@ -13,6 +13,10 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool optimize_uniq_to_count;
}

namespace
{
@ -123,7 +127,7 @@ public:

void enterImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_uniq_to_count)
if (!getSettings()[Setting::optimize_uniq_to_count])
return;

auto * query_node = node->as<QueryNode>();
@ -50,6 +50,17 @@

namespace DB
{
namespace Setting
{
extern const SettingsBool allow_experimental_variant_type;
extern const SettingsBool any_join_distinct_right_table_keys;
extern const SettingsJoinStrictness join_default_strictness;
extern const SettingsBool enable_order_by_all;
extern const SettingsUInt64 limit;
extern const SettingsUInt64 offset;
extern const SettingsBool use_variant_as_common_type;
}

namespace ErrorCodes
{
@ -235,13 +246,13 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
UInt64 offset = 0;

/// Remove global settings limit and offset
if (const auto & settings_ref = updated_context->getSettingsRef(); settings_ref.limit || settings_ref.offset)
if (const auto & settings_ref = updated_context->getSettingsRef(); settings_ref[Setting::limit] || settings_ref[Setting::offset])
{
Settings settings = updated_context->getSettingsCopy();
limit = settings.limit;
offset = settings.offset;
settings.limit = 0;
settings.offset = 0;
limit = settings[Setting::limit];
offset = settings[Setting::offset];
settings[Setting::limit] = 0;
settings[Setting::offset] = 0;
updated_context->setSettings(settings);
}
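The hunk above hoists the global limit/offset settings into locals and zeroes them on a copy before pushing the copy back into the context, so the values apply once at the outermost query instead of leaking into every subquery. A small self-contained sketch of that save-and-clear move, with a simplified context but the same order of operations:

    #include <cstdint>
    #include <iostream>

    struct Settings
    {
        uint64_t limit = 0;
        uint64_t offset = 0;
    };

    struct Context
    {
        const Settings & getSettingsRef() const { return settings; }
        Settings getSettingsCopy() const { return settings; }
        void setSettings(const Settings & s) { settings = s; }
        Settings settings;
    };

    int main()
    {
        Context updated_context;
        updated_context.settings = {10, 5};

        uint64_t limit = 0;
        uint64_t offset = 0;

        // Same order as the diff: read, remember, clear on a copy, write back.
        if (const auto & ref = updated_context.getSettingsRef(); ref.limit || ref.offset)
        {
            Settings settings = updated_context.getSettingsCopy();
            limit = settings.limit;
            offset = settings.offset;
            settings.limit = 0;
            settings.offset = 0;
            updated_context.setSettings(settings);
        }

        std::cout << "applied once: limit=" << limit << " offset=" << offset << '\n';
    }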
|
||||
|
||||
@ -268,7 +279,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
|
||||
}
|
||||
}
|
||||
|
||||
const auto enable_order_by_all = updated_context->getSettingsRef().enable_order_by_all;
|
||||
const auto enable_order_by_all = updated_context->getSettingsRef()[Setting::enable_order_by_all];
|
||||
|
||||
auto current_query_tree = std::make_shared<QueryNode>(std::move(updated_context), std::move(settings_changes));
|
||||
|
||||
@ -577,7 +588,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildExpression(const ASTPtr & expression, co
|
||||
}
|
||||
else if (const auto * ast_literal = expression->as<ASTLiteral>())
|
||||
{
|
||||
if (context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type)
|
||||
if (context->getSettingsRef()[Setting::allow_experimental_variant_type] && context->getSettingsRef()[Setting::use_variant_as_common_type])
|
||||
result = std::make_shared<ConstantNode>(ast_literal->value, applyVisitor(FieldToDataType<LeastSupertypeOnError::Variant>(), ast_literal->value));
|
||||
else
|
||||
result = std::make_shared<ConstantNode>(ast_literal->value);
|
||||
@ -908,8 +919,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildJoinTree(const ASTPtr & tables_in_select
|
||||
join_expression = buildExpression(table_join.on_expression, context);
|
||||
|
||||
const auto & settings = context->getSettingsRef();
|
||||
auto join_default_strictness = settings.join_default_strictness;
|
||||
auto any_join_distinct_right_table_keys = settings.any_join_distinct_right_table_keys;
|
||||
auto join_default_strictness = settings[Setting::join_default_strictness];
|
||||
auto any_join_distinct_right_table_keys = settings[Setting::any_join_distinct_right_table_keys];
|
||||
|
||||
JoinStrictness result_join_strictness = table_join.strictness;
|
||||
JoinKind result_join_kind = table_join.kind;
|
||||
|
@ -7,6 +7,12 @@
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace Setting
|
||||
{
|
||||
extern const SettingsBool group_by_use_nulls;
|
||||
extern const SettingsBool join_use_nulls;
|
||||
}
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
@ -32,12 +38,12 @@ IdentifierResolveScope::IdentifierResolveScope(QueryTreeNodePtr scope_node_, Ide
|
||||
else if (auto * query_node = scope_node->as<QueryNode>())
|
||||
{
|
||||
context = query_node->getContext();
|
||||
group_by_use_nulls = context->getSettingsRef().group_by_use_nulls &&
|
||||
(query_node->isGroupByWithGroupingSets() || query_node->isGroupByWithRollup() || query_node->isGroupByWithCube());
|
||||
group_by_use_nulls = context->getSettingsRef()[Setting::group_by_use_nulls]
|
||||
&& (query_node->isGroupByWithGroupingSets() || query_node->isGroupByWithRollup() || query_node->isGroupByWithCube());
|
||||
}
|
||||
|
||||
if (context)
|
||||
join_use_nulls = context->getSettingsRef().join_use_nulls;
|
||||
join_use_nulls = context->getSettingsRef()[Setting::join_use_nulls];
|
||||
else if (parent_scope)
|
||||
join_use_nulls = parent_scope->join_use_nulls;
|
||||
|
||||
|
@ -30,6 +30,12 @@
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace Setting
|
||||
{
|
||||
extern const SettingsSeconds lock_acquire_timeout;
|
||||
extern const SettingsBool single_join_prefer_left_table;
|
||||
}
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int UNKNOWN_IDENTIFIER;
|
||||
@ -420,7 +426,7 @@ QueryTreeNodePtr IdentifierResolver::tryResolveTableIdentifierFromDatabaseCatalo
|
||||
if (!storage)
|
||||
return {};
|
||||
|
||||
auto storage_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);
|
||||
auto storage_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef()[Setting::lock_acquire_timeout]);
|
||||
auto storage_snapshot = storage->getStorageSnapshot(storage->getInMemoryMetadataPtr(), context);
|
||||
auto result = std::make_shared<TableNode>(std::move(storage), std::move(storage_lock), std::move(storage_snapshot));
|
||||
if (is_temporary_table)
|
||||
@ -1155,7 +1161,7 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromJoin(const Identifi
|
||||
resolved_identifier = left_resolved_identifier;
|
||||
}
|
||||
}
|
||||
else if (scope.joins_count == 1 && scope.context->getSettingsRef().single_join_prefer_left_table)
|
||||
else if (scope.joins_count == 1 && scope.context->getSettingsRef()[Setting::single_join_prefer_left_table])
|
||||
{
|
||||
resolved_side = JoinTableSide::Left;
|
||||
resolved_identifier = left_resolved_identifier;
|
||||
|
@ -77,6 +77,35 @@ namespace ProfileEvents
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace Setting
|
||||
{
|
||||
extern const SettingsBool aggregate_functions_null_for_empty;
|
||||
extern const SettingsBool analyzer_compatibility_join_using_top_level_identifier;
|
||||
extern const SettingsBool asterisk_include_alias_columns;
|
||||
extern const SettingsBool asterisk_include_materialized_columns;
|
||||
extern const SettingsString count_distinct_implementation;
|
||||
extern const SettingsBool enable_global_with_statement;
|
||||
extern const SettingsBool enable_order_by_all;
|
||||
extern const SettingsBool enable_positional_arguments;
|
||||
extern const SettingsBool enable_scalar_subquery_optimization;
|
||||
extern const SettingsBool extremes;
|
||||
extern const SettingsBool force_grouping_standard_compatibility;
|
||||
extern const SettingsBool format_display_secrets_in_show_and_select;
|
||||
extern const SettingsBool joined_subquery_requires_alias;
|
||||
extern const SettingsUInt64 max_bytes_in_set;
|
||||
extern const SettingsUInt64 max_expanded_ast_elements;
|
||||
extern const SettingsUInt64 max_result_rows;
|
||||
extern const SettingsUInt64 max_rows_in_set;
|
||||
extern const SettingsUInt64 max_subquery_depth;
|
||||
extern const SettingsBool prefer_column_name_to_alias;
|
||||
extern const SettingsBool rewrite_count_distinct_if_with_count_distinct_implementation;
|
||||
extern const SettingsOverflowMode set_overflow_mode;
|
||||
extern const SettingsBool single_join_prefer_left_table;
|
||||
extern const SettingsBool transform_null_in;
|
||||
extern const SettingsUInt64 use_structure_from_insertion_table_in_table_functions;
|
||||
}
|
||||
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int UNSUPPORTED_METHOD;
|
||||
@ -506,8 +535,8 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
|
||||
auto subquery_context = Context::createCopy(context);
|
||||
|
||||
Settings subquery_settings = context->getSettingsCopy();
|
||||
subquery_settings.max_result_rows = 1;
|
||||
subquery_settings.extremes = false;
|
||||
subquery_settings[Setting::max_result_rows] = 1;
|
||||
subquery_settings[Setting::extremes] = false;
|
||||
subquery_context->setSettings(subquery_settings);
|
||||
/// When execute `INSERT INTO t WITH ... SELECT ...`, it may lead to `Unknown columns`
|
||||
/// exception with this settings enabled(https://github.com/ClickHouse/ClickHouse/issues/52494).
|
||||
@ -627,10 +656,8 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
|
||||
auto * nearest_query_scope = scope.getNearestQueryScope();
|
||||
|
||||
/// Always convert to literals when there is no query context
|
||||
if (!context->getSettingsRef().enable_scalar_subquery_optimization ||
|
||||
!useless_literal_types.contains(scalar_type_name) ||
|
||||
!context->hasQueryContext() ||
|
||||
!nearest_query_scope)
|
||||
if (!context->getSettingsRef()[Setting::enable_scalar_subquery_optimization] || !useless_literal_types.contains(scalar_type_name)
|
||||
|| !context->hasQueryContext() || !nearest_query_scope)
|
||||
{
|
||||
auto constant_value = std::make_shared<ConstantValue>(std::move(scalar_value), scalar_type);
|
||||
auto constant_node = std::make_shared<ConstantNode>(constant_value, node);
|
||||
@ -726,7 +753,7 @@ void QueryAnalyzer::mergeWindowWithParentWindow(const QueryTreeNodePtr & window_
|
||||
void QueryAnalyzer::replaceNodesWithPositionalArguments(QueryTreeNodePtr & node_list, const QueryTreeNodes & projection_nodes, IdentifierResolveScope & scope)
|
||||
{
|
||||
const auto & settings = scope.context->getSettingsRef();
|
||||
if (!settings.enable_positional_arguments || scope.context->getClientInfo().query_kind != ClientInfo::QueryKind::INITIAL_QUERY)
|
||||
if (!settings[Setting::enable_positional_arguments] || scope.context->getClientInfo().query_kind != ClientInfo::QueryKind::INITIAL_QUERY)
|
||||
return;
|
||||
|
||||
auto & node_list_typed = node_list->as<ListNode &>();
|
||||
@ -843,7 +870,7 @@ void QueryAnalyzer::validateTableExpressionModifiers(const QueryTreeNodePtr & ta
|
||||
|
||||
void QueryAnalyzer::validateJoinTableExpressionWithoutAlias(const QueryTreeNodePtr & join_node, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope)
|
||||
{
|
||||
if (!scope.context->getSettingsRef().joined_subquery_requires_alias)
|
||||
if (!scope.context->getSettingsRef()[Setting::joined_subquery_requires_alias])
|
||||
return;
|
||||
|
||||
bool table_expression_has_alias = table_expression_node->hasAlias();
|
||||
@ -938,7 +965,7 @@ void QueryAnalyzer::expandGroupByAll(QueryNode & query_tree_node_typed)
|
||||
|
||||
void QueryAnalyzer::expandOrderByAll(QueryNode & query_tree_node_typed, const Settings & settings)
|
||||
{
|
||||
if (!settings.enable_order_by_all || !query_tree_node_typed.isOrderByAll())
|
||||
if (!settings[Setting::enable_order_by_all] || !query_tree_node_typed.isOrderByAll())
|
||||
return;
|
||||
|
||||
auto * all_node = query_tree_node_typed.getOrderBy().getNodes()[0]->as<SortNode>();
|
||||
@ -989,12 +1016,14 @@ std::string QueryAnalyzer::rewriteAggregateFunctionNameIfNeeded(
|
||||
|
||||
if (aggregate_function_name_lowercase == "countdistinct")
|
||||
{
|
||||
result_aggregate_function_name = settings.count_distinct_implementation;
|
||||
result_aggregate_function_name = settings[Setting::count_distinct_implementation];
|
||||
}
|
||||
else if (aggregate_function_name_lowercase == "countifdistinct" ||
|
||||
(settings.rewrite_count_distinct_if_with_count_distinct_implementation && aggregate_function_name_lowercase == "countdistinctif"))
|
||||
else if (
|
||||
aggregate_function_name_lowercase == "countifdistinct"
|
||||
|| (settings[Setting::rewrite_count_distinct_if_with_count_distinct_implementation]
|
||||
&& aggregate_function_name_lowercase == "countdistinctif"))
|
||||
{
|
||||
result_aggregate_function_name = settings.count_distinct_implementation;
|
||||
result_aggregate_function_name = settings[Setting::count_distinct_implementation];
|
||||
result_aggregate_function_name += "If";
|
||||
}
|
||||
else if (aggregate_function_name_lowercase.ends_with("ifdistinct"))
|
||||
@ -1004,7 +1033,7 @@ std::string QueryAnalyzer::rewriteAggregateFunctionNameIfNeeded(
|
||||
result_aggregate_function_name = result_aggregate_function_name.substr(0, prefix_length) + "DistinctIf";
|
||||
}
|
||||
|
||||
bool need_add_or_null = settings.aggregate_functions_null_for_empty && !result_aggregate_function_name.ends_with("OrNull");
|
||||
bool need_add_or_null = settings[Setting::aggregate_functions_null_for_empty] && !result_aggregate_function_name.ends_with("OrNull");
|
||||
if (need_add_or_null)
|
||||
{
|
||||
auto properties = AggregateFunctionFactory::instance().tryGetProperties(result_aggregate_function_name, action);
|
||||
@ -1215,7 +1244,7 @@ IdentifierResolveResult QueryAnalyzer::tryResolveIdentifierInParentScopes(const
|
||||
}
|
||||
}
|
||||
|
||||
if (!scope.context->getSettingsRef().enable_global_with_statement)
|
||||
if (!scope.context->getSettingsRef()[Setting::enable_global_with_statement])
|
||||
return {};
|
||||
|
||||
/** Nested subqueries cannot access outer subqueries table expressions from JOIN tree because
|
||||
@ -1347,7 +1376,7 @@ IdentifierResolveResult QueryAnalyzer::tryResolveIdentifier(const IdentifierLook
|
||||
|
||||
if (!resolve_result.resolved_identifier)
|
||||
{
|
||||
bool prefer_column_name_to_alias = scope.context->getSettingsRef().prefer_column_name_to_alias;
|
||||
bool prefer_column_name_to_alias = scope.context->getSettingsRef()[Setting::prefer_column_name_to_alias];
|
||||
|
||||
if (identifier_lookup.isExpressionLookup())
|
||||
{
|
||||
@ -1558,10 +1587,10 @@ GetColumnsOptions QueryAnalyzer::buildGetColumnsOptions(QueryTreeNodePtr & match
|
||||
|
||||
const auto & settings = context->getSettingsRef();
|
||||
|
||||
if (settings.asterisk_include_alias_columns)
|
||||
if (settings[Setting::asterisk_include_alias_columns])
|
||||
get_columns_options_kind |= GetColumnsOptions::Kind::Aliases;
|
||||
|
||||
if (settings.asterisk_include_materialized_columns)
|
||||
if (settings[Setting::asterisk_include_materialized_columns])
|
||||
get_columns_options_kind |= GetColumnsOptions::Kind::Materialized;
|
||||
}
|
||||
|
||||
@ -2810,7 +2839,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
|
||||
allow_table_expressions /*allow_table_expression*/);
|
||||
|
||||
/// Mask arguments if needed
|
||||
if (!scope.context->getSettingsRef().format_display_secrets_in_show_and_select)
|
||||
if (!scope.context->getSettingsRef()[Setting::format_display_secrets_in_show_and_select])
|
||||
{
|
||||
if (FunctionSecretArgumentsFinder::Result secret_arguments = FunctionSecretArgumentsFinderTreeNode(*function_node_ptr).getResult(); secret_arguments.count)
|
||||
{
|
||||
@ -2834,7 +2863,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
|
||||
if (is_special_function_in)
|
||||
{
|
||||
checkFunctionNodeHasEmptyNullsAction(function_node);
|
||||
if (scope.context->getSettingsRef().transform_null_in)
|
||||
if (scope.context->getSettingsRef()[Setting::transform_null_in])
|
||||
{
|
||||
static constexpr std::array<std::pair<std::string_view, std::string_view>, 4> in_function_to_replace_null_in_function_map =
|
||||
{{
|
||||
@ -3134,7 +3163,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
|
||||
function_arguments_size);
|
||||
checkFunctionNodeHasEmptyNullsAction(function_node);
|
||||
|
||||
bool force_grouping_standard_compatibility = scope.context->getSettingsRef().force_grouping_standard_compatibility;
|
||||
bool force_grouping_standard_compatibility = scope.context->getSettingsRef()[Setting::force_grouping_standard_compatibility];
|
||||
auto grouping_function = std::make_shared<FunctionGrouping>(force_grouping_standard_compatibility);
|
||||
auto grouping_function_adaptor = std::make_shared<FunctionToOverloadResolverAdaptor>(std::move(grouping_function));
|
||||
function_node.resolveAsFunction(grouping_function_adaptor->build(argument_columns));
|
||||
@ -3388,14 +3417,12 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
|
||||
|
||||
const auto & settings = scope.context->getSettingsRef();
|
||||
|
||||
auto result_block = getSetElementsForConstantValue(first_argument_constant_type,
|
||||
second_argument_constant_literal,
|
||||
second_argument_constant_type,
|
||||
settings.transform_null_in);
|
||||
auto result_block = getSetElementsForConstantValue(
|
||||
first_argument_constant_type, second_argument_constant_literal, second_argument_constant_type, settings[Setting::transform_null_in]);
|
||||
|
||||
SizeLimits size_limits_for_set = {settings.max_rows_in_set, settings.max_bytes_in_set, settings.set_overflow_mode};
|
||||
SizeLimits size_limits_for_set = {settings[Setting::max_rows_in_set], settings[Setting::max_bytes_in_set], settings[Setting::set_overflow_mode]};
|
||||
|
||||
auto set = std::make_shared<Set>(size_limits_for_set, 0, settings.transform_null_in);
|
||||
auto set = std::make_shared<Set>(size_limits_for_set, 0, settings[Setting::transform_null_in]);
|
||||
|
||||
set->setHeader(result_block.cloneEmpty().getColumnsWithTypeAndName());
|
||||
set->insertFromBlock(result_block.getColumnsWithTypeAndName());
|
||||
@ -3826,10 +3853,10 @@ ProjectionNames QueryAnalyzer::resolveExpressionNode(
|
||||
}
|
||||
}
|
||||
|
||||
validateTreeSize(node, scope.context->getSettingsRef().max_expanded_ast_elements, node_to_tree_size);
|
||||
validateTreeSize(node, scope.context->getSettingsRef()[Setting::max_expanded_ast_elements], node_to_tree_size);
|
||||
|
||||
/// Lambda can be inside the aggregate function, so we should check parent scopes.
|
||||
/// Most likely only the root scope can have an arrgegate function, but let's check all just in case.
|
||||
/// Most likely only the root scope can have an aggregate function, but let's check all just in case.
|
||||
bool in_aggregate_function_scope = false;
|
||||
for (const auto * scope_ptr = &scope; scope_ptr; scope_ptr = scope_ptr->parent_scope)
|
||||
in_aggregate_function_scope = in_aggregate_function_scope || scope_ptr->expressions_in_resolve_process_stack.hasAggregateFunction();
|
||||
@ -4473,9 +4500,9 @@ void QueryAnalyzer::initializeTableExpressionData(const QueryTreeNodePtr & table
|
||||
if (auto * scope_query_node = scope.scope_node->as<QueryNode>())
|
||||
{
|
||||
auto left_table_expression = extractLeftTableExpression(scope_query_node->getJoinTree());
|
||||
if (table_expression_node.get() == left_table_expression.get() &&
|
||||
scope.joins_count == 1 && scope.context->getSettingsRef().single_join_prefer_left_table)
|
||||
table_expression_data.should_qualify_columns = false;
|
||||
if (table_expression_node.get() == left_table_expression.get() && scope.joins_count == 1
|
||||
&& scope.context->getSettingsRef()[Setting::single_join_prefer_left_table])
|
||||
table_expression_data.should_qualify_columns = false;
|
||||
}
|
||||
|
||||
scope.table_expression_node_to_data.emplace(table_expression_node, std::move(table_expression_data));
|
||||
@ -4672,11 +4699,10 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
|
||||
table_function_ptr->parseArguments(table_function_ast, scope_context);
|
||||
|
||||
|
||||
uint64_t use_structure_from_insertion_table_in_table_functions = scope_context->getSettingsRef().use_structure_from_insertion_table_in_table_functions;
|
||||
if (!nested_table_function &&
|
||||
use_structure_from_insertion_table_in_table_functions &&
|
||||
scope_context->hasInsertionTable() &&
|
||||
table_function_ptr->needStructureHint())
|
||||
uint64_t use_structure_from_insertion_table_in_table_functions
|
||||
= scope_context->getSettingsRef()[Setting::use_structure_from_insertion_table_in_table_functions];
|
||||
if (!nested_table_function && use_structure_from_insertion_table_in_table_functions && scope_context->hasInsertionTable()
|
||||
&& table_function_ptr->needStructureHint())
|
||||
{
|
||||
const auto & insertion_table = scope_context->getInsertionTable();
|
||||
if (!insertion_table.empty())
|
||||
@ -4806,8 +4832,8 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
|
||||
|
||||
if (!structure_hint.empty())
|
||||
table_function_ptr->setStructureHint(structure_hint);
|
||||
|
||||
} else if (use_structure_from_insertion_table_in_table_functions == 1)
|
||||
}
|
||||
else if (use_structure_from_insertion_table_in_table_functions == 1)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_COLUMNS_DOESNT_MATCH, "Number of columns in insert table less than required by SELECT expression.");
|
||||
}
|
||||
}
|
||||
@ -4931,7 +4957,7 @@ void QueryAnalyzer::resolveArrayJoin(QueryTreeNodePtr & array_join_node, Identif
|
||||
void QueryAnalyzer::checkDuplicateTableNamesOrAlias(const QueryTreeNodePtr & join_node, QueryTreeNodePtr & left_table_expr, QueryTreeNodePtr & right_table_expr, IdentifierResolveScope & scope)
|
||||
{
|
||||
Names column_names;
|
||||
if (!scope.context->getSettingsRef().joined_subquery_requires_alias)
|
||||
if (!scope.context->getSettingsRef()[Setting::joined_subquery_requires_alias])
|
||||
return;
|
||||
|
||||
if (join_node->as<JoinNode &>().getKind() != JoinKind::Paste)
|
||||
@ -5051,7 +5077,7 @@ void QueryAnalyzer::resolveJoin(QueryTreeNodePtr & join_node, IdentifierResolveS
|
||||
* despite the fact that column from USING could be resolved from left table.
|
||||
* It's compatibility with a default behavior for old analyzer.
|
||||
*/
|
||||
if (settings.analyzer_compatibility_join_using_top_level_identifier)
|
||||
if (settings[Setting::analyzer_compatibility_join_using_top_level_identifier])
|
||||
result_left_table_expression = try_resolve_identifier_from_query_projection(identifier_full_name, join_node_typed.getLeftTableExpression(), scope);
|
||||
|
||||
IdentifierLookup identifier_lookup{identifier_node->getIdentifier(), IdentifierLookupContext::EXPRESSION};
|
||||
@ -5070,7 +5096,7 @@ void QueryAnalyzer::resolveJoin(QueryTreeNodePtr & join_node, IdentifierResolveS
|
||||
{
|
||||
String extra_message;
|
||||
const QueryNode * query_node = scope.scope_node ? scope.scope_node->as<QueryNode>() : nullptr;
|
||||
if (settings.analyzer_compatibility_join_using_top_level_identifier && query_node)
|
||||
if (settings[Setting::analyzer_compatibility_join_using_top_level_identifier] && query_node)
|
||||
{
|
||||
for (const auto & projection_node : query_node->getProjection().getNodes())
|
||||
{
|
||||
@ -5250,11 +5276,9 @@ void QueryAnalyzer::resolveQueryJoinTreeNode(QueryTreeNodePtr & join_tree_node,
|
||||
*/
|
||||
void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, IdentifierResolveScope & scope)
|
||||
{
|
||||
size_t max_subquery_depth = scope.context->getSettingsRef().max_subquery_depth;
|
||||
size_t max_subquery_depth = scope.context->getSettingsRef()[Setting::max_subquery_depth];
|
||||
if (max_subquery_depth && scope.subquery_depth > max_subquery_depth)
|
||||
throw Exception(ErrorCodes::TOO_DEEP_SUBQUERIES,
|
||||
"Too deep subqueries. Maximum: {}",
|
||||
max_subquery_depth);
|
||||
throw Exception(ErrorCodes::TOO_DEEP_SUBQUERIES, "Too deep subqueries. Maximum: {}", max_subquery_depth);
|
||||
|
||||
auto & query_node_typed = query_node->as<QueryNode &>();
|
||||
|
||||
@ -5588,7 +5612,7 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
|
||||
expandGroupByAll(query_node_typed);
|
||||
|
||||
validateFilters(query_node);
|
||||
validateAggregates(query_node, { .group_by_use_nulls = scope.group_by_use_nulls });
|
||||
validateAggregates(query_node, {.group_by_use_nulls = scope.group_by_use_nulls});
|
||||
|
||||
for (const auto & column : projection_columns)
|
||||
{
|
||||
|
@@ -14,6 +14,10 @@

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsSeconds lock_acquire_timeout;
+}

 TableNode::TableNode(StoragePtr storage_, StorageID storage_id_, TableLockHolder storage_lock_, StorageSnapshotPtr storage_snapshot_)
     : IQueryTreeNode(children_size)
@@ -29,9 +33,10 @@ TableNode::TableNode(StoragePtr storage_, TableLockHolder storage_lock_, Storage
 }

 TableNode::TableNode(StoragePtr storage_, const ContextPtr & context)
-    : TableNode(storage_,
-        storage_->lockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout),
-        storage_->getStorageSnapshot(storage_->getInMemoryMetadataPtr(), context))
+    : TableNode(
+        storage_,
+        storage_->lockForShare(context->getInitialQueryId(), context->getSettingsRef()[Setting::lock_acquire_timeout]),
+        storage_->getStorageSnapshot(storage_->getInMemoryMetadataPtr(), context))
 {
 }

@@ -39,7 +44,7 @@ void TableNode::updateStorage(StoragePtr storage_value, const ContextPtr & conte
 {
     storage = std::move(storage_value);
     storage_id = storage->getStorageID();
-    storage_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);
+    storage_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef()[Setting::lock_acquire_timeout]);
     storage_snapshot = storage->getStorageSnapshot(storage->getInMemoryMetadataPtr(), context);
 }

@@ -36,6 +36,12 @@

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsBool extremes;
+    extern const SettingsUInt64 max_result_bytes;
+    extern const SettingsUInt64 max_result_rows;
+}

 namespace ErrorCodes
 {
@@ -868,10 +874,10 @@ void updateContextForSubqueryExecution(ContextMutablePtr & mutable_context)
       * which are checked separately (in the Set, Join objects).
       */
     Settings subquery_settings = mutable_context->getSettingsCopy();
-    subquery_settings.max_result_rows = 0;
-    subquery_settings.max_result_bytes = 0;
+    subquery_settings[Setting::max_result_rows] = 0;
+    subquery_settings[Setting::max_result_bytes] = 0;
     /// The calculation of extremes does not make sense and is not necessary (if you do it, then the extremes of the subquery can be taken for whole query).
-    subquery_settings.extremes = false;
+    subquery_settings[Setting::extremes] = false;
     mutable_context->setSettings(subquery_settings);
 }

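The subquery hunk keeps a long-standing copy-modify-apply pattern: take a value copy of the current settings, strip the limits that only make sense for the outer query, and install the copy back on the context. A rough sketch of that flow, with plain stand-in `Settings`/`Context` types rather than the real classes:

#include <cstdint>

/// Stand-in types for the sketch; the real Settings/Context are far richer.
struct Settings
{
    uint64_t max_result_rows = 1000;
    uint64_t max_result_bytes = 1 << 20;
    bool extremes = true;
};

struct Context
{
    Settings settings;
    Settings getSettingsCopy() const { return settings; }   // value copy, safe to mutate
    void setSettings(const Settings & s) { settings = s; }  // replace wholesale
};

void updateContextForSubqueryExecution(Context & ctx)
{
    Settings subquery_settings = ctx.getSettingsCopy();
    /// Result-size limits apply to the final result, not to intermediate subquery results.
    subquery_settings.max_result_rows = 0;
    subquery_settings.max_result_bytes = 0;
    /// Extremes of a subquery must not be mistaken for extremes of the whole query.
    subquery_settings.extremes = false;
    ctx.setSettings(subquery_settings);
}

int main()
{
    Context ctx;
    updateContextForSubqueryExecution(ctx);
    return ctx.settings.extremes ? 1 : 0;  // expect 0: extremes disabled
}
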
@@ -35,6 +35,13 @@ namespace ProfileEvents

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsUInt64 backup_restore_keeper_retry_initial_backoff_ms;
+    extern const SettingsUInt64 backup_restore_keeper_retry_max_backoff_ms;
+    extern const SettingsUInt64 backup_restore_keeper_max_retries;
+    extern const SettingsSeconds lock_acquire_timeout;
+}

 namespace ErrorCodes
 {
@@ -105,9 +112,9 @@ BackupEntriesCollector::BackupEntriesCollector(
     , compare_collected_metadata(context->getConfigRef().getBool("backups.compare_collected_metadata", true))
     , log(getLogger("BackupEntriesCollector"))
     , global_zookeeper_retries_info(
-          context->getSettingsRef().backup_restore_keeper_max_retries,
-          context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms,
-          context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms)
+          context->getSettingsRef()[Setting::backup_restore_keeper_max_retries],
+          context->getSettingsRef()[Setting::backup_restore_keeper_retry_initial_backoff_ms],
+          context->getSettingsRef()[Setting::backup_restore_keeper_retry_max_backoff_ms])
     , threadpool(threadpool_)
 {
 }
@@ -653,7 +660,7 @@ void BackupEntriesCollector::lockTablesForReading()

         checkIsQueryCancelled();

-        table_info.table_lock = storage->tryLockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);
+        table_info.table_lock = storage->tryLockForShare(context->getInitialQueryId(), context->getSettingsRef()[Setting::lock_acquire_timeout]);
     }

     std::erase_if(

@@ -25,6 +25,16 @@ namespace fs = std::filesystem;

 namespace DB
 {

+namespace Setting
+{
+    extern const SettingsUInt64 backup_restore_s3_retry_attempts;
+    extern const SettingsBool enable_s3_requests_logging;
+    extern const SettingsBool s3_disable_checksum;
+    extern const SettingsUInt64 s3_max_connections;
+    extern const SettingsUInt64 s3_max_redirects;
+}
+
 namespace ErrorCodes
 {
     extern const int S3_ERROR;
@@ -55,16 +65,16 @@ namespace
         S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
             settings.auth_settings.region,
             context->getRemoteHostFilter(),
-            static_cast<unsigned>(local_settings.s3_max_redirects),
-            static_cast<unsigned>(local_settings.backup_restore_s3_retry_attempts),
-            local_settings.enable_s3_requests_logging,
+            static_cast<unsigned>(local_settings[Setting::s3_max_redirects]),
+            static_cast<unsigned>(local_settings[Setting::backup_restore_s3_retry_attempts]),
+            local_settings[Setting::enable_s3_requests_logging],
             /* for_disk_s3 = */ false,
             request_settings.get_request_throttler,
             request_settings.put_request_throttler,
             s3_uri.uri.getScheme());

         client_configuration.endpointOverride = s3_uri.endpoint;
-        client_configuration.maxConnections = static_cast<unsigned>(global_settings.s3_max_connections);
+        client_configuration.maxConnections = static_cast<unsigned>(global_settings[Setting::s3_max_connections]);
         /// Increase connect timeout
         client_configuration.connectTimeoutMs = 10 * 1000;
         /// Requests in backups can be extremely long, set to one hour
@@ -74,7 +84,7 @@ namespace

         S3::ClientSettings client_settings{
             .use_virtual_addressing = s3_uri.is_virtual_hosted_style,
-            .disable_checksum = local_settings.s3_disable_checksum,
+            .disable_checksum = local_settings[Setting::s3_disable_checksum],
             .gcs_issue_compose_request = context->getConfigRef().getBool("s3.gcs_issue_compose_request", false),
             .is_s3express_bucket = S3::isS3ExpressEndpoint(s3_uri.endpoint),
         };

@@ -43,6 +43,15 @@ namespace CurrentMetrics

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsUInt64 backup_restore_batch_size_for_keeper_multiread;
+    extern const SettingsUInt64 backup_restore_keeper_max_retries;
+    extern const SettingsUInt64 backup_restore_keeper_retry_initial_backoff_ms;
+    extern const SettingsUInt64 backup_restore_keeper_retry_max_backoff_ms;
+    extern const SettingsUInt64 backup_restore_keeper_fault_injection_seed;
+    extern const SettingsFloat backup_restore_keeper_fault_injection_probability;
+}

 namespace ErrorCodes
 {
@@ -98,12 +107,12 @@ namespace

         RestoreCoordinationRemote::RestoreKeeperSettings keeper_settings
         {
-            .keeper_max_retries = context->getSettingsRef().backup_restore_keeper_max_retries,
-            .keeper_retry_initial_backoff_ms = context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms,
-            .keeper_retry_max_backoff_ms = context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms,
-            .batch_size_for_keeper_multiread = context->getSettingsRef().backup_restore_batch_size_for_keeper_multiread,
-            .keeper_fault_injection_probability = context->getSettingsRef().backup_restore_keeper_fault_injection_probability,
-            .keeper_fault_injection_seed = context->getSettingsRef().backup_restore_keeper_fault_injection_seed
+            .keeper_max_retries = context->getSettingsRef()[Setting::backup_restore_keeper_max_retries],
+            .keeper_retry_initial_backoff_ms = context->getSettingsRef()[Setting::backup_restore_keeper_retry_initial_backoff_ms],
+            .keeper_retry_max_backoff_ms = context->getSettingsRef()[Setting::backup_restore_keeper_retry_max_backoff_ms],
+            .batch_size_for_keeper_multiread = context->getSettingsRef()[Setting::backup_restore_batch_size_for_keeper_multiread],
+            .keeper_fault_injection_probability = context->getSettingsRef()[Setting::backup_restore_keeper_fault_injection_probability],
+            .keeper_fault_injection_seed = context->getSettingsRef()[Setting::backup_restore_keeper_fault_injection_seed]
         };

         auto all_hosts = BackupSettings::Util::filterHostIDs(

@@ -36,6 +36,11 @@ namespace fs = std::filesystem;

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsSeconds lock_acquire_timeout;
+}

 namespace ErrorCodes
 {
     extern const int BACKUP_ENTRY_NOT_FOUND;
@@ -949,7 +954,7 @@ void RestorerFromBackup::checkTable(const QualifiedTableName & table_name)

         StoragePtr storage = database->getTable(resolved_id.table_name, context);
         table_info.storage = storage;
-        table_info.table_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);
+        table_info.table_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef()[Setting::lock_acquire_timeout]);

         if (!restore_settings.allow_different_table_def && !table_info.is_predefined_table)
         {

@@ -5,20 +5,30 @@

 namespace DB
 {

+namespace Setting
+{
+    extern const SettingsUInt64 backup_restore_keeper_max_retries;
+    extern const SettingsUInt64 backup_restore_keeper_retry_initial_backoff_ms;
+    extern const SettingsUInt64 backup_restore_keeper_retry_max_backoff_ms;
+    extern const SettingsUInt64 backup_restore_batch_size_for_keeper_multiread;
+    extern const SettingsFloat backup_restore_keeper_fault_injection_probability;
+    extern const SettingsUInt64 backup_restore_keeper_fault_injection_seed;
+    extern const SettingsUInt64 backup_restore_keeper_value_max_size;
+    extern const SettingsUInt64 backup_restore_batch_size_for_keeper_multi;
+}
+
 WithRetries::KeeperSettings WithRetries::KeeperSettings::fromContext(ContextPtr context)
 {
     return
     {
-        .keeper_max_retries = context->getSettingsRef().backup_restore_keeper_max_retries,
-        .keeper_retry_initial_backoff_ms = context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms,
-        .keeper_retry_max_backoff_ms = context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms,
-        .batch_size_for_keeper_multiread = context->getSettingsRef().backup_restore_batch_size_for_keeper_multiread,
-        .keeper_fault_injection_probability = context->getSettingsRef().backup_restore_keeper_fault_injection_probability,
-        .keeper_fault_injection_seed = context->getSettingsRef().backup_restore_keeper_fault_injection_seed,
-        .keeper_value_max_size = context->getSettingsRef().backup_restore_keeper_value_max_size,
-        .batch_size_for_keeper_multi = context->getSettingsRef().backup_restore_batch_size_for_keeper_multi,
+        .keeper_max_retries = context->getSettingsRef()[Setting::backup_restore_keeper_max_retries],
+        .keeper_retry_initial_backoff_ms = context->getSettingsRef()[Setting::backup_restore_keeper_retry_initial_backoff_ms],
+        .keeper_retry_max_backoff_ms = context->getSettingsRef()[Setting::backup_restore_keeper_retry_max_backoff_ms],
+        .batch_size_for_keeper_multiread = context->getSettingsRef()[Setting::backup_restore_batch_size_for_keeper_multiread],
+        .keeper_fault_injection_probability = context->getSettingsRef()[Setting::backup_restore_keeper_fault_injection_probability],
+        .keeper_fault_injection_seed = context->getSettingsRef()[Setting::backup_restore_keeper_fault_injection_seed],
+        .keeper_value_max_size = context->getSettingsRef()[Setting::backup_restore_keeper_value_max_size],
+        .batch_size_for_keeper_multi = context->getSettingsRef()[Setting::backup_restore_batch_size_for_keeper_multi],
     };
 }

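`fromContext` snapshots the retry-related settings into a plain struct once, so the retry machinery never has to touch the context again. A sketch of that snapshot pattern together with a capped exponential-backoff consumer; all names here are illustrative, not the real WithRetries API:

#include <algorithm>
#include <cstdint>

/// Illustrative snapshot of retry-related settings, taken once up front.
struct KeeperRetrySettings
{
    uint64_t max_retries = 20;
    uint64_t initial_backoff_ms = 100;
    uint64_t max_backoff_ms = 5000;
};

/// Consumer: classic capped exponential backoff driven by the snapshot.
uint64_t backoffForAttempt(const KeeperRetrySettings & s, uint64_t attempt)
{
    uint64_t backoff = s.initial_backoff_ms;
    for (uint64_t i = 0; i < attempt && backoff < s.max_backoff_ms; ++i)
        backoff *= 2;  // double until the cap is reached
    return std::min(backoff, s.max_backoff_ms);
}

int main()
{
    KeeperRetrySettings settings;  // would come from a fromContext(...)-style call in real code
    return backoffForAttempt(settings, 10) == settings.max_backoff_ms ? 0 : 1;
}
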
@@ -10,6 +10,8 @@

 namespace DB
 {
+struct ColumnWithTypeAndName;
+using ColumnsWithTypeAndName = std::vector<ColumnWithTypeAndName>;

 class CatBoostLibraryBridgeHelper final : public LibraryBridgeHelper
 {

@@ -1,11 +1,12 @@
 #pragma once

 #include <Interpreters/Context.h>
-#include <Poco/Logger.h>
-#include <Poco/Util/AbstractConfiguration.h>
-#include <Poco/Net/HTTPRequest.h>
 #include <Common/ShellCommand.h>

+#include <Poco/Logger.h>
+#include <Poco/Net/HTTPRequest.h>
+#include <Poco/URI.h>
+#include <Poco/Util/AbstractConfiguration.h>
+
 namespace DB
 {

@@ -6,12 +6,16 @@

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsSeconds http_receive_timeout;
+}

 LibraryBridgeHelper::LibraryBridgeHelper(ContextPtr context_)
     : IBridgeHelper(context_)
     , config(context_->getConfigRef())
     , log(getLogger("LibraryBridgeHelper"))
-    , http_timeout(context_->getGlobalContext()->getSettingsRef().http_receive_timeout.value)
+    , http_timeout(context_->getGlobalContext()->getSettingsRef()[Setting::http_receive_timeout].value)
     , bridge_host(config.getString("library_bridge.host", DEFAULT_HOST))
     , bridge_port(config.getUInt("library_bridge.port", DEFAULT_PORT))
     , http_timeouts(ConnectionTimeouts::getHTTPTimeouts(context_->getSettingsRef(), context_->getServerSettings().keep_alive_timeout))

@@ -2,7 +2,6 @@

 #include <base/argsToConfig.h>
 #include <base/safeExit.h>
-#include <Core/BaseSettingsProgramOptions.h>
 #include <Common/clearPasswordFromCommandLine.h>
 #include <Common/TerminalSize.h>
 #include <Common/Exception.h>

@@ -7,7 +7,6 @@

 #include <base/safeExit.h>
 #include <Core/Block.h>
-#include <Core/BaseSettingsProgramOptions.h>
 #include <Core/Protocol.h>
 #include <Common/DateLUT.h>
 #include <Common/MemoryTracker.h>
@@ -93,6 +92,21 @@ using namespace std::literals;

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsBool allow_settings_after_format_in_insert;
+    extern const SettingsBool async_insert;
+    extern const SettingsDialect dialect;
+    extern const SettingsUInt64 max_block_size;
+    extern const SettingsUInt64 max_insert_block_size;
+    extern const SettingsUInt64 max_parser_backtracks;
+    extern const SettingsUInt64 max_parser_depth;
+    extern const SettingsUInt64 max_query_size;
+    extern const SettingsUInt64 output_format_pretty_max_rows;
+    extern const SettingsUInt64 output_format_pretty_max_value_width;
+    extern const SettingsBool partial_result_on_first_cancel;
+    extern const SettingsBool throw_if_no_data_to_insert;
+}

 namespace ErrorCodes
 {
@@ -295,24 +309,24 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, const Setting
     size_t max_length = 0;

     if (!allow_multi_statements)
-        max_length = settings.max_query_size;
+        max_length = settings[Setting::max_query_size];

-    const Dialect & dialect = settings.dialect;
+    const Dialect dialect = settings[Setting::dialect];

     if (dialect == Dialect::kusto)
-        parser = std::make_unique<ParserKQLStatement>(end, settings.allow_settings_after_format_in_insert);
+        parser = std::make_unique<ParserKQLStatement>(end, settings[Setting::allow_settings_after_format_in_insert]);
     else if (dialect == Dialect::prql)
-        parser = std::make_unique<ParserPRQLQuery>(max_length, settings.max_parser_depth, settings.max_parser_backtracks);
+        parser = std::make_unique<ParserPRQLQuery>(max_length, settings[Setting::max_parser_depth], settings[Setting::max_parser_backtracks]);
     else
-        parser = std::make_unique<ParserQuery>(end, settings.allow_settings_after_format_in_insert);
+        parser = std::make_unique<ParserQuery>(end, settings[Setting::allow_settings_after_format_in_insert]);

     if (is_interactive || ignore_error)
     {
         String message;
         if (dialect == Dialect::kusto)
-            res = tryParseKQLQuery(*parser, pos, end, message, true, "", allow_multi_statements, max_length, settings.max_parser_depth, settings.max_parser_backtracks, true);
+            res = tryParseKQLQuery(*parser, pos, end, message, true, "", allow_multi_statements, max_length, settings[Setting::max_parser_depth], settings[Setting::max_parser_backtracks], true);
         else
-            res = tryParseQuery(*parser, pos, end, message, true, "", allow_multi_statements, max_length, settings.max_parser_depth, settings.max_parser_backtracks, true);
+            res = tryParseQuery(*parser, pos, end, message, true, "", allow_multi_statements, max_length, settings[Setting::max_parser_depth], settings[Setting::max_parser_backtracks], true);

         if (!res)
         {
@@ -323,9 +337,9 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, const Setting
     else
     {
         if (dialect == Dialect::kusto)
-            res = parseKQLQueryAndMovePosition(*parser, pos, end, "", allow_multi_statements, max_length, settings.max_parser_depth, settings.max_parser_backtracks);
+            res = parseKQLQueryAndMovePosition(*parser, pos, end, "", allow_multi_statements, max_length, settings[Setting::max_parser_depth], settings[Setting::max_parser_backtracks]);
         else
-            res = parseQueryAndMovePosition(*parser, pos, end, "", allow_multi_statements, max_length, settings.max_parser_depth, settings.max_parser_backtracks);
+            res = parseQueryAndMovePosition(*parser, pos, end, "", allow_multi_statements, max_length, settings[Setting::max_parser_depth], settings[Setting::max_parser_backtracks]);
     }

     if (is_interactive)
@@ -346,7 +360,8 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, const Setting


 /// Consumes trailing semicolons and tries to consume the same-line trailing comment.
-void ClientBase::adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, uint32_t max_parser_depth, uint32_t max_parser_backtracks)
+void ClientBase::adjustQueryEnd(
+    const char *& this_query_end, const char * all_queries_end, uint32_t max_parser_depth, uint32_t max_parser_backtracks)
 {
     // We have to skip the trailing semicolon that might be left
     // after VALUES parsing or just after a normal semicolon-terminated query.
@@ -672,16 +687,16 @@ void ClientBase::adjustSettings()
     /// Do not limit pretty format output in case of --pager specified or in case of stdout is not a tty.
     if (!pager.empty() || !stdout_is_a_tty)
     {
-        if (!global_context->getSettingsRef().output_format_pretty_max_rows.changed)
+        if (!global_context->getSettingsRef()[Setting::output_format_pretty_max_rows].changed)
        {
-            settings.output_format_pretty_max_rows = std::numeric_limits<UInt64>::max();
-            settings.output_format_pretty_max_rows.changed = false;
+            settings[Setting::output_format_pretty_max_rows] = std::numeric_limits<UInt64>::max();
+            settings[Setting::output_format_pretty_max_rows].changed = false;
         }

-        if (!global_context->getSettingsRef().output_format_pretty_max_value_width.changed)
+        if (!global_context->getSettingsRef()[Setting::output_format_pretty_max_value_width].changed)
        {
-            settings.output_format_pretty_max_value_width = std::numeric_limits<UInt64>::max();
-            settings.output_format_pretty_max_value_width.changed = false;
+            settings[Setting::output_format_pretty_max_value_width] = std::numeric_limits<UInt64>::max();
+            settings[Setting::output_format_pretty_max_value_width].changed = false;
         }
     }

@@ -767,18 +782,17 @@ void ClientBase::setDefaultFormatsAndCompressionFromConfiguration()
         default_input_format = "TSV";
     }

-    format_max_block_size = getClientConfiguration().getUInt64("format_max_block_size",
-        global_context->getSettingsRef().max_block_size);
+    format_max_block_size = getClientConfiguration().getUInt64("format_max_block_size", global_context->getSettingsRef()[Setting::max_block_size]);

     /// Setting value from cmd arg overrides one from config
-    if (global_context->getSettingsRef().max_insert_block_size.changed)
+    if (global_context->getSettingsRef()[Setting::max_insert_block_size].changed)
     {
-        insert_format_max_block_size = global_context->getSettingsRef().max_insert_block_size;
+        insert_format_max_block_size = global_context->getSettingsRef()[Setting::max_insert_block_size];
     }
     else
     {
-        insert_format_max_block_size = getClientConfiguration().getUInt64("insert_format_max_block_size",
-            global_context->getSettingsRef().max_insert_block_size);
+        insert_format_max_block_size
+            = getClientConfiguration().getUInt64("insert_format_max_block_size", global_context->getSettingsRef()[Setting::max_insert_block_size]);
     }
 }

@@ -878,7 +892,7 @@ bool ClientBase::isSyncInsertWithData(const ASTInsertQuery & insert_query, const
     if (insert_query.settings_ast)
         settings.applyChanges(insert_query.settings_ast->as<ASTSetQuery>()->changes);

-    return !settings.async_insert;
+    return !settings[Setting::async_insert];
 }

 void ClientBase::processTextAsSingleQuery(const String & full_query)
@@ -1031,7 +1045,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
     }

     const auto & settings = client_context->getSettingsRef();
-    const Int32 signals_before_stop = settings.partial_result_on_first_cancel ? 2 : 1;
+    const Int32 signals_before_stop = settings[Setting::partial_result_on_first_cancel] ? 2 : 1;

     int retries_left = 10;
     while (retries_left)
@@ -1059,11 +1073,11 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
             catch (const NetException &)
             {
                 // We still want to attempt to process whatever we already received or can receive (socket receive buffer can be not empty)
-                receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel);
+                receiveResult(parsed_query, signals_before_stop, settings[Setting::partial_result_on_first_cancel]);
                 throw;
             }

-            receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel);
+            receiveResult(parsed_query, signals_before_stop, settings[Setting::partial_result_on_first_cancel]);

             break;
         }
@@ -1491,7 +1505,7 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars
     if ((!parsed_insert_query.data && !parsed_insert_query.infile) && (is_interactive || (!stdin_is_a_tty && !isStdinNotEmptyAndValid(std_in))))
     {
         const auto & settings = client_context->getSettingsRef();
-        if (settings.throw_if_no_data_to_insert)
+        if (settings[Setting::throw_if_no_data_to_insert])
             throw Exception(ErrorCodes::NO_DATA_TO_INSERT, "No data to insert");
         else
             return;
@@ -1609,14 +1623,14 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
         auto metadata = storage->getInMemoryMetadataPtr();
         QueryPlan plan;
         storage->read(
-                plan,
-                sample.getNames(),
-                storage->getStorageSnapshot(metadata, client_context),
-                query_info,
-                client_context,
-                {},
-                client_context->getSettingsRef().max_block_size,
-                getNumberOfPhysicalCPUCores());
+            plan,
+            sample.getNames(),
+            storage->getStorageSnapshot(metadata, client_context),
+            query_info,
+            client_context,
+            {},
+            client_context->getSettingsRef()[Setting::max_block_size],
+            getNumberOfPhysicalCPUCores());

         auto builder = plan.buildQueryPipeline(
             QueryPlanOptimizationSettings::fromContext(client_context),
@@ -1953,7 +1967,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
     if (insert && insert->select)
         insert->tryFindInputFunction(input_function);

-    bool is_async_insert_with_inlined_data = client_context->getSettingsRef().async_insert && insert && insert->hasInlinedData();
+    bool is_async_insert_with_inlined_data = client_context->getSettingsRef()[Setting::async_insert] && insert && insert->hasInlinedData();

     if (is_async_insert_with_inlined_data)
     {
@@ -2038,11 +2052,11 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
             error_stream << progress_indication.elapsedSeconds() << "\n";

         const auto & print_memory_mode = config.getString("print-memory-to-stderr", "");
-        auto peak_memeory_usage = std::max<Int64>(progress_indication.getMemoryUsage().peak, 0);
+        auto peak_memory_usage = std::max<Int64>(progress_indication.getMemoryUsage().peak, 0);
         if (print_memory_mode == "default")
-            error_stream << peak_memeory_usage << "\n";
+            error_stream << peak_memory_usage << "\n";
         else if (print_memory_mode == "readable")
-            error_stream << formatReadableSizeWithBinarySuffix(peak_memeory_usage) << "\n";
+            error_stream << formatReadableSizeWithBinarySuffix(peak_memory_usage) << "\n";
     }

     if (!is_interactive && getClientConfiguration().getBool("print-num-processed-rows", false))
@@ -2075,8 +2089,8 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
     if (this_query_begin >= all_queries_end)
         return MultiQueryProcessingStage::QUERIES_END;

-    unsigned max_parser_depth = static_cast<unsigned>(client_context->getSettingsRef().max_parser_depth);
-    unsigned max_parser_backtracks = static_cast<unsigned>(client_context->getSettingsRef().max_parser_backtracks);
+    unsigned max_parser_depth = static_cast<unsigned>(client_context->getSettingsRef()[Setting::max_parser_depth]);
+    unsigned max_parser_backtracks = static_cast<unsigned>(client_context->getSettingsRef()[Setting::max_parser_backtracks]);

     // If there are only comments left until the end of file, we just
     // stop. The parser can't handle this situation because it always
@@ -2400,9 +2414,10 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
                 {
                     this_query_end = insert_ast->end;
                     adjustQueryEnd(
-                        this_query_end, all_queries_end,
-                        static_cast<unsigned>(client_context->getSettingsRef().max_parser_depth),
-                        static_cast<unsigned>(client_context->getSettingsRef().max_parser_backtracks));
+                        this_query_end,
+                        all_queries_end,
+                        static_cast<unsigned>(client_context->getSettingsRef()[Setting::max_parser_depth]),
+                        static_cast<unsigned>(client_context->getSettingsRef()[Setting::max_parser_backtracks]));
                 }

                 // Report error.

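Both `parseQuery` hunks above leave the dialect dispatch untouched and only change how the `dialect` setting is read: KQL and PRQL queries get their own parsers, everything else goes through the standard SQL parser. A compact sketch of the same dispatch; the `*Sketch` parser classes are placeholders, not the real parser hierarchy:

#include <memory>
#include <string>

enum class Dialect { clickhouse, kusto, prql };

/// Placeholder parser hierarchy for the sketch.
struct IParser { virtual ~IParser() = default; virtual std::string name() const = 0; };
struct ParserQuerySketch : IParser { std::string name() const override { return "ClickHouse SQL"; } };
struct ParserKQLSketch : IParser { std::string name() const override { return "KQL"; } };
struct ParserPRQLSketch : IParser { std::string name() const override { return "PRQL"; } };

/// Mirrors the if/else chain in the hunk: the dialect setting selects the grammar,
/// everything downstream consumes the resulting AST uniformly.
std::unique_ptr<IParser> makeParser(Dialect dialect)
{
    if (dialect == Dialect::kusto)
        return std::make_unique<ParserKQLSketch>();
    if (dialect == Dialect::prql)
        return std::make_unique<ParserPRQLSketch>();
    return std::make_unique<ParserQuerySketch>();
}

int main()
{
    return makeParser(Dialect::prql)->name() == "PRQL" ? 0 : 1;
}
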
@@ -83,9 +83,9 @@ private:
 void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments)
 {
     if (allow_repeated_settings)
-        addProgramOptionsAsMultitokens(cmd_settings, options_description.main_description.value());
+        cmd_settings.addToProgramOptionsAsMultitokens(options_description.main_description.value());
     else
-        addProgramOptions(cmd_settings, options_description.main_description.value());
+        cmd_settings.addToProgramOptions(options_description.main_description.value());

     if (allow_merge_tree_settings)
     {

@@ -51,6 +51,15 @@ namespace CurrentMetrics

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsBool allow_experimental_codecs;
+    extern const SettingsBool allow_suspicious_codecs;
+    extern const SettingsBool enable_deflate_qpl_codec;
+    extern const SettingsBool enable_zstd_qat_codec;
+    extern const SettingsString network_compression_method;
+    extern const SettingsInt64 network_zstd_compression_level;
+}

 namespace FailPoints
 {
@@ -791,19 +800,19 @@ void Connection::sendQuery(
     if (settings)
     {
         std::optional<int> level;
-        std::string method = Poco::toUpper(settings->network_compression_method.toString());
+        std::string method = Poco::toUpper((*settings)[Setting::network_compression_method].toString());

         /// Bad custom logic
         if (method == "ZSTD")
-            level = settings->network_zstd_compression_level;
+            level = (*settings)[Setting::network_zstd_compression_level];

         CompressionCodecFactory::instance().validateCodec(
             method,
             level,
-            !settings->allow_suspicious_codecs,
-            settings->allow_experimental_codecs,
-            settings->enable_deflate_qpl_codec,
-            settings->enable_zstd_qat_codec);
+            !(*settings)[Setting::allow_suspicious_codecs],
+            (*settings)[Setting::allow_experimental_codecs],
+            (*settings)[Setting::enable_deflate_qpl_codec],
+            (*settings)[Setting::enable_zstd_qat_codec]);
         compression_codec = CompressionCodecFactory::instance().get(method, level);
     }
     else

@@ -14,6 +14,10 @@ namespace ProfileEvents

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsUInt64 max_replica_delay_for_distributed_queries;
+}

 namespace ErrorCodes
 {
@@ -78,7 +82,7 @@ void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::
             LOG_TRACE(log, "Table {}.{} is readonly on server {}", table_to_check->database, table_to_check->table, result.entry->getDescription());
         }

-        const UInt64 max_allowed_delay = settings.max_replica_delay_for_distributed_queries;
+        const UInt64 max_allowed_delay = settings[Setting::max_replica_delay_for_distributed_queries];
         if (!max_allowed_delay)
         {
             result.is_up_to_date = true;

@@ -5,6 +5,10 @@

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsMilliseconds connection_pool_max_wait_ms;
+}

 ConnectionPoolPtr ConnectionPoolFactory::get(
     unsigned max_connections,
@@ -93,7 +97,7 @@ ConnectionPoolFactory & ConnectionPoolFactory::instance()
 IConnectionPool::Entry ConnectionPool::get(const DB::ConnectionTimeouts& timeouts, const DB::Settings& settings,
                                            bool force_connected)
 {
-    Entry entry = Base::get(settings.connection_pool_max_wait_ms.totalMilliseconds());
+    Entry entry = Base::get(settings[Setting::connection_pool_max_wait_ms].totalMilliseconds());

     if (force_connected)
         entry->forceConnected(timeouts);

@@ -16,6 +16,17 @@

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsUInt64 connections_with_failover_max_tries;
+    extern const SettingsBool distributed_insert_skip_read_only_replicas;
+    extern const SettingsUInt64 distributed_replica_max_ignored_errors;
+    extern const SettingsBool fallback_to_stale_replicas_for_distributed_queries;
+    extern const SettingsLoadBalancing load_balancing;
+    extern const SettingsUInt64 load_balancing_first_offset;
+    extern const SettingsNonZeroUInt64 max_parallel_replicas;
+    extern const SettingsBool skip_unavailable_shards;
+}

 namespace ErrorCodes
 {
@@ -47,10 +58,10 @@ ConnectionPoolWithFailover::ConnectionPoolWithFailover(
 IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts & timeouts)
 {
     Settings settings;
-    settings.load_balancing = get_priority_load_balancing.load_balancing;
-    settings.load_balancing_first_offset = 0;
-    settings.distributed_replica_max_ignored_errors = 0;
-    settings.fallback_to_stale_replicas_for_distributed_queries = true;
+    settings[Setting::load_balancing] = get_priority_load_balancing.load_balancing;
+    settings[Setting::load_balancing_first_offset] = 0;
+    settings[Setting::distributed_replica_max_ignored_errors] = 0;
+    settings[Setting::fallback_to_stale_replicas_for_distributed_queries] = true;

     return get(timeouts, settings, /* force_connected= */ true);
 }
@@ -68,13 +79,12 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts
         return tryGetEntry(pool, timeouts, fail_message, settings);
     };

-    const size_t offset = settings.load_balancing_first_offset % nested_pools.size();
-    const LoadBalancing load_balancing = settings.load_balancing;
+    const size_t offset = settings[Setting::load_balancing_first_offset] % nested_pools.size();

-    GetPriorityFunc get_priority = get_priority_load_balancing.getPriorityFunc(load_balancing, offset, nested_pools.size());
+    GetPriorityFunc get_priority = get_priority_load_balancing.getPriorityFunc(settings[Setting::load_balancing], offset, nested_pools.size());

-    const UInt64 max_ignored_errors = settings.distributed_replica_max_ignored_errors;
-    const bool fallback_to_stale_replicas = settings.fallback_to_stale_replicas_for_distributed_queries;
+    const UInt64 max_ignored_errors = settings[Setting::distributed_replica_max_ignored_errors];
+    const bool fallback_to_stale_replicas = settings[Setting::fallback_to_stale_replicas_for_distributed_queries];

     return Base::get(max_ignored_errors, fallback_to_stale_replicas, try_get_entry, get_priority);
 }
@@ -170,15 +180,13 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
     return getManyImpl(settings, pool_mode, try_get_entry,
         /*skip_unavailable_endpoints=*/ false, /// skip_unavailable_endpoints is used to get the min number of entries, and we need at least one
         /*priority_func=*/ {},
-        settings.distributed_insert_skip_read_only_replicas);
+        settings[Setting::distributed_insert_skip_read_only_replicas]);
 }

 ConnectionPoolWithFailover::Base::GetPriorityFunc ConnectionPoolWithFailover::makeGetPriorityFunc(const Settings & settings)
 {
-    const size_t offset = settings.load_balancing_first_offset % nested_pools.size();
-    const LoadBalancing load_balancing = LoadBalancing(settings.load_balancing);
-
-    return get_priority_load_balancing.getPriorityFunc(load_balancing, offset, nested_pools.size());
+    const size_t offset = settings[Setting::load_balancing_first_offset] % nested_pools.size();
+    return get_priority_load_balancing.getPriorityFunc(settings[Setting::load_balancing], offset, nested_pools.size());
 }

 std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::getManyImpl(
@@ -195,11 +203,11 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
             "Cannot get connection from ConnectionPoolWithFailover cause nested pools are empty");

     if (!skip_unavailable_endpoints.has_value())
-        skip_unavailable_endpoints = settings.skip_unavailable_shards;
+        skip_unavailable_endpoints = settings[Setting::skip_unavailable_shards];

     size_t min_entries = skip_unavailable_endpoints.value() ? 0 : 1;

-    size_t max_tries = settings.connections_with_failover_max_tries;
+    size_t max_tries = settings[Setting::connections_with_failover_max_tries];
     size_t max_entries;
     if (pool_mode == PoolMode::GET_ALL)
     {
@@ -212,7 +220,7 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
     }
     else if (pool_mode == PoolMode::GET_MANY)
     {
-        max_entries = settings.max_parallel_replicas;
+        max_entries = settings[Setting::max_parallel_replicas];
     }
     else
     {
@@ -222,8 +230,8 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
     if (!priority_func)
         priority_func = makeGetPriorityFunc(settings);

-    UInt64 max_ignored_errors = settings.distributed_replica_max_ignored_errors.value;
-    bool fallback_to_stale_replicas = settings.fallback_to_stale_replicas_for_distributed_queries.value;
+    UInt64 max_ignored_errors = settings[Setting::distributed_replica_max_ignored_errors].value;
+    bool fallback_to_stale_replicas = settings[Setting::fallback_to_stale_replicas_for_distributed_queries].value;

     return Base::getMany(min_entries, max_entries, max_tries, max_ignored_errors, fallback_to_stale_replicas, skip_read_only_replicas, try_get_entry, priority_func);
 }
@@ -272,7 +280,7 @@ ConnectionPoolWithFailover::getShuffledPools(const Settings & settings, GetPrior
     if (!priority_func)
         priority_func = makeGetPriorityFunc(settings);

-    UInt64 max_ignored_errors = settings.distributed_replica_max_ignored_errors.value;
+    UInt64 max_ignored_errors = settings[Setting::distributed_replica_max_ignored_errors].value;
     return Base::getShuffledPools(max_ignored_errors, priority_func, use_slowdown_count);
 }

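Several hunks above derive a priority function from `load_balancing_first_offset` modulo the number of nested pools. A toy version of one plausible round-robin ranking built from that offset arithmetic follows; the pool machinery itself is elided and this is only one possible reading of how such a priority could be shaped:

#include <cassert>
#include <cstddef>
#include <functional>
#include <vector>

using GetPriorityFunc = std::function<size_t (size_t /*pool_index*/)>;

/// Round-robin sketch: the pool at `offset` gets the best (lowest) priority,
/// followed by the pools after it, wrapping around.
GetPriorityFunc makeRoundRobinPriority(size_t offset, size_t pool_count)
{
    return [offset, pool_count](size_t i) { return (i + pool_count - offset) % pool_count; };
}

int main()
{
    const std::vector<int> pools(4, 0);
    const size_t offset = 6 % pools.size();  // load_balancing_first_offset % nested_pools.size()
    auto priority = makeRoundRobinPriority(offset, pools.size());
    assert(priority(2) == 0);  // pool 2 is tried first
    assert(priority(3) == 1);  // then pool 3, wrapping around
}
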
@@ -14,6 +14,21 @@ namespace ProfileEvents

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsBool allow_changing_replica_until_first_data_packet;
+    extern const SettingsBool allow_experimental_analyzer;
+    extern const SettingsUInt64 connections_with_failover_max_tries;
+    extern const SettingsDialect dialect;
+    extern const SettingsBool fallback_to_stale_replicas_for_distributed_queries;
+    extern const SettingsUInt64 group_by_two_level_threshold;
+    extern const SettingsUInt64 group_by_two_level_threshold_bytes;
+    extern const SettingsNonZeroUInt64 max_parallel_replicas;
+    extern const SettingsUInt64 parallel_replicas_count;
+    extern const SettingsUInt64 parallel_replica_offset;
+    extern const SettingsBool skip_unavailable_shards;
+}

 namespace ErrorCodes
 {
     extern const int MISMATCH_REPLICAS_DATA_SOURCES;
@@ -32,15 +47,15 @@ HedgedConnections::HedgedConnections(
     AsyncCallback async_callback,
     GetPriorityForLoadBalancing::Func priority_func)
     : hedged_connections_factory(
-        pool_,
-        context_->getSettingsRef(),
-        timeouts_,
-        context_->getSettingsRef().connections_with_failover_max_tries.value,
-        context_->getSettingsRef().fallback_to_stale_replicas_for_distributed_queries.value,
-        context_->getSettingsRef().max_parallel_replicas.value,
-        context_->getSettingsRef().skip_unavailable_shards.value,
-        table_to_check_,
-        priority_func)
+          pool_,
+          context_->getSettingsRef(),
+          timeouts_,
+          context_->getSettingsRef()[Setting::connections_with_failover_max_tries].value,
+          context_->getSettingsRef()[Setting::fallback_to_stale_replicas_for_distributed_queries].value,
+          context_->getSettingsRef()[Setting::max_parallel_replicas].value,
+          context_->getSettingsRef()[Setting::skip_unavailable_shards].value,
+          table_to_check_,
+          priority_func)
     , context(std::move(context_))
     , settings(context->getSettingsRef())
     , throttler(throttler_)
@@ -178,29 +193,29 @@ void HedgedConnections::sendQuery(
         Settings modified_settings = settings;

         /// Queries in foreign languages are transformed to ClickHouse-SQL. Ensure the setting before sending.
-        modified_settings.dialect = Dialect::clickhouse;
-        modified_settings.dialect.changed = false;
+        modified_settings[Setting::dialect] = Dialect::clickhouse;
+        modified_settings[Setting::dialect].changed = false;

         if (disable_two_level_aggregation)
         {
             /// Disable two-level aggregation due to version incompatibility.
-            modified_settings.group_by_two_level_threshold = 0;
-            modified_settings.group_by_two_level_threshold_bytes = 0;
+            modified_settings[Setting::group_by_two_level_threshold] = 0;
+            modified_settings[Setting::group_by_two_level_threshold_bytes] = 0;
         }

         const bool enable_offset_parallel_processing = context->canUseOffsetParallelReplicas();

         if (offset_states.size() > 1 && enable_offset_parallel_processing)
         {
-            modified_settings.parallel_replicas_count = offset_states.size();
-            modified_settings.parallel_replica_offset = fd_to_replica_location[replica.packet_receiver->getFileDescriptor()].offset;
+            modified_settings[Setting::parallel_replicas_count] = offset_states.size();
+            modified_settings[Setting::parallel_replica_offset] = fd_to_replica_location[replica.packet_receiver->getFileDescriptor()].offset;
         }

         /// FIXME: Remove once we will make `allow_experimental_analyzer` obsolete setting.
         /// Make the analyzer being set, so it will be effectively applied on the remote server.
         /// In other words, the initiator always controls whether the analyzer enabled or not for
         /// all servers involved in the distributed query processing.
-        modified_settings.set("allow_experimental_analyzer", static_cast<bool>(modified_settings.allow_experimental_analyzer));
+        modified_settings.set("allow_experimental_analyzer", static_cast<bool>(modified_settings[Setting::allow_experimental_analyzer]));

         replica.connection->sendQuery(
             timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
@@ -446,7 +461,7 @@ Packet HedgedConnections::receivePacketFromReplica(const ReplicaLocation & repli
     {
         /// If we are allowed to change replica until the first data packet,
         /// just restart timeout (if it hasn't expired yet). Otherwise disable changing replica with this offset.
-        if (settings.allow_changing_replica_until_first_data_packet && !replica.is_change_replica_timeout_expired)
+        if (settings[Setting::allow_changing_replica_until_first_data_packet] && !replica.is_change_replica_timeout_expired)
             replica.change_replica_timeout.setRelative(hedged_connections_factory.getConnectionTimeouts().receive_data_timeout);
         else
             disableChangingReplica(replica_location);

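Hedged and multiplexed connections share the same pre-flight normalization: copy the settings, pin `dialect` back to ClickHouse SQL (the query text was already translated), and zero the two-level aggregation thresholds for servers too old to negotiate them. A stripped-down sketch of that normalization with a plain struct standing in for the real `Settings`:

#include <cstdint>

enum class Dialect { clickhouse, kusto, prql };

/// Plain stand-in for the per-query settings shipped to each replica.
struct QuerySettings
{
    Dialect dialect = Dialect::prql;
    uint64_t group_by_two_level_threshold = 100000;
    uint64_t group_by_two_level_threshold_bytes = 50000000;
};

/// Mirrors the hunks above: the initiator rewrites the copy it sends out,
/// never the settings it runs with locally.
QuerySettings normalizeForRemote(QuerySettings settings, bool remote_supports_two_level)
{
    /// Queries in foreign dialects were already transformed to ClickHouse SQL.
    settings.dialect = Dialect::clickhouse;
    if (!remote_supports_two_level)
    {
        /// Disable two-level aggregation due to version incompatibility.
        settings.group_by_two_level_threshold = 0;
        settings.group_by_two_level_threshold_bytes = 0;
    }
    return settings;
}

int main()
{
    QuerySettings local;  // what the client runs with
    QuerySettings wire = normalizeForRemote(local, /*remote_supports_two_level=*/ false);
    return (wire.dialect == Dialect::clickhouse && wire.group_by_two_level_threshold == 0) ? 0 : 1;
}
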
@@ -23,6 +23,17 @@

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsBool allow_settings_after_format_in_insert;
+    extern const SettingsDialect dialect;
+    extern const SettingsBool input_format_defaults_for_omitted_fields;
+    extern const SettingsUInt64 interactive_delay;
+    extern const SettingsUInt64 max_insert_block_size;
+    extern const SettingsUInt64 max_parser_backtracks;
+    extern const SettingsUInt64 max_parser_depth;
+    extern const SettingsUInt64 max_query_size;
+}

 namespace ErrorCodes
 {
@@ -158,21 +169,38 @@ void LocalConnection::sendQuery(
     const auto & settings = context->getSettingsRef();
     const char * begin = state->query.data();
     const char * end = begin + state->query.size();
-    const Dialect & dialect = settings.dialect;
+    const Dialect & dialect = settings[Setting::dialect];

     std::unique_ptr<IParserBase> parser;
     if (dialect == Dialect::kusto)
-        parser = std::make_unique<ParserKQLStatement>(end, settings.allow_settings_after_format_in_insert);
+        parser = std::make_unique<ParserKQLStatement>(end, settings[Setting::allow_settings_after_format_in_insert]);
     else if (dialect == Dialect::prql)
-        parser = std::make_unique<ParserPRQLQuery>(settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks);
+        parser
+            = std::make_unique<ParserPRQLQuery>(settings[Setting::max_query_size], settings[Setting::max_parser_depth], settings[Setting::max_parser_backtracks]);
     else
-        parser = std::make_unique<ParserQuery>(end, settings.allow_settings_after_format_in_insert);
+        parser = std::make_unique<ParserQuery>(end, settings[Setting::allow_settings_after_format_in_insert]);

     ASTPtr parsed_query;
     if (dialect == Dialect::kusto)
-        parsed_query = parseKQLQueryAndMovePosition(*parser, begin, end, "", /*allow_multi_statements*/false, settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks);
+        parsed_query = parseKQLQueryAndMovePosition(
+            *parser,
+            begin,
+            end,
+            "",
+            /*allow_multi_statements*/ false,
+            settings[Setting::max_query_size],
+            settings[Setting::max_parser_depth],
+            settings[Setting::max_parser_backtracks]);
     else
-        parsed_query = parseQueryAndMovePosition(*parser, begin, end, "", /*allow_multi_statements*/false, settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks);
+        parsed_query = parseQueryAndMovePosition(
+            *parser,
+            begin,
+            end,
+            "",
+            /*allow_multi_statements*/ false,
+            settings[Setting::max_query_size],
+            settings[Setting::max_parser_depth],
+            settings[Setting::max_parser_backtracks]);

     if (const auto * insert = parsed_query->as<ASTInsertQuery>())
     {
@@ -180,7 +208,7 @@ void LocalConnection::sendQuery(
                 current_format = insert->format;
         }

-        auto source = context->getInputFormat(current_format, *in, sample, context->getSettingsRef().max_insert_block_size);
+        auto source = context->getInputFormat(current_format, *in, sample, context->getSettingsRef()[Setting::max_insert_block_size]);
         Pipe pipe(source);

         auto columns_description = metadata_snapshot->getColumns();
@@ -227,7 +255,7 @@ void LocalConnection::sendQuery(
         }

         const auto & table_id = query_context->getInsertionTable();
-        if (query_context->getSettingsRef().input_format_defaults_for_omitted_fields)
+        if (query_context->getSettingsRef()[Setting::input_format_defaults_for_omitted_fields])
         {
             if (!table_id.empty())
             {
@@ -255,7 +283,7 @@ void LocalConnection::sendQuery(
                 return false;
             };

-            executor.setCancelCallback(callback, query_context->getSettingsRef().interactive_delay / 1000);
+            executor.setCancelCallback(callback, query_context->getSettingsRef()[Setting::interactive_delay] / 1000);
         }
         executor.execute();
     }
@@ -312,7 +340,7 @@ void LocalConnection::sendCancel()
 bool LocalConnection::pullBlock(Block & block)
 {
     if (state->executor)
-        return state->executor->pull(block, query_context->getSettingsRef().interactive_delay / 1000);
+        return state->executor->pull(block, query_context->getSettingsRef()[Setting::interactive_delay] / 1000);

     return false;
 }
@@ -466,14 +494,15 @@ bool LocalConnection::poll(size_t)

 bool LocalConnection::needSendProgressOrMetrics()
 {
-    if (send_progress && (state->after_send_progress.elapsedMicroseconds() >= query_context->getSettingsRef().interactive_delay))
+    if (send_progress && (state->after_send_progress.elapsedMicroseconds() >= query_context->getSettingsRef()[Setting::interactive_delay]))
     {
         state->after_send_progress.restart();
         next_packet_type = Protocol::Server::Progress;
         return true;
     }

-    if (send_profile_events && (state->after_send_profile_events.elapsedMicroseconds() >= query_context->getSettingsRef().interactive_delay))
+    if (send_profile_events
+        && (state->after_send_profile_events.elapsedMicroseconds() >= query_context->getSettingsRef()[Setting::interactive_delay]))
     {
         sendProfileEvents();
         return true;

@@ -12,6 +12,16 @@

 namespace DB
 {
+namespace Setting
+{
+    extern const SettingsBool allow_experimental_analyzer;
+    extern const SettingsDialect dialect;
+    extern const SettingsUInt64 group_by_two_level_threshold;
+    extern const SettingsUInt64 group_by_two_level_threshold_bytes;
+    extern const SettingsUInt64 parallel_replicas_count;
+    extern const SettingsUInt64 parallel_replica_offset;
+    extern const SettingsSeconds receive_timeout;
+}

 // NOLINTBEGIN(bugprone-undefined-memory-manipulation)

@@ -128,8 +138,8 @@ void MultiplexedConnections::sendQuery(
     Settings modified_settings = settings;

     /// Queries in foreign languages are transformed to ClickHouse-SQL. Ensure the setting before sending.
-    modified_settings.dialect = Dialect::clickhouse;
-    modified_settings.dialect.changed = false;
+    modified_settings[Setting::dialect] = Dialect::clickhouse;
+    modified_settings[Setting::dialect].changed = false;

     for (auto & replica : replica_states)
     {
@@ -139,8 +149,8 @@ void MultiplexedConnections::sendQuery(
         if (replica.connection->getServerRevision(timeouts) < DBMS_MIN_REVISION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD)
         {
             /// Disable two-level aggregation due to version incompatibility.
-            modified_settings.group_by_two_level_threshold = 0;
-            modified_settings.group_by_two_level_threshold_bytes = 0;
+            modified_settings[Setting::group_by_two_level_threshold] = 0;
+            modified_settings[Setting::group_by_two_level_threshold_bytes] = 0;
         }
     }

@@ -154,7 +164,7 @@ void MultiplexedConnections::sendQuery(
     /// Make the analyzer being set, so it will be effectively applied on the remote server.
     /// In other words, the initiator always controls whether the analyzer enabled or not for
     /// all servers involved in the distributed query processing.
-    modified_settings.set("allow_experimental_analyzer", static_cast<bool>(modified_settings.allow_experimental_analyzer));
+    modified_settings.set("allow_experimental_analyzer", static_cast<bool>(modified_settings[Setting::allow_experimental_analyzer]));

     const bool enable_offset_parallel_processing = context->canUseOffsetParallelReplicas();

@@ -163,12 +173,12 @@ void MultiplexedConnections::sendQuery(
     {
         if (enable_offset_parallel_processing)
             /// Use multiple replicas for parallel query processing.
-            modified_settings.parallel_replicas_count = num_replicas;
+            modified_settings[Setting::parallel_replicas_count] = num_replicas;

         for (size_t i = 0; i < num_replicas; ++i)
        {
             if (enable_offset_parallel_processing)
-                modified_settings.parallel_replica_offset = i;
+                modified_settings[Setting::parallel_replica_offset] = i;

             replica_states[i].connection->sendQuery(
                 timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
@@ -403,7 +413,7 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead
     Poco::Net::Socket::SocketList write_list;
     Poco::Net::Socket::SocketList except_list;

-    auto timeout = settings.receive_timeout;
+    auto timeout = settings[Setting::receive_timeout];
     int n = 0;

     /// EINTR loop

@@ -1,2 +1,2 @@
 clickhouse_add_executable(test-connect test_connect.cpp)
-target_link_libraries (test-connect PRIVATE dbms)
+target_link_libraries (test-connect PRIVATE dbms clickhouse_functions)

@@ -816,6 +816,22 @@ void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const
         return;
     }

+    /// If it's not null we update hash with the type name and the actual value.
+
+    /// If value in this row is in shared variant, deserialize type and value and
+    /// update hash with it.
+    if (discr == getSharedVariantDiscriminator())
+    {
+        auto value = getSharedVariant().getDataAt(variant_col.offsetAt(n));
+        ReadBufferFromMemory buf(value.data, value.size);
+        auto type = decodeDataType(buf);
+        hash.update(type->getName());
+        auto tmp_column = type->createColumn();
+        type->getDefaultSerialization()->deserializeBinary(*tmp_column, buf, getFormatSettings());
+        tmp_column->updateHashWithValue(0, hash);
+        return;
+    }
+
     hash.update(variant_info.variant_names[discr]);
     variant_col.getVariantByGlobalDiscriminator(discr).updateHashWithValue(variant_col.offsetAt(n), hash);
 }

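The new shared-variant branch preserves the hash contract of the existing fast path: the hasher sees the type name first, then the value, so equal bit patterns of different types cannot collide silently. A small illustration of that discipline using `std::variant`; the `std::hash` mixing below is illustrative only, not the SipHash the column actually uses:

#include <cstdint>
#include <functional>
#include <string>
#include <type_traits>
#include <variant>

using Dynamic = std::variant<int64_t, double, std::string>;

static const char * typeName(const Dynamic & v)
{
    switch (v.index())
    {
        case 0: return "Int64";
        case 1: return "Float64";
        default: return "String";
    }
}

/// Hash the type name first, then the value, mirroring the update order in the hunk:
/// hash.update(type->getName()) followed by the deserialized value's own hash.
size_t hashDynamic(const Dynamic & v)
{
    size_t h = std::hash<std::string>{}(typeName(v));
    std::visit([&](const auto & value)
    {
        using T = std::decay_t<decltype(value)>;
        /// Boost-style combine; any reasonable mixing step works for the sketch.
        h ^= std::hash<T>{}(value) + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
    }, v);
    return h;
}

int main()
{
    /// Same numeric value, different type => different hash input stream.
    return hashDynamic(Dynamic{int64_t{42}}) != hashDynamic(Dynamic{42.0}) ? 0 : 1;
}
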
@@ -47,15 +47,21 @@ ColumnObject::ColumnObject(
     , statistics(statistics_)
 {
     typed_paths.reserve(typed_paths_.size());
+    sorted_typed_paths.reserve(typed_paths_.size());
     for (auto & [path, column] : typed_paths_)
-        typed_paths[path] = std::move(column);
+    {
+        auto it = typed_paths.emplace(path, std::move(column)).first;
+        sorted_typed_paths.push_back(it->first);
+    }
+    std::sort(sorted_typed_paths.begin(), sorted_typed_paths.end());

     dynamic_paths.reserve(dynamic_paths_.size());
     dynamic_paths_ptrs.reserve(dynamic_paths_.size());
     for (auto & [path, column] : dynamic_paths_)
     {
-        dynamic_paths[path] = std::move(column);
-        dynamic_paths_ptrs[path] = assert_cast<ColumnDynamic *>(dynamic_paths[path].get());
+        auto it = dynamic_paths.emplace(path, std::move(column)).first;
+        dynamic_paths_ptrs[path] = assert_cast<ColumnDynamic *>(it->second.get());
+        sorted_dynamic_paths.insert(it->first);
     }
 }

@@ -64,13 +70,17 @@ ColumnObject::ColumnObject(
     : max_dynamic_paths(max_dynamic_paths_), global_max_dynamic_paths(max_dynamic_paths_), max_dynamic_types(max_dynamic_types_)
 {
     typed_paths.reserve(typed_paths_.size());
+    sorted_typed_paths.reserve(typed_paths_.size());
     for (auto & [path, column] : typed_paths_)
     {
         if (!column->empty())
             throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected non-empty typed path column in ColumnObject constructor");
-        typed_paths[path] = std::move(column);
+        auto it = typed_paths.emplace(path, std::move(column)).first;
+        sorted_typed_paths.push_back(it->first);
     }

+    std::sort(sorted_typed_paths.begin(), sorted_typed_paths.end());
+
     MutableColumns paths_and_values;
     paths_and_values.emplace_back(ColumnString::create());
     paths_and_values.emplace_back(ColumnString::create());
@@ -129,13 +139,8 @@ std::string ColumnObject::getName() const
     ss << "Object(";
     ss << "max_dynamic_paths=" << global_max_dynamic_paths;
     ss << ", max_dynamic_types=" << max_dynamic_types;
-    std::vector<String> sorted_typed_paths;
-    sorted_typed_paths.reserve(typed_paths.size());
-    for (const auto & [path, column] : typed_paths)
-        sorted_typed_paths.push_back(path);
-    std::sort(sorted_typed_paths.begin(), sorted_typed_paths.end());
     for (const auto & path : sorted_typed_paths)
-        ss << ", " << path << " " << typed_paths.at(path)->getName();
+        ss << ", " << path << " " << typed_paths.find(path)->second->getName();
     ss << ")";
     return ss.str();
 }
@@ -260,6 +265,7 @@ ColumnDynamic * ColumnObject::tryToAddNewDynamicPath(std::string_view path)
     new_dynamic_column->insertManyDefaults(size());
     auto it = dynamic_paths.emplace(path, std::move(new_dynamic_column)).first;
     auto it_ptr = dynamic_paths_ptrs.emplace(path, assert_cast<ColumnDynamic *>(it->second.get())).first;
+    sorted_dynamic_paths.insert(it->first);
     return it_ptr->second;
 }

@@ -288,8 +294,9 @@ void ColumnObject::setDynamicPaths(const std::vector<String> & paths)
             auto new_dynamic_column = ColumnDynamic::create(max_dynamic_types);
             if (size)
                 new_dynamic_column->insertManyDefaults(size);
-            dynamic_paths[path] = std::move(new_dynamic_column);
-            dynamic_paths_ptrs[path] = assert_cast<ColumnDynamic *>(dynamic_paths[path].get());
+            auto it = dynamic_paths.emplace(path, std::move(new_dynamic_column)).first;
+            dynamic_paths_ptrs[path] = assert_cast<ColumnDynamic *>(it->second.get());
+            sorted_dynamic_paths.insert(it->first);
         }
     }

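The constructors now maintain `sorted_typed_paths` and `sorted_dynamic_paths` as persistent indexes over keys the path maps already own, so `getName()` and serialization stop rebuilding a sorted vector on every call. That only works because node-based maps keep key storage stable; a sketch of the idea with standard containers (the container choices here are illustrative, not the column's actual ones):

#include <algorithm>
#include <cassert>
#include <map>
#include <set>
#include <string>
#include <string_view>
#include <vector>

int main()
{
    /// Node-based maps (std::map, std::unordered_map) never move their keys,
    /// so views into those keys stay valid across later insertions.
    std::map<std::string, int, std::less<>> typed_paths;
    std::vector<std::string_view> sorted_typed_paths;  // built once in the constructor, reused afterwards

    for (const char * path : {"b.y", "a.x", "c.z"})
    {
        auto it = typed_paths.emplace(path, 0).first;
        sorted_typed_paths.push_back(it->first);  // view into the map's own key storage
    }
    std::sort(sorted_typed_paths.begin(), sorted_typed_paths.end());

    /// Dynamic paths arrive one at a time, so an ordered set is the natural index.
    std::map<std::string, int, std::less<>> dynamic_paths;
    std::set<std::string_view> sorted_dynamic_paths;
    auto it = dynamic_paths.emplace("d.w", 1).first;
    sorted_dynamic_paths.insert(it->first);

    assert(sorted_typed_paths.front() == "a.x");
    assert(*sorted_dynamic_paths.begin() == "d.w");
}
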
||||
@ -658,39 +665,61 @@ void ColumnObject::popBack(size_t n)
StringRef ColumnObject::serializeValueIntoArena(size_t n, Arena & arena, const char *& begin) const
{
StringRef res(begin, 0);
// Serialize all paths and values in binary format.
/// First serialize values from typed paths in sorted order. They are the same for all instances of this column.
for (auto path : sorted_typed_paths)
{
auto data_ref = typed_paths.find(path)->second->serializeValueIntoArena(n, arena, begin);
res.data = data_ref.data - res.size;
res.size += data_ref.size;
}
/// Second, serialize paths and values in binary format from dynamic paths and shared data, in order sorted by path.
/// Calculate total number of paths to serialize and write it.
const auto & shared_data_offsets = getSharedDataOffsets();
size_t offset = shared_data_offsets[static_cast<ssize_t>(n) - 1];
size_t end = shared_data_offsets[static_cast<ssize_t>(n)];
size_t num_paths = typed_paths.size() + dynamic_paths.size() + (end - offset);
size_t num_paths = (end - offset);
/// Don't serialize Nulls from dynamic paths.
for (const auto & [_, column] : dynamic_paths)
num_paths += !column->isNullAt(n);

char * pos = arena.allocContinue(sizeof(size_t), begin);
memcpy(pos, &num_paths, sizeof(size_t));
res.data = pos - res.size;
res.size += sizeof(size_t);
/// Serialize paths and values from typed paths.
for (const auto & [path, column] : typed_paths)
{
size_t path_size = path.size();
pos = arena.allocContinue(sizeof(size_t) + path_size, begin);
memcpy(pos, &path_size, sizeof(size_t));
memcpy(pos + sizeof(size_t), path.data(), path_size);
auto data_ref = column->serializeValueIntoArena(n, arena, begin);
res.data = data_ref.data - res.size - sizeof(size_t) - path_size;
res.size += data_ref.size + sizeof(size_t) + path_size;
}

/// Serialize paths and values from dynamic paths.
for (const auto & [path, column] : dynamic_paths)
{
WriteBufferFromOwnString buf;
getDynamicSerialization()->serializeBinary(*column, n, buf, getFormatSettings());
serializePathAndValueIntoArena(arena, begin, path, buf.str(), res);
}

/// Serialize paths and values from shared data.
auto dynamic_paths_it = sorted_dynamic_paths.begin();
auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues();
for (size_t i = offset; i != end; ++i)
serializePathAndValueIntoArena(arena, begin, shared_data_paths->getDataAt(i), shared_data_values->getDataAt(i), res);
{
auto path = shared_data_paths->getDataAt(i).toView();
/// Paths in shared data are sorted. Serialize all paths from dynamic paths that go before this path in sorted order.
while (dynamic_paths_it != sorted_dynamic_paths.end() && *dynamic_paths_it < path)
{
const auto * dynamic_column = dynamic_paths_ptrs.find(*dynamic_paths_it)->second;
/// Don't serialize Nulls.
if (!dynamic_column->isNullAt(n))
{
WriteBufferFromOwnString buf;
getDynamicSerialization()->serializeBinary(*dynamic_column, n, buf, getFormatSettings());
serializePathAndValueIntoArena(arena, begin, StringRef(*dynamic_paths_it), buf.str(), res);
}
++dynamic_paths_it;
}
serializePathAndValueIntoArena(arena, begin, StringRef(path), shared_data_values->getDataAt(i), res);
}

/// Serialize all remaining paths in dynamic paths.
for (; dynamic_paths_it != sorted_dynamic_paths.end(); ++dynamic_paths_it)
{
const auto * dynamic_column = dynamic_paths_ptrs.find(*dynamic_paths_it)->second;
if (!dynamic_column->isNullAt(n))
{
WriteBufferFromOwnString buf;
getDynamicSerialization()->serializeBinary(*dynamic_column, n, buf, getFormatSettings());
serializePathAndValueIntoArena(arena, begin, StringRef(*dynamic_paths_it), buf.str(), res);
}
}

return res;
}
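For readers tracing the layout: each row is serialized as the typed-path values in a fixed sorted order (no path names needed), then a size_t count of the remaining paths, then length-prefixed (path, value) pairs in sorted-by-path order. The snippet below is a toy round trip of that framing over an ordinary byte vector; it only sketches the layout and is not ClickHouse's Arena or serialization API.

#include <cstring>
#include <iostream>
#include <map>
#include <string>
#include <vector>

/// Append a length-prefixed string: [size_t length][bytes].
static void putSized(std::vector<char> & out, const std::string & s)
{
    size_t n = s.size();
    out.insert(out.end(), reinterpret_cast<const char *>(&n), reinterpret_cast<const char *>(&n) + sizeof(n));
    out.insert(out.end(), s.begin(), s.end());
}

/// Read a length-prefixed string and advance the cursor.
static std::string getSized(const char *& pos)
{
    size_t n;
    std::memcpy(&n, pos, sizeof(n));
    pos += sizeof(n);
    std::string s(pos, n);
    pos += n;
    return s;
}

int main()
{
    std::map<std::string, std::string> row{{"a.b", "1"}, {"a.c", "x"}}; /// std::map keeps keys sorted

    std::vector<char> buf;
    size_t num_paths = row.size();
    buf.insert(buf.end(), reinterpret_cast<const char *>(&num_paths), reinterpret_cast<const char *>(&num_paths) + sizeof(num_paths));
    for (const auto & [path, value] : row)
    {
        putSized(buf, path);
        putSized(buf, value);
    }

    const char * pos = buf.data();
    size_t n;
    std::memcpy(&n, pos, sizeof(n));
    pos += sizeof(n);
    for (size_t i = 0; i != n; ++i)
    {
        std::string path = getSized(pos);
        std::string value = getSized(pos);
        std::cout << path << " = " << value << '\n';
    }
}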
@ -711,70 +740,49 @@ void ColumnObject::serializePathAndValueIntoArena(DB::Arena & arena, const char
const char * ColumnObject::deserializeAndInsertFromArena(const char * pos)
{
size_t current_size = size();
/// Deserialize paths and values and insert them into typed paths, dynamic paths or shared data.
/// Serialized paths could be unsorted, so we will have to sort all paths that will be inserted into shared data.
std::vector<std::pair<std::string_view, std::string_view>> paths_and_values_for_shared_data;
/// First deserialize typed paths. They come first.
for (auto path : sorted_typed_paths)
pos = typed_paths.find(path)->second->deserializeAndInsertFromArena(pos);

/// Second deserialize all other paths and values and insert them into dynamic paths or shared data.
auto num_paths = unalignedLoad<size_t>(pos);
pos += sizeof(size_t);
const auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues();
for (size_t i = 0; i != num_paths; ++i)
{
auto path_size = unalignedLoad<size_t>(pos);
pos += sizeof(size_t);
std::string_view path(pos, path_size);
pos += path_size;
/// Check if it's a typed path. In this case we should use
/// deserializeAndInsertFromArena of corresponding column.
if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end())
/// Deserialize binary value and try to insert it to dynamic paths or shared data.
auto value_size = unalignedLoad<size_t>(pos);
pos += sizeof(size_t);
std::string_view value(pos, value_size);
pos += value_size;
/// Check if we have this path in dynamic paths.
if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end())
{
pos = typed_it->second->deserializeAndInsertFromArena(pos);
ReadBufferFromMemory buf(value.data(), value.size());
getDynamicSerialization()->deserializeBinary(*dynamic_it->second, buf, getFormatSettings());
}
/// If it's not a typed path, deserialize binary value and try to insert it
/// to dynamic paths or shared data.
/// Try to add a new dynamic path.
else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path))
{
ReadBufferFromMemory buf(value.data(), value.size());
getDynamicSerialization()->deserializeBinary(*dynamic_path_column, buf, getFormatSettings());
}
/// Limit on dynamic paths is reached, add this path to shared data.
/// Serialized paths are sorted, so we can insert right away.
else
{
auto value_size = unalignedLoad<size_t>(pos);
pos += sizeof(size_t);
std::string_view value(pos, value_size);
pos += value_size;
/// Check if we have this path in dynamic paths.
if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end())
{
ReadBufferFromMemory buf(value.data(), value.size());
getDynamicSerialization()->deserializeBinary(*dynamic_it->second, buf, getFormatSettings());
}
/// Try to add a new dynamic path.
else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path))
{
ReadBufferFromMemory buf(value.data(), value.size());
getDynamicSerialization()->deserializeBinary(*dynamic_path_column, buf, getFormatSettings());
}
/// Limit on dynamic paths is reached, add this path to shared data later.
else
{
paths_and_values_for_shared_data.emplace_back(path, value);
}
shared_data_paths->insertData(path.data(), path.size());
shared_data_values->insertData(value.data(), value.size());
}
}

/// Sort and insert all paths from paths_and_values_for_shared_data into shared data.
std::sort(paths_and_values_for_shared_data.begin(), paths_and_values_for_shared_data.end());
const auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues();
for (const auto & [path, value] : paths_and_values_for_shared_data)
{
shared_data_paths->insertData(path.data(), path.size());
shared_data_values->insertData(value.data(), value.size());
}

getSharedDataOffsets().push_back(shared_data_paths->size());

/// Insert default value in all remaining typed and dynamic paths.

for (auto & [_, column] : typed_paths)
{
if (column->size() == current_size)
column->insertDefault();
}

/// Insert default value in all remaining dynamic paths.
for (auto & [_, column] : dynamic_paths_ptrs)
{
if (column->size() == current_size)
@ -786,6 +794,11 @@ const char * ColumnObject::deserializeAndInsertFromArena(const char * pos)
const char * ColumnObject::skipSerializedInArena(const char * pos) const
{
/// First, skip all values of typed paths.
for (auto path : sorted_typed_paths)
pos = typed_paths.find(path)->second->skipSerializedInArena(pos);

/// Second, skip all other paths and values.
auto num_paths = unalignedLoad<size_t>(pos);
pos += sizeof(size_t);
for (size_t i = 0; i != num_paths; ++i)
@ -794,15 +807,8 @@ const char * ColumnObject::skipSerializedInArena(const char * pos) const
pos += sizeof(size_t);
std::string_view path(pos, path_size);
pos += path_size;
if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end())
{
pos = typed_it->second->skipSerializedInArena(pos);
}
else
{
auto value_size = unalignedLoad<size_t>(pos);
pos += sizeof(size_t) + value_size;
}
auto value_size = unalignedLoad<size_t>(pos);
pos += sizeof(size_t) + value_size;
}

return pos;
@ -810,11 +816,51 @@ const char * ColumnObject::skipSerializedInArena(const char * pos) const
void ColumnObject::updateHashWithValue(size_t n, SipHash & hash) const
{
for (const auto & [_, column] : typed_paths)
column->updateHashWithValue(n, hash);
for (const auto & [_, column] : dynamic_paths_ptrs)
column->updateHashWithValue(n, hash);
shared_data->updateHashWithValue(n, hash);
for (auto path : sorted_typed_paths)
typed_paths.find(path)->second->updateHashWithValue(n, hash);
/// The hash of the object in a row should not depend on the way we store paths (in dynamic paths or in shared data)
/// and should be the same for equal objects. To support this we update the hash with each path and its value (if not null)
/// in order sorted by path, from both dynamic paths and shared data.
const auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues();
const auto & shared_data_offsets = getSharedDataOffsets();
size_t start = shared_data_offsets[static_cast<ssize_t>(n) - 1];
size_t end = shared_data_offsets[static_cast<ssize_t>(n)];
auto dynamic_paths_it = sorted_dynamic_paths.begin();
for (size_t i = start; i != end; ++i)
{
auto path = shared_data_paths->getDataAt(i).toView();
/// Paths in shared data are sorted. Update hash with all paths from dynamic paths that go before this path in sorted order.
while (dynamic_paths_it != sorted_dynamic_paths.end() && *dynamic_paths_it < path)
{
const auto * dynamic_column = dynamic_paths_ptrs.find(*dynamic_paths_it)->second;
if (!dynamic_column->isNullAt(n))
{
hash.update(*dynamic_paths_it);
dynamic_column->updateHashWithValue(n, hash);
}
++dynamic_paths_it;
}

/// Deserialize value in temporary column to get its hash.
auto value = shared_data_values->getDataAt(i);
ReadBufferFromMemory buf(value.data, value.size);
auto tmp_column = ColumnDynamic::create();
getDynamicSerialization()->deserializeBinary(*tmp_column, buf, getFormatSettings());
hash.update(path);
tmp_column->updateHashWithValue(0, hash);
}

/// Iterate over all remaining paths in dynamic paths.
for (; dynamic_paths_it != sorted_dynamic_paths.end(); ++dynamic_paths_it)
{
const auto * dynamic_column = dynamic_paths_ptrs.find(*dynamic_paths_it)->second;
if (!dynamic_column->isNullAt(n))
{
hash.update(*dynamic_paths_it);
dynamic_column->updateHashWithValue(n, hash);
}
}
}
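The invariant stated in the comment above (the hash must not depend on whether a path lives in dynamic paths or in shared data) is achieved by a classic two-iterator merge of two path-sorted sequences. A self-contained sketch, with std::hash standing in for SipHash and plain string maps standing in for the columns:

#include <functional>
#include <iostream>
#include <map>
#include <string>

/// Simple hash combiner, a stand-in for SipHash updates.
static void update(size_t & h, const std::string & s)
{
    h ^= std::hash<std::string>{}(s) + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
}

/// Merge the two sorted (path, value) sequences so the hash sees one global
/// sorted-by-path order, regardless of which store a path lives in.
static size_t hashRow(const std::map<std::string, std::string> & dynamic_paths,
                      const std::map<std::string, std::string> & shared_data)
{
    size_t h = 0;
    auto dyn = dynamic_paths.begin();
    for (const auto & [path, value] : shared_data)
    {
        while (dyn != dynamic_paths.end() && dyn->first < path)
        {
            update(h, dyn->first);
            update(h, dyn->second);
            ++dyn;
        }
        update(h, path);
        update(h, value);
    }
    for (; dyn != dynamic_paths.end(); ++dyn)
    {
        update(h, dyn->first);
        update(h, dyn->second);
    }
    return h;
}

int main()
{
    /// The same object hashes identically whether "a.c" sits in the dynamic
    /// paths or in the shared data.
    std::cout << (hashRow({{"a.b", "1"}, {"a.c", "2"}}, {})
                  == hashRow({{"a.b", "1"}}, {{"a.c", "2"}})) << '\n'; /// prints 1
}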
WeakHash32 ColumnObject::getWeakHash32() const
@ -1310,6 +1356,7 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou
/// Reset current state.
dynamic_paths.clear();
dynamic_paths_ptrs.clear();
sorted_dynamic_paths.clear();
max_dynamic_paths = global_max_dynamic_paths;
Statistics new_statistics(Statistics::Source::MERGE);
@ -1328,8 +1375,9 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou
{
if (dynamic_paths.size() < max_dynamic_paths)
{
dynamic_paths.emplace(path, ColumnDynamic::create(max_dynamic_types));
dynamic_paths_ptrs.emplace(path, assert_cast<ColumnDynamic *>(dynamic_paths.find(path)->second.get()));
auto it = dynamic_paths.emplace(path, ColumnDynamic::create(max_dynamic_types)).first;
dynamic_paths_ptrs.emplace(path, assert_cast<ColumnDynamic *>(it->second.get()));
sorted_dynamic_paths.insert(it->first);
}
/// Add all remaining paths into shared data statistics until we reach its max size.
else if (new_statistics.shared_data_paths_statistics.size() < Statistics::MAX_SHARED_DATA_STATISTICS_SIZE)
@ -1343,8 +1391,9 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou
{
for (const auto & [path, _] : path_to_total_number_of_non_null_values)
{
dynamic_paths[path] = ColumnDynamic::create(max_dynamic_types);
dynamic_paths_ptrs[path] = assert_cast<ColumnDynamic *>(dynamic_paths[path].get());
auto it = dynamic_paths.emplace(path, ColumnDynamic::create(max_dynamic_types)).first;
dynamic_paths_ptrs[path] = assert_cast<ColumnDynamic *>(it->second.get());
sorted_dynamic_paths.insert(it->first);
}
}
@ -238,10 +238,15 @@ private:
/// Map path -> column for paths with explicitly specified types.
/// This set of paths is constant and cannot be changed.
PathToColumnMap typed_paths;
/// Sorted list of typed paths. Used to avoid sorting paths every time in some methods.
std::vector<std::string_view> sorted_typed_paths;
/// Map path -> column for dynamically added paths. All columns
/// here are Dynamic columns. This set of paths can be extended
/// during inserts into the column.
PathToColumnMap dynamic_paths;
/// Sorted list of dynamic paths. Used to avoid sorting paths every time in some methods.
std::set<std::string_view> sorted_dynamic_paths;

/// Store and use pointers to ColumnDynamic to avoid virtual calls.
/// With hundreds of dynamic paths these virtual calls are noticeable.
PathToDynamicColumnPtrMap dynamic_paths_ptrs;
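The dynamic_paths_ptrs comment above describes a common devirtualization trick: next to the map that owns columns through the IColumn interface, keep a second map of concrete ColumnDynamic pointers so hot loops can call non-virtual (and therefore inlinable) methods. A minimal sketch with invented toy types, not the real column classes:

#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

struct IColumn
{
    virtual ~IColumn() = default;
    virtual size_t size() const = 0;
};

struct ColumnDynamic : IColumn
{
    std::vector<int> data;
    size_t size() const override { return data.size(); }
    void insertDefaultConcrete() { data.push_back(0); } /// non-virtual, inlinable
};

int main()
{
    std::unordered_map<std::string, std::unique_ptr<IColumn>> dynamic_paths;
    std::unordered_map<std::string, ColumnDynamic *> dynamic_paths_ptrs;

    auto column = std::make_unique<ColumnDynamic>();
    auto * raw = column.get();
    auto it = dynamic_paths.emplace("a.b", std::move(column)).first;
    dynamic_paths_ptrs.emplace(it->first, raw); /// cache the concrete pointer once

    /// Hot path: no virtual dispatch per call.
    dynamic_paths_ptrs.find("a.b")->second->insertDefaultConcrete();
    std::cout << dynamic_paths.find("a.b")->second->size() << '\n'; /// 1
}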
@ -1,4 +1,5 @@
clickhouse_add_executable(column_insert_many_from benchmark_column_insert_many_from.cpp)
target_link_libraries (column_insert_many_from PRIVATE
ch_contrib::gbenchmark_all
dbms)
dbms
clickhouse_functions)
@ -13,6 +13,13 @@
#include <filesystem>
#include <fstream>

namespace DB
{
namespace Setting
{
extern const SettingsTimezone session_timezone;
}
}

namespace
{
@ -203,5 +210,5 @@ DateLUT & DateLUT::getInstance()
std::string DateLUT::extractTimezoneFromContext(DB::ContextPtr query_context)
{
return query_context->getSettingsRef().session_timezone.value;
return query_context->getSettingsRef()[DB::Setting::session_timezone].value;
}
@ -1,8 +1,9 @@
#include <Common/NamedCollections/NamedCollectionsFactory.h>
#include <Common/NamedCollections/NamedCollectionConfiguration.h>
#include <Common/NamedCollections/NamedCollectionsMetadataStorage.h>
#include <Core/Settings.h>
#include <base/sleep.h>
#include <Common/FieldVisitorToString.h>
#include <Common/NamedCollections/NamedCollectionConfiguration.h>
#include <Common/NamedCollections/NamedCollectionsFactory.h>
#include <Common/NamedCollections/NamedCollectionsMetadataStorage.h>

namespace DB
{
@ -1,28 +1,36 @@
#include <Common/NamedCollections/NamedCollectionsMetadataStorage.h>
#include <Common/NamedCollections/NamedCollectionConfiguration.h>
#include <Common/escapeForFileName.h>
#include <Common/logger_useful.h>
#include <Common/ZooKeeper/IKeeper.h>
#include <Common/ZooKeeper/KeeperException.h>
#include <Common/ZooKeeper/ZooKeeper.h>
#include <filesystem>
#include <Core/Settings.h>
#include <IO/FileEncryptionCommon.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteHelpers.h>
#include <Parsers/parseQuery.h>
#include <Interpreters/Context.h>
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/formatAST.h>
#include <Interpreters/Context.h>
#include <filesystem>
#include <Parsers/parseQuery.h>
#include <boost/algorithm/hex.hpp>
#include <Common/NamedCollections/NamedCollectionConfiguration.h>
#include <Common/NamedCollections/NamedCollectionsMetadataStorage.h>
#include <Common/ZooKeeper/IKeeper.h>
#include <Common/ZooKeeper/KeeperException.h>
#include <Common/ZooKeeper/ZooKeeper.h>
#include <Common/escapeForFileName.h>
#include <Common/logger_useful.h>

namespace fs = std::filesystem;

namespace DB
{
namespace Setting
{
extern const SettingsBool fsync_metadata;
extern const SettingsUInt64 max_parser_backtracks;
extern const SettingsUInt64 max_parser_depth;
}

namespace ErrorCodes
{
extern const int NAMED_COLLECTION_ALREADY_EXISTS;
@ -157,7 +165,7 @@ public:
writeString(write_data, out);

out.next();
if (getContext()->getSettingsRef().fsync_metadata)
if (getContext()->getSettingsRef()[Setting::fsync_metadata])
out.sync();
out.close();
@ -573,7 +581,7 @@ ASTCreateNamedCollectionQuery NamedCollectionsMetadataStorage::readCreateQuery(c
const auto & settings = getContext()->getSettingsRef();

ParserCreateNamedCollectionQuery parser;
auto ast = parseQuery(parser, query, "in file " + path, 0, settings.max_parser_depth, settings.max_parser_backtracks);
auto ast = parseQuery(parser, query, "in file " + path, 0, settings[Setting::max_parser_depth], settings[Setting::max_parser_backtracks]);
const auto & create_query = ast->as<const ASTCreateNamedCollectionQuery &>();
return create_query;
}
@ -14,6 +14,12 @@
namespace DB
{

namespace Setting
{
extern const SettingsFloat opentelemetry_start_trace_probability;
}

namespace OpenTelemetry
{
@ -329,7 +335,7 @@ TracingContextHolder::TracingContextHolder(
return;

// Start the trace with some configurable probability.
std::bernoulli_distribution should_start_trace{settings_ptr->opentelemetry_start_trace_probability};
std::bernoulli_distribution should_start_trace{(*settings_ptr)[Setting::opentelemetry_start_trace_probability]};
if (!should_start_trace(thread_local_rng))
/// skip tracing context initialization on current thread
return;
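For the probabilistic trace start above, std::bernoulli_distribution simply yields true with the configured probability. A tiny self-contained illustration (the 0.01 probability and local RNG are placeholders, not the server's configuration):

#include <iostream>
#include <random>

int main()
{
    std::mt19937_64 rng(std::random_device{}());
    std::bernoulli_distribution should_start_trace{0.01}; /// trace ~1% of queries
    size_t started = 0;
    for (int i = 0; i < 100000; ++i)
        started += should_start_trace(rng);
    std::cout << started << " traces started\n"; /// roughly 1000
}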
@ -2,6 +2,7 @@
#include <Core/Field.h>

#include <vector>

namespace DB
{
@ -2,6 +2,7 @@ clickhouse_add_executable(integer_hash_tables_and_hashes integer_hash_tables_and
target_link_libraries (integer_hash_tables_and_hashes PRIVATE
ch_contrib::gbenchmark_all
dbms
clickhouse_functions
ch_contrib::abseil_swiss_tables
ch_contrib::sparsehash
ch_contrib::wyhash
@ -11,4 +12,5 @@ target_link_libraries (integer_hash_tables_and_hashes PRIVATE
clickhouse_add_executable(orc_string_dictionary orc_string_dictionary.cpp)
target_link_libraries (orc_string_dictionary PRIVATE
ch_contrib::gbenchmark_all
dbms)
dbms
clickhouse_functions)
@ -26,24 +26,24 @@ clickhouse_add_executable (radix_sort radix_sort.cpp)
target_link_libraries (radix_sort PRIVATE clickhouse_common_io clickhouse_common_config ch_contrib::pdqsort)

clickhouse_add_executable (arena_with_free_lists arena_with_free_lists.cpp)
target_link_libraries (arena_with_free_lists PRIVATE dbms)
target_link_libraries (arena_with_free_lists PRIVATE dbms clickhouse_functions)

clickhouse_add_executable (lru_hash_map_perf lru_hash_map_perf.cpp)
target_link_libraries (lru_hash_map_perf PRIVATE dbms)
target_link_libraries (lru_hash_map_perf PRIVATE dbms clickhouse_functions)

if (OS_LINUX)
clickhouse_add_executable (thread_creation_latency thread_creation_latency.cpp)
target_link_libraries (thread_creation_latency PRIVATE clickhouse_common_io clickhouse_common_config)
target_link_libraries (thread_creation_latency PRIVATE dbms clickhouse_functions clickhouse_common_io clickhouse_common_config)
endif()

clickhouse_add_executable (array_cache array_cache.cpp)
target_link_libraries (array_cache PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (space_saving space_saving.cpp)
target_link_libraries (space_saving PRIVATE clickhouse_common_io clickhouse_common_config)
target_link_libraries (space_saving PRIVATE dbms clickhouse_functions clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (integer_hash_tables_benchmark integer_hash_tables_benchmark.cpp)
target_link_libraries (integer_hash_tables_benchmark PRIVATE dbms ch_contrib::abseil_swiss_tables ch_contrib::sparsehash)
target_link_libraries (integer_hash_tables_benchmark PRIVATE dbms clickhouse_functions ch_contrib::abseil_swiss_tables ch_contrib::sparsehash)

clickhouse_add_executable (cow_columns cow_columns.cpp)
target_link_libraries (cow_columns PRIVATE clickhouse_common_io clickhouse_common_config)
@ -69,13 +69,13 @@ clickhouse_add_executable (procfs_metrics_provider_perf procfs_metrics_provider_
target_link_libraries (procfs_metrics_provider_perf PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (average average.cpp)
target_link_libraries (average PRIVATE clickhouse_common_io clickhouse_common_config)
target_link_libraries (average PRIVATE dbms clickhouse_common_io clickhouse_common_config clickhouse_functions)

clickhouse_add_executable (shell_command_inout shell_command_inout.cpp)
target_link_libraries (shell_command_inout PRIVATE clickhouse_common_io clickhouse_common_config)

clickhouse_add_executable (executable_udf executable_udf.cpp)
target_link_libraries (executable_udf PRIVATE dbms)
target_link_libraries (executable_udf PRIVATE dbms clickhouse_functions)

if (ENABLE_HIVE)
clickhouse_add_executable (hive_metastore_client hive_metastore_client.cpp)
@ -83,7 +83,7 @@ if (ENABLE_HIVE)
endif()

clickhouse_add_executable (interval_tree interval_tree.cpp)
target_link_libraries (interval_tree PRIVATE dbms)
target_link_libraries (interval_tree PRIVATE dbms clickhouse_functions)

if (ENABLE_SSL)
clickhouse_add_executable (encrypt_decrypt encrypt_decrypt.cpp)
@ -1,2 +1,2 @@
clickhouse_add_executable (compressed_buffer compressed_buffer.cpp)
target_link_libraries (compressed_buffer PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression)
target_link_libraries (compressed_buffer PRIVATE dbms clickhouse_functions clickhouse_common_io clickhouse_common_config clickhouse_compression)
@ -1,12 +1,13 @@
#pragma once

#include <unordered_map>
#include <Core/SettingsFields.h>
#include <Common/SettingsChanges.h>
#include <Common/FieldVisitorToString.h>
#include <Core/SettingsWriteFormat.h>
#include <IO/Operators.h>
#include <base/range.h>
#include <boost/blank.hpp>
#include <unordered_map>
#include <Common/FieldVisitorToString.h>
#include <Common/SettingsChanges.h>


namespace boost::program_options
@ -20,13 +21,6 @@ namespace DB
class ReadBuffer;
class WriteBuffer;

enum class SettingsWriteFormat : uint8_t
{
BINARY = 0, /// Part of the settings are serialized as strings, and other part as variants. This is the old behaviour.
STRINGS_WITH_FLAGS = 1, /// All settings are serialized as strings. Before each value the flag `is_important` is serialized.
DEFAULT = STRINGS_WITH_FLAGS,
};

/** Template class to define collections of settings.
* Example of usage:
*
@ -96,6 +90,8 @@ public:
static String valueToStringUtil(std::string_view name, const Field & value);
static Field stringToValueUtil(std::string_view name, const String & str);

static std::string_view resolveName(std::string_view name);

void write(WriteBuffer & out, SettingsWriteFormat format = SettingsWriteFormat::DEFAULT) const;
void read(ReadBuffer & in, SettingsWriteFormat format = SettingsWriteFormat::DEFAULT);
@ -191,8 +187,6 @@ public:
MutableRange allMutable(SkipFlags skip_flags = SKIP_NONE) { return MutableRange{*this, skip_flags}; }
Range allChanged() const { return all(SKIP_UNCHANGED); }
Range allUnchanged() const { return all(SKIP_CHANGED); }
Range allBuiltin() const { return all(SKIP_CUSTOM); }
Range allCustom() const { return all(SKIP_BUILTIN); }

Iterator begin() const { return allChanged().begin(); }
Iterator end() const { return allChanged().end(); }
7
src/Core/BaseSettingsFwdMacros.h
Normal file
@ -0,0 +1,7 @@
#pragma once

#define DECLARE_SETTING_TRAIT(CLASS_NAME, TYPE) using CLASS_NAME##TYPE = SettingField##TYPE CLASS_NAME##Impl::*;

#define DECLARE_SETTING_SUBSCRIPT_OPERATOR(CLASS_NAME, TYPE) \
const SettingField##TYPE & operator[](CLASS_NAME##TYPE t) const; \
SettingField##TYPE & operator[](CLASS_NAME##TYPE t);
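To see what these macros buy: DECLARE_SETTING_TRAIT defines a pointer-to-member alias into a hidden Impl struct, and DECLARE_SETTING_SUBSCRIPT_OPERATOR lets callers write settings[Setting::name] (as in the DateLUT and NamedCollections hunks above) without the header exposing the full field list. A stripped-down sketch of the same mechanism, with hypothetical names and hand-written fields instead of the generated SettingField types:

#include <iostream>
#include <string>

/// In the real code the Impl struct lives in a .cpp; shown inline here.
struct SettingsImpl
{
    std::string session_timezone = "UTC";
    unsigned max_parser_depth = 1000;
};

/// Conceptually what DECLARE_SETTING_TRAIT expands to:
/// a pointer-to-member naming one field of the Impl.
using SettingsString = std::string SettingsImpl::*;
using SettingsUInt = unsigned SettingsImpl::*;

namespace Setting
{
    /// Each .cpp declares only the externs it needs and never sees the full list.
    inline const SettingsString session_timezone = &SettingsImpl::session_timezone;
    inline const SettingsUInt max_parser_depth = &SettingsImpl::max_parser_depth;
}

struct Settings
{
    SettingsImpl impl;

    /// Conceptually what DECLARE_SETTING_SUBSCRIPT_OPERATOR generates.
    const std::string & operator[](SettingsString t) const { return impl.*t; }
    std::string & operator[](SettingsString t) { return impl.*t; }
    unsigned operator[](SettingsUInt t) const { return impl.*t; }
};

int main()
{
    Settings settings;
    settings[Setting::session_timezone] = "Europe/Amsterdam";
    std::cout << settings[Setting::session_timezone] << ' '
              << settings[Setting::max_parser_depth] << '\n';
}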
@ -27,6 +27,10 @@
namespace DB
{
namespace Setting
{
extern const SettingsUInt64 http_max_multipart_form_data_size;
}

namespace ErrorCodes
{
@ -182,10 +186,12 @@ void ExternalTablesHandler::handlePart(const Poco::Net::MessageHeader & header,
const Settings & settings = getContext()->getSettingsRef();

if (settings.http_max_multipart_form_data_size)
if (settings[Setting::http_max_multipart_form_data_size])
read_buffer = std::make_unique<LimitReadBuffer>(
stream, settings.http_max_multipart_form_data_size,
/* throw_exception */ true, /* exact_limit */ std::optional<size_t>(),
stream,
settings[Setting::http_max_multipart_form_data_size],
/* throw_exception */ true,
/* exact_limit */ std::optional<size_t>(),
"the maximum size of multipart/form-data. "
"This limit can be tuned by 'http_max_multipart_form_data_size' setting");
else
18
src/Core/FormatFactorySettings.cpp
Normal file
@ -0,0 +1,18 @@
#include <Core/BaseSettings.h>
#include <Core/FormatFactorySettingsDeclaration.h>
#include <Core/SettingsEnums.h>

namespace DB
{
/*
* User-specified file format settings for File and URL engines.
*/
DECLARE_SETTINGS_TRAITS(FormatFactorySettingsTraits, LIST_OF_ALL_FORMAT_SETTINGS)

struct FormatFactorySettingsImpl : public BaseSettings<FormatFactorySettingsTraits>
{
};

IMPLEMENT_SETTINGS_TRAITS(FormatFactorySettingsTraits, LIST_OF_ALL_FORMAT_SETTINGS)

}
55
src/Core/FormatFactorySettings.h
Normal file
@ -0,0 +1,55 @@
#pragma once

#include <Core/BaseSettingsFwdMacros.h>
#include <Core/SettingsEnums.h>
#include <Core/SettingsFields.h>
#include <base/types.h>

namespace DB
{
struct FormatFactorySettingsImpl;
struct SettingChange;
class SettingsChanges;

#define FORMAT_SETTINGS_SUPPORTED_TYPES(CLASS_NAME, M) \
M(CLASS_NAME, Bool) \
M(CLASS_NAME, Char) \
M(CLASS_NAME, Int64) \
M(CLASS_NAME, UInt64) \
M(CLASS_NAME, MsgPackUUIDRepresentation) \
M(CLASS_NAME, SchemaInferenceMode) \
M(CLASS_NAME, UInt64Auto) \
M(CLASS_NAME, DateTimeInputFormat) \
M(CLASS_NAME, DateTimeOutputFormat) \
M(CLASS_NAME, IntervalOutputFormat) \
M(CLASS_NAME, String) \
M(CLASS_NAME, ParquetVersion) \
M(CLASS_NAME, ParquetCompression) \
M(CLASS_NAME, EscapingRule) \
M(CLASS_NAME, ArrowCompression) \
M(CLASS_NAME, CapnProtoEnumComparingMode) \
M(CLASS_NAME, DateTimeOverflowBehavior) \
M(CLASS_NAME, IdentifierQuotingStyle)

FORMAT_SETTINGS_SUPPORTED_TYPES(FormatFactorySettings, DECLARE_SETTING_TRAIT)

struct FormatFactorySettings
{
FormatFactorySettings();
~FormatFactorySettings();

FORMAT_SETTINGS_SUPPORTED_TYPES(FormatFactorySettings, DECLARE_SETTING_SUBSCRIPT_OPERATOR)

/// General API as needed
bool tryGet(std::string_view name, Field & value) const;
Field get(std::string_view name) const;
void set(std::string_view name, const Field & value);
bool has(std::string_view name) const;
void applyChange(const SettingChange & change);
void applyChanges(const SettingsChanges & changes);

private:
std::unique_ptr<FormatFactorySettingsImpl> impl;
};

}
275
src/Core/FormatFactorySettingsDeclaration.h
Normal file
@ -0,0 +1,275 @@
#pragma once

#include <Core/SettingsObsoleteMacros.h>

/// This header exists so we can share it between Settings.cpp, FormatFactorySettings.cpp and other storage settings

// clang-format off
#if defined(__CLION_IDE__)
/// CLion freezes for a minute every time it processes this
#define FORMAT_FACTORY_SETTINGS(M, ALIAS)
#define OBSOLETE_FORMAT_SETTINGS(M, ALIAS)
#else

#define FORMAT_FACTORY_SETTINGS(M, ALIAS) \
M(Char, format_csv_delimiter, ',', "The character to be considered as a delimiter in CSV data. If setting with a string, a string has to have a length of 1.", 0) \
M(Bool, format_csv_allow_single_quotes, false, "If it is set to true, allow strings in single quotes.", 0) \
M(Bool, format_csv_allow_double_quotes, true, "If it is set to true, allow strings in double quotes.", 0) \
M(Bool, output_format_csv_serialize_tuple_into_separate_columns, true, "If it set to true, then Tuples in CSV format are serialized as separate columns (that is, their nesting in the tuple is lost)", 0) \
M(Bool, input_format_csv_deserialize_separate_columns_into_tuple, true, "If it set to true, then separate columns written in CSV format can be deserialized to Tuple column.", 0) \
M(Bool, output_format_csv_crlf_end_of_line, false, "If it is set true, end of line in CSV format will be \\r\\n instead of \\n.", 0) \
M(Bool, input_format_csv_allow_cr_end_of_line, false, "If it is set true, \\r will be allowed at end of line not followed by \\n", 0) \
M(Bool, input_format_csv_enum_as_number, false, "Treat inserted enum values in CSV formats as enum indices", 0) \
M(Bool, input_format_csv_arrays_as_nested_csv, false, R"(When reading Array from CSV, expect that its elements were serialized in nested CSV and then put into string. Example: "[""Hello"", ""world"", ""42"""" TV""]". Braces around array can be omitted.)", 0) \
M(Bool, input_format_skip_unknown_fields, true, "Skip columns with unknown names from input data (it works for JSONEachRow, -WithNames, -WithNamesAndTypes and TSKV formats).", 0) \
M(Bool, input_format_with_names_use_header, true, "For -WithNames input formats this controls whether format parser is to assume that column data appear in the input exactly as they are specified in the header.", 0) \
M(Bool, input_format_with_types_use_header, true, "For -WithNamesAndTypes input formats this controls whether format parser should check if data types from the input match data types from the header.", 0) \
M(Bool, input_format_import_nested_json, false, "Map nested JSON data to nested tables (it works for JSONEachRow format).", 0) \
M(Bool, input_format_defaults_for_omitted_fields, true, "For input data calculate default expressions for omitted fields (it works for JSONEachRow, -WithNames, -WithNamesAndTypes formats).", IMPORTANT) \
M(Bool, input_format_csv_empty_as_default, true, "Treat empty fields in CSV input as default values.", 0) \
M(Bool, input_format_tsv_empty_as_default, false, "Treat empty fields in TSV input as default values.", 0) \
M(Bool, input_format_tsv_enum_as_number, false, "Treat inserted enum values in TSV formats as enum indices.", 0) \
M(Bool, input_format_null_as_default, true, "Initialize null fields with default values if the data type of this field is not nullable and it is supported by the input format", 0) \
M(Bool, input_format_force_null_for_omitted_fields, false, "Force initialize omitted fields with null values", 0) \
M(Bool, input_format_arrow_case_insensitive_column_matching, false, "Ignore case when matching Arrow columns with CH columns.", 0) \
M(Int64, input_format_orc_row_batch_size, 100'000, "Batch size when reading ORC stripes.", 0) \
M(Bool, input_format_orc_case_insensitive_column_matching, false, "Ignore case when matching ORC columns with CH columns.", 0) \
M(Bool, input_format_parquet_case_insensitive_column_matching, false, "Ignore case when matching Parquet columns with CH columns.", 0) \
M(Bool, input_format_parquet_preserve_order, false, "Avoid reordering rows when reading from Parquet files. Usually makes it much slower.", 0) \
M(Bool, input_format_parquet_filter_push_down, true, "When reading Parquet files, skip whole row groups based on the WHERE/PREWHERE expressions and min/max statistics in the Parquet metadata.", 0) \
M(Bool, input_format_parquet_use_native_reader, false, "When reading Parquet files, to use native reader instead of arrow reader.", 0) \
M(Bool, input_format_allow_seeks, true, "Allow seeks while reading in ORC/Parquet/Arrow input formats", 0) \
M(Bool, input_format_orc_allow_missing_columns, true, "Allow missing columns while reading ORC input formats", 0) \
M(Bool, input_format_orc_use_fast_decoder, true, "Use a faster ORC decoder implementation.", 0) \
M(Bool, input_format_orc_filter_push_down, true, "When reading ORC files, skip whole stripes or row groups based on the WHERE/PREWHERE expressions, min/max statistics or bloom filter in the ORC metadata.", 0) \
M(String, input_format_orc_reader_time_zone_name, "GMT", "The time zone name for ORC row reader, the default ORC row reader's time zone is GMT.", 0) \
M(Bool, input_format_parquet_allow_missing_columns, true, "Allow missing columns while reading Parquet input formats", 0) \
M(UInt64, input_format_parquet_local_file_min_bytes_for_seek, 8192, "Min bytes required for local read (file) to do seek, instead of read with ignore in Parquet input format", 0) \
M(Bool, input_format_arrow_allow_missing_columns, true, "Allow missing columns while reading Arrow input formats", 0) \
M(Char, input_format_hive_text_fields_delimiter, '\x01', "Delimiter between fields in Hive Text File", 0) \
M(Char, input_format_hive_text_collection_items_delimiter, '\x02', "Delimiter between collection(array or map) items in Hive Text File", 0) \
M(Char, input_format_hive_text_map_keys_delimiter, '\x03', "Delimiter between a pair of map key/values in Hive Text File", 0) \
M(Bool, input_format_hive_text_allow_variable_number_of_columns, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values", 0) \
M(UInt64, input_format_msgpack_number_of_columns, 0, "The number of columns in inserted MsgPack data. Used for automatic schema inference from data.", 0) \
M(MsgPackUUIDRepresentation, output_format_msgpack_uuid_representation, FormatSettings::MsgPackUUIDRepresentation::EXT, "The way how to output UUID in MsgPack format.", 0) \
M(UInt64, input_format_max_rows_to_read_for_schema_inference, 25000, "The maximum rows of data to read for automatic schema inference", 0) \
M(UInt64, input_format_max_bytes_to_read_for_schema_inference, 32 * 1024 * 1024, "The maximum bytes of data to read for automatic schema inference", 0) \
M(Bool, input_format_csv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in CSV format", 0) \
M(Bool, input_format_csv_try_infer_numbers_from_strings, false, "Try to infer numbers from string fields while schema inference in CSV format", 0) \
M(Bool, input_format_csv_try_infer_strings_from_quoted_tuples, true, "Interpret quoted tuples in the input data as a value of type String.", 0) \
M(Bool, input_format_tsv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in TSV format", 0) \
M(Bool, input_format_csv_detect_header, true, "Automatically detect header with names and types in CSV format", 0) \
M(Bool, input_format_csv_allow_whitespace_or_tab_as_delimiter, false, "Allow to use spaces and tabs(\\t) as field delimiter in the CSV strings", 0) \
M(Bool, input_format_csv_trim_whitespaces, true, "Trims spaces and tabs (\\t) characters at the beginning and end in CSV strings", 0) \
M(Bool, input_format_csv_use_default_on_bad_values, false, "Allow to set default value to column when CSV field deserialization failed on bad value", 0) \
M(Bool, input_format_csv_allow_variable_number_of_columns, false, "Ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values", 0) \
M(Bool, input_format_tsv_allow_variable_number_of_columns, false, "Ignore extra columns in TSV input (if file has more columns than expected) and treat missing fields in TSV input as default values", 0) \
M(Bool, input_format_custom_allow_variable_number_of_columns, false, "Ignore extra columns in CustomSeparated input (if file has more columns than expected) and treat missing fields in CustomSeparated input as default values", 0) \
M(Bool, input_format_json_compact_allow_variable_number_of_columns, false, "Ignore extra columns in JSONCompact(EachRow) input (if file has more columns than expected) and treat missing fields in JSONCompact(EachRow) input as default values", 0) \
M(Bool, input_format_tsv_detect_header, true, "Automatically detect header with names and types in TSV format", 0) \
M(Bool, input_format_custom_detect_header, true, "Automatically detect header with names and types in CustomSeparated format", 0) \
M(Bool, input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types while schema inference for format Parquet", 0) \
M(UInt64, input_format_parquet_max_block_size, DEFAULT_BLOCK_SIZE, "Max block size for parquet reader.", 0) \
M(UInt64, input_format_parquet_prefer_block_bytes, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader", 0) \
M(Bool, input_format_protobuf_skip_fields_with_unsupported_types_in_schema_inference, false, "Skip fields with unsupported types while schema inference for format Protobuf", 0) \
M(Bool, input_format_capn_proto_skip_fields_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types while schema inference for format CapnProto", 0) \
M(Bool, input_format_orc_skip_columns_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types while schema inference for format ORC", 0) \
M(Bool, input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types while schema inference for format Arrow", 0) \
M(String, column_names_for_schema_inference, "", "The list of column names to use in schema inference for formats without column names. The format: 'column1,column2,column3,...'", 0) \
M(String, schema_inference_hints, "", "The list of column names and types to use in schema inference for formats without column names. The format: 'column_name1 column_type1, column_name2 column_type2, ...'", 0) \
M(SchemaInferenceMode, schema_inference_mode, "default", "Mode of schema inference. 'default' - assume that all files have the same schema and schema can be inferred from any file, 'union' - files can have different schemas and the resulting schema should be a union of schemas of all files", 0) \
M(UInt64Auto, schema_inference_make_columns_nullable, 1, "If set to true, all inferred types will be Nullable in schema inference. When set to false, no columns will be converted to Nullable. When set to 'auto', ClickHouse will use information about nullability from the data.", 0) \
M(Bool, input_format_json_read_bools_as_numbers, true, "Allow to parse bools as numbers in JSON input formats", 0) \
M(Bool, input_format_json_read_bools_as_strings, true, "Allow to parse bools as strings in JSON input formats", 0) \
M(Bool, input_format_json_try_infer_numbers_from_strings, false, "Try to infer numbers from string fields while schema inference", 0) \
M(Bool, input_format_json_validate_types_from_metadata, true, "For JSON/JSONCompact/JSONColumnsWithMetadata input formats this controls whether format parser should check if data types from input metadata match data types of the corresponding columns from the table", 0) \
M(Bool, input_format_json_read_numbers_as_strings, true, "Allow to parse numbers as strings in JSON input formats", 0) \
M(Bool, input_format_json_read_objects_as_strings, true, "Allow to parse JSON objects as strings in JSON input formats", 0) \
M(Bool, input_format_json_read_arrays_as_strings, true, "Allow to parse JSON arrays as strings in JSON input formats", 0) \
M(Bool, input_format_json_try_infer_named_tuples_from_objects, true, "Try to infer named tuples from JSON objects in JSON input formats", 0) \
M(Bool, input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects, false, "Use String type instead of an exception in case of ambiguous paths in JSON objects during named tuples inference", 0) \
M(Bool, input_format_json_infer_incomplete_types_as_strings, true, "Use type String for keys that contains only Nulls or empty objects/arrays during schema inference in JSON input formats", 0) \
M(Bool, input_format_json_named_tuples_as_objects, true, "Deserialize named tuple columns as JSON objects", 0) \
M(Bool, input_format_json_ignore_unknown_keys_in_named_tuple, true, "Ignore unknown keys in json object for named tuples", 0) \
M(Bool, input_format_json_defaults_for_missing_elements_in_named_tuple, true, "Insert default value in named tuple element if it's missing in json object", 0) \
M(Bool, input_format_json_throw_on_bad_escape_sequence, true, "Throw an exception if JSON string contains bad escape sequence in JSON input formats. If disabled, bad escape sequences will remain as is in the data", 0) \
M(Bool, input_format_json_ignore_unnecessary_fields, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields", 0) \
M(Bool, input_format_try_infer_variants, false, "Try to infer the Variant type in text formats when there is more than one possible type for column/array elements", 0) \
M(Bool, type_json_skip_duplicated_paths, false, "When enabled, during parsing JSON object into JSON type duplicated paths will be ignored and only the first one will be inserted instead of an exception", 0) \
M(UInt64, input_format_json_max_depth, 1000, "Maximum depth of a field in JSON. This is not a strict limit, it does not have to be applied precisely.", 0) \
M(Bool, input_format_json_empty_as_default, false, "Treat empty fields in JSON input as default values.", 0) \
M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \
M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \
M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \
M(Bool, input_format_try_infer_datetimes_only_datetime64, false, "When input_format_try_infer_datetimes is enabled, infer only DateTime64 but not DateTime types", 0) \
M(Bool, input_format_try_infer_exponent_floats, false, "Try to infer floats in exponential notation while schema inference in text formats (except JSON, where exponent numbers are always inferred)", 0) \
M(Bool, output_format_markdown_escape_special_characters, false, "Escape special characters in Markdown", 0) \
M(Bool, input_format_protobuf_flatten_google_wrappers, false, "Enable Google wrappers for regular non-nested columns, e.g. google.protobuf.StringValue 'str' for String column 'str'. For Nullable columns empty wrappers are recognized as defaults, and missing as nulls", 0) \
M(Bool, output_format_protobuf_nullables_with_google_wrappers, false, "When serializing Nullable columns with Google wrappers, serialize default values as empty wrappers. If turned off, default and null values are not serialized", 0) \
M(UInt64, input_format_csv_skip_first_lines, 0, "Skip specified number of lines at the beginning of data in CSV format", 0) \
M(UInt64, input_format_tsv_skip_first_lines, 0, "Skip specified number of lines at the beginning of data in TSV format", 0) \
M(Bool, input_format_csv_skip_trailing_empty_lines, false, "Skip trailing empty lines in CSV format", 0) \
M(Bool, input_format_tsv_skip_trailing_empty_lines, false, "Skip trailing empty lines in TSV format", 0) \
M(Bool, input_format_custom_skip_trailing_empty_lines, false, "Skip trailing empty lines in CustomSeparated format", 0) \
M(Bool, input_format_tsv_crlf_end_of_line, false, "If it is set true, file function will read TSV format with \\r\\n instead of \\n.", 0) \
\
M(Bool, input_format_native_allow_types_conversion, true, "Allow data types conversion in Native input format", 0) \
M(Bool, input_format_native_decode_types_in_binary_format, false, "Read data types in binary format instead of type names in Native input format", 0) \
M(Bool, output_format_native_encode_types_in_binary_format, false, "Write data types in binary format instead of type names in Native output format", 0) \
\
M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic', 'best_effort' and 'best_effort_us'.", 0) \
M(DateTimeOutputFormat, date_time_output_format, FormatSettings::DateTimeOutputFormat::Simple, "Method to write DateTime to text output. Possible values: 'simple', 'iso', 'unix_timestamp'.", 0) \
M(IntervalOutputFormat, interval_output_format, FormatSettings::IntervalOutputFormat::Numeric, "Textual representation of Interval. Possible values: 'kusto', 'numeric'.", 0) \
\
M(Bool, input_format_ipv4_default_on_conversion_error, false, "Deserialization of IPv4 will use default values instead of throwing exception on conversion error.", 0) \
M(Bool, input_format_ipv6_default_on_conversion_error, false, "Deserialization of IPV6 will use default values instead of throwing exception on conversion error.", 0) \
M(String, bool_true_representation, "true", "Text to represent bool value in TSV/CSV formats.", 0) \
M(String, bool_false_representation, "false", "Text to represent bool value in TSV/CSV formats.", 0) \
\
M(Bool, input_format_values_interpret_expressions, true, "For Values format: if the field could not be parsed by streaming parser, run SQL parser and try to interpret it as SQL expression.", 0) \
M(Bool, input_format_values_deduce_templates_of_expressions, true, "For Values format: if the field could not be parsed by streaming parser, run SQL parser, deduce template of the SQL expression, try to parse all rows using template and then interpret expression for all rows.", 0) \
M(Bool, input_format_values_accurate_types_of_literals, true, "For Values format: when parsing and interpreting expressions using template, check actual type of literal to avoid possible overflow and precision issues.", 0) \
M(Bool, input_format_avro_allow_missing_fields, false, "For Avro/AvroConfluent format: when field is not found in schema use default value instead of error", 0) \
/** This setting is obsolete and does nothing, left for compatibility reasons. */ \
M(Bool, input_format_avro_null_as_default, false, "For Avro/AvroConfluent format: insert default in case of null and non Nullable column", 0) \
M(UInt64, format_binary_max_string_size, 1_GiB, "The maximum allowed size for String in RowBinary format. It prevents allocating large amount of memory in case of corrupted data. 0 means there is no limit", 0) \
M(UInt64, format_binary_max_array_size, 1_GiB, "The maximum allowed size for Array in RowBinary format. It prevents allocating large amount of memory in case of corrupted data. 0 means there is no limit", 0) \
M(Bool, input_format_binary_decode_types_in_binary_format, false, "Read data types in binary format instead of type names in RowBinaryWithNamesAndTypes input format", 0) \
M(Bool, output_format_binary_encode_types_in_binary_format, false, "Write data types in binary format instead of type names in RowBinaryWithNamesAndTypes output format", 0) \
M(URI, format_avro_schema_registry_url, "", "For AvroConfluent format: Confluent Schema Registry URL.", 0) \
\
M(Bool, output_format_json_quote_64bit_integers, true, "Controls quoting of 64-bit integers in JSON output format.", 0) \
M(Bool, output_format_json_quote_denormals, false, "Enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format.", 0) \
M(Bool, output_format_json_quote_decimals, false, "Controls quoting of decimals in JSON output format.", 0) \
M(Bool, output_format_json_quote_64bit_floats, false, "Controls quoting of 64-bit float numbers in JSON output format.", 0) \
\
M(Bool, output_format_json_escape_forward_slashes, true, "Controls escaping forward slashes for string outputs in JSON output format. This is intended for compatibility with JavaScript. Don't confuse with backslashes that are always escaped.", 0) \
M(Bool, output_format_json_named_tuples_as_objects, true, "Serialize named tuple columns as JSON objects.", 0) \
M(Bool, output_format_json_skip_null_value_in_named_tuples, false, "Skip key value pairs with null value when serialize named tuple columns as JSON objects. It is only valid when output_format_json_named_tuples_as_objects is true.", 0) \
M(Bool, output_format_json_array_of_rows, false, "Output a JSON array of all rows in JSONEachRow(Compact) format.", 0) \
M(Bool, output_format_json_validate_utf8, false, "Validate UTF-8 sequences in JSON output formats, doesn't impact formats JSON/JSONCompact/JSONColumnsWithMetadata, they always validate utf8", 0) \
\
M(String, format_json_object_each_row_column_for_object_name, "", "The name of column that will be used as object names in JSONObjectEachRow format. Column type should be String", 0) \
\
M(UInt64, output_format_pretty_max_rows, 10000, "Rows limit for Pretty formats.", 0) \
M(UInt64, output_format_pretty_max_column_pad_width, 250, "Maximum width to pad all values in a column in Pretty formats.", 0) \
M(UInt64, output_format_pretty_max_value_width, 10000, "Maximum width of value to display in Pretty formats. If greater - it will be cut.", 0) \
M(UInt64, output_format_pretty_max_value_width_apply_for_single_value, false, "Only cut values (see the `output_format_pretty_max_value_width` setting) when it is not a single value in a block. Otherwise output it entirely, which is useful for the `SHOW CREATE TABLE` query.", 0) \
M(UInt64Auto, output_format_pretty_color, "auto", "Use ANSI escape sequences in Pretty formats. 0 - disabled, 1 - enabled, 'auto' - enabled if a terminal.", 0) \
M(String, output_format_pretty_grid_charset, "UTF-8", "Charset for printing grid borders. Available charsets: ASCII, UTF-8 (default one).", 0) \
M(UInt64, output_format_pretty_display_footer_column_names, true, "Display column names in the footer if there are 999 or more rows.", 0) \
M(UInt64, output_format_pretty_display_footer_column_names_min_rows, 50, "Sets the minimum threshold value of rows for which to enable displaying column names in the footer. 50 (default)", 0) \
M(UInt64, output_format_parquet_row_group_size, 1000000, "Target row group size in rows.", 0) \
M(UInt64, output_format_parquet_row_group_size_bytes, 512 * 1024 * 1024, "Target row group size in bytes, before compression.", 0) \
M(Bool, output_format_parquet_string_as_string, true, "Use Parquet String type instead of Binary for String columns.", 0) \
M(Bool, output_format_parquet_fixed_string_as_fixed_byte_array, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type instead of Binary for FixedString columns.", 0) \
M(ParquetVersion, output_format_parquet_version, "2.latest", "Parquet format version for output format. Supported versions: 1.0, 2.4, 2.6 and 2.latest (default)", 0) \
M(ParquetCompression, output_format_parquet_compression_method, "zstd", "Compression method for Parquet output format. Supported codecs: snappy, lz4, brotli, zstd, gzip, none (uncompressed)", 0) \
M(Bool, output_format_parquet_compliant_nested_types, true, "In parquet file schema, use name 'element' instead of 'item' for list elements. This is a historical artifact of Arrow library implementation. Generally increases compatibility, except perhaps with some old versions of Arrow.", 0) \
M(Bool, output_format_parquet_use_custom_encoder, true, "Use a faster Parquet encoder implementation.", 0) \
M(Bool, output_format_parquet_parallel_encoding, true, "Do Parquet encoding in multiple threads. Requires output_format_parquet_use_custom_encoder.", 0) \
M(UInt64, output_format_parquet_data_page_size, 1024 * 1024, "Target page size in bytes, before compression.", 0) \
|
||||
M(UInt64, output_format_parquet_batch_size, 1024, "Check page size every this many rows. Consider decreasing if you have columns with average values size above a few KBs.", 0) \
|
||||
M(Bool, output_format_parquet_write_page_index, true, "Add a possibility to write page index into parquet files.", 0) \
|
||||
M(String, output_format_avro_codec, "", "Compression codec used for output. Possible values: 'null', 'deflate', 'snappy', 'zstd'.", 0) \
M(UInt64, output_format_avro_sync_interval, 16 * 1024, "Sync interval in bytes.", 0) \
M(String, output_format_avro_string_column_pattern, "", "For Avro format: regexp of String columns to select as AVRO string.", 0) \
M(UInt64, output_format_avro_rows_in_file, 1, "Max rows in a file (if permitted by storage)", 0) \
M(Bool, output_format_tsv_crlf_end_of_line, false, "If set to true, the end of line in TSV format will be \\r\\n instead of \\n.", 0) \
M(String, format_csv_null_representation, "\\N", "Custom NULL representation in CSV format", 0) \
M(String, format_tsv_null_representation, "\\N", "Custom NULL representation in TSV format", 0) \
M(Bool, output_format_decimal_trailing_zeros, false, "Output trailing zeros when printing Decimal values. E.g. 1.230000 instead of 1.23.", 0) \
\
M(UInt64, input_format_allow_errors_num, 0, "Maximum absolute number of errors while reading text formats (like CSV, TSV). If an error occurs and the absolute or the relative number of errors is still below the corresponding limit, the offending line is skipped and reading continues.", 0) \
M(Float, input_format_allow_errors_ratio, 0, "Maximum relative share of errors while reading text formats (like CSV, TSV). If an error occurs and the absolute or the relative number of errors is still below the corresponding limit, the offending line is skipped and reading continues.", 0) \
M(String, input_format_record_errors_file_path, "", "Path of the file used to record errors while reading text formats (CSV, TSV).", 0) \
M(String, errors_output_format, "CSV", "Format used to write errors to text output.", 0) \
\
M(String, format_schema, "", "Schema identifier (used by schema-based formats)", 0) \
M(String, format_template_resultset, "", "Path to the file that contains the format string for the result set (for Template format)", 0) \
M(String, format_template_row, "", "Path to the file that contains the format string for rows (for Template format)", 0) \
M(String, format_template_row_format, "", "Format string for rows (for Template format)", 0) \
M(String, format_template_resultset_format, "", "Format string for the result set (for Template format)", 0) \
M(String, format_template_rows_between_delimiter, "\n", "Delimiter between rows (for Template format)", 0) \
\
M(EscapingRule, format_custom_escaping_rule, "Escaped", "Field escaping rule (for CustomSeparated format)", 0) \
M(String, format_custom_field_delimiter, "\t", "Delimiter between fields (for CustomSeparated format)", 0) \
M(String, format_custom_row_before_delimiter, "", "Delimiter before the field of the first column (for CustomSeparated format)", 0) \
M(String, format_custom_row_after_delimiter, "\n", "Delimiter after the field of the last column (for CustomSeparated format)", 0) \
M(String, format_custom_row_between_delimiter, "", "Delimiter between rows (for CustomSeparated format)", 0) \
M(String, format_custom_result_before_delimiter, "", "Prefix before the result set (for CustomSeparated format)", 0) \
M(String, format_custom_result_after_delimiter, "", "Suffix after the result set (for CustomSeparated format)", 0) \
\
M(String, format_regexp, "", "Regular expression (for Regexp format)", 0) \
M(EscapingRule, format_regexp_escaping_rule, "Raw", "Field escaping rule (for Regexp format)", 0) \
M(Bool, format_regexp_skip_unmatched, false, "Skip lines unmatched by the regular expression (for Regexp format)", 0) \
\
M(Bool, output_format_enable_streaming, false, "Enable streaming in output formats that support it.", 0) \
M(Bool, output_format_write_statistics, true, "Write statistics about read rows, bytes, and time elapsed in suitable output formats.", 0) \
M(Bool, output_format_pretty_row_numbers, true, "Add row numbers before each row for Pretty output formats", 0) \
M(Bool, output_format_pretty_highlight_digit_groups, true, "If enabled and if output is a terminal, highlight every digit corresponding to thousands, millions, etc. with an underline.", 0) \
M(UInt64, output_format_pretty_single_large_number_tip_threshold, 1'000'000, "Print a readable number tip on the right side of the table if the block consists of a single number which exceeds this value (except 0)", 0) \
M(Bool, insert_distributed_one_random_shard, false, "If enabled, inserting into a distributed table will choose a random shard to write to when there is no sharding key", 0) \
\
M(Bool, exact_rows_before_limit, false, "When enabled, ClickHouse will provide the exact value for the rows_before_limit_at_least statistic, at the cost of having to read the data before the limit completely", 0) \
M(Bool, rows_before_aggregation, false, "When enabled, ClickHouse will provide the exact value for the rows_before_aggregation statistic, which represents the number of rows read before aggregation", 0) \
M(UInt64, cross_to_inner_join_rewrite, 1, "Use an inner join instead of a comma/cross join if there are joining expressions in the WHERE section. Values: 0 - no rewrite, 1 - apply if possible for comma/cross, 2 - force rewrite of all comma joins, and cross joins if possible", 0) \
\
M(Bool, output_format_arrow_low_cardinality_as_dictionary, false, "Output the LowCardinality type as the Arrow Dictionary type", 0) \
M(Bool, output_format_arrow_use_signed_indexes_for_dictionary, true, "Use signed integers for dictionary indexes in Arrow format", 0) \
M(Bool, output_format_arrow_use_64_bit_indexes_for_dictionary, false, "Always use 64-bit integers for dictionary indexes in Arrow format", 0) \
M(Bool, output_format_arrow_string_as_string, true, "Use Arrow String type instead of Binary for String columns", 0) \
M(Bool, output_format_arrow_fixed_string_as_fixed_byte_array, true, "Use Arrow FIXED_SIZE_BINARY type instead of Binary for FixedString columns.", 0) \
M(ArrowCompression, output_format_arrow_compression_method, "lz4_frame", "Compression method for Arrow output format. Supported codecs: lz4_frame, zstd, none (uncompressed)", 0) \
\
M(Bool, output_format_orc_string_as_string, true, "Use ORC String type instead of Binary for String columns", 0) \
M(ORCCompression, output_format_orc_compression_method, "zstd", "Compression method for ORC output format. Supported codecs: lz4, snappy, zlib, zstd, none (uncompressed)", 0) \
M(UInt64, output_format_orc_row_index_stride, 10'000, "Target row index stride in ORC output format", 0) \
M(Double, output_format_orc_dictionary_key_size_threshold, 0.0, "For a string column in ORC output format, if the number of distinct values is greater than this fraction of the total number of non-null rows, turn off dictionary encoding. Otherwise, dictionary encoding is enabled", 0) \
\
M(CapnProtoEnumComparingMode, format_capn_proto_enum_comparising_mode, FormatSettings::CapnProtoEnumComparingMode::BY_VALUES, "How to map between ClickHouse Enum and CapnProto Enum values", 0) \
\
M(Bool, format_capn_proto_use_autogenerated_schema, true, "Use an autogenerated CapnProto schema when format_schema is not set", 0) \
M(Bool, format_protobuf_use_autogenerated_schema, true, "Use an autogenerated Protobuf schema when format_schema is not set", 0) \
M(String, output_format_schema, "", "The path to the file where the automatically generated schema will be saved", 0) \
\
M(String, input_format_mysql_dump_table_name, "", "Name of the table in the MySQL dump from which to read data", 0) \
M(Bool, input_format_mysql_dump_map_column_names, true, "Match columns from the table in the MySQL dump with columns of the ClickHouse table by name", 0) \
\
M(UInt64, output_format_sql_insert_max_batch_size, DEFAULT_BLOCK_SIZE, "The maximum number of rows in one INSERT statement.", 0) \
M(String, output_format_sql_insert_table_name, "table", "The name of the table in the output INSERT query", 0) \
M(Bool, output_format_sql_insert_include_column_names, true, "Include column names in the INSERT query", 0) \
M(Bool, output_format_sql_insert_use_replace, false, "Use REPLACE statement instead of INSERT", 0) \
M(Bool, output_format_sql_insert_quote_names, true, "Quote column names with '`' characters", 0) \
\
M(Bool, output_format_values_escape_quote_with_quote, false, "If true, escape ' as ''; otherwise quote it as \\'", 0) \
\
M(Bool, output_format_bson_string_as_string, false, "Use BSON String type instead of Binary for String columns.", 0) \
M(Bool, input_format_bson_skip_fields_with_unsupported_types_in_schema_inference, false, "Skip fields with unsupported types during schema inference for the BSON format.", 0) \
\
M(Bool, format_display_secrets_in_show_and_select, false, "Do not hide secrets in SHOW and SELECT queries.", IMPORTANT) \
M(Bool, regexp_dict_allow_hyperscan, true, "Allow a regexp_tree dictionary to use the Hyperscan library.", 0) \
M(Bool, regexp_dict_flag_case_insensitive, false, "Use case-insensitive matching for a regexp_tree dictionary. Can be overridden in individual expressions with (?i) and (?-i).", 0) \
M(Bool, regexp_dict_flag_dotall, false, "Allow '.' to match newline characters for a regexp_tree dictionary.", 0) \
\
M(Bool, dictionary_use_async_executor, false, "Execute a pipeline for reading the dictionary source in several threads. Supported only by dictionaries with a local CLICKHOUSE source.", 0) \
M(Bool, precise_float_parsing, false, "Prefer a more precise (but slower) float parsing algorithm", 0) \
M(DateTimeOverflowBehavior, date_time_overflow_behavior, "ignore", "Overflow mode for Date, Date32, DateTime, DateTime64 types. Possible values: 'ignore', 'throw', 'saturate'.", 0) \
M(Bool, validate_experimental_and_suspicious_types_inside_nested_types, true, "Validate usage of experimental and suspicious types inside nested types like Array/Map/Tuple", 0) \
\
M(Bool, output_format_always_quote_identifiers, false, "Always quote identifiers", 0) \
M(IdentifierQuotingStyle, output_format_identifier_quoting_style, IdentifierQuotingStyle::Backticks, "Set the quoting style for identifiers", 0) \

// End of FORMAT_FACTORY_SETTINGS

#define OBSOLETE_FORMAT_SETTINGS(M, ALIAS) \
    /** Obsolete format settings that do nothing but are left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
    MAKE_OBSOLETE(M, Bool, input_format_arrow_import_nested, false) \
    MAKE_OBSOLETE(M, Bool, input_format_parquet_import_nested, false) \
    MAKE_OBSOLETE(M, Bool, input_format_orc_import_nested, false) \

#endif // __CLION_IDE__

#define LIST_OF_ALL_FORMAT_SETTINGS(M, ALIAS) \
    FORMAT_FACTORY_SETTINGS(M, ALIAS) \
    OBSOLETE_FORMAT_SETTINGS(M, ALIAS)
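
These settings lists are X-macros: the list itself is a macro that takes the per-entry macro `M` as a parameter, so each caller can expand the same list into declarations, registrations, or documentation. Below is a minimal, self-contained sketch of the pattern with a simplified four-argument `M` and a hypothetical two-entry list — an illustration of the technique, not the real ClickHouse machinery:

#include <cstdint>
#include <iostream>

// A toy settings list in the same X-macro style (hypothetical entries).
#define TOY_SETTINGS(M) \
    M(uint64_t, max_rows, 10000, "Rows limit for Pretty formats.") \
    M(bool, use_color, true, "Use ANSI escape sequences.")

// First instantiation of M: declare one struct member per setting.
struct ToySettings
{
#define M(TYPE, NAME, DEFAULT, DESCRIPTION) TYPE NAME = DEFAULT;
    TOY_SETTINGS(M)
#undef M
};

// Second instantiation of M: print every setting's name and description.
void printToySettings()
{
#define M(TYPE, NAME, DEFAULT, DESCRIPTION) \
    std::cout << #NAME << ": " << DESCRIPTION << '\n';
    TOY_SETTINGS(M)
#undef M
}

int main()
{
    ToySettings settings;   // members generated by the first expansion
    std::cout << settings.max_rows << '\n';
    printToySettings();     // listing generated by the second expansion
}

Expanding the same list twice with two different definitions of `M` mirrors how lists like FORMAT_FACTORY_SETTINGS(M, ALIAS) above are meant to be consumed.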

@ -34,6 +34,7 @@ static constexpr auto DBMS_MIN_REVISION_WITH_AGGREGATE_FUNCTIONS_VERSIONING = 54
static constexpr auto DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION = 1;

static constexpr auto DBMS_MIN_SUPPORTED_PARALLEL_REPLICAS_PROTOCOL_VERSION = 3;
static constexpr auto DBMS_PARALLEL_REPLICAS_MIN_VERSION_WITH_MARK_SEGMENT_SIZE_FIELD = 4;
static constexpr auto DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION = 4;
static constexpr auto DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS = 54453;
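
Revision constants like these are typically compared against the peer's negotiated revision to decide whether a protocol feature may be used. A minimal sketch of that gating idiom — the supportsParallelReplicas helper and client_revision parameter are hypothetical, for illustration only:

#include <cstdint>

// Mirrors the constant above, repeated here so the sketch is self-contained.
static constexpr auto DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS = 54453;

// Hypothetical capability check against a negotiated client revision.
bool supportsParallelReplicas(uint64_t client_revision)
{
    return client_revision >= DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS;
}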
1485
src/Core/Settings.h
File diff suppressed because it is too large

9
src/Core/SettingsObsoleteMacros.h
Normal file
@ -0,0 +1,9 @@
#pragma once

// clang-format off
#define MAKE_OBSOLETE(M, TYPE, NAME, DEFAULT) \
    M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", BaseSettingsHelpers::Flags::OBSOLETE)

/// NOTE: ServerSettings::loadSettingsFromConfig() should be updated to include these settings
#define MAKE_DEPRECATED_BY_SERVER_CONFIG(M, TYPE, NAME, DEFAULT) \
    M(TYPE, NAME, DEFAULT, "User-level setting is deprecated, and it must be defined in the server configuration instead.", BaseSettingsHelpers::Flags::OBSOLETE)
Some files were not shown because too many files have changed in this diff