Merge pull request #23704 from vzakaznikov/testflows_window_functions_tests
TestFlows window functions tests
Commit 617e71b3f2
@@ -19,6 +19,7 @@ def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
     # Feature(test=load("rbac.regression", "regression"))(**args)
     # Feature(test=load("aes_encryption.regression", "regression"))(**args)
     Feature(test=load("map_type.regression", "regression"))(**args)
+    Feature(test=load("window_functions.regression", "regression"))(**args)
     # Feature(test=load("kerberos.regression", "regression"))(**args)
 
 if main():
@@ -0,0 +1,16 @@
<yandex>
    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-server/log.log</log>
        <errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
        <stderr>/var/log/clickhouse-server/stderr.log</stderr>
        <stdout>/var/log/clickhouse-server/stdout.log</stdout>
    </logger>
    <part_log>
        <database>system</database>
        <table>part_log</table>
        <flush_interval_milliseconds>500</flush_interval_milliseconds>
    </part_log>
</yandex>
@@ -0,0 +1,42 @@
<?xml version="1.0"?>
<yandex>
    <remote_servers>
        <replicated_cluster>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>clickhouse1</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>clickhouse2</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>clickhouse3</host>
                    <port>9000</port>
                </replica>
            </shard>
        </replicated_cluster>
        <sharded_cluster>
            <shard>
                <replica>
                    <host>clickhouse1</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>clickhouse2</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>clickhouse3</host>
                    <port>9000</port>
                </replica>
            </shard>
        </sharded_cluster>
    </remote_servers>
</yandex>
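Editor's note (not part of the diff): `sharded_cluster` above places each node in its own shard, while `replicated_cluster` keeps all three nodes in a single replicated shard. A minimal sketch of how the suite exercises the sharded cluster, modeled on the fixtures in tests/common.py further down; the `Cluster` helper and the `cluster.node()` accessor are assumed from the suite's `helpers.cluster`, and the table names are illustrative:

    # Sketch only: a Distributed table over sharded_cluster, the same pattern
    # common.py uses for its distributed fixtures.
    from helpers.cluster import Cluster  # the suite's own helper (assumed API)

    nodes = {"clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3")}

    with Cluster(local=True, clickhouse_binary_path="/usr/bin/clickhouse", nodes=nodes) as cluster:
        node = cluster.node("clickhouse1")
        node.query(
            "CREATE TABLE t_source ON CLUSTER sharded_cluster (x Int8) "
            "ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/t_source', '{replica}') "
            "ORDER BY tuple()"
        )
        node.query(
            "CREATE TABLE t AS t_source "
            "ENGINE = Distributed(sharded_cluster, default, t_source, rand())"
        )
        node.query("INSERT INTO t VALUES (1)")  # rand() picks the target shard
        node.query("SELECT count() FROM t")     # fans out to all three shards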
@@ -0,0 +1,10 @@
<?xml version="1.0"?>
<yandex>
    <zookeeper>
        <node index="1">
            <host>zookeeper</host>
            <port>2181</port>
        </node>
        <session_timeout_ms>15000</session_timeout_ms>
    </zookeeper>
</yandex>
448 tests/testflows/window_functions/configs/clickhouse/config.xml (new file)
@@ -0,0 +1,448 @@
<?xml version="1.0"?>
<!--
  NOTE: User and query level settings are set up in "users.xml" file.
-->
<yandex>
    <logger>
        <!-- Possible levels: https://github.com/pocoproject/poco/blob/develop/Foundation/include/Poco/Logger.h#L105 -->
        <level>trace</level>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
    </logger>
    <!--display_name>production</display_name--> <!-- It is the name that will be shown in the client -->
    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>

    <!-- For HTTPS and SSL over native protocol. -->
    <!--
    <https_port>8443</https_port>
    <tcp_port_secure>9440</tcp_port_secure>
    -->

    <!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
    <openSSL>
        <server> <!-- Used for https server AND secure tcp port -->
            <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
            <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
            <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
            <!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
            <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
            <verificationMode>none</verificationMode>
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
        </server>

        <client> <!-- Used for connecting to https dictionary source -->
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
            <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
            <invalidCertificateHandler>
                <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
                <name>RejectCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>

    <!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
    <!--
    <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
    -->

    <!-- Port for communication between replicas. Used for data exchange. -->
    <interserver_http_port>9009</interserver_http_port>

    <!-- Hostname that is used by other replicas to request this server.
         If not specified, it is determined analogously to the 'hostname -f' command.
         This setting can be used to switch replication to another network interface.
    -->
    <!--
    <interserver_http_host>example.yandex.ru</interserver_http_host>
    -->

    <!-- Listen specified host. Use :: (wildcard IPv6 address) if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
    <!-- <listen_host>::</listen_host> -->
    <!-- Same for hosts with disabled ipv6: -->
    <listen_host>0.0.0.0</listen_host>

    <!-- Default values - try listen localhost on ipv4 and ipv6: -->
    <!--
    <listen_host>::1</listen_host>
    <listen_host>127.0.0.1</listen_host>
    -->
    <!-- Don't exit if ipv6 or ipv4 is unavailable but a listen_host with that protocol is specified -->
    <!-- <listen_try>0</listen_try> -->

    <!-- Allow listen on same address:port -->
    <!-- <listen_reuse_port>0</listen_reuse_port> -->

    <!-- <listen_backlog>64</listen_backlog> -->

    <max_connections>4096</max_connections>
    <keep_alive_timeout>3</keep_alive_timeout>

    <!-- Maximum number of concurrent queries. -->
    <max_concurrent_queries>100</max_concurrent_queries>

    <!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
         the correct maximum value. -->
    <!-- <max_open_files>262144</max_open_files> -->

    <!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
         In bytes. Cache is single for server. Memory is allocated only on demand.
         Cache is used when the 'use_uncompressed_cache' user setting is turned on (off by default).
         Uncompressed cache is advantageous only for very short queries and in rare cases.
    -->
    <uncompressed_cache_size>8589934592</uncompressed_cache_size>

    <!-- Approximate size of mark cache, used in tables of MergeTree family.
         In bytes. Cache is single for server. Memory is allocated only on demand.
         You should not lower this value.
    -->
    <mark_cache_size>5368709120</mark_cache_size>


    <!-- Path to data directory, with trailing slash. -->
    <path>/var/lib/clickhouse/</path>

    <!-- Path to temporary data for processing hard queries. -->
    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>

    <!-- Directory with user provided files that are accessible by 'file' table function. -->
    <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>

    <!-- Path to folder where users and roles created by SQL commands are stored. -->
    <access_control_path>/var/lib/clickhouse/access/</access_control_path>

    <!-- Sources to read users, roles, access rights, profiles of settings, quotas. -->
    <user_directories>
        <users_xml>
            <!-- Path to configuration file with predefined users. -->
            <path>users.xml</path>
        </users_xml>
        <local_directory>
            <!-- Path to folder where users created by SQL commands are stored. -->
            <path>/var/lib/clickhouse/access/</path>
        </local_directory>
    </user_directories>

    <!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
    <users_config>users.xml</users_config>

    <!-- Default profile of settings. -->
    <default_profile>default</default_profile>

    <!-- System profile of settings. These settings are used by internal processes (Buffer storage, Distributed DDL worker and so on). -->
    <!-- <system_profile>default</system_profile> -->

    <!-- Default database. -->
    <default_database>default</default_database>

    <!-- Server time zone could be set here.

         Time zone is used when converting between String and DateTime types,
         when printing DateTime in text formats and parsing DateTime from text,
         it is used in date and time related functions, if specific time zone was not passed as an argument.

         Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
         If not specified, system time zone at server startup is used.

         Please note that the server may display a time zone alias instead of the specified name.
         Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
    -->
    <!-- <timezone>Europe/Moscow</timezone> -->

    <!-- You can specify umask here (see "man umask"). Server will apply it on startup.
         Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
    -->
    <!-- <umask>022</umask> -->

    <!-- Perform mlockall after startup to lower first queries latency
         and to prevent clickhouse executable from being paged out under high IO load.
         Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
    -->
    <mlock_executable>false</mlock_executable>

    <!-- Configuration of clusters that could be used in Distributed tables.
         https://clickhouse.yandex/docs/en/table_engines/distributed/
    -->
    <remote_servers incl="clickhouse_remote_servers" >
        <!-- Test only shard config for testing distributed storage -->
        <test_shard_localhost>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_shard_localhost>
        <test_cluster_two_shards_localhost>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_cluster_two_shards_localhost>
        <test_shard_localhost_secure>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9440</port>
                    <secure>1</secure>
                </replica>
            </shard>
        </test_shard_localhost_secure>
        <test_unavailable_shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>1</port>
                </replica>
            </shard>
        </test_unavailable_shard>
    </remote_servers>


    <!-- If an element has an 'incl' attribute, the corresponding substitution from another file is used as its value.
         By default, the path to the file with substitutions is /etc/metrika.xml. It can be changed with the 'include_from' element in the config.
         Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
    -->

    <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
         Optional. If you don't use replicated tables, you can omit it.

         See https://clickhouse.yandex/docs/en/table_engines/replication/
    -->
    <zookeeper incl="zookeeper-servers" optional="true" />

    <!-- Substitutions for parameters of replicated tables.
         Optional. If you don't use replicated tables, you can omit it.

         See https://clickhouse.yandex/docs/en/table_engines/replication/#creating-replicated-tables
    -->
    <macros incl="macros" optional="true" />


    <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
    <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>


    <!-- Maximum session timeout, in seconds. Default: 3600. -->
    <max_session_timeout>3600</max_session_timeout>

    <!-- Default session timeout, in seconds. Default: 60. -->
    <default_session_timeout>60</default_session_timeout>

    <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
    <!--
        interval - send every X second
        root_path - prefix for keys
        hostname_in_path - append hostname to root_path (default = true)
        metrics - send data from table system.metrics
        events - send data from table system.events
        asynchronous_metrics - send data from table system.asynchronous_metrics
    -->
    <!--
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>60</interval>
        <root_path>one_min</root_path>
        <hostname_in_path>true</hostname_in_path>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>true</asynchronous_metrics>
    </graphite>
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>1</interval>
        <root_path>one_sec</root_path>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>false</asynchronous_metrics>
    </graphite>
    -->


    <!-- Query log. Used only for queries with setting log_queries = 1. -->
    <query_log>
        <!-- What table to insert data into. If the table does not exist, it will be created.
             When the query log structure changes after a system update,
             the old table will be renamed and a new table created automatically.
        -->
        <database>system</database>
        <table>query_log</table>
        <!--
            PARTITION BY expr https://clickhouse.yandex/docs/en/table_engines/custom_partitioning_key/
            Example:
                event_date
                toMonday(event_date)
                toYYYYMM(event_date)
                toStartOfHour(event_time)
        -->
        <partition_by>toYYYYMM(event_date)</partition_by>
        <!-- Interval of flushing data. -->
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </query_log>

    <!-- Trace log. Stores stack traces collected by query profilers.
         See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings. -->
    <trace_log>
        <database>system</database>
        <table>trace_log</table>

        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </trace_log>

    <!-- Query thread log. Has information about all threads that participated in query execution.
         Used only for queries with setting log_query_threads = 1. -->
    <query_thread_log>
        <database>system</database>
        <table>query_thread_log</table>
        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </query_thread_log>

    <!-- Uncomment to use the part log.
         Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).
    <part_log>
        <database>system</database>
        <table>part_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </part_log>
    -->

    <!-- Uncomment to write text log into table.
         Text log contains all information from usual server log but stores it in a structured and efficient way.
    <text_log>
        <database>system</database>
        <table>text_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </text_log>
    -->

    <!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
         See https://clickhouse.yandex/docs/en/dicts/internal_dicts/
    -->

    <!-- Path to file with region hierarchy. -->
    <!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->

    <!-- Path to directory with files containing names of regions -->
    <!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->


    <!-- Configuration of external dictionaries. See:
         https://clickhouse.yandex/docs/en/dicts/external_dicts/
    -->
    <dictionaries_config>*_dictionary.xml</dictionaries_config>

    <!-- Uncomment if you want data to be compressed 30-100% better.
         Don't do that if you just started using ClickHouse.
    -->
    <compression incl="clickhouse_compression">
        <!--
        <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
        <case>

            <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
            <min_part_size>10000000000</min_part_size>        <!- - Min part size in bytes. - ->
            <min_part_size_ratio>0.01</min_part_size_ratio>   <!- - Min size of part relative to whole table size. - ->

            <!- - What compression method to use. - ->
            <method>zstd</method>
        </case>
        -->
    </compression>

    <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
         Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
    <distributed_ddl>
        <!-- Path in ZooKeeper to queue with DDL queries -->
        <path>/clickhouse/task_queue/ddl</path>

        <!-- Settings from this profile will be used to execute DDL queries -->
        <!-- <profile>default</profile> -->
    </distributed_ddl>

    <!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
    <!--
    <merge_tree>
        <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
    </merge_tree>
    -->

    <!-- Protection from accidental DROP.
         If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), the table cannot be dropped with any DROP query.
         If you want to delete one table without restarting clickhouse-server, you can create the special file <clickhouse-path>/flags/force_drop_table and make DROP once.
         By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows dropping any table.
         The same applies to max_partition_size_to_drop.
         Uncomment to disable protection.
    -->
    <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
    <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->

    <!-- Example of parameters for GraphiteMergeTree table engine -->
    <graphite_rollup_example>
        <pattern>
            <regexp>click_cost</regexp>
            <function>any</function>
            <retention>
                <age>0</age>
                <precision>3600</precision>
            </retention>
            <retention>
                <age>86400</age>
                <precision>60</precision>
            </retention>
        </pattern>
        <default>
            <function>max</function>
            <retention>
                <age>0</age>
                <precision>60</precision>
            </retention>
            <retention>
                <age>3600</age>
                <precision>300</precision>
            </retention>
            <retention>
                <age>86400</age>
                <precision>3600</precision>
            </retention>
        </default>
    </graphite_rollup_example>

    <!-- Directory in <clickhouse-path> containing schema files for various input formats.
         The directory will be created if it doesn't exist.
    -->
    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>

    <!-- Uncomment to disable ClickHouse internal DNS caching. -->
    <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
</yandex>
133 tests/testflows/window_functions/configs/clickhouse/users.xml (new file)
@@ -0,0 +1,133 @@
<?xml version="1.0"?>
<yandex>
    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
            <!-- Maximum memory usage for processing single query, in bytes. -->
            <max_memory_usage>10000000000</max_memory_usage>

            <!-- Use cache of uncompressed blocks of data. Meaningful only for processing many very short queries. -->
            <use_uncompressed_cache>0</use_uncompressed_cache>

            <!-- How to choose between replicas during distributed query processing.
                 random - choose random replica from set of replicas with minimum number of errors
                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
                     with minimum number of different symbols between replica's hostname and local hostname
                     (Hamming distance).
                 in_order - first live replica is chosen in specified order.
                 first_or_random - if the first replica has a higher number of errors, pick a random one from replicas with minimum number of errors.
            -->
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <!-- If user name was not specified, 'default' user is used. -->
        <default>
            <!-- Password could be specified in plaintext or in SHA256 (in hex format).

                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
                 Example: <password>qwerty</password>.
                 Password could be empty.

                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
                 Restriction of SHA256: it is impossible to connect to ClickHouse using the MySQL JS client (as of July 2019).

                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

                 How to generate a decent password:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
                 The first line will be the password and the second the corresponding SHA256.

                 How to generate double SHA1:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | openssl dgst -sha1 -binary | openssl dgst -sha1
                 The first line will be the password and the second the corresponding double SHA1.
            -->
            <password></password>

            <!-- List of networks with open access.

                 To open access from everywhere, specify:
                    <ip>::/0</ip>

                 To open access only from localhost, specify:
                    <ip>::1</ip>
                    <ip>127.0.0.1</ip>

                 Each element of the list has one of the following forms:
                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
                 <host> Hostname. Example: server01.yandex.ru.
                     To check access, a DNS query is performed, and all received addresses are compared to the peer address.
                 <host_regexp> Regular expression for host names. Example: ^server\d\d-\d\d-\d\.yandex\.ru$
                     To check access, a DNS PTR query is performed for the peer address and then the regexp is applied.
                     Then, for the result of the PTR query, another DNS query is performed and all received addresses are compared to the peer address.
                     It is strongly recommended that the regexp ends with $.
                 All results of DNS requests are cached until server restart.
            -->
            <networks incl="networks" replace="replace">
                <ip>::/0</ip>
            </networks>

            <!-- Settings profile for user. -->
            <profile>default</profile>

            <!-- Quota for user. -->
            <quota>default</quota>

            <!-- Allow access management -->
            <access_management>1</access_management>

            <!-- Example of row level security policy. -->
            <!-- <databases>
                <test>
                    <filtered_table1>
                        <filter>a = 1</filter>
                    </filtered_table1>
                    <filtered_table2>
                        <filter>a + b < 1 or c - d > 5</filter>
                    </filtered_table2>
                </test>
            </databases> -->
        </default>

        <!-- Example of user with readonly access. -->
        <!-- <readonly>
            <password></password>
            <networks incl="networks" replace="replace">
                <ip>::1</ip>
                <ip>127.0.0.1</ip>
            </networks>
            <profile>readonly</profile>
            <quota>default</quota>
        </readonly> -->
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>

                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</yandex>
@@ -0,0 +1,7 @@
<?xml version="1.0"?>
<yandex>
    <macros>
        <replica>clickhouse1</replica>
        <shard>01</shard>
    </macros>
</yandex>
@@ -0,0 +1,7 @@
<?xml version="1.0"?>
<yandex>
    <macros>
        <replica>clickhouse2</replica>
        <shard>02</shard>
    </macros>
</yandex>
@@ -0,0 +1,7 @@
<?xml version="1.0"?>
<yandex>
    <macros>
        <replica>clickhouse3</replica>
        <shard>03</shard>
    </macros>
</yandex>
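Editor's note (not part of the diff): these three macros files give each node its own `{shard}` and `{replica}` substitution, which is what lets a single ON CLUSTER statement create a distinct ZooKeeper path per node. A sketch of the expansion, using the `empsalary_source` DDL that appears in tests/common.py below:

    # Sketch: the same DDL string is sent to every node; the server expands
    # the macros from its own macros.xml.
    ddl = ("CREATE TABLE empsalary_source ON CLUSTER sharded_cluster (...) "
           "ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/empsalary_source', '{replica}')")

    # On clickhouse1:
    #   '/clickhouse/tables/{shard}/empsalary_source' -> '/clickhouse/tables/01/empsalary_source'
    #   '{replica}'                                   -> 'clickhouse1'
    # On clickhouse2 and clickhouse3 the same DDL yields .../02/... and .../03/...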
27 tests/testflows/window_functions/docker-compose/clickhouse-service.yml (new executable file)
@@ -0,0 +1,27 @@
version: '2.3'

services:
  clickhouse:
    image: yandex/clickhouse-integration-test
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    healthcheck:
      test: clickhouse client --query='select 1'
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
60 tests/testflows/window_functions/docker-compose/docker-compose.yml (new executable file)
@@ -0,0 +1,60 @@
version: '2.3'

services:
  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows us to postpone
  # 'docker-compose up -d' until all dependencies are healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
18 tests/testflows/window_functions/docker-compose/zookeeper-service.yml (new executable file)
@@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 3s
      timeout: 2s
      retries: 5
      start_period: 2s
    security_opt:
      - label:disable
96 tests/testflows/window_functions/regression.py (new executable file)
@@ -0,0 +1,96 @@
#!/usr/bin/env python3
import sys

from testflows.core import *

append_path(sys.path, "..")

from helpers.cluster import Cluster
from helpers.argparser import argparser
from window_functions.requirements import SRS019_ClickHouse_Window_Functions, RQ_SRS_019_ClickHouse_WindowFunctions

xfails = {
    "tests/:/frame clause/range frame/between expr following and expr following without order by error":
        [(Fail, "invalid error message")],
    "tests/:/frame clause/range frame/between expr following and expr preceding without order by error":
        [(Fail, "invalid error message")],
    "tests/:/frame clause/range frame/between expr following and current row without order by error":
        [(Fail, "invalid error message")],
    "tests/:/frame clause/range frame/between expr following and current row zero special case":
        [(Fail, "known bug")],
    "tests/:/frame clause/range frame/between expr following and expr preceding with order by zero special case":
        [(Fail, "known bug")],
    "tests/:/funcs/lag/anyOrNull with column value as offset":
        [(Fail, "column values are not supported as offset")],
    "tests/:/funcs/lead/subquery as offset":
        [(Fail, "subquery is not supported as offset")],
    "tests/:/frame clause/range frame/between current row and unbounded following modifying named window":
        [(Fail, "range with named window is not supported")],
    "tests/:/frame clause/range overflow/negative overflow with Int16":
        [(Fail, "exception on conversion")],
    "tests/:/frame clause/range overflow/positive overflow with Int16":
        [(Fail, "exception on conversion")],
    "tests/:/misc/subquery expr preceding":
        [(Fail, "subquery is not supported as offset")],
    "tests/:/frame clause/range errors/error negative preceding offset":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22442")],
    "tests/:/frame clause/range errors/error negative following offset":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22442")],
    "tests/:/misc/window functions in select expression":
        [(Fail, "not supported, https://github.com/ClickHouse/ClickHouse/issues/19857")],
    "tests/:/misc/window functions in subquery":
        [(Fail, "not supported, https://github.com/ClickHouse/ClickHouse/issues/19857")],
    "tests/:/frame clause/range frame/order by decimal":
        [(Fail, "Exception: The RANGE OFFSET frame for 'DB::ColumnDecimal<DB::Decimal<long> >' ORDER BY column is not implemented")],
    "tests/:/frame clause/range frame/with nulls":
        [(Fail, "DB::Exception: The RANGE OFFSET frame for 'DB::ColumnNullable' ORDER BY column is not implemented")],
    "tests/:/aggregate funcs/aggregate funcs over rows frame/func='mannWhitneyUTest(salary, 1)'":
        [(Fail, "need to investigate")],
    "tests/:/aggregate funcs/aggregate funcs over rows frame/func='rankCorr(salary, 0.5)'":
        [(Fail, "need to investigate")],
    "tests/distributed/misc/query with order by and one window":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")],
    "tests/distributed/over clause/empty named window":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")],
    "tests/distributed/over clause/empty":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")],
    "tests/distributed/over clause/adhoc window":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")],
    "tests/distributed/frame clause/range datetime/:":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")],
    "tests/distributed/frame clause/range frame/between expr preceding and expr following with partition by same column twice":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")]
}

xflags = {
}

@TestModule
@ArgumentParser(argparser)
@XFails(xfails)
@XFlags(xflags)
@Name("window functions")
@Specifications(
    SRS019_ClickHouse_Window_Functions
)
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions("1.0")
)
def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
    """Window functions regression."""
    nodes = {
        "clickhouse":
            ("clickhouse1", "clickhouse2", "clickhouse3")
    }
    with Cluster(local, clickhouse_binary_path, nodes=nodes) as cluster:
        self.context.cluster = cluster
        self.context.stress = stress

        if parallel is not None:
            self.context.parallel = parallel

        Feature(run=load("window_functions.tests.feature", "feature"), flags=TE)

if main():
    regression()
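Editor's note (not part of the diff): the `xfails` keys above are patterns over test names, where `:` stands for a single path segment, so `tests/:/...` covers both the local and distributed variants of a test while `tests/distributed/...` pins one variant. An approximate, self-contained illustration of the matching idea, not TestFlows' actual matcher:

    # Illustration only: approximate ':' (one name segment) with fnmatch's '*'.
    from fnmatch import fnmatch

    def xfail_matches(pattern: str, test_name: str) -> bool:
        # fnmatch's '*' also crosses '/' boundaries, so this is looser than
        # TestFlows' real pattern language; it is enough to show the intent.
        return fnmatch(test_name, pattern.replace(":", "*"))

    assert xfail_matches(
        "tests/:/funcs/lead/subquery as offset",
        "tests/distributed/funcs/lead/subquery as offset",
    )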
@@ -0,0 +1 @@
from .requirements import *
2292 tests/testflows/window_functions/requirements/requirements.md (new file; diff suppressed because it is too large)
5887 tests/testflows/window_functions/requirements/requirements.py (new file; diff suppressed because it is too large)
0 tests/testflows/window_functions/tests/__init__.py (new empty file)
331 tests/testflows/window_functions/tests/aggregate_funcs.py (new file)
@@ -0,0 +1,331 @@
from testflows.core import *
from testflows.asserts import values, error, snapshot

from window_functions.requirements import *
from window_functions.tests.common import *

@TestOutline(Scenario)
@Examples("func", [
    ("count(salary)",),
    ("min(salary)",),
    ("max(salary)",),
    ("sum(salary)",),
    ("avg(salary)",),
    ("any(salary)",),
    ("stddevPop(salary)",),
    ("stddevSamp(salary)",),
    ("varPop(salary)",),
    ("varSamp(salary)",),
    ("covarPop(salary, 2000)",),
    ("covarSamp(salary, 2000)",),
    ("anyHeavy(salary)",),
    ("anyLast(salary)",),
    ("argMin(salary, 5000)",),
    ("argMax(salary, 5000)",),
    ("avgWeighted(salary, 1)",),
    ("corr(salary, 0.5)",),
    ("topK(salary)",),
    ("topKWeighted(salary, 1)",),
    ("groupArray(salary)",),
    ("groupUniqArray(salary)",),
    ("groupArrayInsertAt(salary, 0)",),
    ("groupArrayMovingSum(salary)",),
    ("groupArrayMovingAvg(salary)",),
    ("groupArraySample(3, 1234)(salary)",),
    ("groupBitAnd(toUInt8(salary))",),
    ("groupBitOr(toUInt8(salary))",),
    ("groupBitXor(toUInt8(salary))",),
    ("groupBitmap(toUInt8(salary))",),
    # ("groupBitmapAnd",),
    # ("groupBitmapOr",),
    # ("groupBitmapXor",),
    ("sumWithOverflow(salary)",),
    ("deltaSum(salary)",),
    ("sumMap([5000], [salary])",),
    ("minMap([5000], [salary])",),
    ("maxMap([5000], [salary])",),
    # ("initializeAggregation",),
    ("skewPop(salary)",),
    ("skewSamp(salary)",),
    ("kurtPop(salary)",),
    ("kurtSamp(salary)",),
    ("uniq(salary)",),
    ("uniqExact(salary)",),
    ("uniqCombined(salary)",),
    ("uniqCombined64(salary)",),
    ("uniqHLL12(salary)",),
    ("quantile(salary)",),
    ("quantiles(0.5)(salary)",),
    ("quantileExact(salary)",),
    ("quantileExactWeighted(salary, 1)",),
    ("quantileTiming(salary)",),
    ("quantileTimingWeighted(salary, 1)",),
    ("quantileDeterministic(salary, 1234)",),
    ("quantileTDigest(salary)",),
    ("quantileTDigestWeighted(salary, 1)",),
    ("simpleLinearRegression(salary, empno)",),
    ("stochasticLinearRegression(salary, 1)",),
    ("stochasticLogisticRegression(salary, 1)",),
    # ("categoricalInformationValue(salary, 0)",),
    ("studentTTest(salary, 1)",),
    ("welchTTest(salary, 1)",),
    ("mannWhitneyUTest(salary, 1)",),
    ("median(salary)",),
    ("rankCorr(salary, 0.5)",),
])
def aggregate_funcs_over_rows_frame(self, func):
    """Check aggregate funcs over a rows frame."""
    execute_query(f"""
        SELECT {func} OVER (ORDER BY salary, empno ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS func
        FROM empsalary
        """
    )

@TestScenario
def avg_with_nulls(self):
    """Check `avg` aggregate function using a window that contains NULLs."""
    expected = convert_output("""
      i | avg
     ---+--------------------
      1 | 1.5
      2 | 2
      3 | \\N
      4 | \\N
    """)

    execute_query("""
        SELECT i, avg(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS avg
        FROM values('i Int32, v Nullable(Int32)', (1,1),(2,2),(3,NULL),(4,NULL))
        """,
        expected=expected
    )

@TestScenario
def var_pop(self):
    """Check `var_pop` aggregate function over a window."""
    expected = convert_output("""
      var_pop
    -----------------------
      21704
      13868.75
      11266.666666666666
      4225
      0
    """)

    execute_query("""
        SELECT VAR_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS var_pop
        FROM values('i Int8, n Int32', (1,600),(2,470),(3,170),(4,430),(5,300))
        """,
        expected=expected
    )

@TestScenario
def var_samp(self):
    """Check `var_samp` aggregate function over a window."""
    expected = convert_output("""
      var_samp
    -----------------------
      27130
      18491.666666666668
      16900
      8450
      nan
    """)

    execute_query("""
        SELECT VAR_SAMP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS var_samp
        FROM VALUES('i Int8, n Int16',(1,600),(2,470),(3,170),(4,430),(5,300))
        """,
        expected=expected
    )

@TestScenario
def stddevpop(self):
    """Check `stddevPop` aggregate function over a window."""
    expected = convert_output("""
      stddev_pop
    ---------------------
      147.32277488562318
      147.32277488562318
      117.76565713313877
      106.14455552060438
      65
      0
    """)

    execute_query("""
        SELECT stddevPop(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS stddev_pop
        FROM VALUES('i Int8, n Nullable(Int16)',(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300))
        """,
        expected=expected
    )

@TestScenario
def stddevsamp(self):
    """Check `stddevSamp` aggregate function over a window."""
    expected = convert_output("""
      stddev_samp
    ---------------------
      164.7118696390761
      164.7118696390761
      135.9840676942217
      130
      91.92388155425118
      nan
    """)

    execute_query("""
        SELECT stddevSamp(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS stddev_samp
        FROM VALUES('i Int8, n Nullable(Int16)',(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300))
        """,
        expected=expected
    )

@TestScenario
def aggregate_function_recovers_from_nan(self):
    """Check that an aggregate function can recover from a `nan` value inside a window."""
    expected = convert_output("""
      a | b   | sum
     ---+-----+-----
      1 | 1   | 1
      2 | 2   | 3
      3 | nan | nan
      4 | 3   | nan
      5 | 4   | 7
    """)

    execute_query("""
        SELECT a, b,
               SUM(b) OVER(ORDER BY a ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS sum
        FROM VALUES('a Int8, b Float64',(1,1),(2,2),(3,nan),(4,3),(5,4))
        """,
        expected=expected
    )

@TestScenario
def bit_functions(self):
    """Check trying to use bitwise functions over a window."""
    expected = convert_output("""
      i | b | bool_and | bool_or
     ---+---+----------+---------
      1 | 1 | 1        | 1
      2 | 1 | 0        | 1
      3 | 0 | 0        | 0
      4 | 0 | 0        | 1
      5 | 1 | 1        | 1
    """)

    execute_query("""
        SELECT i, b, groupBitAnd(b) OVER w AS bool_and, groupBitOr(b) OVER w AS bool_or
        FROM VALUES('i Int8, b UInt8', (1,1), (2,1), (3,0), (4,0), (5,1))
        WINDOW w AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING)
        """,
        expected=expected
    )

@TestScenario
def sum(self):
    """Check calculation of sum over a window."""
    expected = convert_output("""
      sum_1 | ten | four
     -------+-----+------
      0     | 0   | 0
      0     | 0   | 0
      2     | 0   | 2
      3     | 1   | 3
      4     | 1   | 1
      5     | 1   | 1
      3     | 3   | 3
      0     | 4   | 0
      1     | 7   | 1
      1     | 9   | 1
    """)

    execute_query(
        "SELECT sum(four) OVER (PARTITION BY ten ORDER BY unique2) AS sum_1, ten, four FROM tenk1 WHERE unique2 < 10",
        expected=expected
    )

@TestScenario
def nested_aggregates(self):
    """Check using nested aggregates over a window."""
    expected = convert_output("""
      ten | two | gsum  | wsum
     -----+-----+-------+--------
      0   | 0   | 45000 | 45000
      2   | 0   | 47000 | 92000
      4   | 0   | 49000 | 141000
      6   | 0   | 51000 | 192000
      8   | 0   | 53000 | 245000
      1   | 1   | 46000 | 46000
      3   | 1   | 48000 | 94000
      5   | 1   | 50000 | 144000
      7   | 1   | 52000 | 196000
      9   | 1   | 54000 | 250000
    """)

    execute_query(
        "SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum FROM tenk1 GROUP BY ten, two",
        expected=expected
    )

@TestScenario
def aggregate_and_window_function_in_the_same_window(self):
    """Check using an aggregate and a window function in the same window."""
    expected = convert_output("""
      sum   | rank
     -------+------
      6000  | 1
      16400 | 2
      16400 | 2
      20900 | 4
      25100 | 5
      3900  | 1
      7400  | 2
      5000  | 1
      14600 | 2
      14600 | 2
    """)

    execute_query(
        "SELECT sum(salary) OVER w AS sum, rank() OVER w AS rank FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary DESC)",
        expected=expected
    )

@TestScenario
def ungrouped_aggregate_over_empty_row_set(self):
    """Check using a window function with an ungrouped aggregate over an empty row set."""
    expected = convert_output("""
      sum
     -----
      0
    """)

    execute_query(
        "SELECT SUM(COUNT(number)) OVER () AS sum FROM numbers(10) WHERE number=42",
        expected=expected
    )

@TestFeature
@Name("aggregate funcs")
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_AggregateFunctions("1.0")
)
def feature(self):
    """Check using aggregate functions over windows."""
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)
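Editor's note (not part of the diff): `@TestOutline(Scenario)` combined with `@Examples` runs the outline once per row, and the example values become part of the generated scenario's name; that naming is exactly what the `xfails` keys in regression.py hook into.

    # Illustration: each Examples row above yields a scenario whose name
    # embeds the row, e.g.
    #   tests/aggregate funcs/aggregate funcs over rows frame/func='rankCorr(salary, 0.5)'
    # which matches the form targeted by the xfails entries in regression.py.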
404 tests/testflows/window_functions/tests/common.py (new file)
@@ -0,0 +1,404 @@
import os
import re
import uuid
import tempfile

from testflows.core import *
from testflows.core.name import basename, parentname
from testflows._core.testtype import TestSubType
from testflows.asserts import values, error, snapshot

def window_frame_error():
    return (36, "Exception: Window frame")

def frame_start_error():
    return (36, "Exception: Frame start")

def frame_end_error():
    return (36, "Exception: Frame end")

def frame_offset_nonnegative_error():
    return syntax_error()

def frame_end_unbounded_preceding_error():
    return (36, "Exception: Frame end cannot be UNBOUNDED PRECEDING")

def frame_range_offset_error():
    return (48, "Exception: The RANGE OFFSET frame")

def frame_requires_order_by_error():
    return (36, "Exception: The RANGE OFFSET window frame requires exactly one ORDER BY column, 0 given")

def syntax_error():
    return (62, "Exception: Syntax error")

def groups_frame_error():
    return (48, "Exception: Window frame 'GROUPS' is not implemented")

def getuid():
    if current().subtype == TestSubType.Example:
        testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',', '')}"
    else:
        testname = f"{basename(current().name).replace(' ', '_').replace(',', '')}"
    return testname + "_" + str(uuid.uuid1()).replace('-', '_')

def convert_output(s):
    """Convert expected output to TSV format."""
    return '\n'.join([l.strip() for i, l in enumerate(re.sub(r'\s+\|\s+', '\t', s).strip().splitlines()) if i != 1])
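# Illustration (editor's note, not part of the diff): convert_output() turns
# the ASCII tables used as expected outputs into TSV -- " | " separators
# become tabs, the second line (the ---+--- rule) is dropped, and each line
# is stripped, e.g.
#
#   convert_output("""
#     i | avg
#    ---+-----
#     1 | 1.5
#   """)
#
# returns "i\tavg\n1\t1.5", matching TabSeparatedWithNames output.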
def execute_query(sql, expected=None, exitcode=None, message=None, format="TabSeparatedWithNames"):
|
||||
"""Execute SQL query and compare the output to the snapshot.
|
||||
"""
|
||||
name = basename(current().name)
|
||||
|
||||
with When("I execute query", description=sql):
|
||||
r = current().context.node.query(sql + " FORMAT " + format, exitcode=exitcode, message=message)
|
||||
|
||||
if message is None:
|
||||
if expected is not None:
|
||||
with Then("I check output against expected"):
|
||||
assert r.output.strip() == expected, error()
|
||||
else:
|
||||
with Then("I check output against snapshot"):
|
||||
with values() as that:
|
||||
assert that(snapshot("\n" + r.output.strip() + "\n", "tests", name=name, encoder=str)), error()
|
||||
|
||||
@TestStep(Given)
|
||||
def t1_table(self, name="t1", distributed=False):
|
||||
"""Create t1 table.
|
||||
"""
|
||||
table = None
|
||||
data = [
|
||||
"(1, 1)",
|
||||
"(1, 2)",
|
||||
"(2, 2)"
|
||||
]
|
||||
|
||||
if not distributed:
|
||||
with By("creating table"):
|
||||
sql = """
|
||||
CREATE TABLE {name} (
|
||||
f1 Int8,
|
||||
f2 Int8
|
||||
) ENGINE = MergeTree ORDER BY tuple()
|
||||
"""
|
||||
table = create_table(name=name, statement=sql)
|
||||
|
||||
with And("populating table with data"):
|
||||
sql = f"INSERT INTO {name} VALUES {','.join(data)}"
|
||||
self.context.node.query(sql)
|
||||
|
||||
else:
|
||||
with By("creating table"):
|
||||
sql = """
|
||||
CREATE TABLE {name} ON CLUSTER sharded_cluster (
|
||||
f1 Int8,
|
||||
f2 Int8
|
||||
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') ORDER BY tuple()
|
||||
"""
|
||||
create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster")
|
||||
|
||||
with And("a distributed table"):
|
||||
sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, rand())"
|
||||
table = create_table(name=name, statement=sql)
|
||||
|
||||
with And("populating table with data"):
|
||||
for row in data:
|
||||
sql = f"INSERT INTO {name} VALUES {row}"
|
||||
self.context.node.query(sql)
|
||||
|
||||
return table
|
||||
|
||||
@TestStep(Given)
|
||||
def datetimes_table(self, name="datetimes", distributed=False):
|
||||
"""Create datetimes table.
|
||||
"""
|
||||
table = None
|
||||
data = [
|
||||
"(1, '2000-10-19 10:23:54', '2000-10-19 10:23:54')",
|
||||
"(2, '2001-10-19 10:23:54', '2001-10-19 10:23:54')",
|
||||
"(3, '2001-10-19 10:23:54', '2001-10-19 10:23:54')",
|
||||
"(4, '2002-10-19 10:23:54', '2002-10-19 10:23:54')",
|
||||
"(5, '2003-10-19 10:23:54', '2003-10-19 10:23:54')",
|
||||
"(6, '2004-10-19 10:23:54', '2004-10-19 10:23:54')",
|
||||
"(7, '2005-10-19 10:23:54', '2005-10-19 10:23:54')",
|
||||
"(8, '2006-10-19 10:23:54', '2006-10-19 10:23:54')",
|
||||
"(9, '2007-10-19 10:23:54', '2007-10-19 10:23:54')",
|
||||
"(10, '2008-10-19 10:23:54', '2008-10-19 10:23:54')"
|
||||
]
|
||||
|
||||
if not distributed:
|
||||
with By("creating table"):
|
||||
sql = """
|
||||
CREATE TABLE {name} (
|
||||
id UInt32,
|
||||
f_timestamptz DateTime('CET'),
|
||||
f_timestamp DateTime
|
||||
) ENGINE = MergeTree() ORDER BY tuple()
|
||||
"""
|
||||
table = create_table(name=name, statement=sql)
|
||||
|
||||
with And("populating table with data"):
|
||||
sql = f"INSERT INTO {name} VALUES {','.join(data)}"
|
||||
self.context.node.query(sql)
|
||||
|
||||
else:
|
||||
with By("creating table"):
|
||||
sql = """
|
||||
CREATE TABLE {name} ON CLUSTER sharded_cluster (
|
||||
id UInt32,
|
||||
f_timestamptz DateTime('CET'),
|
||||
f_timestamp DateTime
|
||||
            ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') ORDER BY tuple()
            """
            create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster")

        with And("a distributed table"):
            sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, rand())"
            table = create_table(name=name, statement=sql)

        with And("populating table with data"):
            for row in data:
                sql = f"INSERT INTO {name} VALUES {row}"
                self.context.node.query(sql)

    return table


@TestStep(Given)
def numerics_table(self, name="numerics", distributed=False):
    """Create numerics tables.
    """
    table = None

    data = [
        "(0, '-infinity', '-infinity', toDecimal64(-1000,15))",
        "(1, -3, -3, -3)",
        "(2, -1, -1, -1)",
        "(3, 0, 0, 0)",
        "(4, 1.1, 1.1, 1.1)",
        "(5, 1.12, 1.12, 1.12)",
        "(6, 2, 2, 2)",
        "(7, 100, 100, 100)",
        "(8, 'infinity', 'infinity', toDecimal64(1000,15))",
        "(9, 'NaN', 'NaN', 0)"
    ]

    if not distributed:
        with By("creating a table"):
            sql = """
            CREATE TABLE {name} (
                id Int32,
                f_float4 Float32,
                f_float8 Float64,
                f_numeric Decimal64(15)
            ) ENGINE = MergeTree() ORDER BY tuple();
            """
            create_table(name=name, statement=sql)

        with And("populating table with data"):
            sql = f"INSERT INTO {name} VALUES {','.join(data)}"
            self.context.node.query(sql)

    else:
        with By("creating a table"):
            sql = """
            CREATE TABLE {name} ON CLUSTER sharded_cluster (
                id Int32,
                f_float4 Float32,
                f_float8 Float64,
                f_numeric Decimal64(15)
            ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') ORDER BY tuple();
            """
            create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster")

        with And("a distributed table"):
            sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, rand())"
            table = create_table(name=name, statement=sql)

        with And("populating table with data"):
            for row in data:
                sql = f"INSERT INTO {name} VALUES {row}"
                self.context.node.query(sql)

    return table


@TestStep(Given)
def tenk1_table(self, name="tenk1", distributed=False):
    """Create tenk1 table.
    """
    table = None

    if not distributed:
        with By("creating a table"):
            sql = """
            CREATE TABLE {name} (
                unique1 Int32,
                unique2 Int32,
                two Int32,
                four Int32,
                ten Int32,
                twenty Int32,
                hundred Int32,
                thousand Int32,
                twothousand Int32,
                fivethous Int32,
                tenthous Int32,
                odd Int32,
                even Int32,
                stringu1 String,
                stringu2 String,
                string4 String
            ) ENGINE = MergeTree() ORDER BY tuple()
            """
            table = create_table(name=name, statement=sql)

        with And("populating table with data"):
            datafile = os.path.join(current_dir(), "tenk.data")
            debug(datafile)
            self.context.cluster.command(None, f"cat \"{datafile}\" | {self.context.node.cluster.docker_compose} exec -T {self.context.node.name} clickhouse client -q \"INSERT INTO {name} FORMAT TSV\"", exitcode=0)

    else:
        with By("creating a table"):
            sql = """
            CREATE TABLE {name} ON CLUSTER sharded_cluster (
                unique1 Int32,
                unique2 Int32,
                two Int32,
                four Int32,
                ten Int32,
                twenty Int32,
                hundred Int32,
                thousand Int32,
                twothousand Int32,
                fivethous Int32,
                tenthous Int32,
                odd Int32,
                even Int32,
                stringu1 String,
                stringu2 String,
                string4 String
            ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') ORDER BY tuple()
            """
            create_table(name=name + '_source', statement=sql, on_cluster="sharded_cluster")

        with And("a distributed table"):
            sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, rand())"
            table = create_table(name=name, statement=sql)

        with And("populating table with data"):
            datafile = os.path.join(current_dir(), "tenk.data")

            with open(datafile, "r") as file:
                lines = file.readlines()

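            # Note: feeding the data in 1000-line chunks keeps each shell command
            # small; going through the Distributed table, the rand() sharding key
            # then spreads the inserted blocks across the shards.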
            chunks = [lines[i:i + 1000] for i in range(0, len(lines), 1000)]

            for chunk in chunks:
                with tempfile.NamedTemporaryFile() as file:
                    file.write(''.join(chunk).encode("utf-8"))
                    file.flush()
                    self.context.cluster.command(None,
                        f"cat \"{file.name}\" | {self.context.node.cluster.docker_compose} exec -T {self.context.node.name} clickhouse client -q \"INSERT INTO {table} FORMAT TSV\"",
                        exitcode=0)

    return table


@TestStep(Given)
def empsalary_table(self, name="empsalary", distributed=False):
    """Create employee salary reference table.
    """
    table = None

    data = [
        "('develop', 10, 5200, '2007-08-01')",
        "('sales', 1, 5000, '2006-10-01')",
        "('personnel', 5, 3500, '2007-12-10')",
        "('sales', 4, 4800, '2007-08-08')",
        "('personnel', 2, 3900, '2006-12-23')",
        "('develop', 7, 4200, '2008-01-01')",
        "('develop', 9, 4500, '2008-01-01')",
        "('sales', 3, 4800, '2007-08-01')",
        "('develop', 8, 6000, '2006-10-01')",
        "('develop', 11, 5200, '2007-08-15')"
    ]

    if not distributed:
        with By("creating a table"):
            sql = """
            CREATE TABLE {name} (
                depname LowCardinality(String),
                empno UInt64,
                salary Int32,
                enroll_date Date
            )
            ENGINE = MergeTree() ORDER BY enroll_date
            """
            table = create_table(name=name, statement=sql)

        with And("populating table with data"):
            sql = f"INSERT INTO {name} VALUES {','.join(data)}"
            self.context.node.query(sql)

    else:
        with By("creating replicated source tables"):
            sql = """
            CREATE TABLE {name} ON CLUSTER sharded_cluster (
                depname LowCardinality(String),
                empno UInt64,
                salary Int32,
                enroll_date Date
            )
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') ORDER BY enroll_date
            """
            create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster")

        with And("a distributed table"):
            sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, rand())"
            table = create_table(name=name, statement=sql)

        with And("populating distributed table with data"):
            with By("inserting one data row at a time", description="so that data is sharded between nodes"):
                for row in data:
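                    # insert_distributed_sync=1 makes each INSERT wait until the row
                    # has actually been written on the target shard, not just queued.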
self.context.node.query(f"INSERT INTO {table} VALUES {row}",
|
||||
settings=[("insert_distributed_sync", "1")])
|
||||
|
||||
with And("dumping all the data in the table"):
|
||||
self.context.node.query(f"SELECT * FROM {table}")
|
||||
|
||||
return table
|
||||
|
||||
@TestStep(Given)
|
||||
def create_table(self, name, statement, on_cluster=False):
|
||||
"""Create table.
|
||||
"""
|
||||
node = current().context.node
|
||||
try:
|
||||
with Given(f"I have a {name} table"):
|
||||
node.query(statement.format(name=name))
|
||||
yield name
|
||||
finally:
|
||||
with Finally("I drop the table"):
|
||||
if on_cluster:
|
||||
node.query(f"DROP TABLE IF EXISTS {name} ON CLUSTER {on_cluster}")
|
||||
else:
|
||||
node.query(f"DROP TABLE IF EXISTS {name}")
|
||||
|
||||
@TestStep(Given)
|
||||
def allow_experimental_window_functions(self):
|
||||
"""Set allow_experimental_window_functions = 1
|
||||
"""
|
||||
setting = ("allow_experimental_window_functions", 1)
|
||||
default_query_settings = None
|
||||
|
||||
try:
|
||||
with By("adding allow_experimental_window_functions to the default query settings"):
|
||||
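            # getsattr is a TestFlows helper (not a typo of getattr): it reads the
            # context attribute and, presumably, initializes it to the default []
            # when the attribute does not exist yet.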
            default_query_settings = getsattr(current().context, "default_query_settings", [])
            default_query_settings.append(setting)
        yield
    finally:
        with Finally("I remove allow_experimental_window_functions from the default query settings"):
            if default_query_settings:
                try:
                    default_query_settings.pop(default_query_settings.index(setting))
                except ValueError:
                    pass

137
tests/testflows/window_functions/tests/errors.py
Normal file
@ -0,0 +1,137 @@
from testflows.core import *

from window_functions.requirements import *
from window_functions.tests.common import *

@TestScenario
def error_using_non_window_function(self):
    """Check that trying to use a non-window, non-aggregate function over a window
    returns an error.
    """
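    # exitcode 63 corresponds to ClickHouse's UNKNOWN_AGGREGATE_FUNCTION error.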
    exitcode = 63
    message = "DB::Exception: Unknown aggregate function numbers"

    sql = ("SELECT numbers(1, 100) OVER () FROM empsalary")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_order_by_another_window_function(self):
    """Check that trying to order by another window function returns an error.
    """
    exitcode = 184
    message = "DB::Exception: Window function rank() OVER (ORDER BY random() ASC) is found inside window definition in query"

    sql = ("SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random()))")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_window_function_in_where(self):
    """Check that trying to use window function in `WHERE` returns an error.
    """
    exitcode = 184
    message = "DB::Exception: Window function row_number() OVER (ORDER BY salary ASC) is found in WHERE in query"

    sql = ("SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_window_function_in_join(self):
    """Check that trying to use window function in `JOIN` returns an error.
    """
    exitcode = 48
    message = "DB::Exception: JOIN ON inequalities are not supported. Unexpected 'row_number() OVER (ORDER BY salary ASC) < 10"

    sql = ("SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_window_function_in_group_by(self):
    """Check that trying to use window function in `GROUP BY` returns an error.
    """
    exitcode = 47
    message = "DB::Exception: Unknown identifier: row_number() OVER (ORDER BY salary ASC); there are columns"

    sql = ("SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY row_number() OVER (ORDER BY salary) < 10")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_window_function_in_having(self):
    """Check that trying to use window function in `HAVING` returns an error.
    """
    exitcode = 184
    message = "DB::Exception: Window function row_number() OVER (ORDER BY salary ASC) is found in HAVING in query"

    sql = ("SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY salary HAVING row_number() OVER (ORDER BY salary) < 10")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_select_from_window(self):
    """Check that trying to use window function in `FROM` returns an error.
    """
    exitcode = 46
    message = "DB::Exception: Unknown table function rank"

    sql = ("SELECT * FROM rank() OVER (ORDER BY random())")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_window_function_in_alter_delete_where(self):
    """Check that trying to use window function in `ALTER DELETE`'s `WHERE` clause returns an error.
    """
    if self.context.distributed:
        exitcode = 48
        message = "Exception: Table engine Distributed doesn't support mutations"
    else:
        exitcode = 184
        message = "DB::Exception: Window function rank() OVER (ORDER BY random() ASC) is found in WHERE in query"

    sql = ("ALTER TABLE empsalary DELETE WHERE (rank() OVER (ORDER BY random())) > 10")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_named_window_defined_twice(self):
    """Check that trying to define a named window twice returns an error.
    """
    exitcode = 36
    message = "DB::Exception: Window 'w' is defined twice in the WINDOW clause"

    sql = ("SELECT count(*) OVER w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY unique1)")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_comma_between_partition_by_and_order_by_clause(self):
    """Check that using a comma between the partition by and order by clauses returns an error.
    """
    exitcode = 62
    message = "DB::Exception: Syntax error"

    sql = ("SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestFeature
@Name("errors")
def feature(self):
    """Check different error conditions.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)

47
tests/testflows/window_functions/tests/feature.py
Executable file
@ -0,0 +1,47 @@
from testflows.core import *

from window_functions.tests.common import *
from window_functions.requirements import *


@TestOutline(Feature)
@Name("tests")
@Examples("distributed", [
    (False, Name("non distributed"), Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_NonDistributedTables("1.0"))),
    (True, Name("distributed"), Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_DistributedTables("1.0")))
])
def feature(self, distributed, node="clickhouse1"):
    """Check window functions behavior using non-distributed or
    distributed tables.
    """
    self.context.distributed = distributed
    self.context.node = self.context.cluster.node(node)

    with Given("I allow experimental window functions"):
        allow_experimental_window_functions()

    with And("employee salary table"):
        empsalary_table(distributed=distributed)

    with And("tenk1 table"):
        tenk1_table(distributed=distributed)

    with And("numerics table"):
        numerics_table(distributed=distributed)

    with And("datetimes table"):
        datetimes_table(distributed=distributed)

    with And("t1 table"):
        t1_table(distributed=distributed)

    Feature(run=load("window_functions.tests.window_spec", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.partition_clause", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.order_clause", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.frame_clause", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.window_clause", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.over_clause", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.funcs", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.aggregate_funcs", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.errors", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.misc", "feature"), flags=TE)

29
tests/testflows/window_functions/tests/frame_clause.py
Normal file
@ -0,0 +1,29 @@
from testflows.core import *

from window_functions.requirements import *
from window_functions.tests.common import *

@TestFeature
@Name("frame clause")
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_FrameClause("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_Frame_Extent("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_Frame_Start("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_Frame_End("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_Frame_Between("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_CurrentRow("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_UnboundedPreceding("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_UnboundedFollowing("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_ExprPreceding("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_ExprFollowing("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_ExprPreceding_ExprValue("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_ExprFollowing_ExprValue("1.0")
)
def feature(self):
    """Check defining frame clause.
    """
    Feature(run=load("window_functions.tests.rows_frame", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.range_frame", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.range_overflow", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.range_datetime", "feature"), flags=TE)
    Feature(run=load("window_functions.tests.range_errors", "feature"), flags=TE)

456
tests/testflows/window_functions/tests/funcs.py
Normal file
@ -0,0 +1,456 @@
from testflows.core import *

from window_functions.requirements import *
from window_functions.tests.common import *

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_FirstValue("1.0")
)
def first_value(self):
    """Check `first_value` function.
    """
    expected = convert_output("""
    first_value | ten | four
    -------------+-----+------
    0 | 0 | 0
    0 | 0 | 0
    0 | 4 | 0
    1 | 1 | 1
    1 | 1 | 1
    1 | 7 | 1
    1 | 9 | 1
    0 | 0 | 2
    1 | 1 | 3
    1 | 3 | 3
    """)

    with Example("using first_value"):
        execute_query(
            "SELECT first_value(ten) OVER (PARTITION BY four ORDER BY ten) AS first_value, ten, four FROM tenk1 WHERE unique2 < 10",
            expected=expected
        )

    with Example("using any equivalent"):
        execute_query(
            "SELECT any(ten) OVER (PARTITION BY four ORDER BY ten) AS first_value, ten, four FROM tenk1 WHERE unique2 < 10",
            expected=expected
        )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_LastValue("1.0")
)
def last_value(self):
    """Check `last_value` function.
    """
    with Example("order by window", description="""
       Check that last_value returns the last row of the frame that is CURRENT ROW in ORDER BY window
       """):
        expected = convert_output("""
        last_value | ten | four
        ------------+-----+------
        0 | 0 | 0
        0 | 0 | 0
        2 | 0 | 2
        1 | 1 | 1
        1 | 1 | 1
        3 | 1 | 3
        3 | 3 | 3
        0 | 4 | 0
        1 | 7 | 1
        1 | 9 | 1
        """)

        with Check("using last_value"):
            execute_query(
                "SELECT last_value(four) OVER (ORDER BY ten, four) AS last_value, ten, four FROM tenk1 WHERE unique2 < 10",
                expected=expected
            )

        with Check("using anyLast() equivalent"):
            execute_query(
                "SELECT anyLast(four) OVER (ORDER BY ten, four) AS last_value, ten, four FROM tenk1 WHERE unique2 < 10",
                expected=expected
            )

with Example("partition by window", description="""
|
||||
Check that last_value returns the last row of the frame that is CURRENT ROW in ORDER BY window
|
||||
"""):
        expected = convert_output("""
        last_value | ten | four
        ------------+-----+------
        4 | 0 | 0
        4 | 0 | 0
        4 | 4 | 0
        9 | 1 | 1
        9 | 1 | 1
        9 | 7 | 1
        9 | 9 | 1
        0 | 0 | 2
        3 | 1 | 3
        3 | 3 | 3
        """)

        with Check("using last_value"):
            execute_query(
                """SELECT last_value(ten) OVER (PARTITION BY four) AS last_value, ten, four FROM
                (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)
                ORDER BY four, ten""",
                expected=expected
            )

        with Check("using anyLast() equivalent"):
            execute_query(
                """SELECT anyLast(ten) OVER (PARTITION BY four) AS last_value, ten, four FROM
                (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)
                ORDER BY four, ten""",
                expected=expected
            )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_Lag_Workaround("1.0")
)
def lag(self):
    """Check `lag` function workaround.
    """
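    # The lag() workaround: any(x) OVER (... ROWS BETWEEN n PRECEDING AND n PRECEDING)
    # restricts the frame to the single row n positions back, emulating lag(x, n).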
with Example("anyOrNull"):
|
||||
expected = convert_output("""
|
||||
lag | ten | four
|
||||
-----+-----+------
|
||||
\\N | 0 | 0
|
||||
0 | 0 | 0
|
||||
0 | 4 | 0
|
||||
\\N | 1 | 1
|
||||
1 | 1 | 1
|
||||
1 | 7 | 1
|
||||
7 | 9 | 1
|
||||
\\N | 0 | 2
|
||||
\\N | 1 | 3
|
||||
1 | 3 | 3
|
||||
""")
|
||||
|
||||
execute_query(
|
||||
"SELECT anyOrNull(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS lag , ten, four FROM tenk1 WHERE unique2 < 10",
|
||||
expected=expected
|
||||
)
|
||||
|
||||
with Example("any"):
|
||||
expected = convert_output("""
|
||||
lag | ten | four
|
||||
-----+-----+------
|
||||
0 | 0 | 0
|
||||
0 | 0 | 0
|
||||
0 | 4 | 0
|
||||
0 | 1 | 1
|
||||
1 | 1 | 1
|
||||
1 | 7 | 1
|
||||
7 | 9 | 1
|
||||
0 | 0 | 2
|
||||
0 | 1 | 3
|
||||
1 | 3 | 3
|
||||
""")
|
||||
|
||||
execute_query(
|
||||
"SELECT any(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS lag , ten, four FROM tenk1 WHERE unique2 < 10",
|
||||
expected=expected
|
||||
)
|
||||
|
||||
with Example("anyOrNull with column value as offset"):
|
||||
expected = convert_output("""
|
||||
lag | ten | four
|
||||
-----+-----+------
|
||||
0 | 0 | 0
|
||||
0 | 0 | 0
|
||||
4 | 4 | 0
|
||||
\\N | 1 | 1
|
||||
1 | 1 | 1
|
||||
1 | 7 | 1
|
||||
7 | 9 | 1
|
||||
\\N | 0 | 2
|
||||
\\N | 1 | 3
|
||||
\\N | 3 | 3
|
||||
""")
|
||||
|
||||
execute_query(
|
||||
"SELECT any(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN four PRECEDING AND four PRECEDING) AS lag , ten, four FROM tenk1 WHERE unique2 < 10",
|
||||
expected=expected
|
||||
)
|
||||
|
||||
@TestScenario
|
||||
@Requirements(
|
||||
RQ_SRS_019_ClickHouse_WindowFunctions_Lead_Workaround("1.0")
|
||||
)
|
||||
def lead(self):
|
||||
"""Check `lead` function workaround.
|
||||
"""
|
||||
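    # Symmetric workaround: any(x) OVER (... ROWS BETWEEN n FOLLOWING AND n FOLLOWING)
    # reads the single row n positions ahead, emulating lead(x, n).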
with Example("anyOrNull"):
|
||||
expected = convert_output("""
|
||||
lead | ten | four
|
||||
------+-----+------
|
||||
0 | 0 | 0
|
||||
4 | 0 | 0
|
||||
\\N | 4 | 0
|
||||
1 | 1 | 1
|
||||
7 | 1 | 1
|
||||
9 | 7 | 1
|
||||
\\N | 9 | 1
|
||||
\\N | 0 | 2
|
||||
3 | 1 | 3
|
||||
\\N | 3 | 3
|
||||
""")
|
||||
|
||||
execute_query(
|
||||
"SELECT anyOrNull(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING) AS lead, ten, four FROM tenk1 WHERE unique2 < 10",
|
||||
expected=expected
|
||||
)
|
||||
|
||||
with Example("any"):
|
||||
expected = convert_output("""
|
||||
lead | ten | four
|
||||
------+-----+------
|
||||
0 | 0 | 0
|
||||
4 | 0 | 0
|
||||
0 | 4 | 0
|
||||
1 | 1 | 1
|
||||
7 | 1 | 1
|
||||
9 | 7 | 1
|
||||
0 | 9 | 1
|
||||
0 | 0 | 2
|
||||
3 | 1 | 3
|
||||
0 | 3 | 3
|
||||
""")
|
||||
|
||||
execute_query(
|
||||
"SELECT any(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING) AS lead, ten, four FROM tenk1 WHERE unique2 < 10",
|
||||
expected=expected
|
||||
)
|
||||
|
||||
with Example("any with arithmetic expr"):
|
||||
expected = convert_output("""
|
||||
lead | ten | four
|
||||
------+-----+------
|
||||
0 | 0 | 0
|
||||
8 | 0 | 0
|
||||
0 | 4 | 0
|
||||
2 | 1 | 1
|
||||
14 | 1 | 1
|
||||
18 | 7 | 1
|
||||
0 | 9 | 1
|
||||
0 | 0 | 2
|
||||
6 | 1 | 3
|
||||
0 | 3 | 3
|
||||
""")
|
||||
|
||||
execute_query(
|
||||
"SELECT any(ten * 2) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING) AS lead, ten, four FROM tenk1 WHERE unique2 < 10",
|
||||
expected=expected
|
||||
)
|
||||
|
||||
with Example("subquery as offset"):
|
||||
expected = convert_output("""
|
||||
lead
|
||||
------
|
||||
0
|
||||
0
|
||||
4
|
||||
1
|
||||
7
|
||||
9
|
||||
\\N
|
||||
0
|
||||
3
|
||||
\\N
|
||||
""")
|
||||
|
||||
execute_query(
|
||||
"SELECT anyNull(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN (SELECT two FROM tenk1 WHERE unique2 = unique2) FOLLOWING AND (SELECT two FROM tenk1 WHERE unique2 = unique2) FOLLOWING) AS lead "
|
||||
"FROM tenk1 WHERE unique2 < 10",
|
||||
expected=expected
|
||||
)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowNumber("1.0")
)
def row_number(self):
    """Check `row_number` function.
    """
    expected = convert_output("""
    row_number
    ------------
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    """)

    execute_query(
        "SELECT row_number() OVER (ORDER BY unique2) AS row_number FROM tenk1 WHERE unique2 < 10",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_Rank("1.0")
)
def rank(self):
    """Check `rank` function.
    """
    expected = convert_output("""
    rank_1 | ten | four
    --------+-----+------
    1 | 0 | 0
    1 | 0 | 0
    3 | 4 | 0
    1 | 1 | 1
    1 | 1 | 1
    3 | 7 | 1
    4 | 9 | 1
    1 | 0 | 2
    1 | 1 | 3
    2 | 3 | 3
    """)

    execute_query(
        "SELECT rank() OVER (PARTITION BY four ORDER BY ten) AS rank_1, ten, four FROM tenk1 WHERE unique2 < 10",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_DenseRank("1.0")
)
def dense_rank(self):
    """Check `dense_rank` function.
    """
    expected = convert_output("""
    dense_rank | ten | four
    ------------+-----+------
    1 | 0 | 0
    1 | 0 | 0
    2 | 4 | 0
    1 | 1 | 1
    1 | 1 | 1
    2 | 7 | 1
    3 | 9 | 1
    1 | 0 | 2
    1 | 1 | 3
    2 | 3 | 3
    """)

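    # Unlike rank(), dense_rank() leaves no gaps after ties: the next distinct
    # value always gets the previous rank plus one.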
    execute_query(
        "SELECT dense_rank() OVER (PARTITION BY four ORDER BY ten) AS dense_rank, ten, four FROM tenk1 WHERE unique2 < 10",
        expected=expected
    )

@TestScenario
def last_value_with_no_frame(self):
    """Check last_value function with no frame.
    """
    expected = convert_output("""
    four | ten | sum | last_value
    ------+-----+-----+------------
    0 | 0 | 0 | 0
    0 | 2 | 2 | 2
    0 | 4 | 6 | 4
    0 | 6 | 12 | 6
    0 | 8 | 20 | 8
    1 | 1 | 1 | 1
    1 | 3 | 4 | 3
    1 | 5 | 9 | 5
    1 | 7 | 16 | 7
    1 | 9 | 25 | 9
    2 | 0 | 0 | 0
    2 | 2 | 2 | 2
    2 | 4 | 6 | 4
    2 | 6 | 12 | 6
    2 | 8 | 20 | 8
    3 | 1 | 1 | 1
    3 | 3 | 4 | 3
    3 | 5 | 9 | 5
    3 | 7 | 16 | 7
    3 | 9 | 25 | 9
    """)

    execute_query(
        "SELECT four, ten, sum(ten) over (partition by four order by ten) AS sum, "
        "last_value(ten) over (partition by four order by ten) AS last_value "
        "FROM (select distinct ten, four from tenk1)",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_LastValue("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_Lag_Workaround("1.0"),
)
def last_value_with_lag_workaround(self):
    """Check last value with lag workaround.
    """
    expected = convert_output("""
    last_value | lag | salary
    ------------+------+--------
    4500 | 0 | 3500
    4800 | 3500 | 3900
    5200 | 3900 | 4200
    5200 | 4200 | 4500
    5200 | 4500 | 4800
    5200 | 4800 | 4800
    6000 | 4800 | 5000
    6000 | 5000 | 5200
    6000 | 5200 | 5200
    6000 | 5200 | 6000
    """)

    execute_query(
        "select last_value(salary) over(order by salary range between 1000 preceding and 1000 following) AS last_value, "
        "any(salary) over(order by salary rows between 1 preceding and 1 preceding) AS lag, "
        "salary from empsalary",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_FirstValue("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_Lead_Workaround("1.0")
)
def first_value_with_lead_workaround(self):
    """Check first value with lead workaround.
    """
    expected = convert_output("""
    first_value | lead | salary
    -------------+------+--------
    3500 | 3900 | 3500
    3500 | 4200 | 3900
    3500 | 4500 | 4200
    3500 | 4800 | 4500
    3900 | 4800 | 4800
    3900 | 5000 | 4800
    4200 | 5200 | 5000
    4200 | 5200 | 5200
    4200 | 6000 | 5200
    5000 | 0 | 6000
    """)

    execute_query(
        "select first_value(salary) over(order by salary range between 1000 preceding and 1000 following) AS first_value, "
        "any(salary) over(order by salary rows between 1 following and 1 following) AS lead, "
        "salary from empsalary",
        expected=expected
    )

@TestFeature
@Name("funcs")
def feature(self):
    """Check true window functions.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)

396
tests/testflows/window_functions/tests/misc.py
Normal file
@ -0,0 +1,396 @@
from testflows.core import *

from window_functions.requirements import *
from window_functions.tests.common import *

@TestScenario
def subquery_expr_preceding(self):
    """Check using subquery expr in preceding.
    """
    expected = convert_output("""
    sum | unique1
    -----+---------
    0 | 0
    1 | 1
    3 | 2
    5 | 3
    7 | 4
    9 | 5
    11 | 6
    13 | 7
    15 | 8
    17 | 9
    """)

    execute_query(
        "SELECT sum(unique1) over "
        "(order by unique1 rows (SELECT unique1 FROM tenk1 ORDER BY unique1 LIMIT 1) + 1 PRECEDING) AS sum, "
        "unique1 "
        "FROM tenk1 WHERE unique1 < 10",
        expected=expected
    )

@TestScenario
def window_functions_in_select_expression(self):
    """Check using multiple window functions in an expression.
    """
    expected = convert_output("""
    cntsum
    --------
    22
    22
    87
    24
    24
    82
    92
    51
    92
    136
    """)

    execute_query(
        "SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) + "
        "sum(hundred) OVER (PARTITION BY four ORDER BY ten)) AS cntsum "
        "FROM tenk1 WHERE unique2 < 10",
        expected=expected
    )

@TestScenario
def window_functions_in_subquery(self):
    """Check using window functions in a subquery.
    """
    expected = convert_output("""
    total | fourcount | twosum
    -------+-----------+--------
    """)

    execute_query(
        "SELECT * FROM ("
        "  SELECT count(*) OVER (PARTITION BY four ORDER BY ten) + "
        "    sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total, "
        "    count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount, "
        "    sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum "
        "  FROM tenk1 "
        ") WHERE total <> fourcount + twosum",
        expected=expected
    )

@TestScenario
def group_by_and_one_window(self):
    """Check running window function with group by and one window.
    """
    expected = convert_output("""
    four | ten | sum | avg
    ------+-----+------+------------------------
    0 | 0 | 0 | 0
    0 | 2 | 0 | 2
    0 | 4 | 0 | 4
    0 | 6 | 0 | 6
    0 | 8 | 0 | 8
    1 | 1 | 2500 | 1
    1 | 3 | 2500 | 3
    1 | 5 | 2500 | 5
    1 | 7 | 2500 | 7
    1 | 9 | 2500 | 9
    2 | 0 | 5000 | 0
    2 | 2 | 5000 | 2
    2 | 4 | 5000 | 4
    2 | 6 | 5000 | 6
    2 | 8 | 5000 | 8
    3 | 1 | 7500 | 1
    3 | 3 | 7500 | 3
    3 | 5 | 7500 | 5
    3 | 7 | 7500 | 7
    3 | 9 | 7500 | 9
    """)

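    # SUM(SUM(four)) aggregates twice: GROUP BY computes the inner SUM(four) per
    # (four, ten) group first, then the window sums those group results per partition.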
    execute_query(
        "SELECT four, ten, SUM(SUM(four)) OVER (PARTITION BY four) AS sum, AVG(ten) AS avg FROM tenk1 GROUP BY four, ten ORDER BY four, ten",
        expected=expected,
    )

@TestScenario
def group_by_and_multiple_windows(self):
    """Check running window function with group by and multiple windows.
    """
    expected = convert_output("""
    sum1 | row_number | sum2
    -------+------------+-------
    25100 | 1 | 47100
    7400 | 2 | 22000
    14600 | 3 | 14600
    """)

    execute_query(
        "SELECT sum(salary) AS sum1, row_number() OVER (ORDER BY depname) AS row_number, "
        "sum(sum(salary)) OVER (ORDER BY depname DESC) AS sum2 "
        "FROM empsalary GROUP BY depname",
        expected=expected,
    )

@TestScenario
def query_with_order_by_and_one_window(self):
    """Check using a window function in a query that has an `ORDER BY` clause.
    """
    expected = convert_output("""
    depname | empno | salary | rank
    ----------+----------+--------+---------
    sales | 3 | 4800 | 1
    personnel | 5 | 3500 | 1
    develop | 7 | 4200 | 1
    personnel | 2 | 3900 | 2
    sales | 4 | 4800 | 2
    develop | 9 | 4500 | 2
    sales | 1 | 5000 | 3
    develop | 10 | 5200 | 3
    develop | 11 | 5200 | 4
    develop | 8 | 6000 | 5
    """)
    execute_query(
        "SELECT depname, empno, salary, rank() OVER w AS rank FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary, empno) ORDER BY rank() OVER w, empno",
        expected=expected
    )

@TestScenario
def with_union_all(self):
    """Check using window over rows obtained with `UNION ALL`.
    """
    expected = convert_output("""
    count
    -------
    """)

    execute_query(
        "SELECT count(*) OVER (PARTITION BY four) AS count FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1) LIMIT 0",
        expected=expected
    )

@TestScenario
def empty_table(self):
    """Check using an empty table with a window function.
    """
    expected = convert_output("""
    count
    -------
    """)

    execute_query(
        "SELECT count(*) OVER (PARTITION BY four) AS count FROM (SELECT * FROM tenk1 WHERE 0)",
        expected=expected
    )

@TestScenario
def from_subquery(self):
    """Check using a window function over data from subquery.
    """
    expected = convert_output("""
    count | four
    -------+------
    4 | 1
    4 | 1
    4 | 1
    4 | 1
    2 | 3
    2 | 3
    """)

    execute_query(
        "SELECT count(*) OVER (PARTITION BY four) AS count, four FROM (SELECT * FROM tenk1 WHERE two = 1) WHERE unique2 < 10",
        expected=expected
    )

@TestScenario
def groups_frame(self):
    """Check using `GROUPS` frame.
    """
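    # GROUPS frames are expected to be unsupported here, so the query should fail
    # with the error returned by groups_frame_error().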
    exitcode, message = groups_frame_error()

    expected = convert_output("""
    sum | unique1 | four
    -----+---------+------
    12 | 0 | 0
    12 | 8 | 0
    12 | 4 | 0
    27 | 5 | 1
    27 | 9 | 1
    27 | 1 | 1
    35 | 6 | 2
    35 | 2 | 2
    45 | 3 | 3
    45 | 7 | 3
    """)

    execute_query("""
        SELECT sum(unique1) over (order by four groups between unbounded preceding and current row),
          unique1, four
        FROM tenk1 WHERE unique1 < 10
        """,
        exitcode=exitcode, message=message
    )

@TestScenario
def count_with_empty_over_clause_without_start(self):
    """Check that we can use the `count()` window function without passing
    the `*` argument when using an empty over clause.
    """
    exitcode = 0
    message = "1"

    sql = ("SELECT count() OVER () FROM tenk1 LIMIT 1")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def subquery_multiple_window_functions(self):
    """Check using multiple window functions in a subquery.
    """
    expected = convert_output("""
    depname | depsalary | depminsalary
    --------+-------------+--------------
    sales | 5000 | 5000
    sales | 9800 | 4800
    sales | 14600 | 4800
    """)

    execute_query("""
        SELECT * FROM
          (SELECT depname,
             sum(salary) OVER (PARTITION BY depname order by empno) AS depsalary,
             min(salary) OVER (PARTITION BY depname, empno order by enroll_date) AS depminsalary
           FROM empsalary)
        WHERE depname = 'sales'
        """,
        expected=expected
    )

@TestScenario
def windows_with_same_partitioning_but_different_ordering(self):
    """Check using two windows that use the same partitioning
    but different ordering.
    """
    expected = convert_output("""
    first | last
    ------+-----
    7 | 7
    7 | 9
    7 | 10
    7 | 11
    7 | 8
    5 | 5
    5 | 2
    3 | 3
    3 | 4
    3 | 1
    """)

    execute_query("""
        SELECT
          any(empno) OVER (PARTITION BY depname ORDER BY salary, enroll_date) AS first,
          anyLast(empno) OVER (PARTITION BY depname ORDER BY salary,enroll_date,empno) AS last
        FROM empsalary
        """,
        expected=expected
    )

@TestScenario
def subquery_with_multiple_windows_filtering(self):
    """Check filtering rows from a subquery that uses multiple window functions.
    """
    expected = convert_output("""
    depname | empno | salary | enroll_date | first_emp | last_emp
    ----------+-------+----------+--------------+-------------+----------
    develop | 8 | 6000 | 2006-10-01 | 1 | 5
    develop | 7 | 4200 | 2008-01-01 | 4 | 1
    personnel | 2 | 3900 | 2006-12-23 | 1 | 2
    personnel | 5 | 3500 | 2007-12-10 | 2 | 1
    sales | 1 | 5000 | 2006-10-01 | 1 | 3
    sales | 4 | 4800 | 2007-08-08 | 3 | 1
    """)

    execute_query("""
        SELECT * FROM
          (SELECT depname,
             empno,
             salary,
             enroll_date,
             row_number() OVER (PARTITION BY depname ORDER BY enroll_date, empno) AS first_emp,
             row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC, empno) AS last_emp
           FROM empsalary) emp
        WHERE first_emp = 1 OR last_emp = 1
        """,
        expected=expected
    )

@TestScenario
def exclude_clause(self):
    """Check if exclude clause is supported.
    """
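    # The EXCLUDE clause is expected to be unsupported, so the query should fail
    # with a syntax error rather than produce the rows above.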
    exitcode, message = syntax_error()

    expected = convert_output("""
    sum | unique1 | four
    -----+---------+------
    7 | 4 | 0
    13 | 2 | 2
    22 | 1 | 1
    26 | 6 | 2
    29 | 9 | 1
    31 | 8 | 0
    32 | 5 | 1
    23 | 3 | 3
    15 | 7 | 3
    10 | 0 | 0
    """)

    execute_query(
        "SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude no others) AS sum,"
        "unique1, four "
        "FROM tenk1 WHERE unique1 < 10",
        exitcode=exitcode, message=message
    )

@TestScenario
def in_view(self):
    """Check using a window function in a view.
    """
    with Given("I create a view"):
        sql = """
            CREATE VIEW v_window AS
            SELECT number, sum(number) over (order by number rows between 1 preceding and 1 following) as sum_rows
            FROM numbers(1, 10)
            """
        create_table(name="v_window", statement=sql)

    expected = convert_output("""
    number | sum_rows
    ---------+----------
    1 | 3
    2 | 6
    3 | 9
    4 | 12
    5 | 15
    6 | 18
    7 | 21
    8 | 24
    9 | 27
    10 | 19
    """)

    execute_query(
        "SELECT * FROM v_window",
        expected=expected
    )

@TestFeature
@Name("misc")
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_FrameClause("1.0")
)
def feature(self):
    """Check misc cases for frame clause.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)

218
tests/testflows/window_functions/tests/order_clause.py
Normal file
@ -0,0 +1,218 @@
from testflows.core import *
from window_functions.requirements import *
from window_functions.tests.common import *

@TestScenario
def single_expr_asc(self):
    """Check defining order clause with single expr ASC.
    """
    expected = convert_output("""
    x | s | sum
    ----+---+-----
    1 | a | 2
    1 | b | 2
    2 | b | 4
    """)

    execute_query(
        "SELECT x,s, sum(x) OVER (ORDER BY x ASC) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))",
        expected=expected
    )

@TestScenario
def single_expr_desc(self):
    """Check defining order clause with single expr DESC.
    """
    expected = convert_output("""
    x | s | sum
    ----+---+-----
    2 | b | 2
    1 | a | 4
    1 | b | 4
    """)

    execute_query(
        "SELECT x,s, sum(x) OVER (ORDER BY x DESC) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MultipleExprs("1.0")
)
def multiple_expr_desc_desc(self):
    """Check defining order clause with multiple exprs (DESC, DESC).
    """
    expected = convert_output("""
    x | s | sum
    --+---+----
    2 | b | 2
    1 | b | 3
    1 | a | 4
    """)

    execute_query(
        "SELECT x,s, sum(x) OVER (ORDER BY x DESC, s DESC) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MultipleExprs("1.0")
)
def multiple_expr_asc_asc(self):
    """Check defining order clause with multiple exprs (ASC, ASC).
    """
    expected = convert_output("""
    x | s | sum
    ----+---+------
    1 | a | 1
    1 | b | 2
    2 | b | 4
    """)

    execute_query(
        "SELECT x,s, sum(x) OVER (ORDER BY x ASC, s ASC) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MultipleExprs("1.0")
)
def multiple_expr_asc_desc(self):
    """Check defining order clause with multiple exprs (ASC, DESC).
    """
    expected = convert_output("""
    x | s | sum
    ----+---+------
    1 | b | 1
    1 | a | 2
    2 | b | 4
    """)

    execute_query(
        "SELECT x,s, sum(x) OVER (ORDER BY x ASC, s DESC) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MissingExpr_Error("1.0")
)
def missing_expr_error(self):
    """Check that defining order clause with missing expr returns an error.
    """
    exitcode = 62
    message = "Exception: Syntax error: failed at position"

    self.context.node.query("SELECT sum(number) OVER (ORDER BY) FROM numbers(1,3)", exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_InvalidExpr_Error("1.0")
)
def invalid_expr_error(self):
    """Check that defining order clause with invalid expr returns an error.
    """
    exitcode = 47
    message = "Exception: Missing columns: 'foo'"

    self.context.node.query("SELECT sum(number) OVER (ORDER BY foo) FROM numbers(1,3)", exitcode=exitcode, message=message)

@TestScenario
def by_column(self):
    """Check order by using a single column.
    """
    expected = convert_output("""
    depname | empno | salary | rank
    -----------+-------+--------+------
    develop | 7 | 4200 | 1
    develop | 8 | 6000 | 1
    develop | 9 | 4500 | 1
    develop | 10 | 5200 | 1
    develop | 11 | 5200 | 1
    personnel | 2 | 3900 | 1
    personnel | 5 | 3500 | 1
    sales | 1 | 5000 | 1
    sales | 3 | 4800 | 1
    sales | 4 | 4800 | 1
    """)

    execute_query(
        "SELECT depname, empno, salary, rank() OVER (PARTITION BY depname, empno ORDER BY salary) AS rank FROM empsalary",
        expected=expected,
    )

@TestScenario
def by_expr(self):
    """Check order by with expression.
    """
    expected = convert_output("""
    avg
    ------------------------
    0
    0
    0
    1
    1
    1
    1
    2
    3
    3
    """)

    execute_query(
        "SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) AS avg FROM tenk1 WHERE unique2 < 10",
        expected=expected,
    )

@TestScenario
def by_expr_with_aggregates(self):
    """Check order by with an expression that contains aggregate functions.
    """
    expected = convert_output("""
    ten | res | rank
    -----+----------+------
    0 | 9976146 | 4
    1 | 10114187 | 9
    2 | 10059554 | 8
    3 | 9878541 | 1
    4 | 9881005 | 2
    5 | 9981670 | 5
    6 | 9947099 | 3
    7 | 10120309 | 10
    8 | 9991305 | 6
    9 | 10040184 | 7
    """)

    execute_query(
        "select ten, sum(unique1) + sum(unique2) as res, rank() over (order by sum(unique1) + sum(unique2)) as rank "
        "from tenk1 group by ten order by ten",
        expected=expected,
    )

@TestScenario
def by_a_non_integer_constant(self):
    """Check if it is allowed to use a window with ordering by a non-integer constant.
    """
    expected = convert_output("""
    rank
    ------
    1
    """)

    execute_query(
        "SELECT rank() OVER (ORDER BY length('abc')) AS rank",
        expected=expected
    )

@TestFeature
@Name("order clause")
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause("1.0")
)
def feature(self):
    """Check defining order clause.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)

137
tests/testflows/window_functions/tests/over_clause.py
Normal file
@ -0,0 +1,137 @@
from testflows.core import *

from window_functions.requirements import *
from window_functions.tests.common import *

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_EmptyOverClause("1.0")
)
def empty(self):
    """Check using empty over clause.
    """
    expected = convert_output("""
    count
    -------
    10
    10
    10
    10
    10
    10
    10
    10
    10
    10
    """)

    execute_query(
        "SELECT COUNT(*) OVER () AS count FROM tenk1 WHERE unique2 < 10",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_EmptyOverClause("1.0"),
    RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow("1.0")
)
def empty_named_window(self):
    """Check using over clause with an empty named window.
    """
    expected = convert_output("""
    count
    -------
    10
    10
    10
    10
    10
    10
    10
    10
    10
    10
    """)

    execute_query(
        "SELECT COUNT(*) OVER w AS count FROM tenk1 WHERE unique2 < 10 WINDOW w AS ()",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_AdHocWindow("1.0"),
)
def adhoc_window(self):
    """Check running aggregating `sum` function over an adhoc window.
    """
    expected = convert_output("""
    depname | empno | salary | sum
    -----------+-------+--------+-------
    develop | 7 | 4200 | 25100
    develop | 9 | 4500 | 25100
    develop | 10 | 5200 | 25100
    develop | 11 | 5200 | 25100
    develop | 8 | 6000 | 25100
    personnel | 5 | 3500 | 7400
    personnel | 2 | 3900 | 7400
    sales | 3 | 4800 | 14600
    sales | 4 | 4800 | 14600
    sales | 1 | 5000 | 14600
    """)

    execute_query(
        "SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname) AS sum FROM empsalary ORDER BY depname, salary, empno",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_AdHocWindow_MissingWindowSpec_Error("1.0")
)
def missing_window_spec(self):
    """Check that missing window spec in over clause returns an error.
    """
    exitcode = 62
    message = "Exception: Syntax error"

    self.context.node.query("SELECT number,sum(number) OVER FROM values('number Int8', (1),(1),(2),(3))",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow_InvalidName_Error("1.0")
)
def invalid_window_name(self):
    """Check that using an invalid window name returns an error.
    """
    exitcode = 47
    message = "Exception: Window 'w3' is not defined"

    self.context.node.query("SELECT number,sum(number) OVER w3 FROM values('number Int8', (1),(1),(2),(3)) WINDOW w1 AS ()",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow_MultipleWindows_Error("1.0")
)
def invalid_multiple_windows(self):
    """Check that using multiple windows in the over clause returns an error.
    """
    exitcode = 47
    message = "Exception: Missing columns"

    self.context.node.query("SELECT number,sum(number) OVER w1, w2 FROM values('number Int8', (1),(1),(2),(3)) WINDOW w1 AS (), w2 AS (PARTITION BY number)",
        exitcode=exitcode, message=message)


@TestFeature
@Name("over clause")
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_OverClause("1.0")
)
def feature(self):
    """Check defining over clause.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)

77
tests/testflows/window_functions/tests/partition_clause.py
Normal file
@ -0,0 +1,77 @@
from testflows.core import *

from window_functions.requirements import *
from window_functions.tests.common import *

@TestScenario
def single_expr(self):
    """Check defining partition clause with single expr.
    """
    expected = convert_output("""
    x | s | sum
    ----+---+------
    1 | a | 2
    1 | b | 2
    2 | b | 2
    """)

    execute_query(
        "SELECT x,s, sum(x) OVER (PARTITION BY x) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause_MultipleExpr("1.0")
)
def multiple_expr(self):
    """Check defining partition clause with multiple exprs.
    """
    expected = convert_output("""
    x | s | sum
    --+---+----
    1 | a | 1
    1 | b | 1
    2 | b | 2
    """)

    execute_query(
        "SELECT x,s, sum(x) OVER (PARTITION BY x,s) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause_MissingExpr_Error("1.0")
)
def missing_expr_error(self):
    """Check that defining partition clause with missing expr returns an error.
    """
    exitcode = 62
    message = "Exception: Syntax error: failed at position"

    self.context.node.query("SELECT sum(number) OVER (PARTITION BY) FROM numbers(1,3)", exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause_InvalidExpr_Error("1.0")
)
def invalid_expr_error(self):
    """Check that defining partition clause with invalid expr returns an error.
    """
    exitcode = 47
    message = "Exception: Missing columns: 'foo'"

    self.context.node.query("SELECT sum(number) OVER (PARTITION BY foo) FROM numbers(1,3)", exitcode=exitcode, message=message)


@TestFeature
@Name("partition clause")
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause("1.0")
)
def feature(self):
    """Check defining partition clause.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)

238
tests/testflows/window_functions/tests/range_datetime.py
Normal file
@ -0,0 +1,238 @@
from testflows.core import *

from window_functions.requirements import *
from window_functions.tests.common import *

@TestScenario
def order_by_asc_range_between_days_preceding_and_days_following(self):
    """Check range between days preceding and days following
    with ascending order by.
    """
    expected = convert_output("""
    sum | salary | enroll_date
    -------+--------+-------------
    34900 | 5000 | 2006-10-01
    38400 | 3900 | 2006-12-23
    47100 | 4800 | 2007-08-01
    47100 | 4800 | 2007-08-08
    36100 | 3500 | 2007-12-10
    32200 | 4200 | 2008-01-01
    34900 | 6000 | 2006-10-01
    32200 | 4500 | 2008-01-01
    47100 | 5200 | 2007-08-01
    47100 | 5200 | 2007-08-15
    """)

    execute_query(
        "select sum(salary) over (order by enroll_date range between 365 preceding and 365 following) AS sum, "
        "salary, enroll_date from empsalary order by empno",
        expected=expected
    )

@TestScenario
def order_by_desc_range_between_days_preceding_and_days_following(self):
    """Check range between days preceding and days following
    with descending order by."""
    expected = convert_output("""
    sum | salary | enroll_date
    -------+--------+-------------
    34900 | 5000 | 2006-10-01
    38400 | 3900 | 2006-12-23
    47100 | 4800 | 2007-08-01
    47100 | 4800 | 2007-08-08
    36100 | 3500 | 2007-12-10
    32200 | 4200 | 2008-01-01
    34900 | 6000 | 2006-10-01
    32200 | 4500 | 2008-01-01
    47100 | 5200 | 2007-08-01
    47100 | 5200 | 2007-08-15
    """)

    execute_query(
        "select sum(salary) over (order by enroll_date desc range between 365 preceding and 365 following) AS sum, "
        "salary, enroll_date from empsalary order by empno",
        expected=expected
    )

@TestScenario
def order_by_desc_range_between_days_following_and_days_following(self):
    """Check range between days following and days following with
    descending order by.
    """
    expected = convert_output("""
    sum | salary | enroll_date
    -------+--------+-------------
    0 | 5000 | 2006-10-01
    0 | 3900 | 2006-12-23
    0 | 4800 | 2007-08-01
    0 | 4800 | 2007-08-08
    0 | 3500 | 2007-12-10
    0 | 4200 | 2008-01-01
    0 | 6000 | 2006-10-01
    0 | 4500 | 2008-01-01
    0 | 5200 | 2007-08-01
    0 | 5200 | 2007-08-15
    """)

    execute_query(
        "select sum(salary) over (order by enroll_date desc range between 365 following and 365 following) AS sum, "
        "salary, enroll_date from empsalary order by empno",
        expected=expected
    )

@TestScenario
def order_by_desc_range_between_days_preceding_and_days_preceding(self):
    """Check range between days preceding and days preceding with
    descending order by.
    """
    expected = convert_output("""
    sum | salary | enroll_date
    -------+--------+-------------
    0 | 5000 | 2006-10-01
    0 | 3900 | 2006-12-23
    0 | 4800 | 2007-08-01
    0 | 4800 | 2007-08-08
    0 | 3500 | 2007-12-10
    0 | 4200 | 2008-01-01
    0 | 6000 | 2006-10-01
    0 | 4500 | 2008-01-01
    0 | 5200 | 2007-08-01
    0 | 5200 | 2007-08-15
    """)

    execute_query(
        "select sum(salary) over (order by enroll_date desc range between 365 preceding and 365 preceding) AS sum, "
        "salary, enroll_date from empsalary order by empno",
        expected=expected
    )

@TestScenario
def datetime_with_timezone_order_by_asc_range_between_n_preceding_and_n_following(self):
    """Check range between preceding and following with
    DateTime column that has timezone using ascending order by.
    """
    expected = convert_output("""
    id | f_timestamptz | first_value | last_value
    ----+------------------------------+-------------+------------
    1 | 2000-10-19 10:23:54 | 1 | 3
    2 | 2001-10-19 10:23:54 | 1 | 4
    3 | 2001-10-19 10:23:54 | 1 | 4
    4 | 2002-10-19 10:23:54 | 2 | 5
    5 | 2003-10-19 10:23:54 | 4 | 6
    6 | 2004-10-19 10:23:54 | 5 | 7
    7 | 2005-10-19 10:23:54 | 6 | 8
    8 | 2006-10-19 10:23:54 | 7 | 9
    9 | 2007-10-19 10:23:54 | 8 | 10
    10 | 2008-10-19 10:23:54 | 9 | 10
    """)

execute_query(
|
||||
"""
|
||||
select id, f_timestamptz, first_value(id) over w AS first_value, last_value(id) over w AS last_value
|
||||
from datetimes
|
||||
window w as (order by f_timestamptz range between
|
||||
31622400 preceding and 31622400 following) order by id
|
||||
""",
|
||||
expected=expected
|
||||
)
|
||||
|
||||
@TestScenario
|
||||
def datetime_with_timezone_order_by_desc_range_between_n_preceding_and_n_following(self):
|
||||
"""Check range between preceding and following with
|
||||
DateTime column that has timezone using descending order by.
|
||||
"""
|
||||
expected = convert_output("""
|
||||
id | f_timestamptz | first_value | last_value
|
||||
----+------------------------------+-------------+------------
|
||||
10 | 2008-10-19 10:23:54 | 10 | 9
|
||||
9 | 2007-10-19 10:23:54 | 10 | 8
|
||||
8 | 2006-10-19 10:23:54 | 9 | 7
|
||||
7 | 2005-10-19 10:23:54 | 8 | 6
|
||||
6 | 2004-10-19 10:23:54 | 7 | 5
|
||||
5 | 2003-10-19 10:23:54 | 6 | 4
|
||||
4 | 2002-10-19 10:23:54 | 5 | 3
|
||||
3 | 2001-10-19 10:23:54 | 4 | 1
|
||||
2 | 2001-10-19 10:23:54 | 4 | 1
|
||||
1 | 2000-10-19 10:23:54 | 2 | 1
|
||||
""")
|
||||
|
||||
execute_query(
|
||||
"""
|
||||
select id, f_timestamptz, first_value(id) over w AS first_value, last_value(id) over w AS last_value
|
||||
from datetimes
|
||||
window w as (order by f_timestamptz desc range between
|
||||
31622400 preceding and 31622400 following) order by id desc
|
||||
""",
|
||||
expected=expected
|
||||
)
|
||||
|
||||
@TestScenario
|
||||
def datetime_order_by_asc_range_between_n_preceding_and_n_following(self):
|
||||
"""Check range between preceding and following with
|
||||
DateTime column and ascending order by.
|
||||
"""
|
||||
expected = convert_output("""
|
||||
id | f_timestamp | first_value | last_value
|
||||
----+------------------------------+-------------+------------
|
||||
1 | 2000-10-19 10:23:54 | 1 | 3
|
||||
2 | 2001-10-19 10:23:54 | 1 | 4
|
||||
3 | 2001-10-19 10:23:54 | 1 | 4
|
||||
4 | 2002-10-19 10:23:54 | 2 | 5
|
||||
5 | 2003-10-19 10:23:54 | 4 | 6
|
||||
6 | 2004-10-19 10:23:54 | 5 | 7
|
||||
7 | 2005-10-19 10:23:54 | 6 | 8
|
||||
8 | 2006-10-19 10:23:54 | 7 | 9
|
||||
9 | 2007-10-19 10:23:54 | 8 | 10
|
||||
10 | 2008-10-19 10:23:54 | 9 | 10
|
||||
""")
|
||||
|
||||
execute_query(
|
||||
"""
|
||||
select id, f_timestamp, first_value(id) over w AS first_value, last_value(id) over w AS last_value
|
||||
from datetimes
|
||||
window w as (order by f_timestamp range between
|
||||
31622400 preceding and 31622400 following) ORDER BY id
|
||||
""",
|
||||
expected=expected
|
||||
)
|
||||
|
||||
@TestScenario
|
||||
def datetime_order_by_desc_range_between_n_preceding_and_n_following(self):
|
||||
"""Check range between preceding and following with
|
||||
DateTime column and descending order by.
|
||||
"""
|
||||
expected = convert_output("""
|
||||
id | f_timestamp | first_value | last_value
|
||||
----+------------------------------+-------------+------------
|
||||
10 | 2008-10-19 10:23:54 | 10 | 9
|
||||
9 | 2007-10-19 10:23:54 | 10 | 8
|
||||
8 | 2006-10-19 10:23:54 | 9 | 7
|
||||
7 | 2005-10-19 10:23:54 | 8 | 6
|
||||
6 | 2004-10-19 10:23:54 | 7 | 5
|
||||
5 | 2003-10-19 10:23:54 | 6 | 4
|
||||
4 | 2002-10-19 10:23:54 | 5 | 3
|
||||
2 | 2001-10-19 10:23:54 | 4 | 1
|
||||
3 | 2001-10-19 10:23:54 | 4 | 1
|
||||
1 | 2000-10-19 10:23:54 | 2 | 1
|
||||
""")
|
||||
|
||||
execute_query(
|
||||
"""
|
||||
select id, f_timestamp, first_value(id) over w AS first_value, last_value(id) over w AS last_value
|
||||
from datetimes
|
||||
window w as (order by f_timestamp desc range between
|
||||
31622400 preceding and 31622400 following)
|
||||
""",
|
||||
expected=expected
|
||||
)
|
||||
|
||||
@TestFeature
|
||||
@Name("range datetime")
|
||||
@Requirements(
|
||||
RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_DataTypes_DateAndDateTime("1.0")
|
||||
)
def feature(self):
    """Check `Date` and `DateTime` data types with range frames.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)
104
tests/testflows/window_functions/tests/range_errors.py
Normal file
@ -0,0 +1,104 @@
from testflows.core import *

from window_functions.requirements import *
from window_functions.tests.common import *

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_MultipleColumnsInOrderBy_Error("1.0")
)
def error_more_than_one_order_by_column(self):
    """Check that using more than one column in order by with range frame
    returns an error.
    """
    exitcode = 36
    message = "DB::Exception: Received from localhost:9000. DB::Exception: The RANGE OFFSET window frame requires exactly one ORDER BY column, 2 given"

    sql = ("select sum(salary) over (order by enroll_date, salary range between 1 preceding and 2 following) AS sum, "
        "salary, enroll_date from empsalary")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_missing_order_by(self):
    """Check that using range frame with offsets without order by returns an error.
    """
    exitcode = 36
    message = "DB::Exception: The RANGE OFFSET window frame requires exactly one ORDER BY column, 0 given"

    sql = ("select sum(salary) over (range between 1 preceding and 2 following) AS sum, "
        "salary, enroll_date from empsalary")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_missing_order_by_with_partition_by_clause(self):
    """Check that range frame with offsets used with partition by but
    without order by returns an error.
    """
    exitcode = 36
    message = "DB::Exception: The RANGE OFFSET window frame requires exactly one ORDER BY column, 0 given"

    sql = ("select f1, sum(f1) over (partition by f1 range between 1 preceding and 1 following) AS sum "
        "from t1 where f1 = f2")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
def error_range_over_non_numerical_column(self):
    """Check that range over a non-numerical column returns an error.
    """
    exitcode = 48
    message = "DB::Exception: The RANGE OFFSET frame for 'DB::ColumnLowCardinality' ORDER BY column is not implemented"

    sql = ("select sum(salary) over (order by depname range between 1 preceding and 2 following) as sum, "
        "salary, enroll_date from empsalary")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_ExprPreceding_ExprValue("1.0")
)
def error_negative_preceding_offset(self):
    """Check that non-positive value of preceding offset returns an error.
    """
    exitcode = 36
    message = "DB::Exception: Frame start offset must be greater than zero, -1 given"

    sql = ("select max(enroll_date) over (order by salary range between -1 preceding and 2 following) AS max, "
        "salary, enroll_date from empsalary")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_ExprFollowing_ExprValue("1.0")
)
def error_negative_following_offset(self):
    """Check that non-positive value of following offset returns an error.
    """
    exitcode = 36
    message = "DB::Exception: Frame end offset must be greater than zero, -2 given"

    sql = ("select max(enroll_date) over (order by salary range between 1 preceding and -2 following) AS max, "
        "salary, enroll_date from empsalary")

    with When("I execute query", description=sql):
        r = current().context.node.query(sql, exitcode=exitcode, message=message)

@TestFeature
@Name("range errors")
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame("1.0")
)
def feature(self):
    """Check different error conditions when using range frame.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)
1410
tests/testflows/window_functions/tests/range_frame.py
Normal file
File diff suppressed because it is too large
135
tests/testflows/window_functions/tests/range_overflow.py
Normal file
@ -0,0 +1,135 @@
from testflows.core import *

from window_functions.requirements import *
from window_functions.tests.common import *

@TestScenario
def positive_overflow_with_Int16(self):
    """Check positive overflow with Int16.
    """
    expected = convert_output("""
       x   | last_value
    -------+------------
     32764 | 0
     32765 | 0
     32766 | 0
    """)

    execute_query(
        """
        select number as x, last_value(x) over (order by toInt16(x) range between current row and 2147450884 following) AS last_value
        from numbers(32764, 3)
        """,
        expected=expected
    )

@TestScenario
def negative_overflow_with_Int16(self):
    """Check negative overflow with Int16.
    """
    expected = convert_output("""
        x   | last_value
    --------+------------
     -32764 | 0
     -32765 | 0
     -32766 | 0
    """)

    execute_query(
        """
        select number as x, last_value(x) over (order by toInt16(x) desc range between current row and 2147450885 following) as last_value
        from (SELECT -number - 32763 AS number FROM numbers(1, 3))
        """,
        expected=expected
    )

@TestScenario
def positive_overflow_for_Int32(self):
    """Check positive overflow for Int32.
    """
    expected = convert_output("""
         x      | last_value
    ------------+------------
     2147483644 | 2147483646
     2147483645 | 2147483646
     2147483646 | 2147483646
    """)

    execute_query(
        """
        select number as x, last_value(x) over (order by x range between current row and 4 following) as last_value
        from numbers(2147483644, 3)
        """,
        expected=expected
    )

@TestScenario
def negative_overflow_for_Int32(self):
    """Check negative overflow for Int32.
    """
    expected = convert_output("""
         x       | last_value
    -------------+-------------
     -2147483644 | -2147483646
     -2147483645 | -2147483646
     -2147483646 | -2147483646
    """)

    execute_query(
        """
        select number as x, last_value(x) over (order by x desc range between current row and 5 following) as last_value
        from (select -number-2147483643 AS number FROM numbers(1,3))
        """,
        expected=expected
    )

@TestScenario
def positive_overflow_for_Int64(self):
    """Check positive overflow for Int64.
    """
    expected = convert_output("""
              x          |     last_value
    ---------------------+---------------------
     9223372036854775804 | 9223372036854775806
     9223372036854775805 | 9223372036854775806
     9223372036854775806 | 9223372036854775806
    """)

    execute_query(
        """
        select number as x, last_value(x) over (order by x range between current row and 4 following) as last_value
        from numbers(9223372036854775804, 3)
        """,
        expected=expected
    )

@TestScenario
def negative_overflow_for_Int64(self):
    """Check negative overflow for Int64.
    """
    expected = convert_output("""
              x           |      last_value
    ----------------------+----------------------
     -9223372036854775804 | -9223372036854775806
     -9223372036854775805 | -9223372036854775806
     -9223372036854775806 | -9223372036854775806
    """)

    execute_query(
        """
        select number as x, last_value(x) over (order by x desc range between current row and 5 following) as last_value
        from (select -number-9223372036854775803 AS number from numbers(1,3))
        """,
        expected=expected
    )

@TestFeature
@Name("range overflow")
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame("1.0")
)
def feature(self):
    """Check using range frame with overflows.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)
688
tests/testflows/window_functions/tests/rows_frame.py
Normal file
@ -0,0 +1,688 @@
from testflows.core import *

from window_functions.requirements import *
from window_functions.tests.common import *

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_MissingFrameExtent_Error("1.0")
)
def missing_frame_extent(self):
    """Check that when rows frame has missing frame extent then an error is returned.
    """
    exitcode, message = syntax_error()

    self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number ROWS) FROM numbers(1,3)",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_InvalidFrameExtent_Error("1.0")
)
def invalid_frame_extent(self):
    """Check that when rows frame has invalid frame extent then an error is returned.
    """
    exitcode, message = frame_offset_nonnegative_error()

    self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number ROWS -1) FROM numbers(1,3)",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_CurrentRow("1.0")
)
def start_current_row(self):
    """Check rows current row frame.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+-------
     1 | 5000 | 5000
     2 | 3900 | 3900
     3 | 4800 | 4800
     4 | 4800 | 4800
     5 | 3500 | 3500
     7 | 4200 | 4200
     8 | 6000 | 6000
     9 | 4500 | 4500
     10 | 5200 | 5200
     11 | 5200 | 5200
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS CURRENT ROW) AS sum FROM empsalary ORDER BY empno",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_UnboundedPreceding("1.0")
)
def start_unbounded_preceding(self):
    """Check rows unbounded preceding frame.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+-------
     1 | 5000 | 5000
     2 | 3900 | 8900
     3 | 4800 | 13700
     4 | 4800 | 18500
     5 | 3500 | 22000
     7 | 4200 | 26200
     8 | 6000 | 32200
     9 | 4500 | 36700
     10 | 5200 | 41900
     11 | 5200 | 47100
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS UNBOUNDED PRECEDING) AS sum FROM empsalary ORDER BY empno",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_ExprPreceding("1.0")
)
def start_expr_preceding(self):
    """Check rows expr preceding frame.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+--------
     1 | 5000 | 5000
     2 | 3900 | 8900
     3 | 4800 | 8700
     4 | 4800 | 9600
     5 | 3500 | 8300
     7 | 4200 | 7700
     8 | 6000 | 10200
     9 | 4500 | 10500
     10 | 5200 | 9700
     11 | 5200 | 10400
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS 1 PRECEDING) AS sum FROM empsalary ORDER BY empno",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_UnboundedFollowing_Error("1.0")
)
def start_unbounded_following_error(self):
    """Check rows unbounded following frame returns an error.
    """
    exitcode, message = frame_start_error()

    self.context.node.query(
        "SELECT empno, salary, sum(salary) OVER (ROWS UNBOUNDED FOLLOWING) AS sum FROM empsalary ORDER BY empno",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_ExprFollowing_Error("1.0")
)
def start_expr_following_error(self):
    """Check rows expr following frame returns an error.
    """
    exitcode, message = window_frame_error()

    self.context.node.query(
        "SELECT empno, salary, sum(salary) OVER (ROWS 1 FOLLOWING) AS sum FROM empsalary ORDER BY empno",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_CurrentRow("1.0")
)
def between_current_row_and_current_row(self):
    """Check rows between current row and current row frame.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+--------
     1 | 5000 | 5000
     2 | 3900 | 3900
     3 | 4800 | 4800
     4 | 4800 | 4800
     5 | 3500 | 3500
     7 | 4200 | 4200
     8 | 6000 | 6000
     9 | 4500 | 4500
     10 | 5200 | 5200
     11 | 5200 | 5200
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN CURRENT ROW AND CURRENT ROW) AS sum FROM empsalary",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_ExprPreceding_Error("1.0")
)
def between_current_row_and_expr_preceding_error(self):
    """Check rows between current row and expr preceding returns an error.
    """
    exitcode, message = window_frame_error()

    self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number ROWS BETWEEN CURRENT ROW AND 1 PRECEDING) FROM numbers(1,3)",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_UnboundedPreceding_Error("1.0")
)
def between_current_row_and_unbounded_preceding_error(self):
    """Check rows between current row and unbounded preceding returns an error.
    """
    exitcode, message = frame_end_unbounded_preceding_error()

    self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number ROWS BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) FROM numbers(1,3)",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_UnboundedFollowing("1.0")
)
def between_current_row_and_unbounded_following(self):
    """Check rows between current row and unbounded following.
    """
    expected = convert_output("""
     sum | unique1 | four
    -----+---------+------
     45 | 0 | 0
     45 | 1 | 1
     44 | 2 | 2
     42 | 3 | 3
     39 | 4 | 0
     35 | 5 | 1
     30 | 6 | 2
     24 | 7 | 3
     17 | 8 | 0
     9 | 9 | 1
    """)

    execute_query(
        "SELECT sum(unique1) over (order by unique1 rows between current row and unbounded following) AS sum,"
        "unique1, four "
        "FROM tenk1 WHERE unique1 < 10",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_ExprFollowing("1.0")
)
def between_current_row_and_expr_following(self):
    """Check rows between current row and expr following.
    """
    expected = convert_output("""
     i | b | bool_and | bool_or
    ---+---+----------+---------
     1 | 1 | 1 | 1
     2 | 1 | 0 | 1
     3 | 0 | 0 | 0
     4 | 0 | 0 | 1
     5 | 1 | 1 | 1
    """)

    execute_query("""
        SELECT i, b, groupBitAnd(b) OVER w AS bool_and, groupBitOr(b) OVER w AS bool_or
        FROM VALUES('i Int8, b UInt8', (1,1), (2,1), (3,0), (4,0), (5,1))
        WINDOW w AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING)
        """,
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_CurrentRow("1.0")
)
def between_unbounded_preceding_and_current_row(self):
    """Check rows between unbounded preceding and current row.
    """
    expected = convert_output("""
     four | two | sum | last_value
    ------+-----+-----+------------
     0 | 0 | 0 | 0
     0 | 0 | 0 | 0
     0 | 1 | 1 | 1
     0 | 1 | 2 | 1
     0 | 2 | 4 | 2
     1 | 0 | 0 | 0
     1 | 0 | 0 | 0
     1 | 1 | 1 | 1
     1 | 1 | 2 | 1
     1 | 2 | 4 | 2
     2 | 0 | 0 | 0
     2 | 0 | 0 | 0
     2 | 1 | 1 | 1
     2 | 1 | 2 | 1
     2 | 2 | 4 | 2
     3 | 0 | 0 | 0
     3 | 0 | 0 | 0
     3 | 1 | 1 | 1
     3 | 1 | 2 | 1
     3 | 2 | 4 | 2
    """)

    execute_query(
        "SELECT four, toInt8(ten/4) as two,"
        "sum(toInt8(ten/4)) over (partition by four order by toInt8(ten/4) rows between unbounded preceding and current row) AS sum,"
        "last_value(toInt8(ten/4)) over (partition by four order by toInt8(ten/4) rows between unbounded preceding and current row) AS last_value "
        "FROM (select distinct ten, four from tenk1)",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_UnboundedPreceding_Error("1.0")
)
def between_unbounded_preceding_and_unbounded_preceding_error(self):
    """Check rows between unbounded preceding and unbounded preceding returns an error.
    """
    exitcode, message = frame_end_unbounded_preceding_error()

    self.context.node.query("SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING) FROM numbers(1,3)",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_ExprPreceding("1.0")
)
def between_unbounded_preceding_and_expr_preceding(self):
    """Check rows between unbounded preceding and expr preceding frame.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+--------
     1 | 5000 | 0
     2 | 3900 | 5000
     3 | 4800 | 8900
     4 | 4800 | 13700
     5 | 3500 | 18500
     7 | 4200 | 22000
     8 | 6000 | 26200
     9 | 4500 | 32200
     10 | 5200 | 36700
     11 | 5200 | 41900
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) AS sum FROM empsalary",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_UnboundedFollowing("1.0")
)
def between_unbounded_preceding_and_unbounded_following(self):
    """Check rows between unbounded preceding and unbounded following frame.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+--------
     1 | 5000 | 47100
     2 | 3900 | 47100
     3 | 4800 | 47100
     4 | 4800 | 47100
     5 | 3500 | 47100
     7 | 4200 | 47100
     8 | 6000 | 47100
     9 | 4500 | 47100
     10 | 5200 | 47100
     11 | 5200 | 47100
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_ExprFollowing("1.0")
)
def between_unbounded_preceding_and_expr_following(self):
    """Check rows between unbounded preceding and expr following.
    """
    expected = convert_output("""
     sum | unique1 | four
    -----+---------+------
     1 | 0 | 0
     3 | 1 | 1
     6 | 2 | 2
     10 | 3 | 3
     15 | 4 | 0
     21 | 5 | 1
     28 | 6 | 2
     36 | 7 | 3
     45 | 8 | 0
     45 | 9 | 1
    """)

    execute_query(
        "SELECT sum(unique1) over (order by unique1 rows between unbounded preceding and 1 following) AS sum,"
        "unique1, four "
        "FROM tenk1 WHERE unique1 < 10",
        expected=expected
    )

@TestOutline(Scenario)
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedFollowing_Error("1.0")
)
@Examples("range", [
    ("UNBOUNDED FOLLOWING AND CURRENT ROW",),
    ("UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING",),
    ("UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING",),
    ("UNBOUNDED FOLLOWING AND 1 PRECEDING",),
    ("UNBOUNDED FOLLOWING AND 1 FOLLOWING",),
])
def between_unbounded_following_error(self, range):
    """Check rows between unbounded following and any end frame returns an error.
    """
    exitcode, message = frame_start_error()

    self.context.node.query(f"SELECT number,sum(number) OVER (ROWS BETWEEN {range}) FROM numbers(1,3)",
        exitcode=exitcode, message=message)

@TestOutline(Scenario)
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_Error("1.0")
)
@Examples("range exitcode message", [
    ("1 FOLLOWING AND CURRENT ROW", *window_frame_error()),
    ("1 FOLLOWING AND UNBOUNDED PRECEDING", *frame_end_unbounded_preceding_error()),
    ("1 FOLLOWING AND 1 PRECEDING", *frame_start_error())
])
def between_expr_following_error(self, range, exitcode, message):
    """Check cases when rows between expr following returns an error.
    """
    self.context.node.query(f"SELECT number,sum(number) OVER (ROWS BETWEEN {range}) FROM numbers(1,3)",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_ExprFollowing_Error("1.0")
)
def between_expr_following_and_expr_following_error(self):
    """Check rows between expr following and expr following returns an error when frame end index is less
    than frame start.
    """
    exitcode, message = frame_start_error()

    self.context.node.query("SELECT number,sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND 0 FOLLOWING) FROM numbers(1,3)",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_UnboundedFollowing("1.0")
)
def between_expr_following_and_unbounded_following(self):
    """Check rows between expr following and unbounded following frame.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+--------
     1 | 5000 | 28600
     2 | 3900 | 25100
     3 | 4800 | 20900
     4 | 4800 | 14900
     5 | 3500 | 10400
     7 | 4200 | 5200
     8 | 6000 | 0
     9 | 4500 | 0
     10 | 5200 | 0
     11 | 5200 | 0
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 4 FOLLOWING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_ExprFollowing("1.0")
)
def between_expr_following_and_expr_following(self):
    """Check rows between expr following and expr following frame when end of the frame is greater than
    the start of the frame.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+--------
     1 | 5000 | 17000
     2 | 3900 | 17300
     3 | 4800 | 18500
     4 | 4800 | 18200
     5 | 3500 | 19900
     7 | 4200 | 20900
     8 | 6000 | 14900
     9 | 4500 | 10400
     10 | 5200 | 5200
     11 | 5200 | 0
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 1 FOLLOWING AND 4 FOLLOWING) AS sum FROM empsalary",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_CurrentRow("1.0")
)
def between_expr_preceding_and_current_row(self):
    """Check rows between expr preceding and current row frame.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+--------
     8 | 6000 | 6000
     10 | 5200 | 11200
     11 | 5200 | 10400
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS sum FROM empsalary WHERE salary > 5000",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_UnboundedPreceding_Error("1.0")
)
def between_expr_preceding_and_unbounded_preceding_error(self):
    """Check rows between expr preceding and unbounded preceding returns an error.
    """
    exitcode, message = frame_end_error()

    self.context.node.query("SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND UNBOUNDED PRECEDING) FROM numbers(1,3)",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_UnboundedFollowing("1.0")
)
def between_expr_preceding_and_unbounded_following(self):
    """Check rows between expr preceding and unbounded following frame.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+--------
     8 | 6000 | 16400
     10 | 5200 | 16400
     11 | 5200 | 10400
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary WHERE salary > 5000",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprPreceding_Error("1.0")
)
def between_expr_preceding_and_expr_preceding_error(self):
    """Check rows between expr preceding and expr preceding returns an error when frame end is
    before frame start.
    """
    exitcode, message = frame_start_error()

    self.context.node.query("SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 2 PRECEDING) FROM numbers(1,3)",
        exitcode=exitcode, message=message)

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprPreceding("1.0")
)
def between_expr_preceding_and_expr_preceding(self):
    """Check rows between expr preceding and expr preceding frame when frame end is after or at frame start.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+--------
     1 | 5000 | 5000
     2 | 3900 | 8900
     3 | 4800 | 8700
     4 | 4800 | 9600
     5 | 3500 | 8300
     7 | 4200 | 7700
     8 | 6000 | 10200
     9 | 4500 | 10500
     10 | 5200 | 9700
     11 | 5200 | 10400
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 1 PRECEDING AND 0 PRECEDING) AS sum FROM empsalary",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprFollowing("1.0")
)
def between_expr_preceding_and_expr_following(self):
    """Check rows between expr preceding and expr following frame.
    """
    expected = convert_output("""
     empno  | salary |  sum
    --------+--------+--------
     8 | 6000 | 11200
     10 | 5200 | 16400
     11 | 5200 | 10400
    """)

    execute_query(
        "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum FROM empsalary WHERE salary > 5000",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_ExprFollowing("1.0")
)
def between_expr_following_and_expr_following_ref(self):
    """Check reference result for rows between expr following and expr following range.
    """
    expected = convert_output("""
     sum | unique1 | four
    -----+---------+------
     6 | 0 | 0
     9 | 1 | 1
     12 | 2 | 2
     15 | 3 | 3
     18 | 4 | 0
     21 | 5 | 1
     24 | 6 | 2
     17 | 7 | 3
     9 | 8 | 0
     0 | 9 | 1
    """)

    execute_query(
        "SELECT sum(unique1) over (order by unique1 rows between 1 following and 3 following) AS sum,"
        "unique1, four "
        "FROM tenk1 WHERE unique1 < 10",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprPreceding("1.0")
)
def between_expr_preceding_and_expr_preceding_ref(self):
    """Check reference result for rows between expr preceding and expr preceding frame.
    """
    expected = convert_output("""
     sum | unique1 | four
    -----+---------+------
     0 | 0 | 0
     0 | 1 | 1
     1 | 2 | 2
     3 | 3 | 3
     5 | 4 | 0
     7 | 5 | 1
     9 | 6 | 2
     11 | 7 | 3
     13 | 8 | 0
     15 | 9 | 1
    """)

    execute_query(
        "SELECT sum(unique1) over (order by unique1 rows between 2 preceding and 1 preceding) AS sum,"
        "unique1, four "
        "FROM tenk1 WHERE unique1 < 10",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprFollowing("1.0")
)
def between_expr_preceding_and_expr_following_ref(self):
    """Check reference result for rows between expr preceding and expr following frame.
    """
    expected = convert_output("""
     sum | unique1 | four
    -----+---------+------
     3 | 0 | 0
     6 | 1 | 1
     10 | 2 | 2
     15 | 3 | 3
     20 | 4 | 0
     25 | 5 | 1
     30 | 6 | 2
     35 | 7 | 3
     30 | 8 | 0
     24 | 9 | 1
    """)

    execute_query(
        "SELECT sum(unique1) over (order by unique1 rows between 2 preceding and 2 following) AS sum, "
        "unique1, four "
        "FROM tenk1 WHERE unique1 < 10",
        expected=expected
    )

@TestFeature
@Name("rows frame")
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame("1.0")
)
def feature(self):
    """Check defining rows frame.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)
@ -0,0 +1,826 @@
func__count_salary__ = r"""
func
10
9
8
7
6
5
4
3
2
1
"""

func__min_salary__ = r"""
func
3500
3900
4200
4500
4800
4800
5000
5200
5200
6000
"""

func__max_salary__ = r"""
func
6000
6000
6000
6000
6000
6000
6000
6000
6000
6000
"""

func__sum_salary__ = r"""
func
47100
43600
39700
35500
31000
26200
21400
16400
11200
6000
"""

func__avg_salary__ = r"""
func
4710
4844.444444444444
4962.5
5071.428571428572
5166.666666666667
5240
5350
5466.666666666667
5600
6000
"""

func__any_salary__ = r"""
func
3500
3900
4200
4500
4800
4800
5000
5200
5200
6000
"""

func__stddevPop_salary__ = r"""
func
683.3008122342604
581.3989089756045
504.8205126577168
443.08749769345
406.88518719112545
407.9215610874228
384.0572873934304
377.1236166328275
400
0
"""

func__stddevSamp_salary__ = r"""
func
720.2622979011034
616.6666666666654
539.6758286230726
478.5891965429401
445.720390678583
456.0701700396552
443.471156521669
461.8802153517033
565.685424949238
nan
"""

func__varPop_salary__ = r"""
func
466900
338024.6913580232
254843.75
196326.53061224308
165555.55555555722
166400
147500
142222.22222222388
160000
0
"""

func__varSamp_salary__ = r"""
func
518777.77777777775
380277.7777777761
291250
229047.61904761693
198666.66666666867
208000
196666.66666666666
213333.33333333582
320000
nan
"""

func__covarPop_salary__2000__ = r"""
func
0
0
0
0
0
0
0
0
0
0
"""

func__covarSamp_salary__2000__ = r"""
func
0
0
0
0
0
0
0
0
0
nan
"""

func__anyHeavy_salary__ = r"""
func
5200
5200
5200
5200
5200
5200
5200
5200
5200
6000
"""

func__anyLast_salary__ = r"""
func
6000
6000
6000
6000
6000
6000
6000
6000
6000
6000
"""

func__argMin_salary__5000__ = r"""
func
3500
3900
4200
4500
4800
4800
5000
5200
5200
6000
"""

func__argMax_salary__5000__ = r"""
func
3500
3900
4200
4500
4800
4800
5000
5200
5200
6000
"""

func__avgWeighted_salary__1__ = r"""
func
4710
4844.444444444444
4962.5
5071.428571428572
5166.666666666667
5240
5350
5466.666666666667
5600
6000
"""

func__corr_salary__0_5__ = r"""
func
nan
nan
nan
nan
nan
nan
nan
nan
nan
nan
"""

func__topK_salary__ = r"""
func
[4800,5200,3500,3900,4200,4500,5000,6000]
[4800,5200,3900,4200,4500,5000,6000]
[4800,5200,4200,4500,5000,6000]
[4800,5200,4500,5000,6000]
[4800,5200,5000,6000]
[5200,4800,5000,6000]
[5200,5000,6000]
[5200,6000]
[5200,6000]
[6000]
"""

func__topKWeighted_salary__1__ = r"""
func
[4800,5200,3500,3900,4200,4500,5000,6000]
[4800,5200,3900,4200,4500,5000,6000]
[4800,5200,4200,4500,5000,6000]
[4800,5200,4500,5000,6000]
[4800,5200,5000,6000]
[5200,4800,5000,6000]
[5200,5000,6000]
[5200,6000]
[5200,6000]
[6000]
"""

func__groupArray_salary__ = r"""
func
[3500,3900,4200,4500,4800,4800,5000,5200,5200,6000]
[3900,4200,4500,4800,4800,5000,5200,5200,6000]
[4200,4500,4800,4800,5000,5200,5200,6000]
[4500,4800,4800,5000,5200,5200,6000]
[4800,4800,5000,5200,5200,6000]
[4800,5000,5200,5200,6000]
[5000,5200,5200,6000]
[5200,5200,6000]
[5200,6000]
[6000]
"""

func__groupUniqArray_salary__ = r"""
func
[3500,5000,6000,3900,4800,5200,4200,4500]
[5000,6000,3900,4800,5200,4200,4500]
[5000,6000,4800,5200,4200,4500]
[5000,6000,4800,5200,4500]
[5000,6000,4800,5200]
[5000,6000,4800,5200]
[5000,6000,5200]
[6000,5200]
[6000,5200]
[6000]
"""

func__groupArrayInsertAt_salary__0__ = r"""
func
[3500]
[3900]
[4200]
[4500]
[4800]
[4800]
[5000]
[5200]
[5200]
[6000]
"""

func__groupArrayMovingSum_salary__ = r"""
func
[3500,7400,11600,16100,20900,25700,30700,35900,41100,47100]
[3900,8100,12600,17400,22200,27200,32400,37600,43600]
[4200,8700,13500,18300,23300,28500,33700,39700]
[4500,9300,14100,19100,24300,29500,35500]
[4800,9600,14600,19800,25000,31000]
[4800,9800,15000,20200,26200]
[5000,10200,15400,21400]
[5200,10400,16400]
[5200,11200]
[6000]
"""

func__groupArrayMovingAvg_salary__ = r"""
func
[350,740,1160,1610,2090,2570,3070,3590,4110,4710]
[433.3333333333333,900,1400,1933.3333333333333,2466.6666666666665,3022.222222222222,3600,4177.777777777777,4844.444444444444]
[525,1087.5,1687.5,2287.5,2912.5,3562.5,4212.5,4962.5]
[642.8571428571429,1328.5714285714287,2014.2857142857142,2728.5714285714284,3471.4285714285716,4214.285714285715,5071.428571428572]
[800,1600,2433.3333333333335,3300,4166.666666666667,5166.666666666667]
[960,1960,3000,4040,5240]
[1250,2550,3850,5350]
[1733.3333333333333,3466.6666666666665,5466.666666666667]
[2600,5600]
[6000]
"""

func__groupArraySample_3__1234__salary__ = r"""
func
[6000,4800,4200]
[4800,5000,4500]
[4800,5200,4800]
[5000,5200,4800]
[5200,6000,5000]
[5200,6000,5200]
[6000,5200,5200]
[5200,5200,6000]
[5200,6000]
[6000]
"""

func__groupBitAnd_toUInt8_salary___ = r"""
func
0
0
0
0
0
0
0
80
80
112
"""

func__groupBitOr_toUInt8_salary___ = r"""
func
252
252
252
252
248
248
248
112
112
112
"""

func__groupBitXor_toUInt8_salary___ = r"""
func
148
56
4
108
248
56
248
112
32
112
"""

func__groupBitmap_toUInt8_salary___ = r"""
func
8
7
6
5
4
4
3
2
2
1
"""

func__sumWithOverflow_salary__ = r"""
func
47100
43600
39700
35500
31000
26200
21400
16400
11200
6000
"""

func__deltaSum_salary__ = r"""
func
2500
2100
1800
1500
1200
1200
1000
800
800
0
"""

func__sumMap__5000____salary___ = r"""
func
([5000],[47100])
([5000],[43600])
([5000],[39700])
([5000],[35500])
([5000],[31000])
([5000],[26200])
([5000],[21400])
([5000],[16400])
([5000],[11200])
([5000],[6000])
"""

func__minMap__5000____salary___ = r"""
func
([5000],[3500])
([5000],[3900])
([5000],[4200])
([5000],[4500])
([5000],[4800])
([5000],[4800])
([5000],[5000])
([5000],[5200])
([5000],[5200])
([5000],[6000])
"""

func__maxMap__5000____salary___ = r"""
func
([5000],[6000])
([5000],[6000])
([5000],[6000])
([5000],[6000])
([5000],[6000])
([5000],[6000])
([5000],[6000])
([5000],[6000])
([5000],[6000])
([5000],[6000])
"""

func__skewPop_salary__ = r"""
func
-0.01162261667454972
0.2745338273905704
0.5759615484689373
0.9491407966637483
1.1766149730944095
1.0013237284459204
0.9929662701927243
0.7071067811864931
0
nan
"""

func__skewSamp_salary__ = r"""
func
-0.009923564086909852
0.2300737552746305
0.4714173586339343
0.7532002517703689
0.8950813364751975
0.7164889357723577
0.6449505113159867
0.3849001794597209
0
nan
"""

func__kurtPop_salary__ = r"""
func
2.539217051205756
2.7206630060048402
2.983661891140213
3.193064086003685
3.1570199540482906
2.7045118343195265
2.235277219189888
1.499999999988063
1
nan
"""

func__kurtSamp_salary__ = r"""
func
2.0567658114766627
2.1496596590655526
2.2843661354042255
2.3459246346149527
2.1923749680890907
1.730887573964497
1.2573434357943123
0.6666666666613614
0.25
nan
"""

func__uniq_salary__ = r"""
func
8
7
6
5
4
4
3
2
2
1
"""

func__uniqExact_salary__ = r"""
func
8
7
6
5
4
4
3
2
2
1
"""

func__uniqCombined_salary__ = r"""
func
8
7
6
5
4
4
3
2
2
1
"""

func__uniqCombined64_salary__ = r"""
func
8
7
6
5
4
4
3
2
2
1
"""

func__uniqHLL12_salary__ = r"""
func
8
7
6
5
4
4
3
2
2
1
"""

func__quantile_salary__ = r"""
func
4800
4800
4900
5000
5100
5200
5200
5200
5600
6000
"""

func__quantiles_0_5__salary__ = r"""
func
[4800]
[4800]
[4900]
[5000]
[5100]
[5200]
[5200]
[5200]
[5600]
[6000]
"""

func__quantileExact_salary__ = r"""
func
4800
4800
5000
5000
5200
5200
5200
5200
6000
6000
"""

func__quantileExactWeighted_salary__1__ = r"""
func
4800
4800
4800
5000
5000
5200
5200
5200
5200
6000
"""

func__quantileTiming_salary__ = r"""
func
4800
4800
5000
5000
5200
5200
5200
5200
6000
6000
"""

func__quantileTimingWeighted_salary__1__ = r"""
func
4800
4800
5000
5000
5200
5200
5200
5200
6000
6000
"""

func__quantileDeterministic_salary__1234__ = r"""
func
4800
4800
4900
5000
5100
5200
5200
5200
5600
6000
"""

func__quantileTDigest_salary__ = r"""
func
4800
4800
4800
5000
5000
5200
5200
5200
5200
6000
"""

func__quantileTDigestWeighted_salary__1__ = r"""
func
4800
4800
4800
5000
5000
5200
5200
5200
5200
6000
"""

func__simpleLinearRegression_salary__empno__ = r"""
func
(0.0017991004497751124,-2.473763118440779)
(0.0023192111029948868,-5.12417823228634)
(0.0013182096873083997,0.08338442673206625)
(0.0021933471933471933,-4.551975051975051)
(0.004664429530201342,-17.93288590604027)
(0.003894230769230769,-13.60576923076923)
(0.00288135593220339,-7.915254237288137)
(-0.003125,26.75)
(-0.00375,30.5)
(nan,nan)
"""

func__stochasticLinearRegression_salary__1__ = r"""
func
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
"""

func__stochasticLogisticRegression_salary__1__ = r"""
func
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
[0,0]
"""

func__studentTTest_salary__1__ = r"""
func
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
"""

func__welchTTest_salary__1__ = r"""
func
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
(nan,0)
"""

func__median_salary__ = r"""
func
4800
4800
4900
5000
5100
5200
5200
5200
5600
6000
"""
10000
tests/testflows/window_functions/tests/tenk.data
Normal file
File diff suppressed because it is too large
121
tests/testflows/window_functions/tests/window_clause.py
Normal file
@ -0,0 +1,121 @@
from testflows.core import *

from window_functions.requirements import *
from window_functions.tests.common import *

@TestScenario
def single_window(self):
    """Check defining a single named window using window clause.
    """
    expected = convert_output("""
      depname  | empno | salary |  sum
    -----------+-------+--------+-------
     develop | 7 | 4200 | 4200
     develop | 8 | 6000 | 10200
     develop | 9 | 4500 | 14700
     develop | 10 | 5200 | 19900
     develop | 11 | 5200 | 25100
     personnel | 2 | 3900 | 3900
     personnel | 5 | 3500 | 7400
     sales | 1 | 5000 | 5000
     sales | 3 | 4800 | 9800
     sales | 4 | 4800 | 14600
    """)

    execute_query(
        "SELECT depname, empno, salary, sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY empno)",
        expected=expected
    )

@TestScenario
def unused_window(self):
    """Check unused window.
    """
    expected = convert_output("""
      four
    -------
    """)

    execute_query(
        "SELECT four FROM tenk1 WHERE 0 WINDOW w AS (PARTITION BY ten)",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause_MultipleWindows("1.0")
)
def multiple_identical_windows(self):
    """Check defining multiple identical windows using window clause.
    """
    expected = convert_output("""
      sum  | count
    -------+-------
     3500 | 1
     7400 | 2
     11600 | 3
     16100 | 4
     25700 | 6
     25700 | 6
     30700 | 7
     41100 | 9
     41100 | 9
     47100 | 10
    """)

    execute_query(
        "SELECT sum(salary) OVER w1 AS sum, count(*) OVER w2 AS count "
        "FROM empsalary WINDOW w1 AS (ORDER BY salary), w2 AS (ORDER BY salary)",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause_MultipleWindows("1.0")
)
def multiple_windows(self):
    """Check defining multiple windows using window clause.
    """
    expected = convert_output("""
     empno  |  depname  | salary | sum1  |  sum2
    --------+-----------+--------+-------+--------
     1 | sales | 5000 | 5000 | 5000
     2 | personnel | 3900 | 3900 | 8900
     3 | sales | 4800 | 9800 | 8700
     4 | sales | 4800 | 14600 | 9600
     5 | personnel | 3500 | 7400 | 8300
     7 | develop | 4200 | 4200 | 7700
     8 | develop | 6000 | 10200 | 10200
     9 | develop | 4500 | 14700 | 10500
     10 | develop | 5200 | 19900 | 9700
     11 | develop | 5200 | 25100 | 10400
    """)

    execute_query("SELECT empno, depname, salary, sum(salary) OVER w1 AS sum1, sum(salary) OVER w2 AS sum2 "
        "FROM empsalary WINDOW w1 AS (PARTITION BY depname ORDER BY empno), w2 AS (ORDER BY empno ROWS 1 PRECEDING)",
        expected=expected
    )

@TestScenario
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause_MissingWindowSpec_Error("1.0")
)
def missing_window_spec(self):
    """Check missing window spec in window clause.
    """
    exitcode = 62
    message = "Exception: Syntax error"

    self.context.node.query("SELECT number,sum(number) OVER w1 FROM values('number Int8', (1),(1),(2),(3)) WINDOW w1",
        exitcode=exitcode, message=message)

@TestFeature
@Name("window clause")
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause("1.0")
)
def feature(self):
    """Check defining window clause.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)
206
tests/testflows/window_functions/tests/window_spec.py
Normal file
@ -0,0 +1,206 @@
|
||||
from testflows.core import *
|
||||
from window_functions.requirements import *
|
||||
from window_functions.tests.common import *
|
||||
|
||||
@TestScenario
|
||||
def partition_clause(self):
|
||||
"""Check window specification that only contains partition clause.
|
||||
"""
|
||||
expected = convert_output("""
|
||||
sum
|
||||
-------
|
||||
25100
|
||||
25100
|
||||
25100
|
||||
25100
|
||||
25100
|
||||
7400
|
||||
7400
|
||||
14600
|
||||
14600
|
||||
14600
|
||||
""")
|
||||
|
||||
execute_query(
|
||||
"SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (PARTITION BY depname)",
|
||||
expected=expected
|
||||
)

@TestScenario
def orderby_clause(self):
    """Check window specification that only contains order by clause.
    """
    expected = convert_output("""
      sum
    -------
     25100
     25100
     25100
     25100
     25100
     32500
     32500
     47100
     47100
     47100
    """)

    execute_query(
        "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (ORDER BY depname)",
        expected=expected
    )
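
# Note: ORDER BY depname alone makes all rows of a department peers, so
# each row gets the cumulative total up to and including its department in
# sort order: develop 25100, personnel 25100+7400=32500, sales 47100.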

@TestScenario
def frame_clause(self):
    """Check window specification that only contains frame clause.
    """
    expected = convert_output("""
      sum
    -------
      5000
      3900
      4800
      4800
      3500
      4200
      6000
      4500
      5200
      5200
    """)

    execute_query(
        "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (ORDER BY empno ROWS CURRENT ROW)",
        expected=expected
    )
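
# Note: "ROWS CURRENT ROW" is shorthand for
# ROWS BETWEEN CURRENT ROW AND CURRENT ROW, so each frame contains exactly
# one row and sum(salary) simply returns the row's own salary.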

@TestScenario
def partition_with_order_by(self):
    """Check window specification that contains partition and order by clauses.
    """
    expected = convert_output("""
      sum
    -------
      4200
      8700
     19100
     19100
     25100
      3500
      7400
      9600
      9600
     14600
    """)

    execute_query(
        "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary)",
        expected=expected
    )
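
# Note: within each department this is a running sum in salary order; rows
# that tie on salary are peers under the default RANGE frame and share a
# value (the two 5200 salaries in develop both show 19100, the two 4800
# salaries in sales both show 9600).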

@TestScenario
def partition_with_frame(self):
    """Check window specification that contains partition and frame clauses.
    """
    expected = convert_output("""
      sum
    -------
      4200
      6000
      4500
      5200
      5200
      3900
      3500
      5000
      4800
      4800
    """)

    execute_query(
        "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (PARTITION BY depname, empno ROWS 1 PRECEDING)",
        expected=expected
    )
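
# Note: empno is unique, so PARTITION BY (depname, empno) puts each row in
# its own single-row partition; "ROWS 1 PRECEDING" then never finds a
# preceding row and the sum is just the row's own salary.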

@TestScenario
def order_by_with_frame(self):
    """Check window specification that contains order by and frame clauses.
    """
    expected = convert_output("""
      sum
    -------
      4200
     10200
     10500
      9700
     10400
      9100
      7400
      8500
      9800
      9600
    """)

    execute_query(
        "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (ORDER BY depname, empno ROWS 1 PRECEDING)",
        expected=expected
    )
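
# Note: here depname is only a sort key, not a partition, so the two-row
# frame crosses department boundaries: the 9100 row is 5200 (last develop
# row) + 3900 (first personnel row).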

@TestScenario
def partition_with_order_by_and_frame(self):
    """Check window specification that contains all clauses.
    """
    expected = convert_output("""
      sum
    -------
      4200
      8700
      9700
     10400
     11200
      3500
      7400
      4800
      9600
      9800
    """)

    execute_query(
        "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary ROWS 1 PRECEDING)",
        expected=expected
    )
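
# Note: all three clauses combined give a two-row moving sum in salary
# order that restarts at each department boundary, e.g. develop yields
# 4200, 4200+4500=8700, 4500+5200=9700, 5200+5200=10400, 5200+6000=11200.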

@TestScenario
def empty(self):
    """Check defining an empty window specification.
    """
    expected = convert_output("""
      sum
    -------
     47100
     47100
     47100
     47100
     47100
     47100
     47100
     47100
     47100
     47100
    """)

    execute_query(
        "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS ()",
        expected=expected
    )
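
# Note: an empty specification "()" places all rows in a single partition
# whose frame covers every row, so each row gets the grand total 47100;
# "OVER w" here behaves the same as a bare "OVER ()".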

@TestFeature
@Name("window spec")
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions_WindowSpec("1.0")
)
def feature(self):
    """Check defining window specifications.
    """
    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)