mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-18 05:32:52 +00:00
7609 lines
497 KiB
Plaintext
7609 lines
497 KiB
Plaintext
|
Not found: /etc/init.d/service with path=
|
|||
|
start time: Ср. сент. 18 02:11:52 MSK 2013
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT count(*) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_8923@mturlrep13_201309180211_341387203.txt
|
|||
|
hive> SELECT count(*) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0199
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:12:08,040 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:12:16,074 Stage-1 map = 4%, reduce = 0%
|
|||
|
2013-09-18 02:12:19,086 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 02:12:22,098 Stage-1 map = 29%, reduce = 0%
|
|||
|
2013-09-18 02:12:25,110 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 02:12:28,121 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 02:12:34,140 Stage-1 map = 74%, reduce = 0%
|
|||
|
2013-09-18 02:12:35,150 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 31.42 sec
|
|||
|
2013-09-18 02:12:36,156 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 31.42 sec
|
|||
|
2013-09-18 02:12:37,162 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 31.42 sec
|
|||
|
2013-09-18 02:12:38,167 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 68.28 sec
|
|||
|
2013-09-18 02:12:39,173 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 143.22 sec
|
|||
|
2013-09-18 02:12:40,178 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 143.22 sec
|
|||
|
2013-09-18 02:12:41,182 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 143.22 sec
|
|||
|
2013-09-18 02:12:42,188 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 143.22 sec
|
|||
|
2013-09-18 02:12:43,195 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 145.37 sec
|
|||
|
2013-09-18 02:12:44,201 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 145.37 sec
|
|||
|
2013-09-18 02:12:45,206 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 145.37 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 minutes 25 seconds 370 msec
|
|||
|
Ended Job = job_201309172235_0199
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 145.37 sec HDFS Read: 1082943442 HDFS Write: 9 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 2 minutes 25 seconds 370 msec
|
|||
|
OK
|
|||
|
10000000
|
|||
|
Time taken: 47.388 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT count(*) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_10060@mturlrep13_201309180212_360076553.txt
|
|||
|
hive> SELECT count(*) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0200
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:12:55,466 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:13:02,498 Stage-1 map = 4%, reduce = 0%
|
|||
|
2013-09-18 02:13:05,516 Stage-1 map = 18%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:06,523 Stage-1 map = 18%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:07,530 Stage-1 map = 18%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:08,535 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:09,542 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:10,549 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:11,555 Stage-1 map = 44%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:12,561 Stage-1 map = 44%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:13,567 Stage-1 map = 44%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:14,572 Stage-1 map = 56%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:15,577 Stage-1 map = 56%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:16,582 Stage-1 map = 56%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:17,588 Stage-1 map = 63%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:18,594 Stage-1 map = 63%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:19,600 Stage-1 map = 63%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:20,605 Stage-1 map = 78%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:21,611 Stage-1 map = 78%, reduce = 0%, Cumulative CPU 41.56 sec
|
|||
|
2013-09-18 02:13:22,618 Stage-1 map = 82%, reduce = 0%, Cumulative CPU 65.53 sec
|
|||
|
2013-09-18 02:13:23,623 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 95.51 sec
|
|||
|
2013-09-18 02:13:24,629 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 95.51 sec
|
|||
|
2013-09-18 02:13:25,634 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 151.45 sec
|
|||
|
2013-09-18 02:13:26,639 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 151.45 sec
|
|||
|
2013-09-18 02:13:27,643 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 151.45 sec
|
|||
|
2013-09-18 02:13:28,648 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 151.45 sec
|
|||
|
2013-09-18 02:13:29,653 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 151.45 sec
|
|||
|
2013-09-18 02:13:30,661 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 153.61 sec
|
|||
|
2013-09-18 02:13:31,667 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 153.61 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 minutes 33 seconds 610 msec
|
|||
|
Ended Job = job_201309172235_0200
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 153.61 sec HDFS Read: 1082943442 HDFS Write: 9 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 2 minutes 33 seconds 610 msec
|
|||
|
OK
|
|||
|
10000000
|
|||
|
Time taken: 44.55 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT count(*) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_11198@mturlrep13_201309180213_1431795111.txt
|
|||
|
hive> SELECT count(*) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0201
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:13:41,815 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:13:48,846 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 02:13:51,858 Stage-1 map = 18%, reduce = 0%
|
|||
|
2013-09-18 02:13:54,871 Stage-1 map = 29%, reduce = 0%
|
|||
|
2013-09-18 02:13:57,884 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 02:14:00,897 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 02:14:03,913 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 105.42 sec
|
|||
|
2013-09-18 02:14:04,919 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 105.42 sec
|
|||
|
2013-09-18 02:14:05,925 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 105.42 sec
|
|||
|
2013-09-18 02:14:06,931 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 105.42 sec
|
|||
|
2013-09-18 02:14:07,937 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 113.25 sec
|
|||
|
2013-09-18 02:14:08,942 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 121.6 sec
|
|||
|
2013-09-18 02:14:09,947 Stage-1 map = 96%, reduce = 0%, Cumulative CPU 128.96 sec
|
|||
|
2013-09-18 02:14:10,952 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 140.92 sec
|
|||
|
2013-09-18 02:14:11,957 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 140.92 sec
|
|||
|
2013-09-18 02:14:12,967 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 140.92 sec
|
|||
|
2013-09-18 02:14:13,971 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 140.92 sec
|
|||
|
2013-09-18 02:14:14,976 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 140.92 sec
|
|||
|
2013-09-18 02:14:15,983 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 142.87 sec
|
|||
|
2013-09-18 02:14:16,988 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 142.87 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 minutes 22 seconds 870 msec
|
|||
|
Ended Job = job_201309172235_0201
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 142.87 sec HDFS Read: 1082943442 HDFS Write: 9 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 2 minutes 22 seconds 870 msec
|
|||
|
OK
|
|||
|
10000000
|
|||
|
Time taken: 43.513 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT count(*) FROM hits_10m WHERE AdvEngineID != 0;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_12353@mturlrep13_201309180214_1687900933.txt
|
|||
|
hive> SELECT count(*) FROM hits_10m WHERE AdvEngineID != 0;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0202
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:14:33,532 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:14:38,563 Stage-1 map = 25%, reduce = 0%, Cumulative CPU 5.8 sec
|
|||
|
2013-09-18 02:14:39,570 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.27 sec
|
|||
|
2013-09-18 02:14:40,577 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.27 sec
|
|||
|
2013-09-18 02:14:41,582 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.27 sec
|
|||
|
2013-09-18 02:14:42,588 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.27 sec
|
|||
|
2013-09-18 02:14:43,593 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.27 sec
|
|||
|
2013-09-18 02:14:44,598 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.27 sec
|
|||
|
2013-09-18 02:14:45,603 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.27 sec
|
|||
|
2013-09-18 02:14:46,611 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 27.34 sec
|
|||
|
2013-09-18 02:14:47,617 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 27.34 sec
|
|||
|
2013-09-18 02:14:48,624 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 27.34 sec
|
|||
|
MapReduce Total cumulative CPU time: 27 seconds 340 msec
|
|||
|
Ended Job = job_201309172235_0202
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 27.34 sec HDFS Read: 907716 HDFS Write: 7 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 27 seconds 340 msec
|
|||
|
OK
|
|||
|
171127
|
|||
|
Time taken: 25.332 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT count(*) FROM hits_10m WHERE AdvEngineID != 0;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_13496@mturlrep13_201309180214_186944360.txt
|
|||
|
hive> SELECT count(*) FROM hits_10m WHERE AdvEngineID != 0;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0203
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:14:58,925 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:15:03,954 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.94 sec
|
|||
|
2013-09-18 02:15:04,962 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.94 sec
|
|||
|
2013-09-18 02:15:05,970 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.94 sec
|
|||
|
2013-09-18 02:15:06,975 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.94 sec
|
|||
|
2013-09-18 02:15:07,982 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.94 sec
|
|||
|
2013-09-18 02:15:08,988 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.94 sec
|
|||
|
2013-09-18 02:15:09,994 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.94 sec
|
|||
|
2013-09-18 02:15:11,001 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 23.94 sec
|
|||
|
2013-09-18 02:15:12,008 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 25.75 sec
|
|||
|
2013-09-18 02:15:13,015 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 25.75 sec
|
|||
|
MapReduce Total cumulative CPU time: 25 seconds 750 msec
|
|||
|
Ended Job = job_201309172235_0203
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 25.75 sec HDFS Read: 907716 HDFS Write: 7 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 25 seconds 750 msec
|
|||
|
OK
|
|||
|
171127
|
|||
|
Time taken: 22.592 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT count(*) FROM hits_10m WHERE AdvEngineID != 0;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_15074@mturlrep13_201309180215_1069318863.txt
|
|||
|
hive> SELECT count(*) FROM hits_10m WHERE AdvEngineID != 0;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0204
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:15:22,365 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:15:27,393 Stage-1 map = 75%, reduce = 0%, Cumulative CPU 17.92 sec
|
|||
|
2013-09-18 02:15:28,401 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 24.51 sec
|
|||
|
2013-09-18 02:15:29,409 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 24.51 sec
|
|||
|
2013-09-18 02:15:30,415 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 24.51 sec
|
|||
|
2013-09-18 02:15:31,421 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 24.51 sec
|
|||
|
2013-09-18 02:15:32,427 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 24.51 sec
|
|||
|
2013-09-18 02:15:33,433 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 24.51 sec
|
|||
|
2013-09-18 02:15:34,439 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 24.51 sec
|
|||
|
2013-09-18 02:15:35,447 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 26.63 sec
|
|||
|
2013-09-18 02:15:36,454 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 26.63 sec
|
|||
|
2013-09-18 02:15:37,461 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 26.63 sec
|
|||
|
MapReduce Total cumulative CPU time: 26 seconds 630 msec
|
|||
|
Ended Job = job_201309172235_0204
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 26.63 sec HDFS Read: 907716 HDFS Write: 7 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 26 seconds 630 msec
|
|||
|
OK
|
|||
|
171127
|
|||
|
Time taken: 22.629 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_16492@mturlrep13_201309180215_2118658323.txt
|
|||
|
hive> SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0205
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:15:54,647 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:16:02,687 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.0 sec
|
|||
|
2013-09-18 02:16:03,694 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.0 sec
|
|||
|
2013-09-18 02:16:04,700 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.0 sec
|
|||
|
2013-09-18 02:16:05,706 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.0 sec
|
|||
|
2013-09-18 02:16:06,712 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.0 sec
|
|||
|
2013-09-18 02:16:07,717 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.0 sec
|
|||
|
2013-09-18 02:16:08,724 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.0 sec
|
|||
|
2013-09-18 02:16:09,731 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 34.0 sec
|
|||
|
2013-09-18 02:16:10,739 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 36.22 sec
|
|||
|
2013-09-18 02:16:11,745 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 36.22 sec
|
|||
|
MapReduce Total cumulative CPU time: 36 seconds 220 msec
|
|||
|
Ended Job = job_201309172235_0205
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 36.22 sec HDFS Read: 8109219 HDFS Write: 30 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 36 seconds 220 msec
|
|||
|
OK
|
|||
|
Time taken: 27.558 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_17739@mturlrep13_201309180216_1507251512.txt
|
|||
|
hive> SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0206
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:16:21,573 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:16:28,610 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.85 sec
|
|||
|
2013-09-18 02:16:29,617 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.85 sec
|
|||
|
2013-09-18 02:16:30,623 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.85 sec
|
|||
|
2013-09-18 02:16:31,628 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.85 sec
|
|||
|
2013-09-18 02:16:32,634 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.85 sec
|
|||
|
2013-09-18 02:16:33,639 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.85 sec
|
|||
|
2013-09-18 02:16:34,645 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.85 sec
|
|||
|
2013-09-18 02:16:35,651 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 31.85 sec
|
|||
|
2013-09-18 02:16:36,661 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 33.81 sec
|
|||
|
2013-09-18 02:16:37,667 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 33.81 sec
|
|||
|
MapReduce Total cumulative CPU time: 33 seconds 810 msec
|
|||
|
Ended Job = job_201309172235_0206
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 33.81 sec HDFS Read: 8109219 HDFS Write: 30 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 33 seconds 810 msec
|
|||
|
OK
|
|||
|
Time taken: 23.861 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_18970@mturlrep13_201309180216_474859328.txt
|
|||
|
hive> SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0207
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:16:48,424 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:16:54,457 Stage-1 map = 25%, reduce = 0%, Cumulative CPU 7.2 sec
|
|||
|
2013-09-18 02:16:55,465 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.03 sec
|
|||
|
2013-09-18 02:16:56,471 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.03 sec
|
|||
|
2013-09-18 02:16:57,477 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.03 sec
|
|||
|
2013-09-18 02:16:58,482 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.03 sec
|
|||
|
2013-09-18 02:16:59,487 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.03 sec
|
|||
|
2013-09-18 02:17:00,493 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.03 sec
|
|||
|
2013-09-18 02:17:01,499 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.03 sec
|
|||
|
2013-09-18 02:17:02,505 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 32.03 sec
|
|||
|
2013-09-18 02:17:03,513 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 33.98 sec
|
|||
|
2013-09-18 02:17:04,519 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 33.98 sec
|
|||
|
MapReduce Total cumulative CPU time: 33 seconds 980 msec
|
|||
|
Ended Job = job_201309172235_0207
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 33.98 sec HDFS Read: 8109219 HDFS Write: 30 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 33 seconds 980 msec
|
|||
|
OK
|
|||
|
Time taken: 24.986 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT sum(UserID) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_20233@mturlrep13_201309180217_1020705203.txt
|
|||
|
hive> SELECT sum(UserID) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0208
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:17:21,071 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:17:27,108 Stage-1 map = 25%, reduce = 0%, Cumulative CPU 6.28 sec
|
|||
|
2013-09-18 02:17:28,115 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.34 sec
|
|||
|
2013-09-18 02:17:29,122 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.34 sec
|
|||
|
2013-09-18 02:17:30,127 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.34 sec
|
|||
|
2013-09-18 02:17:31,132 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.34 sec
|
|||
|
2013-09-18 02:17:32,137 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.34 sec
|
|||
|
2013-09-18 02:17:33,143 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.34 sec
|
|||
|
2013-09-18 02:17:34,149 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.34 sec
|
|||
|
2013-09-18 02:17:35,155 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 27.34 sec
|
|||
|
2013-09-18 02:17:36,163 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 29.22 sec
|
|||
|
2013-09-18 02:17:37,169 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 29.22 sec
|
|||
|
MapReduce Total cumulative CPU time: 29 seconds 220 msec
|
|||
|
Ended Job = job_201309172235_0208
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 29.22 sec HDFS Read: 57312623 HDFS Write: 21 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 29 seconds 220 msec
|
|||
|
OK
|
|||
|
-4662894107982093709
|
|||
|
Time taken: 26.148 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT sum(UserID) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_21379@mturlrep13_201309180217_1577746245.txt
|
|||
|
hive> SELECT sum(UserID) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0209
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:17:46,496 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:17:52,527 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.37 sec
|
|||
|
2013-09-18 02:17:53,534 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.37 sec
|
|||
|
2013-09-18 02:17:54,540 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.37 sec
|
|||
|
2013-09-18 02:17:55,545 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.37 sec
|
|||
|
2013-09-18 02:17:56,550 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.37 sec
|
|||
|
2013-09-18 02:17:57,555 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.37 sec
|
|||
|
2013-09-18 02:17:58,561 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.37 sec
|
|||
|
2013-09-18 02:17:59,565 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.37 sec
|
|||
|
2013-09-18 02:18:00,574 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 29.46 sec
|
|||
|
2013-09-18 02:18:01,581 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 29.46 sec
|
|||
|
2013-09-18 02:18:02,587 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 29.46 sec
|
|||
|
MapReduce Total cumulative CPU time: 29 seconds 460 msec
|
|||
|
Ended Job = job_201309172235_0209
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 29.46 sec HDFS Read: 57312623 HDFS Write: 21 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 29 seconds 460 msec
|
|||
|
OK
|
|||
|
-4662894107982093709
|
|||
|
Time taken: 23.564 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT sum(UserID) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_22513@mturlrep13_201309180218_799027004.txt
|
|||
|
hive> SELECT sum(UserID) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0210
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:18:12,872 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:18:17,906 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 13.71 sec
|
|||
|
2013-09-18 02:18:18,915 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.76 sec
|
|||
|
2013-09-18 02:18:19,922 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.76 sec
|
|||
|
2013-09-18 02:18:20,928 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.76 sec
|
|||
|
2013-09-18 02:18:21,933 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.76 sec
|
|||
|
2013-09-18 02:18:22,938 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.76 sec
|
|||
|
2013-09-18 02:18:23,943 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.76 sec
|
|||
|
2013-09-18 02:18:24,947 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.76 sec
|
|||
|
2013-09-18 02:18:25,953 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 27.76 sec
|
|||
|
2013-09-18 02:18:26,960 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 29.92 sec
|
|||
|
2013-09-18 02:18:27,966 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 29.92 sec
|
|||
|
MapReduce Total cumulative CPU time: 29 seconds 920 msec
|
|||
|
Ended Job = job_201309172235_0210
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 29.92 sec HDFS Read: 57312623 HDFS Write: 21 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 29 seconds 920 msec
|
|||
|
OK
|
|||
|
-4662894107982093709
|
|||
|
Time taken: 23.508 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT count(DISTINCT UserID) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_23657@mturlrep13_201309180218_452234815.txt
|
|||
|
hive> SELECT count(DISTINCT UserID) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0211
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:18:45,243 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:18:53,314 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 02:18:56,332 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.1 sec
|
|||
|
2013-09-18 02:18:57,339 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.1 sec
|
|||
|
2013-09-18 02:18:58,346 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.1 sec
|
|||
|
2013-09-18 02:18:59,352 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.1 sec
|
|||
|
2013-09-18 02:19:00,359 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.1 sec
|
|||
|
2013-09-18 02:19:01,364 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.1 sec
|
|||
|
2013-09-18 02:19:02,370 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.1 sec
|
|||
|
2013-09-18 02:19:03,375 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 50.1 sec
|
|||
|
2013-09-18 02:19:04,380 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 50.1 sec
|
|||
|
2013-09-18 02:19:05,386 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 50.9 sec
|
|||
|
2013-09-18 02:19:06,391 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 50.9 sec
|
|||
|
2013-09-18 02:19:07,396 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 50.9 sec
|
|||
|
2013-09-18 02:19:08,404 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 57.82 sec
|
|||
|
2013-09-18 02:19:09,409 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 57.82 sec
|
|||
|
2013-09-18 02:19:10,416 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 57.82 sec
|
|||
|
MapReduce Total cumulative CPU time: 57 seconds 820 msec
|
|||
|
Ended Job = job_201309172235_0211
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 57.82 sec HDFS Read: 57312623 HDFS Write: 8 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 57 seconds 820 msec
|
|||
|
OK
|
|||
|
2037258
|
|||
|
Time taken: 35.237 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT count(DISTINCT UserID) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_24818@mturlrep13_201309180219_76164215.txt
|
|||
|
hive> SELECT count(DISTINCT UserID) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0212
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:19:20,706 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:19:27,739 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 02:19:30,759 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:31,766 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:32,772 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:33,777 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:34,784 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:35,790 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:36,796 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:37,801 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:38,807 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:39,812 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:40,818 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:41,823 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 50.88 sec
|
|||
|
2013-09-18 02:19:42,830 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 58.39 sec
|
|||
|
2013-09-18 02:19:43,835 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 58.39 sec
|
|||
|
MapReduce Total cumulative CPU time: 58 seconds 390 msec
|
|||
|
Ended Job = job_201309172235_0212
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 58.39 sec HDFS Read: 57312623 HDFS Write: 8 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 58 seconds 390 msec
|
|||
|
OK
|
|||
|
2037258
|
|||
|
Time taken: 31.445 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT count(DISTINCT UserID) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_25977@mturlrep13_201309180219_1809129811.txt
|
|||
|
hive> SELECT count(DISTINCT UserID) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0213
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:19:53,965 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:20:00,997 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 02:20:03,011 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 25.89 sec
|
|||
|
2013-09-18 02:20:04,018 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:05,026 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:06,031 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:07,036 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:08,042 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:09,049 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:10,055 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:11,061 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:12,066 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:13,072 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:14,078 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:15,084 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 52.47 sec
|
|||
|
2013-09-18 02:20:16,091 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 61.2 sec
|
|||
|
2013-09-18 02:20:17,096 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 61.2 sec
|
|||
|
2013-09-18 02:20:18,102 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 61.2 sec
|
|||
|
MapReduce Total cumulative CPU time: 1 minutes 1 seconds 200 msec
|
|||
|
Ended Job = job_201309172235_0213
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 61.2 sec HDFS Read: 57312623 HDFS Write: 8 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 1 minutes 1 seconds 200 msec
|
|||
|
OK
|
|||
|
2037258
|
|||
|
Time taken: 32.552 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT count(DISTINCT SearchPhrase) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_27834@mturlrep13_201309180220_1270552368.txt
|
|||
|
hive> SELECT count(DISTINCT SearchPhrase) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0214
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:20:35,012 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:20:43,045 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 02:20:45,062 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:46,068 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:47,074 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:48,079 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:49,085 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:50,091 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:51,097 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:52,103 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:53,109 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:54,115 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:55,121 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:56,126 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.7 sec
|
|||
|
2013-09-18 02:20:57,133 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 50.24 sec
|
|||
|
2013-09-18 02:20:58,139 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 50.24 sec
|
|||
|
2013-09-18 02:20:59,144 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 50.24 sec
|
|||
|
MapReduce Total cumulative CPU time: 50 seconds 240 msec
|
|||
|
Ended Job = job_201309172235_0214
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 50.24 sec HDFS Read: 27820105 HDFS Write: 8 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 50 seconds 240 msec
|
|||
|
OK
|
|||
|
1110413
|
|||
|
Time taken: 34.063 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT count(DISTINCT SearchPhrase) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_29013@mturlrep13_201309180221_818576677.txt
|
|||
|
hive> SELECT count(DISTINCT SearchPhrase) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0215
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:21:08,404 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:21:16,449 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 10.79 sec
|
|||
|
2013-09-18 02:21:17,457 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.2 sec
|
|||
|
2013-09-18 02:21:18,464 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.2 sec
|
|||
|
2013-09-18 02:21:19,469 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.2 sec
|
|||
|
2013-09-18 02:21:20,475 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.2 sec
|
|||
|
2013-09-18 02:21:21,481 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.2 sec
|
|||
|
2013-09-18 02:21:22,488 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.2 sec
|
|||
|
2013-09-18 02:21:23,494 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.2 sec
|
|||
|
2013-09-18 02:21:24,500 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.2 sec
|
|||
|
2013-09-18 02:21:25,506 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.2 sec
|
|||
|
2013-09-18 02:21:26,511 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.2 sec
|
|||
|
2013-09-18 02:21:27,516 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 43.2 sec
|
|||
|
2013-09-18 02:21:28,523 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 50.7 sec
|
|||
|
2013-09-18 02:21:29,528 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 50.7 sec
|
|||
|
2013-09-18 02:21:30,533 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 50.7 sec
|
|||
|
MapReduce Total cumulative CPU time: 50 seconds 700 msec
|
|||
|
Ended Job = job_201309172235_0215
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 50.7 sec HDFS Read: 27820105 HDFS Write: 8 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 50 seconds 700 msec
|
|||
|
OK
|
|||
|
1110413
|
|||
|
Time taken: 29.607 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT count(DISTINCT SearchPhrase) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_30188@mturlrep13_201309180221_356829267.txt
|
|||
|
hive> SELECT count(DISTINCT SearchPhrase) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0216
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:21:40,608 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:21:47,636 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 02:21:48,646 Stage-1 map = 96%, reduce = 0%, Cumulative CPU 31.99 sec
|
|||
|
2013-09-18 02:21:49,653 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
|
|||
|
2013-09-18 02:21:50,659 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
|
|||
|
2013-09-18 02:21:51,666 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
|
|||
|
2013-09-18 02:21:52,671 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
|
|||
|
2013-09-18 02:21:53,677 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
|
|||
|
2013-09-18 02:21:54,683 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
|
|||
|
2013-09-18 02:21:55,690 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.43 sec
|
|||
|
2013-09-18 02:21:56,695 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.43 sec
|
|||
|
2013-09-18 02:21:57,700 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.43 sec
|
|||
|
2013-09-18 02:21:58,704 Stage-1 map = 100%, reduce = 78%, Cumulative CPU 43.43 sec
|
|||
|
2013-09-18 02:21:59,710 Stage-1 map = 100%, reduce = 78%, Cumulative CPU 43.43 sec
|
|||
|
2013-09-18 02:22:00,717 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 51.3 sec
|
|||
|
2013-09-18 02:22:01,727 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 51.3 sec
|
|||
|
MapReduce Total cumulative CPU time: 51 seconds 300 msec
|
|||
|
Ended Job = job_201309172235_0216
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 51.3 sec HDFS Read: 27820105 HDFS Write: 8 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 51 seconds 300 msec
|
|||
|
OK
|
|||
|
1110413
|
|||
|
Time taken: 29.268 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT min(EventDate), max(EventDate) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_31349@mturlrep13_201309180222_1184363660.txt
|
|||
|
hive> SELECT min(EventDate), max(EventDate) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0217
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:22:19,289 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:22:25,321 Stage-1 map = 25%, reduce = 0%, Cumulative CPU 6.4 sec
|
|||
|
2013-09-18 02:22:26,329 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.67 sec
|
|||
|
2013-09-18 02:22:27,336 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.67 sec
|
|||
|
2013-09-18 02:22:28,342 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.67 sec
|
|||
|
2013-09-18 02:22:29,347 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.67 sec
|
|||
|
2013-09-18 02:22:30,352 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.67 sec
|
|||
|
2013-09-18 02:22:31,358 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.67 sec
|
|||
|
2013-09-18 02:22:32,364 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.67 sec
|
|||
|
2013-09-18 02:22:33,372 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 28.78 sec
|
|||
|
2013-09-18 02:22:34,379 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 28.78 sec
|
|||
|
2013-09-18 02:22:35,384 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 28.78 sec
|
|||
|
MapReduce Total cumulative CPU time: 28 seconds 780 msec
|
|||
|
Ended Job = job_201309172235_0217
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 28.78 sec HDFS Read: 597016 HDFS Write: 6 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 28 seconds 780 msec
|
|||
|
OK
|
|||
|
Time taken: 25.999 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT min(EventDate), max(EventDate) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_32505@mturlrep13_201309180222_1320628760.txt
|
|||
|
hive> SELECT min(EventDate), max(EventDate) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0218
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:22:44,642 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:22:50,675 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.81 sec
|
|||
|
2013-09-18 02:22:51,682 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.81 sec
|
|||
|
2013-09-18 02:22:52,688 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.81 sec
|
|||
|
2013-09-18 02:22:53,694 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.81 sec
|
|||
|
2013-09-18 02:22:54,699 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.81 sec
|
|||
|
2013-09-18 02:22:55,704 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.81 sec
|
|||
|
2013-09-18 02:22:56,710 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.81 sec
|
|||
|
2013-09-18 02:22:57,717 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 26.81 sec
|
|||
|
2013-09-18 02:22:58,725 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 28.69 sec
|
|||
|
2013-09-18 02:22:59,731 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 28.69 sec
|
|||
|
MapReduce Total cumulative CPU time: 28 seconds 690 msec
|
|||
|
Ended Job = job_201309172235_0218
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 28.69 sec HDFS Read: 597016 HDFS Write: 6 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 28 seconds 690 msec
|
|||
|
OK
|
|||
|
Time taken: 22.443 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT min(EventDate), max(EventDate) FROM hits_10m;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_1222@mturlrep13_201309180223_1487045078.txt
|
|||
|
hive> SELECT min(EventDate), max(EventDate) FROM hits_10m;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0219
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:23:09,025 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:23:15,057 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.89 sec
|
|||
|
2013-09-18 02:23:16,064 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.89 sec
|
|||
|
2013-09-18 02:23:17,072 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.89 sec
|
|||
|
2013-09-18 02:23:18,077 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.89 sec
|
|||
|
2013-09-18 02:23:19,083 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.89 sec
|
|||
|
2013-09-18 02:23:20,088 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.89 sec
|
|||
|
2013-09-18 02:23:21,094 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.89 sec
|
|||
|
2013-09-18 02:23:22,100 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 25.89 sec
|
|||
|
2013-09-18 02:23:23,108 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 27.99 sec
|
|||
|
2013-09-18 02:23:24,115 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 27.99 sec
|
|||
|
MapReduce Total cumulative CPU time: 27 seconds 990 msec
|
|||
|
Ended Job = job_201309172235_0219
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 27.99 sec HDFS Read: 597016 HDFS Write: 6 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 27 seconds 990 msec
|
|||
|
OK
|
|||
|
Time taken: 22.559 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT AdvEngineID, count(*) AS c FROM hits_10m WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY c DESC;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_2436@mturlrep13_201309180223_855141249.txt
|
|||
|
hive> SELECT AdvEngineID, count(*) AS c FROM hits_10m WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY c DESC;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0220
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:23:40,593 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:23:45,623 Stage-1 map = 25%, reduce = 0%, Cumulative CPU 6.06 sec
|
|||
|
2013-09-18 02:23:46,632 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.31 sec
|
|||
|
2013-09-18 02:23:47,640 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.31 sec
|
|||
|
2013-09-18 02:23:48,647 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.31 sec
|
|||
|
2013-09-18 02:23:49,652 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.31 sec
|
|||
|
2013-09-18 02:23:50,659 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.31 sec
|
|||
|
2013-09-18 02:23:51,665 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.31 sec
|
|||
|
2013-09-18 02:23:52,672 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 25.31 sec
|
|||
|
2013-09-18 02:23:53,677 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 25.31 sec
|
|||
|
2013-09-18 02:23:54,686 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 29.84 sec
|
|||
|
2013-09-18 02:23:55,693 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 29.84 sec
|
|||
|
MapReduce Total cumulative CPU time: 29 seconds 840 msec
|
|||
|
Ended Job = job_201309172235_0220
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0221
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:23:58,461 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:24:00,483 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 02:24:01,490 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 02:24:02,496 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 02:24:03,501 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 02:24:04,517 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 02:24:05,523 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 02:24:06,528 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 02:24:07,534 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 02:24:08,540 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.3 sec
|
|||
|
2013-09-18 02:24:09,547 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.3 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 300 msec
|
|||
|
Ended Job = job_201309172235_0221
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 29.84 sec HDFS Read: 907716 HDFS Write: 384 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.3 sec HDFS Read: 1153 HDFS Write: 60 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 32 seconds 140 msec
|
|||
|
OK
|
|||
|
Time taken: 38.784 seconds, Fetched: 9 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT AdvEngineID, count(*) AS c FROM hits_10m WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY c DESC;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_4138@mturlrep13_201309180224_1265964624.txt
|
|||
|
hive> SELECT AdvEngineID, count(*) AS c FROM hits_10m WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY c DESC;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0222
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:24:19,801 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:24:24,833 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.53 sec
|
|||
|
2013-09-18 02:24:25,840 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.53 sec
|
|||
|
2013-09-18 02:24:26,847 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.53 sec
|
|||
|
2013-09-18 02:24:27,852 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.53 sec
|
|||
|
2013-09-18 02:24:28,858 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.53 sec
|
|||
|
2013-09-18 02:24:29,863 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.53 sec
|
|||
|
2013-09-18 02:24:30,870 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 26.53 sec
|
|||
|
2013-09-18 02:24:31,876 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 26.53 sec
|
|||
|
2013-09-18 02:24:32,884 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 30.38 sec
|
|||
|
2013-09-18 02:24:33,891 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 30.38 sec
|
|||
|
2013-09-18 02:24:34,898 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 30.38 sec
|
|||
|
MapReduce Total cumulative CPU time: 30 seconds 380 msec
|
|||
|
Ended Job = job_201309172235_0222
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0223
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:24:37,418 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:24:39,427 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 02:24:40,433 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 02:24:41,438 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 02:24:42,447 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 02:24:43,452 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 02:24:44,457 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 02:24:45,462 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 02:24:46,468 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 02:24:47,474 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.26 sec
|
|||
|
2013-09-18 02:24:48,480 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.26 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 260 msec
|
|||
|
Ended Job = job_201309172235_0223
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 30.38 sec HDFS Read: 907716 HDFS Write: 384 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.26 sec HDFS Read: 1153 HDFS Write: 60 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 32 seconds 640 msec
|
|||
|
OK
|
|||
|
Time taken: 37.082 seconds, Fetched: 9 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT AdvEngineID, count(*) AS c FROM hits_10m WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY c DESC;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_5920@mturlrep13_201309180224_277886677.txt
|
|||
|
hive> SELECT AdvEngineID, count(*) AS c FROM hits_10m WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY c DESC;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0224
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:24:57,648 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:25:02,677 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.93 sec
|
|||
|
2013-09-18 02:25:03,685 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.93 sec
|
|||
|
2013-09-18 02:25:04,692 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.93 sec
|
|||
|
2013-09-18 02:25:05,699 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.93 sec
|
|||
|
2013-09-18 02:25:06,705 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.93 sec
|
|||
|
2013-09-18 02:25:07,711 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.93 sec
|
|||
|
2013-09-18 02:25:08,717 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.93 sec
|
|||
|
2013-09-18 02:25:09,723 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.93 sec
|
|||
|
2013-09-18 02:25:10,731 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 26.14 sec
|
|||
|
2013-09-18 02:25:11,738 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 28.35 sec
|
|||
|
2013-09-18 02:25:12,745 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 28.35 sec
|
|||
|
MapReduce Total cumulative CPU time: 28 seconds 350 msec
|
|||
|
Ended Job = job_201309172235_0224
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0225
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:25:15,963 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:25:17,971 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 02:25:18,976 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 02:25:19,986 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 02:25:20,991 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 02:25:21,995 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 02:25:23,000 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 02:25:24,004 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 02:25:25,009 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 02:25:26,014 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
|
|||
|
2013-09-18 02:25:27,020 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
|
|||
|
2013-09-18 02:25:28,025 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 100 msec
|
|||
|
Ended Job = job_201309172235_0225
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 28.35 sec HDFS Read: 907716 HDFS Write: 384 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.1 sec HDFS Read: 1153 HDFS Write: 60 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 30 seconds 450 msec
|
|||
|
OK
|
|||
|
Time taken: 37.652 seconds, Fetched: 9 row(s)
|
|||
|
hive> quit;
|
|||
|
-- мощная фильтрация. После фильтрации почти ничего не остаётся, но делаем ещё агрегацию.;
|
|||
|
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_8653@mturlrep13_201309180225_1991831138.txt
|
|||
|
hive> SELECT RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0226
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:25:45,679 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:25:52,712 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 02:25:56,736 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.02 sec
|
|||
|
2013-09-18 02:25:57,743 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.02 sec
|
|||
|
2013-09-18 02:25:58,751 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.02 sec
|
|||
|
2013-09-18 02:25:59,757 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.02 sec
|
|||
|
2013-09-18 02:26:00,764 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.02 sec
|
|||
|
2013-09-18 02:26:01,770 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.02 sec
|
|||
|
2013-09-18 02:26:02,776 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.02 sec
|
|||
|
2013-09-18 02:26:03,782 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 56.02 sec
|
|||
|
2013-09-18 02:26:04,788 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 56.02 sec
|
|||
|
2013-09-18 02:26:05,793 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 57.54 sec
|
|||
|
2013-09-18 02:26:06,802 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 62.25 sec
|
|||
|
2013-09-18 02:26:07,808 Stage-1 map = 100%, reduce = 91%, Cumulative CPU 62.25 sec
|
|||
|
2013-09-18 02:26:08,813 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 68.6 sec
|
|||
|
2013-09-18 02:26:09,820 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 68.6 sec
|
|||
|
MapReduce Total cumulative CPU time: 1 minutes 8 seconds 600 msec
|
|||
|
Ended Job = job_201309172235_0226
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0227
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:26:13,294 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:26:15,303 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.41 sec
|
|||
|
2013-09-18 02:26:16,320 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.41 sec
|
|||
|
2013-09-18 02:26:17,325 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.41 sec
|
|||
|
2013-09-18 02:26:18,330 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.41 sec
|
|||
|
2013-09-18 02:26:19,335 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.41 sec
|
|||
|
2013-09-18 02:26:20,341 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.41 sec
|
|||
|
2013-09-18 02:26:21,346 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.41 sec
|
|||
|
2013-09-18 02:26:22,352 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 1.41 sec
|
|||
|
2013-09-18 02:26:23,358 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.16 sec
|
|||
|
2013-09-18 02:26:24,365 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.16 sec
|
|||
|
2013-09-18 02:26:25,371 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.16 sec
|
|||
|
MapReduce Total cumulative CPU time: 3 seconds 160 msec
|
|||
|
Ended Job = job_201309172235_0227
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 68.6 sec HDFS Read: 67340015 HDFS Write: 100142 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 3.16 sec HDFS Read: 100911 HDFS Write: 96 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 1 minutes 11 seconds 760 msec
|
|||
|
OK
|
|||
|
Time taken: 49.973 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_10416@mturlrep13_201309180226_2116920110.txt
|
|||
|
hive> SELECT RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0228
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:26:34,811 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:26:42,870 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 02:26:44,885 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 27.09 sec
|
|||
|
2013-09-18 02:26:45,892 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.71 sec
|
|||
|
2013-09-18 02:26:46,901 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.71 sec
|
|||
|
2013-09-18 02:26:47,907 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.71 sec
|
|||
|
2013-09-18 02:26:48,913 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.71 sec
|
|||
|
2013-09-18 02:26:49,920 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.71 sec
|
|||
|
2013-09-18 02:26:50,926 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.71 sec
|
|||
|
2013-09-18 02:26:51,932 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.71 sec
|
|||
|
2013-09-18 02:26:52,939 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 56.71 sec
|
|||
|
2013-09-18 02:26:53,945 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 56.71 sec
|
|||
|
2013-09-18 02:26:54,951 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 56.71 sec
|
|||
|
2013-09-18 02:26:55,959 Stage-1 map = 100%, reduce = 91%, Cumulative CPU 62.1 sec
|
|||
|
2013-09-18 02:26:56,965 Stage-1 map = 100%, reduce = 91%, Cumulative CPU 62.1 sec
|
|||
|
2013-09-18 02:26:57,971 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 68.76 sec
|
|||
|
2013-09-18 02:26:58,977 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 68.76 sec
|
|||
|
MapReduce Total cumulative CPU time: 1 minutes 8 seconds 760 msec
|
|||
|
Ended Job = job_201309172235_0228
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0229
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:27:02,462 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:27:04,472 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.37 sec
|
|||
|
2013-09-18 02:27:05,478 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.37 sec
|
|||
|
2013-09-18 02:27:06,483 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.37 sec
|
|||
|
2013-09-18 02:27:07,488 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.37 sec
|
|||
|
2013-09-18 02:27:08,493 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.37 sec
|
|||
|
2013-09-18 02:27:09,498 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.37 sec
|
|||
|
2013-09-18 02:27:10,504 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.37 sec
|
|||
|
2013-09-18 02:27:11,511 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 1.37 sec
|
|||
|
2013-09-18 02:27:12,517 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.12 sec
|
|||
|
2013-09-18 02:27:13,523 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.12 sec
|
|||
|
2013-09-18 02:27:14,530 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.12 sec
|
|||
|
MapReduce Total cumulative CPU time: 3 seconds 120 msec
|
|||
|
Ended Job = job_201309172235_0229
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 68.76 sec HDFS Read: 67340015 HDFS Write: 100142 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 3.12 sec HDFS Read: 100911 HDFS Write: 96 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 1 minutes 11 seconds 880 msec
|
|||
|
OK
|
|||
|
Time taken: 47.282 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_12148@mturlrep13_201309180227_553335692.txt
|
|||
|
hive> SELECT RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0230
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:27:23,737 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:27:31,772 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 02:27:34,792 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.34 sec
|
|||
|
2013-09-18 02:27:35,800 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.34 sec
|
|||
|
2013-09-18 02:27:36,806 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.34 sec
|
|||
|
2013-09-18 02:27:37,812 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.34 sec
|
|||
|
2013-09-18 02:27:38,819 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.34 sec
|
|||
|
2013-09-18 02:27:39,825 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.34 sec
|
|||
|
2013-09-18 02:27:40,831 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 52.34 sec
|
|||
|
2013-09-18 02:27:41,837 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 52.34 sec
|
|||
|
2013-09-18 02:27:42,842 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 52.34 sec
|
|||
|
2013-09-18 02:27:43,847 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 52.34 sec
|
|||
|
2013-09-18 02:27:44,853 Stage-1 map = 100%, reduce = 91%, Cumulative CPU 57.59 sec
|
|||
|
2013-09-18 02:27:45,858 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 64.28 sec
|
|||
|
2013-09-18 02:27:46,864 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 64.28 sec
|
|||
|
2013-09-18 02:27:47,870 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 64.28 sec
|
|||
|
MapReduce Total cumulative CPU time: 1 minutes 4 seconds 280 msec
|
|||
|
Ended Job = job_201309172235_0230
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0231
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:27:50,366 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:27:52,375 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.35 sec
|
|||
|
2013-09-18 02:27:53,380 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.35 sec
|
|||
|
2013-09-18 02:27:54,385 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.35 sec
|
|||
|
2013-09-18 02:27:55,389 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.35 sec
|
|||
|
2013-09-18 02:27:56,393 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.35 sec
|
|||
|
2013-09-18 02:27:57,398 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.35 sec
|
|||
|
2013-09-18 02:27:58,403 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.35 sec
|
|||
|
2013-09-18 02:27:59,408 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.35 sec
|
|||
|
2013-09-18 02:28:00,413 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 1.35 sec
|
|||
|
2013-09-18 02:28:01,419 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.99 sec
|
|||
|
2013-09-18 02:28:02,425 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.99 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 990 msec
|
|||
|
Ended Job = job_201309172235_0231
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 64.28 sec HDFS Read: 67340015 HDFS Write: 100142 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.99 sec HDFS Read: 100911 HDFS Write: 96 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 1 minutes 7 seconds 270 msec
|
|||
|
OK
|
|||
|
Time taken: 46.027 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
-- агрегация, среднее количество ключей.;
|
|||
|
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY c DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_13875@mturlrep13_201309180228_1421953116.txt
|
|||
|
hive> SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0232
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:28:19,616 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:28:26,647 Stage-1 map = 43%, reduce = 0%
|
|||
|
2013-09-18 02:28:27,653 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 02:28:29,662 Stage-1 map = 73%, reduce = 0%
|
|||
|
2013-09-18 02:28:30,668 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 02:28:33,688 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 65.54 sec
|
|||
|
2013-09-18 02:28:34,696 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 65.54 sec
|
|||
|
2013-09-18 02:28:35,703 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 65.54 sec
|
|||
|
2013-09-18 02:28:36,710 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 65.54 sec
|
|||
|
2013-09-18 02:28:37,716 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 65.54 sec
|
|||
|
2013-09-18 02:28:38,721 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 65.54 sec
|
|||
|
2013-09-18 02:28:39,727 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 65.54 sec
|
|||
|
2013-09-18 02:28:40,733 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 65.54 sec
|
|||
|
2013-09-18 02:28:41,739 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 65.54 sec
|
|||
|
2013-09-18 02:28:42,745 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 65.54 sec
|
|||
|
2013-09-18 02:28:43,755 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 65.54 sec
|
|||
|
2013-09-18 02:28:44,763 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 72.69 sec
|
|||
|
2013-09-18 02:28:45,768 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 72.69 sec
|
|||
|
2013-09-18 02:28:46,774 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 81.91 sec
|
|||
|
2013-09-18 02:28:47,780 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 81.91 sec
|
|||
|
2013-09-18 02:28:48,786 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 81.91 sec
|
|||
|
MapReduce Total cumulative CPU time: 1 minutes 21 seconds 910 msec
|
|||
|
Ended Job = job_201309172235_0232
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0233
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:28:51,883 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:28:54,897 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.55 sec
|
|||
|
2013-09-18 02:28:55,903 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.55 sec
|
|||
|
2013-09-18 02:28:56,908 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.55 sec
|
|||
|
2013-09-18 02:28:57,913 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.55 sec
|
|||
|
2013-09-18 02:28:58,917 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.55 sec
|
|||
|
2013-09-18 02:28:59,923 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.55 sec
|
|||
|
2013-09-18 02:29:00,929 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.55 sec
|
|||
|
2013-09-18 02:29:01,934 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 1.55 sec
|
|||
|
2013-09-18 02:29:02,941 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.3 sec
|
|||
|
2013-09-18 02:29:03,947 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.3 sec
|
|||
|
MapReduce Total cumulative CPU time: 3 seconds 300 msec
|
|||
|
Ended Job = job_201309172235_0233
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 81.91 sec HDFS Read: 74853201 HDFS Write: 148871 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 3.3 sec HDFS Read: 149640 HDFS Write: 414 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 1 minutes 25 seconds 210 msec
|
|||
|
OK
|
|||
|
Time taken: 54.759 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY c DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_15771@mturlrep13_201309180229_811845410.txt
|
|||
|
hive> SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0234
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:29:13,428 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:29:21,464 Stage-1 map = 74%, reduce = 0%
|
|||
|
2013-09-18 02:29:24,478 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 02:29:26,494 Stage-1 map = 97%, reduce = 0%, Cumulative CPU 49.2 sec
|
|||
|
2013-09-18 02:29:27,501 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 67.11 sec
|
|||
|
2013-09-18 02:29:28,508 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 67.11 sec
|
|||
|
2013-09-18 02:29:29,514 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 67.11 sec
|
|||
|
2013-09-18 02:29:30,519 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 67.11 sec
|
|||
|
2013-09-18 02:29:31,525 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 67.11 sec
|
|||
|
2013-09-18 02:29:32,531 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 67.11 sec
|
|||
|
2013-09-18 02:29:33,536 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 67.11 sec
|
|||
|
2013-09-18 02:29:34,542 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 67.11 sec
|
|||
|
2013-09-18 02:29:35,547 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 67.11 sec
|
|||
|
2013-09-18 02:29:36,552 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 67.11 sec
|
|||
|
2013-09-18 02:29:37,559 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 73.92 sec
|
|||
|
2013-09-18 02:29:38,566 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 73.92 sec
|
|||
|
2013-09-18 02:29:39,572 Stage-1 map = 100%, reduce = 99%, Cumulative CPU 73.92 sec
|
|||
|
2013-09-18 02:29:40,578 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 83.02 sec
|
|||
|
2013-09-18 02:29:41,585 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 83.02 sec
|
|||
|
MapReduce Total cumulative CPU time: 1 minutes 23 seconds 20 msec
|
|||
|
Ended Job = job_201309172235_0234
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0235
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:29:45,075 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:29:47,084 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.47 sec
|
|||
|
2013-09-18 02:29:48,089 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.47 sec
|
|||
|
2013-09-18 02:29:49,094 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.47 sec
|
|||
|
2013-09-18 02:29:50,099 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.47 sec
|
|||
|
2013-09-18 02:29:51,104 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.47 sec
|
|||
|
2013-09-18 02:29:52,109 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.47 sec
|
|||
|
2013-09-18 02:29:53,114 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.47 sec
|
|||
|
2013-09-18 02:29:54,120 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 1.47 sec
|
|||
|
2013-09-18 02:29:55,125 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.2 sec
|
|||
|
2013-09-18 02:29:56,130 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.2 sec
|
|||
|
MapReduce Total cumulative CPU time: 3 seconds 200 msec
|
|||
|
Ended Job = job_201309172235_0235
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 83.02 sec HDFS Read: 74853201 HDFS Write: 148871 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 3.2 sec HDFS Read: 149640 HDFS Write: 414 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 1 minutes 26 seconds 220 msec
|
|||
|
OK
|
|||
|
Time taken: 50.301 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY c DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_17673@mturlrep13_201309180229_1772334372.txt
|
|||
|
hive> SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0236
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:30:06,838 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:30:13,869 Stage-1 map = 74%, reduce = 0%
|
|||
|
2013-09-18 02:30:16,883 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 02:30:18,898 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 32.43 sec
|
|||
|
2013-09-18 02:30:19,906 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 66.34 sec
|
|||
|
2013-09-18 02:30:20,942 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 66.34 sec
|
|||
|
2013-09-18 02:30:21,949 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 66.34 sec
|
|||
|
2013-09-18 02:30:22,955 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 66.34 sec
|
|||
|
2013-09-18 02:30:23,960 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 66.34 sec
|
|||
|
2013-09-18 02:30:24,965 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 66.34 sec
|
|||
|
2013-09-18 02:30:25,971 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 66.34 sec
|
|||
|
2013-09-18 02:30:26,976 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 66.34 sec
|
|||
|
2013-09-18 02:30:27,982 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 66.34 sec
|
|||
|
2013-09-18 02:30:28,989 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 66.34 sec
|
|||
|
2013-09-18 02:30:29,995 Stage-1 map = 100%, reduce = 78%, Cumulative CPU 66.34 sec
|
|||
|
2013-09-18 02:30:31,003 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 73.15 sec
|
|||
|
2013-09-18 02:30:32,010 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 73.15 sec
|
|||
|
2013-09-18 02:30:33,016 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 82.57 sec
|
|||
|
2013-09-18 02:30:34,022 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 82.57 sec
|
|||
|
2013-09-18 02:30:35,108 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 82.57 sec
|
|||
|
MapReduce Total cumulative CPU time: 1 minutes 22 seconds 570 msec
|
|||
|
Ended Job = job_201309172235_0236
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0237
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:30:37,758 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:30:39,767 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.45 sec
|
|||
|
2013-09-18 02:30:40,773 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.45 sec
|
|||
|
2013-09-18 02:30:41,779 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.45 sec
|
|||
|
2013-09-18 02:30:42,783 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.45 sec
|
|||
|
2013-09-18 02:30:43,788 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.45 sec
|
|||
|
2013-09-18 02:30:44,792 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.45 sec
|
|||
|
2013-09-18 02:30:45,796 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.45 sec
|
|||
|
2013-09-18 02:30:46,801 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.45 sec
|
|||
|
2013-09-18 02:30:47,806 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 1.45 sec
|
|||
|
2013-09-18 02:30:48,812 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.2 sec
|
|||
|
2013-09-18 02:30:49,817 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.2 sec
|
|||
|
MapReduce Total cumulative CPU time: 3 seconds 200 msec
|
|||
|
Ended Job = job_201309172235_0237
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 82.57 sec HDFS Read: 74853201 HDFS Write: 148871 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 3.2 sec HDFS Read: 149640 HDFS Write: 414 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 1 minutes 25 seconds 770 msec
|
|||
|
OK
|
|||
|
Time taken: 51.858 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
-- агрегация, среднее количество ключей, несколько агрегатных функций.;
|
|||
|
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_20314@mturlrep13_201309180230_1304388954.txt
|
|||
|
hive> SELECT MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0238
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:31:06,469 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:31:13,508 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 8.59 sec
|
|||
|
2013-09-18 02:31:14,520 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.31 sec
|
|||
|
2013-09-18 02:31:15,529 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.31 sec
|
|||
|
2013-09-18 02:31:16,536 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.31 sec
|
|||
|
2013-09-18 02:31:17,543 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.31 sec
|
|||
|
2013-09-18 02:31:18,549 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.31 sec
|
|||
|
2013-09-18 02:31:19,555 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.31 sec
|
|||
|
2013-09-18 02:31:20,562 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 34.31 sec
|
|||
|
2013-09-18 02:31:21,569 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 34.31 sec
|
|||
|
2013-09-18 02:31:22,576 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 34.31 sec
|
|||
|
2013-09-18 02:31:23,584 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 41.23 sec
|
|||
|
2013-09-18 02:31:24,591 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 41.23 sec
|
|||
|
MapReduce Total cumulative CPU time: 41 seconds 230 msec
|
|||
|
Ended Job = job_201309172235_0238
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0239
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:31:28,037 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:31:29,043 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.79 sec
|
|||
|
2013-09-18 02:31:30,049 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.79 sec
|
|||
|
2013-09-18 02:31:31,055 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.79 sec
|
|||
|
2013-09-18 02:31:32,060 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.79 sec
|
|||
|
2013-09-18 02:31:33,065 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.79 sec
|
|||
|
2013-09-18 02:31:34,070 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.79 sec
|
|||
|
2013-09-18 02:31:35,076 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.79 sec
|
|||
|
2013-09-18 02:31:36,082 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.79 sec
|
|||
|
2013-09-18 02:31:37,088 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.35 sec
|
|||
|
2013-09-18 02:31:38,095 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.35 sec
|
|||
|
2013-09-18 02:31:39,102 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.35 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 350 msec
|
|||
|
Ended Job = job_201309172235_0239
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 41.23 sec HDFS Read: 58273488 HDFS Write: 21128 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.35 sec HDFS Read: 21897 HDFS Write: 127 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 43 seconds 580 msec
|
|||
|
OK
|
|||
|
Time taken: 42.793 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_22123@mturlrep13_201309180231_845723400.txt
|
|||
|
hive> SELECT MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0240
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:31:49,498 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:31:55,532 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.77 sec
|
|||
|
2013-09-18 02:31:56,540 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.77 sec
|
|||
|
2013-09-18 02:31:57,547 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.77 sec
|
|||
|
2013-09-18 02:31:58,552 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.77 sec
|
|||
|
2013-09-18 02:31:59,557 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.77 sec
|
|||
|
2013-09-18 02:32:00,563 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.77 sec
|
|||
|
2013-09-18 02:32:01,570 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.77 sec
|
|||
|
2013-09-18 02:32:02,576 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 31.77 sec
|
|||
|
2013-09-18 02:32:03,582 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 31.77 sec
|
|||
|
2013-09-18 02:32:04,590 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 38.18 sec
|
|||
|
2013-09-18 02:32:05,597 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 38.18 sec
|
|||
|
MapReduce Total cumulative CPU time: 38 seconds 180 msec
|
|||
|
Ended Job = job_201309172235_0240
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0241
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:32:09,026 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:32:10,031 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.77 sec
|
|||
|
2013-09-18 02:32:11,037 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.77 sec
|
|||
|
2013-09-18 02:32:12,043 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.77 sec
|
|||
|
2013-09-18 02:32:13,048 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.77 sec
|
|||
|
2013-09-18 02:32:14,053 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.77 sec
|
|||
|
2013-09-18 02:32:15,058 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.77 sec
|
|||
|
2013-09-18 02:32:16,063 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.77 sec
|
|||
|
2013-09-18 02:32:17,068 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.77 sec
|
|||
|
2013-09-18 02:32:18,074 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.44 sec
|
|||
|
2013-09-18 02:32:19,079 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.44 sec
|
|||
|
2013-09-18 02:32:20,085 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.44 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 440 msec
|
|||
|
Ended Job = job_201309172235_0241
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 38.18 sec HDFS Read: 58273488 HDFS Write: 21128 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.44 sec HDFS Read: 21897 HDFS Write: 127 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 40 seconds 620 msec
|
|||
|
OK
|
|||
|
Time taken: 39.001 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_23905@mturlrep13_201309180232_33315104.txt
|
|||
|
hive> SELECT MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0242
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:32:30,268 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:32:36,302 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.2 sec
|
|||
|
2013-09-18 02:32:37,310 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.2 sec
|
|||
|
2013-09-18 02:32:38,317 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.2 sec
|
|||
|
2013-09-18 02:32:39,323 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.2 sec
|
|||
|
2013-09-18 02:32:40,329 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.2 sec
|
|||
|
2013-09-18 02:32:41,336 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.2 sec
|
|||
|
2013-09-18 02:32:42,342 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.2 sec
|
|||
|
2013-09-18 02:32:43,348 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 32.2 sec
|
|||
|
2013-09-18 02:32:44,355 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 32.2 sec
|
|||
|
2013-09-18 02:32:45,363 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 39.05 sec
|
|||
|
2013-09-18 02:32:46,370 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 39.05 sec
|
|||
|
MapReduce Total cumulative CPU time: 39 seconds 50 msec
|
|||
|
Ended Job = job_201309172235_0242
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0243
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:32:49,830 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:32:50,836 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.74 sec
|
|||
|
2013-09-18 02:32:51,842 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.74 sec
|
|||
|
2013-09-18 02:32:52,847 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.74 sec
|
|||
|
2013-09-18 02:32:53,852 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.74 sec
|
|||
|
2013-09-18 02:32:54,860 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.74 sec
|
|||
|
2013-09-18 02:32:55,865 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.74 sec
|
|||
|
2013-09-18 02:32:56,871 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.74 sec
|
|||
|
2013-09-18 02:32:57,876 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.74 sec
|
|||
|
2013-09-18 02:32:58,881 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.37 sec
|
|||
|
2013-09-18 02:32:59,886 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.37 sec
|
|||
|
2013-09-18 02:33:00,892 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.37 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 370 msec
|
|||
|
Ended Job = job_201309172235_0243
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 39.05 sec HDFS Read: 58273488 HDFS Write: 21128 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.37 sec HDFS Read: 21897 HDFS Write: 127 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 41 seconds 420 msec
|
|||
|
OK
|
|||
|
Time taken: 38.998 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
-- мощная фильтрация по строкам, затем агрегация по строкам.;
|
|||
|
|
|||
|
|
|||
|
times: 1
query: SELECT MobilePhone, MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_25692@mturlrep13_201309180233_546222477.txt
hive> SELECT MobilePhone, MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0244
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:33:17,631 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:33:24,670 Stage-1 map = 97%, reduce = 0%, Cumulative CPU 23.16 sec
2013-09-18 02:33:25,678 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.61 sec
2013-09-18 02:33:26,685 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.61 sec
2013-09-18 02:33:27,693 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.61 sec
2013-09-18 02:33:28,699 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.61 sec
2013-09-18 02:33:29,705 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.61 sec
2013-09-18 02:33:30,711 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.61 sec
2013-09-18 02:33:31,718 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 31.61 sec
2013-09-18 02:33:32,725 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 31.61 sec
2013-09-18 02:33:33,733 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 39.08 sec
2013-09-18 02:33:34,740 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 39.08 sec
2013-09-18 02:33:35,747 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 39.08 sec
MapReduce Total cumulative CPU time: 39 seconds 80 msec
Ended Job = job_201309172235_0244
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0245
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:33:39,294 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:33:40,299 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:33:41,304 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:33:42,310 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:33:43,316 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:33:44,321 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:33:45,326 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:33:46,332 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:33:47,337 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:33:48,344 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.33 sec
2013-09-18 02:33:49,350 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.33 sec
2013-09-18 02:33:50,356 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.33 sec
MapReduce Total cumulative CPU time: 2 seconds 330 msec
Ended Job = job_201309172235_0245
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 39.08 sec HDFS Read: 59259422 HDFS Write: 22710 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.33 sec HDFS Read: 23477 HDFS Write: 149 SUCCESS
Total MapReduce CPU Time Spent: 41 seconds 410 msec
OK
Time taken: 42.858 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT MobilePhone, MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_27484@mturlrep13_201309180233_1733545584.txt
hive> SELECT MobilePhone, MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0246
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:34:00,545 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:34:06,577 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.0 sec
2013-09-18 02:34:07,584 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.0 sec
2013-09-18 02:34:08,591 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.0 sec
2013-09-18 02:34:09,598 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.0 sec
2013-09-18 02:34:10,604 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.0 sec
2013-09-18 02:34:11,610 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.0 sec
2013-09-18 02:34:12,615 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 32.0 sec
2013-09-18 02:34:13,621 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 32.0 sec
2013-09-18 02:34:14,628 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 32.0 sec
2013-09-18 02:34:15,636 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 39.52 sec
2013-09-18 02:34:16,642 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 39.52 sec
MapReduce Total cumulative CPU time: 39 seconds 520 msec
Ended Job = job_201309172235_0246
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0247
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:34:20,082 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:34:21,088 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:34:22,093 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:34:23,098 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:34:24,102 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:34:25,107 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:34:26,111 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:34:27,116 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:34:28,121 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.76 sec
2013-09-18 02:34:29,126 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.36 sec
2013-09-18 02:34:30,132 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.36 sec
2013-09-18 02:34:31,138 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.36 sec
MapReduce Total cumulative CPU time: 2 seconds 360 msec
Ended Job = job_201309172235_0247
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 39.52 sec HDFS Read: 59259422 HDFS Write: 22710 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.36 sec HDFS Read: 23479 HDFS Write: 149 SUCCESS
Total MapReduce CPU Time Spent: 41 seconds 880 msec
OK
Time taken: 38.928 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT MobilePhone, MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_29266@mturlrep13_201309180234_1724427023.txt
hive> SELECT MobilePhone, MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0248
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:34:41,249 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:34:47,281 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 33.23 sec
2013-09-18 02:34:48,289 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 33.23 sec
2013-09-18 02:34:49,295 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 33.23 sec
2013-09-18 02:34:50,301 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 33.23 sec
2013-09-18 02:34:51,307 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 33.23 sec
2013-09-18 02:34:52,313 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 33.23 sec
2013-09-18 02:34:53,319 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 33.23 sec
2013-09-18 02:34:54,325 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 33.23 sec
2013-09-18 02:34:55,331 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 33.23 sec
2013-09-18 02:34:56,340 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 40.78 sec
2013-09-18 02:34:57,346 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 40.78 sec
2013-09-18 02:34:58,351 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 40.78 sec
MapReduce Total cumulative CPU time: 40 seconds 780 msec
Ended Job = job_201309172235_0248
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0249
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:35:00,867 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:35:03,879 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.78 sec
2013-09-18 02:35:04,884 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.78 sec
2013-09-18 02:35:05,889 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.78 sec
2013-09-18 02:35:06,894 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.78 sec
2013-09-18 02:35:07,899 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.78 sec
2013-09-18 02:35:08,904 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.78 sec
2013-09-18 02:35:09,909 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.78 sec
2013-09-18 02:35:10,915 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.45 sec
2013-09-18 02:35:11,921 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.45 sec
2013-09-18 02:35:12,927 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.45 sec
MapReduce Total cumulative CPU time: 2 seconds 450 msec
Ended Job = job_201309172235_0249
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 40.78 sec HDFS Read: 59259422 HDFS Write: 22710 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.45 sec HDFS Read: 23479 HDFS Write: 149 SUCCESS
Total MapReduce CPU Time Spent: 43 seconds 230 msec
OK
Time taken: 40.035 seconds, Fetched: 10 row(s)
hive> quit;
-- heavy filtering on strings, then aggregation on a pair of a number and a string.;


times: 1
query: SELECT SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_31746@mturlrep13_201309180235_1665432259.txt
hive> SELECT SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0250
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:35:30,446 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:35:37,477 Stage-1 map = 74%, reduce = 0%
2013-09-18 02:35:38,490 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 11.18 sec
2013-09-18 02:35:39,498 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.48 sec
2013-09-18 02:35:40,505 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.48 sec
2013-09-18 02:35:41,511 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.48 sec
2013-09-18 02:35:42,516 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.48 sec
2013-09-18 02:35:43,521 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.48 sec
2013-09-18 02:35:44,527 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.48 sec
2013-09-18 02:35:45,534 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.48 sec
2013-09-18 02:35:46,540 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.48 sec
2013-09-18 02:35:47,547 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.48 sec
2013-09-18 02:35:48,554 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.48 sec
2013-09-18 02:35:49,560 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 42.48 sec
2013-09-18 02:35:50,569 Stage-1 map = 100%, reduce = 84%, Cumulative CPU 49.79 sec
2013-09-18 02:35:51,575 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 56.98 sec
2013-09-18 02:35:52,582 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 56.98 sec
MapReduce Total cumulative CPU time: 56 seconds 980 msec
Ended Job = job_201309172235_0250
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0251
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:35:56,085 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:36:03,111 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:36:05,121 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.4 sec
2013-09-18 02:36:06,127 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.4 sec
2013-09-18 02:36:07,132 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.4 sec
2013-09-18 02:36:08,137 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.4 sec
2013-09-18 02:36:09,142 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.4 sec
2013-09-18 02:36:10,147 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.4 sec
2013-09-18 02:36:11,153 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.4 sec
2013-09-18 02:36:12,158 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 13.4 sec
2013-09-18 02:36:13,163 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 13.4 sec
2013-09-18 02:36:14,171 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.23 sec
2013-09-18 02:36:15,177 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.23 sec
2013-09-18 02:36:16,183 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.23 sec
MapReduce Total cumulative CPU time: 18 seconds 230 msec
Ended Job = job_201309172235_0251
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 56.98 sec HDFS Read: 27820105 HDFS Write: 79726641 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 18.23 sec HDFS Read: 79727410 HDFS Write: 275 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 15 seconds 210 msec
OK
Time taken: 55.967 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_1103@mturlrep13_201309180236_553015116.txt
hive> SELECT SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0252
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:36:25,454 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:36:33,496 Stage-1 map = 97%, reduce = 0%, Cumulative CPU 32.69 sec
2013-09-18 02:36:34,504 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 44.11 sec
2013-09-18 02:36:35,511 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 44.11 sec
2013-09-18 02:36:36,518 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 44.11 sec
2013-09-18 02:36:37,524 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 44.11 sec
2013-09-18 02:36:38,530 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 44.11 sec
2013-09-18 02:36:39,537 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 44.11 sec
2013-09-18 02:36:40,544 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 44.11 sec
2013-09-18 02:36:41,551 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 44.11 sec
2013-09-18 02:36:42,557 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 44.11 sec
2013-09-18 02:36:43,563 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 44.11 sec
2013-09-18 02:36:44,570 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 44.11 sec
2013-09-18 02:36:45,577 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 58.39 sec
2013-09-18 02:36:46,583 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 58.39 sec
2013-09-18 02:36:47,590 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 58.39 sec
MapReduce Total cumulative CPU time: 58 seconds 390 msec
Ended Job = job_201309172235_0252
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0253
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:36:50,133 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:36:58,161 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:37:00,169 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.07 sec
2013-09-18 02:37:01,175 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.07 sec
2013-09-18 02:37:02,180 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.07 sec
2013-09-18 02:37:03,184 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.07 sec
2013-09-18 02:37:04,189 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.07 sec
2013-09-18 02:37:05,195 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.07 sec
2013-09-18 02:37:06,200 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.07 sec
2013-09-18 02:37:07,205 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 14.97 sec
2013-09-18 02:37:08,211 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 14.97 sec
2013-09-18 02:37:09,216 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.86 sec
2013-09-18 02:37:10,221 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.86 sec
2013-09-18 02:37:11,226 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.86 sec
MapReduce Total cumulative CPU time: 18 seconds 860 msec
Ended Job = job_201309172235_0253
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 58.39 sec HDFS Read: 27820105 HDFS Write: 79726641 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 18.86 sec HDFS Read: 79727410 HDFS Write: 275 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 17 seconds 250 msec
OK
Time taken: 53.253 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_2921@mturlrep13_201309180237_1642997595.txt
hive> SELECT SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0254
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:37:21,370 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:37:28,400 Stage-1 map = 89%, reduce = 0%
2013-09-18 02:37:29,413 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.06 sec
2013-09-18 02:37:30,420 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.06 sec
2013-09-18 02:37:31,427 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.06 sec
2013-09-18 02:37:32,433 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.06 sec
2013-09-18 02:37:33,439 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.06 sec
2013-09-18 02:37:34,445 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.06 sec
2013-09-18 02:37:35,451 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.06 sec
2013-09-18 02:37:36,457 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.06 sec
2013-09-18 02:37:37,464 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.06 sec
2013-09-18 02:37:38,470 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.06 sec
2013-09-18 02:37:39,476 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 43.06 sec
2013-09-18 02:37:40,483 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 56.99 sec
2013-09-18 02:37:41,490 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 56.99 sec
2013-09-18 02:37:42,495 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 56.99 sec
MapReduce Total cumulative CPU time: 56 seconds 990 msec
Ended Job = job_201309172235_0254
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0255
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:37:46,017 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:37:53,043 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:37:55,051 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.12 sec
2013-09-18 02:37:56,056 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.12 sec
2013-09-18 02:37:57,061 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.12 sec
2013-09-18 02:37:58,065 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.12 sec
2013-09-18 02:37:59,070 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.12 sec
2013-09-18 02:38:00,074 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.12 sec
2013-09-18 02:38:01,079 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.12 sec
2013-09-18 02:38:02,085 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 13.12 sec
2013-09-18 02:38:03,090 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 13.12 sec
2013-09-18 02:38:04,096 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 17.65 sec
2013-09-18 02:38:05,101 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 17.65 sec
2013-09-18 02:38:06,106 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 17.65 sec
MapReduce Total cumulative CPU time: 17 seconds 650 msec
Ended Job = job_201309172235_0255
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 56.99 sec HDFS Read: 27820105 HDFS Write: 79726641 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 17.65 sec HDFS Read: 79727410 HDFS Write: 275 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 14 seconds 640 msec
OK
Time taken: 53.053 seconds, Fetched: 10 row(s)
hive> quit;
-- moderate filtering on strings, then aggregation on strings, a large number of keys.;


times: 1
query: SELECT SearchPhrase, count(DISTINCT UserID) AS u FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_4654@mturlrep13_201309180238_1576859277.txt
hive> SELECT SearchPhrase, count(DISTINCT UserID) AS u FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0256
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:38:23,923 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:38:30,955 Stage-1 map = 59%, reduce = 0%
2013-09-18 02:38:33,979 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.79 sec
2013-09-18 02:38:34,986 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.79 sec
2013-09-18 02:38:35,992 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.79 sec
2013-09-18 02:38:36,999 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.79 sec
2013-09-18 02:38:38,005 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.79 sec
2013-09-18 02:38:39,011 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.79 sec
2013-09-18 02:38:40,018 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.79 sec
2013-09-18 02:38:41,024 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.79 sec
2013-09-18 02:38:42,030 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.79 sec
2013-09-18 02:38:43,036 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.79 sec
2013-09-18 02:38:44,042 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 48.79 sec
2013-09-18 02:38:45,049 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 48.79 sec
2013-09-18 02:38:46,057 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 66.21 sec
2013-09-18 02:38:47,063 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 66.21 sec
2013-09-18 02:38:48,070 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 66.21 sec
MapReduce Total cumulative CPU time: 1 minutes 6 seconds 210 msec
Ended Job = job_201309172235_0256
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0257
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:38:50,633 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:38:58,663 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:39:00,672 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.14 sec
2013-09-18 02:39:01,679 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.14 sec
2013-09-18 02:39:02,684 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.14 sec
2013-09-18 02:39:03,690 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.14 sec
2013-09-18 02:39:04,695 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.14 sec
2013-09-18 02:39:05,700 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.14 sec
2013-09-18 02:39:06,705 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.14 sec
2013-09-18 02:39:07,710 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 13.14 sec
2013-09-18 02:39:08,715 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 14.0 sec
2013-09-18 02:39:09,720 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 17.9 sec
2013-09-18 02:39:10,726 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 17.9 sec
2013-09-18 02:39:11,731 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 17.9 sec
MapReduce Total cumulative CPU time: 17 seconds 900 msec
Ended Job = job_201309172235_0257
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 66.21 sec HDFS Read: 84536695 HDFS Write: 79726544 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 17.9 sec HDFS Read: 79727313 HDFS Write: 293 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 24 seconds 110 msec
OK
Time taken: 58.068 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT SearchPhrase, count(DISTINCT UserID) AS u FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_6870@mturlrep13_201309180239_5166481.txt
hive> SELECT SearchPhrase, count(DISTINCT UserID) AS u FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0258
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:39:22,278 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:39:29,309 Stage-1 map = 74%, reduce = 0%
2013-09-18 02:39:30,322 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 11.14 sec
2013-09-18 02:39:31,329 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.0 sec
2013-09-18 02:39:32,337 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.0 sec
2013-09-18 02:39:33,342 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.0 sec
2013-09-18 02:39:34,347 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.0 sec
2013-09-18 02:39:35,353 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.0 sec
2013-09-18 02:39:36,359 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.0 sec
2013-09-18 02:39:37,365 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.0 sec
2013-09-18 02:39:38,374 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.0 sec
2013-09-18 02:39:39,381 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.0 sec
2013-09-18 02:39:40,387 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.0 sec
2013-09-18 02:39:41,394 Stage-1 map = 100%, reduce = 78%, Cumulative CPU 48.0 sec
2013-09-18 02:39:42,399 Stage-1 map = 100%, reduce = 78%, Cumulative CPU 48.0 sec
2013-09-18 02:39:43,407 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 65.26 sec
2013-09-18 02:39:44,413 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 65.26 sec
MapReduce Total cumulative CPU time: 1 minutes 5 seconds 260 msec
Ended Job = job_201309172235_0258
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0259
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:39:47,918 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:39:54,941 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:39:56,950 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.07 sec
2013-09-18 02:39:57,956 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.07 sec
2013-09-18 02:39:58,961 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.07 sec
2013-09-18 02:39:59,965 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.07 sec
2013-09-18 02:40:00,970 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.07 sec
2013-09-18 02:40:01,975 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.07 sec
2013-09-18 02:40:02,980 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.07 sec
2013-09-18 02:40:03,984 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 13.07 sec
2013-09-18 02:40:04,989 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 13.07 sec
2013-09-18 02:40:05,995 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 17.73 sec
2013-09-18 02:40:07,001 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 17.73 sec
2013-09-18 02:40:08,006 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 17.73 sec
MapReduce Total cumulative CPU time: 17 seconds 730 msec
Ended Job = job_201309172235_0259
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 65.26 sec HDFS Read: 84536695 HDFS Write: 79726544 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 17.73 sec HDFS Read: 79727313 HDFS Write: 293 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 22 seconds 990 msec
OK
Time taken: 54.393 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT SearchPhrase, count(DISTINCT UserID) AS u FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_8928@mturlrep13_201309180240_2088574054.txt
hive> SELECT SearchPhrase, count(DISTINCT UserID) AS u FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0260
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:40:18,597 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:40:25,630 Stage-1 map = 74%, reduce = 0%
2013-09-18 02:40:26,645 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 11.55 sec
2013-09-18 02:40:27,653 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 47.89 sec
2013-09-18 02:40:28,661 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 47.89 sec
2013-09-18 02:40:29,668 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 47.89 sec
2013-09-18 02:40:30,674 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 47.89 sec
2013-09-18 02:40:31,680 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 47.89 sec
2013-09-18 02:40:32,687 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 47.89 sec
2013-09-18 02:40:33,694 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 47.89 sec
2013-09-18 02:40:34,701 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 47.89 sec
2013-09-18 02:40:35,707 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 47.89 sec
2013-09-18 02:40:36,714 Stage-1 map = 100%, reduce = 55%, Cumulative CPU 47.89 sec
2013-09-18 02:40:37,720 Stage-1 map = 100%, reduce = 78%, Cumulative CPU 47.89 sec
2013-09-18 02:40:38,726 Stage-1 map = 100%, reduce = 78%, Cumulative CPU 47.89 sec
2013-09-18 02:40:39,734 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 64.72 sec
2013-09-18 02:40:40,740 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 64.72 sec
MapReduce Total cumulative CPU time: 1 minutes 4 seconds 720 msec
Ended Job = job_201309172235_0260
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0261
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:40:43,177 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:40:51,206 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:40:53,216 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.81 sec
2013-09-18 02:40:54,221 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.81 sec
2013-09-18 02:40:55,226 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.81 sec
2013-09-18 02:40:56,231 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.81 sec
2013-09-18 02:40:57,236 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.81 sec
2013-09-18 02:40:58,242 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.81 sec
2013-09-18 02:40:59,247 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.81 sec
2013-09-18 02:41:00,253 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 12.81 sec
2013-09-18 02:41:01,259 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 12.81 sec
2013-09-18 02:41:02,264 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 17.59 sec
2013-09-18 02:41:03,271 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 17.59 sec
MapReduce Total cumulative CPU time: 17 seconds 590 msec
Ended Job = job_201309172235_0261
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 64.72 sec HDFS Read: 84536695 HDFS Write: 79726544 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 17.59 sec HDFS Read: 79727311 HDFS Write: 293 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 22 seconds 310 msec
OK
Time taken: 53.189 seconds, Fetched: 10 row(s)
hive> quit;
-- slightly more complex aggregation.;


times: 1
query: SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_11232@mturlrep13_201309180241_2050644034.txt
hive> SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0262
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:41:21,239 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:41:28,272 Stage-1 map = 18%, reduce = 0%
2013-09-18 02:41:29,278 Stage-1 map = 74%, reduce = 0%
2013-09-18 02:41:30,291 Stage-1 map = 86%, reduce = 0%, Cumulative CPU 22.42 sec
2013-09-18 02:41:31,298 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 45.45 sec
2013-09-18 02:41:32,306 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 45.45 sec
2013-09-18 02:41:33,312 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 45.45 sec
2013-09-18 02:41:34,319 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 45.45 sec
2013-09-18 02:41:35,326 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 45.45 sec
2013-09-18 02:41:36,333 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 45.45 sec
2013-09-18 02:41:37,340 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 45.45 sec
2013-09-18 02:41:38,347 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 45.45 sec
2013-09-18 02:41:39,353 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 45.45 sec
2013-09-18 02:41:40,360 Stage-1 map = 100%, reduce = 56%, Cumulative CPU 45.45 sec
2013-09-18 02:41:41,366 Stage-1 map = 100%, reduce = 80%, Cumulative CPU 45.45 sec
2013-09-18 02:41:42,375 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 60.58 sec
2013-09-18 02:41:43,382 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 60.58 sec
2013-09-18 02:41:44,388 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 60.58 sec
MapReduce Total cumulative CPU time: 1 minutes 0 seconds 580 msec
Ended Job = job_201309172235_0262
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0263
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:41:48,080 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:41:55,103 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:41:58,114 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.06 sec
2013-09-18 02:41:59,120 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.06 sec
2013-09-18 02:42:00,125 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.06 sec
2013-09-18 02:42:01,131 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.06 sec
2013-09-18 02:42:02,136 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.06 sec
2013-09-18 02:42:03,142 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.06 sec
2013-09-18 02:42:04,147 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.06 sec
2013-09-18 02:42:05,153 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.06 sec
2013-09-18 02:42:06,159 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 14.06 sec
2013-09-18 02:42:07,165 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.95 sec
2013-09-18 02:42:08,172 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.95 sec
2013-09-18 02:42:09,178 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.95 sec
MapReduce Total cumulative CPU time: 18 seconds 950 msec
Ended Job = job_201309172235_0263
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 60.58 sec HDFS Read: 30310112 HDFS Write: 84160093 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 18.95 sec HDFS Read: 84160862 HDFS Write: 297 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 19 seconds 530 msec
OK
Time taken: 58.359 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_12984@mturlrep13_201309180242_2082311040.txt
hive> SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0264
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:42:19,501 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:42:26,537 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 10.64 sec
2013-09-18 02:42:27,545 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
2013-09-18 02:42:28,553 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
2013-09-18 02:42:29,559 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
2013-09-18 02:42:30,565 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
2013-09-18 02:42:31,571 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
2013-09-18 02:42:32,576 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
2013-09-18 02:42:33,583 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.43 sec
2013-09-18 02:42:34,590 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.43 sec
2013-09-18 02:42:35,596 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.43 sec
2013-09-18 02:42:36,602 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.43 sec
2013-09-18 02:42:37,608 Stage-1 map = 100%, reduce = 80%, Cumulative CPU 43.43 sec
2013-09-18 02:42:38,614 Stage-1 map = 100%, reduce = 80%, Cumulative CPU 43.43 sec
2013-09-18 02:42:39,621 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 59.06 sec
2013-09-18 02:42:40,627 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 59.06 sec
MapReduce Total cumulative CPU time: 59 seconds 60 msec
Ended Job = job_201309172235_0264
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0265
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:42:43,197 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:42:51,226 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:42:54,237 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.46 sec
2013-09-18 02:42:55,243 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.46 sec
2013-09-18 02:42:56,247 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.46 sec
2013-09-18 02:42:57,254 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.46 sec
2013-09-18 02:42:58,260 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.46 sec
2013-09-18 02:42:59,265 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.46 sec
2013-09-18 02:43:00,275 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 14.46 sec
2013-09-18 02:43:01,281 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 14.46 sec
2013-09-18 02:43:02,286 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 14.46 sec
2013-09-18 02:43:03,291 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 19.26 sec
2013-09-18 02:43:04,296 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 19.26 sec
MapReduce Total cumulative CPU time: 19 seconds 260 msec
Ended Job = job_201309172235_0265
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 59.06 sec HDFS Read: 30310112 HDFS Write: 84160093 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 19.26 sec HDFS Read: 84160862 HDFS Write: 297 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 18 seconds 320 msec
OK
Time taken: 53.181 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_14722@mturlrep13_201309180243_131124739.txt
hive> SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0266
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:43:13,609 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:43:21,644 Stage-1 map = 89%, reduce = 0%
2013-09-18 02:43:22,656 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.26 sec
2013-09-18 02:43:23,663 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.26 sec
2013-09-18 02:43:24,669 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.26 sec
2013-09-18 02:43:25,675 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.26 sec
2013-09-18 02:43:26,680 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.26 sec
2013-09-18 02:43:27,686 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.26 sec
2013-09-18 02:43:28,693 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 43.26 sec
2013-09-18 02:43:29,698 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.26 sec
2013-09-18 02:43:30,704 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.26 sec
2013-09-18 02:43:31,711 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 43.26 sec
2013-09-18 02:43:32,717 Stage-1 map = 100%, reduce = 80%, Cumulative CPU 43.26 sec
2013-09-18 02:43:33,724 Stage-1 map = 100%, reduce = 80%, Cumulative CPU 43.26 sec
2013-09-18 02:43:34,732 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 58.63 sec
2013-09-18 02:43:35,739 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 58.63 sec
MapReduce Total cumulative CPU time: 58 seconds 630 msec
Ended Job = job_201309172235_0266
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0267
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:43:39,244 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:43:46,270 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:43:49,283 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.92 sec
2013-09-18 02:43:50,289 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.92 sec
2013-09-18 02:43:51,294 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.92 sec
2013-09-18 02:43:52,300 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.92 sec
2013-09-18 02:43:53,305 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.92 sec
2013-09-18 02:43:54,313 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.92 sec
2013-09-18 02:43:55,318 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.92 sec
2013-09-18 02:43:56,323 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 13.92 sec
2013-09-18 02:43:57,329 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 13.92 sec
2013-09-18 02:43:58,334 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 19.06 sec
2013-09-18 02:43:59,340 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 19.06 sec
2013-09-18 02:44:00,345 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 19.06 sec
MapReduce Total cumulative CPU time: 19 seconds 60 msec
Ended Job = job_201309172235_0267
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 58.63 sec HDFS Read: 30310112 HDFS Write: 84160093 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 19.06 sec HDFS Read: 84160862 HDFS Write: 297 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 17 seconds 690 msec
OK
Time taken: 54.164 seconds, Fetched: 10 row(s)
hive> quit;
-- aggregation by a number and a string, with a large number of keys.;

times: 1
query: SELECT UserID, count(*) AS c FROM hits_10m GROUP BY UserID ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_16467@mturlrep13_201309180244_1529367412.txt
hive> SELECT UserID, count(*) AS c FROM hits_10m GROUP BY UserID ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0268
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:44:18,084 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:44:25,121 Stage-1 map = 81%, reduce = 0%
2013-09-18 02:44:28,140 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 23.45 sec
2013-09-18 02:44:29,148 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.87 sec
2013-09-18 02:44:30,155 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.87 sec
2013-09-18 02:44:31,161 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.87 sec
2013-09-18 02:44:32,167 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.87 sec
2013-09-18 02:44:33,174 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.87 sec
2013-09-18 02:44:34,180 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.87 sec
2013-09-18 02:44:35,186 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.87 sec
2013-09-18 02:44:36,192 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.87 sec
2013-09-18 02:44:37,198 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.87 sec
2013-09-18 02:44:38,204 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.87 sec
2013-09-18 02:44:39,209 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 48.87 sec
2013-09-18 02:44:40,217 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.8 sec
2013-09-18 02:44:41,222 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.8 sec
2013-09-18 02:44:42,229 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.8 sec
MapReduce Total cumulative CPU time: 1 minutes 2 seconds 800 msec
Ended Job = job_201309172235_0268
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0269
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:44:45,729 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:44:55,761 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:44:58,774 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.29 sec
2013-09-18 02:44:59,779 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.29 sec
2013-09-18 02:45:00,790 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.29 sec
2013-09-18 02:45:01,795 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.29 sec
2013-09-18 02:45:02,800 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.29 sec
2013-09-18 02:45:03,805 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.29 sec
2013-09-18 02:45:04,809 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.29 sec
2013-09-18 02:45:05,814 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.29 sec
2013-09-18 02:45:06,819 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 18.29 sec
2013-09-18 02:45:07,824 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 18.29 sec
2013-09-18 02:45:08,830 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 23.02 sec
2013-09-18 02:45:09,835 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 23.02 sec
2013-09-18 02:45:10,840 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 23.02 sec
MapReduce Total cumulative CPU time: 23 seconds 20 msec
Ended Job = job_201309172235_0269
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 62.8 sec HDFS Read: 57312623 HDFS Write: 55475412 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 23.02 sec HDFS Read: 55476181 HDFS Write: 246 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 25 seconds 820 msec
OK
Time taken: 63.096 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT UserID, count(*) AS c FROM hits_10m GROUP BY UserID ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_18524@mturlrep13_201309180245_141259504.txt
hive> SELECT UserID, count(*) AS c FROM hits_10m GROUP BY UserID ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0270
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:45:21,420 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:45:28,450 Stage-1 map = 89%, reduce = 0%
2013-09-18 02:45:30,465 Stage-1 map = 97%, reduce = 0%, Cumulative CPU 35.49 sec
2013-09-18 02:45:31,472 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.62 sec
2013-09-18 02:45:32,478 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.62 sec
2013-09-18 02:45:33,483 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.62 sec
2013-09-18 02:45:34,488 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.62 sec
2013-09-18 02:45:35,494 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.62 sec
2013-09-18 02:45:36,501 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.62 sec
2013-09-18 02:45:37,507 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.62 sec
2013-09-18 02:45:38,513 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.62 sec
2013-09-18 02:45:39,518 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.62 sec
2013-09-18 02:45:40,523 Stage-1 map = 100%, reduce = 58%, Cumulative CPU 48.62 sec
2013-09-18 02:45:41,529 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 48.62 sec
2013-09-18 02:45:42,537 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.57 sec
2013-09-18 02:45:43,543 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.57 sec
MapReduce Total cumulative CPU time: 1 minutes 2 seconds 570 msec
Ended Job = job_201309172235_0270
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0271
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:45:47,340 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:45:57,375 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:46:01,391 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.09 sec
2013-09-18 02:46:02,397 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.09 sec
2013-09-18 02:46:03,402 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.09 sec
2013-09-18 02:46:04,420 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.09 sec
2013-09-18 02:46:05,425 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.09 sec
2013-09-18 02:46:06,429 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.09 sec
2013-09-18 02:46:07,435 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.09 sec
2013-09-18 02:46:08,440 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 18.09 sec
2013-09-18 02:46:09,446 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 18.88 sec
2013-09-18 02:46:10,452 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 22.96 sec
2013-09-18 02:46:11,457 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 22.96 sec
MapReduce Total cumulative CPU time: 22 seconds 960 msec
Ended Job = job_201309172235_0271
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 62.57 sec HDFS Read: 57312623 HDFS Write: 55475412 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 22.96 sec HDFS Read: 55476181 HDFS Write: 246 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 25 seconds 530 msec
OK
Time taken: 58.614 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT UserID, count(*) AS c FROM hits_10m GROUP BY UserID ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_20702@mturlrep13_201309180246_772055237.txt
hive> SELECT UserID, count(*) AS c FROM hits_10m GROUP BY UserID ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0272
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:46:21,609 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:46:28,640 Stage-1 map = 89%, reduce = 0%
2013-09-18 02:46:30,656 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 23.58 sec
2013-09-18 02:46:31,664 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.28 sec
2013-09-18 02:46:32,671 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.28 sec
2013-09-18 02:46:33,676 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.28 sec
2013-09-18 02:46:34,682 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.28 sec
2013-09-18 02:46:35,688 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.28 sec
2013-09-18 02:46:36,698 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.28 sec
2013-09-18 02:46:37,706 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.28 sec
2013-09-18 02:46:38,712 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 49.28 sec
2013-09-18 02:46:39,717 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 49.28 sec
2013-09-18 02:46:40,723 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 49.28 sec
2013-09-18 02:46:41,728 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 49.28 sec
2013-09-18 02:46:42,735 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 63.18 sec
2013-09-18 02:46:43,741 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 63.18 sec
2013-09-18 02:46:44,747 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 63.18 sec
MapReduce Total cumulative CPU time: 1 minutes 3 seconds 180 msec
Ended Job = job_201309172235_0272
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0273
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:46:48,292 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:46:58,320 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:47:02,333 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.58 sec
2013-09-18 02:47:03,338 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.58 sec
2013-09-18 02:47:04,343 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.58 sec
2013-09-18 02:47:05,347 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.58 sec
2013-09-18 02:47:06,351 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.58 sec
2013-09-18 02:47:07,355 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.58 sec
2013-09-18 02:47:08,360 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 18.58 sec
2013-09-18 02:47:09,365 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 19.39 sec
2013-09-18 02:47:10,369 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 19.39 sec
2013-09-18 02:47:11,375 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 23.3 sec
2013-09-18 02:47:12,380 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 23.3 sec
2013-09-18 02:47:13,386 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 23.3 sec
MapReduce Total cumulative CPU time: 23 seconds 300 msec
Ended Job = job_201309172235_0273
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 63.18 sec HDFS Read: 57312623 HDFS Write: 55475412 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 23.3 sec HDFS Read: 55476181 HDFS Write: 246 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 26 seconds 480 msec
OK
Time taken: 60.153 seconds, Fetched: 10 row(s)
hive> quit;
-- aggregation over a very large number of keys; may run out of RAM.;

times: 1
query: SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_22448@mturlrep13_201309180247_332946336.txt
hive> SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0274
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:47:30,239 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:47:37,271 Stage-1 map = 63%, reduce = 0%
2013-09-18 02:47:40,286 Stage-1 map = 89%, reduce = 0%
2013-09-18 02:47:43,305 Stage-1 map = 97%, reduce = 0%, Cumulative CPU 51.04 sec
2013-09-18 02:47:44,312 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 70.28 sec
2013-09-18 02:47:45,319 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 70.28 sec
2013-09-18 02:47:46,324 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 70.28 sec
2013-09-18 02:47:47,329 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 70.28 sec
2013-09-18 02:47:48,334 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 70.28 sec
2013-09-18 02:47:49,340 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 70.28 sec
2013-09-18 02:47:50,346 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 70.28 sec
2013-09-18 02:47:51,352 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 70.28 sec
2013-09-18 02:47:52,359 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 70.28 sec
2013-09-18 02:47:53,365 Stage-1 map = 100%, reduce = 72%, Cumulative CPU 70.28 sec
2013-09-18 02:47:54,371 Stage-1 map = 100%, reduce = 72%, Cumulative CPU 70.28 sec
2013-09-18 02:47:55,378 Stage-1 map = 100%, reduce = 72%, Cumulative CPU 70.28 sec
2013-09-18 02:47:56,384 Stage-1 map = 100%, reduce = 91%, Cumulative CPU 70.28 sec
2013-09-18 02:47:57,392 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 92.27 sec
2013-09-18 02:47:58,399 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 92.27 sec
2013-09-18 02:47:59,404 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 92.27 sec
MapReduce Total cumulative CPU time: 1 minutes 32 seconds 270 msec
Ended Job = job_201309172235_0274
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0275
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:48:02,066 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:48:13,104 Stage-2 map = 46%, reduce = 0%
2013-09-18 02:48:16,114 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:48:19,125 Stage-2 map = 96%, reduce = 0%
2013-09-18 02:48:21,132 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.92 sec
2013-09-18 02:48:22,138 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.92 sec
2013-09-18 02:48:23,142 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.92 sec
2013-09-18 02:48:24,146 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.92 sec
2013-09-18 02:48:25,150 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.92 sec
2013-09-18 02:48:26,154 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.92 sec
2013-09-18 02:48:27,159 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.92 sec
2013-09-18 02:48:28,165 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 23.92 sec
2013-09-18 02:48:29,170 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 23.92 sec
2013-09-18 02:48:30,175 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 23.92 sec
2013-09-18 02:48:31,182 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 28.57 sec
2013-09-18 02:48:32,187 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 28.57 sec
2013-09-18 02:48:33,193 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 28.57 sec
MapReduce Total cumulative CPU time: 28 seconds 570 msec
Ended Job = job_201309172235_0275
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 92.27 sec HDFS Read: 84536695 HDFS Write: 146202868 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 28.57 sec HDFS Read: 146210123 HDFS Write: 256 SUCCESS
Total MapReduce CPU Time Spent: 2 minutes 0 seconds 840 msec
OK
Time taken: 73.175 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_24287@mturlrep13_201309180248_957091100.txt
hive> SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0276
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:48:43,644 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:48:50,677 Stage-1 map = 81%, reduce = 0%
2013-09-18 02:48:53,691 Stage-1 map = 89%, reduce = 0%
2013-09-18 02:48:55,754 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.03 sec
2013-09-18 02:48:56,761 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.03 sec
2013-09-18 02:48:57,769 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.03 sec
2013-09-18 02:48:58,775 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.03 sec
2013-09-18 02:48:59,781 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.03 sec
2013-09-18 02:49:00,786 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.03 sec
2013-09-18 02:49:01,792 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.03 sec
2013-09-18 02:49:02,797 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 69.03 sec
2013-09-18 02:49:03,804 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 69.03 sec
2013-09-18 02:49:04,810 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 69.03 sec
2013-09-18 02:49:05,816 Stage-1 map = 100%, reduce = 53%, Cumulative CPU 69.03 sec
2013-09-18 02:49:06,822 Stage-1 map = 100%, reduce = 73%, Cumulative CPU 69.03 sec
2013-09-18 02:49:07,828 Stage-1 map = 100%, reduce = 73%, Cumulative CPU 69.03 sec
2013-09-18 02:49:08,834 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 69.03 sec
2013-09-18 02:49:09,843 Stage-1 map = 100%, reduce = 92%, Cumulative CPU 87.96 sec
2013-09-18 02:49:10,849 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 90.76 sec
2013-09-18 02:49:11,855 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 90.76 sec
MapReduce Total cumulative CPU time: 1 minutes 30 seconds 760 msec
Ended Job = job_201309172235_0276
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0277
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:49:15,354 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:49:25,387 Stage-2 map = 46%, reduce = 0%
2013-09-18 02:49:28,396 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:49:31,406 Stage-2 map = 96%, reduce = 0%
2013-09-18 02:49:34,416 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.84 sec
2013-09-18 02:49:35,421 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.84 sec
2013-09-18 02:49:36,510 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.84 sec
2013-09-18 02:49:37,521 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.84 sec
2013-09-18 02:49:38,526 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.84 sec
2013-09-18 02:49:39,530 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.84 sec
2013-09-18 02:49:40,536 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.84 sec
2013-09-18 02:49:41,541 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 23.84 sec
2013-09-18 02:49:42,546 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 23.84 sec
2013-09-18 02:49:43,551 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 23.84 sec
2013-09-18 02:49:44,557 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 28.37 sec
2013-09-18 02:49:45,562 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 28.37 sec
MapReduce Total cumulative CPU time: 28 seconds 370 msec
Ended Job = job_201309172235_0277
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 90.76 sec HDFS Read: 84536695 HDFS Write: 146202868 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 28.37 sec HDFS Read: 146210119 HDFS Write: 256 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 59 seconds 130 msec
OK
Time taken: 70.386 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_26119@mturlrep13_201309180249_746088235.txt
hive> SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0278
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:49:54,756 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:50:02,790 Stage-1 map = 78%, reduce = 0%
2013-09-18 02:50:05,803 Stage-1 map = 89%, reduce = 0%
2013-09-18 02:50:07,818 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 72.7 sec
2013-09-18 02:50:08,826 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 72.7 sec
2013-09-18 02:50:09,834 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 72.7 sec
2013-09-18 02:50:10,840 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 72.7 sec
2013-09-18 02:50:11,846 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 72.7 sec
2013-09-18 02:50:12,852 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 72.7 sec
2013-09-18 02:50:13,858 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 72.7 sec
2013-09-18 02:50:14,864 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 72.7 sec
2013-09-18 02:50:15,870 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 72.7 sec
2013-09-18 02:50:16,877 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 72.7 sec
2013-09-18 02:50:17,883 Stage-1 map = 100%, reduce = 72%, Cumulative CPU 72.7 sec
2013-09-18 02:50:18,889 Stage-1 map = 100%, reduce = 72%, Cumulative CPU 72.7 sec
2013-09-18 02:50:19,895 Stage-1 map = 100%, reduce = 72%, Cumulative CPU 72.7 sec
2013-09-18 02:50:20,901 Stage-1 map = 100%, reduce = 92%, Cumulative CPU 72.7 sec
2013-09-18 02:50:21,909 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 83.46 sec
2013-09-18 02:50:22,915 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 94.56 sec
2013-09-18 02:50:23,921 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 94.56 sec
MapReduce Total cumulative CPU time: 1 minutes 34 seconds 560 msec
Ended Job = job_201309172235_0278
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0279
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:50:27,487 Stage-2 map = 0%, reduce = 0%
2013-09-18 02:50:37,520 Stage-2 map = 50%, reduce = 0%
2013-09-18 02:50:43,538 Stage-2 map = 96%, reduce = 0%
2013-09-18 02:50:45,545 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.15 sec
2013-09-18 02:50:46,551 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.15 sec
2013-09-18 02:50:47,555 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.15 sec
2013-09-18 02:50:48,560 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.15 sec
2013-09-18 02:50:49,564 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.15 sec
2013-09-18 02:50:50,568 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.15 sec
2013-09-18 02:50:51,572 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 23.15 sec
2013-09-18 02:50:52,577 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 23.15 sec
2013-09-18 02:50:53,583 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 23.15 sec
2013-09-18 02:50:54,588 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 23.15 sec
2013-09-18 02:50:55,594 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 27.69 sec
2013-09-18 02:50:56,599 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 27.69 sec
MapReduce Total cumulative CPU time: 27 seconds 690 msec
Ended Job = job_201309172235_0279
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 94.56 sec HDFS Read: 84536695 HDFS Write: 146202868 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 27.69 sec HDFS Read: 146210123 HDFS Write: 256 SUCCESS
Total MapReduce CPU Time Spent: 2 minutes 2 seconds 250 msec
OK
Time taken: 69.204 seconds, Fetched: 10 row(s)
hive> quit;
-- an even more complex aggregation.;

times: 1
query: SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_28670@mturlrep13_201309180251_345585890.txt
hive> SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase LIMIT 10;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0280
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:51:14,036 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:51:21,066 Stage-1 map = 59%, reduce = 0%
2013-09-18 02:51:24,080 Stage-1 map = 89%, reduce = 0%
2013-09-18 02:51:27,098 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 34.73 sec
2013-09-18 02:51:28,105 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.99 sec
2013-09-18 02:51:29,113 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.99 sec
2013-09-18 02:51:30,119 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.99 sec
2013-09-18 02:51:31,124 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.99 sec
2013-09-18 02:51:32,130 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.99 sec
2013-09-18 02:51:33,135 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.99 sec
2013-09-18 02:51:34,140 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 69.99 sec
2013-09-18 02:51:35,146 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 69.99 sec
2013-09-18 02:51:36,151 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 69.99 sec
2013-09-18 02:51:37,158 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 79.66 sec
2013-09-18 02:51:38,164 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 79.66 sec
2013-09-18 02:51:39,170 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 79.66 sec
MapReduce Total cumulative CPU time: 1 minutes 19 seconds 660 msec
Ended Job = job_201309172235_0280
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 79.66 sec HDFS Read: 84536695 HDFS Write: 889 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 19 seconds 660 msec
OK
Time taken: 35.511 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_30045@mturlrep13_201309180251_893832227.txt
hive> SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase LIMIT 10;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0281
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:51:48,380 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:51:56,430 Stage-1 map = 78%, reduce = 0%
2013-09-18 02:51:59,444 Stage-1 map = 89%, reduce = 0%
2013-09-18 02:52:00,455 Stage-1 map = 92%, reduce = 0%, Cumulative CPU 17.14 sec
2013-09-18 02:52:01,463 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.67 sec
2013-09-18 02:52:02,470 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.67 sec
2013-09-18 02:52:03,476 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.67 sec
2013-09-18 02:52:04,482 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.67 sec
2013-09-18 02:52:05,487 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.67 sec
2013-09-18 02:52:06,491 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.67 sec
2013-09-18 02:52:07,496 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.67 sec
2013-09-18 02:52:08,502 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 68.67 sec
2013-09-18 02:52:09,508 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 70.32 sec
2013-09-18 02:52:10,516 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 77.77 sec
2013-09-18 02:52:11,522 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 77.77 sec
2013-09-18 02:52:12,528 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 77.77 sec
MapReduce Total cumulative CPU time: 1 minutes 17 seconds 770 msec
Ended Job = job_201309172235_0281
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 77.77 sec HDFS Read: 84536695 HDFS Write: 889 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 17 seconds 770 msec
OK
Time taken: 31.512 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_31399@mturlrep13_201309180252_43587713.txt
hive> SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase LIMIT 10;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0282
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 02:52:22,770 Stage-1 map = 0%, reduce = 0%
2013-09-18 02:52:29,799 Stage-1 map = 78%, reduce = 0%
2013-09-18 02:52:32,813 Stage-1 map = 89%, reduce = 0%
2013-09-18 02:52:34,829 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.3 sec
2013-09-18 02:52:35,836 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.3 sec
2013-09-18 02:52:36,843 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.3 sec
2013-09-18 02:52:37,849 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.3 sec
2013-09-18 02:52:38,855 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.3 sec
2013-09-18 02:52:39,861 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.3 sec
2013-09-18 02:52:40,866 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 68.3 sec
2013-09-18 02:52:41,872 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 68.3 sec
2013-09-18 02:52:42,877 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 68.3 sec
2013-09-18 02:52:43,885 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 77.39 sec
2013-09-18 02:52:44,891 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 77.39 sec
2013-09-18 02:52:45,897 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 77.39 sec
MapReduce Total cumulative CPU time: 1 minutes 17 seconds 390 msec
Ended Job = job_201309172235_0282
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 77.39 sec HDFS Read: 84536695 HDFS Write: 889 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 17 seconds 390 msec
OK
Time taken: 31.482 seconds, Fetched: 10 row(s)
hive> quit;
-- the same, but without sorting.;

times: 1
|
|||
|
query: SELECT UserID, minute(EventTime), SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, minute(EventTime), SearchPhrase ORDER BY c DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_300@mturlrep13_201309180252_1115673732.txt
|
|||
|
hive> SELECT UserID, minute(EventTime), SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, minute(EventTime), SearchPhrase ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0283
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:53:02,084 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:53:09,112 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 02:53:12,137 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 40.15 sec
|
|||
|
2013-09-18 02:53:13,143 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 40.15 sec
|
|||
|
2013-09-18 02:53:14,151 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 40.15 sec
|
|||
|
2013-09-18 02:53:15,163 Stage-1 map = 48%, reduce = 0%, Cumulative CPU 40.15 sec
|
|||
|
2013-09-18 02:53:16,169 Stage-1 map = 48%, reduce = 0%, Cumulative CPU 40.15 sec
|
|||
|
2013-09-18 02:53:17,177 Stage-1 map = 48%, reduce = 0%, Cumulative CPU 40.15 sec
|
|||
|
2013-09-18 02:53:18,184 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 40.15 sec
|
|||
|
2013-09-18 02:53:19,189 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 40.15 sec
|
|||
|
2013-09-18 02:53:20,216 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 40.15 sec
|
|||
|
2013-09-18 02:53:21,223 Stage-1 map = 85%, reduce = 0%, Cumulative CPU 40.15 sec
|
|||
|
2013-09-18 02:53:22,231 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 56.66 sec
|
|||
|
2013-09-18 02:53:23,237 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 56.66 sec
|
|||
|
2013-09-18 02:53:24,244 Stage-1 map = 97%, reduce = 0%, Cumulative CPU 95.24 sec
|
|||
|
2013-09-18 02:53:25,249 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:26,255 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:27,260 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:28,266 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:29,272 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:30,278 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:31,283 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:32,288 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:33,293 Stage-1 map = 100%, reduce = 50%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:34,298 Stage-1 map = 100%, reduce = 50%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:35,303 Stage-1 map = 100%, reduce = 50%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:36,308 Stage-1 map = 100%, reduce = 71%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:37,314 Stage-1 map = 100%, reduce = 71%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:38,320 Stage-1 map = 100%, reduce = 71%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:39,325 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:40,330 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:41,336 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:42,341 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:43,345 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:44,351 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:45,356 Stage-1 map = 100%, reduce = 95%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:46,361 Stage-1 map = 100%, reduce = 95%, Cumulative CPU 115.4 sec
|
|||
|
2013-09-18 02:53:47,367 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 159.4 sec
|
|||
|
2013-09-18 02:53:48,372 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 159.4 sec
|
|||
|
2013-09-18 02:53:49,378 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 159.4 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 minutes 39 seconds 400 msec
|
|||
|
Ended Job = job_201309172235_0283
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0284
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:53:51,875 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:54:05,919 Stage-2 map = 28%, reduce = 0%
|
|||
|
2013-09-18 02:54:11,975 Stage-2 map = 50%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:12,981 Stage-2 map = 50%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:13,986 Stage-2 map = 50%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:14,991 Stage-2 map = 50%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:15,996 Stage-2 map = 50%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:17,001 Stage-2 map = 50%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:18,006 Stage-2 map = 78%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:19,011 Stage-2 map = 78%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:20,016 Stage-2 map = 78%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:21,021 Stage-2 map = 78%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:22,026 Stage-2 map = 78%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:23,032 Stage-2 map = 78%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:24,037 Stage-2 map = 78%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:25,042 Stage-2 map = 78%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:26,047 Stage-2 map = 78%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:27,052 Stage-2 map = 78%, reduce = 0%, Cumulative CPU 20.31 sec
|
|||
|
2013-09-18 02:54:28,057 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:29,062 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:30,066 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:31,071 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:32,075 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:33,080 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:34,085 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:35,090 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:36,095 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:37,100 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:38,105 Stage-2 map = 100%, reduce = 68%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:39,110 Stage-2 map = 100%, reduce = 68%, Cumulative CPU 42.15 sec
|
|||
|
2013-09-18 02:54:40,115 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 48.87 sec
|
|||
|
2013-09-18 02:54:41,121 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 48.87 sec
|
|||
|
MapReduce Total cumulative CPU time: 48 seconds 870 msec
|
|||
|
Ended Job = job_201309172235_0284
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 159.4 sec HDFS Read: 84944733 HDFS Write: 241346048 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 48.87 sec HDFS Read: 241349358 HDFS Write: 268 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 3 minutes 28 seconds 270 msec
|
|||
|
OK
|
|||
|
Time taken: 109.132 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT UserID, minute(EventTime), SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, minute(EventTime), SearchPhrase ORDER BY c DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_2351@mturlrep13_201309180254_1547940573.txt
|
|||
|
hive> SELECT UserID, minute(EventTime), SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, minute(EventTime), SearchPhrase ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0285
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:54:50,345 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:54:58,379 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 02:55:01,391 Stage-1 map = 40%, reduce = 0%
|
|||
|
2013-09-18 02:55:04,405 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 02:55:07,418 Stage-1 map = 74%, reduce = 0%
|
|||
|
2013-09-18 02:55:10,437 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 93.25 sec
|
|||
|
2013-09-18 02:55:11,444 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 93.25 sec
|
|||
|
2013-09-18 02:55:12,452 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.72 sec
|
|||
|
2013-09-18 02:55:13,458 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.72 sec
|
|||
|
2013-09-18 02:55:14,464 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.72 sec
|
|||
|
2013-09-18 02:55:15,469 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.72 sec
|
|||
|
2013-09-18 02:55:16,474 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.72 sec
|
|||
|
2013-09-18 02:55:17,481 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.72 sec
|
|||
|
2013-09-18 02:55:18,487 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 112.72 sec
|
|||
|
2013-09-18 02:55:19,492 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 112.72 sec
|
|||
|
2013-09-18 02:55:20,497 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 112.72 sec
|
|||
|
2013-09-18 02:55:21,503 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 112.72 sec
|
|||
|
2013-09-18 02:55:22,509 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 112.72 sec
|
|||
|
2013-09-18 02:55:23,515 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 112.72 sec
2013-09-18 02:55:24,524 Stage-1 map = 100%, reduce = 73%, Cumulative CPU 112.72 sec
2013-09-18 02:55:25,529 Stage-1 map = 100%, reduce = 73%, Cumulative CPU 112.72 sec
2013-09-18 02:55:26,535 Stage-1 map = 100%, reduce = 73%, Cumulative CPU 112.72 sec
2013-09-18 02:55:27,540 Stage-1 map = 100%, reduce = 81%, Cumulative CPU 112.72 sec
2013-09-18 02:55:28,545 Stage-1 map = 100%, reduce = 81%, Cumulative CPU 112.72 sec
2013-09-18 02:55:29,551 Stage-1 map = 100%, reduce = 81%, Cumulative CPU 112.72 sec
2013-09-18 02:55:30,557 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 112.72 sec
2013-09-18 02:55:31,562 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 112.72 sec
2013-09-18 02:55:32,568 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 112.72 sec
2013-09-18 02:55:33,573 Stage-1 map = 100%, reduce = 98%, Cumulative CPU 112.72 sec
2013-09-18 02:55:34,580 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 156.37 sec
2013-09-18 02:55:35,585 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 156.37 sec
2013-09-18 02:55:36,591 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 156.37 sec
MapReduce Total cumulative CPU time: 2 minutes 36 seconds 370 msec
Ended Job = job_201309172235_0285
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job  -kill job_201309172235_0286
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 02:55:40,097 Stage-2 map = 0%,  reduce = 0%
2013-09-18 02:55:53,132 Stage-2 map = 28%,  reduce = 0%
2013-09-18 02:55:59,148 Stage-2 map = 50%,  reduce = 0%
2013-09-18 02:56:08,170 Stage-2 map = 78%,  reduce = 0%
2013-09-18 02:56:17,199 Stage-2 map = 100%,  reduce = 0%, Cumulative CPU 43.74 sec
2013-09-18 02:56:18,204 Stage-2 map = 100%,  reduce = 0%, Cumulative CPU 43.74 sec
2013-09-18 02:56:19,209 Stage-2 map = 100%,  reduce = 0%, Cumulative CPU 43.74 sec
2013-09-18 02:56:20,214 Stage-2 map = 100%,  reduce = 0%, Cumulative CPU 43.74 sec
2013-09-18 02:56:21,218 Stage-2 map = 100%,  reduce = 0%, Cumulative CPU 43.74 sec
2013-09-18 02:56:22,222 Stage-2 map = 100%,  reduce = 0%, Cumulative CPU 43.74 sec
2013-09-18 02:56:23,227 Stage-2 map = 100%,  reduce = 0%, Cumulative CPU 43.74 sec
2013-09-18 02:56:24,232 Stage-2 map = 100%,  reduce = 0%, Cumulative CPU 43.74 sec
2013-09-18 02:56:25,238 Stage-2 map = 100%,  reduce = 0%, Cumulative CPU 43.74 sec
2013-09-18 02:56:26,243 Stage-2 map = 100%,  reduce = 0%, Cumulative CPU 43.74 sec
2013-09-18 02:56:27,249 Stage-2 map = 100%,  reduce = 67%, Cumulative CPU 43.74 sec
2013-09-18 02:56:28,254 Stage-2 map = 100%,  reduce = 100%, Cumulative CPU 50.54 sec
2013-09-18 02:56:29,260 Stage-2 map = 100%,  reduce = 100%, Cumulative CPU 50.54 sec
2013-09-18 02:56:30,265 Stage-2 map = 100%,  reduce = 100%, Cumulative CPU 50.54 sec
MapReduce Total cumulative CPU time: 50 seconds 540 msec
Ended Job = job_201309172235_0286
MapReduce Jobs Launched:
Job 0: Map: 4  Reduce: 2   Cumulative CPU: 156.37 sec   HDFS Read: 84944733 HDFS Write: 241346048 SUCCESS
Job 1: Map: 1  Reduce: 1   Cumulative CPU: 50.54 sec   HDFS Read: 241349358 HDFS Write: 268 SUCCESS
Total MapReduce CPU Time Spent: 3 minutes 26 seconds 910 msec
OK
Time taken: 107.333 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT UserID, minute(EventTime), SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, minute(EventTime), SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_5002@mturlrep13_201309180256_1377188535.txt
hive> SELECT UserID, minute(EventTime), SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, minute(EventTime), SearchPhrase ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0287
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 02:56:40,498 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:56:47,527 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 02:56:50,540 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 02:56:53,554 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 02:56:56,569 Stage-1 map = 78%, reduce = 0%
|
|||
|
2013-09-18 02:56:59,590 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 26.21 sec
|
|||
|
2013-09-18 02:57:00,596 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 53.06 sec
|
|||
|
2013-09-18 02:57:01,604 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 109.54 sec
|
|||
|
2013-09-18 02:57:02,609 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 109.54 sec
|
|||
|
2013-09-18 02:57:03,614 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 109.54 sec
|
|||
|
2013-09-18 02:57:04,619 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 109.54 sec
|
|||
|
2013-09-18 02:57:05,625 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 109.54 sec
|
|||
|
2013-09-18 02:57:06,631 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 109.54 sec
|
|||
|
2013-09-18 02:57:07,637 Stage-1 map = 100%, reduce = 29%, Cumulative CPU 109.54 sec
|
|||
|
2013-09-18 02:57:08,643 Stage-1 map = 100%, reduce = 29%, Cumulative CPU 109.54 sec
|
|||
|
2013-09-18 02:57:09,649 Stage-1 map = 100%, reduce = 46%, Cumulative CPU 109.54 sec
|
|||
|
2013-09-18 02:57:10,657 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:11,664 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:12,670 Stage-1 map = 100%, reduce = 69%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:13,676 Stage-1 map = 100%, reduce = 72%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:14,682 Stage-1 map = 100%, reduce = 72%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:15,689 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:16,695 Stage-1 map = 100%, reduce = 81%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:17,702 Stage-1 map = 100%, reduce = 81%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:18,708 Stage-1 map = 100%, reduce = 85%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:19,715 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:20,722 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:21,728 Stage-1 map = 100%, reduce = 93%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:22,733 Stage-1 map = 100%, reduce = 98%, Cumulative CPU 121.89 sec
|
|||
|
2013-09-18 02:57:23,740 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 152.17 sec
|
|||
|
2013-09-18 02:57:24,747 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 152.17 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 minutes 32 seconds 170 msec
|
|||
|
Ended Job = job_201309172235_0287
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0288
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 02:57:28,223 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:57:41,263 Stage-2 map = 28%, reduce = 0%
|
|||
|
2013-09-18 02:57:47,345 Stage-2 map = 50%, reduce = 0%
|
|||
|
2013-09-18 02:57:53,362 Stage-2 map = 78%, reduce = 0%
|
|||
|
2013-09-18 02:58:04,395 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:05,401 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:06,406 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:07,411 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:08,416 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:09,420 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:10,425 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:11,431 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:12,436 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:13,442 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:14,446 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:15,451 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 43.57 sec
|
|||
|
2013-09-18 02:58:16,455 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 50.29 sec
|
|||
|
2013-09-18 02:58:17,460 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 50.29 sec
|
|||
|
2013-09-18 02:58:18,465 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 50.29 sec
|
|||
|
MapReduce Total cumulative CPU time: 50 seconds 290 msec
|
|||
|
Ended Job = job_201309172235_0288
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 152.17 sec HDFS Read: 84944733 HDFS Write: 241346048 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 50.29 sec HDFS Read: 241349358 HDFS Write: 268 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 3 minutes 22 seconds 460 msec
|
|||
|
OK
|
|||
|
Time taken: 106.376 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
-- even more complex aggregation; do not run this on large tables.;


times: 1
query: SELECT UserID FROM hits_10m WHERE UserID = 12345678901234567890;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_7338@mturlrep13_201309180258_545165403.txt
hive> SELECT UserID FROM hits_10m WHERE UserID = 12345678901234567890;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks is set to 0 since there's no reduce operator
Kill Command = /usr/libexec/../bin/hadoop job  -kill job_201309172235_0289
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 0
2013-09-18 02:58:36,403 Stage-1 map = 0%,  reduce = 0%
2013-09-18 02:58:42,436 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 16.49 sec
2013-09-18 02:58:43,443 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 16.49 sec
2013-09-18 02:58:44,451 Stage-1 map = 100%,  reduce = 100%, Cumulative CPU 16.49 sec
MapReduce Total cumulative CPU time: 16 seconds 490 msec
Ended Job = job_201309172235_0289
MapReduce Jobs Launched:
Job 0: Map: 4   Cumulative CPU: 16.49 sec   HDFS Read: 57312623 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 16 seconds 490 msec
OK
Time taken: 17.948 seconds
hive> quit;

times: 2
query: SELECT UserID FROM hits_10m WHERE UserID = 12345678901234567890;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_8366@mturlrep13_201309180258_1808547962.txt
hive> SELECT UserID FROM hits_10m WHERE UserID = 12345678901234567890;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks is set to 0 since there's no reduce operator
Kill Command = /usr/libexec/../bin/hadoop job  -kill job_201309172235_0290
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 0
2013-09-18 02:58:54,728 Stage-1 map = 0%,  reduce = 0%
2013-09-18 02:58:58,753 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 16.35 sec
2013-09-18 02:58:59,761 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 16.35 sec
2013-09-18 02:59:00,768 Stage-1 map = 100%,  reduce = 100%, Cumulative CPU 16.35 sec
MapReduce Total cumulative CPU time: 16 seconds 350 msec
Ended Job = job_201309172235_0290
MapReduce Jobs Launched:
Job 0: Map: 4   Cumulative CPU: 16.35 sec   HDFS Read: 57312623 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 16 seconds 350 msec
OK
Time taken: 14.47 seconds
hive> quit;

times: 3
query: SELECT UserID FROM hits_10m WHERE UserID = 12345678901234567890;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_9390@mturlrep13_201309180259_978485030.txt
hive> SELECT UserID FROM hits_10m WHERE UserID = 12345678901234567890;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks is set to 0 since there's no reduce operator
Kill Command = /usr/libexec/../bin/hadoop job  -kill job_201309172235_0291
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 0
2013-09-18 02:59:10,689 Stage-1 map = 0%,  reduce = 0%
2013-09-18 02:59:14,716 Stage-1 map = 25%,  reduce = 0%, Cumulative CPU 4.14 sec
2013-09-18 02:59:15,723 Stage-1 map = 100%,  reduce = 0%, Cumulative CPU 17.49 sec
2013-09-18 02:59:16,732 Stage-1 map = 100%,  reduce = 100%, Cumulative CPU 17.49 sec
MapReduce Total cumulative CPU time: 17 seconds 490 msec
Ended Job = job_201309172235_0291
MapReduce Jobs Launched:
Job 0: Map: 4   Cumulative CPU: 17.49 sec   HDFS Read: 57312623 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 17 seconds 490 msec
OK
Time taken: 14.154 seconds
hive> quit;
-- heavy filtering on a UInt64-type column.;


times: 1
query: SELECT count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%';
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_10422@mturlrep13_201309180259_1489695405.txt
hive> SELECT count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%';;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0292
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:59:33,176 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 02:59:40,206 Stage-1 map = 74%, reduce = 0%
|
|||
|
2013-09-18 02:59:41,218 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.3 sec
|
|||
|
2013-09-18 02:59:42,225 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.3 sec
|
|||
|
2013-09-18 02:59:43,232 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.3 sec
|
|||
|
2013-09-18 02:59:44,236 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.3 sec
|
|||
|
2013-09-18 02:59:45,242 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.3 sec
|
|||
|
2013-09-18 02:59:46,247 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.3 sec
|
|||
|
2013-09-18 02:59:47,254 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.3 sec
|
|||
|
2013-09-18 02:59:48,261 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 36.3 sec
|
|||
|
2013-09-18 02:59:49,269 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 38.08 sec
|
|||
|
2013-09-18 02:59:50,275 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 38.08 sec
|
|||
|
MapReduce Total cumulative CPU time: 38 seconds 80 msec
|
|||
|
Ended Job = job_201309172235_0292
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 38.08 sec HDFS Read: 109451651 HDFS Write: 5 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 38 seconds 80 msec
|
|||
|
OK
|
|||
|
8428
|
|||
|
Time taken: 27.452 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;

times: 2
query: SELECT count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%';
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_11577@mturlrep13_201309180259_208606502.txt
hive> SELECT count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%';;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0293
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 02:59:59,752 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:00:07,795 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
|
|||
|
2013-09-18 03:00:08,802 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
|
|||
|
2013-09-18 03:00:09,809 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
|
|||
|
2013-09-18 03:00:10,815 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
|
|||
|
2013-09-18 03:00:11,821 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
|
|||
|
2013-09-18 03:00:12,827 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
|
|||
|
2013-09-18 03:00:13,834 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
|
|||
|
2013-09-18 03:00:14,841 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 36.09 sec
|
|||
|
2013-09-18 03:00:15,848 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 37.96 sec
|
|||
|
2013-09-18 03:00:16,855 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 37.96 sec
|
|||
|
MapReduce Total cumulative CPU time: 37 seconds 960 msec
|
|||
|
Ended Job = job_201309172235_0293
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 37.96 sec HDFS Read: 109451651 HDFS Write: 5 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 37 seconds 960 msec
|
|||
|
OK
|
|||
|
8428
|
|||
|
Time taken: 24.527 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;

times: 3
query: SELECT count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%';
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_13398@mturlrep13_201309180300_29036781.txt
hive> SELECT count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%';;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0294
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 03:00:27,278 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:00:33,314 Stage-1 map = 25%, reduce = 0%, Cumulative CPU 8.78 sec
|
|||
|
2013-09-18 03:00:34,322 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.08 sec
|
|||
|
2013-09-18 03:00:35,329 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.08 sec
|
|||
|
2013-09-18 03:00:36,334 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.08 sec
|
|||
|
2013-09-18 03:00:37,340 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.08 sec
|
|||
|
2013-09-18 03:00:38,346 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.08 sec
|
|||
|
2013-09-18 03:00:39,351 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.08 sec
|
|||
|
2013-09-18 03:00:40,356 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.08 sec
|
|||
|
2013-09-18 03:00:41,364 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 39.24 sec
|
|||
|
2013-09-18 03:00:42,372 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 39.24 sec
|
|||
|
2013-09-18 03:00:43,377 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 39.24 sec
|
|||
|
MapReduce Total cumulative CPU time: 39 seconds 240 msec
|
|||
|
Ended Job = job_201309172235_0294
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 39.24 sec HDFS Read: 109451651 HDFS Write: 5 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 39 seconds 240 msec
|
|||
|
OK
|
|||
|
8428
|
|||
|
Time taken: 24.674 seconds, Fetched: 1 row(s)
|
|||
|
hive> quit;
-- filtering by substring search within a string.;


times: 1
query: SELECT SearchPhrase, MAX(URL), count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_14574@mturlrep13_201309180300_1950004320.txt
hive> SELECT SearchPhrase, MAX(URL), count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0295
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:00:59,984 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:01:08,021 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:01:09,034 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 28.57 sec
|
|||
|
2013-09-18 03:01:10,042 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.55 sec
|
|||
|
2013-09-18 03:01:11,051 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.55 sec
|
|||
|
2013-09-18 03:01:12,057 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.55 sec
|
|||
|
2013-09-18 03:01:13,064 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.55 sec
|
|||
|
2013-09-18 03:01:14,071 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.55 sec
|
|||
|
2013-09-18 03:01:15,078 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.55 sec
|
|||
|
2013-09-18 03:01:16,084 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 38.55 sec
|
|||
|
2013-09-18 03:01:17,103 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.91 sec
|
|||
|
2013-09-18 03:01:18,109 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.91 sec
|
|||
|
MapReduce Total cumulative CPU time: 42 seconds 910 msec
|
|||
|
Ended Job = job_201309172235_0295
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0296
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:01:20,611 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:01:22,620 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:01:23,626 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:01:24,631 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:01:25,636 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:01:26,641 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:01:27,647 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:01:28,652 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:01:29,657 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:01:30,663 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.26 sec
|
|||
|
2013-09-18 03:01:31,669 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.26 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 260 msec
|
|||
|
Ended Job = job_201309172235_0296
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 42.91 sec HDFS Read: 136675723 HDFS Write: 5172 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.26 sec HDFS Read: 5941 HDFS Write: 984 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 45 seconds 170 msec
|
|||
|
OK
|
|||
|
Time taken: 41.792 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;

times: 2
query: SELECT SearchPhrase, MAX(URL), count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_16383@mturlrep13_201309180301_278669941.txt
hive> SELECT SearchPhrase, MAX(URL), count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0297
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:01:41,964 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:01:49,008 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 9.23 sec
|
|||
|
2013-09-18 03:01:50,016 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.34 sec
|
|||
|
2013-09-18 03:01:51,024 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.34 sec
|
|||
|
2013-09-18 03:01:52,031 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.34 sec
|
|||
|
2013-09-18 03:01:53,036 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.34 sec
|
|||
|
2013-09-18 03:01:54,042 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.34 sec
|
|||
|
2013-09-18 03:01:55,049 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.34 sec
|
|||
|
2013-09-18 03:01:56,056 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.34 sec
|
|||
|
2013-09-18 03:01:57,065 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 43.44 sec
|
|||
|
2013-09-18 03:01:58,071 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 43.44 sec
|
|||
|
2013-09-18 03:01:59,078 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 43.44 sec
|
|||
|
MapReduce Total cumulative CPU time: 43 seconds 440 msec
|
|||
|
Ended Job = job_201309172235_0297
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0298
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:02:01,570 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:02:03,580 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:04,586 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:05,591 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:06,596 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:07,601 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:08,606 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:09,611 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:10,617 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 1.21 sec
|
|||
|
2013-09-18 03:02:11,623 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.21 sec
|
|||
|
2013-09-18 03:02:12,629 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.21 sec
|
|||
|
2013-09-18 03:02:13,635 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.21 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 210 msec
|
|||
|
Ended Job = job_201309172235_0298
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 43.44 sec HDFS Read: 136675723 HDFS Write: 5172 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.21 sec HDFS Read: 5941 HDFS Write: 984 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 45 seconds 650 msec
|
|||
|
OK
|
|||
|
Time taken: 40.17 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;

times: 3
query: SELECT SearchPhrase, MAX(URL), count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_18173@mturlrep13_201309180302_1009071937.txt
hive> SELECT SearchPhrase, MAX(URL), count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0299
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:02:23,943 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:02:30,983 Stage-1 map = 82%, reduce = 0%, Cumulative CPU 9.23 sec
|
|||
|
2013-09-18 03:02:31,990 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.61 sec
|
|||
|
2013-09-18 03:02:32,998 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.61 sec
|
|||
|
2013-09-18 03:02:34,004 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.61 sec
|
|||
|
2013-09-18 03:02:35,010 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.61 sec
|
|||
|
2013-09-18 03:02:36,015 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.61 sec
|
|||
|
2013-09-18 03:02:37,021 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.61 sec
|
|||
|
2013-09-18 03:02:38,027 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.61 sec
|
|||
|
2013-09-18 03:02:39,036 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.69 sec
|
|||
|
2013-09-18 03:02:40,042 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.69 sec
|
|||
|
2013-09-18 03:02:41,048 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.69 sec
|
|||
|
MapReduce Total cumulative CPU time: 42 seconds 690 msec
|
|||
|
Ended Job = job_201309172235_0299
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0300
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:02:43,536 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:02:45,545 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:46,551 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:47,556 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:48,560 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:49,565 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:50,570 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:51,575 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:52,580 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:02:53,585 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.21 sec
|
|||
|
2013-09-18 03:02:54,591 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.21 sec
|
|||
|
2013-09-18 03:02:55,597 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.21 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 210 msec
|
|||
|
Ended Job = job_201309172235_0300
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 42.69 sec HDFS Read: 136675723 HDFS Write: 5172 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.21 sec HDFS Read: 5941 HDFS Write: 984 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 44 seconds 900 msec
|
|||
|
OK
|
|||
|
Time taken: 40.052 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
-- retrieving large columns, with filtering by a string.;


times: 1
query: SELECT SearchPhrase, MAX(URL), MAX(Title), count(*) AS c, count(DISTINCT UserID) FROM hits_10m WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_19998@mturlrep13_201309180303_483326023.txt
hive> SELECT SearchPhrase, MAX(URL), MAX(Title), count(*) AS c, count(DISTINCT UserID) FROM hits_10m WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0301
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:03:12,709 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:03:19,743 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 03:03:22,757 Stage-1 map = 81%, reduce = 0%
|
|||
|
2013-09-18 03:03:23,769 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.01 sec
|
|||
|
2013-09-18 03:03:24,777 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.01 sec
|
|||
|
2013-09-18 03:03:25,783 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.01 sec
|
|||
|
2013-09-18 03:03:26,790 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.01 sec
|
|||
|
2013-09-18 03:03:27,797 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.01 sec
|
|||
|
2013-09-18 03:03:28,802 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.01 sec
|
|||
|
2013-09-18 03:03:29,808 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.01 sec
|
|||
|
2013-09-18 03:03:30,814 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 51.01 sec
|
|||
|
2013-09-18 03:03:31,821 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 55.69 sec
|
|||
|
2013-09-18 03:03:32,828 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 55.69 sec
|
|||
|
MapReduce Total cumulative CPU time: 55 seconds 690 msec
|
|||
|
Ended Job = job_201309172235_0301
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0302
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:03:36,496 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:03:38,506 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
|
|||
|
2013-09-18 03:03:39,511 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
|
|||
|
2013-09-18 03:03:40,516 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
|
|||
|
2013-09-18 03:03:41,521 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
|
|||
|
2013-09-18 03:03:42,526 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
|
|||
|
2013-09-18 03:03:43,531 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
|
|||
|
2013-09-18 03:03:44,536 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
|
|||
|
2013-09-18 03:03:45,542 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.21 sec
|
|||
|
2013-09-18 03:03:46,549 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.21 sec
|
|||
|
2013-09-18 03:03:47,555 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.21 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 210 msec
|
|||
|
Ended Job = job_201309172235_0302
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 55.69 sec HDFS Read: 298803179 HDFS Write: 12221 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.21 sec HDFS Read: 12990 HDFS Write: 2646 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 57 seconds 900 msec
|
|||
|
OK
|
|||
|
Time taken: 45.079 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;

times: 2
query: SELECT SearchPhrase, MAX(URL), MAX(Title), count(*) AS c, count(DISTINCT UserID) FROM hits_10m WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_21928@mturlrep13_201309180303_564237676.txt
hive> SELECT SearchPhrase, MAX(URL), MAX(Title), count(*) AS c, count(DISTINCT UserID) FROM hits_10m WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0303
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:03:56,744 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:04:04,788 Stage-1 map = 48%, reduce = 0%
|
|||
|
2013-09-18 03:04:06,804 Stage-1 map = 57%, reduce = 0%, Cumulative CPU 11.57 sec
|
|||
|
2013-09-18 03:04:07,820 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.6 sec
|
|||
|
2013-09-18 03:04:08,827 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.6 sec
|
|||
|
2013-09-18 03:04:09,833 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.6 sec
|
|||
|
2013-09-18 03:04:10,839 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.6 sec
|
|||
|
2013-09-18 03:04:11,845 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.6 sec
|
|||
|
2013-09-18 03:04:12,850 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.6 sec
|
|||
|
2013-09-18 03:04:13,856 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.6 sec
|
|||
|
2013-09-18 03:04:14,864 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 53.66 sec
|
|||
|
2013-09-18 03:04:15,870 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 53.66 sec
|
|||
|
2013-09-18 03:04:16,877 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 53.66 sec
|
|||
|
MapReduce Total cumulative CPU time: 53 seconds 660 msec
|
|||
|
Ended Job = job_201309172235_0303
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0304
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:04:20,500 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:04:21,506 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:04:22,512 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:04:23,518 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:04:24,523 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:04:25,527 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:04:26,532 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:04:27,537 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:04:28,542 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:04:29,548 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:04:30,554 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.25 sec
|
|||
|
2013-09-18 03:04:31,560 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.25 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 250 msec
|
|||
|
Ended Job = job_201309172235_0304
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 53.66 sec HDFS Read: 298803179 HDFS Write: 12221 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.25 sec HDFS Read: 12990 HDFS Write: 2646 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 55 seconds 910 msec
|
|||
|
OK
|
|||
|
Time taken: 42.12 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;

times: 3
query: SELECT SearchPhrase, MAX(URL), MAX(Title), count(*) AS c, count(DISTINCT UserID) FROM hits_10m WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_23843@mturlrep13_201309180304_1844665833.txt
hive> SELECT SearchPhrase, MAX(URL), MAX(Title), count(*) AS c, count(DISTINCT UserID) FROM hits_10m WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0305
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:04:42,274 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:04:49,306 Stage-1 map = 48%, reduce = 0%
|
|||
|
2013-09-18 03:04:52,325 Stage-1 map = 96%, reduce = 0%, Cumulative CPU 37.24 sec
|
|||
|
2013-09-18 03:04:53,332 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.15 sec
|
|||
|
2013-09-18 03:04:54,339 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.15 sec
|
|||
|
2013-09-18 03:04:55,345 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.15 sec
|
|||
|
2013-09-18 03:04:56,351 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.15 sec
|
|||
|
2013-09-18 03:04:57,357 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.15 sec
|
|||
|
2013-09-18 03:04:58,363 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 50.15 sec
|
|||
|
2013-09-18 03:04:59,369 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 50.15 sec
|
|||
|
2013-09-18 03:05:00,379 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 54.25 sec
|
|||
|
2013-09-18 03:05:01,386 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 54.25 sec
|
|||
|
MapReduce Total cumulative CPU time: 54 seconds 250 msec
|
|||
|
Ended Job = job_201309172235_0305
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0306
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:05:05,825 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:05:07,833 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:05:08,839 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:05:09,844 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:05:10,848 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:05:11,853 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:05:12,858 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:05:13,863 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:05:14,868 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:05:15,874 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.32 sec
|
|||
|
2013-09-18 03:05:16,880 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.32 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 320 msec
|
|||
|
Ended Job = job_201309172235_0306
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 54.25 sec HDFS Read: 298803179 HDFS Write: 12221 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.32 sec HDFS Read: 12990 HDFS Write: 2646 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 56 seconds 570 msec
|
|||
|
OK
|
|||
|
Time taken: 43.438 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
-- slightly more columns.;


times: 1
query: SELECT * FROM hits_10m WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_26447@mturlrep13_201309180305_468727938.txt
hive> SELECT * FROM hits_10m WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0307
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 03:05:34,147 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:05:44,189 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 03:05:47,201 Stage-1 map = 29%, reduce = 0%
|
|||
|
2013-09-18 03:05:51,218 Stage-1 map = 33%, reduce = 0%
|
|||
|
2013-09-18 03:05:53,227 Stage-1 map = 37%, reduce = 0%
|
|||
|
2013-09-18 03:05:54,247 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 03:05:56,278 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:05:59,290 Stage-1 map = 67%, reduce = 0%
|
|||
|
2013-09-18 03:06:03,314 Stage-1 map = 82%, reduce = 0%, Cumulative CPU 39.6 sec
|
|||
|
2013-09-18 03:06:04,322 Stage-1 map = 82%, reduce = 0%, Cumulative CPU 39.6 sec
|
|||
|
2013-09-18 03:06:05,327 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 77.13 sec
|
|||
|
2013-09-18 03:06:06,333 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 77.13 sec
|
|||
|
2013-09-18 03:06:07,338 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 166.73 sec
|
|||
|
2013-09-18 03:06:08,344 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 166.73 sec
|
|||
|
2013-09-18 03:06:09,350 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 166.73 sec
|
|||
|
2013-09-18 03:06:10,355 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 166.73 sec
|
|||
|
2013-09-18 03:06:11,361 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 167.32 sec
|
|||
|
2013-09-18 03:06:12,368 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 169.65 sec
|
|||
|
2013-09-18 03:06:13,374 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 169.65 sec
|
|||
|
2013-09-18 03:06:14,380 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 169.65 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 minutes 49 seconds 650 msec
|
|||
|
Ended Job = job_201309172235_0307
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 169.65 sec HDFS Read: 1082943442 HDFS Write: 5318 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 2 minutes 49 seconds 650 msec
|
|||
|
OK
|
|||
|
Time taken: 50.847 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;

times: 2
query: SELECT * FROM hits_10m WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_27643@mturlrep13_201309180306_526482942.txt
hive> SELECT * FROM hits_10m WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0308
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 03:06:24,046 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:06:35,093 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 03:06:38,107 Stage-1 map = 29%, reduce = 0%
|
|||
|
2013-09-18 03:06:41,122 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 03:06:44,158 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:06:47,171 Stage-1 map = 63%, reduce = 0%
|
|||
|
2013-09-18 03:06:50,185 Stage-1 map = 78%, reduce = 0%
|
|||
|
2013-09-18 03:06:52,213 Stage-1 map = 82%, reduce = 0%, Cumulative CPU 33.78 sec
|
|||
|
2013-09-18 03:06:53,220 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 33.78 sec
|
|||
|
2013-09-18 03:06:54,225 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 72.08 sec
|
|||
|
2013-09-18 03:06:55,230 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 148.07 sec
|
|||
|
2013-09-18 03:06:56,235 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 148.07 sec
|
|||
|
2013-09-18 03:06:57,241 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 148.07 sec
|
|||
|
2013-09-18 03:06:58,246 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 148.07 sec
|
|||
|
2013-09-18 03:06:59,252 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 148.07 sec
|
|||
|
2013-09-18 03:07:00,260 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 150.7 sec
|
|||
|
2013-09-18 03:07:01,266 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 150.7 sec
|
|||
|
2013-09-18 03:07:02,273 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 150.7 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 minutes 30 seconds 700 msec
|
|||
|
Ended Job = job_201309172235_0308
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 150.7 sec HDFS Read: 1082943442 HDFS Write: 5318 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 2 minutes 30 seconds 700 msec
|
|||
|
OK
|
|||
|
Time taken: 46.004 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;

times: 3
query: SELECT * FROM hits_10m WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_28785@mturlrep13_201309180307_1226276178.txt
hive> SELECT * FROM hits_10m WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0309
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 03:07:12,867 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:07:22,931 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 03:07:25,943 Stage-1 map = 29%, reduce = 0%
|
|||
|
2013-09-18 03:07:28,956 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 03:07:31,968 Stage-1 map = 48%, reduce = 0%
|
|||
|
2013-09-18 03:07:34,979 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:07:37,991 Stage-1 map = 74%, reduce = 0%
|
|||
|
2013-09-18 03:07:40,007 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 37.4 sec
|
|||
|
2013-09-18 03:07:41,014 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 37.4 sec
|
|||
|
2013-09-18 03:07:42,019 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 37.4 sec
|
|||
|
2013-09-18 03:07:43,024 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 37.4 sec
|
|||
|
2013-09-18 03:07:44,030 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 160.55 sec
|
|||
|
2013-09-18 03:07:45,035 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 160.55 sec
|
|||
|
2013-09-18 03:07:46,040 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 160.55 sec
|
|||
|
2013-09-18 03:07:47,045 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 160.55 sec
|
|||
|
2013-09-18 03:07:48,052 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 163.46 sec
|
|||
|
2013-09-18 03:07:49,059 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 163.46 sec
|
|||
|
2013-09-18 03:07:50,065 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 163.46 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 minutes 43 seconds 460 msec
|
|||
|
Ended Job = job_201309172235_0309
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 163.46 sec HDFS Read: 1082943442 HDFS Write: 5318 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 2 minutes 43 seconds 460 msec
|
|||
|
OK
|
|||
|
Time taken: 45.95 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
-- a bad query: fetching all columns.;


times: 1
query: SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_29936@mturlrep13_201309180307_1379307985.txt
hive> SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0310
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 03:08:06,541 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:08:14,581 Stage-1 map = 86%, reduce = 0%, Cumulative CPU 19.92 sec
|
|||
|
2013-09-18 03:08:15,589 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.4 sec
|
|||
|
2013-09-18 03:08:16,597 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.4 sec
|
|||
|
2013-09-18 03:08:17,602 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.4 sec
|
|||
|
2013-09-18 03:08:18,607 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.4 sec
|
|||
|
2013-09-18 03:08:19,613 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.4 sec
|
|||
|
2013-09-18 03:08:20,619 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.4 sec
|
|||
|
2013-09-18 03:08:21,626 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.4 sec
|
|||
|
2013-09-18 03:08:22,632 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.4 sec
|
|||
|
2013-09-18 03:08:23,637 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.4 sec
|
|||
|
2013-09-18 03:08:24,643 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.4 sec
|
|||
|
2013-09-18 03:08:25,651 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 45.56 sec
|
|||
|
2013-09-18 03:08:26,656 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 45.56 sec
|
|||
|
2013-09-18 03:08:27,662 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 45.56 sec
|
|||
|
MapReduce Total cumulative CPU time: 45 seconds 560 msec
|
|||
|
Ended Job = job_201309172235_0310
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 45.56 sec HDFS Read: 28228143 HDFS Write: 766 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 45 seconds 560 msec
|
|||
|
OK
|
|||
|
Time taken: 31.007 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_31173@mturlrep13_201309180308_1952201032.txt
|
|||
|
hive> SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0311
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 03:08:37,914 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:08:44,951 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.48 sec
|
|||
|
2013-09-18 03:08:45,959 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.48 sec
|
|||
|
2013-09-18 03:08:46,969 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.48 sec
|
|||
|
2013-09-18 03:08:47,974 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.48 sec
|
|||
|
2013-09-18 03:08:48,980 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.48 sec
|
|||
|
2013-09-18 03:08:49,986 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.48 sec
|
|||
|
2013-09-18 03:08:50,992 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.48 sec
|
|||
|
2013-09-18 03:08:51,998 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 39.48 sec
|
|||
|
2013-09-18 03:08:53,004 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 39.48 sec
|
|||
|
2013-09-18 03:08:54,009 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 39.48 sec
|
|||
|
2013-09-18 03:08:55,017 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 44.59 sec
|
|||
|
2013-09-18 03:08:56,023 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 44.59 sec
|
|||
|
MapReduce Total cumulative CPU time: 44 seconds 590 msec
|
|||
|
Ended Job = job_201309172235_0311
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 44.59 sec HDFS Read: 28228143 HDFS Write: 766 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 44 seconds 590 msec
|
|||
|
OK
|
|||
|
Time taken: 26.473 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_32425@mturlrep13_201309180308_2035672137.txt
|
|||
|
hive> SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;;
|
|||
|
Total MapReduce jobs = 1
|
|||
|
Launching Job 1 out of 1
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0312
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
|||
|
2013-09-18 03:09:05,993 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:09:13,029 Stage-1 map = 97%, reduce = 0%, Cumulative CPU 29.62 sec
|
|||
|
2013-09-18 03:09:14,037 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.74 sec
|
|||
|
2013-09-18 03:09:15,043 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.74 sec
|
|||
|
2013-09-18 03:09:16,049 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.74 sec
|
|||
|
2013-09-18 03:09:17,054 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.74 sec
|
|||
|
2013-09-18 03:09:18,060 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.74 sec
|
|||
|
2013-09-18 03:09:19,066 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.74 sec
|
|||
|
2013-09-18 03:09:20,072 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.74 sec
|
|||
|
2013-09-18 03:09:21,079 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 39.74 sec
|
|||
|
2013-09-18 03:09:22,085 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 39.74 sec
|
|||
|
2013-09-18 03:09:23,092 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 45.03 sec
|
|||
|
2013-09-18 03:09:24,098 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 45.03 sec
|
|||
|
MapReduce Total cumulative CPU time: 45 seconds 30 msec
|
|||
|
Ended Job = job_201309172235_0312
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 45.03 sec HDFS Read: 28228143 HDFS Write: 766 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 45 seconds 30 msec
|
|||
|
OK
|
|||
|
Time taken: 26.277 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
-- big sort.;

times: 1
query: SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_1250@mturlrep13_201309180309_358088813.txt
hive> SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0313
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
2013-09-18 03:09:40,971 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:09:49,018 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 10.24 sec
2013-09-18 03:09:50,025 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.07 sec
2013-09-18 03:09:51,032 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.07 sec
2013-09-18 03:09:52,037 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.07 sec
2013-09-18 03:09:53,042 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.07 sec
2013-09-18 03:09:54,048 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.07 sec
2013-09-18 03:09:55,053 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.07 sec
2013-09-18 03:09:56,059 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.07 sec
2013-09-18 03:09:57,065 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.07 sec
2013-09-18 03:09:58,070 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.07 sec
2013-09-18 03:09:59,075 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.07 sec
2013-09-18 03:10:00,080 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 42.07 sec
2013-09-18 03:10:01,087 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 48.23 sec
2013-09-18 03:10:02,093 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 48.23 sec
MapReduce Total cumulative CPU time: 48 seconds 230 msec
Ended Job = job_201309172235_0313
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 48.23 sec HDFS Read: 27820105 HDFS Write: 666 SUCCESS
Total MapReduce CPU Time Spent: 48 seconds 230 msec
OK
ялта интурист
! как одеть трехнедельного ребенка при температуре 20 градусов
! отель rattana beach hotel 3*
! официальный сайт ооо "группа аист"г москва, ул коцюбинского, д 4, офис 343
! официальный сайт ооо "группа аист"г москва, ул коцюбинского, д 4, офис 343
!( центробежный скважинный калибр форумы)
!(!(storm master silmarils))
!(!(storm master silmarils))
!(!(title:(схема sputnik hi 4000)))
!(44-фз о контрактной системе)
Time taken: 30.985 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_2534@mturlrep13_201309180310_1314367270.txt
hive> SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0314
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
2013-09-18 03:10:12,384 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:10:19,428 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 20.28 sec
2013-09-18 03:10:20,435 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.07 sec
2013-09-18 03:10:21,442 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.07 sec
2013-09-18 03:10:22,447 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.07 sec
2013-09-18 03:10:23,452 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.07 sec
2013-09-18 03:10:24,457 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.07 sec
2013-09-18 03:10:25,463 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.07 sec
2013-09-18 03:10:26,469 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 41.07 sec
2013-09-18 03:10:27,475 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 41.07 sec
2013-09-18 03:10:28,481 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 41.07 sec
2013-09-18 03:10:29,486 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 41.07 sec
2013-09-18 03:10:30,494 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 47.39 sec
2013-09-18 03:10:31,500 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 47.39 sec
MapReduce Total cumulative CPU time: 47 seconds 390 msec
Ended Job = job_201309172235_0314
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 47.39 sec HDFS Read: 27820105 HDFS Write: 666 SUCCESS
Total MapReduce CPU Time Spent: 47 seconds 390 msec
OK
ялта интурист
! как одеть трехнедельного ребенка при температуре 20 градусов
! отель rattana beach hotel 3*
! официальный сайт ооо "группа аист"г москва, ул коцюбинского, д 4, офис 343
! официальный сайт ооо "группа аист"г москва, ул коцюбинского, д 4, офис 343
!( центробежный скважинный калибр форумы)
!(!(storm master silmarils))
!(!(storm master silmarils))
!(!(title:(схема sputnik hi 4000)))
!(44-фз о контрактной системе)
Time taken: 27.724 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_4327@mturlrep13_201309180310_2107081385.txt
hive> SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0315
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
2013-09-18 03:10:40,801 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:10:48,841 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.7 sec
2013-09-18 03:10:49,849 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.7 sec
2013-09-18 03:10:50,856 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.7 sec
2013-09-18 03:10:51,862 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.7 sec
2013-09-18 03:10:52,867 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.7 sec
2013-09-18 03:10:53,872 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.7 sec
2013-09-18 03:10:54,877 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.7 sec
2013-09-18 03:10:55,884 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.7 sec
2013-09-18 03:10:56,889 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 40.7 sec
2013-09-18 03:10:57,894 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 40.7 sec
2013-09-18 03:10:58,899 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 40.7 sec
2013-09-18 03:10:59,906 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 47.23 sec
2013-09-18 03:11:00,915 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 47.23 sec
MapReduce Total cumulative CPU time: 47 seconds 230 msec
Ended Job = job_201309172235_0315
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 47.23 sec HDFS Read: 27820105 HDFS Write: 666 SUCCESS
Total MapReduce CPU Time Spent: 47 seconds 230 msec
OK
ялта интурист
! как одеть трехнедельного ребенка при температуре 20 градусов
! отель rattana beach hotel 3*
! официальный сайт ооо "группа аист"г москва, ул коцюбинского, д 4, офис 343
! официальный сайт ооо "группа аист"г москва, ул коцюбинского, д 4, офис 343
!( центробежный скважинный калибр форумы)
!(!(storm master silmarils))
!(!(storm master silmarils))
!(!(title:(схема sputnik hi 4000)))
!(44-фз о контрактной системе)
Time taken: 27.357 seconds, Fetched: 10 row(s)
hive> quit;
-- big sort by strings.;

times: 1
query: SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_5527@mturlrep13_201309180311_656071928.txt
hive> SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0316
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
2013-09-18 03:11:19,126 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:11:26,155 Stage-1 map = 85%, reduce = 0%
2013-09-18 03:11:27,169 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 10.31 sec
2013-09-18 03:11:28,177 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.22 sec
2013-09-18 03:11:29,184 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.22 sec
2013-09-18 03:11:30,190 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.22 sec
2013-09-18 03:11:31,196 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.22 sec
2013-09-18 03:11:32,201 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.22 sec
2013-09-18 03:11:33,206 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.22 sec
2013-09-18 03:11:34,213 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.22 sec
2013-09-18 03:11:35,219 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 41.22 sec
2013-09-18 03:11:36,225 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 41.22 sec
2013-09-18 03:11:37,255 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 41.22 sec
2013-09-18 03:11:38,260 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 41.22 sec
2013-09-18 03:11:39,268 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 48.23 sec
2013-09-18 03:11:40,274 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 48.23 sec
2013-09-18 03:11:41,280 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 48.23 sec
MapReduce Total cumulative CPU time: 48 seconds 230 msec
Ended Job = job_201309172235_0316
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 48.23 sec HDFS Read: 28228143 HDFS Write: 762 SUCCESS
Total MapReduce CPU Time Spent: 48 seconds 230 msec
OK
Time taken: 32.747 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_7168@mturlrep13_201309180311_1915819780.txt
hive> SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0317
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
2013-09-18 03:11:50,402 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:11:58,444 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 10.7 sec
2013-09-18 03:11:59,451 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.62 sec
2013-09-18 03:12:00,458 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.62 sec
2013-09-18 03:12:01,463 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.62 sec
2013-09-18 03:12:02,468 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.62 sec
2013-09-18 03:12:03,473 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.62 sec
2013-09-18 03:12:04,479 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 41.62 sec
2013-09-18 03:12:05,485 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 41.62 sec
2013-09-18 03:12:06,490 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 41.62 sec
2013-09-18 03:12:07,496 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 41.62 sec
2013-09-18 03:12:08,501 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 41.62 sec
2013-09-18 03:12:09,508 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 48.08 sec
2013-09-18 03:12:10,515 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 48.08 sec
2013-09-18 03:12:11,520 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 48.08 sec
MapReduce Total cumulative CPU time: 48 seconds 80 msec
Ended Job = job_201309172235_0317
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 48.08 sec HDFS Read: 28228143 HDFS Write: 762 SUCCESS
Total MapReduce CPU Time Spent: 48 seconds 80 msec
OK
Time taken: 28.329 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_8422@mturlrep13_201309180312_364801550.txt
hive> SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0318
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
2013-09-18 03:12:21,723 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:12:28,768 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 9.8 sec
2013-09-18 03:12:29,775 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.77 sec
2013-09-18 03:12:30,780 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.77 sec
2013-09-18 03:12:31,785 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.77 sec
2013-09-18 03:12:32,790 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.77 sec
2013-09-18 03:12:33,796 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.77 sec
2013-09-18 03:12:34,801 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 42.77 sec
2013-09-18 03:12:35,807 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 42.77 sec
2013-09-18 03:12:36,814 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 42.77 sec
2013-09-18 03:12:37,820 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 42.77 sec
2013-09-18 03:12:38,828 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 49.41 sec
2013-09-18 03:12:39,833 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 49.41 sec
2013-09-18 03:12:40,839 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 49.41 sec
MapReduce Total cumulative CPU time: 49 seconds 410 msec
Ended Job = job_201309172235_0318
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 49.41 sec HDFS Read: 28228143 HDFS Write: 762 SUCCESS
Total MapReduce CPU Time Spent: 49 seconds 410 msec
OK
Time taken: 27.439 seconds, Fetched: 10 row(s)
hive> quit;
-- big sort by a tuple.;

times: 1
query: SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits_10m WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_9670@mturlrep13_201309180312_462358371.txt
hive> SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits_10m WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0319
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:12:57,315 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:13:04,346 Stage-1 map = 15%, reduce = 0%
2013-09-18 03:13:07,360 Stage-1 map = 44%, reduce = 0%
2013-09-18 03:13:10,373 Stage-1 map = 63%, reduce = 0%
2013-09-18 03:13:13,393 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 68.42 sec
2013-09-18 03:13:14,400 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 68.42 sec
2013-09-18 03:13:15,412 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 73.5 sec
2013-09-18 03:13:16,420 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 79.72 sec
2013-09-18 03:13:17,427 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 93.63 sec
2013-09-18 03:13:18,434 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 93.63 sec
2013-09-18 03:13:19,439 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 93.63 sec
2013-09-18 03:13:20,444 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 93.63 sec
2013-09-18 03:13:21,449 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 93.63 sec
2013-09-18 03:13:22,456 Stage-1 map = 100%, reduce = 4%, Cumulative CPU 93.63 sec
2013-09-18 03:13:23,462 Stage-1 map = 100%, reduce = 21%, Cumulative CPU 93.63 sec
2013-09-18 03:13:24,468 Stage-1 map = 100%, reduce = 21%, Cumulative CPU 93.63 sec
2013-09-18 03:13:25,474 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 93.63 sec
2013-09-18 03:13:26,481 Stage-1 map = 100%, reduce = 51%, Cumulative CPU 93.63 sec
2013-09-18 03:13:27,487 Stage-1 map = 100%, reduce = 51%, Cumulative CPU 93.63 sec
2013-09-18 03:13:28,493 Stage-1 map = 100%, reduce = 71%, Cumulative CPU 93.63 sec
2013-09-18 03:13:29,498 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 93.63 sec
2013-09-18 03:13:30,505 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 103.43 sec
2013-09-18 03:13:31,511 Stage-1 map = 100%, reduce = 92%, Cumulative CPU 103.43 sec
2013-09-18 03:13:32,516 Stage-1 map = 100%, reduce = 92%, Cumulative CPU 103.43 sec
2013-09-18 03:13:33,521 Stage-1 map = 100%, reduce = 92%, Cumulative CPU 103.43 sec
2013-09-18 03:13:34,527 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 117.54 sec
2013-09-18 03:13:35,533 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 117.54 sec
MapReduce Total cumulative CPU time: 1 minutes 57 seconds 540 msec
Ended Job = job_201309172235_0319
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0320
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:13:39,017 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:13:40,022 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:13:41,027 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:13:42,033 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:13:43,039 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:13:44,044 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:13:45,049 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:13:46,053 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:13:47,058 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:13:48,064 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.23 sec
2013-09-18 03:13:49,069 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.23 sec
2013-09-18 03:13:50,074 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.23 sec
MapReduce Total cumulative CPU time: 2 seconds 230 msec
Ended Job = job_201309172235_0320
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 117.54 sec HDFS Read: 117363067 HDFS Write: 794 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.23 sec HDFS Read: 1563 HDFS Write: 571 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 59 seconds 770 msec
OK
Time taken: 62.932 seconds, Fetched: 19 row(s)
hive> quit;

times: 2
query: SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits_10m WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_11483@mturlrep13_201309180313_1683269213.txt
hive> SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits_10m WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0321
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:13:59,358 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:14:07,402 Stage-1 map = 29%, reduce = 0%
2013-09-18 03:14:10,416 Stage-1 map = 44%, reduce = 0%
2013-09-18 03:14:13,434 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 56.74 sec
2013-09-18 03:14:14,442 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 56.74 sec
2013-09-18 03:14:15,450 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 56.74 sec
2013-09-18 03:14:16,460 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 63.68 sec
2013-09-18 03:14:17,467 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 72.77 sec
2013-09-18 03:14:18,473 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 92.25 sec
2013-09-18 03:14:19,479 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 92.25 sec
2013-09-18 03:14:20,484 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 92.25 sec
2013-09-18 03:14:21,490 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 92.25 sec
2013-09-18 03:14:22,496 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 92.25 sec
2013-09-18 03:14:23,503 Stage-1 map = 100%, reduce = 29%, Cumulative CPU 92.25 sec
2013-09-18 03:14:24,509 Stage-1 map = 100%, reduce = 29%, Cumulative CPU 92.25 sec
2013-09-18 03:14:25,515 Stage-1 map = 100%, reduce = 29%, Cumulative CPU 92.25 sec
2013-09-18 03:14:26,521 Stage-1 map = 100%, reduce = 68%, Cumulative CPU 92.25 sec
2013-09-18 03:14:27,528 Stage-1 map = 100%, reduce = 68%, Cumulative CPU 92.25 sec
2013-09-18 03:14:28,534 Stage-1 map = 100%, reduce = 68%, Cumulative CPU 92.25 sec
2013-09-18 03:14:29,540 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 92.25 sec
2013-09-18 03:14:30,548 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 102.34 sec
2013-09-18 03:14:31,554 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 102.34 sec
2013-09-18 03:14:32,561 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 102.34 sec
2013-09-18 03:14:33,567 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 102.34 sec
2013-09-18 03:14:34,573 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 116.97 sec
2013-09-18 03:14:35,579 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 116.97 sec
MapReduce Total cumulative CPU time: 1 minutes 56 seconds 970 msec
Ended Job = job_201309172235_0321
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0322
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:14:38,052 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:14:40,061 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:14:41,066 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:14:42,071 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:14:43,075 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:14:44,081 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:14:45,086 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:14:46,090 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
2013-09-18 03:14:47,096 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.66 sec
2013-09-18 03:14:48,102 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.07 sec
2013-09-18 03:14:49,107 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.07 sec
MapReduce Total cumulative CPU time: 2 seconds 70 msec
Ended Job = job_201309172235_0322
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 116.97 sec HDFS Read: 117363067 HDFS Write: 794 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.07 sec HDFS Read: 1563 HDFS Write: 571 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 59 seconds 40 msec
OK
Time taken: 57.159 seconds, Fetched: 19 row(s)
hive> quit;

times: 3
query: SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits_10m WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_13299@mturlrep13_201309180314_854084991.txt
hive> SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits_10m WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0323
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:14:59,355 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:15:06,385 Stage-1 map = 25%, reduce = 0%
2013-09-18 03:15:09,399 Stage-1 map = 44%, reduce = 0%
2013-09-18 03:15:12,413 Stage-1 map = 74%, reduce = 0%
2013-09-18 03:15:15,516 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 74.75 sec
2013-09-18 03:15:16,523 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 80.15 sec
2013-09-18 03:15:17,530 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 91.8 sec
2013-09-18 03:15:18,536 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 91.8 sec
2013-09-18 03:15:19,542 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 91.8 sec
2013-09-18 03:15:20,547 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 91.8 sec
2013-09-18 03:15:21,552 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 91.8 sec
2013-09-18 03:15:22,558 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 91.8 sec
2013-09-18 03:15:23,563 Stage-1 map = 100%, reduce = 29%, Cumulative CPU 91.8 sec
2013-09-18 03:15:24,568 Stage-1 map = 100%, reduce = 29%, Cumulative CPU 91.8 sec
2013-09-18 03:15:25,574 Stage-1 map = 100%, reduce = 29%, Cumulative CPU 91.8 sec
2013-09-18 03:15:26,579 Stage-1 map = 100%, reduce = 51%, Cumulative CPU 91.8 sec
2013-09-18 03:15:27,585 Stage-1 map = 100%, reduce = 51%, Cumulative CPU 91.8 sec
2013-09-18 03:15:28,590 Stage-1 map = 100%, reduce = 51%, Cumulative CPU 91.8 sec
2013-09-18 03:15:29,595 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 91.8 sec
2013-09-18 03:15:30,603 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 101.93 sec
2013-09-18 03:15:31,608 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 101.93 sec
2013-09-18 03:15:32,613 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 101.93 sec
2013-09-18 03:15:33,626 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 116.67 sec
2013-09-18 03:15:34,633 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 116.67 sec
2013-09-18 03:15:35,639 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 116.67 sec
MapReduce Total cumulative CPU time: 1 minutes 56 seconds 670 msec
Ended Job = job_201309172235_0323
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0324
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:15:39,125 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:15:40,130 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:15:41,136 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:15:42,141 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:15:43,145 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:15:44,150 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:15:45,155 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:15:46,159 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:15:47,165 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:15:48,170 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.19 sec
2013-09-18 03:15:49,176 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.19 sec
2013-09-18 03:15:50,182 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.19 sec
MapReduce Total cumulative CPU time: 2 seconds 190 msec
Ended Job = job_201309172235_0324
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 116.67 sec HDFS Read: 117363067 HDFS Write: 794 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.19 sec HDFS Read: 1563 HDFS Write: 571 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 58 seconds 860 msec
OK
Time taken: 59.233 seconds, Fetched: 19 row(s)
hive> quit;
-- computing average URL lengths for large counters.;

times: 1
query: SELECT SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)), avg(length(Referer)) AS l, count(*) AS c, MAX(Referer) FROM hits_10m WHERE Referer != '' GROUP BY SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)) HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_15829@mturlrep13_201309180315_1054632059.txt
hive> SELECT SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)), avg(length(Referer)) AS l, count(*) AS c, MAX(Referer) FROM hits_10m WHERE Referer != '' GROUP BY SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)) HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;;
FAILED: SemanticException [Error 10011]: Line 1:336 Invalid function 'GREATEST'
hive> quit;

times: 2
query: SELECT SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)), avg(length(Referer)) AS l, count(*) AS c, MAX(Referer) FROM hits_10m WHERE Referer != '' GROUP BY SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)) HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_16050@mturlrep13_201309180316_1145304248.txt
hive> SELECT SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)), avg(length(Referer)) AS l, count(*) AS c, MAX(Referer) FROM hits_10m WHERE Referer != '' GROUP BY SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)) HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;;
FAILED: SemanticException [Error 10011]: Line 1:336 Invalid function 'GREATEST'
hive> quit;

times: 3
query: SELECT SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)), avg(length(Referer)) AS l, count(*) AS c, MAX(Referer) FROM hits_10m WHERE Referer != '' GROUP BY SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)) HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_16266@mturlrep13_201309180316_80926883.txt
hive> SELECT SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)), avg(length(Referer)) AS l, count(*) AS c, MAX(Referer) FROM hits_10m WHERE Referer != '' GROUP BY SUBSTRING(SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2), 1, GREATEST(0, FIND_IN_SET('/', SUBSTRING(Referer, FIND_IN_SET('//', Referer) + 2)) - 1)) HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;;
FAILED: SemanticException [Error 10011]: Line 1:336 Invalid function 'GREATEST'
hive> quit;
-- the same, but with a breakdown by domain.;

times: 1
query: SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_10m;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_16487@mturlrep13_201309180316_664462210.txt
hive> SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_10m;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0325
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
2013-09-18 03:16:31,560 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:16:45,621 Stage-1 map = 15%, reduce = 0%
2013-09-18 03:16:51,650 Stage-1 map = 29%, reduce = 0%
2013-09-18 03:16:57,674 Stage-1 map = 33%, reduce = 0%
2013-09-18 03:17:00,686 Stage-1 map = 44%, reduce = 0%
2013-09-18 03:17:06,709 Stage-1 map = 59%, reduce = 0%
2013-09-18 03:17:12,731 Stage-1 map = 74%, reduce = 0%
2013-09-18 03:17:18,761 Stage-1 map = 78%, reduce = 0%, Cumulative CPU 173.32 sec
2013-09-18 03:17:19,768 Stage-1 map = 82%, reduce = 0%, Cumulative CPU 181.18 sec
2013-09-18 03:17:20,774 Stage-1 map = 82%, reduce = 0%, Cumulative CPU 181.18 sec
2013-09-18 03:17:21,778 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 181.18 sec
2013-09-18 03:17:22,783 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 181.18 sec
2013-09-18 03:17:23,787 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 181.18 sec
2013-09-18 03:17:24,792 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 181.18 sec
2013-09-18 03:17:25,797 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 193.89 sec
2013-09-18 03:17:26,802 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 222.98 sec
2013-09-18 03:17:27,806 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 222.98 sec
2013-09-18 03:17:28,811 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 222.98 sec
2013-09-18 03:17:29,815 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 222.98 sec
2013-09-18 03:17:30,819 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 222.98 sec
2013-09-18 03:17:31,824 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 222.98 sec
2013-09-18 03:17:32,828 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 222.98 sec
2013-09-18 03:17:33,835 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 226.19 sec
2013-09-18 03:17:34,846 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 226.19 sec
MapReduce Total cumulative CPU time: 3 minutes 46 seconds 190 msec
Ended Job = job_201309172235_0325
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 226.19 sec HDFS Read: 7797536 HDFS Write: 1080 SUCCESS
Total MapReduce CPU Time Spent: 3 minutes 46 seconds 190 msec
OK
Time taken: 74.663 seconds, Fetched: 1 row(s)
hive> quit;

times: 2
query: SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_10m;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_17636@mturlrep13_201309180317_1915588567.txt
hive> SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_10m;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0326
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
2013-09-18 03:17:45,771 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:17:56,817 Stage-1 map = 7%, reduce = 0%
2013-09-18 03:17:59,831 Stage-1 map = 15%, reduce = 0%
2013-09-18 03:18:05,856 Stage-1 map = 29%, reduce = 0%
2013-09-18 03:18:11,880 Stage-1 map = 44%, reduce = 0%
2013-09-18 03:18:17,911 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:18,916 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:19,924 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:20,929 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:21,934 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:22,939 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:23,945 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:24,951 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:25,956 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:26,961 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:27,966 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:28,971 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:29,976 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:30,982 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:31,986 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 115.96 sec
2013-09-18 03:18:32,993 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 138.53 sec
2013-09-18 03:18:33,999 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 138.53 sec
2013-09-18 03:18:35,004 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 138.53 sec
2013-09-18 03:18:36,010 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 138.53 sec
2013-09-18 03:18:37,015 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 138.53 sec
2013-09-18 03:18:38,020 Stage-1 map = 96%, reduce = 0%, Cumulative CPU 192.67 sec
2013-09-18 03:18:39,024 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 220.35 sec
2013-09-18 03:18:40,029 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 220.35 sec
2013-09-18 03:18:41,034 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 220.35 sec
2013-09-18 03:18:42,040 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 223.54 sec
2013-09-18 03:18:43,046 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 223.54 sec
2013-09-18 03:18:44,052 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 223.54 sec
MapReduce Total cumulative CPU time: 3 minutes 43 seconds 540 msec
Ended Job = job_201309172235_0326
MapReduce Jobs Launched: 
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 223.54 sec HDFS Read: 7797536 HDFS Write: 1080 SUCCESS
Total MapReduce CPU Time Spent: 3 minutes 43 seconds 540 msec
OK
Time taken: 67.206 seconds, Fetched: 1 row(s)
hive> quit;

times: 3
query: SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_10m;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_18790@mturlrep13_201309180318_731576789.txt
hive> SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_10m;;
Total MapReduce jobs = 1
Launching Job 1 out of 1
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0327
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
2013-09-18 03:18:55,297 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:19:05,340 Stage-1 map = 4%, reduce = 0%
2013-09-18 03:19:08,352 Stage-1 map = 15%, reduce = 0%
2013-09-18 03:19:14,381 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:15,388 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:16,395 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:17,401 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:18,407 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:19,412 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:20,417 Stage-1 map = 36%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:21,422 Stage-1 map = 36%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:22,427 Stage-1 map = 36%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:23,432 Stage-1 map = 44%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:24,437 Stage-1 map = 44%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:25,443 Stage-1 map = 44%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:26,448 Stage-1 map = 44%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:27,452 Stage-1 map = 44%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:28,458 Stage-1 map = 44%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:29,463 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:30,468 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:31,473 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:32,478 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:33,484 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:34,489 Stage-1 map = 59%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:35,493 Stage-1 map = 70%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:36,498 Stage-1 map = 70%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:37,502 Stage-1 map = 70%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:38,507 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:39,511 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:40,516 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:41,520 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:42,525 Stage-1 map = 78%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:43,530 Stage-1 map = 78%, reduce = 0%, Cumulative CPU 77.56 sec
2013-09-18 03:19:44,536 Stage-1 map = 82%, reduce = 0%, Cumulative CPU 111.44 sec
2013-09-18 03:19:45,541 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 111.44 sec
2013-09-18 03:19:46,546 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 111.44 sec
2013-09-18 03:19:47,551 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 111.44 sec
2013-09-18 03:19:48,556 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 149.94 sec
2013-09-18 03:19:49,561 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 229.05 sec
2013-09-18 03:19:50,565 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 229.05 sec
2013-09-18 03:19:51,569 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 229.05 sec
2013-09-18 03:19:52,574 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 229.05 sec
2013-09-18 03:19:53,581 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 232.1 sec
2013-09-18 03:19:54,586 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 232.1 sec
MapReduce Total cumulative CPU time: 3 minutes 52 seconds 100 msec
Ended Job = job_201309172235_0327
MapReduce Jobs Launched: 
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 232.1 sec HDFS Read: 7797536 HDFS Write: 1080 SUCCESS
Total MapReduce CPU Time Spent: 3 minutes 52 seconds 100 msec
OK
Time taken: 68.586 seconds, Fetched: 1 row(s)
hive> quit;
-- many dumb aggregate functions.;


times: 1
query: SELECT SearchEngineID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_19971@mturlrep13_201309180319_1200397175.txt
hive> SELECT SearchEngineID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0328
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:20:11,947 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:20:18,978 Stage-1 map = 59%, reduce = 0%
2013-09-18 03:20:22,002 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:20:23,010 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:20:24,018 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:20:25,023 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:20:26,029 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:20:27,036 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:20:28,043 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:20:29,050 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 49.3 sec
2013-09-18 03:20:30,056 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 49.3 sec
2013-09-18 03:20:31,061 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 49.3 sec
2013-09-18 03:20:32,067 Stage-1 map = 100%, reduce = 57%, Cumulative CPU 49.3 sec
2013-09-18 03:20:33,072 Stage-1 map = 100%, reduce = 81%, Cumulative CPU 49.3 sec
2013-09-18 03:20:34,080 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 63.8 sec
2013-09-18 03:20:35,087 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 63.8 sec
2013-09-18 03:20:36,093 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 63.8 sec
MapReduce Total cumulative CPU time: 1 minutes 3 seconds 800 msec
Ended Job = job_201309172235_0328
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0329
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:20:39,567 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:20:46,589 Stage-2 map = 52%, reduce = 0%
2013-09-18 03:20:48,598 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.27 sec
2013-09-18 03:20:49,603 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.27 sec
2013-09-18 03:20:50,607 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.27 sec
2013-09-18 03:20:51,612 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.27 sec
2013-09-18 03:20:52,616 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.27 sec
2013-09-18 03:20:53,621 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.27 sec
2013-09-18 03:20:54,626 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 12.27 sec
2013-09-18 03:20:55,632 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 12.27 sec
2013-09-18 03:20:56,637 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 12.27 sec
2013-09-18 03:20:57,642 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 15.17 sec
2013-09-18 03:20:58,647 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 15.17 sec
MapReduce Total cumulative CPU time: 15 seconds 170 msec
Ended Job = job_201309172235_0329
MapReduce Jobs Launched: 
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 63.8 sec HDFS Read: 69312553 HDFS Write: 31841963 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 15.17 sec HDFS Read: 31842732 HDFS Write: 372 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 18 seconds 970 msec
OK
Time taken: 58.017 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT SearchEngineID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_22672@mturlrep13_201309180321_532082554.txt
hive> SELECT SearchEngineID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0330
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:21:09,059 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:21:16,090 Stage-1 map = 74%, reduce = 0%
2013-09-18 03:21:17,103 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 11.29 sec
2013-09-18 03:21:18,111 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.51 sec
2013-09-18 03:21:19,120 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.51 sec
2013-09-18 03:21:20,126 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.51 sec
2013-09-18 03:21:21,132 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.51 sec
2013-09-18 03:21:22,138 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.51 sec
2013-09-18 03:21:23,145 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.51 sec
2013-09-18 03:21:24,152 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.51 sec
2013-09-18 03:21:25,159 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.51 sec
2013-09-18 03:21:26,165 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.51 sec
2013-09-18 03:21:27,172 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.51 sec
2013-09-18 03:21:28,178 Stage-1 map = 100%, reduce = 82%, Cumulative CPU 48.51 sec
2013-09-18 03:21:29,184 Stage-1 map = 100%, reduce = 82%, Cumulative CPU 48.51 sec
2013-09-18 03:21:30,191 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.9 sec
2013-09-18 03:21:31,198 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.9 sec
MapReduce Total cumulative CPU time: 1 minutes 2 seconds 900 msec
Ended Job = job_201309172235_0330
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0331
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:21:34,779 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:21:41,804 Stage-2 map = 52%, reduce = 0%
2013-09-18 03:21:42,810 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.4 sec
2013-09-18 03:21:43,816 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.4 sec
2013-09-18 03:21:44,822 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.4 sec
2013-09-18 03:21:45,827 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.4 sec
2013-09-18 03:21:46,832 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.4 sec
2013-09-18 03:21:47,838 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.4 sec
2013-09-18 03:21:48,843 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.4 sec
2013-09-18 03:21:49,849 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 11.4 sec
2013-09-18 03:21:50,855 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 11.4 sec
2013-09-18 03:21:51,860 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 14.23 sec
2013-09-18 03:21:52,866 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 14.23 sec
MapReduce Total cumulative CPU time: 14 seconds 230 msec
Ended Job = job_201309172235_0331
MapReduce Jobs Launched: 
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 62.9 sec HDFS Read: 69312553 HDFS Write: 31841963 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 14.23 sec HDFS Read: 31842732 HDFS Write: 372 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 17 seconds 130 msec
OK
Time taken: 52.364 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT SearchEngineID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_24667@mturlrep13_201309180321_1625792841.txt
hive> SELECT SearchEngineID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0332
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:22:03,145 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:22:10,175 Stage-1 map = 74%, reduce = 0%
2013-09-18 03:22:11,188 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 11.62 sec
2013-09-18 03:22:12,196 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.16 sec
2013-09-18 03:22:13,204 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.16 sec
2013-09-18 03:22:14,211 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.16 sec
2013-09-18 03:22:15,217 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.16 sec
2013-09-18 03:22:16,223 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.16 sec
2013-09-18 03:22:17,229 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 48.16 sec
2013-09-18 03:22:18,236 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 48.16 sec
2013-09-18 03:22:19,243 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.16 sec
2013-09-18 03:22:20,249 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 48.16 sec
2013-09-18 03:22:21,255 Stage-1 map = 100%, reduce = 56%, Cumulative CPU 48.16 sec
2013-09-18 03:22:22,261 Stage-1 map = 100%, reduce = 81%, Cumulative CPU 48.16 sec
2013-09-18 03:22:23,269 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.71 sec
2013-09-18 03:22:24,275 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.71 sec
2013-09-18 03:22:25,281 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.71 sec
MapReduce Total cumulative CPU time: 1 minutes 2 seconds 710 msec
Ended Job = job_201309172235_0332
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0333
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:22:28,772 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:22:35,794 Stage-2 map = 52%, reduce = 0%
2013-09-18 03:22:37,803 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.58 sec
2013-09-18 03:22:38,808 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.58 sec
2013-09-18 03:22:39,813 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.58 sec
2013-09-18 03:22:40,817 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.58 sec
2013-09-18 03:22:41,822 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.58 sec
2013-09-18 03:22:42,826 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.58 sec
2013-09-18 03:22:43,831 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 11.58 sec
2013-09-18 03:22:44,837 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 11.58 sec
2013-09-18 03:22:45,843 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 11.58 sec
2013-09-18 03:22:46,848 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 14.44 sec
2013-09-18 03:22:47,853 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 14.44 sec
MapReduce Total cumulative CPU time: 14 seconds 440 msec
Ended Job = job_201309172235_0333
MapReduce Jobs Launched: 
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 62.71 sec HDFS Read: 69312553 HDFS Write: 31841963 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 14.44 sec HDFS Read: 31842732 HDFS Write: 372 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 17 seconds 150 msec
OK
Time taken: 53.155 seconds, Fetched: 10 row(s)
hive> quit;
-- complex aggregation; for large tables there may not be enough RAM.;


times: 1
query: SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_26648@mturlrep13_201309180322_1394561217.txt
hive> SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0334
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:23:04,981 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:23:12,010 Stage-1 map = 59%, reduce = 0%
2013-09-18 03:23:15,034 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:23:16,041 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:23:17,049 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:23:18,055 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:23:19,061 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:23:20,068 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:23:21,073 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:23:22,078 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 49.3 sec
2013-09-18 03:23:23,084 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 49.3 sec
2013-09-18 03:23:24,089 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 49.3 sec
2013-09-18 03:23:25,095 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 49.3 sec
2013-09-18 03:23:26,101 Stage-1 map = 100%, reduce = 76%, Cumulative CPU 49.3 sec
2013-09-18 03:23:27,106 Stage-1 map = 100%, reduce = 76%, Cumulative CPU 49.3 sec
2013-09-18 03:23:28,114 Stage-1 map = 100%, reduce = 88%, Cumulative CPU 57.83 sec
2013-09-18 03:23:29,122 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 66.46 sec
2013-09-18 03:23:30,129 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 66.46 sec
MapReduce Total cumulative CPU time: 1 minutes 6 seconds 460 msec
Ended Job = job_201309172235_0334
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0335
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:23:33,600 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:23:43,633 Stage-2 map = 50%, reduce = 0%
2013-09-18 03:23:46,644 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.06 sec
2013-09-18 03:23:47,649 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.06 sec
2013-09-18 03:23:48,654 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.06 sec
2013-09-18 03:23:49,659 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.06 sec
2013-09-18 03:23:50,664 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.06 sec
2013-09-18 03:23:51,668 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.06 sec
2013-09-18 03:23:52,674 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.06 sec
2013-09-18 03:23:53,679 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 17.06 sec
2013-09-18 03:23:54,685 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 17.06 sec
2013-09-18 03:23:55,690 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 17.06 sec
2013-09-18 03:23:56,696 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 21.88 sec
2013-09-18 03:23:57,701 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 21.88 sec
MapReduce Total cumulative CPU time: 21 seconds 880 msec
Ended Job = job_201309172235_0335
MapReduce Jobs Launched: 
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 66.46 sec HDFS Read: 112931901 HDFS Write: 72725701 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 21.88 sec HDFS Read: 72726470 HDFS Write: 417 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 28 seconds 340 msec
OK
Time taken: 62.907 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_28712@mturlrep13_201309180323_447001239.txt
hive> SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0336
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:24:06,957 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:24:14,997 Stage-1 map = 63%, reduce = 0%, Cumulative CPU 41.58 sec
2013-09-18 03:24:16,004 Stage-1 map = 63%, reduce = 0%, Cumulative CPU 41.58 sec
2013-09-18 03:24:17,015 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.71 sec
2013-09-18 03:24:18,023 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.71 sec
2013-09-18 03:24:19,029 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.71 sec
2013-09-18 03:24:20,034 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.71 sec
2013-09-18 03:24:21,040 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.71 sec
2013-09-18 03:24:22,047 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.71 sec
2013-09-18 03:24:23,052 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 51.71 sec
2013-09-18 03:24:24,058 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 51.71 sec
2013-09-18 03:24:25,064 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 51.71 sec
2013-09-18 03:24:26,070 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 51.71 sec
2013-09-18 03:24:27,075 Stage-1 map = 100%, reduce = 76%, Cumulative CPU 51.71 sec
2013-09-18 03:24:28,081 Stage-1 map = 100%, reduce = 76%, Cumulative CPU 51.71 sec
2013-09-18 03:24:29,087 Stage-1 map = 100%, reduce = 76%, Cumulative CPU 51.71 sec
2013-09-18 03:24:30,095 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 69.32 sec
2013-09-18 03:24:31,104 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 69.32 sec
2013-09-18 03:24:32,110 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 69.32 sec
MapReduce Total cumulative CPU time: 1 minutes 9 seconds 320 msec
Ended Job = job_201309172235_0336
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0337
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:24:34,678 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:24:45,718 Stage-2 map = 50%, reduce = 0%
2013-09-18 03:24:48,730 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.64 sec
2013-09-18 03:24:49,735 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.64 sec
2013-09-18 03:24:50,739 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.64 sec
2013-09-18 03:24:51,743 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.64 sec
2013-09-18 03:24:52,748 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.64 sec
2013-09-18 03:24:53,752 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.64 sec
2013-09-18 03:24:54,758 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 17.64 sec
2013-09-18 03:24:55,763 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 17.64 sec
2013-09-18 03:24:56,769 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 17.64 sec
2013-09-18 03:24:57,774 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 22.51 sec
2013-09-18 03:24:58,778 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 22.51 sec
2013-09-18 03:24:59,784 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 22.51 sec
MapReduce Total cumulative CPU time: 22 seconds 510 msec
Ended Job = job_201309172235_0337
MapReduce Jobs Launched: 
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 69.32 sec HDFS Read: 112931901 HDFS Write: 72725701 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 22.51 sec HDFS Read: 72726466 HDFS Write: 417 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 31 seconds 830 msec
OK
Time taken: 60.202 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_30775@mturlrep13_201309180325_1495569206.txt
hive> SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0338
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:25:10,560 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:25:17,591 Stage-1 map = 59%, reduce = 0%
2013-09-18 03:25:19,608 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 53.02 sec
2013-09-18 03:25:20,616 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 53.02 sec
2013-09-18 03:25:21,624 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 53.02 sec
2013-09-18 03:25:22,630 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 53.02 sec
2013-09-18 03:25:23,635 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 53.02 sec
2013-09-18 03:25:24,641 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 53.02 sec
2013-09-18 03:25:25,647 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 53.02 sec
2013-09-18 03:25:26,653 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 53.02 sec
2013-09-18 03:25:27,659 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 53.02 sec
2013-09-18 03:25:28,664 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 53.02 sec
2013-09-18 03:25:29,670 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 53.02 sec
2013-09-18 03:25:30,676 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 53.02 sec
2013-09-18 03:25:31,681 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 53.02 sec
2013-09-18 03:25:32,689 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 61.27 sec
2013-09-18 03:25:33,713 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 69.91 sec
2013-09-18 03:25:34,719 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 69.91 sec
MapReduce Total cumulative CPU time: 1 minutes 9 seconds 910 msec
Ended Job = job_201309172235_0338
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0339
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:25:37,201 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:25:48,240 Stage-2 map = 50%, reduce = 0%
2013-09-18 03:25:51,252 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 16.68 sec
2013-09-18 03:25:52,258 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 16.68 sec
2013-09-18 03:25:53,262 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 16.68 sec
2013-09-18 03:25:54,267 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 16.68 sec
2013-09-18 03:25:55,272 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 16.68 sec
2013-09-18 03:25:56,277 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 16.68 sec
2013-09-18 03:25:57,283 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 16.68 sec
2013-09-18 03:25:58,288 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 16.68 sec
2013-09-18 03:25:59,293 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 16.68 sec
2013-09-18 03:26:00,299 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 21.77 sec
2013-09-18 03:26:01,304 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 21.77 sec
MapReduce Total cumulative CPU time: 21 seconds 770 msec
Ended Job = job_201309172235_0339
MapReduce Jobs Launched: 
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 69.91 sec HDFS Read: 112931901 HDFS Write: 72725701 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 21.77 sec HDFS Read: 72726470 HDFS Write: 417 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 31 seconds 680 msec
OK
Time taken: 59.653 seconds, Fetched: 10 row(s)
hive> quit;
-- aggregation by two fields that does not really aggregate anything. It cannot be executed for large tables.;


times: 1
query: SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_1150@mturlrep13_201309180326_1951977116.txt
hive> SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0340
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:26:18,728 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:26:26,762 Stage-1 map = 15%, reduce = 0%
2013-09-18 03:26:29,776 Stage-1 map = 29%, reduce = 0%
2013-09-18 03:26:32,790 Stage-1 map = 59%, reduce = 0%
2013-09-18 03:26:35,804 Stage-1 map = 74%, reduce = 0%
2013-09-18 03:26:38,824 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 24.87 sec
2013-09-18 03:26:39,832 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 24.87 sec
2013-09-18 03:26:40,840 Stage-1 map = 96%, reduce = 0%, Cumulative CPU 80.51 sec
2013-09-18 03:26:41,846 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 108.69 sec
2013-09-18 03:26:42,852 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 108.69 sec
2013-09-18 03:26:43,857 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 108.69 sec
2013-09-18 03:26:44,862 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 108.69 sec
2013-09-18 03:26:45,868 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 108.69 sec
2013-09-18 03:26:46,873 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 108.69 sec
2013-09-18 03:26:47,879 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 108.69 sec
2013-09-18 03:26:48,884 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 108.69 sec
2013-09-18 03:26:49,890 Stage-1 map = 100%, reduce = 50%, Cumulative CPU 108.69 sec
2013-09-18 03:26:50,895 Stage-1 map = 100%, reduce = 50%, Cumulative CPU 108.69 sec
2013-09-18 03:26:51,901 Stage-1 map = 100%, reduce = 50%, Cumulative CPU 108.69 sec
2013-09-18 03:26:52,907 Stage-1 map = 100%, reduce = 69%, Cumulative CPU 108.69 sec
2013-09-18 03:26:53,913 Stage-1 map = 100%, reduce = 69%, Cumulative CPU 108.69 sec
2013-09-18 03:26:54,918 Stage-1 map = 100%, reduce = 69%, Cumulative CPU 108.69 sec
2013-09-18 03:26:55,924 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 108.69 sec
2013-09-18 03:26:56,930 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 108.69 sec
2013-09-18 03:26:57,936 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 108.69 sec
2013-09-18 03:26:58,941 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 108.69 sec
2013-09-18 03:26:59,947 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 108.69 sec
2013-09-18 03:27:00,953 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 108.69 sec
2013-09-18 03:27:01,958 Stage-1 map = 100%, reduce = 84%, Cumulative CPU 108.69 sec
2013-09-18 03:27:02,963 Stage-1 map = 100%, reduce = 84%, Cumulative CPU 108.69 sec
2013-09-18 03:27:03,968 Stage-1 map = 100%, reduce = 84%, Cumulative CPU 108.69 sec
2013-09-18 03:27:04,974 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 108.69 sec
2013-09-18 03:27:05,980 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 108.69 sec
2013-09-18 03:27:06,986 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 108.69 sec
2013-09-18 03:27:07,992 Stage-1 map = 100%, reduce = 94%, Cumulative CPU 108.69 sec
2013-09-18 03:27:08,998 Stage-1 map = 100%, reduce = 94%, Cumulative CPU 108.69 sec
2013-09-18 03:27:11,596 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 108.69 sec
2013-09-18 03:27:12,610 Stage-1 map = 100%, reduce = 98%, Cumulative CPU 108.69 sec
2013-09-18 03:27:13,618 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 170.18 sec
2013-09-18 03:27:14,642 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 170.18 sec
MapReduce Total cumulative CPU time: 2 minutes 50 seconds 180 msec
Ended Job = job_201309172235_0340
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0341
Hadoop job information for Stage-2: number of mappers: 2; number of reducers: 1
2013-09-18 03:27:18,241 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:27:31,283 Stage-2 map = 36%, reduce = 0%
2013-09-18 03:27:40,311 Stage-2 map = 72%, reduce = 0%
2013-09-18 03:27:42,320 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 30.17 sec
2013-09-18 03:27:43,325 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 30.17 sec
2013-09-18 03:27:44,330 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 30.17 sec
2013-09-18 03:27:45,335 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 30.17 sec
2013-09-18 03:27:46,341 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 30.17 sec
2013-09-18 03:27:47,346 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 30.17 sec
2013-09-18 03:27:48,350 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 30.17 sec
2013-09-18 03:27:49,355 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 30.17 sec
2013-09-18 03:27:50,360 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 30.17 sec
2013-09-18 03:27:51,365 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 30.17 sec
2013-09-18 03:27:52,370 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 30.17 sec
2013-09-18 03:27:53,375 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 30.17 sec
2013-09-18 03:27:54,380 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 30.17 sec
2013-09-18 03:27:55,385 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 30.17 sec
2013-09-18 03:27:56,390 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 30.17 sec
2013-09-18 03:27:57,395 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 30.17 sec
2013-09-18 03:27:58,400 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 30.17 sec
2013-09-18 03:27:59,405 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 78.9 sec
2013-09-18 03:28:00,409 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 78.9 sec
2013-09-18 03:28:01,414 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 78.9 sec
2013-09-18 03:28:02,418 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 78.9 sec
2013-09-18 03:28:03,423 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 78.9 sec
2013-09-18 03:28:04,427 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 78.9 sec
2013-09-18 03:28:05,432 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 78.9 sec
2013-09-18 03:28:06,437 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 78.9 sec
2013-09-18 03:28:07,441 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 78.9 sec
2013-09-18 03:28:08,446 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 78.9 sec
2013-09-18 03:28:09,450 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 78.9 sec
2013-09-18 03:28:10,455 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 78.9 sec
2013-09-18 03:28:11,464 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 78.9 sec
2013-09-18 03:28:12,469 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 78.9 sec
2013-09-18 03:28:13,473 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 89.45 sec
2013-09-18 03:28:14,478 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 89.45 sec
2013-09-18 03:28:15,483 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 89.45 sec
MapReduce Total cumulative CPU time: 1 minutes 29 seconds 450 msec
Ended Job = job_201309172235_0341
MapReduce Jobs Launched: 
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 170.18 sec HDFS Read: 85707829 HDFS Write: 413932232 SUCCESS
Job 1: Map: 2 Reduce: 1 Cumulative CPU: 89.45 sec HDFS Read: 413942944 HDFS Write: 420 SUCCESS
Total MapReduce CPU Time Spent: 4 minutes 19 seconds 630 msec
OK
Time taken: 127.206 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
|
|||
|
query: SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_3393@mturlrep13_201309180328_1674206959.txt
|
|||
|
hive> SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0342
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:28:25,773 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:28:32,804 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 03:28:35,821 Stage-1 map = 41%, reduce = 0%
|
|||
|
2013-09-18 03:28:38,834 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:28:41,849 Stage-1 map = 74%, reduce = 0%
|
|||
|
2013-09-18 03:28:44,868 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 25.88 sec
|
|||
|
2013-09-18 03:28:45,875 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 53.48 sec
|
|||
|
2013-09-18 03:28:46,883 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 53.48 sec
|
|||
|
2013-09-18 03:28:47,888 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:48,894 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:49,908 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:50,913 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:51,919 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:52,925 Stage-1 map = 100%, reduce = 21%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:53,931 Stage-1 map = 100%, reduce = 21%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:54,936 Stage-1 map = 100%, reduce = 21%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:55,942 Stage-1 map = 100%, reduce = 50%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:56,948 Stage-1 map = 100%, reduce = 50%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:57,954 Stage-1 map = 100%, reduce = 50%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:58,960 Stage-1 map = 100%, reduce = 69%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:28:59,966 Stage-1 map = 100%, reduce = 69%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:00,971 Stage-1 map = 100%, reduce = 69%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:01,977 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:02,983 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:03,989 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:04,995 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:06,000 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:07,005 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:08,010 Stage-1 map = 100%, reduce = 84%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:09,016 Stage-1 map = 100%, reduce = 84%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:10,021 Stage-1 map = 100%, reduce = 84%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:11,026 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:12,032 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:13,037 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:14,043 Stage-1 map = 100%, reduce = 94%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:15,048 Stage-1 map = 100%, reduce = 94%, Cumulative CPU 112.03 sec
|
|||
|
2013-09-18 03:29:16,055 Stage-1 map = 100%, reduce = 94%, Cumulative CPU 165.0 sec
|
|||
|
2013-09-18 03:29:19,092 Stage-1 map = 100%, reduce = 99%, Cumulative CPU 168.62 sec
|
|||
|
2013-09-18 03:29:20,097 Stage-1 map = 100%, reduce = 99%, Cumulative CPU 168.62 sec
|
|||
|
2013-09-18 03:29:21,103 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 173.79 sec
|
|||
|
2013-09-18 03:29:22,108 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 173.79 sec
MapReduce Total cumulative CPU time: 2 minutes 53 seconds 790 msec
Ended Job = job_201309172235_0342
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0343
Hadoop job information for Stage-2: number of mappers: 2; number of reducers: 1
2013-09-18 03:29:25,610 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:29:38,653 Stage-2 map = 36%, reduce = 0%
|
|||
|
2013-09-18 03:29:47,683 Stage-2 map = 72%, reduce = 0%
|
|||
|
2013-09-18 03:29:49,691 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:29:50,696 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:29:51,701 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:29:52,706 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:29:53,711 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:29:54,715 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:29:55,720 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:29:56,725 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:29:57,730 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:29:58,735 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:29:59,739 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:30:00,743 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:30:01,748 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:30:02,753 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:30:03,758 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:30:04,762 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:30:05,767 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:30:06,771 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 28.46 sec
|
|||
|
2013-09-18 03:30:07,775 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 77.77 sec
|
|||
|
2013-09-18 03:30:08,780 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 77.77 sec
|
|||
|
2013-09-18 03:30:09,784 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 77.77 sec
|
|||
|
2013-09-18 03:30:10,788 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 77.77 sec
|
|||
|
2013-09-18 03:30:11,792 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 77.77 sec
|
|||
|
2013-09-18 03:30:12,797 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 77.77 sec
|
|||
|
2013-09-18 03:30:13,802 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 77.77 sec
|
|||
|
2013-09-18 03:30:14,806 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 77.77 sec
|
|||
|
2013-09-18 03:30:15,831 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 83.0 sec
|
|||
|
2013-09-18 03:30:16,836 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 83.0 sec
|
|||
|
2013-09-18 03:30:18,119 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 83.0 sec
|
|||
|
2013-09-18 03:30:19,123 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 83.0 sec
|
|||
|
2013-09-18 03:30:20,128 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 83.0 sec
|
|||
|
2013-09-18 03:30:21,133 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 88.85 sec
|
|||
|
2013-09-18 03:30:22,138 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 88.85 sec
MapReduce Total cumulative CPU time: 1 minutes 28 seconds 850 msec
Ended Job = job_201309172235_0343
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 173.79 sec HDFS Read: 85707829 HDFS Write: 413932232 SUCCESS
Job 1: Map: 2 Reduce: 1 Cumulative CPU: 88.85 sec HDFS Read: 413942944 HDFS Write: 420 SUCCESS
Total MapReduce CPU Time Spent: 4 minutes 22 seconds 640 msec
OK
Time taken: 124.701 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_6613@mturlrep13_201309180330_1704971326.txt
hive> SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0344
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:30:32,334 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:30:39,366 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 03:30:42,380 Stage-1 map = 41%, reduce = 0%
|
|||
|
2013-09-18 03:30:45,392 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:30:48,407 Stage-1 map = 78%, reduce = 0%
|
|||
|
2013-09-18 03:30:51,428 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 25.55 sec
|
|||
|
2013-09-18 03:30:52,435 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 52.78 sec
|
|||
|
2013-09-18 03:30:53,442 Stage-1 map = 96%, reduce = 0%, Cumulative CPU 81.04 sec
|
|||
|
2013-09-18 03:30:54,447 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:30:55,453 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:30:56,458 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:30:57,463 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:30:58,469 Stage-1 map = 100%, reduce = 13%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:30:59,474 Stage-1 map = 100%, reduce = 21%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:00,480 Stage-1 map = 100%, reduce = 21%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:01,485 Stage-1 map = 100%, reduce = 42%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:02,491 Stage-1 map = 100%, reduce = 50%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:03,497 Stage-1 map = 100%, reduce = 50%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:04,503 Stage-1 map = 100%, reduce = 52%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:05,509 Stage-1 map = 100%, reduce = 70%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:06,514 Stage-1 map = 100%, reduce = 70%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:07,520 Stage-1 map = 100%, reduce = 72%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:08,525 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:09,531 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:10,536 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:11,541 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:12,547 Stage-1 map = 100%, reduce = 79%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:13,552 Stage-1 map = 100%, reduce = 82%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:14,557 Stage-1 map = 100%, reduce = 84%, Cumulative CPU 110.99 sec
|
|||
|
2013-09-18 03:31:15,564 Stage-1 map = 100%, reduce = 84%, Cumulative CPU 151.68 sec
|
|||
|
2013-09-18 03:31:16,570 Stage-1 map = 100%, reduce = 87%, Cumulative CPU 151.68 sec
|
|||
|
2013-09-18 03:31:17,575 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 151.68 sec
|
|||
|
2013-09-18 03:31:18,580 Stage-1 map = 100%, reduce = 89%, Cumulative CPU 151.68 sec
|
|||
|
2013-09-18 03:31:19,585 Stage-1 map = 100%, reduce = 92%, Cumulative CPU 151.68 sec
|
|||
|
2013-09-18 03:31:20,590 Stage-1 map = 100%, reduce = 94%, Cumulative CPU 151.68 sec
|
|||
|
2013-09-18 03:31:21,595 Stage-1 map = 100%, reduce = 94%, Cumulative CPU 151.68 sec
|
|||
|
2013-09-18 03:31:22,600 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 151.68 sec
|
|||
|
2013-09-18 03:31:25,469 Stage-1 map = 100%, reduce = 99%, Cumulative CPU 161.77 sec
|
|||
|
2013-09-18 03:31:26,474 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 173.12 sec
|
|||
|
2013-09-18 03:31:27,479 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 173.12 sec
|
|||
|
2013-09-18 03:31:28,485 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 173.12 sec
MapReduce Total cumulative CPU time: 2 minutes 53 seconds 120 msec
Ended Job = job_201309172235_0344
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0345
Hadoop job information for Stage-2: number of mappers: 2; number of reducers: 1
2013-09-18 03:31:30,990 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:31:45,038 Stage-2 map = 36%, reduce = 0%
|
|||
|
2013-09-18 03:31:51,058 Stage-2 map = 48%, reduce = 0%
|
|||
|
2013-09-18 03:31:54,070 Stage-2 map = 72%, reduce = 0%
|
|||
|
2013-09-18 03:31:55,075 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:31:56,080 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:31:57,086 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:31:58,090 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:31:59,096 Stage-2 map = 74%, reduce = 0%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:00,101 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:01,106 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:02,112 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:03,117 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:04,122 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:05,127 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:06,132 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:07,137 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:08,141 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:09,147 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:10,151 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 27.96 sec
|
|||
|
2013-09-18 03:32:11,156 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 74.69 sec
|
|||
|
2013-09-18 03:32:12,161 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 74.69 sec
|
|||
|
2013-09-18 03:32:13,165 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 74.69 sec
|
|||
|
2013-09-18 03:32:14,170 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 74.69 sec
|
|||
|
2013-09-18 03:32:15,174 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 74.69 sec
|
|||
|
2013-09-18 03:32:16,179 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 75.98 sec
|
|||
|
2013-09-18 03:32:17,183 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 75.98 sec
|
|||
|
2013-09-18 03:32:18,188 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 75.98 sec
|
|||
|
2013-09-18 03:32:19,192 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 75.98 sec
|
|||
|
2013-09-18 03:32:20,202 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 75.98 sec
|
|||
|
2013-09-18 03:32:21,207 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 75.98 sec
|
|||
|
2013-09-18 03:32:22,211 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 75.98 sec
|
|||
|
2013-09-18 03:32:23,215 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 75.98 sec
|
|||
|
2013-09-18 03:32:24,220 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 75.98 sec
|
|||
|
2013-09-18 03:32:25,225 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 85.63 sec
|
|||
|
2013-09-18 03:32:26,229 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 85.63 sec
|
|||
|
2013-09-18 03:32:27,234 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 85.63 sec
MapReduce Total cumulative CPU time: 1 minutes 25 seconds 630 msec
Ended Job = job_201309172235_0345
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 173.12 sec HDFS Read: 85707829 HDFS Write: 413932232 SUCCESS
Job 1: Map: 2 Reduce: 1 Cumulative CPU: 85.63 sec HDFS Read: 413942944 HDFS Write: 420 SUCCESS
Total MapReduce CPU Time Spent: 4 minutes 18 seconds 750 msec
OK
Time taken: 123.291 seconds, Fetched: 10 row(s)
hive> quit;
-- the same, but also without filtering.;


times: 1
query: SELECT URL, count(*) AS c FROM hits_10m GROUP BY URL ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_8833@mturlrep13_201309180332_1808208086.txt
hive> SELECT URL, count(*) AS c FROM hits_10m GROUP BY URL ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0346
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:32:43,874 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:32:50,903 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 03:32:53,915 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 03:32:57,932 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:33:00,946 Stage-1 map = 74%, reduce = 0%
|
|||
|
2013-09-18 03:33:03,965 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 25.99 sec
|
|||
|
2013-09-18 03:33:04,972 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 25.99 sec
|
|||
|
2013-09-18 03:33:05,979 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 54.38 sec
|
|||
|
2013-09-18 03:33:06,986 Stage-1 map = 96%, reduce = 0%, Cumulative CPU 83.01 sec
|
|||
|
2013-09-18 03:33:07,992 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.86 sec
|
|||
|
2013-09-18 03:33:08,998 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.86 sec
|
|||
|
2013-09-18 03:33:10,003 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.86 sec
|
|||
|
2013-09-18 03:33:11,010 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 112.86 sec
|
|||
|
2013-09-18 03:33:12,016 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 112.86 sec
|
|||
|
2013-09-18 03:33:13,023 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 112.86 sec
|
|||
|
2013-09-18 03:33:14,029 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 112.86 sec
|
|||
|
2013-09-18 03:33:15,036 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 112.86 sec
|
|||
|
2013-09-18 03:33:16,042 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:17,049 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:18,055 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:19,061 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:20,068 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:21,075 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:22,081 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:23,087 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:24,094 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:25,099 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:26,106 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:27,112 Stage-1 map = 100%, reduce = 95%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:28,118 Stage-1 map = 100%, reduce = 95%, Cumulative CPU 136.61 sec
|
|||
|
2013-09-18 03:33:29,126 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 170.16 sec
|
|||
|
2013-09-18 03:33:30,132 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 170.16 sec
|
|||
|
2013-09-18 03:33:31,138 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 170.16 sec
MapReduce Total cumulative CPU time: 2 minutes 50 seconds 160 msec
Ended Job = job_201309172235_0346
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0347
Hadoop job information for Stage-2: number of mappers: 2; number of reducers: 1
2013-09-18 03:33:34,663 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:33:41,687 Stage-2 map = 25%, reduce = 0%
|
|||
|
2013-09-18 03:33:43,695 Stage-2 map = 50%, reduce = 0%, Cumulative CPU 13.37 sec
|
|||
|
2013-09-18 03:33:44,701 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 13.37 sec
|
|||
|
2013-09-18 03:33:45,705 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 13.37 sec
|
|||
|
2013-09-18 03:33:46,711 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 13.37 sec
|
|||
|
2013-09-18 03:33:47,716 Stage-2 map = 87%, reduce = 0%, Cumulative CPU 13.37 sec
|
|||
|
2013-09-18 03:33:48,720 Stage-2 map = 87%, reduce = 0%, Cumulative CPU 13.37 sec
|
|||
|
2013-09-18 03:33:49,726 Stage-2 map = 87%, reduce = 0%, Cumulative CPU 13.37 sec
|
|||
|
2013-09-18 03:33:50,731 Stage-2 map = 87%, reduce = 17%, Cumulative CPU 13.37 sec
|
|||
|
2013-09-18 03:33:51,736 Stage-2 map = 87%, reduce = 17%, Cumulative CPU 13.37 sec
|
|||
|
2013-09-18 03:33:52,741 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 36.76 sec
|
|||
|
2013-09-18 03:33:53,746 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 36.76 sec
|
|||
|
2013-09-18 03:33:54,753 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 36.76 sec
|
|||
|
2013-09-18 03:33:55,759 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 36.76 sec
|
|||
|
2013-09-18 03:33:56,765 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 36.76 sec
|
|||
|
2013-09-18 03:33:57,771 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 36.76 sec
|
|||
|
2013-09-18 03:33:58,776 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 36.76 sec
|
|||
|
2013-09-18 03:33:59,782 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 36.76 sec
|
|||
|
2013-09-18 03:34:00,787 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 36.76 sec
|
|||
|
2013-09-18 03:34:01,793 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 44.31 sec
|
|||
|
2013-09-18 03:34:02,799 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 44.31 sec
|
|||
|
2013-09-18 03:34:03,804 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 44.31 sec
MapReduce Total cumulative CPU time: 44 seconds 310 msec
Ended Job = job_201309172235_0347
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 170.16 sec HDFS Read: 109451651 HDFS Write: 399298510 SUCCESS
Job 1: Map: 2 Reduce: 1 Cumulative CPU: 44.31 sec HDFS Read: 399308167 HDFS Write: 445 SUCCESS
Total MapReduce CPU Time Spent: 3 minutes 34 seconds 470 msec
OK
Time taken: 89.931 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT URL, count(*) AS c FROM hits_10m GROUP BY URL ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_10716@mturlrep13_201309180334_544608212.txt
hive> SELECT URL, count(*) AS c FROM hits_10m GROUP BY URL ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0348
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:34:13,365 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:34:21,405 Stage-1 map = 18%, reduce = 0%
|
|||
|
2013-09-18 03:34:24,418 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 03:34:27,431 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:34:30,444 Stage-1 map = 78%, reduce = 0%
|
|||
|
2013-09-18 03:34:33,463 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 25.62 sec
|
|||
|
2013-09-18 03:34:34,469 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 25.62 sec
|
|||
|
2013-09-18 03:34:35,477 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 25.62 sec
|
|||
|
2013-09-18 03:34:36,485 Stage-1 map = 96%, reduce = 0%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:37,490 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:38,495 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:39,500 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:40,506 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:41,512 Stage-1 map = 100%, reduce = 13%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:42,518 Stage-1 map = 100%, reduce = 13%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:43,523 Stage-1 map = 100%, reduce = 13%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:44,529 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:45,534 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:46,540 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:47,546 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:48,552 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:49,559 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:50,565 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:51,570 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:52,577 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:53,583 Stage-1 map = 100%, reduce = 85%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:54,588 Stage-1 map = 100%, reduce = 85%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:55,593 Stage-1 map = 100%, reduce = 85%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:56,599 Stage-1 map = 100%, reduce = 95%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:57,605 Stage-1 map = 100%, reduce = 95%, Cumulative CPU 114.36 sec
|
|||
|
2013-09-18 03:34:58,612 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 168.31 sec
|
|||
|
2013-09-18 03:34:59,618 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 168.31 sec
|
|||
|
2013-09-18 03:35:00,625 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 168.31 sec
MapReduce Total cumulative CPU time: 2 minutes 48 seconds 310 msec
Ended Job = job_201309172235_0348
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0349
Hadoop job information for Stage-2: number of mappers: 2; number of reducers: 1
2013-09-18 03:35:04,093 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:35:11,115 Stage-2 map = 25%, reduce = 0%
|
|||
|
2013-09-18 03:35:14,127 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 14.31 sec
|
|||
|
2013-09-18 03:35:15,135 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 14.31 sec
|
|||
|
2013-09-18 03:35:16,141 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 28.98 sec
|
|||
|
2013-09-18 03:35:17,146 Stage-2 map = 87%, reduce = 0%, Cumulative CPU 28.98 sec
|
|||
|
2013-09-18 03:35:18,152 Stage-2 map = 87%, reduce = 0%, Cumulative CPU 28.98 sec
|
|||
|
2013-09-18 03:35:19,158 Stage-2 map = 87%, reduce = 0%, Cumulative CPU 28.98 sec
|
|||
|
2013-09-18 03:35:20,163 Stage-2 map = 87%, reduce = 0%, Cumulative CPU 28.98 sec
|
|||
|
2013-09-18 03:35:21,169 Stage-2 map = 87%, reduce = 17%, Cumulative CPU 28.98 sec
|
|||
|
2013-09-18 03:35:22,174 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.51 sec
|
|||
|
2013-09-18 03:35:23,178 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.51 sec
|
|||
|
2013-09-18 03:35:24,183 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.51 sec
|
|||
|
2013-09-18 03:35:25,187 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.51 sec
|
|||
|
2013-09-18 03:35:26,192 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.51 sec
|
|||
|
2013-09-18 03:35:27,197 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.51 sec
|
|||
|
2013-09-18 03:35:28,202 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.51 sec
|
|||
|
2013-09-18 03:35:29,207 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.51 sec
|
|||
|
2013-09-18 03:35:30,212 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 38.51 sec
|
|||
|
2013-09-18 03:35:31,217 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 38.51 sec
|
|||
|
2013-09-18 03:35:32,222 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 46.15 sec
|
|||
|
2013-09-18 03:35:33,228 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 46.15 sec
MapReduce Total cumulative CPU time: 46 seconds 150 msec
Ended Job = job_201309172235_0349
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 168.31 sec HDFS Read: 109451651 HDFS Write: 399298510 SUCCESS
Job 1: Map: 2 Reduce: 1 Cumulative CPU: 46.15 sec HDFS Read: 399308173 HDFS Write: 445 SUCCESS
Total MapReduce CPU Time Spent: 3 minutes 34 seconds 460 msec
OK
Time taken: 87.6 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT URL, count(*) AS c FROM hits_10m GROUP BY URL ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_13289@mturlrep13_201309180335_999822129.txt
hive> SELECT URL, count(*) AS c FROM hits_10m GROUP BY URL ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0350
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:35:43,626 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:35:50,655 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 03:35:53,668 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 03:35:56,682 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:35:59,697 Stage-1 map = 74%, reduce = 0%
|
|||
|
2013-09-18 03:36:02,709 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 03:36:06,735 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 116.11 sec
|
|||
|
2013-09-18 03:36:07,740 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 116.11 sec
|
|||
|
2013-09-18 03:36:08,747 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 116.11 sec
|
|||
|
2013-09-18 03:36:09,752 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 116.11 sec
|
|||
|
2013-09-18 03:36:10,758 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 116.11 sec
|
|||
|
2013-09-18 03:36:11,764 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 116.11 sec
|
|||
|
2013-09-18 03:36:12,769 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 116.11 sec
|
|||
|
2013-09-18 03:36:13,779 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 116.11 sec
|
|||
|
2013-09-18 03:36:14,784 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 116.11 sec
|
|||
|
2013-09-18 03:36:15,790 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 116.11 sec
|
|||
|
2013-09-18 03:36:16,795 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:17,801 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:18,808 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:19,814 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:20,820 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:21,826 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:22,832 Stage-1 map = 100%, reduce = 85%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:23,838 Stage-1 map = 100%, reduce = 85%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:24,843 Stage-1 map = 100%, reduce = 85%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:25,849 Stage-1 map = 100%, reduce = 94%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:26,854 Stage-1 map = 100%, reduce = 94%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:27,860 Stage-1 map = 100%, reduce = 94%, Cumulative CPU 130.52 sec
|
|||
|
2013-09-18 03:36:28,867 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 172.7 sec
|
|||
|
2013-09-18 03:36:29,873 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 172.7 sec
MapReduce Total cumulative CPU time: 2 minutes 52 seconds 700 msec
Ended Job = job_201309172235_0350
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0351
Hadoop job information for Stage-2: number of mappers: 2; number of reducers: 1
2013-09-18 03:36:33,293 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:36:40,314 Stage-2 map = 25%, reduce = 0%
|
|||
|
2013-09-18 03:36:43,325 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 13.65 sec
|
|||
|
2013-09-18 03:36:44,330 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 13.65 sec
|
|||
|
2013-09-18 03:36:45,335 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 13.65 sec
|
|||
|
2013-09-18 03:36:46,340 Stage-2 map = 87%, reduce = 0%, Cumulative CPU 13.65 sec
|
|||
|
2013-09-18 03:36:47,344 Stage-2 map = 87%, reduce = 0%, Cumulative CPU 13.65 sec
|
|||
|
2013-09-18 03:36:48,349 Stage-2 map = 87%, reduce = 0%, Cumulative CPU 13.65 sec
|
|||
|
2013-09-18 03:36:49,354 Stage-2 map = 87%, reduce = 0%, Cumulative CPU 13.65 sec
|
|||
|
2013-09-18 03:36:50,359 Stage-2 map = 87%, reduce = 17%, Cumulative CPU 13.65 sec
|
|||
|
2013-09-18 03:36:51,364 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 37.93 sec
|
|||
|
2013-09-18 03:36:52,368 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 37.93 sec
|
|||
|
2013-09-18 03:36:53,373 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 37.93 sec
|
|||
|
2013-09-18 03:36:54,377 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 37.93 sec
|
|||
|
2013-09-18 03:36:55,383 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 37.93 sec
|
|||
|
2013-09-18 03:36:56,388 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 37.93 sec
|
|||
|
2013-09-18 03:36:57,392 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 37.93 sec
|
|||
|
2013-09-18 03:36:58,397 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 37.93 sec
|
|||
|
2013-09-18 03:36:59,402 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 37.93 sec
|
|||
|
2013-09-18 03:37:00,407 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 45.36 sec
|
|||
|
2013-09-18 03:37:01,413 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 45.36 sec
|
|||
|
2013-09-18 03:37:02,418 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 45.36 sec
MapReduce Total cumulative CPU time: 45 seconds 360 msec
Ended Job = job_201309172235_0351
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 172.7 sec HDFS Read: 109451651 HDFS Write: 399298510 SUCCESS
Job 1: Map: 2 Reduce: 1 Cumulative CPU: 45.36 sec HDFS Read: 399308173 HDFS Write: 445 SUCCESS
Total MapReduce CPU Time Spent: 3 minutes 38 seconds 60 msec
OK
Time taken: 87.325 seconds, Fetched: 10 row(s)
hive> quit;
-- aggregation by URL.;


times: 1
query: SELECT 1, URL, count(*) AS c FROM hits_10m GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_15219@mturlrep13_201309180337_1137207213.txt
hive> SELECT 1, URL, count(*) AS c FROM hits_10m GROUP BY 1, URL ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0352
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:37:21,097 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:37:29,132 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 03:37:32,145 Stage-1 map = 40%, reduce = 0%
|
|||
|
2013-09-18 03:37:35,160 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:37:38,174 Stage-1 map = 74%, reduce = 0%
|
|||
|
2013-09-18 03:37:41,187 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 03:37:45,212 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:46,218 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:47,225 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:48,230 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:49,235 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:50,241 Stage-1 map = 100%, reduce = 4%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:51,247 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:52,252 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:53,259 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:54,265 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:55,271 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:56,278 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:57,283 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:58,289 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:37:59,295 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:38:00,301 Stage-1 map = 100%, reduce = 73%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:38:01,307 Stage-1 map = 100%, reduce = 73%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:38:02,312 Stage-1 map = 100%, reduce = 73%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:38:03,319 Stage-1 map = 100%, reduce = 82%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:38:04,324 Stage-1 map = 100%, reduce = 82%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:38:05,331 Stage-1 map = 100%, reduce = 82%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:38:06,337 Stage-1 map = 100%, reduce = 91%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:38:07,343 Stage-1 map = 100%, reduce = 91%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:38:08,348 Stage-1 map = 100%, reduce = 91%, Cumulative CPU 116.3 sec
|
|||
|
2013-09-18 03:38:09,356 Stage-1 map = 100%, reduce = 99%, Cumulative CPU 146.05 sec
|
|||
|
2013-09-18 03:38:10,363 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 177.94 sec
|
|||
|
2013-09-18 03:38:11,369 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 177.94 sec
MapReduce Total cumulative CPU time: 2 minutes 57 seconds 940 msec
Ended Job = job_201309172235_0352
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0353
Hadoop job information for Stage-2: number of mappers: 2; number of reducers: 1
2013-09-18 03:38:19,184 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:38:27,211 Stage-2 map = 25%, reduce = 0%
|
|||
|
2013-09-18 03:38:30,222 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 14.05 sec
|
|||
|
2013-09-18 03:38:31,227 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 14.05 sec
|
|||
|
2013-09-18 03:38:32,232 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 14.05 sec
|
|||
|
2013-09-18 03:38:33,237 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.05 sec
|
|||
|
2013-09-18 03:38:34,242 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.05 sec
|
|||
|
2013-09-18 03:38:35,247 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.05 sec
|
|||
|
2013-09-18 03:38:36,252 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.05 sec
|
|||
|
2013-09-18 03:38:37,256 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 14.05 sec
|
|||
|
2013-09-18 03:38:38,260 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.6 sec
|
|||
|
2013-09-18 03:38:39,264 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.6 sec
|
|||
|
2013-09-18 03:38:40,268 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.6 sec
|
|||
|
2013-09-18 03:38:41,272 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.6 sec
|
|||
|
2013-09-18 03:38:42,277 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.6 sec
|
|||
|
2013-09-18 03:38:43,282 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.6 sec
|
|||
|
2013-09-18 03:38:44,287 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.6 sec
|
|||
|
2013-09-18 03:38:45,293 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 38.6 sec
|
|||
|
2013-09-18 03:38:46,297 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 38.6 sec
|
|||
|
2013-09-18 03:38:47,303 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 38.6 sec
|
|||
|
2013-09-18 03:38:48,308 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 46.2 sec
|
|||
|
2013-09-18 03:38:49,314 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 46.2 sec
MapReduce Total cumulative CPU time: 46 seconds 200 msec
Ended Job = job_201309172235_0353
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 177.94 sec HDFS Read: 109451651 HDFS Write: 402873759 SUCCESS
Job 1: Map: 2 Reduce: 1 Cumulative CPU: 46.2 sec HDFS Read: 402889658 HDFS Write: 465 SUCCESS
Total MapReduce CPU Time Spent: 3 minutes 44 seconds 140 msec
OK
Time taken: 98.879 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
query: SELECT 1, URL, count(*) AS c FROM hits_10m GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_17127@mturlrep13_201309180338_1157991305.txt
hive> SELECT 1, URL, count(*) AS c FROM hits_10m GROUP BY 1, URL ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0354
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:38:59,272 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:39:07,305 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 03:39:10,318 Stage-1 map = 37%, reduce = 0%
|
|||
|
2013-09-18 03:39:13,332 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:39:16,349 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 86.63 sec
|
|||
|
2013-09-18 03:39:17,356 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 86.63 sec
|
|||
|
2013-09-18 03:39:18,364 Stage-1 map = 74%, reduce = 0%, Cumulative CPU 86.63 sec
|
|||
|
2013-09-18 03:39:19,371 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 90.6 sec
|
|||
|
2013-09-18 03:39:20,378 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 90.6 sec
|
|||
|
2013-09-18 03:39:21,383 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 90.6 sec
|
|||
|
2013-09-18 03:39:22,389 Stage-1 map = 89%, reduce = 0%, Cumulative CPU 90.6 sec
|
|||
|
2013-09-18 03:39:23,395 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 99.43 sec
|
|||
|
2013-09-18 03:39:24,400 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:25,406 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:26,412 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:27,417 Stage-1 map = 100%, reduce = 13%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:28,423 Stage-1 map = 100%, reduce = 13%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:29,429 Stage-1 map = 100%, reduce = 13%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:30,434 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:31,439 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:32,445 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:33,450 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:34,456 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:35,462 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:36,468 Stage-1 map = 100%, reduce = 73%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:37,474 Stage-1 map = 100%, reduce = 73%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:38,480 Stage-1 map = 100%, reduce = 73%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:39,486 Stage-1 map = 100%, reduce = 82%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:40,491 Stage-1 map = 100%, reduce = 82%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:41,496 Stage-1 map = 100%, reduce = 82%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:42,502 Stage-1 map = 100%, reduce = 91%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:43,507 Stage-1 map = 100%, reduce = 91%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:44,513 Stage-1 map = 100%, reduce = 91%, Cumulative CPU 118.35 sec
|
|||
|
2013-09-18 03:39:45,519 Stage-1 map = 100%, reduce = 99%, Cumulative CPU 146.1 sec
|
|||
|
2013-09-18 03:39:46,524 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 174.21 sec
|
|||
|
2013-09-18 03:39:47,529 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 174.21 sec
MapReduce Total cumulative CPU time: 2 minutes 54 seconds 210 msec
Ended Job = job_201309172235_0354
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0355
Hadoop job information for Stage-2: number of mappers: 2; number of reducers: 1
2013-09-18 03:39:50,983 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:39:58,006 Stage-2 map = 25%, reduce = 0%
|
|||
|
2013-09-18 03:40:01,016 Stage-2 map = 62%, reduce = 0%
|
|||
|
2013-09-18 03:40:04,029 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.78 sec
|
|||
|
2013-09-18 03:40:05,035 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.78 sec
|
|||
|
2013-09-18 03:40:06,039 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.78 sec
|
|||
|
2013-09-18 03:40:07,044 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.78 sec
|
|||
|
2013-09-18 03:40:08,049 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.78 sec
|
|||
|
2013-09-18 03:40:09,053 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 14.78 sec
|
|||
|
2013-09-18 03:40:10,058 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 40.36 sec
|
|||
|
2013-09-18 03:40:11,063 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 40.36 sec
|
|||
|
2013-09-18 03:40:12,067 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 40.36 sec
|
|||
|
2013-09-18 03:40:13,073 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 40.36 sec
|
|||
|
2013-09-18 03:40:14,079 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 40.36 sec
|
|||
|
2013-09-18 03:40:15,084 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 40.36 sec
|
|||
|
2013-09-18 03:40:16,089 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 40.36 sec
|
|||
|
2013-09-18 03:40:17,094 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 41.49 sec
|
|||
|
2013-09-18 03:40:18,099 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 41.49 sec
|
|||
|
2013-09-18 03:40:19,104 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 41.49 sec
|
|||
|
2013-09-18 03:40:20,109 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 48.21 sec
|
|||
|
2013-09-18 03:40:21,114 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 48.21 sec
MapReduce Total cumulative CPU time: 48 seconds 210 msec
Ended Job = job_201309172235_0355
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 174.21 sec HDFS Read: 109451651 HDFS Write: 402873759 SUCCESS
Job 1: Map: 2 Reduce: 1 Cumulative CPU: 48.21 sec HDFS Read: 402889658 HDFS Write: 465 SUCCESS
Total MapReduce CPU Time Spent: 3 minutes 42 seconds 420 msec
OK
Time taken: 89.299 seconds, Fetched: 10 row(s)
hive> quit;

times: 3
query: SELECT 1, URL, count(*) AS c FROM hits_10m GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_19706@mturlrep13_201309180340_906204677.txt
hive> SELECT 1, URL, count(*) AS c FROM hits_10m GROUP BY 1, URL ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0356
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:40:31,369 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:40:38,401 Stage-1 map = 15%, reduce = 0%
|
|||
|
2013-09-18 03:40:41,415 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 03:40:44,429 Stage-1 map = 59%, reduce = 0%
|
|||
|
2013-09-18 03:40:47,444 Stage-1 map = 78%, reduce = 0%
|
|||
|
2013-09-18 03:40:50,457 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 03:40:53,477 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 57.18 sec
|
|||
|
2013-09-18 03:40:54,483 Stage-1 map = 96%, reduce = 0%, Cumulative CPU 88.61 sec
|
|||
|
2013-09-18 03:40:55,490 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:40:56,494 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:40:57,499 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:40:58,503 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:40:59,509 Stage-1 map = 100%, reduce = 4%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:41:00,514 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:41:01,520 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:41:02,526 Stage-1 map = 100%, reduce = 8%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:41:03,532 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:41:04,538 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:41:05,544 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:41:06,550 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 120.2 sec
|
|||
|
2013-09-18 03:41:07,556 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 120.2 sec
2013-09-18 03:41:08,562 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 120.2 sec
2013-09-18 03:41:09,568 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 120.2 sec
2013-09-18 03:41:10,573 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 120.2 sec
2013-09-18 03:41:11,580 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 120.2 sec
2013-09-18 03:41:12,586 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 120.2 sec
2013-09-18 03:41:13,591 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 120.2 sec
2013-09-18 03:41:14,597 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 120.2 sec
2013-09-18 03:41:15,602 Stage-1 map = 100%, reduce = 92%, Cumulative CPU 120.2 sec
2013-09-18 03:41:16,610 Stage-1 map = 100%, reduce = 92%, Cumulative CPU 173.32 sec
2013-09-18 03:41:17,616 Stage-1 map = 100%, reduce = 92%, Cumulative CPU 173.32 sec
2013-09-18 03:41:18,622 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 180.14 sec
2013-09-18 03:41:19,628 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 180.14 sec
MapReduce Total cumulative CPU time: 3 minutes 0 seconds 140 msec
Ended Job = job_201309172235_0356
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0357
Hadoop job information for Stage-2: number of mappers: 2; number of reducers: 1
2013-09-18 03:41:23,145 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:41:30,169 Stage-2 map = 25%, reduce = 0%
2013-09-18 03:41:33,182 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 14.51 sec
2013-09-18 03:41:34,187 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 14.51 sec
2013-09-18 03:41:35,193 Stage-2 map = 75%, reduce = 0%, Cumulative CPU 14.51 sec
2013-09-18 03:41:36,198 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.51 sec
2013-09-18 03:41:37,203 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.51 sec
2013-09-18 03:41:38,208 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.51 sec
2013-09-18 03:41:39,214 Stage-2 map = 88%, reduce = 0%, Cumulative CPU 14.51 sec
2013-09-18 03:41:40,219 Stage-2 map = 88%, reduce = 17%, Cumulative CPU 14.51 sec
2013-09-18 03:41:41,224 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 39.04 sec
2013-09-18 03:41:42,229 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 39.04 sec
2013-09-18 03:41:43,234 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 39.04 sec
2013-09-18 03:41:44,238 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 39.04 sec
2013-09-18 03:41:45,243 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 39.04 sec
2013-09-18 03:41:46,248 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 39.04 sec
2013-09-18 03:41:47,252 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 39.04 sec
2013-09-18 03:41:48,257 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 39.04 sec
2013-09-18 03:41:49,262 Stage-2 map = 100%, reduce = 17%, Cumulative CPU 39.04 sec
2013-09-18 03:41:50,266 Stage-2 map = 100%, reduce = 67%, Cumulative CPU 39.04 sec
2013-09-18 03:41:51,271 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 46.66 sec
2013-09-18 03:41:52,277 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 46.66 sec
2013-09-18 03:41:53,282 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 46.66 sec
MapReduce Total cumulative CPU time: 46 seconds 660 msec
Ended Job = job_201309172235_0357
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 180.14 sec HDFS Read: 109451651 HDFS Write: 402873759 SUCCESS
Job 1: Map: 2 Reduce: 1 Cumulative CPU: 46.66 sec HDFS Read: 402889658 HDFS Write: 465 SUCCESS
Total MapReduce CPU Time Spent: 3 minutes 46 seconds 800 msec
OK
Time taken: 90.377 seconds, Fetched: 10 row(s)
hive> quit;
-- aggregation by URL and a number.;


times: 1
query: SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_21698@mturlrep13_201309180342_83883651.txt
hive> SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0358
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:42:11,758 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:42:18,790 Stage-1 map = 63%, reduce = 0%
2013-09-18 03:42:21,804 Stage-1 map = 89%, reduce = 0%
2013-09-18 03:42:22,816 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 26.17 sec
2013-09-18 03:42:23,825 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 54.18 sec
2013-09-18 03:42:24,832 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 54.18 sec
2013-09-18 03:42:25,838 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 54.18 sec
2013-09-18 03:42:26,845 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 54.18 sec
2013-09-18 03:42:27,851 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 54.18 sec
2013-09-18 03:42:28,856 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 54.18 sec
2013-09-18 03:42:29,862 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 54.18 sec
2013-09-18 03:42:30,869 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 54.18 sec
2013-09-18 03:42:31,876 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 56.28 sec
2013-09-18 03:42:32,882 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 56.28 sec
2013-09-18 03:42:33,889 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 56.28 sec
2013-09-18 03:42:34,895 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 56.28 sec
2013-09-18 03:42:35,902 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 56.28 sec
2013-09-18 03:42:36,909 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 56.28 sec
2013-09-18 03:42:37,915 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 67.05 sec
2013-09-18 03:42:38,921 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 67.05 sec
2013-09-18 03:42:39,927 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 67.05 sec
MapReduce Total cumulative CPU time: 1 minutes 7 seconds 50 msec
Ended Job = job_201309172235_0358
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0359
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:42:42,428 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:42:54,471 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.53 sec
2013-09-18 03:42:55,477 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.53 sec
2013-09-18 03:42:56,482 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.53 sec
2013-09-18 03:42:57,487 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.53 sec
2013-09-18 03:42:58,491 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.53 sec
2013-09-18 03:42:59,495 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.53 sec
2013-09-18 03:43:00,500 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.53 sec
2013-09-18 03:43:01,505 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 15.53 sec
2013-09-18 03:43:02,510 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 15.53 sec
2013-09-18 03:43:03,515 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.82 sec
2013-09-18 03:43:04,520 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.82 sec
2013-09-18 03:43:05,527 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.82 sec
MapReduce Total cumulative CPU time: 18 seconds 820 msec
Ended Job = job_201309172235_0359
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 67.05 sec HDFS Read: 31344843 HDFS Write: 51717050 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 18.82 sec HDFS Read: 51717819 HDFS Write: 490 SUCCESS
Total MapReduce CPU Time Spent: 1 minutes 25 seconds 870 msec
OK
Time taken: 63.792 seconds, Fetched: 10 row(s)
hive> quit;

times: 2
|
|||
|
query: SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_23423@mturlrep13_201309180343_849801634.txt
|
|||
|
hive> SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0360
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:43:14,871 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:43:22,908 Stage-1 map = 74%, reduce = 0%
|
|||
|
2013-09-18 03:43:25,929 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 27.51 sec
|
|||
|
2013-09-18 03:43:26,937 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.55 sec
|
|||
|
2013-09-18 03:43:27,944 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.55 sec
|
|||
|
2013-09-18 03:43:28,950 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.55 sec
|
|||
|
2013-09-18 03:43:29,956 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.55 sec
|
|||
|
2013-09-18 03:43:30,961 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.55 sec
|
|||
|
2013-09-18 03:43:31,966 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.55 sec
|
|||
|
2013-09-18 03:43:32,971 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 56.55 sec
|
|||
|
2013-09-18 03:43:33,978 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 56.55 sec
|
|||
|
2013-09-18 03:43:34,985 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 58.54 sec
|
|||
|
2013-09-18 03:43:35,993 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 58.54 sec
|
|||
|
2013-09-18 03:43:36,998 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 58.54 sec
|
|||
|
2013-09-18 03:43:38,004 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 58.54 sec
|
|||
|
2013-09-18 03:43:39,010 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 58.54 sec
|
|||
|
2013-09-18 03:43:40,016 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 58.54 sec
|
|||
|
2013-09-18 03:43:41,023 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 69.2 sec
|
|||
|
2013-09-18 03:43:42,029 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 69.2 sec
|
|||
|
2013-09-18 03:43:43,035 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 69.2 sec
|
|||
|
MapReduce Total cumulative CPU time: 1 minutes 9 seconds 200 msec
|
|||
|
Ended Job = job_201309172235_0360
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0361
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:43:45,535 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:43:57,575 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.46 sec
|
|||
|
2013-09-18 03:43:58,580 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.46 sec
|
|||
|
2013-09-18 03:43:59,588 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.46 sec
|
|||
|
2013-09-18 03:44:00,592 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.46 sec
|
|||
|
2013-09-18 03:44:01,597 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.46 sec
|
|||
|
2013-09-18 03:44:02,601 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.46 sec
|
|||
|
2013-09-18 03:44:03,606 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.46 sec
|
|||
|
2013-09-18 03:44:04,612 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 15.46 sec
|
|||
|
2013-09-18 03:44:05,617 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 15.46 sec
|
|||
|
2013-09-18 03:44:06,621 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.66 sec
|
|||
|
2013-09-18 03:44:07,642 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.66 sec
|
|||
|
2013-09-18 03:44:08,647 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 18.66 sec
|
|||
|
MapReduce Total cumulative CPU time: 18 seconds 660 msec
|
|||
|
Ended Job = job_201309172235_0361
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 69.2 sec HDFS Read: 31344843 HDFS Write: 51717050 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 18.66 sec HDFS Read: 51717819 HDFS Write: 490 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 1 minutes 27 seconds 860 msec
|
|||
|
OK
|
|||
|
Time taken: 61.127 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_25150@mturlrep13_201309180344_1265388056.txt
|
|||
|
hive> SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0362
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:44:18,182 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:44:26,214 Stage-1 map = 63%, reduce = 0%
|
|||
|
2013-09-18 03:44:29,227 Stage-1 map = 89%, reduce = 0%
|
|||
|
2013-09-18 03:44:30,239 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 57.27 sec
|
|||
|
2013-09-18 03:44:31,245 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 57.27 sec
|
|||
|
2013-09-18 03:44:32,252 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 57.27 sec
|
|||
|
2013-09-18 03:44:33,258 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 57.27 sec
|
|||
|
2013-09-18 03:44:34,264 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 57.27 sec
|
|||
|
2013-09-18 03:44:35,269 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 57.27 sec
|
|||
|
2013-09-18 03:44:36,275 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 57.27 sec
|
|||
|
2013-09-18 03:44:37,280 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 57.27 sec
|
|||
|
2013-09-18 03:44:38,287 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 59.28 sec
|
|||
|
2013-09-18 03:44:39,293 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 59.28 sec
|
|||
|
2013-09-18 03:44:40,298 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 59.28 sec
|
|||
|
2013-09-18 03:44:41,303 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 59.28 sec
|
|||
|
2013-09-18 03:44:42,308 Stage-1 map = 100%, reduce = 86%, Cumulative CPU 59.28 sec
|
|||
|
2013-09-18 03:44:43,314 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 59.28 sec
|
|||
|
2013-09-18 03:44:44,319 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 59.28 sec
|
|||
|
2013-09-18 03:44:45,325 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 70.26 sec
|
|||
|
2013-09-18 03:44:46,331 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 70.26 sec
|
|||
|
MapReduce Total cumulative CPU time: 1 minutes 10 seconds 260 msec
|
|||
|
Ended Job = job_201309172235_0362
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0363
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:44:49,785 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:45:00,823 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.52 sec
|
|||
|
2013-09-18 03:45:01,829 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.52 sec
|
|||
|
2013-09-18 03:45:02,834 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.52 sec
|
|||
|
2013-09-18 03:45:03,903 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.52 sec
|
|||
|
2013-09-18 03:45:04,908 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.52 sec
|
|||
|
2013-09-18 03:45:05,912 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.52 sec
|
|||
|
2013-09-18 03:45:06,916 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 15.52 sec
|
|||
|
2013-09-18 03:45:07,921 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 15.52 sec
|
|||
|
2013-09-18 03:45:08,927 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 15.52 sec
|
|||
|
2013-09-18 03:45:09,932 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 19.85 sec
|
|||
|
2013-09-18 03:45:10,938 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 19.85 sec
|
|||
|
2013-09-18 03:45:11,943 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 19.85 sec
|
|||
|
MapReduce Total cumulative CPU time: 19 seconds 850 msec
|
|||
|
Ended Job = job_201309172235_0363
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 70.26 sec HDFS Read: 31344843 HDFS Write: 51717050 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 19.85 sec HDFS Read: 51717819 HDFS Write: 490 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 1 minutes 30 seconds 110 msec
|
|||
|
OK
|
|||
|
Time taken: 61.517 seconds, Fetched: 10 row(s)
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 1
|
|||
|
query:
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_27316@mturlrep13_201309180345_1510224697.txt
|
|||
|
hive> ;
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query:
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_27776@mturlrep13_201309180345_1361207737.txt
|
|||
|
hive> ;
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query:
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_27977@mturlrep13_201309180345_862396725.txt
|
|||
|
hive> ;
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_28185@mturlrep13_201309180345_1903344467.txt
|
|||
|
hive> SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0364
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:45:40,647 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:45:48,681 Stage-1 map = 51%, reduce = 0%
|
|||
|
2013-09-18 03:45:49,694 Stage-1 map = 75%, reduce = 0%, Cumulative CPU 18.62 sec
|
|||
|
2013-09-18 03:45:50,702 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.91 sec
|
|||
|
2013-09-18 03:45:51,710 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.91 sec
|
|||
|
2013-09-18 03:45:52,716 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.91 sec
|
|||
|
2013-09-18 03:45:53,721 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.91 sec
|
|||
|
2013-09-18 03:45:54,726 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.91 sec
|
|||
|
2013-09-18 03:45:55,733 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.91 sec
|
|||
|
2013-09-18 03:45:56,738 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.91 sec
|
|||
|
2013-09-18 03:45:57,746 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 40.93 sec
|
|||
|
2013-09-18 03:45:58,752 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.77 sec
|
|||
|
2013-09-18 03:45:59,758 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.77 sec
|
|||
|
MapReduce Total cumulative CPU time: 42 seconds 770 msec
|
|||
|
Ended Job = job_201309172235_0364
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0365
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:46:03,281 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:46:04,286 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:46:05,292 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:46:06,300 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:46:07,305 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:46:08,309 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:46:09,313 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:46:10,318 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:46:11,323 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:46:12,328 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.28 sec
|
|||
|
2013-09-18 03:46:13,335 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.28 sec
|
|||
|
2013-09-18 03:46:14,340 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.28 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 280 msec
|
|||
|
Ended Job = job_201309172235_0365
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 42.77 sec HDFS Read: 118784021 HDFS Write: 192 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.28 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 45 seconds 50 msec
|
|||
|
OK
|
|||
|
Time taken: 44.325 seconds
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_30170@mturlrep13_201309180346_1971537054.txt
|
|||
|
hive> SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0366
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:46:23,592 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:46:31,635 Stage-1 map = 86%, reduce = 0%, Cumulative CPU 18.42 sec
|
|||
|
2013-09-18 03:46:32,643 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.46 sec
|
|||
|
2013-09-18 03:46:33,650 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.46 sec
|
|||
|
2013-09-18 03:46:34,657 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.46 sec
|
|||
|
2013-09-18 03:46:35,663 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.46 sec
|
|||
|
2013-09-18 03:46:36,670 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.46 sec
|
|||
|
2013-09-18 03:46:37,677 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.46 sec
|
|||
|
2013-09-18 03:46:38,684 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.46 sec
|
|||
|
2013-09-18 03:46:39,692 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 40.52 sec
|
|||
|
2013-09-18 03:46:40,698 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.46 sec
|
|||
|
2013-09-18 03:46:41,704 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.46 sec
|
|||
|
MapReduce Total cumulative CPU time: 42 seconds 460 msec
|
|||
|
Ended Job = job_201309172235_0366
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0367
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:46:45,150 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:46:46,156 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:46:47,162 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:46:48,167 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:46:49,172 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:46:50,177 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:46:51,183 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:46:52,188 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:46:53,192 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
|
|||
|
2013-09-18 03:46:54,198 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.14 sec
|
|||
|
2013-09-18 03:46:55,203 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.14 sec
|
|||
|
2013-09-18 03:46:56,209 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.14 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 140 msec
|
|||
|
Ended Job = job_201309172235_0367
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 42.46 sec HDFS Read: 118784021 HDFS Write: 192 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.14 sec HDFS Read: 959 HDFS Write: 0 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 44 seconds 600 msec
|
|||
|
OK
|
|||
|
Time taken: 39.995 seconds
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_32117@mturlrep13_201309180346_265757676.txt
|
|||
|
hive> SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0368
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:47:06,367 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:47:13,406 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 9.51 sec
|
|||
|
2013-09-18 03:47:14,415 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.45 sec
|
|||
|
2013-09-18 03:47:15,422 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.45 sec
|
|||
|
2013-09-18 03:47:16,428 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.45 sec
|
|||
|
2013-09-18 03:47:17,433 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.45 sec
|
|||
|
2013-09-18 03:47:18,439 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.45 sec
|
|||
|
2013-09-18 03:47:19,444 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.45 sec
|
|||
|
2013-09-18 03:47:20,450 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.45 sec
|
|||
|
2013-09-18 03:47:21,459 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 41.37 sec
|
|||
|
2013-09-18 03:47:22,465 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 43.32 sec
|
|||
|
2013-09-18 03:47:23,471 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 43.32 sec
|
|||
|
MapReduce Total cumulative CPU time: 43 seconds 320 msec
|
|||
|
Ended Job = job_201309172235_0368
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0369
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:47:26,930 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:47:27,935 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 03:47:28,941 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 03:47:29,947 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 03:47:30,952 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 03:47:31,956 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 03:47:32,961 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 03:47:33,966 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 03:47:34,971 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.72 sec
|
|||
|
2013-09-18 03:47:35,977 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.29 sec
|
|||
|
2013-09-18 03:47:36,983 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.29 sec
|
|||
|
2013-09-18 03:47:37,988 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.29 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 290 msec
|
|||
|
Ended Job = job_201309172235_0369
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 43.32 sec HDFS Read: 118784021 HDFS Write: 192 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.29 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 45 seconds 610 msec
|
|||
|
OK
|
|||
|
Time taken: 39.979 seconds
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_1699@mturlrep13_201309180347_773881817.txt
|
|||
|
hive> SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0370
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:47:54,727 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:48:01,761 Stage-1 map = 66%, reduce = 0%
|
|||
|
2013-09-18 03:48:03,778 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.99 sec
|
|||
|
2013-09-18 03:48:04,785 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.99 sec
|
|||
|
2013-09-18 03:48:05,792 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.99 sec
|
|||
|
2013-09-18 03:48:06,799 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.99 sec
|
|||
|
2013-09-18 03:48:07,804 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.99 sec
|
|||
|
2013-09-18 03:48:08,811 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.99 sec
|
|||
|
2013-09-18 03:48:09,818 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.99 sec
|
|||
|
2013-09-18 03:48:10,824 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 38.99 sec
|
|||
|
2013-09-18 03:48:11,833 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 43.36 sec
|
|||
|
2013-09-18 03:48:12,840 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 43.36 sec
|
|||
|
2013-09-18 03:48:13,847 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 43.36 sec
|
|||
|
MapReduce Total cumulative CPU time: 43 seconds 360 msec
|
|||
|
Ended Job = job_201309172235_0370
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0371
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:48:17,471 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:48:18,477 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.71 sec
|
|||
|
2013-09-18 03:48:19,486 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.71 sec
|
|||
|
2013-09-18 03:48:20,491 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.71 sec
|
|||
|
2013-09-18 03:48:21,496 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.71 sec
|
|||
|
2013-09-18 03:48:22,502 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.71 sec
|
|||
|
2013-09-18 03:48:23,508 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.71 sec
|
|||
|
2013-09-18 03:48:24,514 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.71 sec
|
|||
|
2013-09-18 03:48:25,520 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.71 sec
|
|||
|
2013-09-18 03:48:26,526 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.23 sec
|
|||
|
2013-09-18 03:48:27,532 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.23 sec
|
|||
|
2013-09-18 03:48:28,538 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.23 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 230 msec
|
|||
|
Ended Job = job_201309172235_0371
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 43.36 sec HDFS Read: 115339269 HDFS Write: 192 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.23 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 45 seconds 590 msec
|
|||
|
OK
|
|||
|
Time taken: 43.852 seconds
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
|
|||
|
query: SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_3593@mturlrep13_201309180348_533961775.txt
|
|||
|
hive> SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0372
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:48:38,968 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:48:46,007 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 18.26 sec
|
|||
|
2013-09-18 03:48:47,015 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.04 sec
|
|||
|
2013-09-18 03:48:48,023 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.04 sec
|
|||
|
2013-09-18 03:48:49,029 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.04 sec
|
|||
|
2013-09-18 03:48:50,035 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.04 sec
|
|||
|
2013-09-18 03:48:51,040 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.04 sec
|
|||
|
2013-09-18 03:48:52,047 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.04 sec
|
|||
|
2013-09-18 03:48:53,053 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.04 sec
|
|||
|
2013-09-18 03:48:54,062 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 39.06 sec
|
|||
|
2013-09-18 03:48:55,069 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 41.09 sec
|
|||
|
2013-09-18 03:48:56,076 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 41.09 sec
|
|||
|
MapReduce Total cumulative CPU time: 41 seconds 90 msec
|
|||
|
Ended Job = job_201309172235_0372
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0373
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:48:58,539 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:49:00,548 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 03:49:01,554 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 03:49:02,560 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 03:49:03,564 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 03:49:04,569 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 03:49:05,574 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 03:49:06,578 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 03:49:07,583 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.69 sec
|
|||
|
2013-09-18 03:49:08,589 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.14 sec
|
|||
|
2013-09-18 03:49:09,594 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.14 sec
|
|||
|
2013-09-18 03:49:10,600 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.14 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 140 msec
|
|||
|
Ended Job = job_201309172235_0373
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 41.09 sec HDFS Read: 115339269 HDFS Write: 192 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.14 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 43 seconds 230 msec
|
|||
|
OK
|
|||
|
Time taken: 40.178 seconds
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 3
|
|||
|
query: SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_5455@mturlrep13_201309180349_1828648291.txt
|
|||
|
hive> SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0374
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:49:21,008 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:49:28,046 Stage-1 map = 86%, reduce = 0%, Cumulative CPU 8.89 sec
|
|||
|
2013-09-18 03:49:29,054 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.98 sec
|
|||
|
2013-09-18 03:49:30,062 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.98 sec
|
|||
|
2013-09-18 03:49:31,067 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.98 sec
|
|||
|
2013-09-18 03:49:32,073 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.98 sec
|
|||
|
2013-09-18 03:49:33,079 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.98 sec
|
|||
|
2013-09-18 03:49:34,084 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.98 sec
|
|||
|
2013-09-18 03:49:35,090 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.98 sec
|
|||
|
2013-09-18 03:49:36,096 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 37.98 sec
|
|||
|
2013-09-18 03:49:37,104 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.56 sec
|
|||
|
2013-09-18 03:49:38,109 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.56 sec
|
|||
|
MapReduce Total cumulative CPU time: 42 seconds 560 msec
|
|||
|
Ended Job = job_201309172235_0374
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0375
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:49:41,584 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:49:43,592 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 03:49:44,596 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 03:49:45,600 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 03:49:46,604 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 03:49:47,608 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 03:49:48,612 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 03:49:49,616 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
|
|||
|
2013-09-18 03:49:50,620 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
|
|||
|
2013-09-18 03:49:51,625 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
|
|||
|
2013-09-18 03:49:52,630 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 100 msec
|
|||
|
Ended Job = job_201309172235_0375
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 42.56 sec HDFS Read: 115339269 HDFS Write: 192 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.1 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 44 seconds 660 msec
|
|||
|
OK
|
|||
|
Time taken: 40.131 seconds
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 1
|
|||
|
query: SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND IsLink != 0 AND NOT IsDownload != 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
|
|||
|
spawn hive
|
|||
|
|
|||
|
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
|||
|
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_7725@mturlrep13_201309180349_550066891.txt
|
|||
|
hive> SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND IsLink != 0 AND NOT IsDownload != 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;;
|
|||
|
Total MapReduce jobs = 2
|
|||
|
Launching Job 1 out of 2
|
|||
|
Number of reduce tasks not specified. Estimated from input data size: 2
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0376
|
|||
|
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
|||
|
2013-09-18 03:50:09,712 Stage-1 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:50:16,741 Stage-1 map = 44%, reduce = 0%
|
|||
|
2013-09-18 03:50:18,758 Stage-1 map = 72%, reduce = 0%, Cumulative CPU 34.83 sec
|
|||
|
2013-09-18 03:50:19,766 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.89 sec
|
|||
|
2013-09-18 03:50:20,774 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.89 sec
|
|||
|
2013-09-18 03:50:21,781 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.89 sec
|
|||
|
2013-09-18 03:50:22,787 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.89 sec
|
|||
|
2013-09-18 03:50:23,793 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.89 sec
|
|||
|
2013-09-18 03:50:24,799 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.89 sec
|
|||
|
2013-09-18 03:50:25,805 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.89 sec
|
|||
|
2013-09-18 03:50:26,813 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 41.94 sec
|
|||
|
2013-09-18 03:50:27,820 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 44.02 sec
|
|||
|
2013-09-18 03:50:28,827 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 44.02 sec
|
|||
|
MapReduce Total cumulative CPU time: 44 seconds 20 msec
|
|||
|
Ended Job = job_201309172235_0376
|
|||
|
Launching Job 2 out of 2
|
|||
|
Number of reduce tasks determined at compile time: 1
|
|||
|
In order to change the average load for a reducer (in bytes):
|
|||
|
set hive.exec.reducers.bytes.per.reducer=<number>
|
|||
|
In order to limit the maximum number of reducers:
|
|||
|
set hive.exec.reducers.max=<number>
|
|||
|
In order to set a constant number of reducers:
|
|||
|
set mapred.reduce.tasks=<number>
|
|||
|
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0377
|
|||
|
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
|||
|
2013-09-18 03:50:31,408 Stage-2 map = 0%, reduce = 0%
|
|||
|
2013-09-18 03:50:33,417 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:50:34,422 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:50:35,428 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:50:36,432 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:50:37,437 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:50:38,442 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:50:39,447 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:50:40,452 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.66 sec
|
|||
|
2013-09-18 03:50:41,458 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.19 sec
|
|||
|
2013-09-18 03:50:42,463 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.19 sec
|
|||
|
2013-09-18 03:50:43,469 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.19 sec
|
|||
|
MapReduce Total cumulative CPU time: 2 seconds 190 msec
|
|||
|
Ended Job = job_201309172235_0377
|
|||
|
MapReduce Jobs Launched:
|
|||
|
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 44.02 sec HDFS Read: 118662691 HDFS Write: 192 SUCCESS
|
|||
|
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.19 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
|
|||
|
Total MapReduce CPU Time Spent: 46 seconds 210 msec
|
|||
|
OK
|
|||
|
Time taken: 44.493 seconds
|
|||
|
hive> quit;
|
|||
|
|
|||
|
times: 2
query: SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND IsLink != 0 AND NOT IsDownload != 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_10373@mturlrep13_201309180350_1849852052.txt
hive> SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND IsLink != 0 AND NOT IsDownload != 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0378
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:50:53,794 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:51:00,831 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 9.24 sec
2013-09-18 03:51:01,838 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.73 sec
2013-09-18 03:51:02,846 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.73 sec
2013-09-18 03:51:03,852 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.73 sec
2013-09-18 03:51:04,858 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.73 sec
2013-09-18 03:51:05,865 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.73 sec
2013-09-18 03:51:06,871 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.73 sec
2013-09-18 03:51:07,876 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.73 sec
2013-09-18 03:51:08,883 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 40.86 sec
2013-09-18 03:51:09,890 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.85 sec
2013-09-18 03:51:10,897 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.85 sec
MapReduce Total cumulative CPU time: 42 seconds 850 msec
Ended Job = job_201309172235_0378
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0379
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:51:14,363 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:51:16,372 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:51:17,377 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:51:18,382 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:51:19,387 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:51:20,392 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:51:21,397 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:51:22,403 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:51:23,408 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.16 sec
2013-09-18 03:51:24,414 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.16 sec
2013-09-18 03:51:25,420 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.16 sec
MapReduce Total cumulative CPU time: 2 seconds 160 msec
Ended Job = job_201309172235_0379
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 42.85 sec HDFS Read: 118662691 HDFS Write: 192 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.16 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 45 seconds 10 msec
OK
Time taken: 40.17 seconds
hive> quit;

times: 3
query: SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND IsLink != 0 AND NOT IsDownload != 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_12355@mturlrep13_201309180351_396050441.txt
hive> SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND IsLink != 0 AND NOT IsDownload != 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0380
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:51:34,723 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:51:42,768 Stage-1 map = 86%, reduce = 0%, Cumulative CPU 19.49 sec
2013-09-18 03:51:43,777 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.68 sec
2013-09-18 03:51:44,785 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.68 sec
2013-09-18 03:51:45,791 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.68 sec
2013-09-18 03:51:46,798 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.68 sec
2013-09-18 03:51:47,804 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.68 sec
2013-09-18 03:51:48,810 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.68 sec
2013-09-18 03:51:49,818 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 40.68 sec
2013-09-18 03:51:50,825 Stage-1 map = 100%, reduce = 33%, Cumulative CPU 40.68 sec
2013-09-18 03:51:51,834 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 44.72 sec
2013-09-18 03:51:52,841 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 44.72 sec
MapReduce Total cumulative CPU time: 44 seconds 720 msec
Ended Job = job_201309172235_0380
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0381
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:51:56,309 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:51:57,314 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:51:58,319 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:51:59,325 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:52:00,330 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:52:01,334 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:52:02,339 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:52:03,343 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:52:04,348 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:52:05,354 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.16 sec
2013-09-18 03:52:06,360 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.16 sec
2013-09-18 03:52:07,365 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.16 sec
MapReduce Total cumulative CPU time: 2 seconds 160 msec
Ended Job = job_201309172235_0381
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 44.72 sec HDFS Read: 118662691 HDFS Write: 192 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.16 sec HDFS Read: 959 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 46 seconds 880 msec
OK
Time taken: 40.171 seconds
hive> quit;

times: 1
query: SELECT TraficSourceID, SearchEngineID, AdvEngineID, URL, count(*), if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') AS SRC FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, ''), URL ORDER BY count(*) DESC LIMIT 1000;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_14305@mturlrep13_201309180352_966734049.txt
hive> SELECT TraficSourceID, SearchEngineID, AdvEngineID, URL, count(*), if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') AS SRC FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, ''), URL ORDER BY count(*) DESC LIMIT 1000; ;
FAILED: SemanticException [Error 10128]: Line 1:414 Not yet supported place for UDAF 'count'
hive> quit;

times: 2
query: SELECT TraficSourceID, SearchEngineID, AdvEngineID, URL, count(*), if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') AS SRC FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, ''), URL ORDER BY count(*) DESC LIMIT 1000;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_14524@mturlrep13_201309180352_643113009.txt
hive> SELECT TraficSourceID, SearchEngineID, AdvEngineID, URL, count(*), if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') AS SRC FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, ''), URL ORDER BY count(*) DESC LIMIT 1000; ;
FAILED: SemanticException [Error 10128]: Line 1:414 Not yet supported place for UDAF 'count'
hive> quit;

times: 3
query: SELECT TraficSourceID, SearchEngineID, AdvEngineID, URL, count(*), if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') AS SRC FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, ''), URL ORDER BY count(*) DESC LIMIT 1000;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_14731@mturlrep13_201309180352_231906667.txt
hive> SELECT TraficSourceID, SearchEngineID, AdvEngineID, URL, count(*), if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') AS SRC FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, ''), URL ORDER BY count(*) DESC LIMIT 1000; ;
FAILED: SemanticException [Error 10128]: Line 1:414 Not yet supported place for UDAF 'count'
hive> quit;

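Note: all three attempts above fail at analysis time with SemanticException [Error 10128] because the aggregate is placed directly in ORDER BY count(*), which this Hive version does not accept for UDAFs. A possible rework, shown only as a sketch and not part of the original benchmark run, aliases the aggregate in the SELECT list and orders by the alias instead:

hive> SELECT TraficSourceID, SearchEngineID, AdvEngineID, URL, count(*) AS PageViews, if(SearchEngineID = 0 AND AdvEngineID = 0, Referer, '') AS SRC FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, if(SearchEngineID = 0 AND AdvEngineID = 0, Referer, ''), URL ORDER BY PageViews DESC LIMIT 1000;
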
times: 1
query: SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_14942@mturlrep13_201309180352_606832963.txt
hive> SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000; ;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0382
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:52:47,876 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:52:54,907 Stage-1 map = 63%, reduce = 0%
2013-09-18 03:52:55,919 Stage-1 map = 72%, reduce = 0%, Cumulative CPU 9.33 sec
2013-09-18 03:52:56,927 Stage-1 map = 79%, reduce = 0%, Cumulative CPU 19.11 sec
2013-09-18 03:52:57,935 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.01 sec
2013-09-18 03:52:58,941 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.01 sec
2013-09-18 03:52:59,946 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.01 sec
2013-09-18 03:53:00,952 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.01 sec
2013-09-18 03:53:01,958 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.01 sec
2013-09-18 03:53:02,965 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 39.01 sec
2013-09-18 03:53:03,972 Stage-1 map = 100%, reduce = 21%, Cumulative CPU 39.01 sec
2013-09-18 03:53:04,980 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 43.35 sec
2013-09-18 03:53:05,987 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 43.35 sec
MapReduce Total cumulative CPU time: 43 seconds 350 msec
Ended Job = job_201309172235_0382
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0383
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:53:09,467 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:53:11,476 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:53:12,482 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:53:13,487 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:53:14,491 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:53:15,496 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:53:16,513 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:53:17,519 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:53:18,524 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
2013-09-18 03:53:19,530 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
2013-09-18 03:53:20,536 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
MapReduce Total cumulative CPU time: 2 seconds 100 msec
Ended Job = job_201309172235_0383
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 43.35 sec HDFS Read: 148406904 HDFS Write: 192 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.1 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 45 seconds 450 msec
OK
Time taken: 43.025 seconds
hive> quit;

times: 2
query: SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_16891@mturlrep13_201309180353_499012303.txt
hive> SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000; ;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0384
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:53:30,022 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:53:38,064 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 27.01 sec
2013-09-18 03:53:39,072 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.42 sec
2013-09-18 03:53:40,080 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.42 sec
2013-09-18 03:53:41,086 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.42 sec
2013-09-18 03:53:42,092 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.42 sec
2013-09-18 03:53:43,098 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.42 sec
2013-09-18 03:53:44,105 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.42 sec
2013-09-18 03:53:45,112 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 36.42 sec
2013-09-18 03:53:46,120 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 40.33 sec
2013-09-18 03:53:47,127 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 40.33 sec
2013-09-18 03:53:48,134 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 40.33 sec
MapReduce Total cumulative CPU time: 40 seconds 330 msec
Ended Job = job_201309172235_0384
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0385
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:53:50,608 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:53:52,616 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:53:53,622 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:53:54,627 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:53:55,631 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:53:56,636 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:53:57,641 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:53:58,646 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:53:59,652 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.65 sec
2013-09-18 03:54:00,658 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.16 sec
2013-09-18 03:54:01,664 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.16 sec
MapReduce Total cumulative CPU time: 2 seconds 160 msec
Ended Job = job_201309172235_0385
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 40.33 sec HDFS Read: 148406904 HDFS Write: 192 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.16 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 42 seconds 490 msec
OK
Time taken: 39.168 seconds
hive> quit;

times: 3
query: SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_18843@mturlrep13_201309180354_379983066.txt
hive> SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000; ;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0386
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:54:10,812 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:54:18,851 Stage-1 map = 86%, reduce = 0%, Cumulative CPU 36.06 sec
2013-09-18 03:54:19,859 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.5 sec
2013-09-18 03:54:20,867 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.5 sec
2013-09-18 03:54:21,873 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.5 sec
2013-09-18 03:54:22,879 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.5 sec
2013-09-18 03:54:23,885 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.5 sec
2013-09-18 03:54:24,891 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 38.5 sec
2013-09-18 03:54:25,898 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 38.5 sec
2013-09-18 03:54:26,905 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.56 sec
2013-09-18 03:54:27,912 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.56 sec
2013-09-18 03:54:28,918 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 42.56 sec
MapReduce Total cumulative CPU time: 42 seconds 560 msec
Ended Job = job_201309172235_0386
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0387
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:54:32,443 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:54:33,449 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:54:34,455 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:54:35,460 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:54:36,465 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:54:37,470 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:54:38,475 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:54:39,479 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:54:40,484 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.65 sec
2013-09-18 03:54:41,490 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.08 sec
2013-09-18 03:54:42,495 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.08 sec
2013-09-18 03:54:43,500 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.08 sec
MapReduce Total cumulative CPU time: 2 seconds 80 msec
Ended Job = job_201309172235_0387
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 42.56 sec HDFS Read: 148406904 HDFS Write: 192 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.08 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 44 seconds 640 msec
OK
Time taken: 40.042 seconds
hive> quit;

times: 1
query: SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND NOT DontCountHits != 0 AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_20827@mturlrep13_201309180354_891781723.txt
hive> SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND NOT DontCountHits != 0 AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000; ;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0388
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:54:59,946 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:55:06,975 Stage-1 map = 56%, reduce = 0%
2013-09-18 03:55:08,992 Stage-1 map = 75%, reduce = 0%, Cumulative CPU 17.58 sec
2013-09-18 03:55:09,999 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.11 sec
2013-09-18 03:55:11,007 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.11 sec
2013-09-18 03:55:12,014 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.11 sec
2013-09-18 03:55:13,020 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.11 sec
2013-09-18 03:55:14,027 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.11 sec
2013-09-18 03:55:15,033 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 37.11 sec
2013-09-18 03:55:16,039 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 37.11 sec
2013-09-18 03:55:17,047 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 41.2 sec
2013-09-18 03:55:18,053 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 41.2 sec
MapReduce Total cumulative CPU time: 41 seconds 200 msec
Ended Job = job_201309172235_0388
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0389
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:55:20,567 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:55:22,576 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:55:23,582 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:55:24,587 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:55:25,591 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:55:26,596 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:55:27,600 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:55:28,605 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:55:29,610 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.64 sec
2013-09-18 03:55:30,615 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
2013-09-18 03:55:31,620 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
2013-09-18 03:55:32,625 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.1 sec
MapReduce Total cumulative CPU time: 2 seconds 100 msec
Ended Job = job_201309172235_0389
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 41.2 sec HDFS Read: 105631340 HDFS Write: 192 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.1 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 43 seconds 300 msec
OK
Time taken: 42.914 seconds
hive> quit;

times: 2
query: SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND NOT DontCountHits != 0 AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_23548@mturlrep13_201309180355_61949059.txt
hive> SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND NOT DontCountHits != 0 AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000; ;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0390
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:55:41,925 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:55:49,963 Stage-1 map = 93%, reduce = 0%, Cumulative CPU 27.55 sec
2013-09-18 03:55:50,974 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.61 sec
2013-09-18 03:55:51,982 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.61 sec
2013-09-18 03:55:52,988 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.61 sec
2013-09-18 03:55:53,994 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.61 sec
2013-09-18 03:55:55,000 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.61 sec
2013-09-18 03:55:56,005 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.61 sec
2013-09-18 03:55:57,012 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.61 sec
2013-09-18 03:55:58,020 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 38.63 sec
2013-09-18 03:55:59,026 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 40.74 sec
2013-09-18 03:56:00,032 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 40.74 sec
MapReduce Total cumulative CPU time: 40 seconds 740 msec
Ended Job = job_201309172235_0390
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0391
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:56:03,570 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:56:05,578 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
2013-09-18 03:56:06,583 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
2013-09-18 03:56:07,588 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
2013-09-18 03:56:08,593 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
2013-09-18 03:56:09,598 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
2013-09-18 03:56:10,603 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
2013-09-18 03:56:11,608 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.67 sec
2013-09-18 03:56:12,614 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.67 sec
2013-09-18 03:56:13,620 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.29 sec
2013-09-18 03:56:14,625 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.29 sec
MapReduce Total cumulative CPU time: 2 seconds 290 msec
Ended Job = job_201309172235_0391
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 40.74 sec HDFS Read: 105631340 HDFS Write: 192 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.29 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 43 seconds 30 msec
OK
Time taken: 40.129 seconds
hive> quit;

times: 3
query: SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND NOT DontCountHits != 0 AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_25608@mturlrep13_201309180356_1104505823.txt
hive> SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND NOT DontCountHits != 0 AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000; ;
Total MapReduce jobs = 2
Launching Job 1 out of 2
Number of reduce tasks not specified. Estimated from input data size: 2
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0392
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
2013-09-18 03:56:24,831 Stage-1 map = 0%, reduce = 0%
2013-09-18 03:56:31,868 Stage-1 map = 97%, reduce = 0%, Cumulative CPU 26.64 sec
2013-09-18 03:56:32,876 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
2013-09-18 03:56:33,883 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
2013-09-18 03:56:34,889 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
2013-09-18 03:56:35,895 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
2013-09-18 03:56:36,901 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
2013-09-18 03:56:37,907 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 36.09 sec
2013-09-18 03:56:38,914 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 36.09 sec
2013-09-18 03:56:39,922 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 40.24 sec
2013-09-18 03:56:40,930 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 40.24 sec
2013-09-18 03:56:41,935 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 40.24 sec
MapReduce Total cumulative CPU time: 40 seconds 240 msec
Ended Job = job_201309172235_0392
Launching Job 2 out of 2
Number of reduce tasks determined at compile time: 1
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapred.reduce.tasks=<number>
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309172235_0393
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
2013-09-18 03:56:44,482 Stage-2 map = 0%, reduce = 0%
2013-09-18 03:56:46,491 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:56:47,497 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:56:48,502 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:56:49,507 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:56:50,512 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:56:51,516 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:56:52,521 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.64 sec
2013-09-18 03:56:53,527 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.64 sec
2013-09-18 03:56:54,532 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.0 sec
2013-09-18 03:56:55,541 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.0 sec
MapReduce Total cumulative CPU time: 2 seconds 0 msec
Ended Job = job_201309172235_0393
MapReduce Jobs Launched:
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 40.24 sec HDFS Read: 105631340 HDFS Write: 192 SUCCESS
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.0 sec HDFS Read: 961 HDFS Write: 0 SUCCESS
Total MapReduce CPU Time Spent: 42 seconds 240 msec
OK
Time taken: 39.135 seconds
hive> quit;

times: 1
query: SELECT unix_timestamp(EventTime) - SECOND(EventTime), count(*) FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-02') AND NOT Refresh != 0 AND NOT DontCountHits != 0 GROUP BY m ORDER BY m;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_27654@mturlrep13_201309180357_1202049838.txt
hive> SELECT unix_timestamp(EventTime) - SECOND(EventTime), count(*) FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-02') AND NOT Refresh != 0 AND NOT DontCountHits != 0 GROUP BY m ORDER BY m; ;
hive> quit;

times: 2
query: SELECT unix_timestamp(EventTime) - SECOND(EventTime), count(*) FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-02') AND NOT Refresh != 0 AND NOT DontCountHits != 0 GROUP BY m ORDER BY m;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_27867@mturlrep13_201309180357_2097495906.txt
hive> SELECT unix_timestamp(EventTime) - SECOND(EventTime), count(*) FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-02') AND NOT Refresh != 0 AND NOT DontCountHits != 0 GROUP BY m ORDER BY m; ;
hive> quit;

times: 3
query: SELECT unix_timestamp(EventTime) - SECOND(EventTime), count(*) FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-02') AND NOT Refresh != 0 AND NOT DontCountHits != 0 GROUP BY m ORDER BY m;
spawn hive

Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_28078@mturlrep13_201309180357_1074846519.txt
hive> SELECT unix_timestamp(EventTime) - SECOND(EventTime), count(*) FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-02') AND NOT Refresh != 0 AND NOT DontCountHits != 0 GROUP BY m ORDER BY m; ;
hive> quit;
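
Note: the three runs above never launch a MapReduce job. The statement groups and orders by m, an alias the SELECT list never defines, so Hive cannot resolve it and the session drops straight back to the prompt (the error text itself is not captured by the harness). A sketch of a form that names the expression, assuming the intent was to truncate EventTime to the minute by subtracting its seconds, would be:

hive> SELECT unix_timestamp(EventTime) - SECOND(EventTime) AS m, count(*) FROM hits_10m WHERE CounterID = 34 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-02') AND NOT Refresh != 0 AND NOT DontCountHits != 0 GROUP BY unix_timestamp(EventTime) - SECOND(EventTime) ORDER BY m;
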
stop time: Ср. сент. 18 03:57:19 MSK 2013