---- Original statement (runtime: 18 min)
SELECT
bb.ip
FROM
(
SELECT
ip ,
sum(click) click_num,
round(sum(click) / sum(imp), 4) user_click_rate
FROM
schema.srctable1
WHERE
date = '20171020'
AND ip IS NOT NULL
AND imp > 0
GROUP BY ip
) bb
LEFT OUTER JOIN
(
SELECT
round(sum(click) / sum(imp), 4) avg_click_rate
FROM
schema.srctable1
WHERE
date = '20171020'
) aa
LEFT OUTER JOIN schema.dstable cc
on cc.ip = bb.ip
WHERE cc.ip is null
AND
(
bb.user_click_rate > aa.avg_click_rate * 3
AND click_num > 500
)
OR
(
click_num > 1000
)
Analysis:
1. Table aa holds a single metric value (the overall average click rate) in one row, so it is the small table.
2. Table bb holds the per-ip aggregated detail; it has many rows, so it is the large table.
3. Table cc is only used to filter out ips; it is also small, a filter table with little impact.
Looking at the execution plan shows that the LEFT OUTER JOIN between bb and aa triggers a shuffle, which causes heavy disk and network IO and hurts performance.
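To check which join strategy Spark SQL actually chooses, the physical plan can be printed with EXPLAIN: shuffle-based joins appear as Exchange / SortMergeJoin operators, while map-side joins appear as BroadcastHashJoin (or BroadcastNestedLoopJoin when there is no equality condition). A minimal sketch against the bb/aa join above:

EXPLAIN
SELECT bb.ip, bb.user_click_rate, aa.avg_click_rate
FROM
(
    SELECT ip,
           sum(click) click_num,
           round(sum(click) / sum(imp), 4) user_click_rate
    FROM schema.srctable1
    WHERE date = '20171020' AND ip IS NOT NULL AND imp > 0
    GROUP BY ip
) bb
LEFT OUTER JOIN
(
    SELECT round(sum(click) / sum(imp), 4) avg_click_rate
    FROM schema.srctable1
    WHERE date = '20171020'
) aa;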
Optimization strategies
Option 1: Swap the positions of the large and small tables, putting the small table on the left; runtime drops to 29 s. (I never quite understood why this helps: the execution plan just shows the two tables swapped, with no other difference from before. A sketch of this rewrite is shown first under "Implementation details" below.)
Option 2: Rewrite the OR as a UNION; runtime drops to 35 s. (After various experiments I suspected the OR was the cause; rewriting it as a UNION while leaving everything else unchanged did indeed change the efficiency. But Option 1 only swaps the table order, changes nothing else, and still gives a large improvement, so it is not clear what optimization Spark SQL applies internally; to be investigated further.)
Option 3: Use cache + broadcast; runtime drops to 20 s. (This caches the small table in memory and performs a map-side join.)
Implementation details
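---- Option 1: small table on the left (runtime: 29 s, sketch)
The rewritten SQL for this option is not listed in the original notes, so the following is only a reconstruction, assuming the swap simply means writing the one-row aa subquery on the left side of the join and copying all filters and thresholds unchanged from the original statement:

SELECT bb.ip
FROM
(
    -- one-row metric subquery, now on the left ("small") side
    SELECT round(sum(click) / sum(imp), 4) avg_click_rate
    FROM schema.srctable1
    WHERE date = '20171020'
) aa
LEFT OUTER JOIN
(
    -- per-ip aggregation, now on the right ("large") side
    SELECT ip,
           sum(click) click_num,
           round(sum(click) / sum(imp), 4) user_click_rate
    FROM schema.srctable1
    WHERE date = '20171020' AND ip IS NOT NULL AND imp > 0
    GROUP BY ip
) bb
LEFT OUTER JOIN schema.dstable cc
    ON cc.ip = bb.ip
WHERE cc.ip IS NULL
  AND (bb.user_click_rate > aa.avg_click_rate * 3 AND click_num > 500)
  OR (click_num > 1000)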
---- Option 2: OR rewritten as UNION (runtime: 35 s)
select aa.ip
from (
SELECT bb.ip ip
FROM
(
SELECT
ip ,
sum(click) click_num,
round(sum(click) / sum(imp), 4) user_click_rate
FROM
schema.srctable1
WHERE
date = '20171020'
AND ip IS NOT NULL
AND imp > 0
GROUP BY ip
) bb
LEFT OUTER JOIN
(
SELECT round(sum(click) / sum(imp), 4) avg_click_rate
FROM schema.srctable1
WHERE date = '20171020'
) aa
WHERE ( bb.user_click_rate > aa.avg_click_rate * 3
AND click_num > 20 )
union
SELECT
bb.ip ip
FROM
(
SELECT
ip , sum(click) click_num,
round(sum(click) / sum(imp), 4) user_click_rate
FROM schema.srctable1
WHERE
date = '20171020'
AND ip IS NOT NULL
AND imp > 0
GROUP BY ip
) bb
LEFT OUTER JOIN
(
SELECT
round(sum(click) / sum(imp), 4) avg_click_rate
FROM schema.srctable1
WHERE date = '20171020'
) aa
WHERE click_num > 40
) aa
LEFT OUTER JOIN schema.dstable cc
on aa.ip = cc.ip
where cc.ip is null
----- Option 3: cache + broadcast (runtime: 20 s)
Principle: broadcasting ships the small table to every executor node, so the join is performed locally, the shuffle is essentially eliminated, and runtime improves dramatically.
cache table cta
as
SELECT round(sum(click) / sum(imp), 4) avg_click_rate
FROM schema.srctable1
WHERE date = '20171020';
INSERT into TABLE schema.dstable
SELECT bb.ip
FROM (
SELECT
ip ,
sum(click) click_num,
round(sum(click) / sum(imp), 4) user_click_rate
FROM schema.srctable1
WHERE
date = '20171020'
AND ip IS NOT NULL
AND imp > 0
GROUP BY ip
) bb
LEFT OUTER JOIN cta aa
LEFT OUTER JOIN schema.dstable cc
on cc.ip = bb.ip
WHERE cc.ip is null
AND (
bb.user_click_rate > aa.avg_click_rate * 3
AND click_num > 500
)
OR(
click_num > 1000
)
Note:
A cached table is not necessarily broadcast to the executors for a map-side join; that also depends on the parameter spark.sql.autoBroadcastJoinThreshold, which decides whether the table gets broadcast.
The default value of spark.sql.autoBroadcastJoinThreshold is 10 MB, so only cached tables smaller than 10 MB are broadcast to the executors for a map-side join.
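If the small table exceeds the default threshold, the threshold can be raised for the session, or (on Spark 2.2 and later) the broadcast can be requested explicitly with a join hint, independent of the threshold. A sketch; the 100 MB value here is only an example:

-- Raise the auto-broadcast threshold for the current session (value in bytes; -1 disables it).
SET spark.sql.autoBroadcastJoinThreshold=104857600;

-- Or ask for the broadcast explicitly via a hint on the cached table cta (Spark 2.2+):
SELECT /*+ BROADCAST(cta) */ bb.ip
FROM
(
    SELECT ip,
           sum(click) click_num,
           round(sum(click) / sum(imp), 4) user_click_rate
    FROM schema.srctable1
    WHERE date = '20171020' AND ip IS NOT NULL AND imp > 0
    GROUP BY ip
) bb
LEFT OUTER JOIN cta aa
LEFT OUTER JOIN schema.dstable cc
    ON cc.ip = bb.ip
WHERE cc.ip IS NULL
  AND (bb.user_click_rate > aa.avg_click_rate * 3 AND click_num > 500)
  OR (click_num > 1000)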