This is a short summary of Mastering PostgreSQL 13, chapter 6.
Use CLUSTER (it physically rewrites a table in the order of an index).

SELECT avg(aid), avg(bid)
FROM a
LEFT JOIN b
ON (aid = bid AND bid = 2);
avg | avg
-------------------+--------------------
2.0000000000000000 | 2.0000000000000000
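The underlying tables are not shown in this summary; a minimal setup that reproduces the averages above (an assumption, not necessarily the book's exact data) could be:

CREATE TABLE a (aid int);
CREATE TABLE b (bid int);
INSERT INTO a VALUES (1), (2), (3);   -- avg(aid) = 2
INSERT INTO b VALUES (2), (3), (4);   -- only bid = 2 survives the join condition, so avg(bid) = 2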
(A leftjoin B on (Pab)) innerjoin C on (Pac) = (A innerjoin C on (Pac)) leftjoin B on (Pab)
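A concrete sketch of this identity (table c and column cid are assumptions for illustration; a and b as above). When Pac references only columns of A and C, the planner may evaluate either form, so EXPLAIN should show equivalent plans for both:

CREATE TABLE c (cid int);
EXPLAIN SELECT * FROM (a LEFT JOIN b ON (aid = bid)) INNER JOIN c ON (aid = cid);
EXPLAIN SELECT * FROM (a INNER JOIN c ON (aid = cid)) LEFT JOIN b ON (aid = bid);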
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_seqscan = on
#enable_sort = on
#enable_incrementalsort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on
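These switches can be toggled per session to see how a plan changes; a minimal sketch using the query from above:

SET enable_nestloop = off;   -- discourage (not forbid) nested loop joins in this session
EXPLAIN SELECT avg(aid), avg(bid) FROM a LEFT JOIN b ON (aid = bid AND bid = 2);
RESET enable_nestloop;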
Build up optimization experience with parameters such as join_collapse_limit.

CREATE TABLE t_data (id serial, t date, payload text);
CREATE TABLE t_data_2016 () INHERITS (t_data);
CREATE TABLE t_data_2015 () INHERITS (t_data);
CREATE TABLE t_data_2014 () INHERITS (t_data);
CREATE TABLE t_data_2013 (special text) INHERITS (t_data);
EXPLAIN SELECT * FROM t_data WHERE t = '2016-01-04';
QUERY PLAN
----------------------------------------------------------------------------
Append (cost=0.00..95.24 rows=23 width=40)
-> Seq Scan on t_data t_data_1 (cost=0.00..0.00 rows=1 width=40)
Filter: (t = '2016-01-04'::date)
-> Seq Scan on t_data_2016 t_data_2 (cost=0.00..25.00 rows=6 width=40)
Filter: (t = '2016-01-04'::date)
-> Seq Scan on t_data_2015 t_data_3 (cost=0.00..25.00 rows=6 width=40)
Filter: (t = '2016-01-04'::date)
-> Seq Scan on t_data_2014 t_data_4 (cost=0.00..25.00 rows=6 width=40)
Filter: (t = '2016-01-04'::date)
ALTER TABLE t_data_2013
ADD CHECK (t < '2014-01-01');
ALTER TABLE t_data_2014
ADD CHECK (t >= '2014-01-01' AND t < '2015-01-01');
ALTER TABLE t_data_2015
ADD CHECK (t >= '2015-01-01' AND t < '2016-01-01');
ALTER TABLE t_data_2016
ADD CHECK (t >= '2016-01-01' AND t < '2017-01-01');
PostgreSQL enforces constraints more strictly than MySQL.
EXPLAIN SELECT * FROM t_data WHERE t = '2016-01-04';
QUERY PLAN
----------------------------------------------------------------------------
Append (cost=0.00..25.04 rows=7 width=40)
-> Seq Scan on t_data t_data_1 (cost=0.00..0.00 rows=1 width=40)
Filter: (t = '2016-01-04'::date)
-> Seq Scan on t_data_2016 t_data_2 (cost=0.00..25.00 rows=6 width=40)
Filter: (t = '2016-01-04'::date)
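The pruning shown above relies on constraint exclusion; the controlling parameter (default 'partition', i.e. applied only to inheritance children and UNION ALL subqueries) can be inspected and changed per session:

SHOW constraint_exclusion;
SET constraint_exclusion = off;   -- with exclusion off, every child table is scanned again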
Slow grouping can be resolved by increasing the size of work_mem.
work_mem does not only speed up grouping (it also affects sorts, hash joins, and so on).
The default is 4MB.
Setting work_mem high is commonly said to risk out-of-memory errors in OLTP environments.
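A sketch of how to observe this (t_test with an integer id column is assumed, as in the index examples below):

SET work_mem TO '1 MB';
EXPLAIN (ANALYZE) SELECT id, count(*) FROM t_test GROUP BY id;   -- look for sort/hash batches spilling to disk

SET work_mem TO '256 MB';
EXPLAIN (ANALYZE) SELECT id, count(*) FROM t_test GROUP BY id;   -- the same aggregation can now stay in memory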
Statements such as CREATE INDEX use maintenance_work_mem instead.

DROP INDEX idx_id;
DROP INDEX
SET maintenance_work_mem TO '1 MB';
SET
\timing
Timing is on.
CREATE INDEX idx_id ON t_test (id);
CREATE INDEX
Time: 104.268 ms
------------------------------------
SET maintenance_work_mem TO '1 GB';
SET
CREATE INDEX idx_id2 ON t_test (id);
CREATE INDEX
Time: 46.774 ms
btree indexes can be built in parallel.

SHOW max_parallel_maintenance_workers;
max_parallel_maintenance_workers
----------------------------------
2
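A sketch of asking for more workers for an index build (idx_id3 is just an example name):

SET max_parallel_maintenance_workers TO 4;   -- allow up to 4 parallel workers for maintenance commands
CREATE INDEX idx_id3 ON t_test (id);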
These workers are also used for VACUUM / ALTER TABLE operations.
Related settings (defaults in parentheses): max_parallel_workers_per_gather (2), min_parallel_table_scan_size (8MB); the table-size threshold also factors into how many workers a parallel CREATE INDEX can use.
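A sketch of the per-table override (a storage parameter), useful when the size threshold would otherwise keep a small table from being scanned in parallel:

ALTER TABLE t_test SET (parallel_workers = 4);   -- override the planner's size-based worker estimate
EXPLAIN SELECT count(*) FROM t_test;             -- a parallel plan shows a Gather node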
#parallel_tuple_cost = 0.1
When tuples are passed between processes, 0.1 is added to the estimated cost per tuple.
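For example, a Gather node receiving 100,000 tuples from its workers adds 100,000 × 0.1 = 10,000 to the estimated plan cost (a back-of-the-envelope illustration, not real plan output).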
--with-llvm build with LLVM based JIT support
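On a server built with --with-llvm, JIT can be checked and tuned per session, for example:

SHOW jit;                      -- on by default since PostgreSQL 12
SET jit = on;
SET jit_above_cost = 100000;   -- only consider JIT when the estimated query cost exceeds this (default 100000)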