Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                
Skip to content

Commit c9dedad

Browse files
committed
Docs for broadcast.
1 parent b6ae844 commit c9dedad

File tree

8 files changed

+350
-303
lines changed

8 files changed

+350
-303
lines changed

bin/pgbench_history.sql

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
drop table if exists pgbench_history;
2+
create table pgbench_history (tid int, bid int, aid int, delta int,
3+
mtime timestamp, filler char(22));

bin/setup.sh.example

Lines changed: 29 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -24,45 +24,46 @@ declare -a worker_ports=("5433" "5434" "5435" "5436")
2424
function run_demo()
2525
{
2626
:
27-
28-
for port in "${worker_ports[@]}" $lord_port; do
29-
psql -p $port -c "set synchronous_commit to local; drop role if exists joe; create role joe login password '12345'; grant usage on FOREIGN DATA WRAPPER postgres_fdw to joe;"
30-
done
31-
3227
psql -c "drop table if exists t cascade;"
3328
psql -c "CREATE TABLE t(i int);"
34-
psql -c "drop table if exists t cascade;"
35-
psql -c "CREATE TABLE t(i int);"
36-
3729

3830
psql -c "drop table if exists pt cascade;"
3931
psql -c "CREATE TABLE pt(id INT primary key, payload REAL);"
4032
psql -c "INSERT INTO pt SELECT generate_series(1, 10), random();"
4133

42-
psql -c "select shardman.add_node('port=5433', conn_string => 'user=joe password=12345 dbname=ars port=5433');"
43-
psql -c "select shardman.add_node('port=5434', conn_string => 'user=joe password=12345 dbname=ars port=5434');"
44-
psql -c "select shardman.add_node('port=5435', conn_string => 'user=joe password=12345 dbname=ars port=5435');"
45-
psql -c "select shardman.add_node('port=5436', conn_string => 'user=joe password=12345 dbname=ars port=5436');"
34+
# declarative partitioning
35+
# psql -p 5433 -c "drop table if exists ppt cascade;"
36+
# psql -p 5433 -c "CREATE TABLE ppt(id INT primary key, payload REAL);"
37+
# psql -c "create server node_1 foreign data wrapper postgres_fdw options (port '5433');"
38+
# psql -c "create user mapping for current_user server node_1;"
39+
# psql -c "create foreign table ppt (id int, payload real) server node_1;"
40+
41+
# psql -c "create table ppt_root (id int, payload real) partition by range (id);"
42+
# psql -c "alter table ppt_root attach partition ppt for values from (MINVALUE) TO (MAXVALUE);"
43+
44+
# with joe
45+
# for port in "${worker_ports[@]}" $lord_port; do
46+
# psql -p $port -c "set synchronous_commit to local; drop role if exists joe; create role joe superuser login; grant usage on FOREIGN DATA WRAPPER postgres_fdw to joe;"
47+
# done
48+
# psql -c "select shardman.add_node('port=5433', conn_string => 'user=joe password=12345 dbname=ars port=5433');"
49+
# psql -c "select shardman.add_node('port=5434', conn_string => 'user=joe password=12345 dbname=ars port=5434');"
50+
# psql -c "select shardman.add_node('port=5435', conn_string => 'user=joe password=12345 dbname=ars port=5435');"
51+
# psql -c "select shardman.add_node('port=5436', conn_string => 'user=joe password=12345 dbname=ars port=5436');"
52+
53+
psql -c "select shardman.add_node('port=5433');"
54+
psql -c "select shardman.add_node('port=5434');"
55+
psql -c "select shardman.add_node('port=5435');"
56+
psql -c "select shardman.add_node('port=5436');"
4657

4758
psql -c "select shardman.create_hash_partitions('pt', 'id', 4);"
4859
psql -c "select shardman.set_redundancy('pt', 2);"
4960

50-
# psql -c "select shardman.create_hash_partitions(2, 'pt', 'id', 4, true);"
51-
# psql -c "select shardman.set_replevel('pt', 1);"
52-
53-
# psql -c "drop table if exists pgbench_accounts cascade";
54-
# psql -c "drop table if exists pgbench_tellers cascade";
55-
# psql -c "drop table if exists pgbench_branches cascade";
56-
# psql -c "drop table if exists pgbench_tellers cascade;"
57-
# psql -c "drop table if exists pgbench_accounts cascade;"
58-
# pgbench -s 5 -i
59-
# psql -c "select shardman.create_hash_partitions(2, 'pgbench_accounts', 'aid', 4);"
60-
# psql -c "select shardman.set_replevel('pgbench_accounts', 1);"
61-
# psql -c "select shardman.create_hash_partitions(2, 'pgbench_tellers', 'tid', 10);"
62-
# psql -c "select shardman.set_replevel('pgbench_tellers', 1);"
63-
# psql -c "select shardman.create_hash_partitions(2, 'pgbench_branches', 'bid', 10);"
64-
# psql -c "select shardman.set_replevel('pgbench_branches', 1);"
6561
# for port in "${worker_ports[@]}"; do
66-
# psql -p $port -f /home/ars/postgres/pg_shardman/bin/pgbench_history.sql
62+
# psql -p $port -f /home/ars/postgres/pg_shardman/bin/pgbench_history.sql
6763
# done
64+
# psql -f devops/pgbench_ddl.sql
65+
# psql -c "select shardman.create_hash_partitions('pgbench_accounts', 'aid', 30, 1);"
66+
# psql -c "select shardman.create_hash_partitions('pgbench_tellers', 'tid', 30, 1);"
67+
# psql -c "select shardman.create_hash_partitions('pgbench_branches', 'bid', 30, 1);"
68+
# pgbench -p 5433 -s 10 -i --no-ddl
6869
}

devops/readme.txt

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -53,11 +53,9 @@ ansible-playbook -i inventory_ec2 pg_ctl.yml -e "pg_ctl_action=restart"
5353

5454
Read cmd log on shardlord:
5555
ansible-playbook -i inventory_ec2/ psql.yml --limit 'shardlord' -e "cmd='\'table shardman.cmd_log\''"
56-
Read nodes table on workers:
57-
nodes': ansible-playbook -i inventory_ec2/ psql.yml --limit 'workers' -e "cmd='\'table shardman.nodes\''"
5856

5957
Create, fill and shard pgbench tables:
60-
ansible-playbook -i inventory_ec2/ pgbench_prepare.yml -e "scale=10 nparts=3 repfactor=0 rebalance=true"
58+
ansible-playbook -i inventory_ec2/ pgbench_prepare.yml -e "scale=10 nparts=3 redundancy=0"
6159
Run pgbench test:
6260
ansible-playbook -i inventory_ec2/ pgbench_run.yml -e 'tmstmp=false tname=t pgbench_opts="-c 1 -T 5"'
6361
Run pgbench only on node:
@@ -75,14 +73,14 @@ ansible-playbook -i inventory_ec2/ ec2.yml --tags "terminate"
7573

7674
Ubuntu images EC2 locator:
7775
https://cloud-images.ubuntu.com/locator/ec2/
78-
We need ami-4199282e.
76+
We use ami-4199282e.
7977

8078
Other hints:
8179
Currently pgbench exits on first error. postgres_fdw currently supports only
8280
repeatable read / serializable isolation levels which immediately leads to
8381
serialization errors, so you should use either patched postgres_fdw or pgbench.
8482

85-
If you don't want to measure the dist performance, keep data dir on tmpfs or
83+
If you don't want to measure the disk performance, keep data dir on tmpfs or
8684
turn off fsync; the effect is similar.
8785

8886
Things that made me wonder during writing this:

postgresql.conf.common

Lines changed: 1 addition & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -1,59 +1,8 @@
11
shared_preload_libraries = 'pg_pathman'
22

3-
# We support printing node ids in log messages. For that, include '%z' in
4-
# log_line_prefix. This feature is only supported if there is one shardman
5-
# installation per database cluster.
6-
log_line_prefix = '%m [%p]%z'
7-
8-
log_min_messages = DEBUG1
9-
client_min_messages = NOTICE
10-
log_replication_commands = on
11-
# log_statement = 'all'
12-
13-
# We use logical replication for sending metadata from shardlord to workers
14-
# and for data replication.
15-
wal_level = logical # necessary for logical replication
16-
17-
# On shardlord, this must be at least max number of worker nodes + some reserved
18-
# for initial tablesync.
19-
#
20-
# On worker node 'A', this must be at least 'mrs':
21-
# mrs = 1
22-
# for each sharded table t
23-
# for each primary shard of t lying on A
24-
# mrs += number of this shard replicas
25-
# for each replica shard of t lying on A
26-
# mrs++
27-
#
28-
# For example, imagine we have one sharded table with 10 partitions, replication
29-
# factor 3 (1 primary and 2 replicas for each shard), 5 nodes and distribute
30-
# data evenly so that each node has 6 shards. In the almost worst case, if node
31-
# A keeps 5 primaries and 1 replica, this node needs 1 + 5*2 + 1 = 12 repslots.
32-
max_replication_slots = 100
33-
34-
# Similar is true for max_wal_senders. Shardlord should have this at equal
35-
# max_replication_slots.
36-
37-
# On worker node 'A', this must be at least 'mws':
38-
# mws = 0
39-
# for each sharded table t
40-
# for each primary shard of t lying on A
41-
# mrs += number of this shard replicas
42-
#
43-
# So it is 5*2 = 10 walsenders in previous example.
44-
max_wal_senders = 50
45-
46-
# never set this to 'off' globally while using pg_shardman if you want
47-
# synchronous replication between shards and its replicas.
48-
synchronous_commit = on
49-
50-
# performance-related settings
3+
# fairly usual performance-related settings
514
shared_buffers = 512MB
525
effective_cache_size = 512MB
536
work_mem = 4MB
547
max_connections = 1000
558
max_wal_size = 5GB
56-
57-
max_prepared_transactions = 1000
58-
postgres_fdw.use_2pc = on
59-
postgres_fdw.use_repeatable_read = off

postgresql.conf.lord

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
11
shardman.shardlord_connstring = 'port=5432' # shardlord's connstring
2+
# This node is shardlord?
23
shardman.shardlord = on
4+
# While creating replicas, configure sync or async replication?
35
shardman.sync_replication = on

postgresql.conf.worker

Lines changed: 27 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,30 @@
1-
# On worker node 'A', this must be at least 'mrs':
2-
# mrs = 1
3-
# for each sharded table t
4-
# for each replica shard of t lying on A
5-
# mrs++
6-
#
7-
# So it is 1 + 1 = 2 for node A from example in postgresql.conf.common
8-
max_logical_replication_workers = 50
1+
# We use logical replication for data replication.
2+
wal_level = logical # necessary for logical replication
3+
# On worker node from replication group with n nodes, 'max_replication_slots'
4+
# must be at least 2n + 1 (+ 1 for moving parts/replicas)
5+
max_replication_slots = 101
6+
# On worker node from replication group with n nodes, 'max_wal_senders'
7+
# must be at least n + 1
8+
max_wal_senders = 51
9+
# On worker node from replication group with n nodes,
10+
# 'max_logical_replication_workers' must be at least n + 1
11+
max_logical_replication_workers = 51
912
# At least max_logical_replication_workers + 1
1013
max_worker_processes = 60
11-
# Logical worker dies if it hadn't receive anything new during wal_receiver_timeout
14+
# Logical worker dies if it hasn't received anything new during
15+
# wal_receiver_timeout
1216
wal_receiver_timeout = 60s
17+
18+
# set this to 'on' for synchronous replication between shards and their
19+
# replicas, otherwise to 'local'.
20+
synchronous_commit = on
21+
22+
# for 2pc
23+
max_prepared_transactions = 1000
24+
postgres_fdw.use_2pc = on
25+
# only for testing performance; setting this to 'on' violates visibility
26+
postgres_fdw.use_repeatable_read = off
27+
28+
shardman.shardlord_connstring = 'port=5432' # shardlord's connstring
29+
# This node is shardlord?
30+
shardman.shardlord = off

0 commit comments

Comments
 (0)