Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                
Skip to content

Commit a62f1f7

Browse files
committed
Merge branch 'devops_broadcast' into broadcast
Merging devops stuff.
2 parents 3014fb9 + fd85b6d commit a62f1f7

39 files changed

+3155
-0
lines changed

devops/.gitignore

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
*.retry
2+
inventory_manual/manual
3+
logs/
4+
res/
5+
tester_res.csv*
6+
wal_lag.txt
7+
8+
/postgresql.conf.common
9+
/postgresql.conf.lord
10+
/postgresql.conf.worker
11+
12+
# vagrant stuff
13+
*.log
14+
.vagrant/

devops/Vagrantfile

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
Vagrant.configure("2") do |config|
2+
3+
config.vm.define "node1" do |node|
4+
node.vm.box = "ubuntu/xenial64"
5+
node.vm.network "private_network", ip: "10.42.42.10"
6+
end
7+
8+
config.vm.define "node2" do |node|
9+
node.vm.box = "ubuntu/xenial64"
10+
node.vm.network "private_network", ip: "10.42.42.20"
11+
end
12+
13+
config.vm.define "node3" do |node|
14+
node.vm.box = "ubuntu/xenial64"
15+
node.vm.network "private_network", ip: "10.42.42.30"
16+
end
17+
18+
# ssh-copy-id
19+
config.vm.provision "shell" do |s|
20+
ssh_pub_key = File.readlines("#{Dir.home}/.ssh/id_rsa.pub").first.strip
21+
s.inline = <<-SHELL
22+
echo #{ssh_pub_key} >> /home/ubuntu/.ssh/authorized_keys
23+
echo #{ssh_pub_key} >> /root/.ssh/authorized_keys
24+
SHELL
25+
end
26+
27+
end

devops/__init__.py

Whitespace-only changes.

devops/analyze.sql

Lines changed: 123 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,123 @@
-- Analysis queries for shardman benchmark results.
-- Loads a CSV of benchmark runs into shmn_benchmarks and slices it by
-- replication mode, 2PC setting, worker/partition counts, etc.
-- NOTE(review): the copy path below is hard-coded to a personal home
-- directory — adjust before running on another machine.
drop table if exists shmn_benchmarks cascade;
create table shmn_benchmarks (
    test_id text, instance_type text, workers int, nparts int, sharded_tables text,
    replicas int, repmode text, sync_replicas bool, sync_commit text, CFLAGS text,
    scale int, seconds int, test text, fdw_2pc bool, active_workers text, clients int,
    tps_sum int, avg_latency numeric, end_latency numeric, wal_lag bigint,
    comment text);
copy shmn_benchmarks from '/home/ars/shmn_benchmarks.csv' with (format csv, header);

select workers, nparts, repmode, sync_replicas, clients, tps_sum, pg_size_pretty(wal_lag) from shmn_benchmarks;

-- only important fields
drop view if exists shmn_bench;
create view shmn_bench as select workers, nparts, sharded_tables, repmode, sync_replicas, test, fdw_2pc, active_workers, clients, tps_sum,
    pg_size_pretty(wal_lag) wal_lag
    from shmn_benchmarks;

-- take only runs with number of clients maximizing tps
-- for each set of rows which differ only by number of clients we take from
-- window a row with max tps
drop view if exists shmn_benchmarks_optimal_clients;
create view shmn_benchmarks_optimal_clients as
    select distinct on (workers, nparts, sharded_tables, repmode, sync_replicas, test, fdw_2pc, active_workers)
      workers, nparts, sharded_tables, repmode, sync_replicas, test, fdw_2pc, active_workers,
      last_value(clients) over wnd clients,
      last_value(tps_sum) over wnd tps_sum,
      last_value(avg_latency) over wnd avg_latency,
      last_value(end_latency) over wnd end_latency,
      pg_size_pretty(last_value(wal_lag) over wnd) wal_lag
      from shmn_benchmarks
      window wnd as
        (partition by workers, nparts, sharded_tables, repmode, sync_replicas, fdw_2pc, test, active_workers order by tps_sum
	 rows between unbounded preceding and unbounded following);

-- Create first() aggregate, taken from
-- https://wiki.postgresql.org/wiki/First/last_(aggregate)
-- Create a function that always returns the first non-NULL item
CREATE OR REPLACE FUNCTION public.first_agg ( anyelement, anyelement )
RETURNS anyelement LANGUAGE SQL IMMUTABLE STRICT AS $$
        SELECT $1;
$$;

-- And then wrap an aggregate around it.
-- An aggregate is identified by its input types, so the argument type must
-- be spelled out here — the previous zero-argument form first() would never
-- match first(anyelement).  'if exists' keeps the script idempotent, like
-- every other drop above.
drop aggregate if exists public.first(anyelement);
CREATE AGGREGATE public.FIRST (
        sfunc    = public.first_agg,
        basetype = anyelement,
        stype    = anyelement
);

-- flatten to compare no rep, trigger, sync and async, 2pc only
select workers, nparts, (nparts / workers) nparts_per_node,
       first(tps_sum) filter (where repmode is null) no_rep_tps,
       first(tps_sum) filter (where repmode = 'trigger') trig_rep_tps,
       first(tps_sum) filter (where repmode = 'logical' and sync_replicas) sync_rep_tps,
       first(tps_sum) filter (where repmode = 'logical' and not sync_replicas) async_rep_tps,
       first(wal_lag) filter (where repmode = 'logical' and not sync_replicas) async_rep_wal_lag
from shmn_benchmarks_optimal_clients
where active_workers = workers::text and fdw_2pc
group by workers, nparts;

-- showing clients
select workers, nparts, (nparts / workers) nparts_per_node,
       first(tps_sum) filter (where repmode is null) no_rep_tps,
       -- fixed: this column was also aliased no_rep_tps (copy-paste),
       -- which made the output ambiguous; it carries the client count.
       first(clients) filter (where repmode is null) no_rep_clients,
       first(tps_sum) filter (where repmode = 'trigger') trig_rep_tps,
       first(tps_sum) filter (where repmode = 'logical' and sync_replicas) sync_rep_tps,
       first(tps_sum) filter (where repmode = 'logical' and not sync_replicas) async_rep_tps,
       first(wal_lag) filter (where repmode = 'logical' and not sync_replicas) async_rep_wal_lag
from shmn_benchmarks_optimal_clients
where active_workers = workers::text and fdw_2pc
group by workers, nparts;

-- either with 2pc and not, showing it
select workers, nparts, (nparts / workers) nparts_per_node,
       first(tps_sum) filter (where repmode is null) no_rep_tps,
       first(fdw_2pc) filter (where repmode is null) no_rep_2pc,
       first(tps_sum) filter (where repmode = 'trigger') trig_rep_tps,
       first(fdw_2pc) filter (where repmode = 'trigger') trig_rep_2pc,
       first(tps_sum) filter (where repmode = 'logical' and sync_replicas) sync_rep_tps,
       first(fdw_2pc) filter (where repmode = 'logical' and sync_replicas) sync_rep_2pc,
       first(tps_sum) filter (where repmode = 'logical' and not sync_replicas) async_rep_tps,
       first(fdw_2pc) filter (where repmode = 'logical' and not sync_replicas) async_rep_2pc,
       first(wal_lag) filter (where repmode = 'logical' and not sync_replicas) async_rep_wal_lag
from shmn_benchmarks_optimal_clients
where active_workers = workers::text
group by workers, nparts;


select workers, nparts, sharded_tables, repmode, sync_replicas, clients, tps_sum,
       wal_lag
from shmn_benchmarks_optimal_clients where active_workers = workers::text and fdw_2pc;

select workers, nparts, sharded_tables, repmode, sync_replicas, fdw_2pc, test, clients, tps_sum,
       wal_lag
from shmn_benchmarks_optimal_clients where active_workers = workers::text and sharded_tables = 'pgbench_accounts';


-- see, here lag increases only where there are too many clients already
select * from shmn_bench where repmode = 'logical' and not sync_replicas and fdw_2pc and (
       (workers = 3 and (nparts = 9 or nparts = 30)) or
       (workers = 6 and (nparts = 6 or nparts = 12 or nparts = 18 or nparts = 60)) or
       (workers = 9 and (nparts = 27 or nparts = 90)) or
       (workers = 12)
)
order by workers, nparts, clients;

-- same, only for 6:6 and 6:12
select * from shmn_bench where repmode = 'logical' and not sync_replicas and fdw_2pc and (
       (workers = 6 and (nparts = 6 or nparts = 12))
)
order by workers, nparts, clients;


-- 2pc vs non-2pc
select *, (s.no_two_pc_tps::numeric(10, 0) / s.two_pc_tps)::numeric(3, 2) no_two_pc_faster_times from
(select workers, nparts, sharded_tables, repmode, sync_replicas, test, active_workers, clients,
       first(tps_sum) filter (where fdw_2pc) two_pc_tps,
       first(tps_sum) filter (where not fdw_2pc) no_two_pc_tps
from shmn_bench
group by workers, nparts, sharded_tables, repmode, sync_replicas, test, active_workers, clients) s
where (s.two_pc_tps is not null and s.no_two_pc_tps is not null)
order by workers, nparts, repmode, sync_replicas, active_workers, clients;

0 commit comments

Comments
 (0)