
Commit c273b11

Shardmaster renamed to shardlord.

1 parent: 87fe771
14 files changed: 99 additions, 101 deletions

bin/cancel_cmd.sh (1 addition, 1 deletion)

@@ -1,3 +1,3 @@
 #!/bin/bash
 
-kill -SIGUSR1 `ps aux | grep 'postgres: bgworker: shardmaster' | grep -v 'grep' | awk '{print $2}'`
+kill -SIGUSR1 `ps aux | grep 'postgres: bgworker: shardlord' | grep -v 'grep' | awk '{print $2}'`
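
The script signals the shardlord background worker by grepping ps output for its process title. For comparison only (not part of this commit), the same signal can be sent with a single pkill call, assuming the bgworker keeps advertising the title shown above:

# Equivalent sketch using pkill instead of ps/grep/awk:
pkill -USR1 -f 'postgres: bgworker: shardlord'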

bin/common.sh (3 additions, 3 deletions)

@@ -13,13 +13,13 @@ function start_nodes()
         port="${worker_ports[i]}"
         pg_ctl -o "-p $port" -D $datadir -l $logfile start
     done
-    pg_ctl -o "-p $master_port" -D $master_datadir -l $logfile start
+    pg_ctl -o "-p $lord_port" -D $lord_datadir -l $logfile start
 }
 
 function stop_nodes()
 {
     echo "Stopping nodes"
-    for datadir in $master_datadir "${worker_datadirs[@]}"; do
+    for datadir in $lord_datadir "${worker_datadirs[@]}"; do
         pg_ctl -D $datadir stop || true
     done
 }
@@ -32,5 +32,5 @@ function restart_nodes()
         port="${worker_ports[i]}"
         pg_ctl -o "-p $port" -D $datadir -l $logfile restart
     done
-    pg_ctl -o "-p $master_port" -D $master_datadir -l $logfile restart
+    pg_ctl -o "-p $lord_port" -D $lord_datadir -l $logfile restart
 }

bin/setup.sh.example (2 additions, 2 deletions)

@@ -3,8 +3,8 @@ pathmanpath=~/postgres/pg_pathman
 install_pathman=false
 logfile=$HOME/tmp/tmp/tmp.log
 
-master_datadir=~/postgres/data1
-master_port=5432
+lord_datadir=~/postgres/data1
+lord_port=5432
 
 # declare -a worker_datadirs=()
 # declare -a worker_ports=()
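
Only the variable names change here; the worker arrays stay commented out in the example. A hypothetical filled-in setup.sh for one shardlord and two local workers could look like this (paths and ports are made up for illustration, not taken from the repo):

pathmanpath=~/postgres/pg_pathman
install_pathman=false
logfile=$HOME/tmp/tmp/tmp.log

lord_datadir=~/postgres/data1
lord_port=5432

# hypothetical layout: two worker instances on the same host
declare -a worker_datadirs=(~/postgres/data2 ~/postgres/data3)
declare -a worker_ports=(5433 5434)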

bin/shardman_init.sh (4 additions, 4 deletions)

@@ -16,21 +16,21 @@ make install
 > $logfile
 
 stop_nodes
-for datadir in $master_datadir "${worker_datadirs[@]}"; do
+for datadir in $lord_datadir "${worker_datadirs[@]}"; do
     rm -rf "$datadir"
     mkdir -p "$datadir"
     initdb -D "$datadir"
 done
 
-cat postgresql.conf.common.template >> ${master_datadir}/postgresql.conf
-cat postgresql.conf.lord.template >> ${master_datadir}/postgresql.conf
+cat postgresql.conf.common.template >> ${lord_datadir}/postgresql.conf
+cat postgresql.conf.lord.template >> ${lord_datadir}/postgresql.conf
 for worker_datadir in "${worker_datadirs[@]}"; do
     cat postgresql.conf.common.template >> ${worker_datadir}/postgresql.conf
     cat postgresql.conf.worker.template >> ${worker_datadir}/postgresql.conf
 done
 
 start_nodes
-for port in $master_port "${worker_ports[@]}"; do
+for port in $lord_port "${worker_ports[@]}"; do
     createdb -p $port `whoami`
     psql -p $port -c "create extension pg_shardman cascade;"
 done

bin/shardman_start.sh (4 additions, 4 deletions)

@@ -8,20 +8,20 @@ cd "${script_dir}/.."
 > $logfile
 
 restart_nodes # make sure nodes run
-# first workers, then master
-for port in "${worker_ports[@]}" $master_port; do
+# first workers, then lord
+for port in "${worker_ports[@]}" $lord_port; do
     psql -p $port -c "drop extension if exists pg_shardman cascade;"
 done
 
 make clean
 make install
 
 restart_nodes
-for port in $master_port "${worker_ports[@]}"; do
+for port in $lord_port "${worker_ports[@]}"; do
     psql -p $port -c "create extension pg_shardman cascade;"
 done
 
-# to restart master bgw
+# to restart lord bgw
 restart_nodes
 
 run_demo

init.sql (23 additions, 23 deletions)

@@ -36,16 +36,16 @@ CREATE TABLE cmd_log (
     status cmd_status DEFAULT 'waiting' NOT NULL
 );
 
--- Notify shardman master bgw about new commands
-CREATE FUNCTION notify_shardmaster() RETURNS trigger AS $$
+-- Notify shardlord bgw about new commands
+CREATE FUNCTION notify_shardlord() RETURNS trigger AS $$
 BEGIN
     NOTIFY shardman_cmd_log_update;
     RETURN NULL;
 END
 $$ LANGUAGE plpgsql;
 CREATE TRIGGER cmd_log_inserts
     AFTER INSERT ON cmd_log
-    FOR EACH STATEMENT EXECUTE PROCEDURE notify_shardmaster();
+    FOR EACH STATEMENT EXECUTE PROCEDURE notify_shardlord();
 
 -- probably better to keep opts in an array field, but working with arrays from
 -- libpq is not very handy
@@ -83,7 +83,7 @@ END
 $$ LANGUAGE plpgsql;
 
 -- Shard table with hash partitions. Params as in pathman, except for relation
--- (master doesn't know oid of the table)
+-- (lord doesn't know oid of the table)
 CREATE FUNCTION create_hash_partitions(
     node_id int, relation text, expr text, partitions_count int,
     rebalance bool DEFAULT true)
@@ -162,39 +162,39 @@ END $$ LANGUAGE plpgsql STRICT;
 
 -- Internal functions
 
--- Called on shardmaster bgw start. Add itself to nodes table, set id, create
+-- Called on shardlord bgw start. Add itself to nodes table, set id, create
 -- publication.
-CREATE FUNCTION master_boot() RETURNS void AS $$
+CREATE FUNCTION lord_boot() RETURNS void AS $$
 DECLARE
-    -- If we have never booted as a master before, we have a work to do
-    init_master bool DEFAULT false;
-    master_connstring text;
-    master_id int;
+    -- If we have never booted as a lord before, we have a work to do
+    init_lord bool DEFAULT false;
+    lord_connstring text;
+    lord_id int;
 BEGIN
-    raise INFO 'Booting master';
+    raise INFO 'Booting lord';
     PERFORM shardman.create_meta_pub();
 
-    master_id := shardman.my_id();
-    IF master_id IS NULL THEN
-        SELECT pg_settings.setting into master_connstring from pg_settings
-            WHERE NAME = 'shardman.master_connstring';
+    lord_id := shardman.my_id();
+    IF lord_id IS NULL THEN
+        SELECT pg_settings.setting INTO lord_connstring FROM pg_settings
+            WHERE NAME = 'shardman.shardlord_connstring';
         EXECUTE format(
            'INSERT INTO @extschema@.nodes VALUES (DEFAULT, %L, NULL, false, true)
-            RETURNING id', master_connstring) INTO master_id;
-        PERFORM shardman.set_node_id(master_id);
-        init_master := true;
+            RETURNING id', lord_connstring) INTO lord_id;
+        PERFORM shardman.set_node_id(lord_id);
+        init_lord := true;
     ELSE
-        EXECUTE 'SELECT NOT (SELECT master FROM shardman.nodes WHERE id = $1)'
-            INTO init_master USING master_id;
-        EXECUTE 'UPDATE shardman.nodes SET master = true WHERE id = $1' USING master_id;
+        EXECUTE 'SELECT NOT (SELECT lord FROM shardman.nodes WHERE id = $1)'
+            INTO init_lord USING lord_id;
+        EXECUTE 'UPDATE shardman.nodes SET lord = true WHERE id = $1' USING lord_id;
     END IF;
-    IF init_master THEN
+    IF init_lord THEN
         -- TODO: set up lr channels
     END IF;
 END $$ LANGUAGE plpgsql;
 
 -- These tables will be replicated to worker nodes, notifying them about changes.
--- Called on master.
+-- Called on lord.
 CREATE FUNCTION create_meta_pub() RETURNS void AS $$
 BEGIN
     IF NOT EXISTS (SELECT * FROM pg_publication WHERE pubname = 'shardman_meta_pub') THEN
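
The renamed trigger still wakes the shardlord's bgw through LISTEN/NOTIFY on the shardman_cmd_log_update channel. As a rough illustration of the mechanism (not part of this commit), the same exchange can be reproduced from psql:

-- In a psql session standing in for the listener (in reality the listener is
-- the shardlord bgw, not psql):
LISTEN shardman_cmd_log_update;

-- A statement-level INSERT into cmd_log fires notify_shardlord(), which is
-- equivalent to issuing the notification by hand:
NOTIFY shardman_cmd_log_update;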

membership.sql (4 additions, 4 deletions)

@@ -15,16 +15,16 @@ CREATE TABLE nodes (
     id serial PRIMARY KEY,
     connstring text NOT NULL UNIQUE,
     worker_status worker_node_status,
-    -- While currently we don't support master and worker roles on one node,
-    -- potentially node can be either worker, master or both, so we need 2 bits.
+    -- While currently we don't support lord and worker roles on one node,
+    -- potentially node can be either worker, lord or both, so we need 2 bits.
     -- One bool with NULL might be fine, but it seems a bit counter-intuitive.
     worker bool NOT NULL DEFAULT true,
-    master bool NOT NULL DEFAULT false,
+    lord bool NOT NULL DEFAULT false,
     -- cmd by which node was added
     added_by bigint REFERENCES shardman.cmd_log(id)
 );
 
--- Master is removing us, so reset our state, removing all subscriptions. A bit
+-- Lord is removing us, so reset our state, removing all subscriptions. A bit
 -- tricky part: we can't DROP SUBSCRIPTION here, because that would mean
 -- shooting (sending SIGTERM) ourselvers (to replication apply worker) in the
 -- leg. So for now we just disable subscription, worker will stop after the end
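
Since the column is renamed from master to lord, ad-hoc queries against the metadata follow suit. Illustrative queries only, assuming the nodes schema shown above and nothing more:

-- Find the shardlord's row:
SELECT id, connstring FROM shardman.nodes WHERE lord;

-- List worker nodes with their status:
SELECT id, connstring, worker_status FROM shardman.nodes WHERE worker;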

postgresql.conf.lord.template (3 additions, 3 deletions)

@@ -1,5 +1,5 @@
-shardman.master = on # this instance is shardlor?
-shardman.master_dbname = ars # lord's dbname
-shardman.master_connstring = 'port=5432' # lord's connstring
+shardman.shardlord = on # this instance is shardlord?
+shardman.shardlord_dbname = ars # shardlord's dbname
+shardman.shardlord_connstring = 'port=5432' # shardlord's connstring
 shardman.cmd_retry_naptime = 500 # sleep milliseconds after failure
 shardman.poll_interval = 500 # long operations poll frequency

readme.txt (3 additions, 4 deletions)

@@ -1,15 +1,14 @@
 First, some terminology:
 'shardlord' or 'lord' is postgres instance and background process (bgw) spinning
-on it which manages sharding. In some places it is still called 'shardmaster'
-or 'master'.
+on it which manages sharding.
 'worker nodes' or 'workers' are other nodes with data.
 'sharded table' is table managed by shardman.
 'shard' or 'partition' is any table containing part of sharded table.
 'primary' is main partition of sharded table, i.e. the only writable
 partition.
 'replica' is secondary partition of sharded table, i.e. read-only partition.
-'cluster' -- the whole system of shardlord and workers, or cluster in PostgreSQL
-sense, this should be clear from the context.
+'cluster' -- either the whole system of shardlord and workers, or cluster in
+traditional PostgreSQL sense, this should be clear from the context.
 
 For quick setup, see scripts in bin/ directory. Setup is configured in file
 setup.sh which needs to be placed in the same directory; see setup.sh.example

shard.sql (4 additions, 4 deletions)

@@ -49,9 +49,9 @@ CREATE TRIGGER new_table_worker_side AFTER INSERT ON shardman.tables
     FOR EACH ROW EXECUTE PROCEDURE new_table_worker_side();
 -- fire trigger only on worker nodes
 ALTER TABLE shardman.tables ENABLE REPLICA TRIGGER new_table_worker_side;
--- On master side, insert partitions.
+-- On lord side, insert partitions.
 -- All of them are primary and have no prev or nxt.
-CREATE FUNCTION new_table_master_side() RETURNS TRIGGER AS $$
+CREATE FUNCTION new_table_lord_side() RETURNS TRIGGER AS $$
 BEGIN
     INSERT INTO shardman.partitions
         SELECT part_name, NEW.initial_node AS owner, NULL, NULL, NEW.relation AS relation
@@ -61,8 +61,8 @@ BEGIN
     RETURN NULL;
 END
 $$ LANGUAGE plpgsql;
-CREATE TRIGGER new_table_master_side AFTER INSERT ON shardman.tables
-    FOR EACH ROW EXECUTE PROCEDURE new_table_master_side();
+CREATE TRIGGER new_table_lord_side AFTER INSERT ON shardman.tables
+    FOR EACH ROW EXECUTE PROCEDURE new_table_lord_side();
 
 ------------------------------------------------------------
 -- Partitions
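
For context: the worker-side trigger is enabled as a REPLICA trigger, so it fires only when rows arrive via logical replication, while the renamed lord-side trigger fires on ordinary inserts. A small sketch of that distinction in stock PostgreSQL terms, not taken from the repo:

-- In a normal session session_replication_role is 'origin':
-- new_table_lord_side() fires, the REPLICA-enabled new_table_worker_side() does not.
SHOW session_replication_role;

-- The logical replication apply worker effectively behaves as if it had run:
--   SET session_replication_role = 'replica';
-- and under that role only the REPLICA-enabled trigger fires.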

src/include/copypart.h (1 addition, 2 deletions)

@@ -31,8 +31,7 @@ typedef enum
 } ExecTaskRes;
 
 /*
- * Current step of 1 master partition move. See comments to corresponding
- * funcs, e.g. start_tablesync.
+ * Current step of 1 partition move.
  */
 typedef enum
 {

src/include/pg_shardman.h (4 additions, 4 deletions)

@@ -22,9 +22,9 @@ extern volatile sig_atomic_t got_sigterm;
 extern volatile sig_atomic_t got_sigusr1;
 
 /* GUC variables */
-extern bool shardman_master;
-extern char *shardman_master_dbname;
-extern char *shardman_master_connstring;
+extern bool shardman_shardlord;
+extern char *shardman_shardlord_dbname;
+extern char *shardman_shardlord_connstring;
 extern int shardman_cmd_retry_naptime;
 extern int shardman_poll_interval;
 
@@ -52,7 +52,7 @@ typedef struct RepCount
 } RepCount;
 
 extern void _PG_init(void);
-extern void shardmaster_main(Datum main_arg);
+extern void shardlord_main(Datum main_arg);
 extern void check_for_sigterm(void);
 extern uint64 void_spi(char *sql);
 extern void update_cmd_status(int64 id, const char *new_status);
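
These externs back the shardman.shardlord* settings seen in postgresql.conf.lord.template. For orientation only, here is a minimal sketch of how such custom GUCs are usually registered in _PG_init() with the standard PostgreSQL API; the actual pg_shardman registration code is not shown in this commit and may differ:

#include "postgres.h"
#include "utils/guc.h"

bool  shardman_shardlord;
char *shardman_shardlord_dbname;
char *shardman_shardlord_connstring;

void
_PG_init(void)
{
    /* Sketch only: wire the renamed GUCs to the renamed C variables. */
    DefineCustomBoolVariable("shardman.shardlord",
                             "This instance is the shardlord",
                             NULL,
                             &shardman_shardlord,
                             false,
                             PGC_POSTMASTER,
                             0, NULL, NULL, NULL);
    DefineCustomStringVariable("shardman.shardlord_dbname",
                               "Shardlord's dbname",
                               NULL,
                               &shardman_shardlord_dbname,
                               NULL,
                               PGC_POSTMASTER,
                               0, NULL, NULL, NULL);
    DefineCustomStringVariable("shardman.shardlord_connstring",
                               "Shardlord's connstring",
                               NULL,
                               &shardman_shardlord_connstring,
                               NULL,
                               PGC_POSTMASTER,
                               0, NULL, NULL, NULL);
}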
