diff options
author | Masahiko Sawada | 2025-02-13 00:31:34 +0000 |
---|---|---|
committer | Masahiko Sawada | 2025-02-13 00:31:34 +0000 |
commit | 072ee847ad4c3fb52b0c24f7dddbe0798bd70c24 (patch) | |
tree | b77b3f475ec5fb5e2e649ac0f6f4f38d9f57df01 /contrib/test_decoding/sql | |
parent | 9e66a2b7841a5c1f929103c82c73244d3f5f3416 (diff) |
Skip logical decoding of already-aborted transactions.
Previously, transaction aborts were detected concurrently only during
system catalog scans while replaying a transaction in streaming mode.
This commit adds an additional CLOG lookup to check the transaction
status, allowing the logical decoding to skip changes also when it
doesn't touch system catalogs, if the transaction is already
aborted. This optimization enhances logical decoding performance,
especially for large transactions that have already been rolled back,
as it avoids unnecessary disk or network I/O.
To avoid potential slowdowns caused by frequent CLOG lookups for small
transactions (most of which commit), the CLOG lookup is performed only
for large transactions before eviction. The performance benchmark
results showed there is no noticeable performance regression due to
CLOG lookups.
Reviewed-by: Amit Kapila, Peter Smith, Vignesh C, Ajin Cherian
Reviewed-by: Dilip Kumar, Andres Freund
Discussion: https://postgr.es/m/CAD21AoDht9Pz_DFv_R2LqBTBbO4eGrpa9Vojmt5z5sEx3XwD7A@mail.gmail.com
Diffstat (limited to 'contrib/test_decoding/sql')
-rw-r--r-- | contrib/test_decoding/sql/stats.sql | 20 | ||||
-rw-r--r-- | contrib/test_decoding/sql/stream.sql | 6 |
2 files changed, 25 insertions, 1 deletion
diff --git a/contrib/test_decoding/sql/stats.sql b/contrib/test_decoding/sql/stats.sql index 630371f147a..a022fe1bf07 100644 --- a/contrib/test_decoding/sql/stats.sql +++ b/contrib/test_decoding/sql/stats.sql @@ -50,7 +50,25 @@ SELECT slot_name FROM pg_stat_replication_slots; SELECT slot_name FROM pg_stat_replication_slots; COMMIT; + +SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot_stats4_twophase', 'test_decoding', false, true) s4; + +-- The INSERT changes are large enough to be spilled but will not be, because +-- the transaction is aborted. The logical decoding skips collecting further +-- changes too. The transaction is prepared to make sure the decoding processes +-- the aborted transaction. +BEGIN; +INSERT INTO stats_test SELECT 'serialize-toobig--1:'||g.i FROM generate_series(1, 5000) g(i); +PREPARE TRANSACTION 'test1_abort'; +ROLLBACK PREPARED 'test1_abort'; +SELECT count(*) FROM pg_logical_slot_get_changes('regression_slot_stats4_twophase', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +-- Verify that the decoding doesn't spill already-aborted transaction's changes. 
+SELECT pg_stat_force_next_flush(); +SELECT slot_name, spill_txns, spill_count FROM pg_stat_replication_slots WHERE slot_name = 'regression_slot_stats4_twophase'; + DROP TABLE stats_test; SELECT pg_drop_replication_slot('regression_slot_stats1'), pg_drop_replication_slot('regression_slot_stats2'), - pg_drop_replication_slot('regression_slot_stats3'); + pg_drop_replication_slot('regression_slot_stats3'), + pg_drop_replication_slot('regression_slot_stats4_twophase'); diff --git a/contrib/test_decoding/sql/stream.sql b/contrib/test_decoding/sql/stream.sql index 7f43f0c2ab7..f1269403e0a 100644 --- a/contrib/test_decoding/sql/stream.sql +++ b/contrib/test_decoding/sql/stream.sql @@ -49,7 +49,12 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL,NULL, 'incl * detect that the subtransaction was aborted, and reset the transaction while having * the TOAST changes in memory, resulting in deallocating both decoded changes and * TOAST reconstruction data. Memory usage counters must be updated correctly. + * + * Set debug_logical_replication_streaming to 'immediate' to disable the transaction + * status check happening before streaming the second insertion, so we can detect a + * concurrent abort while streaming. */ +SET debug_logical_replication_streaming = immediate; BEGIN; INSERT INTO stream_test SELECT repeat(string_agg(to_char(g.i, 'FM0000'), ''), 50) FROM generate_series(1, 500) g(i); ALTER TABLE stream_test ADD COLUMN i INT; @@ -58,6 +63,7 @@ INSERT INTO stream_test(data, i) SELECT repeat(string_agg(to_char(g.i, 'FM0000') ROLLBACK TO s1; COMMIT; SELECT count(*) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'stream-changes', '1'); +RESET debug_logical_replication_streaming; DROP TABLE stream_test; SELECT pg_drop_replication_slot('regression_slot'); |