Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                
Skip to content

Commit 9fab40a

Browse files
committed
Use the new "Slab" context for some allocations in reorderbuffer.h.
Note that this change alone does not yet fully address the performance problems triggering this work; a large portion of the slowdown is caused by the tuple allocator, which isn't converted to the new allocator. It would be possible to do so, but using evenly sized objects, like both the current implementation in reorderbuffer.c and slab.c, wastes a fair amount of memory. A later patch by Tomas will introduce a better approach.

Author: Tomas Vondra
Reviewed-By: Andres Freund
Discussion: https://postgr.es/m/d15dff83-0b37-28ed-0809-95a5cc7292ad@2ndquadrant.com
1 parent 58b25e9 commit 9fab40a

File tree

2 files changed

+22
-66
lines changed

2 files changed

+22
-66
lines changed

src/backend/replication/logical/reorderbuffer.c

+16-58
Original file line numberDiff line numberDiff line change
@@ -156,10 +156,7 @@ static const Size max_changes_in_memory = 4096;
156156
* major bottleneck, especially when spilling to disk while decoding batch
157157
* workloads.
158158
*/
159-
static const Size max_cached_changes = 4096 * 2;
160159
static const Size max_cached_tuplebufs = 4096 * 2; /* ~8MB */
161-
static const Size max_cached_transactions = 512;
162-
163160

164161
/* ---------------------------------------
165162
* primary reorderbuffer support routines
@@ -241,6 +238,16 @@ ReorderBufferAllocate(void)
241238

242239
buffer->context = new_ctx;
243240

241+
buffer->change_context = SlabContextCreate(new_ctx,
242+
"Change",
243+
SLAB_DEFAULT_BLOCK_SIZE,
244+
sizeof(ReorderBufferChange));
245+
246+
buffer->txn_context = SlabContextCreate(new_ctx,
247+
"TXN",
248+
SLAB_DEFAULT_BLOCK_SIZE,
249+
sizeof(ReorderBufferTXN));
250+
244251
hash_ctl.keysize = sizeof(TransactionId);
245252
hash_ctl.entrysize = sizeof(ReorderBufferTXNByIdEnt);
246253
hash_ctl.hcxt = buffer->context;
@@ -251,8 +258,6 @@ ReorderBufferAllocate(void)
251258
buffer->by_txn_last_xid = InvalidTransactionId;
252259
buffer->by_txn_last_txn = NULL;
253260

254-
buffer->nr_cached_transactions = 0;
255-
buffer->nr_cached_changes = 0;
256261
buffer->nr_cached_tuplebufs = 0;
257262

258263
buffer->outbuf = NULL;
@@ -261,8 +266,6 @@ ReorderBufferAllocate(void)
261266
buffer->current_restart_decoding_lsn = InvalidXLogRecPtr;
262267

263268
dlist_init(&buffer->toplevel_by_lsn);
264-
dlist_init(&buffer->cached_transactions);
265-
dlist_init(&buffer->cached_changes);
266269
slist_init(&buffer->cached_tuplebufs);
267270

268271
return buffer;
@@ -291,19 +294,8 @@ ReorderBufferGetTXN(ReorderBuffer *rb)
291294
{
292295
ReorderBufferTXN *txn;
293296

294-
/* check the slab cache */
295-
if (rb->nr_cached_transactions > 0)
296-
{
297-
rb->nr_cached_transactions--;
298-
txn = (ReorderBufferTXN *)
299-
dlist_container(ReorderBufferTXN, node,
300-
dlist_pop_head_node(&rb->cached_transactions));
301-
}
302-
else
303-
{
304-
txn = (ReorderBufferTXN *)
305-
MemoryContextAlloc(rb->context, sizeof(ReorderBufferTXN));
306-
}
297+
txn = (ReorderBufferTXN *)
298+
MemoryContextAlloc(rb->txn_context, sizeof(ReorderBufferTXN));
307299

308300
memset(txn, 0, sizeof(ReorderBufferTXN));
309301

@@ -344,18 +336,7 @@ ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
344336
txn->invalidations = NULL;
345337
}
346338

347-
/* check whether to put into the slab cache */
348-
if (rb->nr_cached_transactions < max_cached_transactions)
349-
{
350-
rb->nr_cached_transactions++;
351-
dlist_push_head(&rb->cached_transactions, &txn->node);
352-
VALGRIND_MAKE_MEM_UNDEFINED(txn, sizeof(ReorderBufferTXN));
353-
VALGRIND_MAKE_MEM_DEFINED(&txn->node, sizeof(txn->node));
354-
}
355-
else
356-
{
357-
pfree(txn);
358-
}
339+
pfree(txn);
359340
}
360341

361342
/*
@@ -366,19 +347,8 @@ ReorderBufferGetChange(ReorderBuffer *rb)
366347
{
367348
ReorderBufferChange *change;
368349

369-
/* check the slab cache */
370-
if (rb->nr_cached_changes)
371-
{
372-
rb->nr_cached_changes--;
373-
change = (ReorderBufferChange *)
374-
dlist_container(ReorderBufferChange, node,
375-
dlist_pop_head_node(&rb->cached_changes));
376-
}
377-
else
378-
{
379-
change = (ReorderBufferChange *)
380-
MemoryContextAlloc(rb->context, sizeof(ReorderBufferChange));
381-
}
350+
change = (ReorderBufferChange *)
351+
MemoryContextAlloc(rb->change_context, sizeof(ReorderBufferChange));
382352

383353
memset(change, 0, sizeof(ReorderBufferChange));
384354
return change;
@@ -434,21 +404,9 @@ ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change)
434404
break;
435405
}
436406

437-
/* check whether to put into the slab cache */
438-
if (rb->nr_cached_changes < max_cached_changes)
439-
{
440-
rb->nr_cached_changes++;
441-
dlist_push_head(&rb->cached_changes, &change->node);
442-
VALGRIND_MAKE_MEM_UNDEFINED(change, sizeof(ReorderBufferChange));
443-
VALGRIND_MAKE_MEM_DEFINED(&change->node, sizeof(change->node));
444-
}
445-
else
446-
{
447-
pfree(change);
448-
}
407+
pfree(change);
449408
}
450409

451-
452410
/*
453411
* Get an unused, possibly preallocated, ReorderBufferTupleBuf fitting at
454412
* least a tuple of size tuple_len (excluding header overhead).

src/include/replication/reorderbuffer.h

+6-8
Original file line numberDiff line numberDiff line change
@@ -330,6 +330,12 @@ struct ReorderBuffer
330330
*/
331331
MemoryContext context;
332332

333+
/*
334+
* Memory contexts for specific types objects
335+
*/
336+
MemoryContext change_context;
337+
MemoryContext txn_context;
338+
333339
/*
334340
* Data structure slab cache.
335341
*
@@ -340,14 +346,6 @@ struct ReorderBuffer
340346
* on top of reorderbuffer.c
341347
*/
342348

343-
/* cached ReorderBufferTXNs */
344-
dlist_head cached_transactions;
345-
Size nr_cached_transactions;
346-
347-
/* cached ReorderBufferChanges */
348-
dlist_head cached_changes;
349-
Size nr_cached_changes;
350-
351349
/* cached ReorderBufferTupleBufs */
352350
slist_head cached_tuplebufs;
353351
Size nr_cached_tuplebufs;

0 commit comments

Comments (0)