@@ -300,10 +300,20 @@ struct Tuplesortstate
     * the already-read length of the stored tuple.  Create a palloc'd copy,
     * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
     * decrease state->availMem by the amount of memory space consumed.
+    * (See batchUsed notes for details on how memory is handled when
+    * incremental accounting is abandoned.)
     */
    void        (*readtup) (Tuplesortstate *state, SortTuple *stup,
                            int tapenum, unsigned int len);

+   /*
+    * Function to move a caller tuple.  This is usually implemented as a
+    * memmove() shim, but function may also perform additional fix-up of
+    * caller tuple where needed.  Batch memory support requires the
+    * movement of caller tuples from one location in memory to another.
+    */
+   void        (*movetup) (void *dest, void *src, unsigned int len);
+
    /*
     * This array holds the tuples now in sort memory.  If we are in state
     * INITIAL, the tuples are in no particular order; if we are in state
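
Editorial aside, not part of the diff: a movetup routine that satisfies this contract for a tuple representation with no internal pointers is just a memmove() wrapper, which is exactly the shape of the movetup_heap, movetup_index, and movetup_datum routines added further down; only movetup_cluster needs the extra fix-up the comment mentions. A hypothetical sketch (movetup_plain is an invented name):

    /* Hypothetical example only: minimal movetup for a position-independent
     * tuple format.  Source and destination ranges may overlap during batch
     * memory compaction, so memmove(), not memcpy(), is required. */
    static void
    movetup_plain(void *dest, void *src, unsigned int len)
    {
        memmove(dest, src, len);
    }
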
@@ -475,6 +485,7 @@ struct Tuplesortstate
#define COPYTUP(state,stup,tup)     ((*(state)->copytup) (state, stup, tup))
#define WRITETUP(state,tape,stup)   ((*(state)->writetup) (state, tape, stup))
#define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
+#define MOVETUP(dest,src,len) ((*(state)->movetup) (dest, src, len))
#define LACKMEM(state)      ((state)->availMem < 0 && !(state)->batchUsed)
#define USEMEM(state,amt)   ((state)->availMem -= (amt))
#define FREEMEM(state,amt)  ((state)->availMem += (amt))
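
Note (editorial aside, not part of the diff): like USEMEM and LACKMEM above it, the new MOVETUP macro implicitly references a local variable named state, so it can only appear where a Tuplesortstate *state is in scope. Illustrative expansion at a hypothetical call site:

    /* Hypothetical call site inside tuplesort.c, with "state" the local
     * Tuplesortstate pointer.  The macro invocation ... */
    MOVETUP(dest, src, len);
    /* ... expands to an indirect call through the per-variant routine: */
    ((*(state)->movetup) (dest, src, len));
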
@@ -571,13 +582,15 @@ static void writetup_heap(Tuplesortstate *state, int tapenum,
              SortTuple *stup);
static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
             int tapenum, unsigned int len);
+static void movetup_heap(void *dest, void *src, unsigned int len);
static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
               Tuplesortstate *state);
static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_cluster(Tuplesortstate *state, int tapenum,
             SortTuple *stup);
static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
             int tapenum, unsigned int len);
+static void movetup_cluster(void *dest, void *src, unsigned int len);
static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
               Tuplesortstate *state);
static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
@@ -587,13 +600,15 @@ static void writetup_index(Tuplesortstate *state, int tapenum,
             SortTuple *stup);
static void readtup_index(Tuplesortstate *state, SortTuple *stup,
             int tapenum, unsigned int len);
+static void movetup_index(void *dest, void *src, unsigned int len);
static int comparetup_datum(const SortTuple *a, const SortTuple *b,
             Tuplesortstate *state);
static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_datum(Tuplesortstate *state, int tapenum,
             SortTuple *stup);
static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
             int tapenum, unsigned int len);
+static void movetup_datum(void *dest, void *src, unsigned int len);
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);

/*
@@ -749,6 +764,7 @@ tuplesort_begin_heap(TupleDesc tupDesc,
    state->copytup = copytup_heap;
    state->writetup = writetup_heap;
    state->readtup = readtup_heap;
+   state->movetup = movetup_heap;

    state->tupDesc = tupDesc;   /* assume we need not copy tupDesc */
    state->abbrevNext = 10;
@@ -821,6 +837,7 @@ tuplesort_begin_cluster(TupleDesc tupDesc,
    state->copytup = copytup_cluster;
    state->writetup = writetup_cluster;
    state->readtup = readtup_cluster;
+   state->movetup = movetup_cluster;
    state->abbrevNext = 10;

    state->indexInfo = BuildIndexInfo(indexRel);
@@ -912,6 +929,7 @@ tuplesort_begin_index_btree(Relation heapRel,
    state->copytup = copytup_index;
    state->writetup = writetup_index;
    state->readtup = readtup_index;
+   state->movetup = movetup_index;
    state->abbrevNext = 10;

    state->heapRel = heapRel;
@@ -979,6 +997,7 @@ tuplesort_begin_index_hash(Relation heapRel,
    state->copytup = copytup_index;
    state->writetup = writetup_index;
    state->readtup = readtup_index;
+   state->movetup = movetup_index;

    state->heapRel = heapRel;
    state->indexRel = indexRel;
@@ -1021,6 +1040,7 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
    state->copytup = copytup_datum;
    state->writetup = writetup_datum;
    state->readtup = readtup_datum;
+   state->movetup = movetup_datum;
    state->abbrevNext = 10;

    state->datumType = datumType;
@@ -2975,7 +2995,7 @@ mergebatchone(Tuplesortstate *state, int srcTape, SortTuple *rtup,
         */
        tupLen = state->mergecurrent[srcTape] - state->mergetail[srcTape];
        state->mergecurrent[srcTape] = state->mergetuples[srcTape];
-       memmove(state->mergecurrent[srcTape], state->mergetail[srcTape],
+       MOVETUP(state->mergecurrent[srcTape], state->mergetail[srcTape],
                tupLen);

        /* Make SortTuple at top of the merge heap point to new tuple */
@@ -3039,7 +3059,7 @@ mergebatchfreetape(Tuplesortstate *state, int srcTape, SortTuple *rtup,

        tuplen = state->mergecurrent[srcTape] - state->mergetail[srcTape];
        rtup->tuple = MemoryContextAlloc(state->sortcontext, tuplen);
-       memcpy(rtup->tuple, oldTuple, tuplen);
+       MOVETUP(rtup->tuple, oldTuple, tuplen);
        *should_free = true;
    }

@@ -4061,6 +4081,12 @@ readtup_heap(Tuplesortstate *state, SortTuple *stup,
                        &stup->isnull1);
}

+static void
+movetup_heap(void *dest, void *src, unsigned int len)
+{
+   memmove(dest, src, len);
+}
+
/*
 * Routines specialized for the CLUSTER case (HeapTuple data, with
 * comparisons per a btree index definition)
@@ -4302,6 +4328,18 @@ readtup_cluster(Tuplesortstate *state, SortTuple *stup,
                        &stup->isnull1);
}

+static void
+movetup_cluster(void *dest, void *src, unsigned int len)
+{
+   HeapTuple   tuple;
+
+   memmove(dest, src, len);
+
+   /* Repoint the HeapTupleData header */
+   tuple = (HeapTuple) dest;
+   tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
+}
+

/*
 * Routines specialized for IndexTuple case
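
Editorial aside, not part of the diff: the extra step in movetup_cluster exists because a CLUSTER sort keeps each caller tuple in a single palloc'd chunk holding the HeapTupleData header followed, at offset HEAPTUPLESIZE, by the tuple body, and t_data is an absolute pointer into that chunk. After the bytes move, t_data would still point at the old location, so it must be recomputed from the tuple's new address. A sketch of that layout assumption (illustrative only, assuming the usual PostgreSQL heap-tuple headers; body_len is a placeholder):

    /* Illustrative sketch, not part of the patch: header and body share one
     * allocation, so t_data is an internal pointer that goes stale when the
     * chunk is relocated and must be re-derived from the new base address. */
    HeapTuple   tuple = (HeapTuple) palloc(HEAPTUPLESIZE + body_len);

    tuple->t_len = body_len;
    tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
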
@@ -4594,6 +4632,12 @@ readtup_index(Tuplesortstate *state, SortTuple *stup,
                        &stup->isnull1);
}

+static void
+movetup_index(void *dest, void *src, unsigned int len)
+{
+   memmove(dest, src, len);
+}
+
/*
 * Routines specialized for DatumTuple case
 */
@@ -4704,6 +4748,12 @@ readtup_datum(Tuplesortstate *state, SortTuple *stup,
                       &tuplen, sizeof(tuplen));
}

+static void
+movetup_datum(void *dest, void *src, unsigned int len)
+{
+   memmove(dest, src, len);
+}
+
/*
 * Convenience routine to free a tuple previously loaded into sort memory
 */