 
 #define DEFAULT_TABLE_ACCESS_METHOD "heap"
 
+/* GUCs */
 extern char *default_table_access_method;
 extern bool synchronize_seqscans;
 
@@ -40,7 +41,7 @@ struct ValidateIndexState;
 
 
 /*
- * Result codes for table_{update,delete,lock}_tuple, and for visibility
+ * Result codes for table_{update,delete,lock_tuple}, and for visibility
  * routines inside table AMs.
  */
 typedef enum TM_Result
@@ -68,8 +69,8 @@ typedef enum TM_Result
 
     /*
     * The affected tuple is currently being modified by another session. This
-    * will only be returned if (update/delete/lock)_tuple are instructed not
-    * to wait.
+    * will only be returned if table_(update/delete/lock_tuple) are
+    * instructed not to wait.
     */
     TM_BeingModified,
 
@@ -82,12 +83,15 @@ typedef enum TM_Result
  * When table_update, table_delete, or table_lock_tuple fail because the target
  * tuple is already outdated, they fill in this struct to provide information
  * to the caller about what happened.
+ *
  * ctid is the target's ctid link: it is the same as the target's TID if the
  * target was deleted, or the location of the replacement tuple if the target
  * was updated.
+ *
  * xmax is the outdating transaction's XID. If the caller wants to visit the
  * replacement tuple, it must check that this matches before believing the
  * replacement is really a match.
+ *
  * cmax is the outdating command's CID, but only when the failure code is
  * TM_SelfModified (i.e., something in the current transaction outdated the
  * tuple); otherwise cmax is zero. (We make this restriction because
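
This contract is easiest to see from the caller's side. Below is a minimal, hedged sketch of acting on a table_delete() result, loosely modeled on ExecDelete() in nodeModifyTable.c; `rel`, `tid`, `cid`, and `snapshot` are assumed to be set up by the surrounding code, and the error messages are illustrative only.

    TM_FailureData tmfd;
    TM_Result   result;

    result = table_delete(rel, tid, cid, snapshot,
                          InvalidSnapshot,  /* no crosscheck snapshot */
                          true,             /* wait for concurrent updaters */
                          &tmfd,
                          false);           /* not a partition move */

    switch (result)
    {
        case TM_Ok:
            break;              /* deleted; tmfd was not filled in */
        case TM_SelfModified:
            /* our own transaction outdated it; tmfd.cmax is valid */
            elog(ERROR, "tuple already modified by this command, cmax %u",
                 tmfd.cmax);
            break;
        case TM_Updated:
            /* concurrent update; tmfd.ctid/tmfd.xmax lead to the new version */
            elog(ERROR, "tuple concurrently updated, xmax %u", tmfd.xmax);
            break;
        default:
            elog(ERROR, "unexpected table_delete result: %d", result);
            break;
    }
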
@@ -108,10 +112,10 @@ typedef struct TM_FailureData
 #define TABLE_INSERT_FROZEN         0x0004
 #define TABLE_INSERT_NO_LOGICAL     0x0008
 
-/* flag bits fortable_lock_tuple */
+/* flag bits for table_lock_tuple */
 /* Follow tuples whose update is in progress if lock modes don't conflict */
 #define TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS (1 << 0)
-/* Follow update chain and lock lastest version of tuple */
+/* Follow update chain and lock latest version of tuple */
 #define TUPLE_LOCK_FLAG_FIND_LAST_VERSION       (1 << 1)
 
 
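
As a rough usage sketch (assuming `rel`, `tid`, `slot`, `cid`, and `snapshot` are provided by the caller, and the table_lock_tuple() signature as declared later in this header), locking the newest version of a row chain looks like:

    TM_FailureData tmfd;
    TM_Result   res;

    res = table_lock_tuple(rel, tid, snapshot, slot, cid,
                           LockTupleExclusive, LockWaitBlock,
                           TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
                           &tmfd);
    if (res == TM_Ok)
    {
        /* slot now holds the locked, possibly newer, tuple version */
    }
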
@@ -128,8 +132,8 @@ typedef void (*IndexBuildCallback) (Relation index,
  * server-lifetime manner, typically as a static const struct, which then gets
  * returned by FormData_pg_am.amhandler.
  *
- * I most cases it's not appropriate to directly call the callbacks directly,
- * instead use the table_* wrapper functions.
+ * In most cases it's not appropriate to call the callbacks directly, use the
+ * table_* wrapper functions instead.
  *
  * GetTableAmRoutine() asserts that required callbacks are filled in, remember
  * to update when adding a callback.
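
For illustration, the registration pattern this comment describes looks roughly like the following, modeled on heapam_handler.c; "myam" and its callbacks are hypothetical placeholders, and only a few of the required callbacks are shown:

    #include "postgres.h"

    #include "access/tableam.h"
    #include "fmgr.h"

    static const TableAmRoutine myam_methods = {
        .type = T_TableAmRoutine,

        .slot_callbacks = myam_slot_callbacks,
        .scan_begin = myam_scan_begin,
        .scan_end = myam_scan_end,
        /* ... all other required callbacks ... */
    };

    PG_FUNCTION_INFO_V1(myam_handler);

    Datum
    myam_handler(PG_FUNCTION_ARGS)
    {
        PG_RETURN_POINTER(&myam_methods);
    }

Such a handler is then wired up with CREATE ACCESS METHOD myam TYPE TABLE HANDLER myam_handler, after which CREATE TABLE ... USING myam routes all table access through these callbacks.
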
@@ -194,7 +198,7 @@ typedef struct TableAmRoutine
     void        (*scan_end) (TableScanDesc scan);
 
     /*
-    * Restart relation scan. If set_params is set to true, allow {strat,
+    * Restart relation scan. If set_params is set to true, allow_{strat,
     * sync, pagemode} (see scan_begin) changes should be taken into account.
     */
     void        (*scan_rescan) (TableScanDesc scan, struct ScanKeyData *key,
@@ -222,7 +226,7 @@ typedef struct TableAmRoutine
 
     /*
     * Initialize ParallelTableScanDesc for a parallel scan of this relation.
-    * pscan will be sized according to parallelscan_estimate() for the same
+    * `pscan` will be sized according to parallelscan_estimate() for the same
     * relation.
     */
     Size        (*parallelscan_initialize) (Relation rel,
@@ -243,7 +247,7 @@ typedef struct TableAmRoutine
 
     /*
     * Prepare to fetch tuples from the relation, as needed when fetching
-    * tuples for an index scan. The callback has to return a
+    * tuples for an index scan. The callback has to return an
     * IndexFetchTableData, which the AM will typically embed in a larger
     * structure with additional information.
     *
@@ -268,16 +272,16 @@ typedef struct TableAmRoutine
     * test, return true, false otherwise.
     *
     * Note that AMs that do not necessarily update indexes when indexed
-    * columns do not change, need to return the current/correct version of a
-    * tuple as appropriate, even if the tid points to an older version of the
-    * tuple.
+    * columns do not change, need to return the current/correct version of
+    * the tuple that is visible to the snapshot, even if the tid points to an
+    * older version of the tuple.
     *
     * *call_again is false on the first call to index_fetch_tuple for a tid.
     * If there potentially is another tuple matching the tid, *call_again
     * needs be set to true by index_fetch_tuple, signalling to the caller
     * that index_fetch_tuple should be called again for the same tid.
     *
-    * *all_dead, if all_dead is not NULL, should be set to true if by
+    * *all_dead, if all_dead is not NULL, should be set to true by
     * index_fetch_tuple iff it is guaranteed that no backend needs to see
     * that tuple. Index AMs can use that do avoid returning that tid in
     * future searches.
@@ -288,14 +292,14 @@ typedef struct TableAmRoutine
                                             TupleTableSlot *slot,
                                             bool *call_again, bool *all_dead);
 
+
     /* ------------------------------------------------------------------------
     * Callbacks for non-modifying operations on individual tuples
     * ------------------------------------------------------------------------
     */
 
-
     /*
-    * Fetch tuple at `tid` into `slot, after doing a visibility test
+    * Fetch tuple at `tid` into `slot`, after doing a visibility test
     * according to `snapshot`. If a tuple was found and passed the visibility
     * test, returns true, false otherwise.
     */
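
Putting the pieces above together, a caller-side sketch of the index-fetch protocol (assuming `rel`, `tid`, `snapshot`, and `slot` exist; the in-tree equivalent of this loop lives in indexam.c) could look like:

    IndexFetchTableData *fetch = table_index_fetch_begin(rel);
    bool        call_again = false;
    bool        all_dead = false;
    bool        found;

    do
    {
        found = table_index_fetch_tuple(fetch, tid, snapshot, slot,
                                        &call_again, &all_dead);
        if (found)
            break;          /* slot holds a visible version */
    } while (call_again);   /* same tid may have further versions, e.g. HOT */

    if (!found && all_dead)
    {
        /* no backend needs this tid; index AMs may kill the entry */
    }

    table_index_fetch_end(fetch);
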
@@ -390,13 +394,13 @@ typedef struct TableAmRoutine
     /*
     * Perform operations necessary to complete insertions made via
     * tuple_insert and multi_insert with a BulkInsertState specified. This
-    * e.g. may e.g. used to flush the relation when inserting with
-    * TABLE_INSERT_SKIP_WAL specified.
+    * may for example be used to flush the relation, when the
+    * TABLE_INSERT_SKIP_WAL option was used.
     *
     * Typically callers of tuple_insert and multi_insert will just pass all
-    * the flags the apply to them, and each AM has to decide which of them
-    * make sense for it, and then only take actions in finish_bulk_insert
-    * that make sense for a specific AM.
+    * the flags that apply to them, and each AM has to decide which of them
+    * make sense for it, and then only take actions in finish_bulk_insert for
+    * those flags, and ignore others.
     *
     * Optional callback.
     */
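
A hedged sketch of the bulk-load flow this comment implies (COPY FROM is the in-tree model; `rel`, `slots`, `nslots`, and `cid` are assumed, and GetBulkInsertState()/FreeBulkInsertState() are heap's bulk-insert helpers):

    int         options = TABLE_INSERT_SKIP_WAL | TABLE_INSERT_SKIP_FSM;
    BulkInsertState bistate = GetBulkInsertState();

    table_multi_insert(rel, slots, nslots, cid, options, bistate);
    FreeBulkInsertState(bistate);

    /* the AM picks the flags it cares about, e.g. flushing skipped WAL */
    table_finish_bulk_insert(rel, options);
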
@@ -412,10 +416,10 @@ typedef struct TableAmRoutine
     * This callback needs to create a new relation filenode for `rel`, with
     * appropriate durability behaviour for `persistence`.
     *
-    * On output *freezeXid, *minmulti should be set to the values appropriate
-    * for pg_class.{relfrozenxid, relminmxid} have to be set to. For AMs that
-    * don't need those fields to be filled they can be set to
-    * InvalidTransactionId, InvalidMultiXactId respectively.
+    * On output *freezeXid, *minmulti must be set to the values appropriate
+    * for pg_class.{relfrozenxid, relminmxid}. For AMs that don't need those
+    * fields to be filled they can be set to InvalidTransactionId and
+    * InvalidMultiXactId, respectively.
     *
     * See also table_relation_set_new_filenode().
     */
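
For a concrete shape, here is a speculative sketch of this callback for an AM that has no use for freeze tracking; the parameter list follows this header's declaration as of this tree, "myam" is hypothetical, and RelationCreateStorage() is the storage-layer helper heap uses:

    static void
    myam_relation_set_new_filenode(Relation rel, char persistence,
                                   TransactionId *freezeXid,
                                   MultiXactId *minmulti)
    {
        /* this AM keeps no xid-based visibility data to freeze */
        *freezeXid = InvalidTransactionId;
        *minmulti = InvalidMultiXactId;

        /* create physical storage with durability per `persistence` */
        RelationCreateStorage(rel->rd_node, persistence);
    }
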
@@ -463,7 +467,7 @@ typedef struct TableAmRoutine
     * locked with a ShareUpdateExclusive lock.
     *
     * Note that neither VACUUM FULL (and CLUSTER), nor ANALYZE go through
-    * this routine, even if (in the latter case), part of the same VACUUM
+    * this routine, even if (for ANALYZE) it is part of the same VACUUM
     * command.
     *
     * There probably, in the future, needs to be a separate callback to
@@ -487,8 +491,8 @@ typedef struct TableAmRoutine
     * sampling, e.g. because it's a metapage that could never contain tuples.
     *
     * XXX: This obviously is primarily suited for block-based AMs. It's not
-    * clear what a good interface for non block based AMs would be, so don't
-    * try to invent one yet.
+    * clear what a good interface for non block based AMs would be, so there
+    * isn't one yet.
     */
     bool        (*scan_analyze_next_block) (TableScanDesc scan,
                                             BlockNumber blockno,
@@ -537,7 +541,7 @@ typedef struct TableAmRoutine
     /*
     * See table_relation_estimate_size().
     *
-    * While block oriented, it shouldn't be too hard to for an AM that
+    * While block oriented, it shouldn't be too hard for an AM that
     * doesn't internally use blocks to convert into a usable representation.
     */
     void        (*relation_estimate_size) (Relation rel, int32 *attr_widths,
@@ -553,7 +557,7 @@ typedef struct TableAmRoutine
     /*
     * Prepare to fetch / check / return tuples from `tbmres->blockno` as part
     * of a bitmap table scan. `scan` was started via table_beginscan_bm().
-    * Return false if there's no tuples to be found on the page, true
+    * Return false if there are no tuples to be found on the page, true
     * otherwise.
     *
     * This will typically read and pin the target block, and do the necessary
@@ -617,8 +621,8 @@ typedef struct TableAmRoutine
     * Note that it's not acceptable to hold deadlock prone resources such as
     * lwlocks until scan_sample_next_tuple() has exhausted the tuples on the
     * block - the tuple is likely to be returned to an upper query node, and
-    * the next call could be off a long while. Holding buffer pins etc is
-    * obviously OK.
+    * the next call could be off a long while. Holding buffer pins and such
+    * is obviously OK.
     *
     * Currently it is required to implement this interface, as there's no
     * alternative way (contrary e.g. to bitmap scans) to implement sample
@@ -707,7 +711,6 @@ table_beginscan_strat(Relation rel, Snapshot snapshot,
                                    false, false, false);
 }
 
-
 /*
  * table_beginscan_bm is an alternative entry point for setting up a
  * TableScanDesc for a bitmap heap scan. Although that scan technology is
@@ -762,7 +765,6 @@ table_endscan(TableScanDesc scan)
     scan->rs_rd->rd_tableam->scan_end(scan);
 }
 
-
 /*
  * Restart a relation scan.
  */
@@ -795,7 +797,6 @@ table_rescan_set_params(TableScanDesc scan, struct ScanKeyData *key,
  */
 extern void table_scan_update_snapshot(TableScanDesc scan, Snapshot snapshot);
 
-
 /*
  * Return next tuple from `scan`, store in slot.
  */
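
The wrappers above combine into the canonical scan loop; a minimal sketch, assuming a backend context with `rel` already opened and locked:

    TupleTableSlot *slot = table_slot_create(rel, NULL);
    TableScanDesc scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);

    while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
    {
        /* process the visible tuple exposed through `slot` */
    }

    table_endscan(scan);
    ExecDropSingleTupleTableSlot(slot);
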
@@ -833,7 +834,7 @@ extern void table_parallelscan_initialize(Relation rel,
  * table_parallelscan_initialize(), for the same relation. The initialization
  * does not need to have happened in this backend.
  *
- * Caller must hold a suitable lock on the correct relation.
+ * Caller must hold a suitable lock on the relation.
  */
 extern TableScanDesc table_beginscan_parallel(Relation rel,
                                               ParallelTableScanDesc pscan);
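
The handshake between these functions, with the DSM plumbing elided, is roughly as follows (`toc` being an assumed shm_toc that the leader set up):

    /* leader: size, allocate, and fill the shared descriptor */
    Size        sz = table_parallelscan_estimate(rel, snapshot);
    ParallelTableScanDesc pscan;

    pscan = (ParallelTableScanDesc) shm_toc_allocate(toc, sz);
    table_parallelscan_initialize(rel, pscan, snapshot);

    /* each participant, holding a suitable lock on rel: */
    TableScanDesc scan = table_beginscan_parallel(rel, pscan);
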
@@ -904,7 +905,7 @@ table_index_fetch_end(struct IndexFetchTableData *scan)
  * The difference between this function and table_fetch_row_version is that
  * this function returns the currently visible version of a row if the AM
  * supports storing multiple row versions reachable via a single index entry
- * (like heap's HOT). Whereas table_fetch_row_version only evaluates the the
+ * (like heap's HOT). Whereas table_fetch_row_version only evaluates the
  * tuple exactly at `tid`. Outside of index entry->table tuple lookups,
  * table_fetch_row_version is what's usually needed.
  */
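
A short sketch of the "exact version" style, for contrast (same assumed `rel`, `tid`, `snapshot`, and `slot` as in the earlier examples):

    if (table_fetch_row_version(rel, tid, snapshot, slot))
    {
        /* slot holds precisely the version addressed by tid, no chain walk */
    }
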
@@ -940,7 +941,7 @@ extern bool table_index_fetch_tuple_check(Relation rel,
 
 
 /*
- * Fetch tuple at `tid` into `slot, after doing a visibility test according to
+ * Fetch tuple at `tid` into `slot`, after doing a visibility test according to
  * `snapshot`. If a tuple was found and passed the visibility test, returns
  * true, false otherwise.
  *
@@ -1009,8 +1010,8 @@ table_compute_xid_horizon_for_tuples(Relation rel,
  * behaviour of the AM. Several options might be ignored by AMs not supporting
  * them.
  *
- * If the TABLE_INSERT_SKIP_WAL option is specified, the new tuple will not
- * necessarily logged to WAL, even for a non-temp relation. It is the AMs
+ * If the TABLE_INSERT_SKIP_WAL option is specified, the new tuple doesn't
+ * need to be logged to WAL, even for a non-temp relation. It is the AMs
  * choice whether this optimization is supported.
  *
 * If the TABLE_INSERT_SKIP_FSM option is specified, AMs are free to not reuse
@@ -1030,7 +1031,7 @@ table_compute_xid_horizon_for_tuples(Relation rel,
  * relation.
  *
  * Note that most of these options will be applied when inserting into the
- * heap's TOAST table, too, if the tuple requires any out-of-line data
+ * heap's TOAST table, too, if the tuple requires any out-of-line data.
  *
  *
 * The BulkInsertState object (if any; bistate can be NULL for default
@@ -1082,7 +1083,7 @@ table_complete_speculative(Relation rel, TupleTableSlot *slot,
 }
 
 /*
- * Insert multiple tuple into a table.
+ * Insert multiple tuples into a table.
  *
  * This is like table_insert(), but inserts multiple tuples in one
 * operation. That's often faster than calling table_insert() in a loop,
@@ -1121,10 +1122,9 @@ table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
  * changingPart - true iff the tuple is being moved to another partition
  * table due to an update of the partition key. Otherwise, false.
  *
- * Normal, successful return value is TM_Ok, which
- * actually means we did delete it. Failure return codes are
- * TM_SelfModified, TM_Updated, or TM_BeingModified
- * (the last only possible if wait == false).
+ * Normal, successful return value is TM_Ok, which means we did actually
+ * delete it. Failure return codes are TM_SelfModified, TM_Updated, and
+ * TM_BeingModified (the last only possible if wait == false).
 *
 * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
 * t_xmax, and, if possible, t_cmax. See comments for
@@ -1160,10 +1160,9 @@ table_delete(Relation rel, ItemPointer tid, CommandId cid,
  * update_indexes - in success cases this is set to true if new index entries
  * are required for this tuple
  *
- * Normal, successful return value is TM_Ok, which
- * actually means we *did* update it. Failure return codes are
- * TM_SelfModified, TM_Updated, or TM_BeingModified
- * (the last only possible if wait == false).
+ * Normal, successful return value is TM_Ok, which means we did actually
+ * update it. Failure return codes are TM_SelfModified, TM_Updated, and
+ * TM_BeingModified (the last only possible if wait == false).
 *
 * On success, the slot's tts_tid and tts_tableOid are updated to match the new
 * stored tuple; in particular, slot->tts_tid is set to the TID where the
@@ -1201,8 +1200,8 @@ table_update(Relation rel, ItemPointer otid, TupleTableSlot *slot,
  * flags:
  *     If TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS, follow the update chain to
  *     also lock descendant tuples if lock modes don't conflict.
- *     If TUPLE_LOCK_FLAG_FIND_LAST_VERSION, update chain and lock latest
- *     version.
+ *     If TUPLE_LOCK_FLAG_FIND_LAST_VERSION, follow the update chain and lock
+ *     latest version.
 *
 * Output parameters:
 *     *slot: contains the target tuple
@@ -1303,7 +1302,7 @@ table_relation_copy_data(Relation rel, RelFileNode newrnode)
  * is copied in that index's order; if use_sort is false and OidIndex is
  * InvalidOid, no sorting is performed.
  *
- * OldestXmin, FreezeXid, MultiXactCutoff need to currently valid values for
+ * OldestXmin, FreezeXid, MultiXactCutoff must be currently valid values for
  * the table.
  *
 * *num_tuples, *tups_vacuumed, *tups_recently_dead will contain statistics
@@ -1329,15 +1328,15 @@ table_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
 }
 
 /*
- * Perform VACUUM on the relation. The VACUUM can be user triggered or by
+ * Perform VACUUM on the relation. The VACUUM can be user-triggered or by
  * autovacuum. The specific actions performed by the AM will depend heavily on
  * the individual AM.
  *
  * On entry a transaction needs to already been established, and the
- * transaction is locked with a ShareUpdateExclusive lock.
+ * table is locked with a ShareUpdateExclusive lock.
 *
 * Note that neither VACUUM FULL (and CLUSTER), nor ANALYZE go through this
- * routine, even if (in the latter case), part of the same VACUUM command.
+ * routine, even if (for ANALYZE) it is part of the same VACUUM command.
 */
 static inline void
 table_relation_vacuum(Relation rel, struct VacuumParams *params,
@@ -1363,7 +1362,7 @@ table_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno,
 }
 
 /*
- * Iterate over tuples tuples in the block selected with
+ * Iterate over tuples in the block selected with
  * table_scan_analyze_next_block() (which needs to have returned true, and
 * this routine may not have returned false for the same block before). If a
 * tuple that's suitable for sampling is found, true is returned and a tuple
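
Together with table_scan_analyze_next_block(), this forms the two-level sampling loop; a sketch modeled on acquire_sample_rows() in commands/analyze.c, with `scan`, `nblocks`, `OldestXmin`, `bstrategy`, and `slot` assumed to be set up by the caller:

    double      liverows = 0;
    double      deadrows = 0;
    BlockNumber blockno;

    for (blockno = 0; blockno < nblocks; blockno++)
    {
        if (!table_scan_analyze_next_block(scan, blockno, bstrategy))
            continue;       /* e.g. a metapage that holds no tuples */

        while (table_scan_analyze_next_tuple(scan, OldestXmin,
                                             &liverows, &deadrows, slot))
        {
            /* hand the sampled tuple in `slot` to the stats machinery */
        }
    }
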
@@ -1383,7 +1382,7 @@ table_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin,
 }
 
 /*
- * table_index_build_range_scan - scan the table to find tuples to be indexed
+ * table_index_build_scan - scan the table to find tuples to be indexed
  *
  * This is called back from an access-method-specific index build procedure
 * after the AM has done whatever setup it needs. The parent heap relation
@@ -1515,8 +1514,8 @@ table_relation_estimate_size(Relation rel, int32 *attr_widths,
 /*
  * Prepare to fetch / check / return tuples from `tbmres->blockno` as part of
  * a bitmap table scan. `scan` needs to have been started via
- * table_beginscan_bm(). Returns false if there's no tuples to be found on the
- * page, true otherwise.
+ * table_beginscan_bm(). Returns false if there are no tuples to be found on
+ * the page, true otherwise.
 *
 * Note, this is an optionally implemented function, therefore should only be
 * used after verifying the presence (at plan time or such).
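
In the spirit of nodeBitmapHeapscan.c, the per-page driving loop for this pair of functions looks roughly like the following (`scan` from table_beginscan_bm(), `tbmres` from tbm_iterate(), and `slot` assumed):

    if (table_scan_bitmap_next_block(scan, tbmres))
    {
        while (table_scan_bitmap_next_tuple(scan, tbmres, slot))
        {
            /* if tbmres->recheck, re-evaluate quals before emitting */
        }
    }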