author    Andres Freund  2019-05-23 23:25:48 +0000
committer Andres Freund  2019-05-23 23:32:36 +0000
commit    73b8c3bd2889fed986044e15aefd0911f96ccdd3 (patch)
tree      8fcd867ac811feecc99ed5f645088b73c44d5051 /src/backend/commands
parent    54487d1560619a0027e0651d1b8d715ca8fc388c (diff)
tableam: Rename wrapper functions to match callback names.
Some of the wrapper functions didn't match the callback names, many of them
because they stayed "consistent" with the historic naming of the wrapped
functionality. We decided that for most cases it's more important for
tableam to be consistent going forward than with the past.

The one exception is beginscan/endscan/..., because it would have looked odd
to have systable_beginscan/endscan/... follow a different naming scheme, and
changing the systable_* APIs would have caused far too much churn (including
breaking many external users).

Author: Ashwin Agrawal, with some small additions by Andres Freund
Reviewed-By: Andres Freund
Discussion: https://postgr.es/m/CALfoeiugyrXZfX7n0ORCa4L-m834dzmaE8eFdbNR6PMpetU4Ww@mail.gmail.com
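For context, the wrappers being renamed are thin static-inline dispatchers in
src/include/access/tableam.h, each now named after the callback it invokes. A
minimal sketch of the post-rename pairing (paraphrased from the PG12-era
header, not quoted verbatim from this commit):

    /* table_tuple_insert() now matches the tuple_insert callback it wraps. */
    static inline void
    table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid,
                       int options, struct BulkInsertStateData *bistate)
    {
        rel->rd_tableam->tuple_insert(rel, slot, cid, options, bistate);
    }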
Diffstat (limited to 'src/backend/commands')
-rw-r--r--  src/backend/commands/copy.c       24
-rw-r--r--  src/backend/commands/createas.c   14
-rw-r--r--  src/backend/commands/matview.c    14
-rw-r--r--  src/backend/commands/tablecmds.c   9
-rw-r--r--  src/backend/commands/trigger.c    17
5 files changed, 42 insertions, 36 deletions
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index b00891ffd2b..a8ff3049094 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -90,7 +90,7 @@ typedef enum EolType
*/
typedef enum CopyInsertMethod
{
- CIM_SINGLE, /* use table_insert or fdw routine */
+ CIM_SINGLE, /* use table_tuple_insert or fdw routine */
CIM_MULTI, /* always use table_multi_insert */
CIM_MULTI_CONDITIONAL /* use table_multi_insert only if valid */
} CopyInsertMethod;
@@ -2664,7 +2664,7 @@ CopyFrom(CopyState cstate)
PartitionTupleRouting *proute = NULL;
ErrorContextCallback errcallback;
CommandId mycid = GetCurrentCommandId(true);
- int ti_options = 0; /* start with default table_insert options */
+ int ti_options = 0; /* start with default options for insert */
BulkInsertState bistate = NULL;
CopyInsertMethod insertMethod;
CopyMultiInsertInfo multiInsertInfo = {0}; /* pacify compiler */
@@ -2737,11 +2737,11 @@ CopyFrom(CopyState cstate)
* FSM for free space is a waste of time, even if we must use WAL because
* of archiving. This could possibly be wrong, but it's unlikely.
*
- * The comments for table_insert and RelationGetBufferForTuple specify that
- * skipping WAL logging is only safe if we ensure that our tuples do not
- * go into pages containing tuples from any other transactions --- but this
- * must be the case if we have a new table or new relfilenode, so we need
- * no additional work to enforce that.
+ * The comments for table_tuple_insert and RelationGetBufferForTuple
+ * specify that skipping WAL logging is only safe if we ensure that our
+ * tuples do not go into pages containing tuples from any other
+ * transactions --- but this must be the case if we have a new table or
+ * new relfilenode, so we need no additional work to enforce that.
*
* We currently don't support this optimization if the COPY target is a
* partitioned table as we currently only lazily initialize partition
@@ -2888,9 +2888,9 @@ CopyFrom(CopyState cstate)
/*
* It's generally more efficient to prepare a bunch of tuples for
* insertion, and insert them in one table_multi_insert() call, than call
- * table_insert() separately for every tuple. However, there are a number
- * of reasons why we might not be able to do this. These are explained
- * below.
+ * table_tuple_insert() separately for every tuple. However, there are a
+ * number of reasons why we might not be able to do this. These are
+ * explained below.
*/
if (resultRelInfo->ri_TrigDesc != NULL &&
(resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
@@ -3286,8 +3286,8 @@ CopyFrom(CopyState cstate)
else
{
/* OK, store the tuple and create index entries for it */
- table_insert(resultRelInfo->ri_RelationDesc, myslot,
- mycid, ti_options, bistate);
+ table_tuple_insert(resultRelInfo->ri_RelationDesc,
+ myslot, mycid, ti_options, bistate);
if (resultRelInfo->ri_NumIndices > 0)
recheckIndexes = ExecInsertIndexTuples(myslot,
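
The comment above motivates why CopyFrom() batches rows when it can. A
condensed, hypothetical sketch of that decision (the real logic in CopyFrom()
checks more conditions, e.g. foreign tables and volatile default
expressions):

    /* Sketch: pick row-at-a-time insertion only when something must see
     * each row individually; otherwise buffer rows and flush them in
     * batches via table_multi_insert(). */
    if (resultRelInfo->ri_TrigDesc != NULL &&
        (resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
         resultRelInfo->ri_TrigDesc->trig_insert_instead_row))
        insertMethod = CIM_SINGLE;  /* triggers may inspect/modify each row */
    else
        insertMethod = CIM_MULTI;   /* batch, flush via table_multi_insert() */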
diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c
index 43c2fa91242..4c1d909d380 100644
--- a/src/backend/commands/createas.c
+++ b/src/backend/commands/createas.c
@@ -60,7 +60,7 @@ typedef struct
Relation rel; /* relation to write to */
ObjectAddress reladdr; /* address of rel, for ExecCreateTableAs */
CommandId output_cid; /* cmin to insert in output tuples */
- int ti_options; /* table_insert performance options */
+ int ti_options; /* table_tuple_insert performance options */
BulkInsertState bistate; /* bulk insert state */
} DR_intorel;
@@ -576,18 +576,18 @@ intorel_receive(TupleTableSlot *slot, DestReceiver *self)
/*
* Note that the input slot might not be of the type of the target
- * relation. That's supported by table_insert(), but slightly less
+ * relation. That's supported by table_tuple_insert(), but slightly less
* efficient than inserting with the right slot - but the alternative
* would be to copy into a slot of the right type, which would not be
* cheap either. This also doesn't allow accessing per-AM data (say a
* tuple's xmin), but since we don't do that here...
*/
- table_insert(myState->rel,
- slot,
- myState->output_cid,
- myState->ti_options,
- myState->bistate);
+ table_tuple_insert(myState->rel,
+ slot,
+ myState->output_cid,
+ myState->ti_options,
+ myState->bistate);
/* We know this is a newly created relation, so there are no indexes */
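
The comment above notes that table_tuple_insert() accepts a slot of a
different type than the target relation's, at the cost of a conversion. A
caller that controls its slot can avoid that by asking the table AM for a
compatible one; a small illustrative sketch (table_slot_create() is the real
PG12 API, the surrounding lines are hypothetical):

    /* Create a slot of the AM's preferred type so no conversion is needed. */
    TupleTableSlot *slot = table_slot_create(rel, NULL);

    /* ... fill slot->tts_values / slot->tts_isnull, then ... */
    ExecStoreVirtualTuple(slot);
    table_tuple_insert(rel, slot, GetCurrentCommandId(true),
                       0 /* default ti_options */, NULL /* no bulk state */);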
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index dc2940cd4eb..537d0e8ceff 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -54,7 +54,7 @@ typedef struct
/* These fields are filled by transientrel_startup: */
Relation transientrel; /* relation to write to */
CommandId output_cid; /* cmin to insert in output tuples */
- int ti_options; /* table_insert performance options */
+ int ti_options; /* table_tuple_insert performance options */
BulkInsertState bistate; /* bulk insert state */
} DR_transientrel;
@@ -481,18 +481,18 @@ transientrel_receive(TupleTableSlot *slot, DestReceiver *self)
/*
* Note that the input slot might not be of the type of the target
- * relation. That's supported by table_insert(), but slightly less
+ * relation. That's supported by table_tuple_insert(), but slightly less
* efficient than inserting with the right slot - but the alternative
* would be to copy into a slot of the right type, which would not be
* cheap either. This also doesn't allow accessing per-AM data (say a
* tuple's xmin), but since we don't do that here...
*/
- table_insert(myState->transientrel,
- slot,
- myState->output_cid,
- myState->ti_options,
- myState->bistate);
+ table_tuple_insert(myState->transientrel,
+ slot,
+ myState->output_cid,
+ myState->ti_options,
+ myState->bistate);
/* We know this is a newly created relation, so there are no indexes */
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 602a8dbd1c3..c9b8857d306 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -4732,9 +4732,9 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
newrel = NULL;
/*
- * Prepare a BulkInsertState and options for table_insert. Because we're
- * building a new heap, we can skip WAL-logging and fsync it to disk at
- * the end instead (unless WAL-logging is required for archiving or
+ * Prepare a BulkInsertState and options for table_tuple_insert. Because
+ * we're building a new heap, we can skip WAL-logging and fsync it to disk
+ * at the end instead (unless WAL-logging is required for archiving or
* streaming replication). The FSM is empty too, so don't bother using it.
*/
if (newrel)
@@ -5005,7 +5005,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
/* Write the tuple out to the new relation */
if (newrel)
- table_insert(newrel, insertslot, mycid, ti_options, bistate);
+ table_tuple_insert(newrel, insertslot, mycid,
+ ti_options, bistate);
ResetExprContext(econtext);
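
The hunk above refers to preparing a BulkInsertState and ti_options before
the rewrite loop. Roughly, ATRewriteTable() sets this up as follows (a
paraphrase using the PG12-era option names; TABLE_INSERT_SKIP_WAL was removed
in a later release):

    /* Sketch of the bulk-insert setup described in the comment. */
    mycid = GetCurrentCommandId(true);
    bistate = GetBulkInsertState();
    ti_options = TABLE_INSERT_SKIP_FSM;
    if (!XLogIsNeeded())
        ti_options |= TABLE_INSERT_SKIP_WAL; /* fsync the heap at the end */

    /* ... per-tuple loop calls table_tuple_insert(newrel, ...) ... */

    FreeBulkInsertState(bistate);
    table_finish_bulk_insert(newrel, ti_options);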
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 3132a13785b..316692b7c27 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -3332,7 +3332,7 @@ GetTupleForTrigger(EState *estate,
*/
if (!IsolationUsesXactSnapshot())
lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
- test = table_lock_tuple(relation, tid, estate->es_snapshot, oldslot,
+ test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
estate->es_output_cid,
lockmode, LockWaitBlock,
lockflags,
@@ -3386,7 +3386,7 @@ GetTupleForTrigger(EState *estate,
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
- elog(ERROR, "unexpected table_lock_tuple status: %u", test);
+ elog(ERROR, "unexpected table_tuple_lock status: %u", test);
break;
case TM_Deleted:
@@ -3402,7 +3402,7 @@ GetTupleForTrigger(EState *estate,
break;
default:
- elog(ERROR, "unrecognized table_lock_tuple status: %u", test);
+ elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
return false; /* keep compiler quiet */
}
}
@@ -3412,7 +3412,8 @@ GetTupleForTrigger(EState *estate,
* We expect the tuple to be present, thus very simple error handling
* suffices.
*/
- if (!table_fetch_row_version(relation, tid, SnapshotAny, oldslot))
+ if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
+ oldslot))
elog(ERROR, "failed to fetch tuple for trigger");
}
@@ -4270,7 +4271,9 @@ AfterTriggerExecute(EState *estate,
{
LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
- if (!table_fetch_row_version(rel, &(event->ate_ctid1), SnapshotAny, LocTriggerData.tg_trigslot))
+ if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid1),
+ SnapshotAny,
+ LocTriggerData.tg_trigslot))
elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
LocTriggerData.tg_trigtuple =
ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
@@ -4287,7 +4290,9 @@ AfterTriggerExecute(EState *estate,
{
LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
- if (!table_fetch_row_version(rel, &(event->ate_ctid2), SnapshotAny, LocTriggerData.tg_newslot))
+ if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid2),
+ SnapshotAny,
+ LocTriggerData.tg_newslot))
elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
LocTriggerData.tg_newtuple =
ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
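
For reference, the renamed table_tuple_lock() returns a TM_Result which
callers like GetTupleForTrigger() switch on, as the hunks above show. A
condensed sketch of the calling pattern (PG12 signature; error handling
abbreviated):

    TM_FailureData tmfd;
    TM_Result   test;

    test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
                            estate->es_output_cid, lockmode, LockWaitBlock,
                            lockflags, &tmfd);
    switch (test)
    {
        case TM_Ok:
            break;              /* tuple locked and fetched into oldslot */
        case TM_Updated:        /* concurrently updated; retry or error */
        case TM_Deleted:        /* concurrently deleted; nothing to do */
            /* ... report serialization failure under REPEATABLE READ ... */
            break;
        default:
            elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
    }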