80 | 80 | * Portions Copyright (c) 1994, Regents of the University of California
81 | 81 | *
82 | 82 | * IDENTIFICATION
83 | | - * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.83 2008/01/01 19:45:53 momjian Exp $ |
| 83 | + * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.84 2008/03/13 18:00:32 tgl Exp $ |
84 | 84 | *
85 | 85 | *-------------------------------------------------------------------------
86 | 86 | */
@@ -964,6 +964,99 @@ CommandEndInvalidationMessages(void)
964 | 964 | &transInvalInfo->CurrentCmdInvalidMsgs);
965 | 965 | }
966 | 966 |
| 967 | + |
| 968 | +/* |
| 969 | + * BeginNonTransactionalInvalidation |
| 970 | + * Prepare for invalidation messages for nontransactional updates. |
| 971 | + * |
| 972 | + * A nontransactional invalidation is one that must be sent whether or not |
| 973 | + * the current transaction eventually commits. We arrange for all invals |
| 974 | + * queued between this call and EndNonTransactionalInvalidation() to be sent |
| 975 | + * immediately when the latter is called. |
| 976 | + * |
| 977 | + * Currently, this is only used by heap_page_prune(), and only when it is |
| 978 | + * invoked during VACUUM FULL's first pass over a table. We expect therefore |
| 979 | + * that we are not inside a subtransaction and there are no already-pending |
| 980 | + * invalidations. This could be relaxed by setting up a new nesting level of |
| 981 | + * invalidation data, but for now there's no need. Note that heap_page_prune |
| 982 | + * knows that this function does not change any state, and therefore there's |
| 983 | + * no need to worry about cleaning up if there's an elog(ERROR) before |
| 984 | + * reaching EndNonTransactionalInvalidation (the invals will just be thrown |
| 985 | + * away if that happens). |
| 986 | + */ |
| 987 | +void |
| 988 | +BeginNonTransactionalInvalidation(void) |
| 989 | +{ |
| 990 | + /* Must be at top of stack */ |
| 991 | + Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL); |
| 992 | + |
| 993 | + /* Must not have any previously-queued activity */ |
| 994 | + Assert(transInvalInfo->PriorCmdInvalidMsgs.cclist == NULL); |
| 995 | + Assert(transInvalInfo->PriorCmdInvalidMsgs.rclist == NULL); |
| 996 | + Assert(transInvalInfo->CurrentCmdInvalidMsgs.cclist == NULL); |
| 997 | + Assert(transInvalInfo->CurrentCmdInvalidMsgs.rclist == NULL); |
| 998 | + Assert(transInvalInfo->RelcacheInitFileInval == false); |
| 999 | +} |
| 1000 | + |
| 1001 | +/* |
| 1002 | + * EndNonTransactionalInvalidation |
| 1003 | + * Process queued-up invalidation messages for nontransactional updates. |
| 1004 | + * |
| 1005 | + * We expect to find messages in CurrentCmdInvalidMsgs only (else there |
| 1006 | + * was a CommandCounterIncrement within the "nontransactional" update). |
| 1007 | + * We must process them locally and send them out to the shared invalidation |
| 1008 | + * message queue. |
| 1009 | + * |
| 1010 | + * We must also reset the lists to empty and explicitly free memory (we can't |
| 1011 | + * rely on end-of-transaction cleanup for that). |
| 1012 | + */ |
| 1013 | +void |
| 1014 | +EndNonTransactionalInvalidation(void) |
| 1015 | +{ |
| 1016 | + InvalidationChunk *chunk; |
| 1017 | + InvalidationChunk *next; |
| 1018 | + |
| 1019 | + /* Must be at top of stack */ |
| 1020 | + Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL); |
| 1021 | + |
| 1022 | + /* Must not have any prior-command messages */ |
| 1023 | + Assert(transInvalInfo->PriorCmdInvalidMsgs.cclist == NULL); |
| 1024 | + Assert(transInvalInfo->PriorCmdInvalidMsgs.rclist == NULL); |
| 1025 | + |
| 1026 | + /* |
| 1027 | + * At present, this function is only used for CTID-changing updates; |
| 1028 | + * since the relcache init file doesn't store any tuple CTIDs, we |
| 1029 | + * don't have to invalidate it. That might not be true forever |
| 1030 | + * though, in which case we'd need code similar to AtEOXact_Inval. |
| 1031 | + */ |
| 1032 | + |
| 1033 | + /* Send out the invals */ |
| 1034 | + ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs, |
| 1035 | + LocalExecuteInvalidationMessage); |
| 1036 | + ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs, |
| 1037 | + SendSharedInvalidMessage); |
| 1038 | + |
| 1039 | + /* Clean up and release memory */ |
| 1040 | + for (chunk = transInvalInfo->CurrentCmdInvalidMsgs.cclist; |
| 1041 | + chunk != NULL; |
| 1042 | + chunk = next) |
| 1043 | + { |
| 1044 | + next = chunk->next; |
| 1045 | + pfree(chunk); |
| 1046 | + } |
| 1047 | + for (chunk = transInvalInfo->CurrentCmdInvalidMsgs.rclist; |
| 1048 | + chunk != NULL; |
| 1049 | + chunk = next) |
| 1050 | + { |
| 1051 | + next = chunk->next; |
| 1052 | + pfree(chunk); |
| 1053 | + } |
| 1054 | + transInvalInfo->CurrentCmdInvalidMsgs.cclist = NULL; |
| 1055 | + transInvalInfo->CurrentCmdInvalidMsgs.rclist = NULL; |
| 1056 | + transInvalInfo->RelcacheInitFileInval = false; |
| 1057 | +} |
| 1058 | + |
| 1059 | + |
967 | 1060 | /*
968 | 1061 | * CacheInvalidateHeapTuple
969 | 1062 | * Register the given tuple for invalidation at end of command
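The two functions added above are meant to bracket a nontransactional update: any invalidation messages queued between the calls are delivered immediately when EndNonTransactionalInvalidation() runs, rather than waiting for (and depending on) transaction commit. In this commit the only real caller is heap_page_prune() during VACUUM FULL's first pass. The sketch below is a hypothetical caller, not code from the commit: the function name and the commented-out body are placeholders, and it assumes the Begin/End pair is exported through utils/inval.h; only the call pattern around the new API is what the commit actually provides.

/*
 * Minimal caller sketch (hypothetical, assuming the declarations are
 * available via utils/inval.h).  Only the Begin/End calls correspond to
 * the API added in this commit.
 */
#include "postgres.h"
#include "utils/inval.h"

static void
perform_nontransactional_update(void)
{
	/*
	 * Verify we are at top transaction level with no invals already
	 * queued; messages queued after this point will be sent by
	 * EndNonTransactionalInvalidation() whether or not the current
	 * transaction eventually commits.
	 */
	BeginNonTransactionalInvalidation();

	/*
	 * ... modify the page and queue invalidation messages here, e.g.
	 * via CacheInvalidateHeapTuple() for each tuple whose CTID moves ...
	 */

	/*
	 * Deliver the queued messages to our own caches and to the shared
	 * invalidation queue, then reset the lists and free their memory so
	 * nothing is left for end-of-transaction processing.
	 */
	EndNonTransactionalInvalidation();
}

As the comments in the diff note, an elog(ERROR) between the two calls needs no special cleanup: the queued messages are simply discarded with the transaction's memory.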