 *
 * Also, whenever we see an operation on a pg_class or pg_attribute tuple,
 * we register a relcache flush operation for the relation described by that
 * tuple.  pg_class updates trigger an smgr flush operation as well.
 *
 * We keep the relcache and smgr flush requests in lists separate from the
 * catcache tuple flush requests.  This allows us to issue all the pending
 * catcache flushes before we issue relcache flushes, which saves us from
 * loading a catcache tuple during relcache load only to flush it again
 * right away.  Also, we avoid queuing multiple relcache flush requests for
 * the same relation, since a relcache flush is relatively expensive to do.
 * (XXX is it worth testing likewise for duplicate catcache flush entries?
 * Probably not.)
 *
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.70 2005/01/10 21:57:17 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -115,7 +115,7 @@ typedef struct InvalidationChunk
115
115
/*
 * Pending invalidation requests are accumulated in one of these.
 * Catcache tuple flushes and relcache/smgr flushes are kept in separate
 * chunk lists so that all catcache flushes can be issued before any
 * relcache flush (see file header comment for why that ordering matters).
 */
typedef struct InvalidationListHeader
{
	InvalidationChunk *cclist;	/* list of chunks holding catcache msgs */
	InvalidationChunk *rclist;	/* list of chunks holding relcache/smgr msgs */
} InvalidationListHeader;
120
120
121
121
/*----------------
@@ -164,7 +164,7 @@ static TransInvalidationInfo *transInvalInfo = NULL;
164
164
165
165
/*
 * Table of registered cache-invalidation callbacks.  Each entry pairs a
 * message-type discriminator with a user-supplied function and argument.
 * NOTE(review): registration/dispatch code is outside this view — confirm
 * how entries are added and matched against incoming messages.
 */
static struct CACHECALLBACK
{
	int16		id;				/* cache number or message type id */
	CacheCallbackFunction function;	/* callback to invoke on a match */
	Datum		arg;			/* opaque argument passed through to callback */
} cache_callback_list[MAX_CACHE_CALLBACKS];
@@ -273,7 +273,7 @@ AppendInvalidationMessageList(InvalidationChunk **destHdr,
273
273
* Invalidation set support functions
274
274
*
275
275
* These routines understand about the division of a logical invalidation
276
- * list into separate physical lists for catcache and relcache entries.
276
+ * list into separate physical lists for catcache and relcache/smgr entries.
277
277
* ----------------------------------------------------------------
278
278
*/
279
279
@@ -299,22 +299,42 @@ AddCatcacheInvalidationMessage(InvalidationListHeader *hdr,
299
299
*/
300
300
/*
 * Add a relcache inval entry to the list in hdr, unless an identical
 * entry is already queued (a relcache flush is relatively expensive, so
 * duplicates are worth suppressing).
 */
static void
AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
							   Oid dbId, Oid relId)
{
	SharedInvalidationMessage msg;

	/* Don't add a duplicate item */
	/* We assume dbId need not be checked because it will never change */
	/*
	 * ProcessMessageList scans hdr->rclist binding each queued message to
	 * "msg"; the statement fragment below returns early on a duplicate.
	 * The rclist also carries smgr messages, hence the id check.
	 */
	ProcessMessageList(hdr->rclist,
					   if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
						   msg->rc.relId == relId)
					   return);

	/* OK, add the item */
	msg.rc.id = SHAREDINVALRELCACHE_ID;
	msg.rc.dbId = dbId;
	msg.rc.relId = relId;
	AddInvalidationMessage(&hdr->rclist, &msg);
}
319
+
320
/*
 * Add an smgr inval entry (identified by physical RelFileNode) to the
 * list in hdr, unless an entry for the same rnode is already queued.
 * Smgr messages share the rclist with relcache messages; the sm.id field
 * distinguishes them.
 */
static void
AddSmgrInvalidationMessage(InvalidationListHeader *hdr,
						   RelFileNode rnode)
{
	SharedInvalidationMessage msg;

	/* Don't add a duplicate item */
	ProcessMessageList(hdr->rclist,
					   if (msg->sm.id == SHAREDINVALSMGR_ID &&
						   RelFileNodeEquals(msg->sm.rnode, rnode))
					   return);

	/* OK, add the item */
	msg.sm.id = SHAREDINVALSMGR_ID;
	msg.sm.rnode = rnode;
	AddInvalidationMessage(&hdr->rclist, &msg);
}
320
340
@@ -370,10 +390,10 @@ RegisterCatcacheInvalidation(int cacheId,
370
390
* As above, but register a relcache invalidation event.
371
391
*/
372
392
static void
373
- RegisterRelcacheInvalidation (Oid dbId , Oid relId , RelFileNode physId )
393
+ RegisterRelcacheInvalidation (Oid dbId , Oid relId )
374
394
{
375
395
AddRelcacheInvalidationMessage (& transInvalInfo -> CurrentCmdInvalidMsgs ,
376
- dbId , relId , physId );
396
+ dbId , relId );
377
397
378
398
/*
379
399
* If the relation being invalidated is one of those cached in the
@@ -383,10 +403,22 @@ RegisterRelcacheInvalidation(Oid dbId, Oid relId, RelFileNode physId)
383
403
transInvalInfo -> RelcacheInitFileInval = true;
384
404
}
385
405
406
/*
 * RegisterSmgrInvalidation
 *
 * As above, but register an smgr invalidation event.
 *
 * Queues the event in the current command's pending-invalidation list;
 * unlike RegisterRelcacheInvalidation there is no init-file bookkeeping,
 * since smgr entries do not feed the relcache init file.
 */
static void
RegisterSmgrInvalidation(RelFileNode rnode)
{
	AddSmgrInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
							   rnode);
}
+
386
418
/*
387
419
* LocalExecuteInvalidationMessage
388
420
*
389
- * Process a single invalidation message (which could be either type).
421
+ * Process a single invalidation message (which could be of any type).
390
422
* Only the local caches are flushed; this does not transmit the message
391
423
* to other backends.
392
424
*/
@@ -426,17 +458,14 @@ LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
426
458
(* ccitem -> function ) (ccitem -> arg , msg -> rc .relId );
427
459
}
428
460
}
461
+ }
462
+ else if (msg -> id == SHAREDINVALSMGR_ID )
463
+ {
429
464
/*
430
- * If the message includes a valid relfilenode, we must ensure
431
- * the smgr cache entry gets zapped. This might not have happened
432
- * above since the relcache entry might not have existed or might
433
- * have been associated with a different relfilenode.
434
- *
435
- * XXX there is no real good reason for rnode inval to be in the
436
- * same message at all. FIXME in 8.1.
465
+ * We could have smgr entries for relations of other databases,
466
+ * so no short-circuit test is possible here.
437
467
*/
438
- if (OidIsValid (msg -> rc .physId .relNode ))
439
- smgrclosenode (msg -> rc .physId );
468
+ smgrclosenode (msg -> sm .rnode );
440
469
}
441
470
else
442
471
elog (FATAL , "unrecognized SI message id: %d" , msg -> id );
@@ -475,16 +504,11 @@ InvalidateSystemCaches(void)
475
504
* of catalog/relation cache entries; if so, register inval events.
476
505
*/
477
506
static void
478
- PrepareForTupleInvalidation (Relation relation , HeapTuple tuple ,
479
- void (* CacheIdRegisterFunc ) (int , uint32 ,
480
- ItemPointer , Oid ),
481
- void (* RelationIdRegisterFunc ) (Oid , Oid ,
482
- RelFileNode ))
507
+ PrepareForTupleInvalidation (Relation relation , HeapTuple tuple )
483
508
{
484
509
Oid tupleRelId ;
485
510
Oid databaseId ;
486
511
Oid relationId ;
487
- RelFileNode rnode ;
488
512
489
513
/* Do nothing during bootstrap */
490
514
if (IsBootstrapProcessingMode ())
@@ -510,7 +534,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple,
510
534
* First let the catcache do its thing
511
535
*/
512
536
PrepareToInvalidateCacheTuple (relation , tuple ,
513
- CacheIdRegisterFunc );
537
+ RegisterCatcacheInvalidation );
514
538
515
539
/*
516
540
* Now, is this tuple one of the primary definers of a relcache entry?
@@ -520,27 +544,36 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple,
520
544
if (tupleRelId == RelOid_pg_class )
521
545
{
522
546
Form_pg_class classtup = (Form_pg_class ) GETSTRUCT (tuple );
547
+ RelFileNode rnode ;
523
548
524
549
relationId = HeapTupleGetOid (tuple );
525
550
if (classtup -> relisshared )
526
551
databaseId = InvalidOid ;
527
552
else
528
553
databaseId = MyDatabaseId ;
529
- if (classtup -> reltablespace )
530
- rnode .spcNode = classtup -> reltablespace ;
531
- else
532
- rnode .spcNode = MyDatabaseTableSpace ;
533
- rnode .dbNode = databaseId ;
534
- rnode .relNode = classtup -> relfilenode ;
535
554
536
555
/*
556
+ * We need to send out an smgr inval as well as a relcache inval.
557
+ * This is needed because other backends might possibly possess
558
+ * smgr cache but not relcache entries for the target relation.
559
+ *
537
560
* Note: during a pg_class row update that assigns a new
538
561
* relfilenode or reltablespace value, we will be called on both
539
562
* the old and new tuples, and thus will broadcast invalidation
540
563
* messages showing both the old and new RelFileNode values. This
541
564
* ensures that other backends will close smgr references to the
542
565
* old file.
566
+ *
567
+ * XXX possible future cleanup: it might be better to trigger smgr
568
+ * flushes explicitly, rather than indirectly from pg_class updates.
543
569
*/
570
+ if (classtup -> reltablespace )
571
+ rnode .spcNode = classtup -> reltablespace ;
572
+ else
573
+ rnode .spcNode = MyDatabaseTableSpace ;
574
+ rnode .dbNode = databaseId ;
575
+ rnode .relNode = classtup -> relfilenode ;
576
+ RegisterSmgrInvalidation (rnode );
544
577
}
545
578
else if (tupleRelId == RelOid_pg_attribute )
546
579
{
@@ -558,18 +591,14 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple,
558
591
* though.
559
592
*/
560
593
databaseId = MyDatabaseId ;
561
- /* We assume no smgr cache flush is needed, either */
562
- rnode .spcNode = InvalidOid ;
563
- rnode .dbNode = InvalidOid ;
564
- rnode .relNode = InvalidOid ;
565
594
}
566
595
else
567
596
return ;
568
597
569
598
/*
570
599
* Yes. We need to register a relcache invalidation event.
571
600
*/
572
- ( * RelationIdRegisterFunc ) ( databaseId , relationId , rnode );
601
+ RegisterRelcacheInvalidation ( databaseId , relationId );
573
602
}
574
603
575
604
@@ -790,9 +819,7 @@ CommandEndInvalidationMessages(void)
790
819
/*
 * CacheInvalidateHeapTuple
 *		Register the given tuple for invalidation at end of command
 *		(ie, current command is creating or outdating this tuple).
 *
 * Thin public wrapper: all decisions about which catcache/relcache/smgr
 * flushes the tuple implies are made in PrepareForTupleInvalidation.
 */
void
CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple)
{
	PrepareForTupleInvalidation(relation, tuple);
}
797
824
798
825
/*
@@ -803,7 +830,10 @@ CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple)
803
830
* This is used in places that need to force relcache rebuild but aren't
804
831
* changing any of the tuples recognized as contributors to the relcache
805
832
* entry by PrepareForTupleInvalidation. (An example is dropping an index.)
806
- * We assume in particular that relfilenode isn't changing.
833
+ * We assume in particular that relfilenode/reltablespace aren't changing
834
+ * (so the rd_node value is still good).
835
+ *
836
+ * XXX most callers of this probably don't need to force an smgr flush.
807
837
*/
808
838
void
809
839
CacheInvalidateRelcache (Relation relation )
@@ -817,7 +847,8 @@ CacheInvalidateRelcache(Relation relation)
817
847
else
818
848
databaseId = MyDatabaseId ;
819
849
820
- RegisterRelcacheInvalidation (databaseId , relationId , relation -> rd_node );
850
+ RegisterRelcacheInvalidation (databaseId , relationId );
851
+ RegisterSmgrInvalidation (relation -> rd_node );
821
852
}
822
853
823
854
/*
@@ -844,7 +875,8 @@ CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
844
875
rnode .dbNode = databaseId ;
845
876
rnode .relNode = classtup -> relfilenode ;
846
877
847
- RegisterRelcacheInvalidation (databaseId , relationId , rnode );
878
+ RegisterRelcacheInvalidation (databaseId , relationId );
879
+ RegisterSmgrInvalidation (rnode );
848
880
}
849
881
850
882
/*
0 commit comments