@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.56 2007/04/19 20:24:04 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.57 2007/05/03 16:45:58 tgl Exp $
  *
  * NOTES
  *    Overflow pages look like ordinary relation pages.
@@ -107,20 +107,21 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
 
     /* allocate and lock an empty overflow page */
     ovflbuf = _hash_getovflpage(rel, metabuf);
-    ovflpage = BufferGetPage(ovflbuf);
 
     /*
      * Write-lock the tail page.  It is okay to hold two buffer locks here
      * since there cannot be anyone else contending for access to ovflbuf.
      */
     _hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE);
 
+    /* probably redundant... */
+    _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
+
     /* loop to find current tail page, in case someone else inserted too */
     for (;;)
     {
         BlockNumber nextblkno;
 
-        _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
         page = BufferGetPage(buf);
         pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
         nextblkno = pageopaque->hasho_nextblkno;
@@ -131,11 +132,11 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
         /* we assume we do not need to write the unmodified page */
         _hash_relbuf(rel, buf);
 
-        buf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
+        buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
     }
 
     /* now that we have correct backlink, initialize new overflow page */
-    _hash_pageinit(ovflpage, BufferGetPageSize(ovflbuf));
+    ovflpage = BufferGetPage(ovflbuf);
     ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
     ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf);
     ovflopaque->hasho_nextblkno = InvalidBlockNumber;
@@ -156,7 +157,8 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
  * _hash_getovflpage()
  *
  *    Find an available overflow page and return it.  The returned buffer
- *    is pinned and write-locked, but its contents are not initialized.
+ *    is pinned and write-locked, and has had _hash_pageinit() applied,
+ *    but it is caller's responsibility to fill the special space.
  *
  * The caller must hold a pin, but no lock, on the metapage buffer.
  * That buffer is left in the same state at exit.
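
Under the revised contract, a caller of _hash_getovflpage() receives a page that has already been through _hash_pageinit() and only needs its special space filled in, exactly as _hash_addovflpage() now does in the hunk above. A minimal sketch of the expected usage, built from names appearing in this diff (the field values here are illustrative):

    Buffer          ovflbuf = _hash_getovflpage(rel, metabuf);
    Page            ovflpage = BufferGetPage(ovflbuf);    /* already initialized */
    HashPageOpaque  ovflopaque;

    /* caller's job: fill the hash opaque data in the special space */
    ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
    ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf);    /* link to tail */
    ovflopaque->hasho_nextblkno = InvalidBlockNumber;
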
@@ -220,8 +222,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
     /* Release exclusive lock on metapage while reading bitmap page */
     _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
 
-    mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE);
-    _hash_checkpage(rel, mapbuf, LH_BITMAP_PAGE);
+    mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE, LH_BITMAP_PAGE);
     mappage = BufferGetPage(mapbuf);
     freep = HashPageGetBitmap(mappage);
 
@@ -277,7 +278,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
      * with metapage write lock held; would be better to use a lock that
      * doesn't block incoming searches.
      */
-    newbuf = _hash_getnewbuf(rel, blkno, HASH_WRITE);
+    newbuf = _hash_getnewbuf(rel, blkno);
 
     metap->hashm_spares[splitnum]++;
 
@@ -327,8 +328,8 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
         _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
     }
 
-    /* Fetch and return the recycled page */
-    return _hash_getbuf(rel, blkno, HASH_WRITE);
+    /* Fetch, init, and return the recycled page */
+    return _hash_getinitbuf(rel, blkno);
 }
 
 /*
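
_hash_getinitbuf() itself lives in hashpage.c, outside this diff; based on how it is called here and on the updated _hash_getovflpage() comment, a plausible sketch (an assumption, not the verbatim source) is:

    /* Sketch: fetch a block known to exist and reset it from scratch. */
    Buffer
    _hash_getinitbuf(Relation rel, BlockNumber blkno)
    {
        Buffer      buf = ReadBuffer(rel, blkno);

        LockBuffer(buf, HASH_WRITE);

        /* old contents are irrelevant, so reinitialize the whole page */
        _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

        return buf;
    }

No LH_* page-type check is needed: the page is about to be overwritten regardless of what it contained.
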
@@ -412,30 +413,29 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
      */
     if (BlockNumberIsValid(prevblkno))
     {
-        Buffer        prevbuf = _hash_getbuf(rel, prevblkno, HASH_WRITE);
+        Buffer        prevbuf = _hash_getbuf(rel, prevblkno, HASH_WRITE,
+                                             LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
         Page          prevpage = BufferGetPage(prevbuf);
         HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage);
 
-        _hash_checkpage(rel, prevbuf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
         Assert(prevopaque->hasho_bucket == bucket);
         prevopaque->hasho_nextblkno = nextblkno;
         _hash_wrtbuf(rel, prevbuf);
     }
     if (BlockNumberIsValid(nextblkno))
     {
-        Buffer        nextbuf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
+        Buffer        nextbuf = _hash_getbuf(rel, nextblkno, HASH_WRITE,
+                                             LH_OVERFLOW_PAGE);
         Page          nextpage = BufferGetPage(nextbuf);
         HashPageOpaque nextopaque = (HashPageOpaque) PageGetSpecialPointer(nextpage);
 
-        _hash_checkpage(rel, nextbuf, LH_OVERFLOW_PAGE);
         Assert(nextopaque->hasho_bucket == bucket);
         nextopaque->hasho_prevblkno = prevblkno;
         _hash_wrtbuf(rel, nextbuf);
     }
 
     /* Read the metapage so we can determine which bitmap page to use */
-    metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
-    _hash_checkpage(rel, metabuf, LH_META_PAGE);
+    metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
     metap = (HashMetaPage) BufferGetPage(metabuf);
 
     /* Identify which bit to set */
@@ -452,8 +452,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
     _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
 
     /* Clear the bitmap bit to indicate that this overflow page is free */
-    mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE);
-    _hash_checkpage(rel, mapbuf, LH_BITMAP_PAGE);
+    mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BITMAP_PAGE);
     mappage = BufferGetPage(mapbuf);
     freep = HashPageGetBitmap(mappage);
     Assert(ISSET(freep, bitmapbit));
@@ -507,11 +506,10 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
      * page while holding the metapage lock, but this path is taken so seldom
      * that it's not worth worrying about.
      */
-    buf = _hash_getnewbuf(rel, blkno, HASH_WRITE);
+    buf = _hash_getnewbuf(rel, blkno);
     pg = BufferGetPage(buf);
 
-    /* initialize the page */
-    _hash_pageinit(pg, BufferGetPageSize(buf));
+    /* initialize the page's special space */
     op = (HashPageOpaque) PageGetSpecialPointer(pg);
     op->hasho_prevblkno = InvalidBlockNumber;
     op->hasho_nextblkno = InvalidBlockNumber;
@@ -583,8 +581,7 @@ _hash_squeezebucket(Relation rel,
      * start squeezing into the base bucket page.
      */
     wblkno = bucket_blkno;
-    wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE);
-    _hash_checkpage(rel, wbuf, LH_BUCKET_PAGE);
+    wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE, LH_BUCKET_PAGE);
     wpage = BufferGetPage(wbuf);
     wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
 
@@ -607,8 +604,7 @@ _hash_squeezebucket(Relation rel,
         rblkno = ropaque->hasho_nextblkno;
         if (ropaque != wopaque)
             _hash_relbuf(rel, rbuf);
-        rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE);
-        _hash_checkpage(rel, rbuf, LH_OVERFLOW_PAGE);
+        rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
         rpage = BufferGetPage(rbuf);
         ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage);
         Assert(ropaque->hasho_bucket == bucket);
@@ -648,8 +644,7 @@ _hash_squeezebucket(Relation rel,
             return;
         }
 
-        wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE);
-        _hash_checkpage(rel, wbuf, LH_OVERFLOW_PAGE);
+        wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
         wpage = BufferGetPage(wbuf);
         wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
         Assert(wopaque->hasho_bucket == bucket);
@@ -701,8 +696,7 @@ _hash_squeezebucket(Relation rel,
         /* free this overflow page, then get the previous one */
         _hash_freeovflpage(rel, rbuf);
 
-        rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE);
-        _hash_checkpage(rel, rbuf, LH_OVERFLOW_PAGE);
+        rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
         rpage = BufferGetPage(rbuf);
         ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage);
         Assert(ropaque->hasho_bucket == bucket);
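
The mechanical change running through all of the hunks above is that _hash_getbuf() now takes a fourth argument: callers pass the set of acceptable LH_* page-type flags, and the separate _hash_checkpage() calls at each call site disappear. The function is defined in hashpage.c, not shown in this diff; a sketch of the shape implied by the call sites here (an assumption, not the verbatim source):

    Buffer
    _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
    {
        Buffer      buf = ReadBuffer(rel, blkno);

        if (access != HASH_NOLOCK)
            LockBuffer(buf, access);

        /* sanity-check that the page is one of the expected kinds */
        _hash_checkpage(rel, buf, flags);

        return buf;
    }

Folding the check into the fetch makes it impossible to forget at a call site. The same simplification presumably explains why _hash_getnewbuf() lost its access argument: a page being allocated for the first time is always write-locked, so the parameter carried no information.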