Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                
Skip to content

Commit 0fef38d

Browse files
committed
Tweak hash index AM to use the new ReadOrZeroBuffer bufmgr API when fetching
pages it intends to zero immediately. Just to show there is some use for that function besides WAL recovery :-). Along the way, fold _hash_checkpage and _hash_pageinit calls into _hash_getbuf and friends, instead of expecting callers to do that separately.
1 parent 1aefa04 commit 0fef38d

File tree

8 files changed

+110
-85
lines changed

8 files changed

+110
-85
lines changed

contrib/pgstattuple/pgstattuple.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* $PostgreSQL: pgsql/contrib/pgstattuple/pgstattuple.c,v 1.26 2007/03/25 19:45:13 tgl Exp $
2+
* $PostgreSQL: pgsql/contrib/pgstattuple/pgstattuple.c,v 1.27 2007/05/03 16:45:58 tgl Exp $
33
*
44
* Copyright (c) 2001,2002 Tatsuo Ishii
55
*
@@ -360,7 +360,7 @@ pgstat_hash_page(pgstattuple_type * stat, Relation rel, BlockNumber blkno)
360360
Page page;
361361

362362
_hash_getlock(rel, blkno, HASH_SHARE);
363-
buf = _hash_getbuf(rel, blkno, HASH_READ);
363+
buf = _hash_getbuf(rel, blkno, HASH_READ, 0);
364364
page = BufferGetPage(buf);
365365

366366
if (PageGetSpecialSize(page) == MAXALIGN(sizeof(HashPageOpaqueData)))

src/backend/access/hash/hash.c

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
*
99
*
1010
* IDENTIFICATION
11-
* $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.93 2007/01/20 18:43:35 neilc Exp $
11+
* $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.94 2007/05/03 16:45:58 tgl Exp $
1212
*
1313
* NOTES
1414
* This file contains only the public interface routines.
@@ -506,8 +506,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
506506
* array cannot change under us; and it beats rereading the metapage for
507507
* each bucket.
508508
*/
509-
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
510-
_hash_checkpage(rel, metabuf, LH_META_PAGE);
509+
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
511510
metap = (HashMetaPage) BufferGetPage(metabuf);
512511
orig_maxbucket = metap->hashm_maxbucket;
513512
orig_ntuples = metap->hashm_ntuples;
@@ -548,8 +547,8 @@ hashbulkdelete(PG_FUNCTION_ARGS)
548547

549548
vacuum_delay_point();
550549

551-
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
552-
_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
550+
buf = _hash_getbuf(rel, blkno, HASH_WRITE,
551+
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
553552
page = BufferGetPage(buf);
554553
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
555554
Assert(opaque->hasho_bucket == cur_bucket);
@@ -607,8 +606,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
607606
}
608607

609608
/* Write-lock metapage and check for split since we started */
610-
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE);
611-
_hash_checkpage(rel, metabuf, LH_META_PAGE);
609+
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE, LH_META_PAGE);
612610
metap = (HashMetaPage) BufferGetPage(metabuf);
613611

614612
if (cur_maxbucket != metap->hashm_maxbucket)

src/backend/access/hash/hashinsert.c

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
*
99
*
1010
* IDENTIFICATION
11-
* $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.44 2007/01/05 22:19:22 momjian Exp $
11+
* $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.45 2007/05/03 16:45:58 tgl Exp $
1212
*
1313
*-------------------------------------------------------------------------
1414
*/
@@ -66,8 +66,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
6666
_hash_getlock(rel, 0, HASH_SHARE);
6767

6868
/* Read the metapage */
69-
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
70-
_hash_checkpage(rel, metabuf, LH_META_PAGE);
69+
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
7170
metap = (HashMetaPage) BufferGetPage(metabuf);
7271

7372
/*
@@ -104,8 +103,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
104103
_hash_droplock(rel, 0, HASH_SHARE);
105104

106105
/* Fetch the primary bucket page for the bucket */
107-
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
108-
_hash_checkpage(rel, buf, LH_BUCKET_PAGE);
106+
buf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BUCKET_PAGE);
109107
page = BufferGetPage(buf);
110108
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
111109
Assert(pageopaque->hasho_bucket == bucket);
@@ -125,7 +123,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
125123
* find out next pass through the loop test above.
126124
*/
127125
_hash_relbuf(rel, buf);
128-
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
126+
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
129127
page = BufferGetPage(buf);
130128
}
131129
else
@@ -145,8 +143,8 @@ _hash_doinsert(Relation rel, IndexTuple itup)
145143
/* should fit now, given test above */
146144
Assert(PageGetFreeSpace(page) >= itemsz);
147145
}
148-
_hash_checkpage(rel, buf, LH_OVERFLOW_PAGE);
149146
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
147+
Assert(pageopaque->hasho_flag == LH_OVERFLOW_PAGE);
150148
Assert(pageopaque->hasho_bucket == bucket);
151149
}
152150

src/backend/access/hash/hashovfl.c

Lines changed: 24 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
*
99
*
1010
* IDENTIFICATION
11-
* $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.56 2007/04/19 20:24:04 tgl Exp $
11+
* $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.57 2007/05/03 16:45:58 tgl Exp $
1212
*
1313
* NOTES
1414
* Overflow pages look like ordinary relation pages.
@@ -107,20 +107,21 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
107107

108108
/* allocate and lock an empty overflow page */
109109
ovflbuf = _hash_getovflpage(rel, metabuf);
110-
ovflpage = BufferGetPage(ovflbuf);
111110

112111
/*
113112
* Write-lock the tail page. It is okay to hold two buffer locks here
114113
* since there cannot be anyone else contending for access to ovflbuf.
115114
*/
116115
_hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE);
117116

117+
/* probably redundant... */
118+
_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
119+
118120
/* loop to find current tail page, in case someone else inserted too */
119121
for (;;)
120122
{
121123
BlockNumber nextblkno;
122124

123-
_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
124125
page = BufferGetPage(buf);
125126
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
126127
nextblkno = pageopaque->hasho_nextblkno;
@@ -131,11 +132,11 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
131132
/* we assume we do not need to write the unmodified page */
132133
_hash_relbuf(rel, buf);
133134

134-
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
135+
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
135136
}
136137

137138
/* now that we have correct backlink, initialize new overflow page */
138-
_hash_pageinit(ovflpage, BufferGetPageSize(ovflbuf));
139+
ovflpage = BufferGetPage(ovflbuf);
139140
ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
140141
ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf);
141142
ovflopaque->hasho_nextblkno = InvalidBlockNumber;
@@ -156,7 +157,8 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
156157
* _hash_getovflpage()
157158
*
158159
* Find an available overflow page and return it. The returned buffer
159-
* is pinned and write-locked, but its contents are not initialized.
160+
* is pinned and write-locked, and has had _hash_pageinit() applied,
161+
* but it is caller's responsibility to fill the special space.
160162
*
161163
* The caller must hold a pin, but no lock, on the metapage buffer.
162164
* That buffer is left in the same state at exit.
@@ -220,8 +222,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
220222
/* Release exclusive lock on metapage while reading bitmap page */
221223
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
222224

223-
mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE);
224-
_hash_checkpage(rel, mapbuf, LH_BITMAP_PAGE);
225+
mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE, LH_BITMAP_PAGE);
225226
mappage = BufferGetPage(mapbuf);
226227
freep = HashPageGetBitmap(mappage);
227228

@@ -277,7 +278,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
277278
* with metapage write lock held; would be better to use a lock that
278279
* doesn't block incoming searches.
279280
*/
280-
newbuf = _hash_getnewbuf(rel, blkno, HASH_WRITE);
281+
newbuf = _hash_getnewbuf(rel, blkno);
281282

282283
metap->hashm_spares[splitnum]++;
283284

@@ -327,8 +328,8 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
327328
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
328329
}
329330

330-
/* Fetch and return the recycled page */
331-
return _hash_getbuf(rel, blkno, HASH_WRITE);
331+
/* Fetch, init, and return the recycled page */
332+
return _hash_getinitbuf(rel, blkno);
332333
}
333334

334335
/*
@@ -412,30 +413,29 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
412413
*/
413414
if (BlockNumberIsValid(prevblkno))
414415
{
415-
Buffer prevbuf = _hash_getbuf(rel, prevblkno, HASH_WRITE);
416+
Buffer prevbuf = _hash_getbuf(rel, prevblkno, HASH_WRITE,
417+
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
416418
Page prevpage = BufferGetPage(prevbuf);
417419
HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage);
418420

419-
_hash_checkpage(rel, prevbuf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
420421
Assert(prevopaque->hasho_bucket == bucket);
421422
prevopaque->hasho_nextblkno = nextblkno;
422423
_hash_wrtbuf(rel, prevbuf);
423424
}
424425
if (BlockNumberIsValid(nextblkno))
425426
{
426-
Buffer nextbuf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
427+
Buffer nextbuf = _hash_getbuf(rel, nextblkno, HASH_WRITE,
428+
LH_OVERFLOW_PAGE);
427429
Page nextpage = BufferGetPage(nextbuf);
428430
HashPageOpaque nextopaque = (HashPageOpaque) PageGetSpecialPointer(nextpage);
429431

430-
_hash_checkpage(rel, nextbuf, LH_OVERFLOW_PAGE);
431432
Assert(nextopaque->hasho_bucket == bucket);
432433
nextopaque->hasho_prevblkno = prevblkno;
433434
_hash_wrtbuf(rel, nextbuf);
434435
}
435436

436437
/* Read the metapage so we can determine which bitmap page to use */
437-
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
438-
_hash_checkpage(rel, metabuf, LH_META_PAGE);
438+
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
439439
metap = (HashMetaPage) BufferGetPage(metabuf);
440440

441441
/* Identify which bit to set */
@@ -452,8 +452,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
452452
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
453453

454454
/* Clear the bitmap bit to indicate that this overflow page is free */
455-
mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE);
456-
_hash_checkpage(rel, mapbuf, LH_BITMAP_PAGE);
455+
mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BITMAP_PAGE);
457456
mappage = BufferGetPage(mapbuf);
458457
freep = HashPageGetBitmap(mappage);
459458
Assert(ISSET(freep, bitmapbit));
@@ -507,11 +506,10 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
507506
* page while holding the metapage lock, but this path is taken so seldom
508507
* that it's not worth worrying about.
509508
*/
510-
buf = _hash_getnewbuf(rel, blkno, HASH_WRITE);
509+
buf = _hash_getnewbuf(rel, blkno);
511510
pg = BufferGetPage(buf);
512511

513-
/* initialize the page */
514-
_hash_pageinit(pg, BufferGetPageSize(buf));
512+
/* initialize the page's special space */
515513
op = (HashPageOpaque) PageGetSpecialPointer(pg);
516514
op->hasho_prevblkno = InvalidBlockNumber;
517515
op->hasho_nextblkno = InvalidBlockNumber;
@@ -583,8 +581,7 @@ _hash_squeezebucket(Relation rel,
583581
* start squeezing into the base bucket page.
584582
*/
585583
wblkno = bucket_blkno;
586-
wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE);
587-
_hash_checkpage(rel, wbuf, LH_BUCKET_PAGE);
584+
wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE, LH_BUCKET_PAGE);
588585
wpage = BufferGetPage(wbuf);
589586
wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
590587

@@ -607,8 +604,7 @@ _hash_squeezebucket(Relation rel,
607604
rblkno = ropaque->hasho_nextblkno;
608605
if (ropaque != wopaque)
609606
_hash_relbuf(rel, rbuf);
610-
rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE);
611-
_hash_checkpage(rel, rbuf, LH_OVERFLOW_PAGE);
607+
rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
612608
rpage = BufferGetPage(rbuf);
613609
ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage);
614610
Assert(ropaque->hasho_bucket == bucket);
@@ -648,8 +644,7 @@ _hash_squeezebucket(Relation rel,
648644
return;
649645
}
650646

651-
wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE);
652-
_hash_checkpage(rel, wbuf, LH_OVERFLOW_PAGE);
647+
wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
653648
wpage = BufferGetPage(wbuf);
654649
wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
655650
Assert(wopaque->hasho_bucket == bucket);
@@ -701,8 +696,7 @@ _hash_squeezebucket(Relation rel,
701696
/* free this overflow page, then get the previous one */
702697
_hash_freeovflpage(rel, rbuf);
703698

704-
rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE);
705-
_hash_checkpage(rel, rbuf, LH_OVERFLOW_PAGE);
699+
rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
706700
rpage = BufferGetPage(rbuf);
707701
ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage);
708702
Assert(ropaque->hasho_bucket == bucket);

0 commit comments

Comments (0)