@@ -2117,82 +2117,72 @@ heapam_estimate_rel_size(Relation rel, int32 *attr_widths,
 
 static bool
 heapam_scan_bitmap_next_block(TableScanDesc scan,
-                              BlockNumber *blockno, bool *recheck,
+                              bool *recheck,
                               uint64 *lossy_pages, uint64 *exact_pages)
 {
     BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) scan;
     HeapScanDesc hscan = (HeapScanDesc) bscan;
     BlockNumber block;
+    void       *per_buffer_data;
     Buffer      buffer;
     Snapshot    snapshot;
     int         ntup;
-    TBMIterateResult tbmres;
+    TBMIterateResult *tbmres;
     OffsetNumber offsets[TBM_MAX_TUPLES_PER_PAGE];
     int         noffsets = -1;
 
     Assert(scan->rs_flags & SO_TYPE_BITMAPSCAN);
+    Assert(hscan->rs_read_stream);
 
     hscan->rs_cindex = 0;
     hscan->rs_ntuples = 0;
 
-    *blockno = InvalidBlockNumber;
-    *recheck = true;
-
-    do
+    /* Release buffer containing previous block. */
+    if (BufferIsValid(hscan->rs_cbuf))
     {
-        CHECK_FOR_INTERRUPTS();
+        ReleaseBuffer(hscan->rs_cbuf);
+        hscan->rs_cbuf = InvalidBuffer;
+    }
 
-        if (!tbm_iterate(&scan->st.rs_tbmiterator, &tbmres))
-            return false;
+    hscan->rs_cbuf = read_stream_next_buffer(hscan->rs_read_stream,
+                                             &per_buffer_data);
 
-        /* Exact pages need their tuple offsets extracted. */
-        if (!tbmres.lossy)
-            noffsets = tbm_extract_page_tuple(&tbmres, offsets,
-                                              TBM_MAX_TUPLES_PER_PAGE);
+    if (BufferIsInvalid(hscan->rs_cbuf))
+    {
+        if (BufferIsValid(bscan->rs_vmbuffer))
+        {
+            ReleaseBuffer(bscan->rs_vmbuffer);
+            bscan->rs_vmbuffer = InvalidBuffer;
+        }
 
         /*
-         * Ignore any claimed entries past what we think is the end of the
-         * relation. It may have been extended after the start of our scan (we
-         * only hold an AccessShareLock, and it could be inserts from this
-         * backend). We don't take this optimization in SERIALIZABLE
-         * isolation though, as we need to examine all invisible tuples
-         * reachable by the index.
+         * Bitmap is exhausted. Time to emit empty tuples if relevant. We emit
+         * all empty tuples at the end instead of emitting them per block we
+         * skip fetching. This is necessary because the streaming read API
+         * will only return TBMIterateResults for blocks actually fetched.
+         * When we skip fetching a block, we keep track of how many empty
+         * tuples to emit at the end of the BitmapHeapScan. We do not recheck
+         * all NULL tuples.
          */
-    } while (!IsolationIsSerializable() &&
-             tbmres.blockno >= hscan->rs_nblocks);
+        *recheck = false;
+        return bscan->rs_empty_tuples_pending > 0;
+    }
 
-    /* Got a valid block */
-    *blockno = tbmres.blockno;
-    *recheck = tbmres.recheck;
+    Assert(per_buffer_data);
 
-    /*
-     * We can skip fetching the heap page if we don't need any fields from the
-     * heap, the bitmap entries don't need rechecking, and all tuples on the
-     * page are visible to our transaction.
-     */
-    if (!(scan->rs_flags & SO_NEED_TUPLES) &&
-        !tbmres.recheck &&
-        VM_ALL_VISIBLE(scan->rs_rd, tbmres.blockno, &bscan->rs_vmbuffer))
-    {
-        /* can't be lossy in the skip_fetch case */
-        Assert(!tbmres.lossy);
-        Assert(bscan->rs_empty_tuples_pending >= 0);
-        Assert(noffsets > -1);
+    tbmres = per_buffer_data;
 
-        bscan->rs_empty_tuples_pending += noffsets;
+    Assert(BlockNumberIsValid(tbmres->blockno));
+    Assert(BufferGetBlockNumber(hscan->rs_cbuf) == tbmres->blockno);
 
-        return true;
-    }
+    /* Exact pages need their tuple offsets extracted. */
+    if (!tbmres->lossy)
+        noffsets = tbm_extract_page_tuple(tbmres, offsets,
+                                          TBM_MAX_TUPLES_PER_PAGE);
 
-    block = tbmres.blockno;
+    *recheck = tbmres->recheck;
 
-    /*
-     * Acquire pin on the target heap page, trading in any pin we held before.
-     */
-    hscan->rs_cbuf = ReleaseAndReadBuffer(hscan->rs_cbuf,
-                                          scan->rs_rd,
-                                          block);
-    hscan->rs_cblock = block;
+    block = hscan->rs_cblock = tbmres->blockno;
     buffer = hscan->rs_cbuf;
     snapshot = scan->rs_snapshot;
 
@@ -2213,7 +2203,7 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
     /*
      * We need two separate strategies for lossy and non-lossy cases.
      */
-    if (!tbmres.lossy)
+    if (!tbmres->lossy)
     {
         /*
          * Bitmap is non-lossy, so we just look through the offsets listed in
@@ -2277,7 +2267,7 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
     Assert(ntup <= MaxHeapTuplesPerPage);
     hscan->rs_ntuples = ntup;
 
-    if (tbmres.lossy)
+    if (tbmres->lossy)
         (*lossy_pages)++;
     else
         (*exact_pages)++;
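For readers unfamiliar with the consumer side of the streaming read API that this hunk switches to, here is a minimal sketch of the pattern, assuming the PostgreSQL 17-era storage/read_stream.h interface (read_stream_begin_relation, read_stream_next_buffer, read_stream_end). The callback name next_block_cb and the NextBlockState struct are hypothetical illustrations, not part of this patch; the bitmap heap scan version additionally requests per-buffer space from the stream and stashes a TBMIterateResult there, which is the per_buffer_data the hunk above consumes.

/*
 * Sketch only: a generic read stream producer callback plus consumer loop.
 * next_block_cb and NextBlockState are hypothetical; everything else is the
 * public read_stream.h / bufmgr.h API.
 */
#include "postgres.h"

#include "common/relpath.h"
#include "storage/bufmgr.h"
#include "storage/read_stream.h"
#include "utils/rel.h"

typedef struct NextBlockState
{
    BlockNumber next;           /* next block to hand to the stream */
    BlockNumber nblocks;        /* stop when we reach this */
} NextBlockState;

/* Tell the stream which block to read next; InvalidBlockNumber ends it. */
static BlockNumber
next_block_cb(ReadStream *stream, void *callback_private_data,
              void *per_buffer_data)
{
    NextBlockState *state = (NextBlockState *) callback_private_data;

    if (state->next >= state->nblocks)
        return InvalidBlockNumber;
    return state->next++;
}

static void
scan_whole_relation(Relation rel)
{
    NextBlockState state = {0, RelationGetNumberOfBlocks(rel)};
    ReadStream *stream;
    Buffer      buffer;

    stream = read_stream_begin_relation(READ_STREAM_DEFAULT,
                                        NULL,   /* default buffer strategy */
                                        rel,
                                        MAIN_FORKNUM,
                                        next_block_cb,
                                        &state,
                                        0);     /* no per-buffer data here */

    /* Consume: each call returns a pinned buffer, InvalidBuffer at the end. */
    while (BufferIsValid(buffer = read_stream_next_buffer(stream, NULL)))
    {
        /* ... examine the page ... */
        ReleaseBuffer(buffer);
    }

    read_stream_end(stream);
}

The design point mirrored in the patch: the callback decides which blocks to read (and may skip some), while the consumer only ever sees buffers for blocks that were actually fetched. That is why skipped all-visible pages cannot be reported per block any more and are instead accumulated in rs_empty_tuples_pending and emitted once the stream returns InvalidBuffer.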