1 /*-------------------------------------------------------------------------
2  *
3  * hashpage.c
4  *	  Hash table page management code for the Postgres hash access method
5  *
6  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *	  src/backend/access/hash/hashpage.c
12  *
13  * NOTES
14  *	  Postgres hash pages look like ordinary relation pages.  The opaque
15  *	  data at high addresses includes information about the page including
16  *	  whether a page is an overflow page or a true bucket, the bucket
17  *	  number, and the block numbers of the preceding and following pages
18  *	  in the same bucket.
19  *
20  *	  The first page in a hash relation, page zero, is special -- it stores
21  *	  information describing the hash table; it is referred to as the
22  *	  "meta page." Pages one and higher store the actual data.
23  *
24  *	  There are also bitmap pages, which are not manipulated here;
25  *	  see hashovfl.c.
26  *
27  *-------------------------------------------------------------------------
28  */
29 #include "postgres.h"
30 
31 #include "access/hash.h"
32 #include "access/hash_xlog.h"
33 #include "miscadmin.h"
34 #include "storage/lmgr.h"
35 #include "storage/smgr.h"
36 #include "storage/predicate.h"
37 
38 
39 static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
40 					uint32 nblocks);
41 static void _hash_splitbucket(Relation rel, Buffer metabuf,
42 				  Bucket obucket, Bucket nbucket,
43 				  Buffer obuf,
44 				  Buffer nbuf,
45 				  HTAB *htab,
46 				  uint32 maxbucket,
47 				  uint32 highmask, uint32 lowmask);
48 static void log_split_page(Relation rel, Buffer buf);
49 
50 
51 /*
52  * We use high-concurrency locking on hash indexes (see README for an overview
53  * of the locking rules).  However, we can skip taking lmgr locks when the
54  * index is local to the current backend (ie, either temp or new in the
55  * current transaction).  No one else can see it, so there's no reason to
56  * take locks.  We still take buffer-level locks, but not lmgr locks.
57  */
58 #define USELOCKING(rel)		(!RELATION_IS_LOCAL(rel))
59 
60 
61 /*
62  *	_hash_getbuf() -- Get a buffer by block number for read or write.
63  *
64  *		'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
65  *		'flags' is a bitwise OR of the allowed page types.
66  *
67  *		This must be used only to fetch pages that are expected to be valid
68  *		already.  _hash_checkpage() is applied using the given flags.
69  *
70  *		When this routine returns, the appropriate lock is set on the
71  *		requested buffer and its reference count has been incremented
72  *		(ie, the buffer is "locked and pinned").
73  *
74  *		P_NEW is disallowed because this routine can only be used
75  *		to access pages that are known to be before the filesystem EOF.
76  *		Extending the index should be done with _hash_getnewbuf.
77  */
78 Buffer
79 _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
80 {
81 	Buffer		buf;
82 
83 	if (blkno == P_NEW)
84 		elog(ERROR, "hash AM does not use P_NEW");
85 
86 	buf = ReadBuffer(rel, blkno);
87 
88 	if (access != HASH_NOLOCK)
89 		LockBuffer(buf, access);
90 
91 	/* ref count and lock type are correct */
92 
93 	_hash_checkpage(rel, buf, flags);
94 
95 	return buf;
96 }
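
/*
 * Editor's illustrative sketch (not part of the original file; kept under
 * NOT_USED so it is never compiled): the typical fetch/examine/release
 * pattern for a page already known to exist, using _hash_getbuf() and
 * _hash_relbuf().  "rel" and "blkno" are assumed to be supplied by the
 * caller.
 */
#ifdef NOT_USED
static void
example_read_known_page(Relation rel, BlockNumber blkno)
{
	Buffer		buf;
	Page		page;

	/* pin and share-lock the page; also verifies it is a bucket page */
	buf = _hash_getbuf(rel, blkno, HASH_READ, LH_BUCKET_PAGE);
	page = BufferGetPage(buf);

	/* ... inspect "page" while the lock is held ... */
	(void) page;

	/* release lock and pin together */
	_hash_relbuf(rel, buf);
}
#endif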
97 
98 /*
99  * _hash_getbuf_with_condlock_cleanup() -- Try to get a buffer for cleanup.
100  *
101  *		We read the page and try to acquire a cleanup lock.  If we get it,
102  *		we return the buffer; otherwise, we return InvalidBuffer.
103  */
104 Buffer
105 _hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags)
106 {
107 	Buffer		buf;
108 
109 	if (blkno == P_NEW)
110 		elog(ERROR, "hash AM does not use P_NEW");
111 
112 	buf = ReadBuffer(rel, blkno);
113 
114 	if (!ConditionalLockBufferForCleanup(buf))
115 	{
116 		ReleaseBuffer(buf);
117 		return InvalidBuffer;
118 	}
119 
120 	/* ref count and lock type are correct */
121 
122 	_hash_checkpage(rel, buf, flags);
123 
124 	return buf;
125 }
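
/*
 * Editor's sketch (not compiled): how a split or vacuum-like caller might
 * use the conditional-cleanup-lock variant, falling back gracefully when
 * the lock cannot be obtained.  "rel" and "blkno" are assumed inputs.
 */
#ifdef NOT_USED
static bool
example_try_cleanup(Relation rel, BlockNumber blkno)
{
	Buffer		buf;

	buf = _hash_getbuf_with_condlock_cleanup(rel, blkno, LH_BUCKET_PAGE);
	if (!BufferIsValid(buf))
		return false;			/* someone else holds a pin; give up */

	/* ... perform work requiring a cleanup lock ... */

	_hash_relbuf(rel, buf);
	return true;
}
#endif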
126 
127 /*
128  *	_hash_getinitbuf() -- Get and initialize a buffer by block number.
129  *
130  *		This must be used only to fetch pages that are known to be before
131  *		the index's filesystem EOF, but are to be filled from scratch.
132  *		_hash_pageinit() is applied automatically.  Otherwise it has
133  *		effects similar to _hash_getbuf() with access = HASH_WRITE.
134  *
135  *		When this routine returns, a write lock is set on the
136  *		requested buffer and its reference count has been incremented
137  *		(ie, the buffer is "locked and pinned").
138  *
139  *		P_NEW is disallowed because this routine can only be used
140  *		to access pages that are known to be before the filesystem EOF.
141  *		Extending the index should be done with _hash_getnewbuf.
142  */
143 Buffer
144 _hash_getinitbuf(Relation rel, BlockNumber blkno)
145 {
146 	Buffer		buf;
147 
148 	if (blkno == P_NEW)
149 		elog(ERROR, "hash AM does not use P_NEW");
150 
151 	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
152 							 NULL);
153 
154 	/* ref count and lock type are correct */
155 
156 	/* initialize the page */
157 	_hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
158 
159 	return buf;
160 }
161 
162 /*
163  *	_hash_initbuf() -- Get and initialize a buffer by bucket number.
164  */
165 void
166 _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
167 			  bool initpage)
168 {
169 	HashPageOpaque pageopaque;
170 	Page		page;
171 
172 	page = BufferGetPage(buf);
173 
174 	/* initialize the page */
175 	if (initpage)
176 		_hash_pageinit(page, BufferGetPageSize(buf));
177 
178 	pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
179 
180 	/*
181 	 * Set hasho_prevblkno with current hashm_maxbucket. This value will be
182 	 * used to validate cached HashMetaPageData. See
183 	 * _hash_getbucketbuf_from_hashkey().
184 	 */
185 	pageopaque->hasho_prevblkno = max_bucket;
186 	pageopaque->hasho_nextblkno = InvalidBlockNumber;
187 	pageopaque->hasho_bucket = num_bucket;
188 	pageopaque->hasho_flag = flag;
189 	pageopaque->hasho_page_id = HASHO_PAGE_ID;
190 }
191 
192 /*
193  *	_hash_getnewbuf() -- Get a new page at the end of the index.
194  *
195  *		This has the same API as _hash_getinitbuf, except that we are adding
196  *		a page to the index, and hence expect the page to be past the
197  *		logical EOF.  (However, we have to support the case where it isn't,
198  *		since a prior try might have crashed after extending the filesystem
199  *		EOF but before updating the metapage to reflect the added page.)
200  *
201  *		It is caller's responsibility to ensure that only one process can
202  *		extend the index at a time.  In practice, this function is called
203  *		only while holding write lock on the metapage, because adding a page
204  *		is always associated with an update of metapage data.
205  */
206 Buffer
207 _hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
208 {
209 	BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
210 	Buffer		buf;
211 
212 	if (blkno == P_NEW)
213 		elog(ERROR, "hash AM does not use P_NEW");
214 	if (blkno > nblocks)
215 		elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
216 			 RelationGetRelationName(rel));
217 
218 	/* smgr insists we use P_NEW to extend the relation */
219 	if (blkno == nblocks)
220 	{
221 		buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
222 		if (BufferGetBlockNumber(buf) != blkno)
223 			elog(ERROR, "unexpected hash relation size: %u, should be %u",
224 				 BufferGetBlockNumber(buf), blkno);
225 		LockBuffer(buf, HASH_WRITE);
226 	}
227 	else
228 	{
229 		buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
230 								 NULL);
231 	}
232 
233 	/* ref count and lock type are correct */
234 
235 	/* initialize the page */
236 	_hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
237 
238 	return buf;
239 }
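
/*
 * Editor's sketch (not compiled): the intended calling pattern for
 * _hash_getnewbuf().  The caller is assumed to hold a write lock on the
 * metapage (serializing extension) and to have computed the new block
 * number from the metapage, e.g. with BUCKET_TO_BLKNO as _hash_init() and
 * _hash_expandtable() do.
 */
#ifdef NOT_USED
static Buffer
example_extend_index(Relation rel, HashMetaPage metap, Bucket bucket)
{
	BlockNumber blkno = BUCKET_TO_BLKNO(metap, bucket);

	/* returns the new page zeroed, initialized, pinned and write-locked */
	return _hash_getnewbuf(rel, blkno, MAIN_FORKNUM);
}
#endif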
240 
241 /*
242  *	_hash_getbuf_with_strategy() -- Get a buffer with nondefault strategy.
243  *
244  *		This is identical to _hash_getbuf() but also allows a buffer access
245  *		strategy to be specified.  We use this for VACUUM operations.
246  */
247 Buffer
248 _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
249 						   int access, int flags,
250 						   BufferAccessStrategy bstrategy)
251 {
252 	Buffer		buf;
253 
254 	if (blkno == P_NEW)
255 		elog(ERROR, "hash AM does not use P_NEW");
256 
257 	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
258 
259 	if (access != HASH_NOLOCK)
260 		LockBuffer(buf, access);
261 
262 	/* ref count and lock type are correct */
263 
264 	_hash_checkpage(rel, buf, flags);
265 
266 	return buf;
267 }
268 
269 /*
270  *	_hash_relbuf() -- release a locked buffer.
271  *
272  * Lock and pin (refcount) are both dropped.
273  */
274 void
275 _hash_relbuf(Relation rel, Buffer buf)
276 {
277 	UnlockReleaseBuffer(buf);
278 }
279 
280 /*
281  *	_hash_dropbuf() -- release an unlocked buffer.
282  *
283  * This is used to unpin a buffer on which we hold no lock.
284  */
285 void
286 _hash_dropbuf(Relation rel, Buffer buf)
287 {
288 	ReleaseBuffer(buf);
289 }
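
/*
 * Editor's sketch (not compiled): the two-step release pattern used
 * throughout this file -- drop the buffer content lock early but keep the
 * pin, then unpin later with _hash_dropbuf().  "buf" is assumed to be a
 * buffer obtained earlier with _hash_getbuf() or similar.
 */
#ifdef NOT_USED
static void
example_unlock_then_unpin(Relation rel, Buffer buf)
{
	/* give up the content lock but keep the page pinned */
	LockBuffer(buf, BUFFER_LOCK_UNLOCK);

	/* ... do work that must not hold a buffer lock ... */

	/* finally release the pin */
	_hash_dropbuf(rel, buf);
}
#endif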
290 
291 /*
292  *	_hash_dropscanbuf() -- release buffers used in scan.
293  *
294  * This routine unpins the buffers used during scan on which we
295  * hold no lock.
296  */
297 void
298 _hash_dropscanbuf(Relation rel, HashScanOpaque so)
299 {
300 	/* release pin we hold on primary bucket page */
301 	if (BufferIsValid(so->hashso_bucket_buf) &&
302 		so->hashso_bucket_buf != so->currPos.buf)
303 		_hash_dropbuf(rel, so->hashso_bucket_buf);
304 	so->hashso_bucket_buf = InvalidBuffer;
305 
306 	/* release pin we hold on primary bucket page of bucket being split */
307 	if (BufferIsValid(so->hashso_split_bucket_buf) &&
308 		so->hashso_split_bucket_buf != so->currPos.buf)
309 		_hash_dropbuf(rel, so->hashso_split_bucket_buf);
310 	so->hashso_split_bucket_buf = InvalidBuffer;
311 
312 	/* release any pin we still hold */
313 	if (BufferIsValid(so->currPos.buf))
314 		_hash_dropbuf(rel, so->currPos.buf);
315 	so->currPos.buf = InvalidBuffer;
316 
317 	/* reset split scan */
318 	so->hashso_buc_populated = false;
319 	so->hashso_buc_split = false;
320 }
321 
322 
323 /*
324  *	_hash_init() -- Initialize the metadata page of a hash index,
325  *				the initial buckets, and the initial bitmap page.
326  *
327  * The initial number of buckets is dependent on num_tuples, an estimate
328  * of the number of tuples to be loaded into the index initially.  The
329  * chosen number of buckets is returned.
330  *
331  * We are fairly cavalier about locking here, since we know that no one else
332  * could be accessing this index.  In particular the rule about not holding
333  * multiple buffer locks is ignored.
334  */
335 uint32
336 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
337 {
338 	Buffer		metabuf;
339 	Buffer		buf;
340 	Buffer		bitmapbuf;
341 	Page		pg;
342 	HashMetaPage metap;
343 	RegProcedure procid;
344 	int32		data_width;
345 	int32		item_width;
346 	int32		ffactor;
347 	uint32		num_buckets;
348 	uint32		i;
349 	bool		use_wal;
350 
351 	/* safety check */
352 	if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
353 		elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
354 			 RelationGetRelationName(rel));
355 
356 	/*
357 	 * WAL log creation of pages if the relation is persistent, or this is the
358 	 * init fork.  Init forks for unlogged relations always need to be WAL
359 	 * logged.
360 	 */
361 	use_wal = RelationNeedsWAL(rel) || forkNum == INIT_FORKNUM;
362 
363 	/*
364 	 * Determine the target fill factor (in tuples per bucket) for this index.
365 	 * The idea is to make the fill factor correspond to pages about as full
366 	 * as the user-settable fillfactor parameter says.  We can compute it
367 	 * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
368 	 */
369 	data_width = sizeof(uint32);
370 	item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
371 		sizeof(ItemIdData);		/* include the line pointer */
372 	ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
373 	/* keep to a sane range */
374 	if (ffactor < 10)
375 		ffactor = 10;
376 
377 	procid = index_getprocid(rel, 1, HASHSTANDARD_PROC);
378 
379 	/*
380 	 * We initialize the metapage, the first N bucket pages, and the first
381 	 * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
382 	 * calls to occur.  This ensures that the smgr level has the right idea of
383 	 * the physical index length.
384 	 *
385 	 * Critical section not required, because on error the creation of the
386 	 * whole relation will be rolled back.
387 	 */
388 	metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
389 	_hash_init_metabuffer(metabuf, num_tuples, procid, ffactor, false);
390 	MarkBufferDirty(metabuf);
391 
392 	pg = BufferGetPage(metabuf);
393 	metap = HashPageGetMeta(pg);
394 
395 	/* XLOG stuff */
396 	if (use_wal)
397 	{
398 		xl_hash_init_meta_page xlrec;
399 		XLogRecPtr	recptr;
400 
401 		xlrec.num_tuples = num_tuples;
402 		xlrec.procid = metap->hashm_procid;
403 		xlrec.ffactor = metap->hashm_ffactor;
404 
405 		XLogBeginInsert();
406 		XLogRegisterData((char *) &xlrec, SizeOfHashInitMetaPage);
407 		XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
408 
409 		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_META_PAGE);
410 
411 		PageSetLSN(BufferGetPage(metabuf), recptr);
412 	}
413 
414 	num_buckets = metap->hashm_maxbucket + 1;
415 
416 	/*
417 	 * Release buffer lock on the metapage while we initialize buckets.
418 	 * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
419 	 * won't accomplish anything.  It's a bad idea to hold buffer locks for
420 	 * long intervals in any case, since that can block the bgwriter.
421 	 */
422 	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
423 
424 	/*
425 	 * Initialize and WAL Log the first N buckets
426 	 */
427 	for (i = 0; i < num_buckets; i++)
428 	{
429 		BlockNumber blkno;
430 
431 		/* Allow interrupts, in case N is huge */
432 		CHECK_FOR_INTERRUPTS();
433 
434 		blkno = BUCKET_TO_BLKNO(metap, i);
435 		buf = _hash_getnewbuf(rel, blkno, forkNum);
436 		_hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
437 		MarkBufferDirty(buf);
438 
439 		if (use_wal)
440 			log_newpage(&rel->rd_node,
441 						forkNum,
442 						blkno,
443 						BufferGetPage(buf),
444 						true);
445 		_hash_relbuf(rel, buf);
446 	}
447 
448 	/* Now reacquire buffer lock on metapage */
449 	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
450 
451 	/*
452 	 * Initialize bitmap page
453 	 */
454 	bitmapbuf = _hash_getnewbuf(rel, num_buckets + 1, forkNum);
455 	_hash_initbitmapbuffer(bitmapbuf, metap->hashm_bmsize, false);
456 	MarkBufferDirty(bitmapbuf);
457 
458 	/* add the new bitmap page to the metapage's list of bitmaps */
459 	/* metapage already has a write lock */
460 	if (metap->hashm_nmaps >= HASH_MAX_BITMAPS)
461 		ereport(ERROR,
462 				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
463 				 errmsg("out of overflow pages in hash index \"%s\"",
464 						RelationGetRelationName(rel))));
465 
466 	metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;
467 
468 	metap->hashm_nmaps++;
469 	MarkBufferDirty(metabuf);
470 
471 	/* XLOG stuff */
472 	if (use_wal)
473 	{
474 		xl_hash_init_bitmap_page xlrec;
475 		XLogRecPtr	recptr;
476 
477 		xlrec.bmsize = metap->hashm_bmsize;
478 
479 		XLogBeginInsert();
480 		XLogRegisterData((char *) &xlrec, SizeOfHashInitBitmapPage);
481 		XLogRegisterBuffer(0, bitmapbuf, REGBUF_WILL_INIT);
482 
483 		/*
484 		 * This is safe only because nobody else can be modifying the index at
485 		 * this stage; it's only visible to the transaction that is creating
486 		 * it.
487 		 */
488 		XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
489 
490 		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_BITMAP_PAGE);
491 
492 		PageSetLSN(BufferGetPage(bitmapbuf), recptr);
493 		PageSetLSN(BufferGetPage(metabuf), recptr);
494 	}
495 
496 	/* all done */
497 	_hash_relbuf(rel, bitmapbuf);
498 	_hash_relbuf(rel, metabuf);
499 
500 	return num_buckets;
501 }
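
/*
 * Editor's sketch (not compiled): roughly how the index build path invokes
 * _hash_init().  The real caller lives in hash.c; the estimate "reltuples"
 * is assumed to come from the build's heap scan.
 */
#ifdef NOT_USED
static void
example_build_init(Relation index, double reltuples)
{
	uint32		num_buckets;

	/* creates the metapage, the initial buckets, and the first bitmap page */
	num_buckets = _hash_init(index, reltuples, MAIN_FORKNUM);
	elog(DEBUG1, "initialized hash index with %u buckets", num_buckets);
}
#endif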
502 
503 /*
504  *	_hash_init_metabuffer() -- Initialize the metadata page of a hash index.
505  */
506 void
507 _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
508 					  uint16 ffactor, bool initpage)
509 {
510 	HashMetaPage metap;
511 	HashPageOpaque pageopaque;
512 	Page		page;
513 	double		dnumbuckets;
514 	uint32		num_buckets;
515 	uint32		spare_index;
516 	uint32		i;
517 
518 	/*
519 	 * Choose the number of initial bucket pages to match the fill factor
520 	 * given the estimated number of tuples.  We round the result up to the
521 	 * total number of buckets that has to be allocated before using its
522 	 * _hashm_spare element.  However, always force at least 2 bucket pages.  The
523 	 * upper limit is determined by considerations explained in
524 	 * _hash_expandtable().
525 	 */
526 	dnumbuckets = num_tuples / ffactor;
527 	if (dnumbuckets <= 2.0)
528 		num_buckets = 2;
529 	else if (dnumbuckets >= (double) 0x40000000)
530 		num_buckets = 0x40000000;
531 	else
532 		num_buckets = _hash_get_totalbuckets(_hash_spareindex(dnumbuckets));
533 
534 	spare_index = _hash_spareindex(num_buckets);
535 	Assert(spare_index < HASH_MAX_SPLITPOINTS);
536 
537 	page = BufferGetPage(buf);
538 	if (initpage)
539 		_hash_pageinit(page, BufferGetPageSize(buf));
540 
541 	pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
542 	pageopaque->hasho_prevblkno = InvalidBlockNumber;
543 	pageopaque->hasho_nextblkno = InvalidBlockNumber;
544 	pageopaque->hasho_bucket = -1;
545 	pageopaque->hasho_flag = LH_META_PAGE;
546 	pageopaque->hasho_page_id = HASHO_PAGE_ID;
547 
548 	metap = HashPageGetMeta(page);
549 
550 	metap->hashm_magic = HASH_MAGIC;
551 	metap->hashm_version = HASH_VERSION;
552 	metap->hashm_ntuples = 0;
553 	metap->hashm_nmaps = 0;
554 	metap->hashm_ffactor = ffactor;
555 	metap->hashm_bsize = HashGetMaxBitmapSize(page);
556 	/* find largest bitmap array size that will fit in page size */
557 	for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
558 	{
559 		if ((1 << i) <= metap->hashm_bsize)
560 			break;
561 	}
562 	Assert(i > 0);
563 	metap->hashm_bmsize = 1 << i;
564 	metap->hashm_bmshift = i + BYTE_TO_BIT;
565 	Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
566 
567 	/*
568 	 * Label the index with its primary hash support function's OID.  This is
569 	 * pretty useless for normal operation (in fact, hashm_procid is not used
570 	 * anywhere), but it might be handy for forensic purposes so we keep it.
571 	 */
572 	metap->hashm_procid = procid;
573 
574 	/*
575 	 * We initialize the index with N buckets, 0 .. N-1, occupying physical
576 	 * blocks 1 to N.  The first freespace bitmap page is in block N+1.
577 	 */
578 	metap->hashm_maxbucket = num_buckets - 1;
579 
580 	/*
581 	 * Set highmask to the smallest ((2 ^ x) - 1) value that is large enough
582 	 * to cover num_buckets.
583 	 */
584 	metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
585 	metap->hashm_lowmask = (metap->hashm_highmask >> 1);
586 
587 	MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
588 	MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));
589 
590 	/* Set up mapping for one spare page after the initial splitpoints */
591 	metap->hashm_spares[spare_index] = 1;
592 	metap->hashm_ovflpoint = spare_index;
593 	metap->hashm_firstfree = 0;
594 
595 	/*
596 	 * Set pd_lower just past the end of the metadata.  This is essential,
597 	 * because without doing so, metadata will be lost if xlog.c compresses
598 	 * the page.
599 	 */
600 	((PageHeader) page)->pd_lower =
601 		((char *) metap + sizeof(HashMetaPageData)) - (char *) page;
602 }
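
/*
 * Editor's worked example (not compiled): the mask arithmetic performed by
 * _hash_init_metabuffer() for a hypothetical index initialized with 10
 * buckets.  _hash_log2(11) is 4, so highmask becomes 15 and lowmask 7;
 * hash values are first masked with highmask, and results above
 * hashm_maxbucket are folded back with lowmask.
 */
#ifdef NOT_USED
static void
example_metapage_masks(void)
{
	uint32		num_buckets = 10;
	uint32		highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
	uint32		lowmask = highmask >> 1;

	Assert(highmask == 15);
	Assert(lowmask == 7);
}
#endif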
603 
604 /*
605  *	_hash_pageinit() -- Initialize a new hash index page.
606  */
607 void
608 _hash_pageinit(Page page, Size size)
609 {
610 	PageInit(page, size, sizeof(HashPageOpaqueData));
611 }
612 
613 /*
614  * Attempt to expand the hash table by creating one new bucket.
615  *
616  * This will silently do nothing if we don't get a cleanup lock on the old or
617  * new bucket.
618  *
619  * Complete the pending splits and remove the tuples from old bucket,
620  * if there are any left over from the previous split.
621  *
622  * The caller must hold a pin, but no lock, on the metapage buffer.
623  * The buffer is returned in the same state.
624  */
625 void
626 _hash_expandtable(Relation rel, Buffer metabuf)
627 {
628 	HashMetaPage metap;
629 	Bucket		old_bucket;
630 	Bucket		new_bucket;
631 	uint32		spare_ndx;
632 	BlockNumber start_oblkno;
633 	BlockNumber start_nblkno;
634 	Buffer		buf_nblkno;
635 	Buffer		buf_oblkno;
636 	Page		opage;
637 	Page		npage;
638 	HashPageOpaque oopaque;
639 	HashPageOpaque nopaque;
640 	uint32		maxbucket;
641 	uint32		highmask;
642 	uint32		lowmask;
643 	bool		metap_update_masks = false;
644 	bool		metap_update_splitpoint = false;
645 
646 restart_expand:
647 
648 	/*
649 	 * Write-lock the meta page.  It used to be necessary to acquire a
650 	 * heavyweight lock to begin a split, but that is no longer required.
651 	 */
652 	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
653 
654 	_hash_checkpage(rel, metabuf, LH_META_PAGE);
655 	metap = HashPageGetMeta(BufferGetPage(metabuf));
656 
657 	/*
658 	 * Check to see if split is still needed; someone else might have already
659 	 * done one while we waited for the lock.
660 	 *
661 	 * Make sure this stays in sync with _hash_doinsert()
662 	 */
663 	if (metap->hashm_ntuples <=
664 		(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
665 		goto fail;
666 
667 	/*
668 	 * Can't split anymore if maxbucket has reached its maximum possible
669 	 * value.
670 	 *
671 	 * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
672 	 * the calculation maxbucket+1 mustn't overflow).  Currently we restrict
673 	 * to half that because of overflow looping in _hash_log2() and
674 	 * insufficient space in hashm_spares[].  It's moot anyway because an
675 	 * index with 2^32 buckets would certainly overflow BlockNumber and hence
676 	 * _hash_alloc_buckets() would fail, but if we supported buckets smaller
677 	 * than a disk block then this would be an independent constraint.
678 	 *
679 	 * If you change this, see also the maximum initial number of buckets in
680 	 * _hash_init().
681 	 */
682 	if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
683 		goto fail;
684 
685 	/*
686 	 * Determine which bucket is to be split, and attempt to take cleanup lock
687 	 * on the old bucket.  If we can't get the lock, give up.
688 	 *
689 	 * The cleanup lock protects us not only against other backends, but
690 	 * against our own backend as well.
691 	 *
692 	 * The cleanup lock is mainly to protect the split from concurrent
693 	 * inserts. See src/backend/access/hash/README, Lock Definitions for
694 	 * further details.  Due to this locking restriction, if there is any
695 	 * pending scan, the split will give up, which is not ideal but harmless.
696 	 */
697 	new_bucket = metap->hashm_maxbucket + 1;
698 
699 	old_bucket = (new_bucket & metap->hashm_lowmask);
700 
701 	start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
702 
703 	buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE);
704 	if (!buf_oblkno)
705 		goto fail;
706 
707 	opage = BufferGetPage(buf_oblkno);
708 	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
709 
710 	/*
711 	 * We want to finish any pending split of the old bucket before starting a
712 	 * new one: there is no apparent benefit in deferring it, and handling
713 	 * splits that involve multiple buckets (in case the new split also fails)
714 	 * would complicate the code.  We don't need to consider the new bucket for
715 	 * completing the split here, as a re-split of the new bucket cannot start
716 	 * while there is still a pending split from the old bucket.
717 	 */
718 	if (H_BUCKET_BEING_SPLIT(oopaque))
719 	{
720 		/*
721 		 * Copy bucket mapping info now; refer to the comment in code below where
722 		 * we copy this information before calling _hash_splitbucket to see
723 		 * why this is okay.
724 		 */
725 		maxbucket = metap->hashm_maxbucket;
726 		highmask = metap->hashm_highmask;
727 		lowmask = metap->hashm_lowmask;
728 
729 		/*
730 		 * Release the lock on metapage and old_bucket, before completing the
731 		 * split.
732 		 */
733 		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
734 		LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);
735 
736 		_hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
737 						   highmask, lowmask);
738 
739 		/* release the pin on old buffer and retry for expand. */
740 		_hash_dropbuf(rel, buf_oblkno);
741 
742 		goto restart_expand;
743 	}
744 
745 	/*
746 	 * Clean up the tuples remaining from the previous split.  This operation
747 	 * requires a cleanup lock, and we already have one on the old bucket, so
748 	 * let's do it.  We also don't want to allow further splits from the
749 	 * bucket until the garbage of the previous split is cleaned.  This has
750 	 * two advantages: first, it helps to avoid bloat due to the garbage;
751 	 * second, during cleanup of the bucket we can always be sure that the
752 	 * garbage tuples belong to the most recently split bucket.  By contrast,
753 	 * if we allowed cleanup of the bucket after the meta page is updated to
754 	 * indicate the new split but before the actual split, the cleanup
755 	 * operation could not decide whether a tuple had been moved to the newly
756 	 * created bucket, and would end up deleting such tuples.
757 	 */
758 	if (H_NEEDS_SPLIT_CLEANUP(oopaque))
759 	{
760 		/*
761 		 * Copy bucket mapping info now; refer to the comment in code below
762 		 * where we copy this information before calling _hash_splitbucket to
763 		 * see why this is okay.
764 		 */
765 		maxbucket = metap->hashm_maxbucket;
766 		highmask = metap->hashm_highmask;
767 		lowmask = metap->hashm_lowmask;
768 
769 		/* Release the metapage lock. */
770 		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
771 
772 		hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
773 						  maxbucket, highmask, lowmask, NULL, NULL, true,
774 						  NULL, NULL);
775 
776 		_hash_dropbuf(rel, buf_oblkno);
777 
778 		goto restart_expand;
779 	}
780 
781 	/*
782 	 * There shouldn't be any active scan on new bucket.
783 	 *
784 	 * Note: it is safe to compute the new bucket's blkno here, even though we
785 	 * may still need to update the BUCKET_TO_BLKNO mapping.  This is because
786 	 * the current value of hashm_spares[hashm_ovflpoint] correctly shows
787 	 * where we are going to put a new splitpoint's worth of buckets.
788 	 */
789 	start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
790 
791 	/*
792 	 * If the split point is increasing we need to allocate a new batch of
793 	 * bucket pages.
794 	 */
795 	spare_ndx = _hash_spareindex(new_bucket + 1);
796 	if (spare_ndx > metap->hashm_ovflpoint)
797 	{
798 		uint32		buckets_to_add;
799 
800 		Assert(spare_ndx == metap->hashm_ovflpoint + 1);
801 
802 		/*
803 		 * We treat allocation of buckets as a separate WAL-logged action.
804 		 * Even if we fail after this operation, we won't leak bucket pages;
805 		 * rather, the next split will consume this space. In any case, even
806 		 * without failure we don't use all the space in one split operation.
807 		 */
808 		buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
809 		if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
810 		{
811 			/* can't split due to BlockNumber overflow */
812 			_hash_relbuf(rel, buf_oblkno);
813 			goto fail;
814 		}
815 	}
816 
817 	/*
818 	 * Physically allocate the new bucket's primary page.  We want to do this
819 	 * before changing the metapage's mapping info, in case we can't get the
820 	 * disk space.  Ideally, we don't need to check for cleanup lock on new
821 	 * bucket as no other backend could find this bucket unless meta page is
822 	 * updated.  However, it is good to be consistent with old bucket locking.
823 	 */
824 	buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
825 	if (!IsBufferCleanupOK(buf_nblkno))
826 	{
827 		_hash_relbuf(rel, buf_oblkno);
828 		_hash_relbuf(rel, buf_nblkno);
829 		goto fail;
830 	}
831 
832 	/*
833 	 * Since we are scribbling on the pages in the shared buffers, establish a
834 	 * critical section.  Any failure in this next code leaves us with a big
835 	 * problem: the metapage is effectively corrupt but could get written back
836 	 * to disk.
837 	 */
838 	START_CRIT_SECTION();
839 
840 	/*
841 	 * Okay to proceed with split.  Update the metapage bucket mapping info.
842 	 */
843 	metap->hashm_maxbucket = new_bucket;
844 
845 	if (new_bucket > metap->hashm_highmask)
846 	{
847 		/* Starting a new doubling */
848 		metap->hashm_lowmask = metap->hashm_highmask;
849 		metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
850 		metap_update_masks = true;
851 	}
852 
853 	/*
854 	 * If the split point is increasing we need to adjust the hashm_spares[]
855 	 * array and hashm_ovflpoint so that future overflow pages will be created
856 	 * beyond this new batch of bucket pages.
857 	 */
858 	if (spare_ndx > metap->hashm_ovflpoint)
859 	{
860 		metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
861 		metap->hashm_ovflpoint = spare_ndx;
862 		metap_update_splitpoint = true;
863 	}
864 
865 	MarkBufferDirty(metabuf);
866 
867 	/*
868 	 * Copy bucket mapping info now; this saves re-accessing the meta page
869 	 * inside _hash_splitbucket's inner loop.  Note that once we drop the
870 	 * split lock, other splits could begin, so these values might be out of
871 	 * date before _hash_splitbucket finishes.  That's okay, since all it
872 	 * needs is to tell which of these two buckets to map hashkeys into.
873 	 */
874 	maxbucket = metap->hashm_maxbucket;
875 	highmask = metap->hashm_highmask;
876 	lowmask = metap->hashm_lowmask;
877 
878 	opage = BufferGetPage(buf_oblkno);
879 	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
880 
881 	/*
882 	 * Mark the old bucket to indicate that split is in progress.  (At
883 	 * operation end, we will clear the split-in-progress flag.)  Also, for a
884 	 * primary bucket page, hasho_prevblkno stores the number of buckets that
885 	 * existed as of the last split, so we must update that value here.
886 	 */
887 	oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
888 	oopaque->hasho_prevblkno = maxbucket;
889 
890 	MarkBufferDirty(buf_oblkno);
891 
892 	npage = BufferGetPage(buf_nblkno);
893 
894 	/*
895 	 * initialize the new bucket's primary page and mark it to indicate that
896 	 * split is in progress.
897 	 */
898 	nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
899 	nopaque->hasho_prevblkno = maxbucket;
900 	nopaque->hasho_nextblkno = InvalidBlockNumber;
901 	nopaque->hasho_bucket = new_bucket;
902 	nopaque->hasho_flag = LH_BUCKET_PAGE | LH_BUCKET_BEING_POPULATED;
903 	nopaque->hasho_page_id = HASHO_PAGE_ID;
904 
905 	MarkBufferDirty(buf_nblkno);
906 
907 	/* XLOG stuff */
908 	if (RelationNeedsWAL(rel))
909 	{
910 		xl_hash_split_allocate_page xlrec;
911 		XLogRecPtr	recptr;
912 
913 		xlrec.new_bucket = maxbucket;
914 		xlrec.old_bucket_flag = oopaque->hasho_flag;
915 		xlrec.new_bucket_flag = nopaque->hasho_flag;
916 		xlrec.flags = 0;
917 
918 		XLogBeginInsert();
919 
920 		XLogRegisterBuffer(0, buf_oblkno, REGBUF_STANDARD);
921 		XLogRegisterBuffer(1, buf_nblkno, REGBUF_WILL_INIT);
922 		XLogRegisterBuffer(2, metabuf, REGBUF_STANDARD);
923 
924 		if (metap_update_masks)
925 		{
926 			xlrec.flags |= XLH_SPLIT_META_UPDATE_MASKS;
927 			XLogRegisterBufData(2, (char *) &metap->hashm_lowmask, sizeof(uint32));
928 			XLogRegisterBufData(2, (char *) &metap->hashm_highmask, sizeof(uint32));
929 		}
930 
931 		if (metap_update_splitpoint)
932 		{
933 			xlrec.flags |= XLH_SPLIT_META_UPDATE_SPLITPOINT;
934 			XLogRegisterBufData(2, (char *) &metap->hashm_ovflpoint,
935 								sizeof(uint32));
936 			XLogRegisterBufData(2,
937 								(char *) &metap->hashm_spares[metap->hashm_ovflpoint],
938 								sizeof(uint32));
939 		}
940 
941 		XLogRegisterData((char *) &xlrec, SizeOfHashSplitAllocPage);
942 
943 		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_ALLOCATE_PAGE);
944 
945 		PageSetLSN(BufferGetPage(buf_oblkno), recptr);
946 		PageSetLSN(BufferGetPage(buf_nblkno), recptr);
947 		PageSetLSN(BufferGetPage(metabuf), recptr);
948 	}
949 
950 	END_CRIT_SECTION();
951 
952 	/* drop lock, but keep pin */
953 	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
954 
955 	/* Relocate records to the new bucket */
956 	_hash_splitbucket(rel, metabuf,
957 					  old_bucket, new_bucket,
958 					  buf_oblkno, buf_nblkno, NULL,
959 					  maxbucket, highmask, lowmask);
960 
961 	/* all done, now release the pins on primary buckets. */
962 	_hash_dropbuf(rel, buf_oblkno);
963 	_hash_dropbuf(rel, buf_nblkno);
964 
965 	return;
966 
967 	/* Here if decide not to split or fail to acquire old bucket lock */
968 fail:
969 
970 	/* We didn't write the metapage, so just drop lock */
971 	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
972 }
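
/*
 * Editor's worked example (not compiled): the old/new bucket relationship
 * used by _hash_expandtable().  With 10 buckets (hashm_maxbucket = 9,
 * hashm_lowmask = 7), the next bucket to be created is number 10, and the
 * bucket that donates tuples to it is 10 & 7 = 2.
 */
#ifdef NOT_USED
static void
example_split_mapping(void)
{
	uint32		lowmask = 7;	/* hashm_lowmask for a 10-bucket index */
	Bucket		new_bucket = 10;	/* hashm_maxbucket + 1 */
	Bucket		old_bucket = new_bucket & lowmask;

	Assert(old_bucket == 2);
}
#endif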
973 
974 
975 /*
976  * _hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages
977  *
978  * This does not need to initialize the new bucket pages; we'll do that as
979  * each one is used by _hash_expandtable().  But we have to extend the logical
980  * EOF to the end of the splitpoint; this keeps smgr's idea of the EOF in
981  * sync with ours, so that we don't get complaints from smgr.
982  *
983  * We do this by writing a page of zeroes at the end of the splitpoint range.
984  * We expect that the filesystem will ensure that the intervening pages read
985  * as zeroes too.  On many filesystems this "hole" will not be allocated
986  * immediately, which means that the index file may end up more fragmented
987  * than if we forced it all to be allocated now; but since we don't scan
988  * hash indexes sequentially anyway, that probably doesn't matter.
989  *
990  * XXX It's annoying that this code is executed with the metapage lock held.
991  * We need to interlock against _hash_addovflpage() adding a new overflow page
992  * concurrently, but it'd likely be better to use LockRelationForExtension
993  * for the purpose.  OTOH, adding a splitpoint is a very infrequent operation,
994  * so it may not be worth worrying about.
995  *
996  * Returns true if successful, or false if allocation failed due to
997  * BlockNumber overflow.
998  */
999 static bool
1000 _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
1001 {
1002 	BlockNumber lastblock;
1003 	PGAlignedBlock zerobuf;
1004 	Page		page;
1005 	HashPageOpaque ovflopaque;
1006 
1007 	lastblock = firstblock + nblocks - 1;
1008 
1009 	/*
1010 	 * Check for overflow in block number calculation; if so, we cannot extend
1011 	 * the index anymore.
1012 	 */
1013 	if (lastblock < firstblock || lastblock == InvalidBlockNumber)
1014 		return false;
1015 
1016 	page = (Page) zerobuf.data;
1017 
1018 	/*
1019 	 * Initialize the page.  Just zeroing the page won't work; see
1020 	 * _hash_freeovflpage for similar usage.  We take care to make the special
1021 	 * space valid for the benefit of tools such as pageinspect.
1022 	 */
1023 	_hash_pageinit(page, BLCKSZ);
1024 
1025 	ovflopaque = (HashPageOpaque) PageGetSpecialPointer(page);
1026 
1027 	ovflopaque->hasho_prevblkno = InvalidBlockNumber;
1028 	ovflopaque->hasho_nextblkno = InvalidBlockNumber;
1029 	ovflopaque->hasho_bucket = -1;
1030 	ovflopaque->hasho_flag = LH_UNUSED_PAGE;
1031 	ovflopaque->hasho_page_id = HASHO_PAGE_ID;
1032 
1033 	if (RelationNeedsWAL(rel))
1034 		log_newpage(&rel->rd_node,
1035 					MAIN_FORKNUM,
1036 					lastblock,
1037 					zerobuf.data,
1038 					true);
1039 
1040 	RelationOpenSmgr(rel);
1041 	PageSetChecksumInplace(page, lastblock);
1042 	smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf.data, false);
1043 
1044 	return true;
1045 }
1046 
1047 
1048 /*
1049  * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
1050  *
1051  * This routine is used both to partition the tuples between the old and new
1052  * bucket and to finish an incomplete split operation.  To finish a
1053  * previously interrupted split, the caller must fill htab.  If htab is set,
1054  * we skip moving tuples whose TIDs already exist in htab; otherwise a NULL
1055  * value of htab indicates that all tuples belonging to the new bucket are
1056  * to be moved.
1057  *
1058  * We are splitting a bucket that consists of a base bucket page and zero
1059  * or more overflow (bucket chain) pages.  We must relocate tuples that
1060  * belong in the new bucket.
1061  *
1062  * The caller must hold cleanup locks on both buckets to ensure that
1063  * no one else is trying to access them (see README).
1064  *
1065  * The caller must hold a pin, but no lock, on the metapage buffer.
1066  * The buffer is returned in the same state.  (The metapage is only
1067  * touched if it becomes necessary to add or remove overflow pages.)
1068  *
1069  * Split needs to retain pin on primary bucket pages of both old and new
1070  * buckets till end of operation.  This is to prevent vacuum from starting
1071  * while a split is in progress.
1072  *
1073  * In addition, the caller must have created the new bucket's base page,
1074  * which is passed in buffer nbuf, pinned and write-locked.  The lock will be
1075  * released here and pin must be released by the caller.  (The API is set up
1076  * this way because we must do _hash_getnewbuf() before releasing the metapage
1077  * write lock.  So instead of passing the new bucket's start block number, we
1078  * pass an actual buffer.)
1079  */
1080 static void
1081 _hash_splitbucket(Relation rel,
1082 				  Buffer metabuf,
1083 				  Bucket obucket,
1084 				  Bucket nbucket,
1085 				  Buffer obuf,
1086 				  Buffer nbuf,
1087 				  HTAB *htab,
1088 				  uint32 maxbucket,
1089 				  uint32 highmask,
1090 				  uint32 lowmask)
1091 {
1092 	Buffer		bucket_obuf;
1093 	Buffer		bucket_nbuf;
1094 	Page		opage;
1095 	Page		npage;
1096 	HashPageOpaque oopaque;
1097 	HashPageOpaque nopaque;
1098 	OffsetNumber itup_offsets[MaxIndexTuplesPerPage];
1099 	IndexTuple	itups[MaxIndexTuplesPerPage];
1100 	Size		all_tups_size = 0;
1101 	int			i;
1102 	uint16		nitups = 0;
1103 
1104 	bucket_obuf = obuf;
1105 	opage = BufferGetPage(obuf);
1106 	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1107 
1108 	bucket_nbuf = nbuf;
1109 	npage = BufferGetPage(nbuf);
1110 	nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1111 
1112 	/* Copy the predicate locks from old bucket to new bucket. */
1113 	PredicateLockPageSplit(rel,
1114 						   BufferGetBlockNumber(bucket_obuf),
1115 						   BufferGetBlockNumber(bucket_nbuf));
1116 
1117 	/*
1118 	 * Partition the tuples in the old bucket between the old bucket and the
1119 	 * new bucket, advancing along the old bucket's overflow bucket chain and
1120 	 * adding overflow pages to the new bucket as needed.  Outer loop iterates
1121 	 * once per page in old bucket.
1122 	 */
1123 	for (;;)
1124 	{
1125 		BlockNumber oblkno;
1126 		OffsetNumber ooffnum;
1127 		OffsetNumber omaxoffnum;
1128 
1129 		/* Scan each tuple in old page */
1130 		omaxoffnum = PageGetMaxOffsetNumber(opage);
1131 		for (ooffnum = FirstOffsetNumber;
1132 			 ooffnum <= omaxoffnum;
1133 			 ooffnum = OffsetNumberNext(ooffnum))
1134 		{
1135 			IndexTuple	itup;
1136 			Size		itemsz;
1137 			Bucket		bucket;
1138 			bool		found = false;
1139 
1140 			/* skip dead tuples */
1141 			if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
1142 				continue;
1143 
1144 			/*
1145 			 * Before inserting a tuple, probe the hash table containing TIDs
1146 			 * of tuples belonging to the new bucket; if we find a match, skip
1147 			 * that tuple.  Otherwise, fetch the item's hash key (conveniently
1148 			 * stored in the item) and determine which bucket it now belongs
1149 			 * in.
1150 			 */
1151 			itup = (IndexTuple) PageGetItem(opage,
1152 											PageGetItemId(opage, ooffnum));
1153 
1154 			if (htab)
1155 				(void) hash_search(htab, &itup->t_tid, HASH_FIND, &found);
1156 
1157 			if (found)
1158 				continue;
1159 
1160 			bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
1161 										  maxbucket, highmask, lowmask);
1162 
1163 			if (bucket == nbucket)
1164 			{
1165 				IndexTuple	new_itup;
1166 
1167 				/*
1168 				 * make a copy of index tuple as we have to scribble on it.
1169 				 */
1170 				new_itup = CopyIndexTuple(itup);
1171 
1172 				/*
1173 				 * mark the index tuple as moved by split, such tuples are
1174 				 * skipped by scan if there is split in progress for a bucket.
1175 				 */
1176 				new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK;
1177 
1178 				/*
1179 				 * insert the tuple into the new bucket.  if it doesn't fit on
1180 				 * the current page in the new bucket, we must allocate a new
1181 				 * overflow page and place the tuple on that page instead.
1182 				 */
1183 				itemsz = IndexTupleSize(new_itup);
1184 				itemsz = MAXALIGN(itemsz);
1185 
1186 				if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))
1187 				{
1188 					/*
1189 					 * Change the shared buffer state in critical section,
1190 					 * otherwise any error could make it unrecoverable.
1191 					 */
1192 					START_CRIT_SECTION();
1193 
1194 					_hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
1195 					MarkBufferDirty(nbuf);
1196 					/* log the split operation before releasing the lock */
1197 					log_split_page(rel, nbuf);
1198 
1199 					END_CRIT_SECTION();
1200 
1201 					/* drop lock, but keep pin */
1202 					LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1203 
1204 					/* be tidy */
1205 					for (i = 0; i < nitups; i++)
1206 						pfree(itups[i]);
1207 					nitups = 0;
1208 					all_tups_size = 0;
1209 
1210 					/* chain to a new overflow page */
1211 					nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf) ? true : false);
1212 					npage = BufferGetPage(nbuf);
1213 					nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1214 				}
1215 
1216 				itups[nitups++] = new_itup;
1217 				all_tups_size += itemsz;
1218 			}
1219 			else
1220 			{
1221 				/*
1222 				 * the tuple stays on this page, so nothing to do.
1223 				 */
1224 				Assert(bucket == obucket);
1225 			}
1226 		}
1227 
1228 		oblkno = oopaque->hasho_nextblkno;
1229 
1230 		/* retain the pin on the old primary bucket */
1231 		if (obuf == bucket_obuf)
1232 			LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
1233 		else
1234 			_hash_relbuf(rel, obuf);
1235 
1236 		/* Exit loop if no more overflow pages in old bucket */
1237 		if (!BlockNumberIsValid(oblkno))
1238 		{
1239 			/*
1240 			 * Change the shared buffer state in critical section, otherwise
1241 			 * any error could make it unrecoverable.
1242 			 */
1243 			START_CRIT_SECTION();
1244 
1245 			_hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
1246 			MarkBufferDirty(nbuf);
1247 			/* log the split operation before releasing the lock */
1248 			log_split_page(rel, nbuf);
1249 
1250 			END_CRIT_SECTION();
1251 
1252 			if (nbuf == bucket_nbuf)
1253 				LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1254 			else
1255 				_hash_relbuf(rel, nbuf);
1256 
1257 			/* be tidy */
1258 			for (i = 0; i < nitups; i++)
1259 				pfree(itups[i]);
1260 			break;
1261 		}
1262 
1263 		/* Else, advance to next old page */
1264 		obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
1265 		opage = BufferGetPage(obuf);
1266 		oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1267 	}
1268 
1269 	/*
1270 	 * We're at the end of the old bucket chain, so we're done partitioning
1271 	 * the tuples.  Mark the old and new buckets to indicate split is
1272 	 * finished.
1273 	 *
1274 	 * To avoid deadlocks due to locking order of buckets, first lock the old
1275 	 * bucket and then the new bucket.
1276 	 */
1277 	LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
1278 	opage = BufferGetPage(bucket_obuf);
1279 	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
1280 
1281 	LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
1282 	npage = BufferGetPage(bucket_nbuf);
1283 	nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1284 
1285 	START_CRIT_SECTION();
1286 
1287 	oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
1288 	nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED;
1289 
1290 	/*
1291 	 * After the split is finished, mark the old bucket to indicate that it
1292 	 * contains deletable tuples.  We will clear split-cleanup flag after
1293 	 * deleting such tuples either at the end of split or at the next split
1294 	 * from old bucket or at the time of vacuum.
1295 	 */
1296 	oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP;
1297 
1298 	/*
1299 	 * Now write the buffers.  We don't release the locks here, as the caller
1300 	 * is responsible for releasing them.
1301 	 */
1302 	MarkBufferDirty(bucket_obuf);
1303 	MarkBufferDirty(bucket_nbuf);
1304 
1305 	if (RelationNeedsWAL(rel))
1306 	{
1307 		XLogRecPtr	recptr;
1308 		xl_hash_split_complete xlrec;
1309 
1310 		xlrec.old_bucket_flag = oopaque->hasho_flag;
1311 		xlrec.new_bucket_flag = nopaque->hasho_flag;
1312 
1313 		XLogBeginInsert();
1314 
1315 		XLogRegisterData((char *) &xlrec, SizeOfHashSplitComplete);
1316 
1317 		XLogRegisterBuffer(0, bucket_obuf, REGBUF_STANDARD);
1318 		XLogRegisterBuffer(1, bucket_nbuf, REGBUF_STANDARD);
1319 
1320 		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_COMPLETE);
1321 
1322 		PageSetLSN(BufferGetPage(bucket_obuf), recptr);
1323 		PageSetLSN(BufferGetPage(bucket_nbuf), recptr);
1324 	}
1325 
1326 	END_CRIT_SECTION();
1327 
1328 	/*
1329 	 * If possible, clean up the old bucket.  We might not be able to do this
1330 	 * if someone else has a pin on it, but if not then we can go ahead.  This
1331 	 * isn't absolutely necessary, but it reduces bloat; if we don't do it
1332 	 * now, VACUUM will do it eventually, but maybe not until new overflow
1333 	 * pages have been allocated.  Note that there's no need to clean up the
1334 	 * new bucket.
1335 	 */
1336 	if (IsBufferCleanupOK(bucket_obuf))
1337 	{
1338 		LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
1339 		hashbucketcleanup(rel, obucket, bucket_obuf,
1340 						  BufferGetBlockNumber(bucket_obuf), NULL,
1341 						  maxbucket, highmask, lowmask, NULL, NULL, true,
1342 						  NULL, NULL);
1343 	}
1344 	else
1345 	{
1346 		LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
1347 		LockBuffer(bucket_obuf, BUFFER_LOCK_UNLOCK);
1348 	}
1349 }
1350 
1351 /*
1352  *	_hash_finish_split() -- Finish the previously interrupted split operation
1353  *
1354  * To complete the split operation, we build a hash table of the TIDs already
1355  * present in the new bucket; the split operation then uses it to skip tuples
1356  * that were already moved before the split was interrupted.
1357  *
1358  * The caller must hold a pin, but no lock, on the metapage and old bucket's
1359  * primary page buffer.  The buffers are returned in the same state.  (The
1360  * metapage is only touched if it becomes necessary to add or remove overflow
1361  * pages.)
1362  */
1363 void
1364 _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
1365 				   uint32 maxbucket, uint32 highmask, uint32 lowmask)
1366 {
1367 	HASHCTL		hash_ctl;
1368 	HTAB	   *tidhtab;
1369 	Buffer		bucket_nbuf = InvalidBuffer;
1370 	Buffer		nbuf;
1371 	Page		npage;
1372 	BlockNumber nblkno;
1373 	BlockNumber bucket_nblkno;
1374 	HashPageOpaque npageopaque;
1375 	Bucket		nbucket;
1376 	bool		found;
1377 
1378 	/* Initialize hash tables used to track TIDs */
1379 	memset(&hash_ctl, 0, sizeof(hash_ctl));
1380 	hash_ctl.keysize = sizeof(ItemPointerData);
1381 	hash_ctl.entrysize = sizeof(ItemPointerData);
1382 	hash_ctl.hcxt = CurrentMemoryContext;
1383 
1384 	tidhtab =
1385 		hash_create("bucket ctids",
1386 					256,		/* arbitrary initial size */
1387 					&hash_ctl,
1388 					HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1389 
1390 	bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
1391 
1392 	/*
1393 	 * Scan the new bucket and build hash table of TIDs
1394 	 */
1395 	for (;;)
1396 	{
1397 		OffsetNumber noffnum;
1398 		OffsetNumber nmaxoffnum;
1399 
1400 		nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
1401 							LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
1402 
1403 		/* remember the primary bucket buffer to acquire cleanup lock on it. */
1404 		if (nblkno == bucket_nblkno)
1405 			bucket_nbuf = nbuf;
1406 
1407 		npage = BufferGetPage(nbuf);
1408 		npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1409 
1410 		/* Scan each tuple in new page */
1411 		nmaxoffnum = PageGetMaxOffsetNumber(npage);
1412 		for (noffnum = FirstOffsetNumber;
1413 			 noffnum <= nmaxoffnum;
1414 			 noffnum = OffsetNumberNext(noffnum))
1415 		{
1416 			IndexTuple	itup;
1417 
1418 			/* Fetch the item's TID and insert it in hash table. */
1419 			itup = (IndexTuple) PageGetItem(npage,
1420 											PageGetItemId(npage, noffnum));
1421 
1422 			(void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);
1423 
1424 			Assert(!found);
1425 		}
1426 
1427 		nblkno = npageopaque->hasho_nextblkno;
1428 
1429 		/*
1430 		 * Release our lock without modifying the buffer, and be sure to
1431 		 * retain the pin on the primary bucket.
1432 		 */
1433 		if (nbuf == bucket_nbuf)
1434 			LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
1435 		else
1436 			_hash_relbuf(rel, nbuf);
1437 
1438 		/* Exit loop if no more overflow pages in new bucket */
1439 		if (!BlockNumberIsValid(nblkno))
1440 			break;
1441 	}
1442 
1443 	/*
1444 	 * Conditionally get the cleanup lock on old and new buckets to perform
1445 	 * the split operation.  If we don't get the cleanup locks, silently give
1446 	 * up; the next insertion into the old bucket will try again to complete
1447 	 * the split.
1448 	 */
1449 	if (!ConditionalLockBufferForCleanup(obuf))
1450 	{
1451 		hash_destroy(tidhtab);
1452 		return;
1453 	}
1454 	if (!ConditionalLockBufferForCleanup(bucket_nbuf))
1455 	{
1456 		LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
1457 		hash_destroy(tidhtab);
1458 		return;
1459 	}
1460 
1461 	npage = BufferGetPage(bucket_nbuf);
1462 	npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
1463 	nbucket = npageopaque->hasho_bucket;
1464 
1465 	_hash_splitbucket(rel, metabuf, obucket,
1466 					  nbucket, obuf, bucket_nbuf, tidhtab,
1467 					  maxbucket, highmask, lowmask);
1468 
1469 	_hash_dropbuf(rel, bucket_nbuf);
1470 	hash_destroy(tidhtab);
1471 }
1472 
1473 /*
1474  *	log_split_page() -- Log the split operation
1475  *
1476  *	We log the split operation when the current page of the new bucket gets
1477  *	full, so we log the entire page.
1478  *
1479  *	'buf' must be locked by the caller which is also responsible for unlocking
1480  *	it.
1481  */
1482 static void
1483 log_split_page(Relation rel, Buffer buf)
1484 {
1485 	if (RelationNeedsWAL(rel))
1486 	{
1487 		XLogRecPtr	recptr;
1488 
1489 		XLogBeginInsert();
1490 
1491 		XLogRegisterBuffer(0, buf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);
1492 
1493 		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_PAGE);
1494 
1495 		PageSetLSN(BufferGetPage(buf), recptr);
1496 	}
1497 }
1498 
1499 /*
1500  *	_hash_getcachedmetap() -- Returns cached metapage data.
1501  *
1502  *	If metabuf is not InvalidBuffer, caller must hold a pin, but no lock, on
1503  *	the metapage.  If not set, we'll set it before returning if we have to
1504  *	refresh the cache, and return with a pin but no lock on it; caller is
1505  *	responsible for releasing the pin.
1506  *
1507  *	We refresh the cache if it's not initialized yet or force_refresh is true.
1508  */
1509 HashMetaPage
1510 _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
1511 {
1512 	Page		page;
1513 
1514 	Assert(metabuf);
1515 	if (force_refresh || rel->rd_amcache == NULL)
1516 	{
1517 		char	   *cache = NULL;
1518 
1519 		/*
1520 		 * It's important that we don't set rd_amcache to an invalid value.
1521 		 * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
1522 		 * install a pointer to the newly-allocated storage in the actual
1523 		 * relcache entry until both have succeeded.
1524 		 */
1525 		if (rel->rd_amcache == NULL)
1526 			cache = MemoryContextAlloc(rel->rd_indexcxt,
1527 									   sizeof(HashMetaPageData));
1528 
1529 		/* Read the metapage. */
1530 		if (BufferIsValid(*metabuf))
1531 			LockBuffer(*metabuf, BUFFER_LOCK_SHARE);
1532 		else
1533 			*metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ,
1534 									LH_META_PAGE);
1535 		page = BufferGetPage(*metabuf);
1536 
1537 		/* Populate the cache. */
1538 		if (rel->rd_amcache == NULL)
1539 			rel->rd_amcache = cache;
1540 		memcpy(rel->rd_amcache, HashPageGetMeta(page),
1541 			   sizeof(HashMetaPageData));
1542 
1543 		/* Release metapage lock, but keep the pin. */
1544 		LockBuffer(*metabuf, BUFFER_LOCK_UNLOCK);
1545 	}
1546 
1547 	return (HashMetaPage) rel->rd_amcache;
1548 }
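
/*
 * Editor's sketch (not compiled): typical use of _hash_getcachedmetap().
 * Callers pass InvalidBuffer the first time; if the function had to read
 * the metapage it leaves *metabuf pinned, so the caller must unpin it.
 */
#ifdef NOT_USED
static void
example_use_cached_metap(Relation rel)
{
	Buffer		metabuf = InvalidBuffer;
	HashMetaPage metap;

	metap = _hash_getcachedmetap(rel, &metabuf, false);
	Assert(metap != NULL);

	/* ... use metap->hashm_maxbucket, hashm_highmask, etc. ... */

	if (BufferIsValid(metabuf))
		_hash_dropbuf(rel, metabuf);
}
#endif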
1549 
1550 /*
1551  *	_hash_getbucketbuf_from_hashkey() -- Get the bucket's buffer for the given
1552  *										 hashkey.
1553  *
1554  *	Bucket pages do not move or get removed once they are allocated.  This
1555  *	gives us an opportunity to use the previously saved metapage contents to
1556  *	reach the target bucket buffer, instead of reading from the metapage every
1557  *	time.  That saves one buffer access each time we need the target bucket
1558  *	buffer, which is a helpful saving in bufmgr traffic and contention.
1559  *
1560  *	The access type parameter (HASH_READ or HASH_WRITE) indicates whether the
1561  *	bucket buffer has to be locked for reading or writing.
1562  *
1563  *	The out parameter cachedmetap is set with metapage contents used for
1564  *	hashkey to bucket buffer mapping. Some callers need this info to reach the
1565  *	old bucket in case of bucket split, see _hash_doinsert().
1566  */
1567 Buffer
1568 _hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, int access,
1569 								HashMetaPage *cachedmetap)
1570 {
1571 	HashMetaPage metap;
1572 	Buffer		buf;
1573 	Buffer		metabuf = InvalidBuffer;
1574 	Page		page;
1575 	Bucket		bucket;
1576 	BlockNumber blkno;
1577 	HashPageOpaque opaque;
1578 
1579 	/* We read from the target bucket buffer, so locking is a must. */
1580 	Assert(access == HASH_READ || access == HASH_WRITE);
1581 
1582 	metap = _hash_getcachedmetap(rel, &metabuf, false);
1583 	Assert(metap != NULL);
1584 
1585 	/*
1586 	 * Loop until we get a lock on the correct target bucket.
1587 	 */
1588 	for (;;)
1589 	{
1590 		/*
1591 		 * Compute the target bucket number, and convert to block number.
1592 		 */
1593 		bucket = _hash_hashkey2bucket(hashkey,
1594 									  metap->hashm_maxbucket,
1595 									  metap->hashm_highmask,
1596 									  metap->hashm_lowmask);
1597 
1598 		blkno = BUCKET_TO_BLKNO(metap, bucket);
1599 
1600 		/* Fetch the primary bucket page for the bucket */
1601 		buf = _hash_getbuf(rel, blkno, access, LH_BUCKET_PAGE);
1602 		page = BufferGetPage(buf);
1603 		opaque = (HashPageOpaque) PageGetSpecialPointer(page);
1604 		Assert(opaque->hasho_bucket == bucket);
1605 		Assert(opaque->hasho_prevblkno != InvalidBlockNumber);
1606 
1607 		/*
1608 		 * If this bucket hasn't been split, we're done.
1609 		 */
1610 		if (opaque->hasho_prevblkno <= metap->hashm_maxbucket)
1611 			break;
1612 
1613 		/* Drop lock on this buffer, update cached metapage, and retry. */
1614 		_hash_relbuf(rel, buf);
1615 		metap = _hash_getcachedmetap(rel, &metabuf, true);
1616 		Assert(metap != NULL);
1617 	}
1618 
1619 	if (BufferIsValid(metabuf))
1620 		_hash_dropbuf(rel, metabuf);
1621 
1622 	if (cachedmetap)
1623 		*cachedmetap = metap;
1624 
1625 	return buf;
1626 }
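
/*
 * Editor's sketch (not compiled): how a search or insert path might locate
 * the primary bucket page for a hash value.  The hash key is assumed to
 * have been computed already (e.g. with the index's hash support function);
 * the returned buffer is pinned and locked per "access".
 */
#ifdef NOT_USED
static void
example_lookup_bucket(Relation rel, uint32 hashkey)
{
	HashMetaPage cachedmetap;
	Buffer		buf;

	buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, HASH_READ,
										  &cachedmetap);

	/* ... scan the bucket chain starting at BufferGetPage(buf) ... */

	_hash_relbuf(rel, buf);
}
#endif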
1627