/*-------------------------------------------------------------------------
 *
 * nodeHash.c
 *	  Routines to hash relations for hashjoin
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeHash.c
 *
 *-------------------------------------------------------------------------
 */
/*
 * INTERFACE ROUTINES
 *		MultiExecHash	- generate an in-memory hash table of the relation
 *		ExecInitHash	- initialize node and subnodes
 *		ExecEndHash		- shutdown node and subnodes
 */

#include "postgres.h"

#include <math.h>
#include <limits.h>

#include "access/htup_details.h"
#include "catalog/pg_statistic.h"
#include "commands/tablespace.h"
#include "executor/execdebug.h"
#include "executor/hashjoin.h"
#include "executor/nodeHash.h"
#include "executor/nodeHashjoin.h"
#include "miscadmin.h"
#include "utils/dynahash.h"
#include "utils/memutils.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"


static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
					  int mcvsToUse);
static void ExecHashSkewTableInsert(HashJoinTable hashtable,
						TupleTableSlot *slot,
						uint32 hashvalue,
						int bucketNumber);
static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);

static void *dense_alloc(HashJoinTable hashtable, Size size);

/* ----------------------------------------------------------------
 *		ExecHash
 *
 *		stub for pro forma compliance
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecHash(HashState *node)
{
	elog(ERROR, "Hash node does not support ExecProcNode call convention");
	return NULL;
}

/* ----------------------------------------------------------------
 *		MultiExecHash
 *
 *		build hash table for hashjoin, doing partitioning if more
 *		than one batch is required.
 * ----------------------------------------------------------------
 */
Node *
MultiExecHash(HashState *node)
{
	PlanState  *outerNode;
	List	   *hashkeys;
	HashJoinTable hashtable;
	TupleTableSlot *slot;
	ExprContext *econtext;
	uint32		hashvalue;

	/* must provide our own instrumentation support */
	if (node->ps.instrument)
		InstrStartNode(node->ps.instrument);

	/*
	 * get state info from node
	 */
	outerNode = outerPlanState(node);
	hashtable = node->hashtable;

	/*
	 * set expression context
	 */
	hashkeys = node->hashkeys;
	econtext = node->ps.ps_ExprContext;

	/*
	 * get all inner tuples and insert into the hash table (or temp files)
	 */
	for (;;)
	{
		slot = ExecProcNode(outerNode);
		if (TupIsNull(slot))
			break;
		/* We have to compute the hash value */
		econtext->ecxt_innertuple = slot;
		if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
								 false, hashtable->keepNulls,
								 &hashvalue))
		{
			int			bucketNumber;

			bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
			if (bucketNumber != INVALID_SKEW_BUCKET_NO)
			{
				/* It's a skew tuple, so put it into that hash table */
				ExecHashSkewTableInsert(hashtable, slot, hashvalue,
										bucketNumber);
				hashtable->skewTuples += 1;
			}
			else
			{
				/* Not subject to skew optimization, so insert normally */
				ExecHashTableInsert(hashtable, slot, hashvalue);
			}
			hashtable->totalTuples += 1;
		}
	}

	/* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
	if (hashtable->nbuckets != hashtable->nbuckets_optimal)
		ExecHashIncreaseNumBuckets(hashtable);

	/* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
	hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
	if (hashtable->spaceUsed > hashtable->spacePeak)
		hashtable->spacePeak = hashtable->spaceUsed;

	/* must provide our own instrumentation support */
	if (node->ps.instrument)
		InstrStopNode(node->ps.instrument, hashtable->totalTuples);

	/*
	 * We do not return the hash table directly because it's not a subtype of
	 * Node, and so would violate the MultiExecProcNode API.  Instead, our
	 * parent Hashjoin node is expected to know how to fish it out of our node
	 * state.  Ugly but not really worth cleaning up, since Hashjoin knows
	 * quite a bit more about Hash besides that.
	 */
	return NULL;
}

/* ----------------------------------------------------------------
 *		ExecInitHash
 *
 *		Init routine for Hash node
 * ----------------------------------------------------------------
 */
HashState *
ExecInitHash(Hash *node, EState *estate, int eflags)
{
	HashState  *hashstate;

	/* check for unsupported flags */
	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

	/*
	 * create state structure
	 */
	hashstate = makeNode(HashState);
	hashstate->ps.plan = (Plan *) node;
	hashstate->ps.state = estate;
	hashstate->hashtable = NULL;
	hashstate->hashkeys = NIL;	/* will be set by parent HashJoin */

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &hashstate->ps);

	/*
	 * initialize our result slot
	 */
	ExecInitResultTupleSlot(estate, &hashstate->ps);

	/*
	 * initialize child expressions
	 */
	hashstate->ps.targetlist = (List *)
		ExecInitExpr((Expr *) node->plan.targetlist,
					 (PlanState *) hashstate);
	hashstate->ps.qual = (List *)
		ExecInitExpr((Expr *) node->plan.qual,
					 (PlanState *) hashstate);

	/*
	 * initialize child nodes
	 */
	outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);

	/*
	 * initialize tuple type. no need to initialize projection info because
	 * this node doesn't do projections
	 */
	ExecAssignResultTypeFromTL(&hashstate->ps);
	hashstate->ps.ps_ProjInfo = NULL;

	return hashstate;
}

/* ---------------------------------------------------------------
 *		ExecEndHash
 *
 *		clean up routine for Hash node
 * ----------------------------------------------------------------
 */
void
ExecEndHash(HashState *node)
{
	PlanState  *outerPlan;

	/*
	 * free exprcontext
	 */
	ExecFreeExprContext(&node->ps);

	/*
	 * shut down the subplan
	 */
	outerPlan = outerPlanState(node);
	ExecEndNode(outerPlan);
}

/* ----------------------------------------------------------------
 *		ExecHashTableCreate
 *
 *		create an empty hashtable data structure for hashjoin.
 * ----------------------------------------------------------------
 */
HashJoinTable
ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls)
{
	HashJoinTable hashtable;
	Plan	   *outerNode;
	int			nbuckets;
	int			nbatch;
	int			num_skew_mcvs;
	int			log2_nbuckets;
	int			nkeys;
	int			i;
	ListCell   *ho;
	MemoryContext oldcxt;

	/*
	 * Get information about the size of the relation to be hashed (it's the
	 * "outer" subtree of this node, but the inner relation of the hashjoin).
	 * Compute the appropriate size of the hash table.
	 */
	outerNode = outerPlan(node);

	ExecChooseHashTableSize(outerNode->plan_rows, outerNode->plan_width,
							OidIsValid(node->skewTable),
							&nbuckets, &nbatch, &num_skew_mcvs);

	/* nbuckets must be a power of 2 */
	log2_nbuckets = my_log2(nbuckets);
	Assert(nbuckets == (1 << log2_nbuckets));

	/*
	 * Initialize the hash table control block.
	 *
	 * The hashtable control block is just palloc'd from the executor's
	 * per-query memory context.  Everything else should be kept inside the
	 * subsidiary hashCxt or batchCxt.
	 */
	hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
	hashtable->nbuckets = nbuckets;
	hashtable->nbuckets_original = nbuckets;
	hashtable->nbuckets_optimal = nbuckets;
	hashtable->log2_nbuckets = log2_nbuckets;
	hashtable->log2_nbuckets_optimal = log2_nbuckets;
	hashtable->buckets = NULL;
	hashtable->keepNulls = keepNulls;
	hashtable->skewEnabled = false;
	hashtable->skewBucket = NULL;
	hashtable->skewBucketLen = 0;
	hashtable->nSkewBuckets = 0;
	hashtable->skewBucketNums = NULL;
	hashtable->nbatch = nbatch;
	hashtable->curbatch = 0;
	hashtable->nbatch_original = nbatch;
	hashtable->nbatch_outstart = nbatch;
	hashtable->growEnabled = true;
	hashtable->totalTuples = 0;
	hashtable->skewTuples = 0;
	hashtable->innerBatchFile = NULL;
	hashtable->outerBatchFile = NULL;
	hashtable->spaceUsed = 0;
	hashtable->spacePeak = 0;
	hashtable->spaceAllowed = work_mem * 1024L;
	hashtable->spaceUsedSkew = 0;
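	/* SKEW_WORK_MEM_PERCENT is 2 as of this writing, i.e. 2% of the budget */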
	hashtable->spaceAllowedSkew =
		hashtable->spaceAllowed * SKEW_WORK_MEM_PERCENT / 100;
	hashtable->chunks = NULL;

#ifdef HJDEBUG
	printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
		   hashtable, nbatch, nbuckets);
#endif

	/*
	 * Create temporary memory contexts in which to keep the hashtable working
	 * storage.  See notes in executor/hashjoin.h.
	 */
	hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
											   "HashTableContext",
											   ALLOCSET_DEFAULT_SIZES);

	hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
												"HashBatchContext",
												ALLOCSET_DEFAULT_SIZES);

	/* Allocate data that will live for the life of the hashjoin */

	oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);

	/*
	 * Get info about the hash functions to be used for each hash key. Also
	 * remember whether the join operators are strict.
	 */
	nkeys = list_length(hashOperators);
	hashtable->outer_hashfunctions =
		(FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
	hashtable->inner_hashfunctions =
		(FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
	hashtable->hashStrict = (bool *) palloc(nkeys * sizeof(bool));
	i = 0;
	foreach(ho, hashOperators)
	{
		Oid			hashop = lfirst_oid(ho);
		Oid			left_hashfn;
		Oid			right_hashfn;

		if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
			elog(ERROR, "could not find hash function for hash operator %u",
				 hashop);
		fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
		fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
		hashtable->hashStrict[i] = op_strict(hashop);
		i++;
	}

	if (nbatch > 1)
	{
		/*
		 * allocate and initialize the file arrays in hashCxt
		 */
		hashtable->innerBatchFile = (BufFile **)
			palloc0(nbatch * sizeof(BufFile *));
		hashtable->outerBatchFile = (BufFile **)
			palloc0(nbatch * sizeof(BufFile *));
		/* The files will not be opened until needed... */
		/* ... but make sure we have temp tablespaces established for them */
		PrepareTempTablespaces();
	}

	/*
	 * Prepare context for the first-scan space allocations; allocate the
	 * hashbucket array therein, and set each bucket "empty".
	 */
	MemoryContextSwitchTo(hashtable->batchCxt);

	hashtable->buckets = (HashJoinTuple *)
		palloc0(nbuckets * sizeof(HashJoinTuple));

	/*
	 * Set up for skew optimization, if possible and there's a need for more
	 * than one batch.  (In a one-batch join, there's no point in it.)
	 */
	if (nbatch > 1)
		ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);

	MemoryContextSwitchTo(oldcxt);

	return hashtable;
}


/*
 * Compute appropriate size for hashtable given the estimated size of the
 * relation to be hashed (number of rows and average row width).
 *
 * This is exported so that the planner's costsize.c can use it.
 */

/* Target bucket loading (tuples per bucket) */
#define NTUP_PER_BUCKET			1

void
ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
						int *numbuckets,
						int *numbatches,
						int *num_skew_mcvs)
{
	int			tupsize;
	double		inner_rel_bytes;
	long		bucket_bytes;
	long		hash_table_bytes;
	long		skew_table_bytes;
	long		max_pointers;
	long		mppow2;
	int			nbatch = 1;
	int			nbuckets;
	double		dbuckets;

	/* Force a plausible relation size if no info */
	if (ntuples <= 0.0)
		ntuples = 1000.0;

	/*
	 * Estimate tupsize based on footprint of tuple in hashtable... note this
	 * does not allow for any palloc overhead.  The manipulations of spaceUsed
	 * don't count palloc overhead either.
	 */
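	/*
	 * Illustrative only: if, say, HJTUPLE_OVERHEAD plus the MAXALIGN'd
	 * minimal-tuple header came to 32 bytes on some platform, a tuple with
	 * tupwidth = 40 would be costed at 72 bytes apiece.
	 */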
	tupsize = HJTUPLE_OVERHEAD +
		MAXALIGN(SizeofMinimalTupleHeader) +
		MAXALIGN(tupwidth);
	inner_rel_bytes = ntuples * tupsize;

	/*
	 * Target in-memory hashtable size is work_mem kilobytes.
	 */
	hash_table_bytes = work_mem * 1024L;

	/*
	 * If skew optimization is possible, estimate the number of skew buckets
	 * that will fit in the memory allowed, and decrement the assumed space
	 * available for the main hash table accordingly.
	 *
	 * We make the optimistic assumption that each skew bucket will contain
	 * one inner-relation tuple.  If that turns out to be low, we will recover
	 * at runtime by reducing the number of skew buckets.
	 *
	 * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
	 * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
	 * will round up to the next power of 2 and then multiply by 4 to reduce
	 * collisions.
	 */
	if (useskew)
	{
		skew_table_bytes = hash_table_bytes * SKEW_WORK_MEM_PERCENT / 100;

		/*----------
		 * Divisor is:
		 * size of a hash tuple +
		 * worst-case size of skewBucket[] per MCV +
		 * size of skewBucketNums[] entry +
		 * size of skew bucket struct itself
		 *----------
		 */
		*num_skew_mcvs = skew_table_bytes / (tupsize +
											 (8 * sizeof(HashSkewBucket *)) +
											 sizeof(int) +
											 SKEW_BUCKET_OVERHEAD);
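
		/*
		 * Illustrative only: with tupsize = 72 and 8-byte pointers, the
		 * divisor comes to 72 + 64 + 4 + SKEW_BUCKET_OVERHEAD bytes per MCV.
		 */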
		if (*num_skew_mcvs > 0)
			hash_table_bytes -= skew_table_bytes;
	}
	else
		*num_skew_mcvs = 0;

	/*
	 * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
	 * memory is filled, assuming a single batch; but limit the value so that
	 * the pointer arrays we'll try to allocate do not exceed work_mem nor
	 * MaxAllocSize.
	 *
	 * Note that both nbuckets and nbatch must be powers of 2 to make
	 * ExecHashGetBucketAndBatch fast.
	 */
	max_pointers = (work_mem * 1024L) / sizeof(HashJoinTuple);
	max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
	/* If max_pointers isn't a power of 2, must round it down to one */
	mppow2 = 1L << my_log2(max_pointers);
	if (max_pointers != mppow2)
		max_pointers = mppow2 / 2;
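	/* e.g., max_pointers = 100000 gives mppow2 = 131072, so it drops to 65536 */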

	/* Also ensure we avoid integer overflow in nbatch and nbuckets */
	/* (this step is redundant given the current value of MaxAllocSize) */
	max_pointers = Min(max_pointers, INT_MAX / 2);

	dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
	dbuckets = Min(dbuckets, max_pointers);
	nbuckets = (int) dbuckets;
	/* don't let nbuckets be really small, though ... */
	nbuckets = Max(nbuckets, 1024);
	/* ... and force it to be a power of 2. */
	nbuckets = 1 << my_log2(nbuckets);
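	/* e.g., ntuples = 100000 yields nbuckets = 131072 (2^17), memory permitting */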

	/*
	 * If there's not enough space to store the projected number of tuples and
	 * the required bucket headers, we will need multiple batches.
	 */
	bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
	if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
	{
		/* We'll need multiple batches */
		long		lbuckets;
		double		dbatch;
		int			minbatch;
		long		bucket_size;

		/*
		 * Estimate the number of buckets we'll want to have when work_mem is
		 * entirely full.  Each bucket will contain a bucket pointer plus
		 * NTUP_PER_BUCKET tuples, whose projected size already includes
		 * overhead for the hash code, pointer to the next tuple, etc.
		 */
		bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
		lbuckets = 1L << my_log2(hash_table_bytes / bucket_size);
		lbuckets = Min(lbuckets, max_pointers);
		nbuckets = (int) lbuckets;
		nbuckets = 1 << my_log2(nbuckets);
		bucket_bytes = nbuckets * sizeof(HashJoinTuple);

		/*
		 * Buckets are simple pointers to hashjoin tuples, while tupsize
		 * includes the pointer, hash code, and MinimalTupleData.  So buckets
		 * should never really exceed 25% of work_mem (even for
		 * NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
		 * 2^N bytes, where we might get more because of doubling. So let's
		 * look for 50% here.
		 */
		Assert(bucket_bytes <= hash_table_bytes / 2);

		/* Calculate required number of batches. */
		dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
		dbatch = Min(dbatch, max_pointers);
		minbatch = (int) dbatch;
		nbatch = 2;
		while (nbatch < minbatch)
			nbatch <<= 1;
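
		/*
		 * Illustrative example: 64MB of projected inner data against 7MB of
		 * usable memory gives minbatch = 10, which rounds up to nbatch = 16.
		 */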
	}

	Assert(nbuckets > 0);
	Assert(nbatch > 0);

	*numbuckets = nbuckets;
	*numbatches = nbatch;
}


/* ----------------------------------------------------------------
 *		ExecHashTableDestroy
 *
 *		destroy a hash table
 * ----------------------------------------------------------------
 */
void
ExecHashTableDestroy(HashJoinTable hashtable)
{
	int			i;

	/*
	 * Make sure all the temp files are closed.  We skip batch 0, since it
	 * can't have any temp files (and the arrays might not even exist if
	 * nbatch is only 1).
	 */
	for (i = 1; i < hashtable->nbatch; i++)
	{
		if (hashtable->innerBatchFile[i])
			BufFileClose(hashtable->innerBatchFile[i]);
		if (hashtable->outerBatchFile[i])
			BufFileClose(hashtable->outerBatchFile[i]);
	}

	/* Release working memory (batchCxt is a child, so it goes away too) */
	MemoryContextDelete(hashtable->hashCxt);

	/* And drop the control block */
	pfree(hashtable);
}

/*
 * ExecHashIncreaseNumBatches
 *		increase the original number of batches in order to reduce
 *		current memory consumption
 */
static void
ExecHashIncreaseNumBatches(HashJoinTable hashtable)
{
	int			oldnbatch = hashtable->nbatch;
	int			curbatch = hashtable->curbatch;
	int			nbatch;
	MemoryContext oldcxt;
	long		ninmemory;
	long		nfreed;
	HashMemoryChunk oldchunks;

	/* do nothing if we've decided to shut off growth */
	if (!hashtable->growEnabled)
		return;

	/* safety check to avoid overflow */
	if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
		return;

	nbatch = oldnbatch * 2;
	Assert(nbatch > 1);

#ifdef HJDEBUG
	printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
		   hashtable, nbatch, hashtable->spaceUsed);
#endif

	oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);

	if (hashtable->innerBatchFile == NULL)
	{
		/* we had no file arrays before */
		hashtable->innerBatchFile = (BufFile **)
			palloc0(nbatch * sizeof(BufFile *));
		hashtable->outerBatchFile = (BufFile **)
			palloc0(nbatch * sizeof(BufFile *));
		/* time to establish the temp tablespaces, too */
		PrepareTempTablespaces();
	}
	else
	{
		/* enlarge arrays and zero out added entries */
		hashtable->innerBatchFile = (BufFile **)
			repalloc(hashtable->innerBatchFile, nbatch * sizeof(BufFile *));
		hashtable->outerBatchFile = (BufFile **)
			repalloc(hashtable->outerBatchFile, nbatch * sizeof(BufFile *));
		MemSet(hashtable->innerBatchFile + oldnbatch, 0,
			   (nbatch - oldnbatch) * sizeof(BufFile *));
		MemSet(hashtable->outerBatchFile + oldnbatch, 0,
			   (nbatch - oldnbatch) * sizeof(BufFile *));
	}

	MemoryContextSwitchTo(oldcxt);

	hashtable->nbatch = nbatch;

	/*
	 * Scan through the existing hash table entries and dump out any that are
	 * no longer of the current batch.
	 */
	ninmemory = nfreed = 0;

	/* If we know we need to resize nbuckets, we can do it while rebatching. */
	if (hashtable->nbuckets_optimal != hashtable->nbuckets)
	{
		/* we never decrease the number of buckets */
		Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);

		hashtable->nbuckets = hashtable->nbuckets_optimal;
		hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;

		hashtable->buckets = repalloc(hashtable->buckets,
								sizeof(HashJoinTuple) * hashtable->nbuckets);
	}

	/*
	 * We will scan through the chunks directly, so that we can reset the
	 * buckets now and not have to keep track which tuples in the buckets have
	 * already been processed. We will free the old chunks as we go.
	 */
	memset(hashtable->buckets, 0, sizeof(HashJoinTuple) * hashtable->nbuckets);
	oldchunks = hashtable->chunks;
	hashtable->chunks = NULL;

	/* so, let's scan through the old chunks, and all tuples in each chunk */
	while (oldchunks != NULL)
	{
		HashMemoryChunk nextchunk = oldchunks->next;

		/* position within the buffer (up to oldchunks->used) */
		size_t		idx = 0;

		/* process all tuples stored in this chunk (and then free it) */
		while (idx < oldchunks->used)
		{
			HashJoinTuple hashTuple = (HashJoinTuple) (oldchunks->data + idx);
			MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
			int			hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
			int			bucketno;
			int			batchno;

			ninmemory++;
			ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
									  &bucketno, &batchno);

			if (batchno == curbatch)
			{
				/* keep tuple in memory - copy it into the new chunk */
				HashJoinTuple copyTuple;

				copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
				memcpy(copyTuple, hashTuple, hashTupleSize);

				/* and add it back to the appropriate bucket */
				copyTuple->next = hashtable->buckets[bucketno];
				hashtable->buckets[bucketno] = copyTuple;
			}
			else
			{
				/* dump it out */
				Assert(batchno > curbatch);
				ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
									  hashTuple->hashvalue,
									  &hashtable->innerBatchFile[batchno]);

				hashtable->spaceUsed -= hashTupleSize;
				nfreed++;
			}

			/* next tuple in this chunk */
			idx += MAXALIGN(hashTupleSize);

			/* allow this loop to be cancellable */
			CHECK_FOR_INTERRUPTS();
		}

		/* we're done with this chunk - free it and proceed to the next one */
		pfree(oldchunks);
		oldchunks = nextchunk;
	}

#ifdef HJDEBUG
	printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
		   hashtable, nfreed, ninmemory, hashtable->spaceUsed);
#endif

	/*
	 * If we dumped out either all or none of the tuples in the table, disable
	 * further expansion of nbatch.  This situation implies that we have
	 * enough tuples of identical hashvalues to overflow spaceAllowed.
	 * Increasing nbatch will not fix it since there's no way to subdivide the
	 * group any more finely. We have to just gut it out and hope the server
	 * has enough RAM.
	 */
	if (nfreed == 0 || nfreed == ninmemory)
	{
		hashtable->growEnabled = false;
#ifdef HJDEBUG
		printf("Hashjoin %p: disabling further increase of nbatch\n",
			   hashtable);
#endif
	}
}

/*
 * ExecHashIncreaseNumBuckets
 *		increase the original number of buckets in order to reduce
 *		number of tuples per bucket
 */
static void
ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
{
	HashMemoryChunk chunk;

	/* do nothing if not an increase (it's called increase for a reason) */
	if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
		return;

#ifdef HJDEBUG
	printf("Hashjoin %p: increasing nbuckets %d => %d\n",
		   hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
#endif

	hashtable->nbuckets = hashtable->nbuckets_optimal;
	hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;

	Assert(hashtable->nbuckets > 1);
	Assert(hashtable->nbuckets <= (INT_MAX / 2));
	Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));

	/*
	 * Just reallocate the proper number of buckets - we don't need to walk
	 * through them - we can walk the dense-allocated chunks (just like in
	 * ExecHashIncreaseNumBatches, but without all the copying into new
	 * chunks)
	 */
	hashtable->buckets =
		(HashJoinTuple *) repalloc(hashtable->buckets,
								hashtable->nbuckets * sizeof(HashJoinTuple));

	memset(hashtable->buckets, 0, hashtable->nbuckets * sizeof(HashJoinTuple));

	/* scan through all tuples in all chunks to rebuild the hash table */
	for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next)
	{
		/* process all tuples stored in this chunk */
		size_t		idx = 0;

		while (idx < chunk->used)
		{
			HashJoinTuple hashTuple = (HashJoinTuple) (chunk->data + idx);
			int			bucketno;
			int			batchno;

			ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
									  &bucketno, &batchno);

			/* add the tuple to the proper bucket */
			hashTuple->next = hashtable->buckets[bucketno];
			hashtable->buckets[bucketno] = hashTuple;

			/* advance index past the tuple */
			idx += MAXALIGN(HJTUPLE_OVERHEAD +
							HJTUPLE_MINTUPLE(hashTuple)->t_len);
		}
	}
}


/*
 * ExecHashTableInsert
 *		insert a tuple into the hash table depending on the hash value
 *		it may just go to a temp file for later batches
 *
 * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
 * tuple; the minimal case in particular is certain to happen while reloading
 * tuples from batch files.  We could save some cycles in the regular-tuple
 * case by not forcing the slot contents into minimal form; not clear if it's
 * worth the messiness required.
 */
void
ExecHashTableInsert(HashJoinTable hashtable,
					TupleTableSlot *slot,
					uint32 hashvalue)
{
	MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot);
	int			bucketno;
	int			batchno;

	ExecHashGetBucketAndBatch(hashtable, hashvalue,
							  &bucketno, &batchno);

	/*
	 * decide whether to put the tuple in the hash table or a temp file
	 */
	if (batchno == hashtable->curbatch)
	{
		/*
		 * put the tuple in hash table
		 */
		HashJoinTuple hashTuple;
		int			hashTupleSize;
		double		ntuples = (hashtable->totalTuples - hashtable->skewTuples);

		/* Create the HashJoinTuple */
		hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
		hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);

		hashTuple->hashvalue = hashvalue;
		memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);

		/*
		 * We always reset the tuple-matched flag on insertion.  This is okay
		 * even when reloading a tuple from a batch file, since the tuple
		 * could not possibly have been matched to an outer tuple before it
		 * went into the batch file.
		 */
		HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));

		/* Push it onto the front of the bucket's list */
		hashTuple->next = hashtable->buckets[bucketno];
		hashtable->buckets[bucketno] = hashTuple;

		/*
		 * Increase the (optimal) number of buckets if we just exceeded the
		 * NTUP_PER_BUCKET threshold, but only when there's still a single
		 * batch.
		 */
		if (hashtable->nbatch == 1 &&
			ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
		{
			/* Guard against integer overflow and alloc size overflow */
			if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
				hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
			{
				hashtable->nbuckets_optimal *= 2;
				hashtable->log2_nbuckets_optimal += 1;
			}
		}

		/* Account for space used, and back off if we've used too much */
		hashtable->spaceUsed += hashTupleSize;
		if (hashtable->spaceUsed > hashtable->spacePeak)
			hashtable->spacePeak = hashtable->spaceUsed;
		if (hashtable->spaceUsed +
			hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
			> hashtable->spaceAllowed)
			ExecHashIncreaseNumBatches(hashtable);
	}
	else
	{
		/*
		 * put the tuple into a temp file for later batches
		 */
		Assert(batchno > hashtable->curbatch);
		ExecHashJoinSaveTuple(tuple,
							  hashvalue,
							  &hashtable->innerBatchFile[batchno]);
	}
}

/*
 * ExecHashGetHashValue
 *		Compute the hash value for a tuple
 *
 * The tuple to be tested must be in either econtext->ecxt_outertuple or
 * econtext->ecxt_innertuple.  Vars in the hashkeys expressions should have
 * varno either OUTER_VAR or INNER_VAR.
 *
 * A TRUE result means the tuple's hash value has been successfully computed
 * and stored at *hashvalue.  A FALSE result means the tuple cannot match
 * because it contains a null attribute, and hence it should be discarded
 * immediately.  (If keep_nulls is true then FALSE is never returned.)
 */
bool
ExecHashGetHashValue(HashJoinTable hashtable,
					 ExprContext *econtext,
					 List *hashkeys,
					 bool outer_tuple,
					 bool keep_nulls,
					 uint32 *hashvalue)
{
	uint32		hashkey = 0;
	FmgrInfo   *hashfunctions;
	ListCell   *hk;
	int			i = 0;
	MemoryContext oldContext;

	/*
	 * We reset the eval context each time to reclaim any memory leaked in the
	 * hashkey expressions.
	 */
	ResetExprContext(econtext);

	oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);

	if (outer_tuple)
		hashfunctions = hashtable->outer_hashfunctions;
	else
		hashfunctions = hashtable->inner_hashfunctions;

	foreach(hk, hashkeys)
	{
		ExprState  *keyexpr = (ExprState *) lfirst(hk);
		Datum		keyval;
		bool		isNull;

		/* rotate hashkey left 1 bit at each step */
		hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);

		/*
		 * Get the join attribute value of the tuple
		 */
		keyval = ExecEvalExpr(keyexpr, econtext, &isNull, NULL);

		/*
		 * If the attribute is NULL, and the join operator is strict, then
		 * this tuple cannot pass the join qual so we can reject it
		 * immediately (unless we're scanning the outside of an outer join, in
		 * which case we must not reject it).  Otherwise we act like the
		 * hashcode of NULL is zero (this will support operators that act like
		 * IS NOT DISTINCT, though not any more-random behavior).  We treat
		 * the hash support function as strict even if the operator is not.
		 *
		 * Note: currently, all hashjoinable operators must be strict since
		 * the hash index AM assumes that.  However, it takes so little extra
		 * code here to allow non-strict that we may as well do it.
		 */
		if (isNull)
		{
			if (hashtable->hashStrict[i] && !keep_nulls)
			{
				MemoryContextSwitchTo(oldContext);
				return false;	/* cannot match */
			}
			/* else, leave hashkey unmodified, equivalent to hashcode 0 */
		}
		else
		{
			/* Compute the hash function */
			uint32		hkey;

			hkey = DatumGetUInt32(FunctionCall1(&hashfunctions[i], keyval));
			hashkey ^= hkey;
		}

		i++;
	}

	MemoryContextSwitchTo(oldContext);

	*hashvalue = hashkey;
	return true;
}

/*
 * Rotate the bits of "word" to the right by n bits.
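 *
 * For example, pg_rotate_right32(0x00000001, 1) yields 0x80000000.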
 */
static inline uint32
pg_rotate_right32(uint32 word, int n)
{
	return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
}

/*
 * ExecHashGetBucketAndBatch
 *		Determine the bucket number and batch number for a hash value
 *
 * Note: on-the-fly increases of nbatch must not change the bucket number
 * for a given hash code (since we don't move tuples to different hash
 * chains), and must only cause the batch number to remain the same or
 * increase.  Our algorithm is
 *		bucketno = hashvalue MOD nbuckets
 *		batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
 * where nbuckets and nbatch are both expected to be powers of 2, so we can
 * do the computations by shifting and masking.  (This assumes that all hash
 * functions are good about randomizing all their output bits, else we are
 * likely to have very skewed bucket or batch occupancy.)
 *
 * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
 * bucket count growth.  Once we start batching, the value is fixed and does
 * not change over the course of the join (making it possible to compute batch
 * number the way we do here).
 *
 * nbatch is always a power of 2; we increase it only by doubling it.  This
 * effectively adds one more bit to the top of the batchno.  In very large
 * joins, we might run out of bits to add, so we do this by rotating the hash
 * value.  This causes batchno to steal bits from bucketno when the number of
 * virtual buckets exceeds 2^32.  It's better to have longer bucket chains
 * than to lose the ability to divide batches.
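 *
 * Worked example (illustrative values): with nbuckets = 1024 (so
 * log2_nbuckets = 10) and nbatch = 4, bucketno is the low 10 bits of the
 * hash value, and batchno is the low 2 bits after rotating right by 10,
 * i.e. bits 10-11 of the original hash value.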
 */
void
ExecHashGetBucketAndBatch(HashJoinTable hashtable,
						  uint32 hashvalue,
						  int *bucketno,
						  int *batchno)
{
	uint32		nbuckets = (uint32) hashtable->nbuckets;
	uint32		nbatch = (uint32) hashtable->nbatch;

	if (nbatch > 1)
	{
		*bucketno = hashvalue & (nbuckets - 1);
		*batchno = pg_rotate_right32(hashvalue,
									 hashtable->log2_nbuckets) & (nbatch - 1);
	}
	else
	{
		*bucketno = hashvalue & (nbuckets - 1);
		*batchno = 0;
	}
}

/*
 * ExecScanHashBucket
 *		scan a hash bucket for matches to the current outer tuple
 *
 * The current outer tuple must be stored in econtext->ecxt_outertuple.
 *
 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
 * for the latter.
 */
bool
ExecScanHashBucket(HashJoinState *hjstate,
				   ExprContext *econtext)
{
	List	   *hjclauses = hjstate->hashclauses;
	HashJoinTable hashtable = hjstate->hj_HashTable;
	HashJoinTuple hashTuple = hjstate->hj_CurTuple;
	uint32		hashvalue = hjstate->hj_CurHashValue;

	/*
	 * hj_CurTuple is the address of the tuple last returned from the current
	 * bucket, or NULL if it's time to start scanning a new bucket.
	 *
	 * If the tuple hashed to a skew bucket, then scan the skew bucket;
	 * otherwise scan the standard hashtable bucket.
	 */
	if (hashTuple != NULL)
		hashTuple = hashTuple->next;
	else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
		hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
	else
		hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];

	while (hashTuple != NULL)
	{
		if (hashTuple->hashvalue == hashvalue)
		{
			TupleTableSlot *inntuple;

			/* insert hashtable's tuple into exec slot so ExecQual sees it */
			inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
											 hjstate->hj_HashTupleSlot,
											 false);	/* do not pfree */
			econtext->ecxt_innertuple = inntuple;

			/* reset temp memory each time to avoid leaks from qual expr */
			ResetExprContext(econtext);

			if (ExecQual(hjclauses, econtext, false))
			{
				hjstate->hj_CurTuple = hashTuple;
				return true;
			}
		}

		hashTuple = hashTuple->next;
	}

	/*
	 * no match
	 */
	return false;
}

/*
 * ExecPrepHashTableForUnmatched
 *		set up for a series of ExecScanHashTableForUnmatched calls
 */
void
ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
{
	/*----------
	 * During this scan we use the HashJoinState fields as follows:
	 *
	 * hj_CurBucketNo: next regular bucket to scan
	 * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
	 * hj_CurTuple: last tuple returned, or NULL to start next bucket
	 *----------
	 */
	hjstate->hj_CurBucketNo = 0;
	hjstate->hj_CurSkewBucketNo = 0;
	hjstate->hj_CurTuple = NULL;
}

/*
 * ExecScanHashTableForUnmatched
 *		scan the hash table for unmatched inner tuples
 *
 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
 * for the latter.
 */
bool
ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
{
	HashJoinTable hashtable = hjstate->hj_HashTable;
	HashJoinTuple hashTuple = hjstate->hj_CurTuple;

	for (;;)
	{
		/*
		 * hj_CurTuple is the address of the tuple last returned from the
		 * current bucket, or NULL if it's time to start scanning a new
		 * bucket.
		 */
		if (hashTuple != NULL)
			hashTuple = hashTuple->next;
		else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
		{
			hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];
			hjstate->hj_CurBucketNo++;
		}
		else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
		{
			int			j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];

			hashTuple = hashtable->skewBucket[j]->tuples;
			hjstate->hj_CurSkewBucketNo++;
		}
		else
			break;				/* finished all buckets */

		while (hashTuple != NULL)
		{
			if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
			{
				TupleTableSlot *inntuple;

				/* insert hashtable's tuple into exec slot */
				inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
												 hjstate->hj_HashTupleSlot,
												 false);		/* do not pfree */
				econtext->ecxt_innertuple = inntuple;

				/*
				 * Reset temp memory each time; although this function doesn't
				 * do any qual eval, the caller will, so let's keep it
				 * parallel to ExecScanHashBucket.
				 */
				ResetExprContext(econtext);

				hjstate->hj_CurTuple = hashTuple;
				return true;
			}

			hashTuple = hashTuple->next;
		}
	}

	/*
	 * no more unmatched tuples
	 */
	return false;
}

/*
 * ExecHashTableReset
 *
 *		reset hash table header for new batch
 */
void
ExecHashTableReset(HashJoinTable hashtable)
{
	MemoryContext oldcxt;
	int			nbuckets = hashtable->nbuckets;

	/*
	 * Release all the hash buckets and tuples acquired in the prior pass, and
	 * reinitialize the context for a new pass.
	 */
	MemoryContextReset(hashtable->batchCxt);
	oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);

	/* Reallocate and reinitialize the hash bucket headers. */
	hashtable->buckets = (HashJoinTuple *)
		palloc0(nbuckets * sizeof(HashJoinTuple));

	hashtable->spaceUsed = 0;

	MemoryContextSwitchTo(oldcxt);

	/* Forget the chunks (the memory was freed by the context reset above). */
	hashtable->chunks = NULL;
}

/*
 * ExecHashTableResetMatchFlags
 *		Clear all the HeapTupleHeaderHasMatch flags in the table
 */
void
ExecHashTableResetMatchFlags(HashJoinTable hashtable)
{
	HashJoinTuple tuple;
	int			i;

	/* Reset all flags in the main table ... */
	for (i = 0; i < hashtable->nbuckets; i++)
	{
		for (tuple = hashtable->buckets[i]; tuple != NULL; tuple = tuple->next)
			HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
	}

	/* ... and the same for the skew buckets, if any */
	for (i = 0; i < hashtable->nSkewBuckets; i++)
	{
		int			j = hashtable->skewBucketNums[i];
		HashSkewBucket *skewBucket = hashtable->skewBucket[j];

		for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next)
			HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
	}
}


void
ExecReScanHash(HashState *node)
{
	/*
	 * if chgParam of subnode is not null then the plan will be re-scanned
	 * by the first ExecProcNode.
	 */
	if (node->ps.lefttree->chgParam == NULL)
		ExecReScan(node->ps.lefttree);
}


/*
 * ExecHashBuildSkewHash
 *
 *		Set up for skew optimization if we can identify the most common values
 *		(MCVs) of the outer relation's join key.  We make a skew hash bucket
 *		for the hash value of each MCV, up to the number of slots allowed
 *		based on available memory.
 */
static void
ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
{
	HeapTupleData *statsTuple;
	Datum	   *values;
	int			nvalues;
	float4	   *numbers;
	int			nnumbers;

	/* Do nothing if planner didn't identify the outer relation's join key */
	if (!OidIsValid(node->skewTable))
		return;
	/* Also, do nothing if we don't have room for at least one skew bucket */
	if (mcvsToUse <= 0)
		return;

	/*
	 * Try to find the MCV statistics for the outer relation's join key.
	 */
	statsTuple = SearchSysCache3(STATRELATTINH,
								 ObjectIdGetDatum(node->skewTable),
								 Int16GetDatum(node->skewColumn),
								 BoolGetDatum(node->skewInherit));
	if (!HeapTupleIsValid(statsTuple))
		return;

	if (get_attstatsslot(statsTuple, node->skewColType, node->skewColTypmod,
						 STATISTIC_KIND_MCV, InvalidOid,
						 NULL,
						 &values, &nvalues,
						 &numbers, &nnumbers))
	{
		double		frac;
		int			nbuckets;
		FmgrInfo   *hashfunctions;
		int			i;

		if (mcvsToUse > nvalues)
			mcvsToUse = nvalues;

		/*
		 * Calculate the expected fraction of outer relation that will
		 * participate in the skew optimization.  If this isn't at least
		 * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
		 */
		frac = 0;
		for (i = 0; i < mcvsToUse; i++)
			frac += numbers[i];
		if (frac < SKEW_MIN_OUTER_FRACTION)
		{
			free_attstatsslot(node->skewColType,
							  values, nvalues, numbers, nnumbers);
			ReleaseSysCache(statsTuple);
			return;
		}

		/*
		 * Okay, set up the skew hashtable.
		 *
		 * skewBucket[] is an open addressing hashtable with a power of 2 size
		 * that is greater than the number of MCV values.  (This ensures there
		 * will be at least one null entry, so searches will always
		 * terminate.)
		 *
		 * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
		 * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
		 * since we limit pg_statistic entries to much less than that.
		 */
		nbuckets = 2;
		while (nbuckets <= mcvsToUse)
			nbuckets <<= 1;
		/* use two more bits just to help avoid collisions */
		nbuckets <<= 2;
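		/* e.g., mcvsToUse = 100 gives nbuckets = 128 above, then 512 here */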

		hashtable->skewEnabled = true;
		hashtable->skewBucketLen = nbuckets;

		/*
		 * We allocate the bucket memory in the hashtable's batch context. It
		 * is only needed during the first batch, and this ensures it will be
		 * automatically removed once the first batch is done.
		 */
		hashtable->skewBucket = (HashSkewBucket **)
			MemoryContextAllocZero(hashtable->batchCxt,
								   nbuckets * sizeof(HashSkewBucket *));
		hashtable->skewBucketNums = (int *)
			MemoryContextAllocZero(hashtable->batchCxt,
								   mcvsToUse * sizeof(int));

		hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
			+ mcvsToUse * sizeof(int);
		hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
			+ mcvsToUse * sizeof(int);
		if (hashtable->spaceUsed > hashtable->spacePeak)
			hashtable->spacePeak = hashtable->spaceUsed;

		/*
		 * Create a skew bucket for each MCV hash value.
		 *
		 * Note: it is very important that we create the buckets in order of
		 * decreasing MCV frequency.  If we have to remove some buckets, they
		 * must be removed in reverse order of creation (see notes in
		 * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
		 * be removed first.
		 */
		hashfunctions = hashtable->outer_hashfunctions;

		for (i = 0; i < mcvsToUse; i++)
		{
			uint32		hashvalue;
			int			bucket;

			hashvalue = DatumGetUInt32(FunctionCall1(&hashfunctions[0],
													 values[i]));

			/*
			 * While we have not hit a hole in the hashtable and have not hit
			 * the desired bucket, we have collided with some previous hash
			 * value, so try the next bucket location.  NB: this code must
			 * match ExecHashGetSkewBucket.
			 */
			bucket = hashvalue & (nbuckets - 1);
			while (hashtable->skewBucket[bucket] != NULL &&
				   hashtable->skewBucket[bucket]->hashvalue != hashvalue)
				bucket = (bucket + 1) & (nbuckets - 1);

			/*
			 * If we found an existing bucket with the same hashvalue, leave
			 * it alone.  It's okay for two MCVs to share a hashvalue.
			 */
			if (hashtable->skewBucket[bucket] != NULL)
				continue;

			/* Okay, create a new skew bucket for this hashvalue. */
			hashtable->skewBucket[bucket] = (HashSkewBucket *)
				MemoryContextAlloc(hashtable->batchCxt,
								   sizeof(HashSkewBucket));
			hashtable->skewBucket[bucket]->hashvalue = hashvalue;
			hashtable->skewBucket[bucket]->tuples = NULL;
			hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
			hashtable->nSkewBuckets++;
			hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
			hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
			if (hashtable->spaceUsed > hashtable->spacePeak)
				hashtable->spacePeak = hashtable->spaceUsed;
		}

		free_attstatsslot(node->skewColType,
						  values, nvalues, numbers, nnumbers);
	}

	ReleaseSysCache(statsTuple);
}

/*
 * ExecHashGetSkewBucket
 *
 *		Returns the index of the skew bucket for this hashvalue,
 *		or INVALID_SKEW_BUCKET_NO if the hashvalue is not
 *		associated with any active skew bucket.
 */
int
ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
{
	int			bucket;

	/*
	 * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
	 * particular, this happens after the initial batch is done).
	 */
	if (!hashtable->skewEnabled)
		return INVALID_SKEW_BUCKET_NO;

	/*
	 * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
	 */
	bucket = hashvalue & (hashtable->skewBucketLen - 1);

	/*
	 * While we have not hit a hole in the hashtable and have not hit the
	 * desired bucket, we have collided with some other hash value, so try the
	 * next bucket location.
	 */
	while (hashtable->skewBucket[bucket] != NULL &&
		   hashtable->skewBucket[bucket]->hashvalue != hashvalue)
		bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);

	/*
	 * Found the desired bucket?
	 */
	if (hashtable->skewBucket[bucket] != NULL)
		return bucket;

	/*
	 * There must not be any hashtable entry for this hash value.
	 */
	return INVALID_SKEW_BUCKET_NO;
}

/*
 * ExecHashSkewTableInsert
 *
 *		Insert a tuple into the skew hashtable.
 *
 * This should generally match up with the current-batch case in
 * ExecHashTableInsert.
 */
static void
ExecHashSkewTableInsert(HashJoinTable hashtable,
						TupleTableSlot *slot,
						uint32 hashvalue,
						int bucketNumber)
{
	MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot);
	HashJoinTuple hashTuple;
	int			hashTupleSize;

	/* Create the HashJoinTuple */
	hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
	hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
												   hashTupleSize);
	hashTuple->hashvalue = hashvalue;
	memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
	HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));

	/* Push it onto the front of the skew bucket's list */
	hashTuple->next = hashtable->skewBucket[bucketNumber]->tuples;
	hashtable->skewBucket[bucketNumber]->tuples = hashTuple;

	/* Account for space used, and back off if we've used too much */
	hashtable->spaceUsed += hashTupleSize;
	hashtable->spaceUsedSkew += hashTupleSize;
	if (hashtable->spaceUsed > hashtable->spacePeak)
		hashtable->spacePeak = hashtable->spaceUsed;
	while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
		ExecHashRemoveNextSkewBucket(hashtable);

	/* Check we are not over the total spaceAllowed, either */
	if (hashtable->spaceUsed > hashtable->spaceAllowed)
		ExecHashIncreaseNumBatches(hashtable);
}

/*
 *		ExecHashRemoveNextSkewBucket
 *
 *		Remove the least valuable skew bucket by pushing its tuples into
 *		the main hash table.
 */
static void
ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
{
	int			bucketToRemove;
	HashSkewBucket *bucket;
	uint32		hashvalue;
	int			bucketno;
	int			batchno;
	HashJoinTuple hashTuple;

	/* Locate the bucket to remove */
	bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
	bucket = hashtable->skewBucket[bucketToRemove];

	/*
	 * Calculate which bucket and batch the tuples belong to in the main
	 * hashtable.  They all have the same hash value, so it's the same for all
	 * of them.  Also note that it's not possible for nbatch to increase while
	 * we are processing the tuples.
	 */
	hashvalue = bucket->hashvalue;
	ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);

	/* Process all tuples in the bucket */
	hashTuple = bucket->tuples;
	while (hashTuple != NULL)
	{
		HashJoinTuple nextHashTuple = hashTuple->next;
		MinimalTuple tuple;
		Size		tupleSize;

		/*
		 * This code must agree with ExecHashTableInsert.  We do not use
		 * ExecHashTableInsert directly as ExecHashTableInsert expects a
		 * TupleTableSlot while we already have HashJoinTuples.
		 */
		tuple = HJTUPLE_MINTUPLE(hashTuple);
		tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;

		/* Decide whether to put the tuple in the hash table or a temp file */
		if (batchno == hashtable->curbatch)
		{
			/* Move the tuple to the main hash table */
			HashJoinTuple copyTuple;

			/*
			 * We must copy the tuple into the dense storage, else it will not
			 * be found by, eg, ExecHashIncreaseNumBatches.
			 */
			copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
			memcpy(copyTuple, hashTuple, tupleSize);
			pfree(hashTuple);

			copyTuple->next = hashtable->buckets[bucketno];
			hashtable->buckets[bucketno] = copyTuple;

			/* We have reduced skew space, but overall space doesn't change */
			hashtable->spaceUsedSkew -= tupleSize;
		}
		else
		{
			/* Put the tuple into a temp file for later batches */
			Assert(batchno > hashtable->curbatch);
			ExecHashJoinSaveTuple(tuple, hashvalue,
								  &hashtable->innerBatchFile[batchno]);
			pfree(hashTuple);
			hashtable->spaceUsed -= tupleSize;
			hashtable->spaceUsedSkew -= tupleSize;
		}

		hashTuple = nextHashTuple;

		/* allow this loop to be cancellable */
		CHECK_FOR_INTERRUPTS();
	}

	/*
	 * Free the bucket struct itself and reset the hashtable entry to NULL.
	 *
	 * NOTE: this is not nearly as simple as it looks on the surface, because
	 * of the possibility of collisions in the hashtable.  Suppose that hash
	 * values A and B collide at a particular hashtable entry, and that A was
	 * entered first so B gets shifted to a different table entry.  If we were
	 * to remove A first then ExecHashGetSkewBucket would mistakenly start
	 * reporting that B is not in the hashtable, because it would hit the NULL
	 * before finding B.  However, we always remove entries in the reverse
	 * order of creation, so this failure cannot happen.
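	 * (For example: if A occupies slot 5 and B was displaced to slot 6,
	 * removing A first would leave a hole at slot 5, and probes for B would
	 * stop there and never reach slot 6.)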
	 */
	hashtable->skewBucket[bucketToRemove] = NULL;
	hashtable->nSkewBuckets--;
	pfree(bucket);
	hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
	hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;

	/*
	 * If we have removed all skew buckets then give up on skew optimization.
	 * Release the arrays since they aren't useful any more.
	 */
	if (hashtable->nSkewBuckets == 0)
	{
		hashtable->skewEnabled = false;
		pfree(hashtable->skewBucket);
		pfree(hashtable->skewBucketNums);
		hashtable->skewBucket = NULL;
		hashtable->skewBucketNums = NULL;
		hashtable->spaceUsed -= hashtable->spaceUsedSkew;
		hashtable->spaceUsedSkew = 0;
	}
}

/*
 * Allocate 'size' bytes from the currently active HashMemoryChunk
 */
static void *
dense_alloc(HashJoinTable hashtable, Size size)
{
	HashMemoryChunk newChunk;
	char	   *ptr;

	/* just in case the size is not already aligned properly */
	size = MAXALIGN(size);

	/*
	 * If tuple size is larger than 1/4 of chunk size, allocate a separate
	 * chunk.
	 */
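	/* (with the 32kB HASH_CHUNK_SIZE in hashjoin.h, that threshold is 8kB) */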
	if (size > HASH_CHUNK_THRESHOLD)
	{
		/* allocate new chunk and put it at the beginning of the list */
		newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
								 offsetof(HashMemoryChunkData, data) + size);
		newChunk->maxlen = size;
		newChunk->used = 0;
		newChunk->ntuples = 0;

		/*
		 * Add this chunk to the list after the first existing chunk, so that
		 * we don't lose the remaining space in the "current" chunk.
		 */
		if (hashtable->chunks != NULL)
		{
			newChunk->next = hashtable->chunks->next;
			hashtable->chunks->next = newChunk;
		}
		else
		{
			newChunk->next = hashtable->chunks;
			hashtable->chunks = newChunk;
		}

		newChunk->used += size;
		newChunk->ntuples += 1;

		return newChunk->data;
	}

	/*
	 * See if we have enough space for it in the current chunk (if any). If
	 * not, allocate a fresh chunk.
	 */
	if ((hashtable->chunks == NULL) ||
		(hashtable->chunks->maxlen - hashtable->chunks->used) < size)
	{
		/* allocate new chunk and put it at the beginning of the list */
		newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
					  offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);

		newChunk->maxlen = HASH_CHUNK_SIZE;
		newChunk->used = size;
		newChunk->ntuples = 1;

		newChunk->next = hashtable->chunks;
		hashtable->chunks = newChunk;

		return newChunk->data;
	}

	/* There is enough space in the current chunk, let's add the tuple */
	ptr = hashtable->chunks->data + hashtable->chunks->used;
	hashtable->chunks->used += size;
	hashtable->chunks->ntuples += 1;

	/* return pointer to the start of the tuple memory */
	return ptr;
}