/*-------------------------------------------------------------------------
 *
 * nodeHash.c
 *	  Routines to hash relations for hashjoin
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeHash.c
 *
 *-------------------------------------------------------------------------
 */
/*
 * INTERFACE ROUTINES
 *		MultiExecHash	- generate an in-memory hash table of the relation
 *		ExecInitHash	- initialize node and subnodes
 *		ExecEndHash		- shutdown node and subnodes
 */

#include "postgres.h"

#include <math.h>
#include <limits.h>

#include "access/htup_details.h"
#include "catalog/pg_statistic.h"
#include "commands/tablespace.h"
#include "executor/execdebug.h"
#include "executor/hashjoin.h"
#include "executor/nodeHash.h"
#include "executor/nodeHashjoin.h"
#include "miscadmin.h"
#include "utils/dynahash.h"
#include "utils/memutils.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"


static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
					  int mcvsToUse);
static void ExecHashSkewTableInsert(HashJoinTable hashtable,
						TupleTableSlot *slot,
						uint32 hashvalue,
						int bucketNumber);
static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);

static void *dense_alloc(HashJoinTable hashtable, Size size);

/* ----------------------------------------------------------------
 *		ExecHash
 *
 *		stub for pro forma compliance
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecHash(PlanState *pstate)
{
	elog(ERROR, "Hash node does not support ExecProcNode call convention");
	return NULL;
}

/* ----------------------------------------------------------------
 *		MultiExecHash
 *
 *		build hash table for hashjoin, doing partitioning if more
 *		than one batch is required.
 * ----------------------------------------------------------------
 */
Node *
MultiExecHash(HashState *node)
{
	PlanState  *outerNode;
	List	   *hashkeys;
	HashJoinTable hashtable;
	TupleTableSlot *slot;
	ExprContext *econtext;
	uint32		hashvalue;

	/* must provide our own instrumentation support */
	if (node->ps.instrument)
		InstrStartNode(node->ps.instrument);

	/*
	 * get state info from node
	 */
	outerNode = outerPlanState(node);
	hashtable = node->hashtable;

	/*
	 * set expression context
	 */
	hashkeys = node->hashkeys;
	econtext = node->ps.ps_ExprContext;

	/*
	 * get all inner tuples and insert into the hash table (or temp files)
	 */
	for (;;)
	{
		slot = ExecProcNode(outerNode);
		if (TupIsNull(slot))
			break;
		/* We have to compute the hash value */
		econtext->ecxt_innertuple = slot;
		if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
								 false, hashtable->keepNulls,
								 &hashvalue))
		{
			int			bucketNumber;

			bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
			if (bucketNumber != INVALID_SKEW_BUCKET_NO)
			{
				/* It's a skew tuple, so put it into that hash table */
				ExecHashSkewTableInsert(hashtable, slot, hashvalue,
										bucketNumber);
				hashtable->skewTuples += 1;
			}
			else
			{
				/* Not subject to skew optimization, so insert normally */
				ExecHashTableInsert(hashtable, slot, hashvalue);
			}
			hashtable->totalTuples += 1;
		}
	}

	/* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
	if (hashtable->nbuckets != hashtable->nbuckets_optimal)
		ExecHashIncreaseNumBuckets(hashtable);

	/* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
	hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
	if (hashtable->spaceUsed > hashtable->spacePeak)
		hashtable->spacePeak = hashtable->spaceUsed;

	/* must provide our own instrumentation support */
	if (node->ps.instrument)
		InstrStopNode(node->ps.instrument, hashtable->totalTuples);

	/*
	 * We do not return the hash table directly because it's not a subtype of
	 * Node, and so would violate the MultiExecProcNode API.  Instead, our
	 * parent Hashjoin node is expected to know how to fish it out of our node
	 * state.  Ugly but not really worth cleaning up, since Hashjoin knows
	 * quite a bit more about Hash besides that.
	 */
	return NULL;
}
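
/*
 * For reference, a rough sketch (paraphrased, not the verbatim code) of how
 * the parent node in nodeHashjoin.c fishes the table out of our node state
 * after MultiExecHash has filled it:
 *
 *		HashState  *hashNode = (HashState *) innerPlanState(hjstate);
 *
 *		hashNode->hashtable = ExecHashTableCreate(...);
 *		(void) MultiExecProcNode((PlanState *) hashNode);
 *		hashtable = hashNode->hashtable;
 */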

/* ----------------------------------------------------------------
 *		ExecInitHash
 *
 *		Init routine for Hash node
 * ----------------------------------------------------------------
 */
HashState *
ExecInitHash(Hash *node, EState *estate, int eflags)
{
	HashState  *hashstate;

	/* check for unsupported flags */
	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

	/*
	 * create state structure
	 */
	hashstate = makeNode(HashState);
	hashstate->ps.plan = (Plan *) node;
	hashstate->ps.state = estate;
	hashstate->ps.ExecProcNode = ExecHash;
	hashstate->hashtable = NULL;
	hashstate->hashkeys = NIL;	/* will be set by parent HashJoin */

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &hashstate->ps);

	/*
	 * initialize our result slot
	 */
	ExecInitResultTupleSlot(estate, &hashstate->ps);

	/*
	 * initialize child expressions
	 */
	hashstate->ps.qual =
		ExecInitQual(node->plan.qual, (PlanState *) hashstate);

	/*
	 * initialize child nodes
	 */
	outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);

	/*
	 * initialize tuple type. no need to initialize projection info because
	 * this node doesn't do projections
	 */
	ExecAssignResultTypeFromTL(&hashstate->ps);
	hashstate->ps.ps_ProjInfo = NULL;

	return hashstate;
}

/* ---------------------------------------------------------------
 *		ExecEndHash
 *
 *		clean up routine for Hash node
 * ----------------------------------------------------------------
 */
void
ExecEndHash(HashState *node)
{
	PlanState  *outerPlan;

	/*
	 * free exprcontext
	 */
	ExecFreeExprContext(&node->ps);

	/*
	 * shut down the subplan
	 */
	outerPlan = outerPlanState(node);
	ExecEndNode(outerPlan);
}


/* ----------------------------------------------------------------
 *		ExecHashTableCreate
 *
 *		create an empty hashtable data structure for hashjoin.
 * ----------------------------------------------------------------
 */
HashJoinTable
ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls)
{
	HashJoinTable hashtable;
	Plan	   *outerNode;
	int			nbuckets;
	int			nbatch;
	int			num_skew_mcvs;
	int			log2_nbuckets;
	int			nkeys;
	int			i;
	ListCell   *ho;
	MemoryContext oldcxt;

	/*
	 * Get information about the size of the relation to be hashed (it's the
	 * "outer" subtree of this node, but the inner relation of the hashjoin).
	 * Compute the appropriate size of the hash table.
	 */
	outerNode = outerPlan(node);

	ExecChooseHashTableSize(outerNode->plan_rows, outerNode->plan_width,
							OidIsValid(node->skewTable),
							&nbuckets, &nbatch, &num_skew_mcvs);

	/* nbuckets must be a power of 2 */
	log2_nbuckets = my_log2(nbuckets);
	Assert(nbuckets == (1 << log2_nbuckets));

	/*
	 * Initialize the hash table control block.
	 *
	 * The hashtable control block is just palloc'd from the executor's
	 * per-query memory context.  Everything else should be kept inside the
	 * subsidiary hashCxt or batchCxt.
	 */
	hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
	hashtable->nbuckets = nbuckets;
	hashtable->nbuckets_original = nbuckets;
	hashtable->nbuckets_optimal = nbuckets;
	hashtable->log2_nbuckets = log2_nbuckets;
	hashtable->log2_nbuckets_optimal = log2_nbuckets;
	hashtable->buckets = NULL;
	hashtable->keepNulls = keepNulls;
	hashtable->skewEnabled = false;
	hashtable->skewBucket = NULL;
	hashtable->skewBucketLen = 0;
	hashtable->nSkewBuckets = 0;
	hashtable->skewBucketNums = NULL;
	hashtable->nbatch = nbatch;
	hashtable->curbatch = 0;
	hashtable->nbatch_original = nbatch;
	hashtable->nbatch_outstart = nbatch;
	hashtable->growEnabled = true;
	hashtable->totalTuples = 0;
	hashtable->skewTuples = 0;
	hashtable->innerBatchFile = NULL;
	hashtable->outerBatchFile = NULL;
	hashtable->spaceUsed = 0;
	hashtable->spacePeak = 0;
	hashtable->spaceAllowed = work_mem * 1024L;
	hashtable->spaceUsedSkew = 0;
	hashtable->spaceAllowedSkew =
		hashtable->spaceAllowed * SKEW_WORK_MEM_PERCENT / 100;
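	/*
	 * Illustration: with work_mem = 4096 (kB), spaceAllowed is 4194304 bytes,
	 * and with SKEW_WORK_MEM_PERCENT at its current value of 2 the skew
	 * hashtable is budgeted spaceAllowedSkew = 83886 bytes, i.e. 2% of the
	 * overall memory allowance.
	 */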
	hashtable->chunks = NULL;

#ifdef HJDEBUG
	printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
		   hashtable, nbatch, nbuckets);
#endif

	/*
	 * Create temporary memory contexts in which to keep the hashtable working
	 * storage.  See notes in executor/hashjoin.h.
	 */
	hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
											   "HashTableContext",
											   ALLOCSET_DEFAULT_SIZES);

	hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
												"HashBatchContext",
												ALLOCSET_DEFAULT_SIZES);

	/* Allocate data that will live for the life of the hashjoin */

	oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);

	/*
	 * Get info about the hash functions to be used for each hash key. Also
	 * remember whether the join operators are strict.
	 */
	nkeys = list_length(hashOperators);
	hashtable->outer_hashfunctions =
		(FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
	hashtable->inner_hashfunctions =
		(FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
	hashtable->hashStrict = (bool *) palloc(nkeys * sizeof(bool));
	i = 0;
	foreach(ho, hashOperators)
	{
		Oid			hashop = lfirst_oid(ho);
		Oid			left_hashfn;
		Oid			right_hashfn;

		if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
			elog(ERROR, "could not find hash function for hash operator %u",
				 hashop);
		fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
		fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
		hashtable->hashStrict[i] = op_strict(hashop);
		i++;
	}

	if (nbatch > 1)
	{
		/*
		 * allocate and initialize the file arrays in hashCxt
		 */
		hashtable->innerBatchFile = (BufFile **)
			palloc0(nbatch * sizeof(BufFile *));
		hashtable->outerBatchFile = (BufFile **)
			palloc0(nbatch * sizeof(BufFile *));
		/* The files will not be opened until needed... */
		/* ... but make sure we have temp tablespaces established for them */
		PrepareTempTablespaces();
	}

	/*
	 * Prepare context for the first-scan space allocations; allocate the
	 * hashbucket array therein, and set each bucket "empty".
	 */
	MemoryContextSwitchTo(hashtable->batchCxt);

	hashtable->buckets = (HashJoinTuple *)
		palloc0(nbuckets * sizeof(HashJoinTuple));

	/*
	 * Set up for skew optimization, if possible and there's a need for more
	 * than one batch.  (In a one-batch join, there's no point in it.)
	 */
	if (nbatch > 1)
		ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);

	MemoryContextSwitchTo(oldcxt);

	return hashtable;
}


/*
 * Compute appropriate size for hashtable given the estimated size of the
 * relation to be hashed (number of rows and average row width).
 *
 * This is exported so that the planner's costsize.c can use it.
 */

/* Target bucket loading (tuples per bucket) */
#define NTUP_PER_BUCKET			1

void
ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
						int *numbuckets,
						int *numbatches,
						int *num_skew_mcvs)
{
	int			tupsize;
	double		inner_rel_bytes;
	long		bucket_bytes;
	long		hash_table_bytes;
	long		skew_table_bytes;
	long		max_pointers;
	long		mppow2;
	int			nbatch = 1;
	int			nbuckets;
	double		dbuckets;

	/* Force a plausible relation size if no info */
	if (ntuples <= 0.0)
		ntuples = 1000.0;

	/*
	 * Estimate tupsize based on footprint of tuple in hashtable... note this
	 * does not allow for any palloc overhead.  The manipulations of spaceUsed
	 * don't count palloc overhead either.
	 */
	tupsize = HJTUPLE_OVERHEAD +
		MAXALIGN(SizeofMinimalTupleHeader) +
		MAXALIGN(tupwidth);
	inner_rel_bytes = ntuples * tupsize;

	/*
	 * Target in-memory hashtable size is work_mem kilobytes.
	 */
	hash_table_bytes = work_mem * 1024L;

	/*
	 * If skew optimization is possible, estimate the number of skew buckets
	 * that will fit in the memory allowed, and decrement the assumed space
	 * available for the main hash table accordingly.
	 *
	 * We make the optimistic assumption that each skew bucket will contain
	 * one inner-relation tuple.  If that turns out to be low, we will recover
	 * at runtime by reducing the number of skew buckets.
	 *
	 * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
	 * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
	 * will round up to the next power of 2 and then multiply by 4 to reduce
	 * collisions.
	 */
	if (useskew)
	{
		skew_table_bytes = hash_table_bytes * SKEW_WORK_MEM_PERCENT / 100;

		/*----------
		 * Divisor is:
		 * size of a hash tuple +
		 * worst-case size of skewBucket[] per MCV +
		 * size of skewBucketNums[] entry +
		 * size of skew bucket struct itself
		 *----------
		 */
		*num_skew_mcvs = skew_table_bytes / (tupsize +
											 (8 * sizeof(HashSkewBucket *)) +
											 sizeof(int) +
											 SKEW_BUCKET_OVERHEAD);
		if (*num_skew_mcvs > 0)
			hash_table_bytes -= skew_table_bytes;
	}
	else
		*num_skew_mcvs = 0;

	/*
	 * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
	 * memory is filled, assuming a single batch; but limit the value so that
	 * the pointer arrays we'll try to allocate do not exceed work_mem nor
	 * MaxAllocSize.
	 *
	 * Note that both nbuckets and nbatch must be powers of 2 to make
	 * ExecHashGetBucketAndBatch fast.
	 */
	max_pointers = (work_mem * 1024L) / sizeof(HashJoinTuple);
	max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
	/* If max_pointers isn't a power of 2, must round it down to one */
	mppow2 = 1L << my_log2(max_pointers);
	if (max_pointers != mppow2)
		max_pointers = mppow2 / 2;
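	/*
	 * Worked example: my_log2 rounds up, so if max_pointers starts at 300000,
	 * mppow2 is 2^19 = 524288; since that isn't equal to max_pointers, we
	 * fall back to mppow2 / 2 = 2^18 = 262144, the largest power of 2 not
	 * exceeding the original value.
	 */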

	/* Also ensure we avoid integer overflow in nbatch and nbuckets */
	/* (this step is redundant given the current value of MaxAllocSize) */
	max_pointers = Min(max_pointers, INT_MAX / 2);

	dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
	dbuckets = Min(dbuckets, max_pointers);
	nbuckets = (int) dbuckets;
	/* don't let nbuckets be really small, though ... */
	nbuckets = Max(nbuckets, 1024);
	/* ... and force it to be a power of 2. */
	nbuckets = 1 << my_log2(nbuckets);
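	/*
	 * For instance, an estimate of one million tuples with NTUP_PER_BUCKET =
	 * 1 gives dbuckets = 1000000, which the line above rounds up to nbuckets
	 * = 2^20 = 1048576 (assuming max_pointers allows it).
	 */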

	/*
	 * If there's not enough space to store the projected number of tuples and
	 * the required bucket headers, we will need multiple batches.
	 */
	bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
	if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
	{
		/* We'll need multiple batches */
		long		lbuckets;
		double		dbatch;
		int			minbatch;
		long		bucket_size;

		/*
		 * Estimate the number of buckets we'll want to have when work_mem is
		 * entirely full.  Each bucket will contain a bucket pointer plus
		 * NTUP_PER_BUCKET tuples, whose projected size already includes
		 * overhead for the hash code, pointer to the next tuple, etc.
		 */
		bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
		lbuckets = 1L << my_log2(hash_table_bytes / bucket_size);
		lbuckets = Min(lbuckets, max_pointers);
		nbuckets = (int) lbuckets;
		nbuckets = 1 << my_log2(nbuckets);
		bucket_bytes = nbuckets * sizeof(HashJoinTuple);

		/*
		 * Buckets are simple pointers to hashjoin tuples, while tupsize
		 * includes the pointer, hash code, and MinimalTupleData.  So buckets
		 * should never really exceed 25% of work_mem (even for
		 * NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
		 * 2^N bytes, where we might get more because of doubling. So let's
		 * look for 50% here.
		 */
		Assert(bucket_bytes <= hash_table_bytes / 2);

		/* Calculate required number of batches. */
		dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
		dbatch = Min(dbatch, max_pointers);
		minbatch = (int) dbatch;
		nbatch = 2;
		while (nbatch < minbatch)
			nbatch <<= 1;
	}
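
	/*
	 * Worked example of the multi-batch path: if inner_rel_bytes is 64MB and
	 * hash_table_bytes is 4MB, of which bucket_bytes accounts for 1MB, then
	 * dbatch = ceil(64MB / 3MB) = 22, so minbatch = 22 and the doubling loop
	 * settles on nbatch = 32, the first power of 2 >= minbatch.
	 */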

	Assert(nbuckets > 0);
	Assert(nbatch > 0);

	*numbuckets = nbuckets;
	*numbatches = nbatch;
}


/* ----------------------------------------------------------------
 *		ExecHashTableDestroy
 *
 *		destroy a hash table
 * ----------------------------------------------------------------
 */
void
ExecHashTableDestroy(HashJoinTable hashtable)
{
	int			i;

	/*
	 * Make sure all the temp files are closed.  We skip batch 0, since it
	 * can't have any temp files (and the arrays might not even exist if
	 * nbatch is only 1).
	 */
	for (i = 1; i < hashtable->nbatch; i++)
	{
		if (hashtable->innerBatchFile[i])
			BufFileClose(hashtable->innerBatchFile[i]);
		if (hashtable->outerBatchFile[i])
			BufFileClose(hashtable->outerBatchFile[i]);
	}

	/* Release working memory (batchCxt is a child, so it goes away too) */
	MemoryContextDelete(hashtable->hashCxt);

	/* And drop the control block */
	pfree(hashtable);
}

/*
 * ExecHashIncreaseNumBatches
 *		increase the original number of batches in order to reduce
 *		current memory consumption
 */
static void
ExecHashIncreaseNumBatches(HashJoinTable hashtable)
{
	int			oldnbatch = hashtable->nbatch;
	int			curbatch = hashtable->curbatch;
	int			nbatch;
	MemoryContext oldcxt;
	long		ninmemory;
	long		nfreed;
	HashMemoryChunk oldchunks;

	/* do nothing if we've decided to shut off growth */
	if (!hashtable->growEnabled)
		return;

	/* safety check to avoid overflow */
	if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
		return;

	nbatch = oldnbatch * 2;
	Assert(nbatch > 1);

#ifdef HJDEBUG
	printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
		   hashtable, nbatch, hashtable->spaceUsed);
#endif

	oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);

	if (hashtable->innerBatchFile == NULL)
	{
		/* we had no file arrays before */
		hashtable->innerBatchFile = (BufFile **)
			palloc0(nbatch * sizeof(BufFile *));
		hashtable->outerBatchFile = (BufFile **)
			palloc0(nbatch * sizeof(BufFile *));
		/* time to establish the temp tablespaces, too */
		PrepareTempTablespaces();
	}
	else
	{
		/* enlarge arrays and zero out added entries */
		hashtable->innerBatchFile = (BufFile **)
			repalloc(hashtable->innerBatchFile, nbatch * sizeof(BufFile *));
		hashtable->outerBatchFile = (BufFile **)
			repalloc(hashtable->outerBatchFile, nbatch * sizeof(BufFile *));
		MemSet(hashtable->innerBatchFile + oldnbatch, 0,
			   (nbatch - oldnbatch) * sizeof(BufFile *));
		MemSet(hashtable->outerBatchFile + oldnbatch, 0,
			   (nbatch - oldnbatch) * sizeof(BufFile *));
	}

	MemoryContextSwitchTo(oldcxt);

	hashtable->nbatch = nbatch;

	/*
	 * Scan through the existing hash table entries and dump out any that no
	 * longer belong to the current batch.
	 */
	ninmemory = nfreed = 0;

	/* If we know we need to resize nbuckets, we can do it while rebatching. */
	if (hashtable->nbuckets_optimal != hashtable->nbuckets)
	{
		/* we never decrease the number of buckets */
		Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);

		hashtable->nbuckets = hashtable->nbuckets_optimal;
		hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;

		hashtable->buckets = repalloc(hashtable->buckets,
									  sizeof(HashJoinTuple) * hashtable->nbuckets);
	}

	/*
	 * We will scan through the chunks directly, so that we can reset the
	 * buckets now and not have to keep track of which tuples in the buckets
	 * have already been processed. We will free the old chunks as we go.
	 */
	memset(hashtable->buckets, 0, sizeof(HashJoinTuple) * hashtable->nbuckets);
	oldchunks = hashtable->chunks;
	hashtable->chunks = NULL;

	/* so, let's scan through the old chunks, and all tuples in each chunk */
	while (oldchunks != NULL)
	{
		HashMemoryChunk nextchunk = oldchunks->next;

		/* position within the buffer (up to oldchunks->used) */
		size_t		idx = 0;

		/* process all tuples stored in this chunk (and then free it) */
		while (idx < oldchunks->used)
		{
			HashJoinTuple hashTuple = (HashJoinTuple) (oldchunks->data + idx);
			MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
			int			hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
			int			bucketno;
			int			batchno;

			ninmemory++;
			ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
									  &bucketno, &batchno);

			if (batchno == curbatch)
			{
				/* keep tuple in memory - copy it into the new chunk */
				HashJoinTuple copyTuple;

				copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
				memcpy(copyTuple, hashTuple, hashTupleSize);

				/* and add it back to the appropriate bucket */
				copyTuple->next = hashtable->buckets[bucketno];
				hashtable->buckets[bucketno] = copyTuple;
			}
			else
			{
				/* dump it out */
				Assert(batchno > curbatch);
				ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
									  hashTuple->hashvalue,
									  &hashtable->innerBatchFile[batchno]);

				hashtable->spaceUsed -= hashTupleSize;
				nfreed++;
			}

			/* next tuple in this chunk */
			idx += MAXALIGN(hashTupleSize);

			/* allow this loop to be cancellable */
			CHECK_FOR_INTERRUPTS();
		}

		/* we're done with this chunk - free it and proceed to the next one */
		pfree(oldchunks);
		oldchunks = nextchunk;
	}

#ifdef HJDEBUG
	printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
		   hashtable, nfreed, ninmemory, hashtable->spaceUsed);
#endif

	/*
	 * If we dumped out either all or none of the tuples in the table, disable
	 * further expansion of nbatch.  This situation implies that we have
	 * enough tuples of identical hashvalues to overflow spaceAllowed.
	 * Increasing nbatch will not fix it since there's no way to subdivide the
	 * group any more finely. We have to just gut it out and hope the server
	 * has enough RAM.
	 */
	if (nfreed == 0 || nfreed == ninmemory)
	{
		hashtable->growEnabled = false;
#ifdef HJDEBUG
		printf("Hashjoin %p: disabling further increase of nbatch\n",
			   hashtable);
#endif
	}
}

/*
 * ExecHashIncreaseNumBuckets
 *		increase the original number of buckets in order to reduce
 *		number of tuples per bucket
 */
static void
ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
{
	HashMemoryChunk chunk;

	/* do nothing if not an increase (it's called increase for a reason) */
	if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
		return;

#ifdef HJDEBUG
	printf("Hashjoin %p: increasing nbuckets %d => %d\n",
		   hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
#endif

	hashtable->nbuckets = hashtable->nbuckets_optimal;
	hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;

	Assert(hashtable->nbuckets > 1);
	Assert(hashtable->nbuckets <= (INT_MAX / 2));
	Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));

	/*
	 * Just reallocate the proper number of buckets - we don't need to walk
	 * through them - we can walk the dense-allocated chunks (just like in
	 * ExecHashIncreaseNumBatches, but without all the copying into new
	 * chunks)
	 */
	hashtable->buckets =
		(HashJoinTuple *) repalloc(hashtable->buckets,
								   hashtable->nbuckets * sizeof(HashJoinTuple));

	memset(hashtable->buckets, 0, hashtable->nbuckets * sizeof(HashJoinTuple));

	/* scan through all tuples in all chunks to rebuild the hash table */
	for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next)
	{
		/* process all tuples stored in this chunk */
		size_t		idx = 0;

		while (idx < chunk->used)
		{
			HashJoinTuple hashTuple = (HashJoinTuple) (chunk->data + idx);
			int			bucketno;
			int			batchno;

			ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
									  &bucketno, &batchno);

			/* add the tuple to the proper bucket */
			hashTuple->next = hashtable->buckets[bucketno];
			hashtable->buckets[bucketno] = hashTuple;

			/* advance index past the tuple */
			idx += MAXALIGN(HJTUPLE_OVERHEAD +
							HJTUPLE_MINTUPLE(hashTuple)->t_len);
		}

		/* allow this loop to be cancellable */
		CHECK_FOR_INTERRUPTS();
	}
}


/*
 * ExecHashTableInsert
 *		insert a tuple into the hash table depending on the hash value
 *		it may just go to a temp file for later batches
 *
 * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
 * tuple; the minimal case in particular is certain to happen while reloading
 * tuples from batch files.  We could save some cycles in the regular-tuple
 * case by not forcing the slot contents into minimal form; not clear if it's
 * worth the messiness required.
 */
void
ExecHashTableInsert(HashJoinTable hashtable,
					TupleTableSlot *slot,
					uint32 hashvalue)
{
	MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot);
	int			bucketno;
	int			batchno;

	ExecHashGetBucketAndBatch(hashtable, hashvalue,
							  &bucketno, &batchno);

	/*
	 * decide whether to put the tuple in the hash table or a temp file
	 */
	if (batchno == hashtable->curbatch)
	{
		/*
		 * put the tuple in hash table
		 */
		HashJoinTuple hashTuple;
		int			hashTupleSize;
		double		ntuples = (hashtable->totalTuples - hashtable->skewTuples);

		/* Create the HashJoinTuple */
		hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
		hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);

		hashTuple->hashvalue = hashvalue;
		memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);

		/*
		 * We always reset the tuple-matched flag on insertion.  This is okay
		 * even when reloading a tuple from a batch file, since the tuple
		 * could not possibly have been matched to an outer tuple before it
		 * went into the batch file.
		 */
		HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));

		/* Push it onto the front of the bucket's list */
		hashTuple->next = hashtable->buckets[bucketno];
		hashtable->buckets[bucketno] = hashTuple;

		/*
		 * Increase the (optimal) number of buckets if we just exceeded the
		 * NTUP_PER_BUCKET threshold, but only when there's still a single
		 * batch.
		 */
		if (hashtable->nbatch == 1 &&
			ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
		{
			/* Guard against integer overflow and alloc size overflow */
			if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
				hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
			{
				hashtable->nbuckets_optimal *= 2;
				hashtable->log2_nbuckets_optimal += 1;
			}
		}

		/* Account for space used, and back off if we've used too much */
		hashtable->spaceUsed += hashTupleSize;
		if (hashtable->spaceUsed > hashtable->spacePeak)
			hashtable->spacePeak = hashtable->spaceUsed;
		if (hashtable->spaceUsed +
			hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
			> hashtable->spaceAllowed)
			ExecHashIncreaseNumBatches(hashtable);
	}
	else
	{
		/*
		 * put the tuple into a temp file for later batches
		 */
		Assert(batchno > hashtable->curbatch);
		ExecHashJoinSaveTuple(tuple,
							  hashvalue,
							  &hashtable->innerBatchFile[batchno]);
	}
}

/*
 * ExecHashGetHashValue
 *		Compute the hash value for a tuple
 *
 * The tuple to be tested must be in either econtext->ecxt_outertuple or
 * econtext->ecxt_innertuple.  Vars in the hashkeys expressions should have
 * varno either OUTER_VAR or INNER_VAR.
 *
 * A TRUE result means the tuple's hash value has been successfully computed
 * and stored at *hashvalue.  A FALSE result means the tuple cannot match
 * because it contains a null attribute, and hence it should be discarded
 * immediately.  (If keep_nulls is true then FALSE is never returned.)
 */
bool
ExecHashGetHashValue(HashJoinTable hashtable,
					 ExprContext *econtext,
					 List *hashkeys,
					 bool outer_tuple,
					 bool keep_nulls,
					 uint32 *hashvalue)
{
	uint32		hashkey = 0;
	FmgrInfo   *hashfunctions;
	ListCell   *hk;
	int			i = 0;
	MemoryContext oldContext;

	/*
	 * We reset the eval context each time to reclaim any memory leaked in the
	 * hashkey expressions.
	 */
	ResetExprContext(econtext);

	oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);

	if (outer_tuple)
		hashfunctions = hashtable->outer_hashfunctions;
	else
		hashfunctions = hashtable->inner_hashfunctions;

	foreach(hk, hashkeys)
	{
		ExprState  *keyexpr = (ExprState *) lfirst(hk);
		Datum		keyval;
		bool		isNull;

		/* rotate hashkey left 1 bit at each step */
		hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);
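
		/*
		 * (The rotation, rather than a plain shift, keeps all 32 bits in
		 * play, and it makes the combined value depend on the order of the
		 * key columns, so equal values in two different columns don't simply
		 * cancel out under the XOR below.)
		 */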

		/*
		 * Get the join attribute value of the tuple
		 */
		keyval = ExecEvalExpr(keyexpr, econtext, &isNull);

		/*
		 * If the attribute is NULL, and the join operator is strict, then
		 * this tuple cannot pass the join qual so we can reject it
		 * immediately (unless we're scanning the outside of an outer join, in
		 * which case we must not reject it).  Otherwise we act like the
		 * hashcode of NULL is zero (this will support operators that act like
		 * IS NOT DISTINCT, though not any more-random behavior).  We treat
		 * the hash support function as strict even if the operator is not.
		 *
		 * Note: currently, all hashjoinable operators must be strict since
		 * the hash index AM assumes that.  However, it takes so little extra
		 * code here to allow non-strict that we may as well do it.
		 */
		if (isNull)
		{
			if (hashtable->hashStrict[i] && !keep_nulls)
			{
				MemoryContextSwitchTo(oldContext);
				return false;	/* cannot match */
			}
			/* else, leave hashkey unmodified, equivalent to hashcode 0 */
		}
		else
		{
			/* Compute the hash function */
			uint32		hkey;

			hkey = DatumGetUInt32(FunctionCall1(&hashfunctions[i], keyval));
			hashkey ^= hkey;
		}

		i++;
	}

	MemoryContextSwitchTo(oldContext);

	*hashvalue = hashkey;
	return true;
}

/*
 * Rotate the bits of "word" to the right by n bits.
 */
static inline uint32
pg_rotate_right32(uint32 word, int n)
{
	return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
}
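
/*
 * Example: pg_rotate_right32(0x80000001, 1) yields 0xC0000000.  Note that
 * n = 0 would produce an undefined full-width shift; the callers here pass
 * n = log2_nbuckets, which is expected to be positive.
 */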

/*
 * ExecHashGetBucketAndBatch
 *		Determine the bucket number and batch number for a hash value
 *
 * Note: on-the-fly increases of nbatch must not change the bucket number
 * for a given hash code (since we don't move tuples to different hash
 * chains), and must only cause the batch number to remain the same or
 * increase.  Our algorithm is
 *		bucketno = hashvalue MOD nbuckets
 *		batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
 * where nbuckets and nbatch are both expected to be powers of 2, so we can
 * do the computations by shifting and masking.  (This assumes that all hash
 * functions are good about randomizing all their output bits, else we are
 * likely to have very skewed bucket or batch occupancy.)
 *
 * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
 * bucket count growth.  Once we start batching, the value is fixed and does
 * not change over the course of the join (making it possible to compute batch
 * number the way we do here).
 *
 * nbatch is always a power of 2; we increase it only by doubling it.  This
 * effectively adds one more bit to the top of the batchno.  In very large
 * joins, we might run out of bits to add, so we do this by rotating the hash
 * value.  This causes batchno to steal bits from bucketno when the number of
 * virtual buckets exceeds 2^32.  It's better to have longer bucket chains
 * than to lose the ability to divide batches.
 */
void
ExecHashGetBucketAndBatch(HashJoinTable hashtable,
						  uint32 hashvalue,
						  int *bucketno,
						  int *batchno)
{
	uint32		nbuckets = (uint32) hashtable->nbuckets;
	uint32		nbatch = (uint32) hashtable->nbatch;

	if (nbatch > 1)
	{
		*bucketno = hashvalue & (nbuckets - 1);
		*batchno = pg_rotate_right32(hashvalue,
									 hashtable->log2_nbuckets) & (nbatch - 1);
	}
	else
	{
		*bucketno = hashvalue & (nbuckets - 1);
		*batchno = 0;
	}
}
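
/*
 * Worked example: with nbuckets = 1024 (log2_nbuckets = 10) and nbatch = 4,
 * hashvalue 0xDEADBEEF yields bucketno = 0xDEADBEEF & 0x3FF = 751 (the low
 * 10 bits) and batchno = ROR(0xDEADBEEF, 10) & 3 = 3 (bits 10-11), so the
 * two fields consume disjoint bits of the hash value.
 */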

/*
 * ExecScanHashBucket
 *		scan a hash bucket for matches to the current outer tuple
 *
 * The current outer tuple must be stored in econtext->ecxt_outertuple.
 *
 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
 * for the latter.
 */
bool
ExecScanHashBucket(HashJoinState *hjstate,
				   ExprContext *econtext)
{
	ExprState  *hjclauses = hjstate->hashclauses;
	HashJoinTable hashtable = hjstate->hj_HashTable;
	HashJoinTuple hashTuple = hjstate->hj_CurTuple;
	uint32		hashvalue = hjstate->hj_CurHashValue;

	/*
	 * hj_CurTuple is the address of the tuple last returned from the current
	 * bucket, or NULL if it's time to start scanning a new bucket.
	 *
	 * If the tuple hashed to a skew bucket then scan the skew bucket
	 * otherwise scan the standard hashtable bucket.
	 */
	if (hashTuple != NULL)
		hashTuple = hashTuple->next;
	else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
		hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
	else
		hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];

	while (hashTuple != NULL)
	{
		if (hashTuple->hashvalue == hashvalue)
		{
			TupleTableSlot *inntuple;

			/* insert hashtable's tuple into exec slot so ExecQual sees it */
			inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
											 hjstate->hj_HashTupleSlot,
											 false);	/* do not pfree */
			econtext->ecxt_innertuple = inntuple;

			/* reset temp memory each time to avoid leaks from qual expr */
			ResetExprContext(econtext);

			if (ExecQual(hjclauses, econtext))
			{
				hjstate->hj_CurTuple = hashTuple;
				return true;
			}
		}

		hashTuple = hashTuple->next;
	}

	/*
	 * no match
	 */
	return false;
}

/*
 * ExecPrepHashTableForUnmatched
 *		set up for a series of ExecScanHashTableForUnmatched calls
 */
void
ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
{
	/*----------
	 * During this scan we use the HashJoinState fields as follows:
	 *
	 * hj_CurBucketNo: next regular bucket to scan
	 * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
	 * hj_CurTuple: last tuple returned, or NULL to start next bucket
	 *----------
	 */
	hjstate->hj_CurBucketNo = 0;
	hjstate->hj_CurSkewBucketNo = 0;
	hjstate->hj_CurTuple = NULL;
}

/*
 * ExecScanHashTableForUnmatched
 *		scan the hash table for unmatched inner tuples
 *
 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
 * for the latter.
 */
bool
ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
{
	HashJoinTable hashtable = hjstate->hj_HashTable;
	HashJoinTuple hashTuple = hjstate->hj_CurTuple;

	for (;;)
	{
		/*
		 * hj_CurTuple is the address of the tuple last returned from the
		 * current bucket, or NULL if it's time to start scanning a new
		 * bucket.
		 */
		if (hashTuple != NULL)
			hashTuple = hashTuple->next;
		else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
		{
			hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];
			hjstate->hj_CurBucketNo++;
		}
		else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
		{
			int			j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];

			hashTuple = hashtable->skewBucket[j]->tuples;
			hjstate->hj_CurSkewBucketNo++;
		}
		else
			break;				/* finished all buckets */

		while (hashTuple != NULL)
		{
			if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
			{
				TupleTableSlot *inntuple;

				/* insert hashtable's tuple into exec slot */
				inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
												 hjstate->hj_HashTupleSlot,
												 false);	/* do not pfree */
				econtext->ecxt_innertuple = inntuple;

				/*
				 * Reset temp memory each time; although this function doesn't
				 * do any qual eval, the caller will, so let's keep it
				 * parallel to ExecScanHashBucket.
				 */
				ResetExprContext(econtext);

				hjstate->hj_CurTuple = hashTuple;
				return true;
			}

			hashTuple = hashTuple->next;
		}

		/* allow this loop to be cancellable */
		CHECK_FOR_INTERRUPTS();
	}

	/*
	 * no more unmatched tuples
	 */
	return false;
}

/*
 * ExecHashTableReset
 *
 *		reset hash table header for new batch
 */
void
ExecHashTableReset(HashJoinTable hashtable)
{
	MemoryContext oldcxt;
	int			nbuckets = hashtable->nbuckets;

	/*
	 * Release all the hash buckets and tuples acquired in the prior pass, and
	 * reinitialize the context for a new pass.
	 */
	MemoryContextReset(hashtable->batchCxt);
	oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);

	/* Reallocate and reinitialize the hash bucket headers. */
	hashtable->buckets = (HashJoinTuple *)
		palloc0(nbuckets * sizeof(HashJoinTuple));

	hashtable->spaceUsed = 0;

	MemoryContextSwitchTo(oldcxt);

	/* Forget the chunks (the memory was freed by the context reset above). */
	hashtable->chunks = NULL;
}

/*
 * ExecHashTableResetMatchFlags
 *		Clear all the HeapTupleHeaderHasMatch flags in the table
 */
void
ExecHashTableResetMatchFlags(HashJoinTable hashtable)
{
	HashJoinTuple tuple;
	int			i;

	/* Reset all flags in the main table ... */
	for (i = 0; i < hashtable->nbuckets; i++)
	{
		for (tuple = hashtable->buckets[i]; tuple != NULL; tuple = tuple->next)
			HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
	}

	/* ... and the same for the skew buckets, if any */
	for (i = 0; i < hashtable->nSkewBuckets; i++)
	{
		int			j = hashtable->skewBucketNums[i];
		HashSkewBucket *skewBucket = hashtable->skewBucket[j];

		for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next)
			HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
	}
}


void
ExecReScanHash(HashState *node)
{
	/*
	 * if chgParam of subnode is not null then plan will be re-scanned by
	 * first ExecProcNode.
	 */
	if (node->ps.lefttree->chgParam == NULL)
		ExecReScan(node->ps.lefttree);
}


/*
 * ExecHashBuildSkewHash
 *
 *		Set up for skew optimization if we can identify the most common values
 *		(MCVs) of the outer relation's join key.  We make a skew hash bucket
 *		for the hash value of each MCV, up to the number of slots allowed
 *		based on available memory.
 */
static void
ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
{
	HeapTupleData *statsTuple;
	AttStatsSlot sslot;

	/* Do nothing if planner didn't identify the outer relation's join key */
	if (!OidIsValid(node->skewTable))
		return;
	/* Also, do nothing if we don't have room for at least one skew bucket */
	if (mcvsToUse <= 0)
		return;

	/*
	 * Try to find the MCV statistics for the outer relation's join key.
	 */
	statsTuple = SearchSysCache3(STATRELATTINH,
								 ObjectIdGetDatum(node->skewTable),
								 Int16GetDatum(node->skewColumn),
								 BoolGetDatum(node->skewInherit));
	if (!HeapTupleIsValid(statsTuple))
		return;

	if (get_attstatsslot(&sslot, statsTuple,
						 STATISTIC_KIND_MCV, InvalidOid,
						 ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
	{
		double		frac;
		int			nbuckets;
		FmgrInfo   *hashfunctions;
		int			i;

		if (mcvsToUse > sslot.nvalues)
			mcvsToUse = sslot.nvalues;

		/*
		 * Calculate the expected fraction of outer relation that will
		 * participate in the skew optimization.  If this isn't at least
		 * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
		 */
		frac = 0;
		for (i = 0; i < mcvsToUse; i++)
			frac += sslot.numbers[i];
		if (frac < SKEW_MIN_OUTER_FRACTION)
		{
			free_attstatsslot(&sslot);
			ReleaseSysCache(statsTuple);
			return;
		}

		/*
		 * Okay, set up the skew hashtable.
		 *
		 * skewBucket[] is an open addressing hashtable with a power of 2 size
		 * that is greater than the number of MCV values.  (This ensures there
		 * will be at least one null entry, so searches will always
		 * terminate.)
		 *
		 * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
		 * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
		 * since we limit pg_statistic entries to much less than that.
		 */
		nbuckets = 2;
		while (nbuckets <= mcvsToUse)
			nbuckets <<= 1;
		/* use two more bits just to help avoid collisions */
		nbuckets <<= 2;
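
		/*
		 * (E.g. with mcvsToUse = 100 the loop stops at nbuckets = 128, and
		 * the two extra bits bring it to 512, keeping the open addressing
		 * table below 20% occupancy.)
		 */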

		hashtable->skewEnabled = true;
		hashtable->skewBucketLen = nbuckets;

		/*
		 * We allocate the bucket memory in the hashtable's batch context. It
		 * is only needed during the first batch, and this ensures it will be
		 * automatically removed once the first batch is done.
		 */
		hashtable->skewBucket = (HashSkewBucket **)
			MemoryContextAllocZero(hashtable->batchCxt,
								   nbuckets * sizeof(HashSkewBucket *));
		hashtable->skewBucketNums = (int *)
			MemoryContextAllocZero(hashtable->batchCxt,
								   mcvsToUse * sizeof(int));

		hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
			+ mcvsToUse * sizeof(int);
		hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
			+ mcvsToUse * sizeof(int);
		if (hashtable->spaceUsed > hashtable->spacePeak)
			hashtable->spacePeak = hashtable->spaceUsed;

		/*
		 * Create a skew bucket for each MCV hash value.
		 *
		 * Note: it is very important that we create the buckets in order of
		 * decreasing MCV frequency.  If we have to remove some buckets, they
		 * must be removed in reverse order of creation (see notes in
		 * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
		 * be removed first.
		 */
		hashfunctions = hashtable->outer_hashfunctions;

		for (i = 0; i < mcvsToUse; i++)
		{
			uint32		hashvalue;
			int			bucket;

			hashvalue = DatumGetUInt32(FunctionCall1(&hashfunctions[0],
													 sslot.values[i]));

			/*
			 * While we have not hit a hole in the hashtable and have not hit
			 * the desired bucket, we have collided with some previous hash
			 * value, so try the next bucket location.  NB: this code must
			 * match ExecHashGetSkewBucket.
			 */
			bucket = hashvalue & (nbuckets - 1);
			while (hashtable->skewBucket[bucket] != NULL &&
				   hashtable->skewBucket[bucket]->hashvalue != hashvalue)
				bucket = (bucket + 1) & (nbuckets - 1);

			/*
			 * If we found an existing bucket with the same hashvalue, leave
			 * it alone.  It's okay for two MCVs to share a hashvalue.
			 */
			if (hashtable->skewBucket[bucket] != NULL)
				continue;

			/* Okay, create a new skew bucket for this hashvalue. */
			hashtable->skewBucket[bucket] = (HashSkewBucket *)
				MemoryContextAlloc(hashtable->batchCxt,
								   sizeof(HashSkewBucket));
			hashtable->skewBucket[bucket]->hashvalue = hashvalue;
			hashtable->skewBucket[bucket]->tuples = NULL;
			hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
			hashtable->nSkewBuckets++;
			hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
			hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
			if (hashtable->spaceUsed > hashtable->spacePeak)
				hashtable->spacePeak = hashtable->spaceUsed;
		}

		free_attstatsslot(&sslot);
	}

	ReleaseSysCache(statsTuple);
}

/*
 * ExecHashGetSkewBucket
 *
 *		Returns the index of the skew bucket for this hashvalue,
 *		or INVALID_SKEW_BUCKET_NO if the hashvalue is not
 *		associated with any active skew bucket.
 */
int
ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
{
	int			bucket;

	/*
	 * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
	 * particular, this happens after the initial batch is done).
	 */
	if (!hashtable->skewEnabled)
		return INVALID_SKEW_BUCKET_NO;

	/*
	 * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
	 */
	bucket = hashvalue & (hashtable->skewBucketLen - 1);

	/*
	 * While we have not hit a hole in the hashtable and have not hit the
	 * desired bucket, we have collided with some other hash value, so try the
	 * next bucket location.
	 */
	while (hashtable->skewBucket[bucket] != NULL &&
		   hashtable->skewBucket[bucket]->hashvalue != hashvalue)
		bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);

	/*
	 * Found the desired bucket?
	 */
	if (hashtable->skewBucket[bucket] != NULL)
		return bucket;

	/*
	 * There must not be any hashtable entry for this hash value.
	 */
	return INVALID_SKEW_BUCKET_NO;
}

/*
 * ExecHashSkewTableInsert
 *
 *		Insert a tuple into the skew hashtable.
 *
 * This should generally match up with the current-batch case in
 * ExecHashTableInsert.
 */
static void
ExecHashSkewTableInsert(HashJoinTable hashtable,
						TupleTableSlot *slot,
						uint32 hashvalue,
						int bucketNumber)
{
	MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot);
	HashJoinTuple hashTuple;
	int			hashTupleSize;

	/* Create the HashJoinTuple */
	hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
	hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
												   hashTupleSize);
	hashTuple->hashvalue = hashvalue;
	memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
	HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));

	/* Push it onto the front of the skew bucket's list */
	hashTuple->next = hashtable->skewBucket[bucketNumber]->tuples;
	hashtable->skewBucket[bucketNumber]->tuples = hashTuple;

	/* Account for space used, and back off if we've used too much */
	hashtable->spaceUsed += hashTupleSize;
	hashtable->spaceUsedSkew += hashTupleSize;
	if (hashtable->spaceUsed > hashtable->spacePeak)
		hashtable->spacePeak = hashtable->spaceUsed;
	while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
		ExecHashRemoveNextSkewBucket(hashtable);

	/* Check we are not over the total spaceAllowed, either */
	if (hashtable->spaceUsed > hashtable->spaceAllowed)
		ExecHashIncreaseNumBatches(hashtable);
}

/*
 *		ExecHashRemoveNextSkewBucket
 *
 *		Remove the least valuable skew bucket by pushing its tuples into
 *		the main hash table.
 */
static void
ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
{
	int			bucketToRemove;
	HashSkewBucket *bucket;
	uint32		hashvalue;
	int			bucketno;
	int			batchno;
	HashJoinTuple hashTuple;

	/* Locate the bucket to remove */
	bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
	bucket = hashtable->skewBucket[bucketToRemove];

	/*
	 * Calculate which bucket and batch the tuples belong to in the main
	 * hashtable.  They all have the same hash value, so it's the same for all
	 * of them.  Also note that it's not possible for nbatch to increase while
	 * we are processing the tuples.
	 */
	hashvalue = bucket->hashvalue;
	ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);

	/* Process all tuples in the bucket */
	hashTuple = bucket->tuples;
	while (hashTuple != NULL)
	{
		HashJoinTuple nextHashTuple = hashTuple->next;
		MinimalTuple tuple;
		Size		tupleSize;

		/*
		 * This code must agree with ExecHashTableInsert.  We do not use
		 * ExecHashTableInsert directly as ExecHashTableInsert expects a
		 * TupleTableSlot while we already have HashJoinTuples.
		 */
		tuple = HJTUPLE_MINTUPLE(hashTuple);
		tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;

		/* Decide whether to put the tuple in the hash table or a temp file */
		if (batchno == hashtable->curbatch)
		{
			/* Move the tuple to the main hash table */
			HashJoinTuple copyTuple;

			/*
			 * We must copy the tuple into the dense storage, else it will not
			 * be found by, eg, ExecHashIncreaseNumBatches.
			 */
			copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
			memcpy(copyTuple, hashTuple, tupleSize);
			pfree(hashTuple);

			copyTuple->next = hashtable->buckets[bucketno];
			hashtable->buckets[bucketno] = copyTuple;

			/* We have reduced skew space, but overall space doesn't change */
			hashtable->spaceUsedSkew -= tupleSize;
		}
		else
		{
			/* Put the tuple into a temp file for later batches */
			Assert(batchno > hashtable->curbatch);
			ExecHashJoinSaveTuple(tuple, hashvalue,
								  &hashtable->innerBatchFile[batchno]);
			pfree(hashTuple);
			hashtable->spaceUsed -= tupleSize;
			hashtable->spaceUsedSkew -= tupleSize;
		}

		hashTuple = nextHashTuple;

		/* allow this loop to be cancellable */
		CHECK_FOR_INTERRUPTS();
	}

	/*
	 * Free the bucket struct itself and reset the hashtable entry to NULL.
	 *
	 * NOTE: this is not nearly as simple as it looks on the surface, because
	 * of the possibility of collisions in the hashtable.  Suppose that hash
	 * values A and B collide at a particular hashtable entry, and that A was
	 * entered first so B gets shifted to a different table entry.  If we were
	 * to remove A first then ExecHashGetSkewBucket would mistakenly start
	 * reporting that B is not in the hashtable, because it would hit the NULL
	 * before finding B.  However, we always remove entries in the reverse
	 * order of creation, so this failure cannot happen.
	 */
	hashtable->skewBucket[bucketToRemove] = NULL;
	hashtable->nSkewBuckets--;
	pfree(bucket);
	hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
	hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;

	/*
	 * If we have removed all skew buckets then give up on skew optimization.
	 * Release the arrays since they aren't useful any more.
	 */
	if (hashtable->nSkewBuckets == 0)
	{
		hashtable->skewEnabled = false;
		pfree(hashtable->skewBucket);
		pfree(hashtable->skewBucketNums);
		hashtable->skewBucket = NULL;
		hashtable->skewBucketNums = NULL;
		hashtable->spaceUsed -= hashtable->spaceUsedSkew;
		hashtable->spaceUsedSkew = 0;
	}
}

/*
 * Allocate 'size' bytes from the currently active HashMemoryChunk
 */
static void *
dense_alloc(HashJoinTable hashtable, Size size)
{
	HashMemoryChunk newChunk;
	char	   *ptr;

	/* just in case the size is not already aligned properly */
	size = MAXALIGN(size);

	/*
	 * If tuple size is larger than 1/4 of chunk size (HASH_CHUNK_THRESHOLD),
	 * allocate a separate chunk.
	 */
	if (size > HASH_CHUNK_THRESHOLD)
	{
		/* allocate a dedicated chunk sized for just this oversized tuple */
		newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
														offsetof(HashMemoryChunkData, data) + size);
		newChunk->maxlen = size;
		newChunk->used = 0;
		newChunk->ntuples = 0;

		/*
		 * Add this chunk to the list after the first existing chunk, so that
		 * we don't lose the remaining space in the "current" chunk.
		 */
		if (hashtable->chunks != NULL)
		{
			newChunk->next = hashtable->chunks->next;
			hashtable->chunks->next = newChunk;
		}
		else
		{
			newChunk->next = hashtable->chunks;
			hashtable->chunks = newChunk;
		}

		newChunk->used += size;
		newChunk->ntuples += 1;

		return newChunk->data;
	}

	/*
	 * See if we have enough space for it in the current chunk (if any). If
	 * not, allocate a fresh chunk.
	 */
	if ((hashtable->chunks == NULL) ||
		(hashtable->chunks->maxlen - hashtable->chunks->used) < size)
	{
		/* allocate new chunk and put it at the beginning of the list */
		newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
														offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);

		newChunk->maxlen = HASH_CHUNK_SIZE;
		newChunk->used = size;
		newChunk->ntuples = 1;

		newChunk->next = hashtable->chunks;
		hashtable->chunks = newChunk;

		return newChunk->data;
	}

	/* There is enough space in the current chunk, let's add the tuple */
	ptr = hashtable->chunks->data + hashtable->chunks->used;
	hashtable->chunks->used += size;
	hashtable->chunks->ntuples += 1;

	/* return pointer to the start of the tuple memory */
	return ptr;
}