1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  *	  Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in arbitrary units established by these basic
7  * parameters:
8  *
9  *	seq_page_cost		Cost of a sequential page fetch
10  *	random_page_cost	Cost of a non-sequential page fetch
11  *	cpu_tuple_cost		Cost of typical CPU time to process a tuple
12  *	cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
13  *	cpu_operator_cost	Cost of CPU time to execute an operator or function
14  *	parallel_tuple_cost Cost of CPU time to pass a tuple from worker to master backend
15  *	parallel_setup_cost Cost of setting up shared memory for parallelism
16  *
17  * We expect that the kernel will typically do some amount of read-ahead
18  * optimization; this in conjunction with seek costs means that seq_page_cost
19  * is normally considerably less than random_page_cost.  (However, if the
20  * database is fully cached in RAM, it is reasonable to set them equal.)
21  *
22  * We also use a rough estimate "effective_cache_size" of the number of
23  * disk pages in Postgres + OS-level disk cache.  (We can't simply use
24  * NBuffers for this purpose because that would ignore the effects of
25  * the kernel's disk cache.)
26  *
27  * Obviously, taking constants for these values is an oversimplification,
28  * but it's tough enough to get any useful estimates even at this level of
29  * detail.  Note that all of these parameters are user-settable, in case
30  * the default values are drastically off for a particular platform.
31  *
32  * seq_page_cost and random_page_cost can also be overridden for an individual
33  * tablespace, in case some data is on a fast disk and other data is on a slow
34  * disk.  Per-tablespace overrides never apply to temporary work files such as
35  * an external sort or a materialize node that overflows work_mem.
36  *
37  * We compute two separate costs for each path:
38  *		total_cost: total estimated cost to fetch all tuples
39  *		startup_cost: cost that is expended before first tuple is fetched
40  * In some scenarios, such as when there is a LIMIT or we are implementing
41  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42  * path's result.  A caller can estimate the cost of fetching a partial
43  * result by interpolating between startup_cost and total_cost.  In detail:
44  *		actual_cost = startup_cost +
45  *			(total_cost - startup_cost) * tuples_to_fetch / path->rows;
46  * Note that a base relation's rows count (and, by extension, plan_rows for
47  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
48  * that this equation works properly.  (Note: while path->rows is never zero
49  * for ordinary relations, it is zero for paths for provably-empty relations,
50  * so beware of division-by-zero.)	The LIMIT is applied as a top-level
51  * plan node.
52  *
53  * For largely historical reasons, most of the routines in this module use
54  * the passed result Path only to store their results (rows, startup_cost and
55  * total_cost) into.  All the input data they need is passed as separate
56  * parameters, even though much of it could be extracted from the Path.
57  * An exception is made for the cost_XXXjoin() routines, which expect all
58  * the other fields of the passed XXXPath to be filled in, and similarly
59  * cost_index() assumes the passed IndexPath is valid except for its output
60  * values.
61  *
62  *
63  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
64  * Portions Copyright (c) 1994, Regents of the University of California
65  *
66  * IDENTIFICATION
67  *	  src/backend/optimizer/path/costsize.c
68  *
69  *-------------------------------------------------------------------------
70  */
71 
72 #include "postgres.h"
73 
74 #ifdef _MSC_VER
75 #include <float.h>				/* for _isnan */
76 #endif
77 #include <math.h>
78 
79 #include "access/amapi.h"
80 #include "access/htup_details.h"
81 #include "access/tsmapi.h"
82 #include "executor/executor.h"
83 #include "executor/nodeHash.h"
84 #include "miscadmin.h"
85 #include "nodes/nodeFuncs.h"
86 #include "optimizer/clauses.h"
87 #include "optimizer/cost.h"
88 #include "optimizer/pathnode.h"
89 #include "optimizer/paths.h"
90 #include "optimizer/placeholder.h"
91 #include "optimizer/plancat.h"
92 #include "optimizer/planmain.h"
93 #include "optimizer/restrictinfo.h"
94 #include "parser/parsetree.h"
95 #include "utils/lsyscache.h"
96 #include "utils/selfuncs.h"
97 #include "utils/spccache.h"
98 #include "utils/tuplesort.h"
99 
100 
101 #define LOG2(x)  (log(x) / 0.693147180559945)
102 
103 
104 double		seq_page_cost = DEFAULT_SEQ_PAGE_COST;
105 double		random_page_cost = DEFAULT_RANDOM_PAGE_COST;
106 double		cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
107 double		cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
108 double		cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
109 double		parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
110 double		parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
111 
112 int			effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
113 
114 Cost		disable_cost = 1.0e10;
115 
116 int			max_parallel_workers_per_gather = 0;
117 
118 bool		enable_seqscan = true;
119 bool		enable_indexscan = true;
120 bool		enable_indexonlyscan = true;
121 bool		enable_bitmapscan = true;
122 bool		enable_tidscan = true;
123 bool		enable_sort = true;
124 bool		enable_hashagg = true;
125 bool		enable_nestloop = true;
126 bool		enable_material = true;
127 bool		enable_mergejoin = true;
128 bool		enable_hashjoin = true;
129 
130 typedef struct
131 {
132 	PlannerInfo *root;
133 	QualCost	total;
134 } cost_qual_eval_context;
135 
136 static List *extract_nonindex_conditions(List *qual_clauses, List *indexquals);
137 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
138 			   RestrictInfo *rinfo,
139 			   PathKey *pathkey);
140 static void cost_rescan(PlannerInfo *root, Path *path,
141 			Cost *rescan_startup_cost, Cost *rescan_total_cost);
142 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
143 static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
144 						  ParamPathInfo *param_info,
145 						  QualCost *qpqual_cost);
146 static bool has_indexed_join_quals(NestPath *joinpath);
147 static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
148 				   List *quals);
149 static double calc_joinrel_size_estimate(PlannerInfo *root,
150 						   RelOptInfo *joinrel,
151 						   RelOptInfo *outer_rel,
152 						   RelOptInfo *inner_rel,
153 						   double outer_rows,
154 						   double inner_rows,
155 						   SpecialJoinInfo *sjinfo,
156 						   List *restrictlist);
157 static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
158 								 Relids outer_relids,
159 								 Relids inner_relids,
160 								 SpecialJoinInfo *sjinfo,
161 								 List **restrictlist);
162 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
163 static double relation_byte_size(double tuples, int width);
164 static double page_size(double tuples, int width);
165 static double get_parallel_divisor(Path *path);
166 
167 
168 /*
169  * clamp_row_est
170  *		Force a row-count estimate to a sane value.
171  */
172 double
173 clamp_row_est(double nrows)
174 {
175 	/*
176 	 * Force estimate to be at least one row, to make explain output look
177 	 * better and to avoid possible divide-by-zero when interpolating costs.
178 	 * Make it an integer, too.
179 	 */
180 	if (nrows <= 1.0)
181 		nrows = 1.0;
182 	else
183 		nrows = rint(nrows);
184 
185 	return nrows;
186 }
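
/*
 * Illustrative sketch only, not used by the planner: the partial-fetch
 * interpolation described in the file header comment, written out as a
 * helper.  The function name is hypothetical; callers that cost LIMIT-style
 * partial fetches inline this arithmetic themselves.  Wrapped in NOT_USED
 * so it is never compiled.
 */
#ifdef NOT_USED
static Cost
interpolate_partial_cost(Path *path, double tuples_to_fetch)
{
	/* clamp_row_est guards against paths for provably-empty relations */
	double		rows = clamp_row_est(path->rows);

	if (tuples_to_fetch >= rows)
		return path->total_cost;
	return path->startup_cost +
		(path->total_cost - path->startup_cost) * tuples_to_fetch / rows;
}
#endif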
187 
188 
189 /*
190  * cost_seqscan
191  *	  Determines and returns the cost of scanning a relation sequentially.
192  *
193  * 'baserel' is the relation to be scanned
194  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
195  */
196 void
197 cost_seqscan(Path *path, PlannerInfo *root,
198 			 RelOptInfo *baserel, ParamPathInfo *param_info)
199 {
200 	Cost		startup_cost = 0;
201 	Cost		cpu_run_cost;
202 	Cost		disk_run_cost;
203 	double		spc_seq_page_cost;
204 	QualCost	qpqual_cost;
205 	Cost		cpu_per_tuple;
206 
207 	/* Should only be applied to base relations */
208 	Assert(baserel->relid > 0);
209 	Assert(baserel->rtekind == RTE_RELATION);
210 
211 	/* Mark the path with the correct row estimate */
212 	if (param_info)
213 		path->rows = param_info->ppi_rows;
214 	else
215 		path->rows = baserel->rows;
216 
217 	if (!enable_seqscan)
218 		startup_cost += disable_cost;
219 
220 	/* fetch estimated page cost for tablespace containing table */
221 	get_tablespace_page_costs(baserel->reltablespace,
222 							  NULL,
223 							  &spc_seq_page_cost);
224 
225 	/*
226 	 * disk costs
227 	 */
228 	disk_run_cost = spc_seq_page_cost * baserel->pages;
229 
230 	/* CPU costs */
231 	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
232 
233 	startup_cost += qpqual_cost.startup;
234 	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
235 	cpu_run_cost = cpu_per_tuple * baserel->tuples;
236 	/* tlist eval costs are paid per output row, not per tuple scanned */
237 	startup_cost += path->pathtarget->cost.startup;
238 	cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
239 
240 	/* Adjust costing for parallelism, if used. */
241 	if (path->parallel_workers > 0)
242 	{
243 		double		parallel_divisor = get_parallel_divisor(path);
244 
245 		/* The CPU cost is divided among all the workers. */
246 		cpu_run_cost /= parallel_divisor;
247 
248 		/*
249 		 * It may be possible to amortize some of the I/O cost, but probably
250 		 * not very much, because most operating systems already do aggressive
251 		 * prefetching.  For now, we assume that the disk run cost can't be
252 		 * amortized at all.
253 		 */
254 
255 		/*
256 		 * In the case of a parallel plan, the row count needs to represent
257 		 * the number of tuples processed per worker.
258 		 */
259 		path->rows = clamp_row_est(path->rows / parallel_divisor);
260 	}
261 
262 	path->startup_cost = startup_cost;
263 	path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
264 }
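
/*
 * Illustrative sketch only (hypothetical, not used by the planner): with
 * quals, tlist costs, parallelism, and per-tablespace costs ignored, the
 * sequential-scan model above reduces to the helper below.  With the
 * default settings (seq_page_cost = 1.0, cpu_tuple_cost = 0.01), a
 * 100-page, 10000-tuple table costs about 100 + 100 = 200 units.
 */
#ifdef NOT_USED
static Cost
simple_seqscan_cost(BlockNumber pages, double tuples)
{
	return seq_page_cost * (double) pages + cpu_tuple_cost * tuples;
}
#endif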
265 
266 /*
267  * cost_samplescan
268  *	  Determines and returns the cost of scanning a relation using sampling.
269  *
270  * 'baserel' is the relation to be scanned
271  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
272  */
273 void
274 cost_samplescan(Path *path, PlannerInfo *root,
275 				RelOptInfo *baserel, ParamPathInfo *param_info)
276 {
277 	Cost		startup_cost = 0;
278 	Cost		run_cost = 0;
279 	RangeTblEntry *rte;
280 	TableSampleClause *tsc;
281 	TsmRoutine *tsm;
282 	double		spc_seq_page_cost,
283 				spc_random_page_cost,
284 				spc_page_cost;
285 	QualCost	qpqual_cost;
286 	Cost		cpu_per_tuple;
287 
288 	/* Should only be applied to base relations with tablesample clauses */
289 	Assert(baserel->relid > 0);
290 	rte = planner_rt_fetch(baserel->relid, root);
291 	Assert(rte->rtekind == RTE_RELATION);
292 	tsc = rte->tablesample;
293 	Assert(tsc != NULL);
294 	tsm = GetTsmRoutine(tsc->tsmhandler);
295 
296 	/* Mark the path with the correct row estimate */
297 	if (param_info)
298 		path->rows = param_info->ppi_rows;
299 	else
300 		path->rows = baserel->rows;
301 
302 	/* fetch estimated page cost for tablespace containing table */
303 	get_tablespace_page_costs(baserel->reltablespace,
304 							  &spc_random_page_cost,
305 							  &spc_seq_page_cost);
306 
307 	/* if NextSampleBlock is used, assume random access, else sequential */
308 	spc_page_cost = (tsm->NextSampleBlock != NULL) ?
309 		spc_random_page_cost : spc_seq_page_cost;
310 
311 	/*
312 	 * disk costs (recall that baserel->pages has already been set to the
313 	 * number of pages the sampling method will visit)
314 	 */
315 	run_cost += spc_page_cost * baserel->pages;
316 
317 	/*
318 	 * CPU costs (recall that baserel->tuples has already been set to the
319 	 * number of tuples the sampling method will select).  Note that we ignore
320 	 * execution cost of the TABLESAMPLE parameter expressions; they will be
321 	 * evaluated only once per scan, and in most usages they'll likely be
322 	 * simple constants anyway.  We also don't charge anything for the
323 	 * calculations the sampling method might do internally.
324 	 */
325 	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
326 
327 	startup_cost += qpqual_cost.startup;
328 	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
329 	run_cost += cpu_per_tuple * baserel->tuples;
330 	/* tlist eval costs are paid per output row, not per tuple scanned */
331 	startup_cost += path->pathtarget->cost.startup;
332 	run_cost += path->pathtarget->cost.per_tuple * path->rows;
333 
334 	path->startup_cost = startup_cost;
335 	path->total_cost = startup_cost + run_cost;
336 }
337 
338 /*
339  * cost_gather
340  *	  Determines and returns the cost of gather path.
341  *
342  * 'rel' is the relation to be operated upon
343  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
344  * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
345  * both 'rel' and 'param_info'.  This is useful when the path doesn't exactly
346  * correspond to any particular RelOptInfo.
347  */
348 void
349 cost_gather(GatherPath *path, PlannerInfo *root,
350 			RelOptInfo *rel, ParamPathInfo *param_info,
351 			double *rows)
352 {
353 	Cost		startup_cost = 0;
354 	Cost		run_cost = 0;
355 
356 	/* Mark the path with the correct row estimate */
357 	if (rows)
358 		path->path.rows = *rows;
359 	else if (param_info)
360 		path->path.rows = param_info->ppi_rows;
361 	else
362 		path->path.rows = rel->rows;
363 
364 	startup_cost = path->subpath->startup_cost;
365 
366 	run_cost = path->subpath->total_cost - path->subpath->startup_cost;
367 
368 	/* Parallel setup and communication cost. */
369 	startup_cost += parallel_setup_cost;
370 	run_cost += parallel_tuple_cost * path->path.rows;
371 
372 	path->path.startup_cost = startup_cost;
373 	path->path.total_cost = (startup_cost + run_cost);
374 }
375 
376 /*
377  * cost_index
378  *	  Determines and returns the cost of scanning a relation using an index.
379  *
380  * 'path' describes the indexscan under consideration, and is complete
381  *		except for the fields to be set by this routine
382  * 'loop_count' is the number of repetitions of the indexscan to factor into
383  *		estimates of caching behavior
384  *
385  * In addition to rows, startup_cost and total_cost, cost_index() sets the
386  * path's indextotalcost and indexselectivity fields.  These values will be
387  * needed if the IndexPath is used in a BitmapIndexScan.
388  *
389  * NOTE: path->indexquals must contain only clauses usable as index
390  * restrictions.  Any additional quals evaluated as qpquals may reduce the
391  * number of returned tuples, but they won't reduce the number of tuples
392  * we have to fetch from the table, so they don't reduce the scan cost.
393  */
394 void
395 cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
396 {
397 	IndexOptInfo *index = path->indexinfo;
398 	RelOptInfo *baserel = index->rel;
399 	bool		indexonly = (path->path.pathtype == T_IndexOnlyScan);
400 	amcostestimate_function amcostestimate;
401 	List	   *qpquals;
402 	Cost		startup_cost = 0;
403 	Cost		run_cost = 0;
404 	Cost		indexStartupCost;
405 	Cost		indexTotalCost;
406 	Selectivity indexSelectivity;
407 	double		indexCorrelation,
408 				csquared;
409 	double		spc_seq_page_cost,
410 				spc_random_page_cost;
411 	Cost		min_IO_cost,
412 				max_IO_cost;
413 	QualCost	qpqual_cost;
414 	Cost		cpu_per_tuple;
415 	double		tuples_fetched;
416 	double		pages_fetched;
417 
418 	/* Should only be applied to base relations */
419 	Assert(IsA(baserel, RelOptInfo) &&
420 		   IsA(index, IndexOptInfo));
421 	Assert(baserel->relid > 0);
422 	Assert(baserel->rtekind == RTE_RELATION);
423 
424 	/*
425 	 * Mark the path with the correct row estimate, and identify which quals
426 	 * will need to be enforced as qpquals.  We need not check any quals that
427 	 * are implied by the index's predicate, so we can use indrestrictinfo not
428 	 * baserestrictinfo as the list of relevant restriction clauses for the
429 	 * rel.
430 	 */
431 	if (path->path.param_info)
432 	{
433 		path->path.rows = path->path.param_info->ppi_rows;
434 		/* qpquals come from the rel's restriction clauses and ppi_clauses */
435 		qpquals = list_concat(
436 				extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
437 											path->indexquals),
438 			  extract_nonindex_conditions(path->path.param_info->ppi_clauses,
439 										  path->indexquals));
440 	}
441 	else
442 	{
443 		path->path.rows = baserel->rows;
444 		/* qpquals come from just the rel's restriction clauses */
445 		qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
446 											  path->indexquals);
447 	}
448 
449 	if (!enable_indexscan)
450 		startup_cost += disable_cost;
451 	/* we don't need to check enable_indexonlyscan; indxpath.c does that */
452 
453 	/*
454 	 * Call index-access-method-specific code to estimate the processing cost
455 	 * for scanning the index, as well as the selectivity of the index (ie,
456 	 * the fraction of main-table tuples we will have to retrieve) and its
457 	 * correlation to the main-table tuple order.  We need a cast here because
458 	 * relation.h uses a weak function type to avoid including amapi.h.
459 	 */
460 	amcostestimate = (amcostestimate_function) index->amcostestimate;
461 	amcostestimate(root, path, loop_count,
462 				   &indexStartupCost, &indexTotalCost,
463 				   &indexSelectivity, &indexCorrelation);
464 
465 	/*
466 	 * Save amcostestimate's results for possible use in bitmap scan planning.
467 	 * We don't bother to save indexStartupCost or indexCorrelation, because a
468 	 * bitmap scan doesn't care about either.
469 	 */
470 	path->indextotalcost = indexTotalCost;
471 	path->indexselectivity = indexSelectivity;
472 
473 	/* all costs for touching index itself included here */
474 	startup_cost += indexStartupCost;
475 	run_cost += indexTotalCost - indexStartupCost;
476 
477 	/* estimate number of main-table tuples fetched */
478 	tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
479 
480 	/* fetch estimated page costs for tablespace containing table */
481 	get_tablespace_page_costs(baserel->reltablespace,
482 							  &spc_random_page_cost,
483 							  &spc_seq_page_cost);
484 
485 	/*----------
486 	 * Estimate number of main-table pages fetched, and compute I/O cost.
487 	 *
488 	 * When the index ordering is uncorrelated with the table ordering,
489 	 * we use an approximation proposed by Mackert and Lohman (see
490 	 * index_pages_fetched() for details) to compute the number of pages
491 	 * fetched, and then charge spc_random_page_cost per page fetched.
492 	 *
493 	 * When the index ordering is exactly correlated with the table ordering
494 	 * (just after a CLUSTER, for example), the number of pages fetched should
495 	 * be exactly selectivity * table_size.  What's more, all but the first
496 	 * will be sequential fetches, not the random fetches that occur in the
497 	 * uncorrelated case.  So if the number of pages is more than 1, we
498 	 * ought to charge
499 	 *		spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
500 	 * For partially-correlated indexes, we ought to charge somewhere between
501 	 * these two estimates.  We currently interpolate linearly between the
502 	 * estimates based on the correlation squared (XXX is that appropriate?).
503 	 *
504 	 * If it's an index-only scan, then we will not need to fetch any heap
505 	 * pages for which the visibility map shows all tuples are visible.
506 	 * Hence, reduce the estimated number of heap fetches accordingly.
507 	 * We use the measured fraction of the entire heap that is all-visible,
508 	 * which might not be particularly relevant to the subset of the heap
509 	 * that this query will fetch; but it's not clear how to do better.
510 	 *----------
511 	 */
512 	if (loop_count > 1)
513 	{
514 		/*
515 		 * For repeated indexscans, the appropriate estimate for the
516 		 * uncorrelated case is to scale up the number of tuples fetched in
517 		 * the Mackert and Lohman formula by the number of scans, so that we
518 		 * estimate the number of pages fetched by all the scans; then
519 		 * pro-rate the costs for one scan.  In this case we assume all the
520 		 * fetches are random accesses.
521 		 */
522 		pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
523 											baserel->pages,
524 											(double) index->pages,
525 											root);
526 
527 		if (indexonly)
528 			pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
529 
530 		max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
531 
532 		/*
533 		 * In the perfectly correlated case, the number of pages touched by
534 		 * each scan is selectivity * table_size, and we can use the Mackert
535 		 * and Lohman formula at the page level to estimate how much work is
536 		 * saved by caching across scans.  We still assume all the fetches are
537 		 * random, though, which is an overestimate that's hard to correct for
538 		 * without double-counting the cache effects.  (But in most cases
539 		 * where such a plan is actually interesting, only one page would get
540 		 * fetched per scan anyway, so it shouldn't matter much.)
541 		 */
542 		pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
543 
544 		pages_fetched = index_pages_fetched(pages_fetched * loop_count,
545 											baserel->pages,
546 											(double) index->pages,
547 											root);
548 
549 		if (indexonly)
550 			pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
551 
552 		min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
553 	}
554 	else
555 	{
556 		/*
557 		 * Normal case: apply the Mackert and Lohman formula, and then
558 		 * interpolate between that and the correlation-derived result.
559 		 */
560 		pages_fetched = index_pages_fetched(tuples_fetched,
561 											baserel->pages,
562 											(double) index->pages,
563 											root);
564 
565 		if (indexonly)
566 			pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
567 
568 		/* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
569 		max_IO_cost = pages_fetched * spc_random_page_cost;
570 
571 		/* min_IO_cost is for the perfectly correlated case (csquared=1) */
572 		pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
573 
574 		if (indexonly)
575 			pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
576 
577 		if (pages_fetched > 0)
578 		{
579 			min_IO_cost = spc_random_page_cost;
580 			if (pages_fetched > 1)
581 				min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
582 		}
583 		else
584 			min_IO_cost = 0;
585 	}
586 
587 	/*
588 	 * Now interpolate based on estimated index order correlation to get total
589 	 * disk I/O cost for main table accesses.
590 	 */
591 	csquared = indexCorrelation * indexCorrelation;
592 
593 	run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
594 
595 	/*
596 	 * Estimate CPU costs per tuple.
597 	 *
598 	 * What we want here is cpu_tuple_cost plus the evaluation costs of any
599 	 * qual clauses that we have to evaluate as qpquals.
600 	 */
601 	cost_qual_eval(&qpqual_cost, qpquals, root);
602 
603 	startup_cost += qpqual_cost.startup;
604 	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
605 
606 	run_cost += cpu_per_tuple * tuples_fetched;
607 
608 	/* tlist eval costs are paid per output row, not per tuple scanned */
609 	startup_cost += path->path.pathtarget->cost.startup;
610 	run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
611 
612 	path->path.startup_cost = startup_cost;
613 	path->path.total_cost = startup_cost + run_cost;
614 }
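
/*
 * Illustrative sketch only (hypothetical helper, not part of cost_index):
 * the correlation-based interpolation used above.  At correlation 0 the
 * result is max_IO_cost (all-random fetches); at correlation +/-1 it is
 * min_IO_cost (mostly sequential fetches); in between we move linearly in
 * the square of the correlation.
 */
#ifdef NOT_USED
static Cost
interpolate_index_io_cost(Cost min_IO_cost, Cost max_IO_cost,
						  double indexCorrelation)
{
	double		csquared = indexCorrelation * indexCorrelation;

	return max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
}
#endif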
615 
616 /*
617  * extract_nonindex_conditions
618  *
619  * Given a list of quals to be enforced in an indexscan, extract the ones that
620  * will have to be applied as qpquals (ie, the index machinery won't handle
621  * them).  The actual rules for this appear in create_indexscan_plan() in
622  * createplan.c, but the full rules are fairly expensive and we don't want to
623  * go to that much effort for index paths that don't get selected for the
624  * final plan.  So we approximate it as quals that don't appear directly in
625  * indexquals and also are not redundant children of the same EquivalenceClass
626  * as some indexqual.  This method neglects some infrequently-relevant
627  * considerations, specifically clauses that needn't be checked because they
628  * are implied by an indexqual.  It does not seem worth the cycles to try to
629  * factor that in at this stage, even though createplan.c will take pains to
630  * remove such unnecessary clauses from the qpquals list if this path is
631  * selected for use.
632  */
633 static List *
634 extract_nonindex_conditions(List *qual_clauses, List *indexquals)
635 {
636 	List	   *result = NIL;
637 	ListCell   *lc;
638 
639 	foreach(lc, qual_clauses)
640 	{
641 		RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
642 
643 		Assert(IsA(rinfo, RestrictInfo));
644 		if (rinfo->pseudoconstant)
645 			continue;			/* we may drop pseudoconstants here */
646 		if (list_member_ptr(indexquals, rinfo))
647 			continue;			/* simple duplicate */
648 		if (is_redundant_derived_clause(rinfo, indexquals))
649 			continue;			/* derived from same EquivalenceClass */
650 		/* ... skip the predicate proof attempt createplan.c will try ... */
651 		result = lappend(result, rinfo);
652 	}
653 	return result;
654 }
655 
656 /*
657  * index_pages_fetched
658  *	  Estimate the number of pages actually fetched after accounting for
659  *	  cache effects.
660  *
661  * We use an approximation proposed by Mackert and Lohman, "Index Scans
662  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
663  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
664  * The Mackert and Lohman approximation is that the number of pages
665  * fetched is
666  *	PF =
667  *		min(2TNs/(2T+Ns), T)			when T <= b
668  *		2TNs/(2T+Ns)					when T > b and Ns <= 2Tb/(2T-b)
669  *		b + (Ns - 2Tb/(2T-b))*(T-b)/T	when T > b and Ns > 2Tb/(2T-b)
670  * where
671  *		T = # pages in table
672  *		N = # tuples in table
673  *		s = selectivity = fraction of table to be scanned
674  *		b = # buffer pages available (we include kernel space here)
675  *
676  * We assume that effective_cache_size is the total number of buffer pages
677  * available for the whole query, and pro-rate that space across all the
678  * tables in the query and the index currently under consideration.  (This
679  * ignores space needed for other indexes used by the query, but since we
680  * don't know which indexes will get used, we can't estimate that very well;
681  * and in any case counting all the tables may well be an overestimate, since
682  * depending on the join plan not all the tables may be scanned concurrently.)
683  *
684  * The product Ns is the number of tuples fetched; we pass in that
685  * product rather than calculating it here.  "pages" is the number of pages
686  * in the object under consideration (either an index or a table).
687  * "index_pages" is the amount to add to the total table space, which was
688  * computed for us by query_planner.
689  *
690  * Caller is expected to have ensured that tuples_fetched is greater than zero
691  * and rounded to integer (see clamp_row_est).  The result will likewise be
692  * greater than zero and integral.
693  */
694 double
695 index_pages_fetched(double tuples_fetched, BlockNumber pages,
696 					double index_pages, PlannerInfo *root)
697 {
698 	double		pages_fetched;
699 	double		total_pages;
700 	double		T,
701 				b;
702 
703 	/* T is # pages in table, but don't allow it to be zero */
704 	T = (pages > 1) ? (double) pages : 1.0;
705 
706 	/* Compute number of pages assumed to be competing for cache space */
707 	total_pages = root->total_table_pages + index_pages;
708 	total_pages = Max(total_pages, 1.0);
709 	Assert(T <= total_pages);
710 
711 	/* b is pro-rated share of effective_cache_size */
712 	b = (double) effective_cache_size * T / total_pages;
713 
714 	/* force it positive and integral */
715 	if (b <= 1.0)
716 		b = 1.0;
717 	else
718 		b = ceil(b);
719 
720 	/* This part is the Mackert and Lohman formula */
721 	if (T <= b)
722 	{
723 		pages_fetched =
724 			(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
725 		if (pages_fetched >= T)
726 			pages_fetched = T;
727 		else
728 			pages_fetched = ceil(pages_fetched);
729 	}
730 	else
731 	{
732 		double		lim;
733 
734 		lim = (2.0 * T * b) / (2.0 * T - b);
735 		if (tuples_fetched <= lim)
736 		{
737 			pages_fetched =
738 				(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
739 		}
740 		else
741 		{
742 			pages_fetched =
743 				b + (tuples_fetched - lim) * (T - b) / T;
744 		}
745 		pages_fetched = ceil(pages_fetched);
746 	}
747 	return pages_fetched;
748 }
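
/*
 * Worked example of the Mackert and Lohman formula (illustrative only; the
 * numbers are hypothetical).  Take T = 10000 pages, a buffer share of
 * b = 2000 pages, and Ns = 5000 tuples fetched.  Then T > b, the re-read
 * threshold is 2Tb/(2T-b) = 2222.2, and since Ns exceeds it the third
 * branch applies:
 *		PF = b + (Ns - 2222.2) * (T - b) / T = 4222.2, rounded up to 4223.
 * The helper below re-derives that number directly from the formula; it is
 * wrapped in NOT_USED and never compiled.
 */
#ifdef NOT_USED
static double
mackert_lohman_example(void)
{
	double		T = 10000.0;	/* pages in table */
	double		b = 2000.0;		/* assumed buffer share */
	double		Ns = 5000.0;	/* tuples fetched */
	double		pf;

	if (T <= b)
		pf = Min((2.0 * T * Ns) / (2.0 * T + Ns), T);
	else
	{
		double		lim = (2.0 * T * b) / (2.0 * T - b);

		if (Ns <= lim)
			pf = (2.0 * T * Ns) / (2.0 * T + Ns);
		else
			pf = b + (Ns - lim) * (T - b) / T;
	}
	return ceil(pf);			/* 4223 for these inputs */
}
#endif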
749 
750 /*
751  * get_indexpath_pages
752  *		Determine the total size of the indexes used in a bitmap index path.
753  *
754  * Note: if the same index is used more than once in a bitmap tree, we will
755  * count it multiple times, which perhaps is the wrong thing ... but it's
756  * not completely clear, and detecting duplicates is difficult, so ignore it
757  * for now.
758  */
759 static double
760 get_indexpath_pages(Path *bitmapqual)
761 {
762 	double		result = 0;
763 	ListCell   *l;
764 
765 	if (IsA(bitmapqual, BitmapAndPath))
766 	{
767 		BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
768 
769 		foreach(l, apath->bitmapquals)
770 		{
771 			result += get_indexpath_pages((Path *) lfirst(l));
772 		}
773 	}
774 	else if (IsA(bitmapqual, BitmapOrPath))
775 	{
776 		BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
777 
778 		foreach(l, opath->bitmapquals)
779 		{
780 			result += get_indexpath_pages((Path *) lfirst(l));
781 		}
782 	}
783 	else if (IsA(bitmapqual, IndexPath))
784 	{
785 		IndexPath  *ipath = (IndexPath *) bitmapqual;
786 
787 		result = (double) ipath->indexinfo->pages;
788 	}
789 	else
790 		elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
791 
792 	return result;
793 }
794 
795 /*
796  * cost_bitmap_heap_scan
797  *	  Determines and returns the cost of scanning a relation using a bitmap
798  *	  index-then-heap plan.
799  *
800  * 'baserel' is the relation to be scanned
801  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
802  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
803  * 'loop_count' is the number of repetitions of the indexscan to factor into
804  *		estimates of caching behavior
805  *
806  * Note: the component IndexPaths in bitmapqual should have been costed
807  * using the same loop_count.
808  */
809 void
810 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
811 					  ParamPathInfo *param_info,
812 					  Path *bitmapqual, double loop_count)
813 {
814 	Cost		startup_cost = 0;
815 	Cost		run_cost = 0;
816 	Cost		indexTotalCost;
817 	Selectivity indexSelectivity;
818 	QualCost	qpqual_cost;
819 	Cost		cpu_per_tuple;
820 	Cost		cost_per_page;
821 	double		tuples_fetched;
822 	double		pages_fetched;
823 	double		spc_seq_page_cost,
824 				spc_random_page_cost;
825 	double		T;
826 
827 	/* Should only be applied to base relations */
828 	Assert(IsA(baserel, RelOptInfo));
829 	Assert(baserel->relid > 0);
830 	Assert(baserel->rtekind == RTE_RELATION);
831 
832 	/* Mark the path with the correct row estimate */
833 	if (param_info)
834 		path->rows = param_info->ppi_rows;
835 	else
836 		path->rows = baserel->rows;
837 
838 	if (!enable_bitmapscan)
839 		startup_cost += disable_cost;
840 
841 	/*
842 	 * Fetch total cost of obtaining the bitmap, as well as its total
843 	 * selectivity.
844 	 */
845 	cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
846 
847 	startup_cost += indexTotalCost;
848 
849 	/* Fetch estimated page costs for tablespace containing table. */
850 	get_tablespace_page_costs(baserel->reltablespace,
851 							  &spc_random_page_cost,
852 							  &spc_seq_page_cost);
853 
854 	/*
855 	 * Estimate number of main-table pages fetched.
856 	 */
857 	tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
858 
859 	T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
860 
861 	if (loop_count > 1)
862 	{
863 		/*
864 		 * For repeated bitmap scans, scale up the number of tuples fetched in
865 		 * the Mackert and Lohman formula by the number of scans, so that we
866 		 * estimate the number of pages fetched by all the scans. Then
867 		 * pro-rate for one scan.
868 		 */
869 		pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
870 											baserel->pages,
871 											get_indexpath_pages(bitmapqual),
872 											root);
873 		pages_fetched /= loop_count;
874 	}
875 	else
876 	{
877 		/*
878 		 * For a single scan, the number of heap pages that need to be fetched
879 		 * is the same as the Mackert and Lohman formula for the case T <= b
880 		 * (ie, no re-reads needed).
881 		 */
882 		pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
883 	}
884 	if (pages_fetched >= T)
885 		pages_fetched = T;
886 	else
887 		pages_fetched = ceil(pages_fetched);
888 
889 	/*
890 	 * For small numbers of pages we should charge spc_random_page_cost
891 	 * apiece, while if nearly all the table's pages are being read, it's more
892 	 * appropriate to charge spc_seq_page_cost apiece.  The effect is
893 	 * nonlinear, too. For lack of a better idea, interpolate like this to
894 	 * determine the cost per page.
895 	 */
896 	if (pages_fetched >= 2.0)
897 		cost_per_page = spc_random_page_cost -
898 			(spc_random_page_cost - spc_seq_page_cost)
899 			* sqrt(pages_fetched / T);
900 	else
901 		cost_per_page = spc_random_page_cost;
902 
903 	run_cost += pages_fetched * cost_per_page;
904 
905 	/*
906 	 * Estimate CPU costs per tuple.
907 	 *
908 	 * Often the indexquals don't need to be rechecked at each tuple ... but
909 	 * not always, especially not if there are enough tuples involved that the
910 	 * bitmaps become lossy.  For the moment, just assume they will be
911 	 * rechecked always.  This means we charge the full freight for all the
912 	 * scan clauses.
913 	 */
914 	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
915 
916 	startup_cost += qpqual_cost.startup;
917 	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
918 
919 	run_cost += cpu_per_tuple * tuples_fetched;
920 
921 	/* tlist eval costs are paid per output row, not per tuple scanned */
922 	startup_cost += path->pathtarget->cost.startup;
923 	run_cost += path->pathtarget->cost.per_tuple * path->rows;
924 
925 	path->startup_cost = startup_cost;
926 	path->total_cost = startup_cost + run_cost;
927 }
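
/*
 * Illustrative sketch only (hypothetical, not used by the planner): the
 * per-page charge interpolated above.  For a handful of pages each fetch
 * costs about spc_random_page_cost; as the fraction of the table read
 * approaches 1, the charge slides toward spc_seq_page_cost along a sqrt
 * curve.
 */
#ifdef NOT_USED
static Cost
bitmap_heap_cost_per_page(double pages_fetched, double T,
						  double spc_random_page_cost,
						  double spc_seq_page_cost)
{
	if (pages_fetched < 2.0)
		return spc_random_page_cost;
	return spc_random_page_cost -
		(spc_random_page_cost - spc_seq_page_cost) * sqrt(pages_fetched / T);
}
#endif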
928 
929 /*
930  * cost_bitmap_tree_node
931  *		Extract cost and selectivity from a bitmap tree node (index/and/or)
932  */
933 void
934 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
935 {
936 	if (IsA(path, IndexPath))
937 	{
938 		*cost = ((IndexPath *) path)->indextotalcost;
939 		*selec = ((IndexPath *) path)->indexselectivity;
940 
941 		/*
942 		 * Charge a small amount per retrieved tuple to reflect the costs of
943 		 * manipulating the bitmap.  This is mostly to make sure that a bitmap
944 		 * scan doesn't look to be the same cost as an indexscan to retrieve a
945 		 * single tuple.
946 		 */
947 		*cost += 0.1 * cpu_operator_cost * path->rows;
948 	}
949 	else if (IsA(path, BitmapAndPath))
950 	{
951 		*cost = path->total_cost;
952 		*selec = ((BitmapAndPath *) path)->bitmapselectivity;
953 	}
954 	else if (IsA(path, BitmapOrPath))
955 	{
956 		*cost = path->total_cost;
957 		*selec = ((BitmapOrPath *) path)->bitmapselectivity;
958 	}
959 	else
960 	{
961 		elog(ERROR, "unrecognized node type: %d", nodeTag(path));
962 		*cost = *selec = 0;		/* keep compiler quiet */
963 	}
964 }
965 
966 /*
967  * cost_bitmap_and_node
968  *		Estimate the cost of a BitmapAnd node
969  *
970  * Note that this considers only the costs of index scanning and bitmap
971  * creation, not the eventual heap access.  In that sense the object isn't
972  * truly a Path, but it has enough path-like properties (costs in particular)
973  * to warrant treating it as one.  We don't bother to set the path rows field,
974  * however.
975  */
976 void
977 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
978 {
979 	Cost		totalCost;
980 	Selectivity selec;
981 	ListCell   *l;
982 
983 	/*
984 	 * We estimate AND selectivity on the assumption that the inputs are
985 	 * independent.  This is probably often wrong, but we don't have the info
986 	 * to do better.
987 	 *
988 	 * The runtime cost of the BitmapAnd itself is estimated at 100x
989 	 * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
990 	 * definitely too simplistic?
991 	 */
992 	totalCost = 0.0;
993 	selec = 1.0;
994 	foreach(l, path->bitmapquals)
995 	{
996 		Path	   *subpath = (Path *) lfirst(l);
997 		Cost		subCost;
998 		Selectivity subselec;
999 
1000 		cost_bitmap_tree_node(subpath, &subCost, &subselec);
1001 
1002 		selec *= subselec;
1003 
1004 		totalCost += subCost;
1005 		if (l != list_head(path->bitmapquals))
1006 			totalCost += 100.0 * cpu_operator_cost;
1007 	}
1008 	path->bitmapselectivity = selec;
1009 	path->path.rows = 0;		/* per above, not used */
1010 	path->path.startup_cost = totalCost;
1011 	path->path.total_cost = totalCost;
1012 }
1013 
1014 /*
1015  * cost_bitmap_or_node
1016  *		Estimate the cost of a BitmapOr node
1017  *
1018  * See comments for cost_bitmap_and_node.
1019  */
1020 void
1021 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
1022 {
1023 	Cost		totalCost;
1024 	Selectivity selec;
1025 	ListCell   *l;
1026 
1027 	/*
1028 	 * We estimate OR selectivity on the assumption that the inputs are
1029 	 * non-overlapping, since that's often the case in "x IN (list)" type
1030 	 * situations.  Of course, we clamp to 1.0 at the end.
1031 	 *
1032 	 * The runtime cost of the BitmapOr itself is estimated at 100x
1033 	 * cpu_operator_cost for each tbm_union needed.  Probably too small,
1034 	 * definitely too simplistic?  We are aware that the tbm_unions are
1035 	 * optimized out when the inputs are BitmapIndexScans.
1036 	 */
1037 	totalCost = 0.0;
1038 	selec = 0.0;
1039 	foreach(l, path->bitmapquals)
1040 	{
1041 		Path	   *subpath = (Path *) lfirst(l);
1042 		Cost		subCost;
1043 		Selectivity subselec;
1044 
1045 		cost_bitmap_tree_node(subpath, &subCost, &subselec);
1046 
1047 		selec += subselec;
1048 
1049 		totalCost += subCost;
1050 		if (l != list_head(path->bitmapquals) &&
1051 			!IsA(subpath, IndexPath))
1052 			totalCost += 100.0 * cpu_operator_cost;
1053 	}
1054 	path->bitmapselectivity = Min(selec, 1.0);
1055 	path->path.rows = 0;		/* per above, not used */
1056 	path->path.startup_cost = totalCost;
1057 	path->path.total_cost = totalCost;
1058 }
1059 
1060 /*
1061  * cost_tidscan
1062  *	  Determines and returns the cost of scanning a relation using TIDs.
1063  *
1064  * 'baserel' is the relation to be scanned
1065  * 'tidquals' is the list of TID-checkable quals
1066  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1067  */
1068 void
1069 cost_tidscan(Path *path, PlannerInfo *root,
1070 			 RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
1071 {
1072 	Cost		startup_cost = 0;
1073 	Cost		run_cost = 0;
1074 	bool		isCurrentOf = false;
1075 	QualCost	qpqual_cost;
1076 	Cost		cpu_per_tuple;
1077 	QualCost	tid_qual_cost;
1078 	int			ntuples;
1079 	ListCell   *l;
1080 	double		spc_random_page_cost;
1081 
1082 	/* Should only be applied to base relations */
1083 	Assert(baserel->relid > 0);
1084 	Assert(baserel->rtekind == RTE_RELATION);
1085 
1086 	/* Mark the path with the correct row estimate */
1087 	if (param_info)
1088 		path->rows = param_info->ppi_rows;
1089 	else
1090 		path->rows = baserel->rows;
1091 
1092 	/* Count how many tuples we expect to retrieve */
1093 	ntuples = 0;
1094 	foreach(l, tidquals)
1095 	{
1096 		if (IsA(lfirst(l), ScalarArrayOpExpr))
1097 		{
1098 			/* Each element of the array yields 1 tuple */
1099 			ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
1100 			Node	   *arraynode = (Node *) lsecond(saop->args);
1101 
1102 			ntuples += estimate_array_length(arraynode);
1103 		}
1104 		else if (IsA(lfirst(l), CurrentOfExpr))
1105 		{
1106 			/* CURRENT OF yields 1 tuple */
1107 			isCurrentOf = true;
1108 			ntuples++;
1109 		}
1110 		else
1111 		{
1112 			/* It's just CTID = something, count 1 tuple */
1113 			ntuples++;
1114 		}
1115 	}
1116 
1117 	/*
1118 	 * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
1119 	 * understands how to do it correctly.  Therefore, honor enable_tidscan
1120 	 * only when CURRENT OF isn't present.  Also note that cost_qual_eval
1121 	 * counts a CurrentOfExpr as having startup cost disable_cost, which we
1122 	 * subtract off here; that's to prevent other plan types such as seqscan
1123 	 * from winning.
1124 	 */
1125 	if (isCurrentOf)
1126 	{
1127 		Assert(baserel->baserestrictcost.startup >= disable_cost);
1128 		startup_cost -= disable_cost;
1129 	}
1130 	else if (!enable_tidscan)
1131 		startup_cost += disable_cost;
1132 
1133 	/*
1134 	 * The TID qual expressions will be computed once, any other baserestrict
1135 	 * quals once per retrieved tuple.
1136 	 */
1137 	cost_qual_eval(&tid_qual_cost, tidquals, root);
1138 
1139 	/* fetch estimated page cost for tablespace containing table */
1140 	get_tablespace_page_costs(baserel->reltablespace,
1141 							  &spc_random_page_cost,
1142 							  NULL);
1143 
1144 	/* disk costs --- assume each tuple on a different page */
1145 	run_cost += spc_random_page_cost * ntuples;
1146 
1147 	/* Add scanning CPU costs */
1148 	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1149 
1150 	/* XXX currently we assume TID quals are a subset of qpquals */
1151 	startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1152 	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1153 		tid_qual_cost.per_tuple;
1154 	run_cost += cpu_per_tuple * ntuples;
1155 
1156 	/* tlist eval costs are paid per output row, not per tuple scanned */
1157 	startup_cost += path->pathtarget->cost.startup;
1158 	run_cost += path->pathtarget->cost.per_tuple * path->rows;
1159 
1160 	path->startup_cost = startup_cost;
1161 	path->total_cost = startup_cost + run_cost;
1162 }
1163 
1164 /*
1165  * cost_subqueryscan
1166  *	  Determines and returns the cost of scanning a subquery RTE.
1167  *
1168  * 'baserel' is the relation to be scanned
1169  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1170  */
1171 void
1172 cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1173 				  RelOptInfo *baserel, ParamPathInfo *param_info)
1174 {
1175 	Cost		startup_cost;
1176 	Cost		run_cost;
1177 	QualCost	qpqual_cost;
1178 	Cost		cpu_per_tuple;
1179 
1180 	/* Should only be applied to base relations that are subqueries */
1181 	Assert(baserel->relid > 0);
1182 	Assert(baserel->rtekind == RTE_SUBQUERY);
1183 
1184 	/* Mark the path with the correct row estimate */
1185 	if (param_info)
1186 		path->path.rows = param_info->ppi_rows;
1187 	else
1188 		path->path.rows = baserel->rows;
1189 
1190 	/*
1191 	 * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1192 	 * any restriction clauses and tlist that will be attached to the
1193 	 * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1194 	 * projection overhead.
1195 	 */
1196 	path->path.startup_cost = path->subpath->startup_cost;
1197 	path->path.total_cost = path->subpath->total_cost;
1198 
1199 	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1200 
1201 	startup_cost = qpqual_cost.startup;
1202 	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1203 	run_cost = cpu_per_tuple * baserel->tuples;
1204 
1205 	/* tlist eval costs are paid per output row, not per tuple scanned */
1206 	startup_cost += path->path.pathtarget->cost.startup;
1207 	run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1208 
1209 	path->path.startup_cost += startup_cost;
1210 	path->path.total_cost += startup_cost + run_cost;
1211 }
1212 
1213 /*
1214  * cost_functionscan
1215  *	  Determines and returns the cost of scanning a function RTE.
1216  *
1217  * 'baserel' is the relation to be scanned
1218  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1219  */
1220 void
1221 cost_functionscan(Path *path, PlannerInfo *root,
1222 				  RelOptInfo *baserel, ParamPathInfo *param_info)
1223 {
1224 	Cost		startup_cost = 0;
1225 	Cost		run_cost = 0;
1226 	QualCost	qpqual_cost;
1227 	Cost		cpu_per_tuple;
1228 	RangeTblEntry *rte;
1229 	QualCost	exprcost;
1230 
1231 	/* Should only be applied to base relations that are functions */
1232 	Assert(baserel->relid > 0);
1233 	rte = planner_rt_fetch(baserel->relid, root);
1234 	Assert(rte->rtekind == RTE_FUNCTION);
1235 
1236 	/* Mark the path with the correct row estimate */
1237 	if (param_info)
1238 		path->rows = param_info->ppi_rows;
1239 	else
1240 		path->rows = baserel->rows;
1241 
1242 	/*
1243 	 * Estimate costs of executing the function expression(s).
1244 	 *
1245 	 * Currently, nodeFunctionscan.c always executes the functions to
1246 	 * completion before returning any rows, and caches the results in a
1247 	 * tuplestore.  So the function eval cost is all startup cost, and per-row
1248 	 * costs are minimal.
1249 	 *
1250 	 * XXX in principle we ought to charge tuplestore spill costs if the
1251 	 * number of rows is large.  However, given how phony our rowcount
1252 	 * estimates for functions tend to be, there's not a lot of point in that
1253 	 * refinement right now.
1254 	 */
1255 	cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1256 
1257 	startup_cost += exprcost.startup + exprcost.per_tuple;
1258 
1259 	/* Add scanning CPU costs */
1260 	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1261 
1262 	startup_cost += qpqual_cost.startup;
1263 	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1264 	run_cost += cpu_per_tuple * baserel->tuples;
1265 
1266 	/* tlist eval costs are paid per output row, not per tuple scanned */
1267 	startup_cost += path->pathtarget->cost.startup;
1268 	run_cost += path->pathtarget->cost.per_tuple * path->rows;
1269 
1270 	path->startup_cost = startup_cost;
1271 	path->total_cost = startup_cost + run_cost;
1272 }
1273 
1274 /*
1275  * cost_valuesscan
1276  *	  Determines and returns the cost of scanning a VALUES RTE.
1277  *
1278  * 'baserel' is the relation to be scanned
1279  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1280  */
1281 void
1282 cost_valuesscan(Path *path, PlannerInfo *root,
1283 				RelOptInfo *baserel, ParamPathInfo *param_info)
1284 {
1285 	Cost		startup_cost = 0;
1286 	Cost		run_cost = 0;
1287 	QualCost	qpqual_cost;
1288 	Cost		cpu_per_tuple;
1289 
1290 	/* Should only be applied to base relations that are values lists */
1291 	Assert(baserel->relid > 0);
1292 	Assert(baserel->rtekind == RTE_VALUES);
1293 
1294 	/* Mark the path with the correct row estimate */
1295 	if (param_info)
1296 		path->rows = param_info->ppi_rows;
1297 	else
1298 		path->rows = baserel->rows;
1299 
1300 	/*
1301 	 * For now, estimate list evaluation cost at one operator eval per list
1302 	 * (probably pretty bogus, but is it worth being smarter?)
1303 	 */
1304 	cpu_per_tuple = cpu_operator_cost;
1305 
1306 	/* Add scanning CPU costs */
1307 	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1308 
1309 	startup_cost += qpqual_cost.startup;
1310 	cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1311 	run_cost += cpu_per_tuple * baserel->tuples;
1312 
1313 	/* tlist eval costs are paid per output row, not per tuple scanned */
1314 	startup_cost += path->pathtarget->cost.startup;
1315 	run_cost += path->pathtarget->cost.per_tuple * path->rows;
1316 
1317 	path->startup_cost = startup_cost;
1318 	path->total_cost = startup_cost + run_cost;
1319 }
1320 
1321 /*
1322  * cost_ctescan
1323  *	  Determines and returns the cost of scanning a CTE RTE.
1324  *
1325  * Note: this is used for both self-reference and regular CTEs; the
1326  * possible cost differences are below the threshold of what we could
1327  * estimate accurately anyway.  Note that the costs of evaluating the
1328  * referenced CTE query are added into the final plan as initplan costs,
1329  * and should NOT be counted here.
1330  */
1331 void
1332 cost_ctescan(Path *path, PlannerInfo *root,
1333 			 RelOptInfo *baserel, ParamPathInfo *param_info)
1334 {
1335 	Cost		startup_cost = 0;
1336 	Cost		run_cost = 0;
1337 	QualCost	qpqual_cost;
1338 	Cost		cpu_per_tuple;
1339 
1340 	/* Should only be applied to base relations that are CTEs */
1341 	Assert(baserel->relid > 0);
1342 	Assert(baserel->rtekind == RTE_CTE);
1343 
1344 	/* Mark the path with the correct row estimate */
1345 	if (param_info)
1346 		path->rows = param_info->ppi_rows;
1347 	else
1348 		path->rows = baserel->rows;
1349 
1350 	/* Charge one CPU tuple cost per row for tuplestore manipulation */
1351 	cpu_per_tuple = cpu_tuple_cost;
1352 
1353 	/* Add scanning CPU costs */
1354 	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1355 
1356 	startup_cost += qpqual_cost.startup;
1357 	cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1358 	run_cost += cpu_per_tuple * baserel->tuples;
1359 
1360 	/* tlist eval costs are paid per output row, not per tuple scanned */
1361 	startup_cost += path->pathtarget->cost.startup;
1362 	run_cost += path->pathtarget->cost.per_tuple * path->rows;
1363 
1364 	path->startup_cost = startup_cost;
1365 	path->total_cost = startup_cost + run_cost;
1366 }
1367 
1368 /*
1369  * cost_recursive_union
1370  *	  Determines and returns the cost of performing a recursive union,
1371  *	  and also the estimated output size.
1372  *
1373  * We are given Paths for the nonrecursive and recursive terms.
1374  */
1375 void
1376 cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1377 {
1378 	Cost		startup_cost;
1379 	Cost		total_cost;
1380 	double		total_rows;
1381 
1382 	/* We probably have decent estimates for the non-recursive term */
1383 	startup_cost = nrterm->startup_cost;
1384 	total_cost = nrterm->total_cost;
1385 	total_rows = nrterm->rows;
1386 
1387 	/*
1388 	 * We arbitrarily assume that about 10 recursive iterations will be
1389 	 * needed, and that we've managed to get a good fix on the cost and output
1390 	 * size of each one of them.  These are mighty shaky assumptions but it's
1391 	 * hard to see how to do better.
1392 	 */
1393 	total_cost += 10 * rterm->total_cost;
1394 	total_rows += 10 * rterm->rows;
1395 
1396 	/*
1397 	 * Also charge cpu_tuple_cost per row to account for the costs of
1398 	 * manipulating the tuplestores.  (We don't worry about possible
1399 	 * spill-to-disk costs.)
1400 	 */
1401 	total_cost += cpu_tuple_cost * total_rows;
1402 
1403 	runion->startup_cost = startup_cost;
1404 	runion->total_cost = total_cost;
1405 	runion->rows = total_rows;
1406 	runion->pathtarget->width = Max(nrterm->pathtarget->width,
1407 									rterm->pathtarget->width);
1408 }
1409 
1410 /*
1411  * cost_sort
1412  *	  Determines and returns the cost of sorting a relation, including
1413  *	  the cost of reading the input data.
1414  *
1415  * If the total volume of data to sort is less than sort_mem, we will do
1416  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1417  * comparisons for t tuples.
1418  *
1419  * If the total volume exceeds sort_mem, we switch to a tape-style merge
1420  * algorithm.  There will still be about t*log2(t) tuple comparisons in
1421  * total, but we will also need to write and read each tuple once per
1422  * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
1423  * number of initial runs formed and M is the merge order used by tuplesort.c.
1424  * Since the average initial run should be about sort_mem, we have
1425  *		disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
1426  *		cpu = comparison_cost * t * log2(t)
1427  *
1428  * If the sort is bounded (i.e., only the first k result tuples are needed)
1429  * and k tuples can fit into sort_mem, we use a heap method that keeps only
1430  * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1431  *
1432  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1433  * accesses (XXX can't we refine that guess?)
1434  *
1435  * By default, we charge two operator evals per tuple comparison, which should
1436  * be in the right ballpark in most cases.  The caller can tweak this by
1437  * specifying nonzero comparison_cost; typically that's used for any extra
1438  * work that has to be done to prepare the inputs to the comparison operators.
1439  *
1440  * 'pathkeys' is a list of sort keys
1441  * 'input_cost' is the total cost for reading the input data
1442  * 'tuples' is the number of tuples in the relation
1443  * 'width' is the average tuple width in bytes
1444  * 'comparison_cost' is the extra cost per comparison, if any
1445  * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1446  * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1447  *
1448  * NOTE: some callers currently pass NIL for pathkeys because they
1449  * can't conveniently supply the sort keys.  Since this routine doesn't
1450  * currently do anything with pathkeys anyway, that doesn't matter...
1451  * but if it ever does, it should react gracefully to lack of key data.
1452  * (Actually, the thing we'd most likely be interested in is just the number
1453  * of sort keys, which all callers *could* supply.)
1454  */
1455 void
1456 cost_sort(Path *path, PlannerInfo *root,
1457 		  List *pathkeys, Cost input_cost, double tuples, int width,
1458 		  Cost comparison_cost, int sort_mem,
1459 		  double limit_tuples)
1460 {
1461 	Cost		startup_cost = input_cost;
1462 	Cost		run_cost = 0;
1463 	double		input_bytes = relation_byte_size(tuples, width);
1464 	double		output_bytes;
1465 	double		output_tuples;
1466 	long		sort_mem_bytes = sort_mem * 1024L;
1467 
1468 	if (!enable_sort)
1469 		startup_cost += disable_cost;
1470 
1471 	path->rows = tuples;
1472 
1473 	/*
1474 	 * We want to be sure the cost of a sort is never estimated as zero, even
1475 	 * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
1476 	 */
1477 	if (tuples < 2.0)
1478 		tuples = 2.0;
1479 
1480 	/* Include the default cost-per-comparison */
1481 	comparison_cost += 2.0 * cpu_operator_cost;
1482 
1483 	/* Do we have a useful LIMIT? */
1484 	if (limit_tuples > 0 && limit_tuples < tuples)
1485 	{
1486 		output_tuples = limit_tuples;
1487 		output_bytes = relation_byte_size(output_tuples, width);
1488 	}
1489 	else
1490 	{
1491 		output_tuples = tuples;
1492 		output_bytes = input_bytes;
1493 	}
1494 
1495 	if (output_bytes > sort_mem_bytes)
1496 	{
1497 		/*
1498 		 * We'll have to use a disk-based sort of all the tuples
1499 		 */
1500 		double		npages = ceil(input_bytes / BLCKSZ);
1501 		double		nruns = input_bytes / sort_mem_bytes;
1502 		double		mergeorder = tuplesort_merge_order(sort_mem_bytes);
1503 		double		log_runs;
1504 		double		npageaccesses;
1505 
1506 		/*
1507 		 * CPU costs
1508 		 *
1509 		 * Assume about N log2 N comparisons
1510 		 */
1511 		startup_cost += comparison_cost * tuples * LOG2(tuples);
1512 
1513 		/* Disk costs */
1514 
1515 		/* Compute logM(r) as log(r) / log(M) */
1516 		if (nruns > mergeorder)
1517 			log_runs = ceil(log(nruns) / log(mergeorder));
1518 		else
1519 			log_runs = 1.0;
1520 		npageaccesses = 2.0 * npages * log_runs;
1521 		/* Assume 3/4ths of accesses are sequential, 1/4th are not */
1522 		startup_cost += npageaccesses *
1523 			(seq_page_cost * 0.75 + random_page_cost * 0.25);
1524 	}
1525 	else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1526 	{
1527 		/*
1528 		 * We'll use a bounded heap-sort keeping just K tuples in memory, for
1529 		 * a total number of tuple comparisons of N log2 K; but the constant
1530 		 * factor is a bit higher than for quicksort.  Tweak it so that the
1531 		 * cost curve is continuous at the crossover point.
1532 		 */
1533 		startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
1534 	}
1535 	else
1536 	{
1537 		/* We'll use plain quicksort on all the input tuples */
1538 		startup_cost += comparison_cost * tuples * LOG2(tuples);
1539 	}
1540 
1541 	/*
1542 	 * Also charge a small amount (arbitrarily set equal to operator cost) per
1543 	 * extracted tuple.  We don't charge cpu_tuple_cost because a Sort node
1544 	 * doesn't do qual-checking or projection, so it has less overhead than
1545 	 * most plan nodes.  Note it's correct to use tuples not output_tuples
1546 	 * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1547 	 * counting the LIMIT otherwise.
1548 	 */
1549 	run_cost += cpu_operator_cost * tuples;
1550 
1551 	path->startup_cost = startup_cost;
1552 	path->total_cost = startup_cost + run_cost;
1553 }
1554 
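/*
 * Illustrative sketch of the external-sort disk-cost arithmetic above: each
 * merge pass writes and reads every page once, we expect ceil(logM(nruns))
 * passes, and page accesses are charged as 3/4 sequential, 1/4 random.  The
 * function and its parameters are hypothetical stand-ins for the values
 * cost_sort derives from its arguments and the cost GUCs; it is not called
 * anywhere.
 */
static double
sort_disk_cost_sketch(double input_bytes, double sort_mem_bytes,
					  double merge_order, double block_size,
					  double seq_cost, double rand_cost)
{
	double		npages = ceil(input_bytes / block_size);
	double		nruns = input_bytes / sort_mem_bytes;
	double		log_runs;

	/* number of merge passes: ceil(logM(nruns)), but always at least one */
	if (nruns > merge_order)
		log_runs = ceil(log(nruns) / log(merge_order));
	else
		log_runs = 1.0;

	/* 2 * npages page accesses per pass, mostly sequential */
	return 2.0 * npages * log_runs *
		(seq_cost * 0.75 + rand_cost * 0.25);
}
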
1555 /*
1556  * cost_merge_append
1557  *	  Determines and returns the cost of a MergeAppend node.
1558  *
1559  * MergeAppend merges several pre-sorted input streams, using a heap that
1560  * at any given instant holds the next tuple from each stream.  If there
1561  * are N streams, we need about N*log2(N) tuple comparisons to construct
1562  * the heap at startup, and then for each output tuple, about log2(N)
1563  * comparisons to delete the top heap entry and another log2(N) comparisons
1564  * to insert its successor from the same stream.
1565  *
1566  * (The effective value of N will drop once some of the input streams are
1567  * exhausted, but it seems unlikely to be worth trying to account for that.)
1568  *
1569  * The heap is never spilled to disk, since we assume N is not very large.
1570  * So this is much simpler than cost_sort.
1571  *
1572  * As in cost_sort, we charge two operator evals per tuple comparison.
1573  *
1574  * 'pathkeys' is a list of sort keys
1575  * 'n_streams' is the number of input streams
1576  * 'input_startup_cost' is the sum of the input streams' startup costs
1577  * 'input_total_cost' is the sum of the input streams' total costs
1578  * 'tuples' is the number of tuples in all the streams
1579  */
1580 void
1581 cost_merge_append(Path *path, PlannerInfo *root,
1582 				  List *pathkeys, int n_streams,
1583 				  Cost input_startup_cost, Cost input_total_cost,
1584 				  double tuples)
1585 {
1586 	Cost		startup_cost = 0;
1587 	Cost		run_cost = 0;
1588 	Cost		comparison_cost;
1589 	double		N;
1590 	double		logN;
1591 
1592 	/*
1593 	 * Avoid log(0)...
1594 	 */
1595 	N = (n_streams < 2) ? 2.0 : (double) n_streams;
1596 	logN = LOG2(N);
1597 
1598 	/* Assumed cost per tuple comparison */
1599 	comparison_cost = 2.0 * cpu_operator_cost;
1600 
1601 	/* Heap creation cost */
1602 	startup_cost += comparison_cost * N * logN;
1603 
1604 	/* Per-tuple heap maintenance cost */
1605 	run_cost += tuples * comparison_cost * 2.0 * logN;
1606 
1607 	/*
1608 	 * Also charge a small amount (arbitrarily set equal to operator cost) per
1609 	 * extracted tuple.  We don't charge cpu_tuple_cost because a MergeAppend
1610 	 * node doesn't do qual-checking or projection, so it has less overhead
1611 	 * than most plan nodes.
1612 	 */
1613 	run_cost += cpu_operator_cost * tuples;
1614 
1615 	path->startup_cost = startup_cost + input_startup_cost;
1616 	path->total_cost = startup_cost + run_cost + input_total_cost;
1617 }
1618 
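/*
 * Illustrative sketch of the MergeAppend comparison counts above: building
 * the heap costs about N*log2(N) comparisons, and each emitted tuple costs
 * about 2*log2(N) comparisons plus the small per-tuple extraction charge.
 * The function and parameter names are hypothetical; per_tuple_overhead
 * stands for the cpu_operator_cost charged per extracted tuple.  It relies
 * on this file's LOG2() macro.
 */
static void
merge_append_cpu_sketch(int n_streams, double tuples,
						double comparison_cost, double per_tuple_overhead,
						double *startup, double *run)
{
	double		N = (n_streams < 2) ? 2.0 : (double) n_streams;
	double		logN = LOG2(N);

	/* heap creation: about N * log2(N) comparisons */
	*startup = comparison_cost * N * logN;

	/* per output tuple: delete the top entry and insert its successor */
	*run = tuples * (comparison_cost * 2.0 * logN + per_tuple_overhead);
}
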
1619 /*
1620  * cost_material
1621  *	  Determines and returns the cost of materializing a relation, including
1622  *	  the cost of reading the input data.
1623  *
1624  * If the total volume of data to materialize exceeds work_mem, we will need
1625  * to write it to disk, so the cost is much higher in that case.
1626  *
1627  * Note that here we are estimating the costs for the first scan of the
1628  * relation, so the materialization is all overhead --- any savings will
1629  * occur only on rescan, which is estimated in cost_rescan.
1630  */
1631 void
1632 cost_material(Path *path,
1633 			  Cost input_startup_cost, Cost input_total_cost,
1634 			  double tuples, int width)
1635 {
1636 	Cost		startup_cost = input_startup_cost;
1637 	Cost		run_cost = input_total_cost - input_startup_cost;
1638 	double		nbytes = relation_byte_size(tuples, width);
1639 	long		work_mem_bytes = work_mem * 1024L;
1640 
1641 	path->rows = tuples;
1642 
1643 	/*
1644 	 * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
1645 	 * reflect bookkeeping overhead.  (This rate must be more than what
1646 	 * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
1647 	 * if it is exactly the same then there will be a cost tie between
1648 	 * nestloop with A outer, materialized B inner and nestloop with B outer,
1649 	 * materialized A inner.  The extra cost ensures we'll prefer
1650 	 * materializing the smaller rel.)	Note that this is normally a good deal
1651 	 * less than cpu_tuple_cost, which is OK because a Material plan node
1652 	 * doesn't do qual-checking or projection, so it's got less overhead than
1653 	 * most plan nodes.
1654 	 */
1655 	run_cost += 2 * cpu_operator_cost * tuples;
1656 
1657 	/*
1658 	 * If we will spill to disk, charge at the rate of seq_page_cost per page.
1659 	 * This cost is assumed to be evenly spread through the plan run phase,
1660 	 * which isn't exactly accurate but our cost model doesn't allow for
1661 	 * nonuniform costs within the run phase.
1662 	 */
1663 	if (nbytes > work_mem_bytes)
1664 	{
1665 		double		npages = ceil(nbytes / BLCKSZ);
1666 
1667 		run_cost += seq_page_cost * npages;
1668 	}
1669 
1670 	path->startup_cost = startup_cost;
1671 	path->total_cost = startup_cost + run_cost;
1672 }
1673 
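/*
 * Illustrative sketch of cost_material's run-cost model: a flat 2x
 * per-tuple bookkeeping charge, plus seq_page_cost per page only when the
 * tuplestore is expected to spill.  The function and parameters are
 * hypothetical; nbytes is a crude stand-in for relation_byte_size(), which
 * additionally accounts for per-tuple header overhead.
 */
static double
material_run_cost_sketch(double tuples, double width_bytes,
						 double work_mem_bytes, double block_size,
						 double per_tuple_charge, double seq_cost)
{
	double		nbytes = tuples * width_bytes;
	double		run_cost = 2.0 * per_tuple_charge * tuples;

	/* charge seq_page_cost per page of spilled data */
	if (nbytes > work_mem_bytes)
		run_cost += seq_cost * ceil(nbytes / block_size);

	return run_cost;
}
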
1674 /*
1675  * cost_agg
1676  *		Determines and returns the cost of performing an Agg plan node,
1677  *		including the cost of its input.
1678  *
1679  * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
1680  * we are using a hashed Agg node just to do grouping).
1681  *
1682  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1683  * are for appropriately-sorted input.
1684  */
1685 void
1686 cost_agg(Path *path, PlannerInfo *root,
1687 		 AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
1688 		 int numGroupCols, double numGroups,
1689 		 Cost input_startup_cost, Cost input_total_cost,
1690 		 double input_tuples)
1691 {
1692 	double		output_tuples;
1693 	Cost		startup_cost;
1694 	Cost		total_cost;
1695 	AggClauseCosts dummy_aggcosts;
1696 
1697 	/* Use all-zero per-aggregate costs if NULL is passed */
1698 	if (aggcosts == NULL)
1699 	{
1700 		Assert(aggstrategy == AGG_HASHED);
1701 		MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
1702 		aggcosts = &dummy_aggcosts;
1703 	}
1704 
1705 	/*
1706 	 * The transCost.per_tuple component of aggcosts should be charged once
1707 	 * per input tuple, corresponding to the costs of evaluating the aggregate
1708 	 * transfns and their input expressions (with any startup cost of course
1709 	 * charged but once).  The finalCost component is charged once per output
1710 	 * tuple, corresponding to the costs of evaluating the finalfns.
1711 	 *
1712 	 * If we are grouping, we charge an additional cpu_operator_cost per
1713 	 * grouping column per input tuple for grouping comparisons.
1714 	 *
1715 	 * We will produce a single output tuple if not grouping, and a tuple per
1716 	 * group otherwise.  We charge cpu_tuple_cost for each output tuple.
1717 	 *
1718 	 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1719 	 * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
1720 	 * input path is already sorted appropriately, AGG_SORTED should be
1721 	 * preferred (since it has no risk of memory overflow).  This will happen
1722 	 * as long as the computed total costs are indeed exactly equal --- but if
1723 	 * there's roundoff error we might do the wrong thing.  So be sure that
1724 	 * the computations below form the same intermediate values in the same
1725 	 * order.
1726 	 */
1727 	if (aggstrategy == AGG_PLAIN)
1728 	{
1729 		startup_cost = input_total_cost;
1730 		startup_cost += aggcosts->transCost.startup;
1731 		startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1732 		startup_cost += aggcosts->finalCost;
1733 		/* we aren't grouping */
1734 		total_cost = startup_cost + cpu_tuple_cost;
1735 		output_tuples = 1;
1736 	}
1737 	else if (aggstrategy == AGG_SORTED)
1738 	{
1739 		/* Here we are able to deliver output on-the-fly */
1740 		startup_cost = input_startup_cost;
1741 		total_cost = input_total_cost;
1742 		/* calcs phrased this way to match HASHED case, see note above */
1743 		total_cost += aggcosts->transCost.startup;
1744 		total_cost += aggcosts->transCost.per_tuple * input_tuples;
1745 		total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1746 		total_cost += aggcosts->finalCost * numGroups;
1747 		total_cost += cpu_tuple_cost * numGroups;
1748 		output_tuples = numGroups;
1749 	}
1750 	else
1751 	{
1752 		/* must be AGG_HASHED */
1753 		startup_cost = input_total_cost;
1754 		if (!enable_hashagg)
1755 			startup_cost += disable_cost;
1756 		startup_cost += aggcosts->transCost.startup;
1757 		startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1758 		startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1759 		total_cost = startup_cost;
1760 		total_cost += aggcosts->finalCost * numGroups;
1761 		total_cost += cpu_tuple_cost * numGroups;
1762 		output_tuples = numGroups;
1763 	}
1764 
1765 	path->rows = output_tuples;
1766 	path->startup_cost = startup_cost;
1767 	path->total_cost = total_cost;
1768 }
1769 
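/*
 * Illustrative sketch of the "same intermediate values in the same order"
 * point made above: accumulating identical terms in identical order makes
 * the AGG_SORTED and AGG_HASHED totals bit-for-bit equal, so roundoff
 * cannot make the planner prefer the riskier hashed strategy.  The function
 * and its parameters are hypothetical stand-ins for the aggcosts fields and
 * cost GUCs used above.
 */
static void
agg_total_cost_sketch(double input_tuples, double ngroups, int num_group_cols,
					  double trans_startup, double trans_per_tuple,
					  double final_cost, double op_cost, double tuple_cost,
					  double input_total_cost,
					  double *sorted_total, double *hashed_total)
{
	double		sorted = input_total_cost;
	double		hashed_startup = input_total_cost;
	double		hashed;

	/* AGG_SORTED: all charges go straight into the running total */
	sorted += trans_startup;
	sorted += trans_per_tuple * input_tuples;
	sorted += (op_cost * num_group_cols) * input_tuples;
	sorted += final_cost * ngroups;
	sorted += tuple_cost * ngroups;

	/* AGG_HASHED: same terms, same order, but split across startup/total */
	hashed_startup += trans_startup;
	hashed_startup += trans_per_tuple * input_tuples;
	hashed_startup += (op_cost * num_group_cols) * input_tuples;
	hashed = hashed_startup;
	hashed += final_cost * ngroups;
	hashed += tuple_cost * ngroups;

	/* same operands added in the same order, hence identical results */
	Assert(sorted == hashed);

	*sorted_total = sorted;
	*hashed_total = hashed;
}
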
1770 /*
1771  * cost_windowagg
1772  *		Determines and returns the cost of performing a WindowAgg plan node,
1773  *		including the cost of its input.
1774  *
1775  * Input is assumed already properly sorted.
1776  */
1777 void
1778 cost_windowagg(Path *path, PlannerInfo *root,
1779 			   List *windowFuncs, int numPartCols, int numOrderCols,
1780 			   Cost input_startup_cost, Cost input_total_cost,
1781 			   double input_tuples)
1782 {
1783 	Cost		startup_cost;
1784 	Cost		total_cost;
1785 	ListCell   *lc;
1786 
1787 	startup_cost = input_startup_cost;
1788 	total_cost = input_total_cost;
1789 
1790 	/*
1791 	 * Window functions are assumed to cost their stated execution cost, plus
1792 	 * the cost of evaluating their input expressions, per tuple.  Since they
1793 	 * may in fact evaluate their inputs at multiple rows during each cycle,
1794 	 * this could be a drastic underestimate; but without a way to know how
1795 	 * many rows the window function will fetch, it's hard to do better.  In
1796 	 * any case, it's a good estimate for all the built-in window functions,
1797 	 * so we'll just do this for now.
1798 	 */
1799 	foreach(lc, windowFuncs)
1800 	{
1801 		WindowFunc *wfunc = (WindowFunc *) lfirst(lc);
1802 		Cost		wfunccost;
1803 		QualCost	argcosts;
1804 
1805 		Assert(IsA(wfunc, WindowFunc));
1806 
1807 		wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
1808 
1809 		/* also add the input expressions' cost to per-input-row costs */
1810 		cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
1811 		startup_cost += argcosts.startup;
1812 		wfunccost += argcosts.per_tuple;
1813 
1814 		/*
1815 		 * Add the filter's cost to per-input-row costs.  XXX We should reduce
1816 		 * input expression costs according to filter selectivity.
1817 		 */
1818 		cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
1819 		startup_cost += argcosts.startup;
1820 		wfunccost += argcosts.per_tuple;
1821 
1822 		total_cost += wfunccost * input_tuples;
1823 	}
1824 
1825 	/*
1826 	 * We also charge cpu_operator_cost per grouping column per tuple for
1827 	 * grouping comparisons, plus cpu_tuple_cost per tuple for general
1828 	 * overhead.
1829 	 *
1830 	 * XXX this neglects costs of spooling the data to disk when it overflows
1831 	 * work_mem.  Sooner or later that should get accounted for.
1832 	 */
1833 	total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
1834 	total_cost += cpu_tuple_cost * input_tuples;
1835 
1836 	path->rows = input_tuples;
1837 	path->startup_cost = startup_cost;
1838 	path->total_cost = total_cost;
1839 }
1840 
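/*
 * Illustrative sketch of the WindowAgg charging scheme above: each window
 * function's execution cost (plus its argument and filter expression costs)
 * is charged once per input row, and on top of that we pay per-row grouping
 * comparisons and general overhead.  The function, its parameters, and the
 * wfunc_costs array are hypothetical.
 */
static double
windowagg_run_cost_sketch(const double *wfunc_costs, int nfuncs,
						  int num_part_cols, int num_order_cols,
						  double input_tuples,
						  double op_cost, double tuple_cost)
{
	double		total = 0.0;
	int			i;

	/* per-input-row cost of each window function and its input exprs */
	for (i = 0; i < nfuncs; i++)
		total += wfunc_costs[i] * input_tuples;

	/* partition/order-column comparisons plus general per-row overhead */
	total += op_cost * (num_part_cols + num_order_cols) * input_tuples;
	total += tuple_cost * input_tuples;

	return total;
}
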
1841 /*
1842  * cost_group
1843  *		Determines and returns the cost of performing a Group plan node,
1844  *		including the cost of its input.
1845  *
1846  * Note: caller must ensure that input costs are for appropriately-sorted
1847  * input.
1848  */
1849 void
1850 cost_group(Path *path, PlannerInfo *root,
1851 		   int numGroupCols, double numGroups,
1852 		   Cost input_startup_cost, Cost input_total_cost,
1853 		   double input_tuples)
1854 {
1855 	Cost		startup_cost;
1856 	Cost		total_cost;
1857 
1858 	startup_cost = input_startup_cost;
1859 	total_cost = input_total_cost;
1860 
1861 	/*
1862 	 * Charge one cpu_operator_cost per comparison per input tuple. We assume
1863 	 * all columns get compared for most of the tuples.
1864 	 */
1865 	total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1866 
1867 	path->rows = numGroups;
1868 	path->startup_cost = startup_cost;
1869 	path->total_cost = total_cost;
1870 }
1871 
1872 /*
1873  * initial_cost_nestloop
1874  *	  Preliminary estimate of the cost of a nestloop join path.
1875  *
1876  * This must quickly produce lower-bound estimates of the path's startup and
1877  * total costs.  If we are unable to eliminate the proposed path from
1878  * consideration using the lower bounds, final_cost_nestloop will be called
1879  * to obtain the final estimates.
1880  *
1881  * The exact division of labor between this function and final_cost_nestloop
1882  * is private to them, and represents a tradeoff between speed of the initial
1883  * estimate and getting a tight lower bound.  We choose to not examine the
1884  * join quals here, since that's by far the most expensive part of the
1885  * calculations.  The end result is that CPU-cost considerations must be
1886  * left for the second phase; and for SEMI/ANTI joins, we must also postpone
1887  * incorporation of the inner path's run cost.
1888  *
1889  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
1890  *		other data to be used by final_cost_nestloop
1891  * 'jointype' is the type of join to be performed
1892  * 'outer_path' is the outer input to the join
1893  * 'inner_path' is the inner input to the join
1894  * 'sjinfo' is extra info about the join for selectivity estimation
1895  * 'semifactors' contains valid data if jointype is SEMI or ANTI
1896  */
1897 void
1898 initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
1899 					  JoinType jointype,
1900 					  Path *outer_path, Path *inner_path,
1901 					  SpecialJoinInfo *sjinfo,
1902 					  SemiAntiJoinFactors *semifactors)
1903 {
1904 	Cost		startup_cost = 0;
1905 	Cost		run_cost = 0;
1906 	double		outer_path_rows = outer_path->rows;
1907 	Cost		inner_rescan_start_cost;
1908 	Cost		inner_rescan_total_cost;
1909 	Cost		inner_run_cost;
1910 	Cost		inner_rescan_run_cost;
1911 
1912 	/* estimate costs to rescan the inner relation */
1913 	cost_rescan(root, inner_path,
1914 				&inner_rescan_start_cost,
1915 				&inner_rescan_total_cost);
1916 
1917 	/* cost of source data */
1918 
1919 	/*
1920 	 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1921 	 * before we can start returning tuples, so the join's startup cost is
1922 	 * their sum.  We'll also pay the inner path's rescan startup cost
1923 	 * multiple times.
1924 	 */
1925 	startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1926 	run_cost += outer_path->total_cost - outer_path->startup_cost;
1927 	if (outer_path_rows > 1)
1928 		run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
1929 
1930 	inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
1931 	inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
1932 
1933 	if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
1934 	{
1935 		/*
1936 		 * SEMI or ANTI join: executor will stop after first match.
1937 		 *
1938 		 * Getting decent estimates requires inspection of the join quals,
1939 		 * which we choose to postpone to final_cost_nestloop.
1940 		 */
1941 
1942 		/* Save private data for final_cost_nestloop */
1943 		workspace->inner_run_cost = inner_run_cost;
1944 		workspace->inner_rescan_run_cost = inner_rescan_run_cost;
1945 	}
1946 	else
1947 	{
1948 		/* Normal case; we'll scan whole input rel for each outer row */
1949 		run_cost += inner_run_cost;
1950 		if (outer_path_rows > 1)
1951 			run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
1952 	}
1953 
1954 	/* CPU costs left for later */
1955 
1956 	/* Public result fields */
1957 	workspace->startup_cost = startup_cost;
1958 	workspace->total_cost = startup_cost + run_cost;
1959 	/* Save private data for final_cost_nestloop */
1960 	workspace->run_cost = run_cost;
1961 }
1962 
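/*
 * Illustrative sketch of the nestloop lower bound computed above for the
 * non-SEMI/ANTI case: both inputs' startup costs, the outer run cost, one
 * full inner scan, and (outer_rows - 1) rescans.  Join-qual CPU costs are
 * deliberately omitted, as in initial_cost_nestloop.  The function and its
 * parameters are hypothetical stand-ins for the Path fields and the
 * cost_rescan outputs.
 */
static void
nestloop_lower_bound_sketch(double outer_startup, double outer_total,
							double outer_rows,
							double inner_startup, double inner_total,
							double rescan_startup, double rescan_total,
							double *startup, double *total)
{
	double		run = outer_total - outer_startup;

	/* both inputs must start up before the first join row appears */
	*startup = outer_startup + inner_startup;

	/* first inner scan at full cost, later ones at the rescan cost */
	run += inner_total - inner_startup;
	if (outer_rows > 1)
	{
		run += (outer_rows - 1) * rescan_startup;
		run += (outer_rows - 1) * (rescan_total - rescan_startup);
	}

	*total = *startup + run;
}
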
1963 /*
1964  * final_cost_nestloop
1965  *	  Final estimate of the cost and result size of a nestloop join path.
1966  *
1967  * 'path' is already filled in except for the rows and cost fields
1968  * 'workspace' is the result from initial_cost_nestloop
1969  * 'sjinfo' is extra info about the join for selectivity estimation
1970  * 'semifactors' contains valid data if path->jointype is SEMI or ANTI
1971  */
1972 void
1973 final_cost_nestloop(PlannerInfo *root, NestPath *path,
1974 					JoinCostWorkspace *workspace,
1975 					SpecialJoinInfo *sjinfo,
1976 					SemiAntiJoinFactors *semifactors)
1977 {
1978 	Path	   *outer_path = path->outerjoinpath;
1979 	Path	   *inner_path = path->innerjoinpath;
1980 	double		outer_path_rows = outer_path->rows;
1981 	double		inner_path_rows = inner_path->rows;
1982 	Cost		startup_cost = workspace->startup_cost;
1983 	Cost		run_cost = workspace->run_cost;
1984 	Cost		cpu_per_tuple;
1985 	QualCost	restrict_qual_cost;
1986 	double		ntuples;
1987 
1988 	/* Protect some assumptions below that rowcounts aren't zero or NaN */
1989 	if (outer_path_rows <= 0 || isnan(outer_path_rows))
1990 		outer_path_rows = 1;
1991 	if (inner_path_rows <= 0 || isnan(inner_path_rows))
1992 		inner_path_rows = 1;
1993 
1994 	/* Mark the path with the correct row estimate */
1995 	if (path->path.param_info)
1996 		path->path.rows = path->path.param_info->ppi_rows;
1997 	else
1998 		path->path.rows = path->path.parent->rows;
1999 
2000 	/* For partial paths, scale row estimate. */
2001 	if (path->path.parallel_workers > 0)
2002 	{
2003 		double	parallel_divisor = get_parallel_divisor(&path->path);
2004 
2005 		path->path.rows =
2006 			clamp_row_est(path->path.rows / parallel_divisor);
2007 	}
2008 
2009 	/*
2010 	 * We could include disable_cost in the preliminary estimate, but that
2011 	 * would amount to optimizing for the case where the join method is
2012 	 * disabled, which doesn't seem like the way to bet.
2013 	 */
2014 	if (!enable_nestloop)
2015 		startup_cost += disable_cost;
2016 
2017 	/* cost of inner-relation source data (we already dealt with outer rel) */
2018 
2019 	if (path->jointype == JOIN_SEMI || path->jointype == JOIN_ANTI)
2020 	{
2021 		/*
2022 		 * SEMI or ANTI join: executor will stop after first match.
2023 		 */
2024 		Cost		inner_run_cost = workspace->inner_run_cost;
2025 		Cost		inner_rescan_run_cost = workspace->inner_rescan_run_cost;
2026 		double		outer_matched_rows;
2027 		Selectivity inner_scan_frac;
2028 
2029 		/*
2030 		 * For an outer-rel row that has at least one match, we can expect the
2031 		 * inner scan to stop after a fraction 1/(match_count+1) of the inner
2032 		 * rows, if the matches are evenly distributed.  Since they probably
2033 		 * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
2034 		 * that fraction.  (If we used a larger fuzz factor, we'd have to
2035 		 * clamp inner_scan_frac to at most 1.0; but since match_count is at
2036 		 * least 1, no such clamp is needed now.)
2037 		 */
2038 		outer_matched_rows = rint(outer_path_rows * semifactors->outer_match_frac);
2039 		inner_scan_frac = 2.0 / (semifactors->match_count + 1.0);
2040 
2041 		/*
2042 		 * Compute number of tuples processed (not number emitted!).  First,
2043 		 * account for successfully-matched outer rows.
2044 		 */
2045 		ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
2046 
2047 		/*
2048 		 * Now we need to estimate the actual costs of scanning the inner
2049 		 * relation, which may be quite a bit less than N times inner_run_cost
2050 		 * due to early scan stops.  We consider two cases.  If the inner path
2051 		 * is an indexscan using all the joinquals as indexquals, then an
2052 		 * unmatched outer row results in an indexscan returning no rows,
2053 		 * which is probably quite cheap.  Otherwise, the executor will have
2054 		 * to scan the whole inner rel for an unmatched row; not so cheap.
2055 		 */
2056 		if (has_indexed_join_quals(path))
2057 		{
2058 			/*
2059 			 * Successfully-matched outer rows will only require scanning
2060 			 * inner_scan_frac of the inner relation.  In this case, we don't
2061 			 * need to charge the full inner_run_cost even when that's more
2062 			 * than inner_rescan_run_cost, because we can assume that none of
2063 			 * the inner scans ever scan the whole inner relation.  So it's
2064 			 * okay to assume that all the inner scan executions can be
2065 			 * fractions of the full cost, even if materialization is reducing
2066 			 * the rescan cost.  At this writing, it's impossible to get here
2067 			 * for a materialized inner scan, so inner_run_cost and
2068 			 * inner_rescan_run_cost will be the same anyway; but just in
2069 			 * case, use inner_run_cost for the first matched tuple and
2070 			 * inner_rescan_run_cost for additional ones.
2071 			 */
2072 			run_cost += inner_run_cost * inner_scan_frac;
2073 			if (outer_matched_rows > 1)
2074 				run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
2075 
2076 			/*
2077 			 * Add the cost of inner-scan executions for unmatched outer rows.
2078 			 * We estimate this as the same cost as returning the first tuple
2079 			 * of a nonempty scan.  We consider that these are all rescans,
2080 			 * since we used inner_run_cost once already.
2081 			 */
2082 			run_cost += (outer_path_rows - outer_matched_rows) *
2083 				inner_rescan_run_cost / inner_path_rows;
2084 
2085 			/*
2086 			 * We won't be evaluating any quals at all for unmatched rows, so
2087 			 * don't add them to ntuples.
2088 			 */
2089 		}
2090 		else
2091 		{
2092 			/*
2093 			 * Here, a complicating factor is that rescans may be cheaper than
2094 			 * first scans.  If we never scan all the way to the end of the
2095 			 * inner rel, it might be (depending on the plan type) that we'd
2096 			 * never pay the whole inner first-scan run cost.  However it is
2097 			 * difficult to estimate whether that will happen (and it could
2098 			 * not happen if there are any unmatched outer rows!), so be
2099 			 * conservative and always charge the whole first-scan cost once.
2100 			 */
2101 			run_cost += inner_run_cost;
2102 
2103 			/* Add inner run cost for additional outer tuples having matches */
2104 			if (outer_matched_rows > 1)
2105 				run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
2106 
2107 			/* Add inner run cost for unmatched outer tuples */
2108 			run_cost += (outer_path_rows - outer_matched_rows) *
2109 				inner_rescan_run_cost;
2110 
2111 			/* And count the unmatched join tuples as being processed */
2112 			ntuples += (outer_path_rows - outer_matched_rows) *
2113 				inner_path_rows;
2114 		}
2115 	}
2116 	else
2117 	{
2118 		/* Normal-case source costs were included in preliminary estimate */
2119 
2120 		/* Compute number of tuples processed (not number emitted!) */
2121 		ntuples = outer_path_rows * inner_path_rows;
2122 	}
2123 
2124 	/* CPU costs */
2125 	cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
2126 	startup_cost += restrict_qual_cost.startup;
2127 	cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
2128 	run_cost += cpu_per_tuple * ntuples;
2129 
2130 	/* tlist eval costs are paid per output row, not per tuple scanned */
2131 	startup_cost += path->path.pathtarget->cost.startup;
2132 	run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
2133 
2134 	path->path.startup_cost = startup_cost;
2135 	path->path.total_cost = startup_cost + run_cost;
2136 }
2137 
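/*
 * Illustrative sketch of the SEMI/ANTI tuple-count estimate above: matched
 * outer rows stop after roughly 2/(match_count+1) of the inner rows, while
 * unmatched outer rows contribute whole inner scans only when the inner
 * side is not an indexscan on the join quals.  The function and parameters
 * are hypothetical stand-ins for the semifactors fields and the
 * has_indexed_join_quals() test.
 */
static double
semi_anti_tuples_processed_sketch(double outer_rows, double inner_rows,
								  double outer_match_frac, double match_count,
								  bool indexed_join_quals)
{
	double		outer_matched = rint(outer_rows * outer_match_frac);
	double		inner_scan_frac = 2.0 / (match_count + 1.0);
	double		ntuples;

	/* quals evaluated for matched outer rows' partial inner scans */
	ntuples = outer_matched * inner_rows * inner_scan_frac;

	/* unmatched outer rows scan (and test) the whole inner rel if no index */
	if (!indexed_join_quals)
		ntuples += (outer_rows - outer_matched) * inner_rows;

	return ntuples;
}
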
2138 /*
2139  * initial_cost_mergejoin
2140  *	  Preliminary estimate of the cost of a mergejoin path.
2141  *
2142  * This must quickly produce lower-bound estimates of the path's startup and
2143  * total costs.  If we are unable to eliminate the proposed path from
2144  * consideration using the lower bounds, final_cost_mergejoin will be called
2145  * to obtain the final estimates.
2146  *
2147  * The exact division of labor between this function and final_cost_mergejoin
2148  * is private to them, and represents a tradeoff between speed of the initial
2149  * estimate and getting a tight lower bound.  We choose to not examine the
2150  * join quals here, except for obtaining the scan selectivity estimate which
2151  * is really essential (but fortunately, use of caching keeps the cost of
2152  * getting that down to something reasonable).
2153  * We also assume that cost_sort is cheap enough to use here.
2154  *
2155  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2156  *		other data to be used by final_cost_mergejoin
2157  * 'jointype' is the type of join to be performed
2158  * 'mergeclauses' is the list of joinclauses to be used as merge clauses
2159  * 'outer_path' is the outer input to the join
2160  * 'inner_path' is the inner input to the join
2161  * 'outersortkeys' is the list of sort keys for the outer path
2162  * 'innersortkeys' is the list of sort keys for the inner path
2163  * 'sjinfo' is extra info about the join for selectivity estimation
2164  *
2165  * Note: outersortkeys and innersortkeys should be NIL if no explicit
2166  * sort is needed because the respective source path is already ordered.
2167  */
2168 void
2169 initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
2170 					   JoinType jointype,
2171 					   List *mergeclauses,
2172 					   Path *outer_path, Path *inner_path,
2173 					   List *outersortkeys, List *innersortkeys,
2174 					   SpecialJoinInfo *sjinfo)
2175 {
2176 	Cost		startup_cost = 0;
2177 	Cost		run_cost = 0;
2178 	double		outer_path_rows = outer_path->rows;
2179 	double		inner_path_rows = inner_path->rows;
2180 	Cost		inner_run_cost;
2181 	double		outer_rows,
2182 				inner_rows,
2183 				outer_skip_rows,
2184 				inner_skip_rows;
2185 	Selectivity outerstartsel,
2186 				outerendsel,
2187 				innerstartsel,
2188 				innerendsel;
2189 	Path		sort_path;		/* dummy for result of cost_sort */
2190 
2191 	/* Protect some assumptions below that rowcounts aren't zero or NaN */
2192 	if (outer_path_rows <= 0 || isnan(outer_path_rows))
2193 		outer_path_rows = 1;
2194 	if (inner_path_rows <= 0 || isnan(inner_path_rows))
2195 		inner_path_rows = 1;
2196 
2197 	/*
2198 	 * A merge join will stop as soon as it exhausts either input stream
2199 	 * (unless it's an outer join, in which case the outer side has to be
2200 	 * scanned all the way anyway).  Estimate fraction of the left and right
2201 	 * inputs that will actually need to be scanned.  Likewise, we can
2202 	 * estimate the number of rows that will be skipped before the first join
2203 	 * pair is found, which should be factored into startup cost. We use only
2204 	 * the first (most significant) merge clause for this purpose. Since
2205 	 * mergejoinscansel() is a fairly expensive computation, we cache the
2206 	 * results in the merge clause RestrictInfo.
2207 	 */
2208 	if (mergeclauses && jointype != JOIN_FULL)
2209 	{
2210 		RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
2211 		List	   *opathkeys;
2212 		List	   *ipathkeys;
2213 		PathKey    *opathkey;
2214 		PathKey    *ipathkey;
2215 		MergeScanSelCache *cache;
2216 
2217 		/* Get the input pathkeys to determine the sort-order details */
2218 		opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
2219 		ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
2220 		Assert(opathkeys);
2221 		Assert(ipathkeys);
2222 		opathkey = (PathKey *) linitial(opathkeys);
2223 		ipathkey = (PathKey *) linitial(ipathkeys);
2224 		/* debugging check */
2225 		if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
2226 			opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
2227 			opathkey->pk_strategy != ipathkey->pk_strategy ||
2228 			opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
2229 			elog(ERROR, "left and right pathkeys do not match in mergejoin");
2230 
2231 		/* Get the selectivity with caching */
2232 		cache = cached_scansel(root, firstclause, opathkey);
2233 
2234 		if (bms_is_subset(firstclause->left_relids,
2235 						  outer_path->parent->relids))
2236 		{
2237 			/* left side of clause is outer */
2238 			outerstartsel = cache->leftstartsel;
2239 			outerendsel = cache->leftendsel;
2240 			innerstartsel = cache->rightstartsel;
2241 			innerendsel = cache->rightendsel;
2242 		}
2243 		else
2244 		{
2245 			/* left side of clause is inner */
2246 			outerstartsel = cache->rightstartsel;
2247 			outerendsel = cache->rightendsel;
2248 			innerstartsel = cache->leftstartsel;
2249 			innerendsel = cache->leftendsel;
2250 		}
2251 		if (jointype == JOIN_LEFT ||
2252 			jointype == JOIN_ANTI)
2253 		{
2254 			outerstartsel = 0.0;
2255 			outerendsel = 1.0;
2256 		}
2257 		else if (jointype == JOIN_RIGHT)
2258 		{
2259 			innerstartsel = 0.0;
2260 			innerendsel = 1.0;
2261 		}
2262 	}
2263 	else
2264 	{
2265 		/* cope with clauseless or full mergejoin */
2266 		outerstartsel = innerstartsel = 0.0;
2267 		outerendsel = innerendsel = 1.0;
2268 	}
2269 
2270 	/*
2271 	 * Convert selectivities to row counts.  We force outer_rows and
2272 	 * inner_rows to be at least 1, but the skip_rows estimates can be zero.
2273 	 */
2274 	outer_skip_rows = rint(outer_path_rows * outerstartsel);
2275 	inner_skip_rows = rint(inner_path_rows * innerstartsel);
2276 	outer_rows = clamp_row_est(outer_path_rows * outerendsel);
2277 	inner_rows = clamp_row_est(inner_path_rows * innerendsel);
2278 
2279 	/* skip rows can become NaN when path rows has become infinite */
2280 	Assert(outer_skip_rows <= outer_rows || isnan(outer_skip_rows));
2281 	Assert(inner_skip_rows <= inner_rows || isnan(inner_skip_rows));
2282 
2283 	/*
2284 	 * Readjust scan selectivities to account for above rounding.  This is
2285 	 * normally an insignificant effect, but when there are only a few rows in
2286 	 * the inputs, failing to do this makes for a large percentage error.
2287 	 */
2288 	outerstartsel = outer_skip_rows / outer_path_rows;
2289 	innerstartsel = inner_skip_rows / inner_path_rows;
2290 	outerendsel = outer_rows / outer_path_rows;
2291 	innerendsel = inner_rows / inner_path_rows;
2292 
2293 	/* start sel can become NaN when path rows has become infinite */
2294 	Assert(outerstartsel <= outerendsel || isnan(outerstartsel));
2295 	Assert(innerstartsel <= innerendsel || isnan(innerstartsel));
2296 
2297 	/* cost of source data */
2298 
2299 	if (outersortkeys)			/* do we need to sort outer? */
2300 	{
2301 		cost_sort(&sort_path,
2302 				  root,
2303 				  outersortkeys,
2304 				  outer_path->total_cost,
2305 				  outer_path_rows,
2306 				  outer_path->pathtarget->width,
2307 				  0.0,
2308 				  work_mem,
2309 				  -1.0);
2310 		startup_cost += sort_path.startup_cost;
2311 		startup_cost += (sort_path.total_cost - sort_path.startup_cost)
2312 			* outerstartsel;
2313 		run_cost += (sort_path.total_cost - sort_path.startup_cost)
2314 			* (outerendsel - outerstartsel);
2315 	}
2316 	else
2317 	{
2318 		startup_cost += outer_path->startup_cost;
2319 		startup_cost += (outer_path->total_cost - outer_path->startup_cost)
2320 			* outerstartsel;
2321 		run_cost += (outer_path->total_cost - outer_path->startup_cost)
2322 			* (outerendsel - outerstartsel);
2323 	}
2324 
2325 	if (innersortkeys)			/* do we need to sort inner? */
2326 	{
2327 		cost_sort(&sort_path,
2328 				  root,
2329 				  innersortkeys,
2330 				  inner_path->total_cost,
2331 				  inner_path_rows,
2332 				  inner_path->pathtarget->width,
2333 				  0.0,
2334 				  work_mem,
2335 				  -1.0);
2336 		startup_cost += sort_path.startup_cost;
2337 		startup_cost += (sort_path.total_cost - sort_path.startup_cost)
2338 			* innerstartsel;
2339 		inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
2340 			* (innerendsel - innerstartsel);
2341 	}
2342 	else
2343 	{
2344 		startup_cost += inner_path->startup_cost;
2345 		startup_cost += (inner_path->total_cost - inner_path->startup_cost)
2346 			* innerstartsel;
2347 		inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
2348 			* (innerendsel - innerstartsel);
2349 	}
2350 
2351 	/*
2352 	 * We can't yet determine whether rescanning occurs, or whether
2353 	 * materialization of the inner input should be done.  The minimum
2354 	 * possible inner input cost, regardless of rescan and materialization
2355 	 * considerations, is inner_run_cost.  We include that in
2356 	 * workspace->total_cost, but not yet in run_cost.
2357 	 */
2358 
2359 	/* CPU costs left for later */
2360 
2361 	/* Public result fields */
2362 	workspace->startup_cost = startup_cost;
2363 	workspace->total_cost = startup_cost + run_cost + inner_run_cost;
2364 	/* Save private data for final_cost_mergejoin */
2365 	workspace->run_cost = run_cost;
2366 	workspace->inner_run_cost = inner_run_cost;
2367 	workspace->outer_rows = outer_rows;
2368 	workspace->inner_rows = inner_rows;
2369 	workspace->outer_skip_rows = outer_skip_rows;
2370 	workspace->inner_skip_rows = inner_skip_rows;
2371 }
2372 
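/*
 * Illustrative sketch of the selectivity-to-rowcount conversion above: the
 * skip and scan counts are rounded to whole rows (scan count clamped to at
 * least one, roughly what clamp_row_est does), and the selectivities are
 * then re-derived from the rounded counts so the two stay consistent even
 * for very small inputs.  The function and parameters are hypothetical;
 * path_rows is assumed positive, as the caller above ensures.
 */
static void
merge_scan_bounds_sketch(double path_rows, double startsel, double endsel,
						 double *skip_rows, double *scan_rows,
						 double *adj_startsel, double *adj_endsel)
{
	*skip_rows = rint(path_rows * startsel);
	*scan_rows = rint(path_rows * endsel);
	if (*scan_rows < 1.0)
		*scan_rows = 1.0;

	/* readjust selectivities to account for the rounding */
	*adj_startsel = *skip_rows / path_rows;
	*adj_endsel = *scan_rows / path_rows;
}
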
2373 /*
2374  * final_cost_mergejoin
2375  *	  Final estimate of the cost and result size of a mergejoin path.
2376  *
2377  * Unlike other costsize functions, this routine makes one actual decision:
2378  * whether we should materialize the inner path.  We do that either because
2379  * the inner path can't support mark/restore, or because it's cheaper to
2380  * use an interposed Material node to handle mark/restore.  When the decision
2381  * is cost-based it would be logically cleaner to build and cost two separate
2382  * paths with and without that flag set; but that would require repeating most
2383  * of the cost calculations, which are not all that cheap.  Since the choice
2384  * will not affect output pathkeys or startup cost, only total cost, there is
2385  * no possibility of wanting to keep both paths.  So it seems best to make
2386  * the decision here and record it in the path's materialize_inner field.
2387  *
2388  * 'path' is already filled in except for the rows and cost fields and
2389  *		materialize_inner
2390  * 'workspace' is the result from initial_cost_mergejoin
2391  * 'sjinfo' is extra info about the join for selectivity estimation
2392  */
2393 void
2394 final_cost_mergejoin(PlannerInfo *root, MergePath *path,
2395 					 JoinCostWorkspace *workspace,
2396 					 SpecialJoinInfo *sjinfo)
2397 {
2398 	Path	   *outer_path = path->jpath.outerjoinpath;
2399 	Path	   *inner_path = path->jpath.innerjoinpath;
2400 	double		inner_path_rows = inner_path->rows;
2401 	List	   *mergeclauses = path->path_mergeclauses;
2402 	List	   *innersortkeys = path->innersortkeys;
2403 	Cost		startup_cost = workspace->startup_cost;
2404 	Cost		run_cost = workspace->run_cost;
2405 	Cost		inner_run_cost = workspace->inner_run_cost;
2406 	double		outer_rows = workspace->outer_rows;
2407 	double		inner_rows = workspace->inner_rows;
2408 	double		outer_skip_rows = workspace->outer_skip_rows;
2409 	double		inner_skip_rows = workspace->inner_skip_rows;
2410 	Cost		cpu_per_tuple,
2411 				bare_inner_cost,
2412 				mat_inner_cost;
2413 	QualCost	merge_qual_cost;
2414 	QualCost	qp_qual_cost;
2415 	double		mergejointuples,
2416 				rescannedtuples;
2417 	double		rescanratio;
2418 
2419 	/* Protect some assumptions below that rowcounts aren't zero or NaN */
2420 	if (inner_path_rows <= 0 || isnan(inner_path_rows))
2421 		inner_path_rows = 1;
2422 
2423 	/* Mark the path with the correct row estimate */
2424 	if (path->jpath.path.param_info)
2425 		path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
2426 	else
2427 		path->jpath.path.rows = path->jpath.path.parent->rows;
2428 
2429 	/* For partial paths, scale row estimate. */
2430 	if (path->jpath.path.parallel_workers > 0)
2431 	{
2432 		double	parallel_divisor = get_parallel_divisor(&path->jpath.path);
2433 
2434 		path->jpath.path.rows =
2435 			clamp_row_est(path->jpath.path.rows / parallel_divisor);
2436 	}
2437 
2438 	/*
2439 	 * We could include disable_cost in the preliminary estimate, but that
2440 	 * would amount to optimizing for the case where the join method is
2441 	 * disabled, which doesn't seem like the way to bet.
2442 	 */
2443 	if (!enable_mergejoin)
2444 		startup_cost += disable_cost;
2445 
2446 	/*
2447 	 * Compute cost of the mergequals and qpquals (other restriction clauses)
2448 	 * separately.
2449 	 */
2450 	cost_qual_eval(&merge_qual_cost, mergeclauses, root);
2451 	cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2452 	qp_qual_cost.startup -= merge_qual_cost.startup;
2453 	qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
2454 
2455 	/*
2456 	 * Get approx # tuples passing the mergequals.  We use approx_tuple_count
2457 	 * here because we need an estimate done with JOIN_INNER semantics.
2458 	 */
2459 	mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
2460 
2461 	/*
2462 	 * When there are equal merge keys in the outer relation, the mergejoin
2463 	 * must rescan any matching tuples in the inner relation. This means
2464 	 * re-fetching inner tuples; we have to estimate how often that happens.
2465 	 *
2466 	 * For regular inner and outer joins, the number of re-fetches can be
2467 	 * estimated approximately as size of merge join output minus size of
2468 	 * inner relation. Assume that the distinct key values are 1, 2, ..., and
2469 	 * denote the number of values of each key in the outer relation as m1,
2470 	 * m2, ...; in the inner relation, n1, n2, ...  Then we have
2471 	 *
2472 	 * size of join = m1 * n1 + m2 * n2 + ...
2473 	 *
2474 	 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
2475 	 * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
2476 	 * relation
2477 	 *
2478 	 * This equation works correctly for outer tuples having no inner match
2479 	 * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
2480 	 * are effectively subtracting those from the number of rescanned tuples,
2481 	 * when we should not.  Can we do better without expensive selectivity
2482 	 * computations?
2483 	 *
2484 	 * The whole issue is moot if we are working from a unique-ified outer
2485 	 * input.
2486 	 */
2487 	if (IsA(outer_path, UniquePath))
2488 		rescannedtuples = 0;
2489 	else
2490 	{
2491 		rescannedtuples = mergejointuples - inner_path_rows;
2492 		/* Must clamp because of possible underestimate */
2493 		if (rescannedtuples < 0)
2494 			rescannedtuples = 0;
2495 	}
2496 
2497 	/*
2498 	 * We'll inflate various costs this much to account for rescanning.  Note
2499 	 * that this is to be multiplied by something involving inner_rows, or
2500 	 * another number related to the portion of the inner rel we'll scan.
2501 	 */
2502 	rescanratio = 1.0 + (rescannedtuples / inner_rows);
2503 
2504 	/*
2505 	 * Decide whether we want to materialize the inner input to shield it from
2506 	 * mark/restore and performing re-fetches.  Our cost model for regular
2507 	 * re-fetches is that a re-fetch costs the same as an original fetch,
2508 	 * which is probably an overestimate; but on the other hand we ignore the
2509 	 * bookkeeping costs of mark/restore.  Not clear if it's worth developing
2510 	 * a more refined model.  So we just need to inflate the inner run cost by
2511 	 * rescanratio.
2512 	 */
2513 	bare_inner_cost = inner_run_cost * rescanratio;
2514 
2515 	/*
2516 	 * When we interpose a Material node the re-fetch cost is assumed to be
2517 	 * just cpu_operator_cost per tuple, independently of the underlying
2518 	 * plan's cost; and we charge an extra cpu_operator_cost per original
2519 	 * fetch as well.  Note that we're assuming the materialize node will
2520 	 * never spill to disk, since it only has to remember tuples back to the
2521 	 * last mark.  (If there are a huge number of duplicates, our other cost
2522 	 * factors will make the path so expensive that it probably won't get
2523 	 * chosen anyway.)	So we don't use cost_rescan here.
2524 	 *
2525 	 * Note: keep this estimate in sync with create_mergejoin_plan's labeling
2526 	 * of the generated Material node.
2527 	 */
2528 	mat_inner_cost = inner_run_cost +
2529 		cpu_operator_cost * inner_rows * rescanratio;
2530 
2531 	/*
2532 	 * Prefer materializing if it looks cheaper, unless the user has asked to
2533 	 * suppress materialization.
2534 	 */
2535 	if (enable_material && mat_inner_cost < bare_inner_cost)
2536 		path->materialize_inner = true;
2537 
2538 	/*
2539 	 * Even if materializing doesn't look cheaper, we *must* do it if the
2540 	 * inner path is to be used directly (without sorting) and it doesn't
2541 	 * support mark/restore.
2542 	 *
2543 	 * Since the inner side must be ordered, and only Sorts and IndexScans can
2544 	 * create order to begin with, and they both support mark/restore, you
2545 	 * might think there's no problem --- but you'd be wrong.  Nestloop and
2546 	 * merge joins can *preserve* the order of their inputs, so they can be
2547 	 * selected as the input of a mergejoin, and they don't support
2548 	 * mark/restore at present.
2549 	 *
2550 	 * We don't test the value of enable_material here, because
2551 	 * materialization is required for correctness in this case, and turning
2552 	 * it off does not entitle us to deliver an invalid plan.
2553 	 */
2554 	else if (innersortkeys == NIL &&
2555 			 !ExecSupportsMarkRestore(inner_path))
2556 		path->materialize_inner = true;
2557 
2558 	/*
2559 	 * Also, force materializing if the inner path is to be sorted and the
2560 	 * sort is expected to spill to disk.  This is because the final merge
2561 	 * pass can be done on-the-fly if it doesn't have to support mark/restore.
2562 	 * We don't try to adjust the cost estimates for this consideration,
2563 	 * though.
2564 	 *
2565 	 * Since materialization is a performance optimization in this case,
2566 	 * rather than necessary for correctness, we skip it if enable_material is
2567 	 * off.
2568 	 */
2569 	else if (enable_material && innersortkeys != NIL &&
2570 			 relation_byte_size(inner_path_rows,
2571 								inner_path->pathtarget->width) >
2572 			 (work_mem * 1024L))
2573 		path->materialize_inner = true;
2574 	else
2575 		path->materialize_inner = false;
2576 
2577 	/* Charge the right incremental cost for the chosen case */
2578 	if (path->materialize_inner)
2579 		run_cost += mat_inner_cost;
2580 	else
2581 		run_cost += bare_inner_cost;
2582 
2583 	/* CPU costs */
2584 
2585 	/*
2586 	 * The number of tuple comparisons needed is approximately number of outer
2587 	 * rows plus number of inner rows plus number of rescanned tuples (can we
2588 	 * refine this?).  At each one, we need to evaluate the mergejoin quals.
2589 	 */
2590 	startup_cost += merge_qual_cost.startup;
2591 	startup_cost += merge_qual_cost.per_tuple *
2592 		(outer_skip_rows + inner_skip_rows * rescanratio);
2593 	run_cost += merge_qual_cost.per_tuple *
2594 		((outer_rows - outer_skip_rows) +
2595 		 (inner_rows - inner_skip_rows) * rescanratio);
2596 
2597 	/*
2598 	 * For each tuple that gets through the mergejoin proper, we charge
2599 	 * cpu_tuple_cost plus the cost of evaluating additional restriction
2600 	 * clauses that are to be applied at the join.  (This is pessimistic since
2601 	 * not all of the quals may get evaluated at each tuple.)
2602 	 *
2603 	 * Note: we could adjust for SEMI/ANTI joins skipping some qual
2604 	 * evaluations here, but it's probably not worth the trouble.
2605 	 */
2606 	startup_cost += qp_qual_cost.startup;
2607 	cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2608 	run_cost += cpu_per_tuple * mergejointuples;
2609 
2610 	/* tlist eval costs are paid per output row, not per tuple scanned */
2611 	startup_cost += path->jpath.path.pathtarget->cost.startup;
2612 	run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
2613 
2614 	path->jpath.path.startup_cost = startup_cost;
2615 	path->jpath.path.total_cost = startup_cost + run_cost;
2616 }
2617 
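/*
 * Illustrative sketch of the rescan accounting and materialize decision
 * above: re-fetches are estimated as join size minus inner size, a bare
 * inner rescan costs the same as an original fetch, and an interposed
 * Material node instead charges cpu_operator_cost per (re)fetch on top of
 * one full inner scan.  The function and parameters are hypothetical, and
 * the sketch ignores enable_material and the forced-materialization cases
 * handled above.
 */
static double
mergejoin_inner_cost_sketch(double mergejointuples, double inner_path_rows,
							double inner_rows, double inner_run_cost,
							bool outer_is_unique, double op_cost,
							bool *materialize)
{
	double		rescannedtuples;
	double		rescanratio;
	double		bare_inner_cost;
	double		mat_inner_cost;

	/* number of re-fetched inner tuples = size of join - size of inner rel */
	rescannedtuples = outer_is_unique ? 0.0 : mergejointuples - inner_path_rows;
	if (rescannedtuples < 0)
		rescannedtuples = 0;
	rescanratio = 1.0 + (rescannedtuples / inner_rows);

	/* re-fetch from the inner plan itself vs. from a Material node */
	bare_inner_cost = inner_run_cost * rescanratio;
	mat_inner_cost = inner_run_cost + op_cost * inner_rows * rescanratio;

	*materialize = (mat_inner_cost < bare_inner_cost);
	return *materialize ? mat_inner_cost : bare_inner_cost;
}
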
2618 /*
2619  * run mergejoinscansel() with caching
2620  */
2621 static MergeScanSelCache *
2622 cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
2623 {
2624 	MergeScanSelCache *cache;
2625 	ListCell   *lc;
2626 	Selectivity leftstartsel,
2627 				leftendsel,
2628 				rightstartsel,
2629 				rightendsel;
2630 	MemoryContext oldcontext;
2631 
2632 	/* Do we have this result already? */
2633 	foreach(lc, rinfo->scansel_cache)
2634 	{
2635 		cache = (MergeScanSelCache *) lfirst(lc);
2636 		if (cache->opfamily == pathkey->pk_opfamily &&
2637 			cache->collation == pathkey->pk_eclass->ec_collation &&
2638 			cache->strategy == pathkey->pk_strategy &&
2639 			cache->nulls_first == pathkey->pk_nulls_first)
2640 			return cache;
2641 	}
2642 
2643 	/* Nope, do the computation */
2644 	mergejoinscansel(root,
2645 					 (Node *) rinfo->clause,
2646 					 pathkey->pk_opfamily,
2647 					 pathkey->pk_strategy,
2648 					 pathkey->pk_nulls_first,
2649 					 &leftstartsel,
2650 					 &leftendsel,
2651 					 &rightstartsel,
2652 					 &rightendsel);
2653 
2654 	/* Cache the result in suitably long-lived workspace */
2655 	oldcontext = MemoryContextSwitchTo(root->planner_cxt);
2656 
2657 	cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
2658 	cache->opfamily = pathkey->pk_opfamily;
2659 	cache->collation = pathkey->pk_eclass->ec_collation;
2660 	cache->strategy = pathkey->pk_strategy;
2661 	cache->nulls_first = pathkey->pk_nulls_first;
2662 	cache->leftstartsel = leftstartsel;
2663 	cache->leftendsel = leftendsel;
2664 	cache->rightstartsel = rightstartsel;
2665 	cache->rightendsel = rightendsel;
2666 
2667 	rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
2668 
2669 	MemoryContextSwitchTo(oldcontext);
2670 
2671 	return cache;
2672 }
2673 
2674 /*
2675  * initial_cost_hashjoin
2676  *	  Preliminary estimate of the cost of a hashjoin path.
2677  *
2678  * This must quickly produce lower-bound estimates of the path's startup and
2679  * total costs.  If we are unable to eliminate the proposed path from
2680  * consideration using the lower bounds, final_cost_hashjoin will be called
2681  * to obtain the final estimates.
2682  *
2683  * The exact division of labor between this function and final_cost_hashjoin
2684  * is private to them, and represents a tradeoff between speed of the initial
2685  * estimate and getting a tight lower bound.  We choose to not examine the
2686  * join quals here (other than by counting the number of hash clauses),
2687  * so we can't do much with CPU costs.  We do assume that
2688  * ExecChooseHashTableSize is cheap enough to use here.
2689  *
2690  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2691  *		other data to be used by final_cost_hashjoin
2692  * 'jointype' is the type of join to be performed
2693  * 'hashclauses' is the list of joinclauses to be used as hash clauses
2694  * 'outer_path' is the outer input to the join
2695  * 'inner_path' is the inner input to the join
2696  * 'sjinfo' is extra info about the join for selectivity estimation
2697  * 'semifactors' contains valid data if jointype is SEMI or ANTI
2698  */
2699 void
2700 initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
2701 					  JoinType jointype,
2702 					  List *hashclauses,
2703 					  Path *outer_path, Path *inner_path,
2704 					  SpecialJoinInfo *sjinfo,
2705 					  SemiAntiJoinFactors *semifactors)
2706 {
2707 	Cost		startup_cost = 0;
2708 	Cost		run_cost = 0;
2709 	double		outer_path_rows = outer_path->rows;
2710 	double		inner_path_rows = inner_path->rows;
2711 	int			num_hashclauses = list_length(hashclauses);
2712 	int			numbuckets;
2713 	int			numbatches;
2714 	int			num_skew_mcvs;
2715 
2716 	/* cost of source data */
2717 	startup_cost += outer_path->startup_cost;
2718 	run_cost += outer_path->total_cost - outer_path->startup_cost;
2719 	startup_cost += inner_path->total_cost;
2720 
2721 	/*
2722 	 * Cost of computing hash function: must do it once per input tuple. We
2723 	 * charge one cpu_operator_cost for each column's hash function.  Also,
2724 	 * tack on one cpu_tuple_cost per inner row, to model the costs of
2725 	 * inserting the row into the hashtable.
2726 	 *
2727 	 * XXX when a hashclause is more complex than a single operator, we really
2728 	 * should charge the extra eval costs of the left or right side, as
2729 	 * appropriate, here.  This seems more work than it's worth at the moment.
2730 	 */
2731 	startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
2732 		* inner_path_rows;
2733 	run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
2734 
2735 	/*
2736 	 * Get hash table size that executor would use for inner relation.
2737 	 *
2738 	 * XXX for the moment, always assume that skew optimization will be
2739 	 * performed.  As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
2740 	 * trying to determine that for sure.
2741 	 *
2742 	 * XXX at some point it might be interesting to try to account for skew
2743 	 * optimization in the cost estimate, but for now, we don't.
2744 	 */
2745 	ExecChooseHashTableSize(inner_path_rows,
2746 							inner_path->pathtarget->width,
2747 							true,		/* useskew */
2748 							&numbuckets,
2749 							&numbatches,
2750 							&num_skew_mcvs);
2751 
2752 	/*
2753 	 * If inner relation is too big then we will need to "batch" the join,
2754 	 * which implies writing and reading most of the tuples to disk an extra
2755 	 * time.  Charge seq_page_cost per page, since the I/O should be nice and
2756 	 * sequential.  Writing the inner rel counts as startup cost, all the rest
2757 	 * as run cost.
2758 	 */
2759 	if (numbatches > 1)
2760 	{
2761 		double		outerpages = page_size(outer_path_rows,
2762 										   outer_path->pathtarget->width);
2763 		double		innerpages = page_size(inner_path_rows,
2764 										   inner_path->pathtarget->width);
2765 
2766 		startup_cost += seq_page_cost * innerpages;
2767 		run_cost += seq_page_cost * (innerpages + 2 * outerpages);
2768 	}
2769 
2770 	/* CPU costs left for later */
2771 
2772 	/* Public result fields */
2773 	workspace->startup_cost = startup_cost;
2774 	workspace->total_cost = startup_cost + run_cost;
2775 	/* Save private data for final_cost_hashjoin */
2776 	workspace->run_cost = run_cost;
2777 	workspace->numbuckets = numbuckets;
2778 	workspace->numbatches = numbatches;
2779 }
2780 
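/*
 * Illustrative sketch of the batching I/O charge above: with more than one
 * batch, the inner rel is written out while the hash table is built (startup
 * cost), then read back, and the outer rel is both written and read during
 * execution (run cost), all at seq_page_cost per page.  The function and
 * parameters are hypothetical stand-ins for page_size() results and the
 * ExecChooseHashTableSize() batch count.
 */
static void
hashjoin_batch_io_sketch(double outerpages, double innerpages, int numbatches,
						 double seq_cost, double *startup, double *run)
{
	*startup = 0.0;
	*run = 0.0;

	if (numbatches > 1)
	{
		/* write the inner rel once during hash-table build */
		*startup += seq_cost * innerpages;
		/* read inner back, write and read the outer rel */
		*run += seq_cost * (innerpages + 2.0 * outerpages);
	}
}
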
2781 /*
2782  * final_cost_hashjoin
2783  *	  Final estimate of the cost and result size of a hashjoin path.
2784  *
2785  * Note: the numbatches estimate is also saved into 'path' for use later
2786  *
2787  * 'path' is already filled in except for the rows and cost fields and
2788  *		num_batches
2789  * 'workspace' is the result from initial_cost_hashjoin
2790  * 'sjinfo' is extra info about the join for selectivity estimation
2791  * 'semifactors' contains valid data if path->jointype is SEMI or ANTI
2792  */
2793 void
2794 final_cost_hashjoin(PlannerInfo *root, HashPath *path,
2795 					JoinCostWorkspace *workspace,
2796 					SpecialJoinInfo *sjinfo,
2797 					SemiAntiJoinFactors *semifactors)
2798 {
2799 	Path	   *outer_path = path->jpath.outerjoinpath;
2800 	Path	   *inner_path = path->jpath.innerjoinpath;
2801 	double		outer_path_rows = outer_path->rows;
2802 	double		inner_path_rows = inner_path->rows;
2803 	List	   *hashclauses = path->path_hashclauses;
2804 	Cost		startup_cost = workspace->startup_cost;
2805 	Cost		run_cost = workspace->run_cost;
2806 	int			numbuckets = workspace->numbuckets;
2807 	int			numbatches = workspace->numbatches;
2808 	Cost		cpu_per_tuple;
2809 	QualCost	hash_qual_cost;
2810 	QualCost	qp_qual_cost;
2811 	double		hashjointuples;
2812 	double		virtualbuckets;
2813 	Selectivity innerbucketsize;
2814 	ListCell   *hcl;
2815 
2816 	/* Mark the path with the correct row estimate */
2817 	if (path->jpath.path.param_info)
2818 		path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
2819 	else
2820 		path->jpath.path.rows = path->jpath.path.parent->rows;
2821 
2822 	/* For partial paths, scale row estimate. */
2823 	if (path->jpath.path.parallel_workers > 0)
2824 	{
2825 		double	parallel_divisor = get_parallel_divisor(&path->jpath.path);
2826 
2827 		path->jpath.path.rows =
2828 			clamp_row_est(path->jpath.path.rows / parallel_divisor);
2829 	}
2830 
2831 	/*
2832 	 * We could include disable_cost in the preliminary estimate, but that
2833 	 * would amount to optimizing for the case where the join method is
2834 	 * disabled, which doesn't seem like the way to bet.
2835 	 */
2836 	if (!enable_hashjoin)
2837 		startup_cost += disable_cost;
2838 
2839 	/* mark the path with estimated # of batches */
2840 	path->num_batches = numbatches;
2841 
2842 	/* and compute the number of "virtual" buckets in the whole join */
2843 	virtualbuckets = (double) numbuckets * (double) numbatches;
2844 
2845 	/*
2846 	 * Determine bucketsize fraction for inner relation.  We use the smallest
2847 	 * bucketsize estimated for any individual hashclause; this is undoubtedly
2848 	 * conservative.
2849 	 *
2850 	 * BUT: if inner relation has been unique-ified, we can assume it's good
2851 	 * for hashing.  This is important both because it's the right answer, and
2852 	 * because we avoid contaminating the cache with a value that's wrong for
2853 	 * non-unique-ified paths.
2854 	 */
2855 	if (IsA(inner_path, UniquePath))
2856 		innerbucketsize = 1.0 / virtualbuckets;
2857 	else
2858 	{
2859 		innerbucketsize = 1.0;
2860 		foreach(hcl, hashclauses)
2861 		{
2862 			RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
2863 			Selectivity thisbucketsize;
2864 
2865 			Assert(IsA(restrictinfo, RestrictInfo));
2866 
2867 			/*
2868 			 * First we have to figure out which side of the hashjoin clause
2869 			 * is the inner side.
2870 			 *
2871 			 * Since we tend to visit the same clauses over and over when
2872 			 * planning a large query, we cache the bucketsize estimate in the
2873 			 * RestrictInfo node to avoid repeated lookups of statistics.
2874 			 */
2875 			if (bms_is_subset(restrictinfo->right_relids,
2876 							  inner_path->parent->relids))
2877 			{
2878 				/* righthand side is inner */
2879 				thisbucketsize = restrictinfo->right_bucketsize;
2880 				if (thisbucketsize < 0)
2881 				{
2882 					/* not cached yet */
2883 					thisbucketsize =
2884 						estimate_hash_bucketsize(root,
2885 										   get_rightop(restrictinfo->clause),
2886 												 virtualbuckets);
2887 					restrictinfo->right_bucketsize = thisbucketsize;
2888 				}
2889 			}
2890 			else
2891 			{
2892 				Assert(bms_is_subset(restrictinfo->left_relids,
2893 									 inner_path->parent->relids));
2894 				/* lefthand side is inner */
2895 				thisbucketsize = restrictinfo->left_bucketsize;
2896 				if (thisbucketsize < 0)
2897 				{
2898 					/* not cached yet */
2899 					thisbucketsize =
2900 						estimate_hash_bucketsize(root,
2901 											get_leftop(restrictinfo->clause),
2902 												 virtualbuckets);
2903 					restrictinfo->left_bucketsize = thisbucketsize;
2904 				}
2905 			}
2906 
2907 			if (innerbucketsize > thisbucketsize)
2908 				innerbucketsize = thisbucketsize;
2909 		}
2910 	}
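
	/*
	 * Illustrative sketch (hypothetical values): with two hashclauses whose
	 * estimated bucketsize fractions are 0.02 and 0.005, the loop above
	 * leaves innerbucketsize = 0.005, the smaller (more selective) of the
	 * two.  For a unique-ified inner rel with, say, virtualbuckets = 1048576,
	 * we would instead have taken innerbucketsize = 1.0 / 1048576.
	 */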
2911 
2912 	/*
2913 	 * Compute cost of the hashquals and qpquals (other restriction clauses)
2914 	 * separately.
2915 	 */
2916 	cost_qual_eval(&hash_qual_cost, hashclauses, root);
2917 	cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2918 	qp_qual_cost.startup -= hash_qual_cost.startup;
2919 	qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
2920 
2921 	/* CPU costs */
2922 
2923 	if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI)
2924 	{
2925 		double		outer_matched_rows;
2926 		Selectivity inner_scan_frac;
2927 
2928 		/*
2929 		 * SEMI or ANTI join: executor will stop after first match.
2930 		 *
2931 		 * For an outer-rel row that has at least one match, we can expect the
2932 		 * bucket scan to stop after a fraction 1/(match_count+1) of the
2933 		 * bucket's rows, if the matches are evenly distributed.  Since they
2934 		 * probably aren't quite evenly distributed, we apply a fuzz factor of
2935 		 * 2.0 to that fraction.  (If we used a larger fuzz factor, we'd have
2936 		 * to clamp inner_scan_frac to at most 1.0; but since match_count is
2937 		 * at least 1, no such clamp is needed now.)
2938 		 */
2939 		outer_matched_rows = rint(outer_path_rows * semifactors->outer_match_frac);
2940 		inner_scan_frac = 2.0 / (semifactors->match_count + 1.0);
2941 
2942 		startup_cost += hash_qual_cost.startup;
2943 		run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
2944 			clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
2945 
2946 		/*
2947 		 * For unmatched outer-rel rows, the picture is quite a lot different.
2948 		 * In the first place, there is no reason to assume that these rows
2949 		 * preferentially hit heavily-populated buckets; instead assume they
2950 		 * are uncorrelated with the inner distribution and so they see an
2951 		 * average bucket size of inner_path_rows / virtualbuckets.  In the
2952 		 * second place, it seems likely that they will have few if any exact
2953 		 * hash-code matches and so very few of the tuples in the bucket will
2954 		 * actually require eval of the hash quals.  We don't have any good
2955 		 * way to estimate how many will, but for the moment assume that the
2956 		 * effective cost per bucket entry is one-tenth what it is for
2957 		 * matchable tuples.
2958 		 */
2959 		run_cost += hash_qual_cost.per_tuple *
2960 			(outer_path_rows - outer_matched_rows) *
2961 			clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
2962 
2963 		/* Get # of tuples that will pass the basic join */
2964 		if (path->jpath.jointype == JOIN_SEMI)
2965 			hashjointuples = outer_matched_rows;
2966 		else
2967 			hashjointuples = outer_path_rows - outer_matched_rows;
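
		/*
		 * Worked example (hypothetical values): with outer_path_rows = 1000,
		 * outer_match_frac = 0.2 and match_count = 25, we get
		 * outer_matched_rows = 200 and inner_scan_frac = 2.0 / 26 ~= 0.077.
		 * With inner_path_rows = 500, innerbucketsize = 0.01 and
		 * virtualbuckets = 1024, each matched outer row is charged for about
		 * 500 * 0.01 * 0.077 ~= 0.4 bucket entries (clamped to 1), while each
		 * of the 800 unmatched rows is charged for 500 / 1024 ~= 0.5 entries
		 * (also clamped to 1) at one-tenth the per-entry cost.
		 */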
2968 	}
2969 	else
2970 	{
2971 		/*
2972 		 * The number of tuple comparisons needed is the number of outer
2973 		 * tuples times the typical number of tuples in a hash bucket, which
2974 		 * is the inner relation size times its bucketsize fraction.  At each
2975 		 * one, we need to evaluate the hashjoin quals.  But actually,
2976 		 * charging the full qual eval cost at each tuple is pessimistic,
2977 		 * since we don't evaluate the quals unless the hash values match
2978 		 * exactly.  For lack of a better idea, halve the cost estimate to
2979 		 * allow for that.
2980 		 */
2981 		startup_cost += hash_qual_cost.startup;
2982 		run_cost += hash_qual_cost.per_tuple * outer_path_rows *
2983 			clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
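
		/*
		 * Worked example (hypothetical values): with outer_path_rows = 1000,
		 * inner_path_rows = 500 and innerbucketsize = 0.01, each outer tuple
		 * probes a bucket of about 5 inner tuples, so we charge hash-qual
		 * evaluation for 1000 * 5 * 0.5 = 2500 tuple pairs, the 0.5 being the
		 * discount for comparisons skipped when hash values don't match.
		 */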
2984 
2985 		/*
2986 		 * Get approx # tuples passing the hashquals.  We use
2987 		 * approx_tuple_count here because we need an estimate done with
2988 		 * JOIN_INNER semantics.
2989 		 */
2990 		hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
2991 	}
2992 
2993 	/*
2994 	 * For each tuple that gets through the hashjoin proper, we charge
2995 	 * cpu_tuple_cost plus the cost of evaluating additional restriction
2996 	 * clauses that are to be applied at the join.  (This is pessimistic since
2997 	 * not all of the quals may get evaluated at each tuple.)
2998 	 */
2999 	startup_cost += qp_qual_cost.startup;
3000 	cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
3001 	run_cost += cpu_per_tuple * hashjointuples;
3002 
3003 	/* tlist eval costs are paid per output row, not per tuple scanned */
3004 	startup_cost += path->jpath.path.pathtarget->cost.startup;
3005 	run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3006 
3007 	path->jpath.path.startup_cost = startup_cost;
3008 	path->jpath.path.total_cost = startup_cost + run_cost;
3009 }
3010 
3011 
3012 /*
3013  * cost_subplan
3014  *		Figure the costs for a SubPlan (or initplan).
3015  *
3016  * Note: we could dig the subplan's Plan out of the root list, but in practice
3017  * all callers have it handy already, so we make them pass it.
3018  */
3019 void
3020 cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
3021 {
3022 	QualCost	sp_cost;
3023 
3024 	/* Figure any cost for evaluating the testexpr */
3025 	cost_qual_eval(&sp_cost,
3026 				   make_ands_implicit((Expr *) subplan->testexpr),
3027 				   root);
3028 
3029 	if (subplan->useHashTable)
3030 	{
3031 		/*
3032 		 * If we are using a hash table for the subquery outputs, then the
3033 		 * cost of evaluating the query is a one-time cost.  We charge one
3034 		 * cpu_operator_cost per tuple for the work of loading the hashtable,
3035 		 * too.
3036 		 */
3037 		sp_cost.startup += plan->total_cost +
3038 			cpu_operator_cost * plan->plan_rows;
3039 
3040 		/*
3041 		 * The per-tuple costs include the cost of evaluating the lefthand
3042 		 * expressions, plus the cost of probing the hashtable.  We already
3043 		 * accounted for the lefthand expressions as part of the testexpr, and
3044 		 * will also have counted one cpu_operator_cost for each comparison
3045 		 * operator.  That is probably too low for the probing cost, but it's
3046 		 * hard to make a better estimate, so live with it for now.
3047 		 */
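
		/*
		 * Illustrative sketch (hypothetical numbers): if the subplan has
		 * total_cost = 500 and plan_rows = 10000, then with the default
		 * cpu_operator_cost = 0.0025 the one-time charge above is
		 * 500 + 0.0025 * 10000 = 525, all attributed to sp_cost.startup.
		 */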
3048 	}
3049 	else
3050 	{
3051 		/*
3052 		 * Otherwise we will be rescanning the subplan output on each
3053 		 * evaluation.  We need to estimate how much of the output we will
3054 		 * actually need to scan.  NOTE: this logic should agree with the
3055 		 * tuple_fraction estimates used by make_subplan() in
3056 		 * plan/subselect.c.
3057 		 */
3058 		Cost		plan_run_cost = plan->total_cost - plan->startup_cost;
3059 
3060 		if (subplan->subLinkType == EXISTS_SUBLINK)
3061 		{
3062 			/* we only need to fetch 1 tuple; clamp to avoid zero divide */
3063 			sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
3064 		}
3065 		else if (subplan->subLinkType == ALL_SUBLINK ||
3066 				 subplan->subLinkType == ANY_SUBLINK)
3067 		{
3068 			/* assume we need 50% of the tuples */
3069 			sp_cost.per_tuple += 0.50 * plan_run_cost;
3070 			/* also charge a cpu_operator_cost per row examined */
3071 			sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
3072 		}
3073 		else
3074 		{
3075 			/* assume we need all tuples */
3076 			sp_cost.per_tuple += plan_run_cost;
3077 		}
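
		/*
		 * Worked example (hypothetical numbers): for an ANY sublink with
		 * plan_run_cost = 100 and plan_rows = 1000, the charges above come to
		 * 0.50 * 100 + 0.50 * 1000 * 0.0025 = 51.25 per call, assuming the
		 * default cpu_operator_cost of 0.0025.  An EXISTS sublink over the
		 * same subplan would instead be charged 100 / 1000 = 0.1 per call.
		 */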
3078 
3079 		/*
3080 		 * Also account for subplan's startup cost. If the subplan is
3081 		 * uncorrelated or undirect correlated, AND its topmost node is one
3082 		 * that materializes its output, assume that we'll only need to pay
3083 		 * its startup cost once; otherwise assume we pay the startup cost
3084 		 * every time.
3085 		 */
3086 		if (subplan->parParam == NIL &&
3087 			ExecMaterializesOutput(nodeTag(plan)))
3088 			sp_cost.startup += plan->startup_cost;
3089 		else
3090 			sp_cost.per_tuple += plan->startup_cost;
3091 	}
3092 
3093 	subplan->startup_cost = sp_cost.startup;
3094 	subplan->per_call_cost = sp_cost.per_tuple;
3095 }
3096 
3097 
3098 /*
3099  * cost_rescan
3100  *		Given a finished Path, estimate the costs of rescanning it after
3101  *		having done so the first time.  For some Path types a rescan is
3102  *		cheaper than an original scan (if no parameters change), and this
3103  *		function embodies knowledge about that.  The default is to return
3104  *		the same costs stored in the Path.  (Note that the cost estimates
3105  *		actually stored in Paths are always for first scans.)
3106  *
3107  * This function is not currently intended to model effects such as rescans
3108  * being cheaper due to disk block caching; what we are concerned with is
3109  * plan types wherein the executor caches results explicitly, or doesn't
3110  * redo startup calculations, etc.
3111  */
3112 static void
3113 cost_rescan(PlannerInfo *root, Path *path,
3114 			Cost *rescan_startup_cost,	/* output parameters */
3115 			Cost *rescan_total_cost)
3116 {
3117 	switch (path->pathtype)
3118 	{
3119 		case T_FunctionScan:
3120 
3121 			/*
3122 			 * Currently, nodeFunctionscan.c always executes the function to
3123 			 * completion before returning any rows, and caches the results in
3124 			 * a tuplestore.  So the function eval cost is all startup cost
3125 			 * and isn't paid over again on rescans. However, all run costs
3126 			 * will be paid over again.
3127 			 */
3128 			*rescan_startup_cost = 0;
3129 			*rescan_total_cost = path->total_cost - path->startup_cost;
3130 			break;
3131 		case T_HashJoin:
3132 
3133 			/*
3134 			 * If it's a single-batch join, we don't need to rebuild the hash
3135 			 * table during a rescan.
3136 			 */
3137 			if (((HashPath *) path)->num_batches == 1)
3138 			{
3139 				/* Startup cost is exactly the cost of hash table building */
3140 				*rescan_startup_cost = 0;
3141 				*rescan_total_cost = path->total_cost - path->startup_cost;
3142 			}
3143 			else
3144 			{
3145 				/* Otherwise, no special treatment */
3146 				*rescan_startup_cost = path->startup_cost;
3147 				*rescan_total_cost = path->total_cost;
3148 			}
3149 			break;
3150 		case T_CteScan:
3151 		case T_WorkTableScan:
3152 			{
3153 				/*
3154 				 * These plan types materialize their final result in a
3155 				 * tuplestore or tuplesort object.  So the rescan cost is only
3156 				 * cpu_tuple_cost per tuple, unless the result is large enough
3157 				 * to spill to disk.
3158 				 */
3159 				Cost		run_cost = cpu_tuple_cost * path->rows;
3160 				double		nbytes = relation_byte_size(path->rows,
3161 													path->pathtarget->width);
3162 				long		work_mem_bytes = work_mem * 1024L;
3163 
3164 				if (nbytes > work_mem_bytes)
3165 				{
3166 					/* It will spill, so account for re-read cost */
3167 					double		npages = ceil(nbytes / BLCKSZ);
3168 
3169 					run_cost += seq_page_cost * npages;
3170 				}
3171 				*rescan_startup_cost = 0;
3172 				*rescan_total_cost = run_cost;
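
				/*
				 * Illustrative sketch (hypothetical sizes, default costs):
				 * for 1,000,000 rows whose materialized size works out to
				 * about 128MB against work_mem = 64MB, the stored result
				 * spills, so with 8kB blocks we add seq_page_cost * 16384
				 * pages of re-read cost on top of cpu_tuple_cost * 1000000,
				 * i.e. roughly 16384 + 10000 = 26384 at the default settings.
				 */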
3173 			}
3174 			break;
3175 		case T_Material:
3176 		case T_Sort:
3177 			{
3178 				/*
3179 				 * These plan types not only materialize their results, but do
3180 				 * not implement qual filtering or projection.  So they are
3181 				 * even cheaper to rescan than the ones above.  We charge only
3182 				 * cpu_operator_cost per tuple.  (Note: keep that in sync with
3183 				 * the run_cost charge in cost_sort, and also see comments in
3184 				 * cost_material before you change it.)
3185 				 */
3186 				Cost		run_cost = cpu_operator_cost * path->rows;
3187 				double		nbytes = relation_byte_size(path->rows,
3188 													path->pathtarget->width);
3189 				long		work_mem_bytes = work_mem * 1024L;
3190 
3191 				if (nbytes > work_mem_bytes)
3192 				{
3193 					/* It will spill, so account for re-read cost */
3194 					double		npages = ceil(nbytes / BLCKSZ);
3195 
3196 					run_cost += seq_page_cost * npages;
3197 				}
3198 				*rescan_startup_cost = 0;
3199 				*rescan_total_cost = run_cost;
3200 			}
3201 			break;
3202 		default:
3203 			*rescan_startup_cost = path->startup_cost;
3204 			*rescan_total_cost = path->total_cost;
3205 			break;
3206 	}
3207 }
3208 
3209 
3210 /*
3211  * cost_qual_eval
3212  *		Estimate the CPU costs of evaluating a WHERE clause.
3213  *		The input can be either an implicitly-ANDed list of boolean
3214  *		expressions, or a list of RestrictInfo nodes.  (The latter is
3215  *		preferred since it allows caching of the results.)
3216  *		The result includes both a one-time (startup) component,
3217  *		and a per-evaluation component.
3218  */
3219 void
3220 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
3221 {
3222 	cost_qual_eval_context context;
3223 	ListCell   *l;
3224 
3225 	context.root = root;
3226 	context.total.startup = 0;
3227 	context.total.per_tuple = 0;
3228 
3229 	/* We don't charge any cost for the implicit ANDing at top level ... */
3230 
3231 	foreach(l, quals)
3232 	{
3233 		Node	   *qual = (Node *) lfirst(l);
3234 
3235 		cost_qual_eval_walker(qual, &context);
3236 	}
3237 
3238 	*cost = context.total;
3239 }
3240 
3241 /*
3242  * cost_qual_eval_node
3243  *		As above, for a single RestrictInfo or expression.
3244  */
3245 void
3246 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
3247 {
3248 	cost_qual_eval_context context;
3249 
3250 	context.root = root;
3251 	context.total.startup = 0;
3252 	context.total.per_tuple = 0;
3253 
3254 	cost_qual_eval_walker(qual, &context);
3255 
3256 	*cost = context.total;
3257 }
3258 
3259 static bool
3260 cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
3261 {
3262 	if (node == NULL)
3263 		return false;
3264 
3265 	/*
3266 	 * RestrictInfo nodes contain an eval_cost field reserved for this
3267 	 * routine's use, so that it's not necessary to evaluate the qual clause's
3268 	 * cost more than once.  If the clause's cost hasn't been computed yet,
3269 	 * the field's startup value will contain -1.
3270 	 */
3271 	if (IsA(node, RestrictInfo))
3272 	{
3273 		RestrictInfo *rinfo = (RestrictInfo *) node;
3274 
3275 		if (rinfo->eval_cost.startup < 0)
3276 		{
3277 			cost_qual_eval_context locContext;
3278 
3279 			locContext.root = context->root;
3280 			locContext.total.startup = 0;
3281 			locContext.total.per_tuple = 0;
3282 
3283 			/*
3284 			 * For an OR clause, recurse into the marked-up tree so that we
3285 			 * set the eval_cost for contained RestrictInfos too.
3286 			 */
3287 			if (rinfo->orclause)
3288 				cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
3289 			else
3290 				cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
3291 
3292 			/*
3293 			 * If the RestrictInfo is marked pseudoconstant, it will be tested
3294 			 * only once, so treat its cost as all startup cost.
3295 			 */
3296 			if (rinfo->pseudoconstant)
3297 			{
3298 				/* count one execution during startup */
3299 				locContext.total.startup += locContext.total.per_tuple;
3300 				locContext.total.per_tuple = 0;
3301 			}
3302 			rinfo->eval_cost = locContext.total;
3303 		}
3304 		context->total.startup += rinfo->eval_cost.startup;
3305 		context->total.per_tuple += rinfo->eval_cost.per_tuple;
3306 		/* do NOT recurse into children */
3307 		return false;
3308 	}
3309 
3310 	/*
3311 	 * For each operator or function node in the given tree, we charge the
3312 	 * estimated execution cost given by pg_proc.procost (remember to multiply
3313 	 * this by cpu_operator_cost).
3314 	 *
3315 	 * Vars and Consts are charged zero, and so are boolean operators (AND,
3316 	 * OR, NOT). Simplistic, but a lot better than no model at all.
3317 	 *
3318 	 * Should we try to account for the possibility of short-circuit
3319 	 * evaluation of AND/OR?  Probably *not*, because that would make the
3320 	 * results depend on the clause ordering, and we are not in any position
3321 	 * to expect that the current ordering of the clauses is the one that's
3322 	 * going to end up being used.  The above per-RestrictInfo caching would
3323 	 * not mix well with trying to re-order clauses anyway.
3324 	 *
3325 	 * Another issue that is entirely ignored here is that if a set-returning
3326 	 * function is below top level in the tree, the functions/operators above
3327 	 * it will need to be evaluated multiple times.  In practical use, such
3328 	 * cases arise so seldom as to not be worth the added complexity needed;
3329 	 * moreover, since our rowcount estimates for functions tend to be pretty
3330 	 * phony, the results would also be pretty phony.
3331 	 */
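
	/*
	 * Illustrative sketch (hypothetical procost values): an operator whose
	 * implementation function has the default procost of 1 adds
	 * 1 * cpu_operator_cost = 0.0025 per evaluation at the default setting,
	 * while a FuncExpr calling a function marked with procost = 100 adds
	 * 100 * 0.0025 = 0.25.
	 */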
3332 	if (IsA(node, FuncExpr))
3333 	{
3334 		context->total.per_tuple +=
3335 			get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
3336 	}
3337 	else if (IsA(node, OpExpr) ||
3338 			 IsA(node, DistinctExpr) ||
3339 			 IsA(node, NullIfExpr))
3340 	{
3341 		/* rely on struct equivalence to treat these all alike */
3342 		set_opfuncid((OpExpr *) node);
3343 		context->total.per_tuple +=
3344 			get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
3345 	}
3346 	else if (IsA(node, ScalarArrayOpExpr))
3347 	{
3348 		/*
3349 		 * Estimate that the operator will be applied to about half of the
3350 		 * array elements before the answer is determined.
3351 		 */
3352 		ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
3353 		Node	   *arraynode = (Node *) lsecond(saop->args);
3354 
3355 		set_sa_opfuncid(saop);
3356 		context->total.per_tuple += get_func_cost(saop->opfuncid) *
3357 			cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
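
		/*
		 * Worked example (hypothetical values): for "x = ANY(array)" with an
		 * estimated array length of 10 and an operator of procost 1, the
		 * charge above is 1 * cpu_operator_cost * 10 * 0.5, i.e. five
		 * operator evaluations per tuple rather than ten.
		 */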
3358 	}
3359 	else if (IsA(node, Aggref) ||
3360 			 IsA(node, WindowFunc))
3361 	{
3362 		/*
3363 		 * Aggref and WindowFunc nodes are (and should be) treated like Vars,
3364 		 * ie, zero execution cost in the current model, because they behave
3365 		 * essentially like Vars in execQual.c.  We disregard the costs of
3366 		 * their input expressions for the same reason.  The actual execution
3367 		 * costs of the aggregate/window functions and their arguments have to
3368 		 * be factored into plan-node-specific costing of the Agg or WindowAgg
3369 		 * plan node.
3370 		 */
3371 		return false;			/* don't recurse into children */
3372 	}
3373 	else if (IsA(node, CoerceViaIO))
3374 	{
3375 		CoerceViaIO *iocoerce = (CoerceViaIO *) node;
3376 		Oid			iofunc;
3377 		Oid			typioparam;
3378 		bool		typisvarlena;
3379 
3380 		/* check the result type's input function */
3381 		getTypeInputInfo(iocoerce->resulttype,
3382 						 &iofunc, &typioparam);
3383 		context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3384 		/* check the input type's output function */
3385 		getTypeOutputInfo(exprType((Node *) iocoerce->arg),
3386 						  &iofunc, &typisvarlena);
3387 		context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3388 	}
3389 	else if (IsA(node, ArrayCoerceExpr))
3390 	{
3391 		ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
3392 		Node	   *arraynode = (Node *) acoerce->arg;
3393 
3394 		if (OidIsValid(acoerce->elemfuncid))
3395 			context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
3396 				cpu_operator_cost * estimate_array_length(arraynode);
3397 	}
3398 	else if (IsA(node, RowCompareExpr))
3399 	{
3400 		/* Conservatively assume we will check all the columns */
3401 		RowCompareExpr *rcexpr = (RowCompareExpr *) node;
3402 		ListCell   *lc;
3403 
3404 		foreach(lc, rcexpr->opnos)
3405 		{
3406 			Oid			opid = lfirst_oid(lc);
3407 
3408 			context->total.per_tuple += get_func_cost(get_opcode(opid)) *
3409 				cpu_operator_cost;
3410 		}
3411 	}
3412 	else if (IsA(node, CurrentOfExpr))
3413 	{
3414 		/* Report high cost to prevent selection of anything but TID scan */
3415 		context->total.startup += disable_cost;
3416 	}
3417 	else if (IsA(node, SubLink))
3418 	{
3419 		/* This routine should not be applied to un-planned expressions */
3420 		elog(ERROR, "cannot handle unplanned sub-select");
3421 	}
3422 	else if (IsA(node, SubPlan))
3423 	{
3424 		/*
3425 		 * A subplan node in an expression typically indicates that the
3426 		 * subplan will be executed on each evaluation, so charge accordingly.
3427 		 * (Sub-selects that can be executed as InitPlans have already been
3428 		 * removed from the expression.)
3429 		 */
3430 		SubPlan    *subplan = (SubPlan *) node;
3431 
3432 		context->total.startup += subplan->startup_cost;
3433 		context->total.per_tuple += subplan->per_call_cost;
3434 
3435 		/*
3436 		 * We don't want to recurse into the testexpr, because it was already
3437 		 * counted in the SubPlan node's costs.  So we're done.
3438 		 */
3439 		return false;
3440 	}
3441 	else if (IsA(node, AlternativeSubPlan))
3442 	{
3443 		/*
3444 		 * Arbitrarily use the first alternative plan for costing.  (We should
3445 		 * certainly only include one alternative, and we don't yet have
3446 		 * enough information to know which one the executor is most likely to
3447 		 * use.)
3448 		 */
3449 		AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
3450 
3451 		return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
3452 									 context);
3453 	}
3454 	else if (IsA(node, PlaceHolderVar))
3455 	{
3456 		/*
3457 		 * A PlaceHolderVar should be given cost zero when considering general
3458 		 * expression evaluation costs.  The expense of doing the contained
3459 		 * expression is charged as part of the tlist eval costs of the scan
3460 		 * or join where the PHV is first computed (see set_rel_width and
3461 		 * add_placeholders_to_joinrel).  If we charged it again here, we'd be
3462 		 * double-counting the cost for each level of plan that the PHV
3463 		 * bubbles up through.  Hence, return without recursing into the
3464 		 * phexpr.
3465 		 */
3466 		return false;
3467 	}
3468 
3469 	/* recurse into children */
3470 	return expression_tree_walker(node, cost_qual_eval_walker,
3471 								  (void *) context);
3472 }
3473 
3474 /*
3475  * get_restriction_qual_cost
3476  *	  Compute evaluation costs of a baserel's restriction quals, plus any
3477  *	  movable join quals that have been pushed down to the scan.
3478  *	  Results are returned into *qpqual_cost.
3479  *
3480  * This is a convenience subroutine that works for seqscans and other cases
3481  * where all the given quals will be evaluated the hard way.  It's not useful
3482  * for cost_index(), for example, where the index machinery takes care of
3483  * some of the quals.  We assume baserestrictcost was previously set by
3484  * set_baserel_size_estimates().
3485  */
3486 static void
3487 get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
3488 						  ParamPathInfo *param_info,
3489 						  QualCost *qpqual_cost)
3490 {
3491 	if (param_info)
3492 	{
3493 		/* Include costs of pushed-down clauses */
3494 		cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
3495 
3496 		qpqual_cost->startup += baserel->baserestrictcost.startup;
3497 		qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
3498 	}
3499 	else
3500 		*qpqual_cost = baserel->baserestrictcost;
3501 }
3502 
3503 
3504 /*
3505  * compute_semi_anti_join_factors
3506  *	  Estimate how much of the inner input a SEMI or ANTI join
3507  *	  can be expected to scan.
3508  *
3509  * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
3510  * inner rows as soon as it finds a match to the current outer row.
3511  * We should therefore adjust some of the cost components for this effect.
3512  * This function computes some estimates needed for these adjustments.
3513  * These estimates will be the same regardless of the particular paths used
3514  * for the outer and inner relation, so we compute these once and then pass
3515  * them to all the join cost estimation functions.
3516  *
3517  * Input parameters:
3518  *	outerrel: outer relation under consideration
3519  *	innerrel: inner relation under consideration
3520  *	jointype: must be JOIN_SEMI or JOIN_ANTI
3521  *	sjinfo: SpecialJoinInfo relevant to this join
3522  *	restrictlist: join quals
3523  * Output parameters:
3524  *	*semifactors is filled in (see relation.h for field definitions)
3525  */
3526 void
3527 compute_semi_anti_join_factors(PlannerInfo *root,
3528 							   RelOptInfo *outerrel,
3529 							   RelOptInfo *innerrel,
3530 							   JoinType jointype,
3531 							   SpecialJoinInfo *sjinfo,
3532 							   List *restrictlist,
3533 							   SemiAntiJoinFactors *semifactors)
3534 {
3535 	Selectivity jselec;
3536 	Selectivity nselec;
3537 	Selectivity avgmatch;
3538 	SpecialJoinInfo norm_sjinfo;
3539 	List	   *joinquals;
3540 	ListCell   *l;
3541 
3542 	/* Should only be called in these cases */
3543 	Assert(jointype == JOIN_SEMI || jointype == JOIN_ANTI);
3544 
3545 	/*
3546 	 * In an ANTI join, we must ignore clauses that are "pushed down", since
3547 	 * those won't affect the match logic.  In a SEMI join, we do not
3548 	 * distinguish joinquals from "pushed down" quals, so just use the whole
3549 	 * restrictinfo list.
3550 	 */
3551 	if (jointype == JOIN_ANTI)
3552 	{
3553 		Relids		joinrelids = bms_union(outerrel->relids, innerrel->relids);
3554 
3555 		joinquals = NIL;
3556 		foreach(l, restrictlist)
3557 		{
3558 			RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
3559 
3560 			Assert(IsA(rinfo, RestrictInfo));
3561 			if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrelids))
3562 				joinquals = lappend(joinquals, rinfo);
3563 		}
3564 	}
3565 	else
3566 		joinquals = restrictlist;
3567 
3568 	/*
3569 	 * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
3570 	 */
3571 	jselec = clauselist_selectivity(root,
3572 									joinquals,
3573 									0,
3574 									jointype,
3575 									sjinfo);
3576 
3577 	/*
3578 	 * Also get the normal inner-join selectivity of the join clauses.
3579 	 */
3580 	norm_sjinfo.type = T_SpecialJoinInfo;
3581 	norm_sjinfo.min_lefthand = outerrel->relids;
3582 	norm_sjinfo.min_righthand = innerrel->relids;
3583 	norm_sjinfo.syn_lefthand = outerrel->relids;
3584 	norm_sjinfo.syn_righthand = innerrel->relids;
3585 	norm_sjinfo.jointype = JOIN_INNER;
3586 	/* we don't bother trying to make the remaining fields valid */
3587 	norm_sjinfo.lhs_strict = false;
3588 	norm_sjinfo.delay_upper_joins = false;
3589 	norm_sjinfo.semi_can_btree = false;
3590 	norm_sjinfo.semi_can_hash = false;
3591 	norm_sjinfo.semi_operators = NIL;
3592 	norm_sjinfo.semi_rhs_exprs = NIL;
3593 
3594 	nselec = clauselist_selectivity(root,
3595 									joinquals,
3596 									0,
3597 									JOIN_INNER,
3598 									&norm_sjinfo);
3599 
3600 	/* Avoid leaking a lot of ListCells */
3601 	if (jointype == JOIN_ANTI)
3602 		list_free(joinquals);
3603 
3604 	/*
3605 	 * jselec can be interpreted as the fraction of outer-rel rows that have
3606 	 * any matches (this is true for both SEMI and ANTI cases).  And nselec is
3607 	 * the fraction of the Cartesian product that matches.  So, the average
3608 	 * number of matches for each outer-rel row that has at least one match is
3609 	 * nselec * inner_rows / jselec.
3610 	 *
3611 	 * Note: it is correct to use the inner rel's "rows" count here, even
3612 	 * though we might later be considering a parameterized inner path with
3613 	 * fewer rows.  This is because we have included all the join clauses in
3614 	 * the selectivity estimate.
3615 	 */
3616 	if (jselec > 0)				/* protect against zero divide */
3617 	{
3618 		avgmatch = nselec * innerrel->rows / jselec;
3619 		/* Clamp to sane range */
3620 		avgmatch = Max(1.0, avgmatch);
3621 	}
3622 	else
3623 		avgmatch = 1.0;
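
	/*
	 * Worked example (hypothetical selectivities): with jselec = 0.2,
	 * nselec = 0.01 and innerrel->rows = 500, the average matched outer row
	 * has 0.01 * 500 / 0.2 = 25 inner matches, so we report
	 * outer_match_frac = 0.2 and match_count = 25.
	 */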
3624 
3625 	semifactors->outer_match_frac = jselec;
3626 	semifactors->match_count = avgmatch;
3627 }
3628 
3629 /*
3630  * has_indexed_join_quals
3631  *	  Check whether all the joinquals of a nestloop join are used as
3632  *	  inner index quals.
3633  *
3634  * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
3635  * indexscan) that uses all the joinquals as indexquals, we can assume that an
3636  * unmatched outer tuple is cheap to process, whereas otherwise it's probably
3637  * expensive.
3638  */
3639 static bool
3640 has_indexed_join_quals(NestPath *joinpath)
3641 {
3642 	Relids		joinrelids = joinpath->path.parent->relids;
3643 	Path	   *innerpath = joinpath->innerjoinpath;
3644 	List	   *indexclauses;
3645 	bool		found_one;
3646 	ListCell   *lc;
3647 
3648 	/* If join still has quals to evaluate, it's not fast */
3649 	if (joinpath->joinrestrictinfo != NIL)
3650 		return false;
3651 	/* Nor if the inner path isn't parameterized at all */
3652 	if (innerpath->param_info == NULL)
3653 		return false;
3654 
3655 	/* Find the indexclauses list for the inner scan */
3656 	switch (innerpath->pathtype)
3657 	{
3658 		case T_IndexScan:
3659 		case T_IndexOnlyScan:
3660 			indexclauses = ((IndexPath *) innerpath)->indexclauses;
3661 			break;
3662 		case T_BitmapHeapScan:
3663 			{
3664 				/* Accept only a simple bitmap scan, not AND/OR cases */
3665 				Path	   *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
3666 
3667 				if (IsA(bmqual, IndexPath))
3668 					indexclauses = ((IndexPath *) bmqual)->indexclauses;
3669 				else
3670 					return false;
3671 				break;
3672 			}
3673 		default:
3674 
3675 			/*
3676 			 * If it's not a simple indexscan, it probably doesn't run quickly
3677 			 * for zero rows out, even if it's a parameterized path using all
3678 			 * the joinquals.
3679 			 */
3680 			return false;
3681 	}
3682 
3683 	/*
3684 	 * Examine the inner path's param clauses.  Any that are from the outer
3685 	 * path must be found in the indexclauses list, either exactly or in an
3686 	 * equivalent form generated by equivclass.c.  Also, we must find at least
3687 	 * one such clause, else it's a clauseless join which isn't fast.
3688 	 */
3689 	found_one = false;
3690 	foreach(lc, innerpath->param_info->ppi_clauses)
3691 	{
3692 		RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
3693 
3694 		if (join_clause_is_movable_into(rinfo,
3695 										innerpath->parent->relids,
3696 										joinrelids))
3697 		{
3698 			if (!(list_member_ptr(indexclauses, rinfo) ||
3699 				  is_redundant_derived_clause(rinfo, indexclauses)))
3700 				return false;
3701 			found_one = true;
3702 		}
3703 	}
3704 	return found_one;
3705 }
3706 
3707 
3708 /*
3709  * approx_tuple_count
3710  *		Quick-and-dirty estimation of the number of join rows passing
3711  *		a set of qual conditions.
3712  *
3713  * The quals can be either an implicitly-ANDed list of boolean expressions,
3714  * or a list of RestrictInfo nodes (typically the latter).
3715  *
3716  * We intentionally compute the selectivity under JOIN_INNER rules, even
3717  * if it's some type of outer join.  This is appropriate because we are
3718  * trying to figure out how many tuples pass the initial merge or hash
3719  * join step.
3720  *
3721  * This is quick-and-dirty because we bypass clauselist_selectivity, and
3722  * simply multiply the independent clause selectivities together.  Now
3723  * clauselist_selectivity often can't do any better than that anyhow, but
3724  * for some situations (such as range constraints) it is smarter.  However,
3725  * we can't effectively cache the results of clauselist_selectivity, whereas
3726  * the individual clause selectivities can be and are cached.
3727  *
3728  * Since we are only using the results to estimate how many potential
3729  * output tuples are generated and passed through qpqual checking, it
3730  * seems OK to live with the approximation.
3731  */
3732 static double
3733 approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
3734 {
3735 	double		tuples;
3736 	double		outer_tuples = path->outerjoinpath->rows;
3737 	double		inner_tuples = path->innerjoinpath->rows;
3738 	SpecialJoinInfo sjinfo;
3739 	Selectivity selec = 1.0;
3740 	ListCell   *l;
3741 
3742 	/*
3743 	 * Make up a SpecialJoinInfo for JOIN_INNER semantics.
3744 	 */
3745 	sjinfo.type = T_SpecialJoinInfo;
3746 	sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
3747 	sjinfo.min_righthand = path->innerjoinpath->parent->relids;
3748 	sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
3749 	sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
3750 	sjinfo.jointype = JOIN_INNER;
3751 	/* we don't bother trying to make the remaining fields valid */
3752 	sjinfo.lhs_strict = false;
3753 	sjinfo.delay_upper_joins = false;
3754 	sjinfo.semi_can_btree = false;
3755 	sjinfo.semi_can_hash = false;
3756 	sjinfo.semi_operators = NIL;
3757 	sjinfo.semi_rhs_exprs = NIL;
3758 
3759 	/* Get the approximate selectivity */
3760 	foreach(l, quals)
3761 	{
3762 		Node	   *qual = (Node *) lfirst(l);
3763 
3764 		/* Note that clause_selectivity will be able to cache its result */
3765 		selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
3766 	}
3767 
3768 	/* Apply it to the input relation sizes */
3769 	tuples = selec * outer_tuples * inner_tuples;
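
	/*
	 * Illustrative sketch (hypothetical values): two quals with individual
	 * selectivities 0.1 and 0.5 give selec = 0.05, so with 1000 outer and
	 * 200 inner tuples we estimate 0.05 * 1000 * 200 = 10000 joined tuples
	 * before clamping.
	 */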
3770 
3771 	return clamp_row_est(tuples);
3772 }
3773 
3774 
3775 /*
3776  * set_baserel_size_estimates
3777  *		Set the size estimates for the given base relation.
3778  *
3779  * The rel's targetlist and restrictinfo list must have been constructed
3780  * already, and rel->tuples must be set.
3781  *
3782  * We set the following fields of the rel node:
3783  *	rows: the estimated number of output tuples (after applying
3784  *		  restriction clauses).
3785  *	width: the estimated average output tuple width in bytes.
3786  *	baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
3787  */
3788 void
3789 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3790 {
3791 	double		nrows;
3792 
3793 	/* Should only be applied to base relations */
3794 	Assert(rel->relid > 0);
3795 
3796 	nrows = rel->tuples *
3797 		clauselist_selectivity(root,
3798 							   rel->baserestrictinfo,
3799 							   0,
3800 							   JOIN_INNER,
3801 							   NULL);
3802 
3803 	rel->rows = clamp_row_est(nrows);
3804 
3805 	cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
3806 
3807 	set_rel_width(root, rel);
3808 }
3809 
3810 /*
3811  * get_parameterized_baserel_size
3812  *		Make a size estimate for a parameterized scan of a base relation.
3813  *
3814  * 'param_clauses' lists the additional join clauses to be used.
3815  *
3816  * set_baserel_size_estimates must have been applied already.
3817  */
3818 double
3819 get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
3820 							   List *param_clauses)
3821 {
3822 	List	   *allclauses;
3823 	double		nrows;
3824 
3825 	/*
3826 	 * Estimate the number of rows returned by the parameterized scan, knowing
3827 	 * that it will apply all the extra join clauses as well as the rel's own
3828 	 * restriction clauses.  Note that we force the clauses to be treated as
3829 	 * non-join clauses during selectivity estimation.
3830 	 */
3831 	allclauses = list_concat(list_copy(param_clauses),
3832 							 rel->baserestrictinfo);
3833 	nrows = rel->tuples *
3834 		clauselist_selectivity(root,
3835 							   allclauses,
3836 							   rel->relid,		/* do not use 0! */
3837 							   JOIN_INNER,
3838 							   NULL);
3839 	nrows = clamp_row_est(nrows);
3840 	/* For safety, make sure result is not more than the base estimate */
3841 	if (nrows > rel->rows)
3842 		nrows = rel->rows;
3843 	return nrows;
3844 }
3845 
3846 /*
3847  * set_joinrel_size_estimates
3848  *		Set the size estimates for the given join relation.
3849  *
3850  * The rel's targetlist must have been constructed already, and a
3851  * restriction clause list that matches the given component rels must
3852  * be provided.
3853  *
3854  * Since there is more than one way to make a joinrel for more than two
3855  * base relations, the results we get here could depend on which component
3856  * rel pair is provided.  In theory we should get the same answers no matter
3857  * which pair is provided; in practice, since the selectivity estimation
3858  * routines don't handle all cases equally well, we might not.  But there's
3859  * not much to be done about it.  (Would it make sense to repeat the
3860  * calculations for each pair of input rels that's encountered, and somehow
3861  * average the results?  Probably way more trouble than it's worth, and
3862  * anyway we must keep the rowcount estimate the same for all paths for the
3863  * joinrel.)
3864  *
3865  * We set only the rows field here.  The reltarget field was already set by
3866  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
3867  */
3868 void
3869 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
3870 						   RelOptInfo *outer_rel,
3871 						   RelOptInfo *inner_rel,
3872 						   SpecialJoinInfo *sjinfo,
3873 						   List *restrictlist)
3874 {
3875 	rel->rows = calc_joinrel_size_estimate(root,
3876 										   rel,
3877 										   outer_rel,
3878 										   inner_rel,
3879 										   outer_rel->rows,
3880 										   inner_rel->rows,
3881 										   sjinfo,
3882 										   restrictlist);
3883 }
3884 
3885 /*
3886  * get_parameterized_joinrel_size
3887  *		Make a size estimate for a parameterized scan of a join relation.
3888  *
3889  * 'rel' is the joinrel under consideration.
3890  * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
3891  *		produce the relations being joined.
3892  * 'sjinfo' is any SpecialJoinInfo relevant to this join.
3893  * 'restrict_clauses' lists the join clauses that need to be applied at the
3894  * join node (including any movable clauses that were moved down to this join,
3895  * and not including any movable clauses that were pushed down into the
3896  * child paths).
3897  *
3898  * set_joinrel_size_estimates must have been applied already.
3899  */
3900 double
3901 get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
3902 							   Path *outer_path,
3903 							   Path *inner_path,
3904 							   SpecialJoinInfo *sjinfo,
3905 							   List *restrict_clauses)
3906 {
3907 	double		nrows;
3908 
3909 	/*
3910 	 * Estimate the number of rows returned by the parameterized join as the
3911 	 * sizes of the input paths times the selectivity of the clauses that have
3912 	 * ended up at this join node.
3913 	 *
3914 	 * As with set_joinrel_size_estimates, the rowcount estimate could depend
3915 	 * on the pair of input paths provided, though ideally we'd get the same
3916 	 * estimate for any pair with the same parameterization.
3917 	 */
3918 	nrows = calc_joinrel_size_estimate(root,
3919 									   rel,
3920 									   outer_path->parent,
3921 									   inner_path->parent,
3922 									   outer_path->rows,
3923 									   inner_path->rows,
3924 									   sjinfo,
3925 									   restrict_clauses);
3926 	/* For safety, make sure result is not more than the base estimate */
3927 	if (nrows > rel->rows)
3928 		nrows = rel->rows;
3929 	return nrows;
3930 }
3931 
3932 /*
3933  * calc_joinrel_size_estimate
3934  *		Workhorse for set_joinrel_size_estimates and
3935  *		get_parameterized_joinrel_size.
3936  *
3937  * outer_rel/inner_rel are the relations being joined, but they should be
3938  * assumed to have sizes outer_rows/inner_rows; those numbers might be less
3939  * than what rel->rows says, when we are considering parameterized paths.
3940  */
3941 static double
3942 calc_joinrel_size_estimate(PlannerInfo *root,
3943 						   RelOptInfo *joinrel,
3944 						   RelOptInfo *outer_rel,
3945 						   RelOptInfo *inner_rel,
3946 						   double outer_rows,
3947 						   double inner_rows,
3948 						   SpecialJoinInfo *sjinfo,
3949 						   List *restrictlist_in)
3950 {
3951 	/* This apparently-useless variable dodges a compiler bug in VS2013: */
3952 	List	   *restrictlist = restrictlist_in;
3953 	JoinType	jointype = sjinfo->jointype;
3954 	Selectivity fkselec;
3955 	Selectivity jselec;
3956 	Selectivity pselec;
3957 	double		nrows;
3958 
3959 	/*
3960 	 * Compute joinclause selectivity.  Note that we are only considering
3961 	 * clauses that become restriction clauses at this join level; we are not
3962 	 * double-counting them because they were not considered in estimating the
3963 	 * sizes of the component rels.
3964 	 *
3965 	 * First, see whether any of the joinclauses can be matched to known FK
3966 	 * constraints.  If so, drop those clauses from the restrictlist, and
3967 	 * instead estimate their selectivity using FK semantics.  (We do this
3968 	 * without regard to whether said clauses are local or "pushed down".
3969 	 * Probably, an FK-matching clause could never be seen as pushed down at
3970 	 * an outer join, since it would be strict and hence would be grounds for
3971 	 * join strength reduction.)  fkselec gets the net selectivity for
3972 	 * FK-matching clauses, or 1.0 if there are none.
3973 	 */
3974 	fkselec = get_foreign_key_join_selectivity(root,
3975 											   outer_rel->relids,
3976 											   inner_rel->relids,
3977 											   sjinfo,
3978 											   &restrictlist);
3979 
3980 	/*
3981 	 * For an outer join, we have to distinguish the selectivity of the join's
3982 	 * own clauses (JOIN/ON conditions) from any clauses that were "pushed
3983 	 * down".  For inner joins we just count them all as joinclauses.
3984 	 */
3985 	if (IS_OUTER_JOIN(jointype))
3986 	{
3987 		List	   *joinquals = NIL;
3988 		List	   *pushedquals = NIL;
3989 		ListCell   *l;
3990 
3991 		/* Grovel through the clauses to separate into two lists */
3992 		foreach(l, restrictlist)
3993 		{
3994 			RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
3995 
3996 			Assert(IsA(rinfo, RestrictInfo));
3997 			if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
3998 				pushedquals = lappend(pushedquals, rinfo);
3999 			else
4000 				joinquals = lappend(joinquals, rinfo);
4001 		}
4002 
4003 		/* Get the separate selectivities */
4004 		jselec = clauselist_selectivity(root,
4005 										joinquals,
4006 										0,
4007 										jointype,
4008 										sjinfo);
4009 		pselec = clauselist_selectivity(root,
4010 										pushedquals,
4011 										0,
4012 										jointype,
4013 										sjinfo);
4014 
4015 		/* Avoid leaking a lot of ListCells */
4016 		list_free(joinquals);
4017 		list_free(pushedquals);
4018 	}
4019 	else
4020 	{
4021 		jselec = clauselist_selectivity(root,
4022 										restrictlist,
4023 										0,
4024 										jointype,
4025 										sjinfo);
4026 		pselec = 0.0;			/* not used, keep compiler quiet */
4027 	}
4028 
4029 	/*
4030 	 * Basically, we multiply size of Cartesian product by selectivity.
4031 	 *
4032 	 * If we are doing an outer join, take that into account: the joinqual
4033 	 * selectivity has to be clamped using the knowledge that the output must
4034 	 * be at least as large as the non-nullable input.  However, any
4035 	 * pushed-down quals are applied after the outer join, so their
4036 	 * selectivity applies fully.
4037 	 *
4038 	 * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
4039 	 * of LHS rows that have matches, and we apply that straightforwardly.
4040 	 */
4041 	switch (jointype)
4042 	{
4043 		case JOIN_INNER:
4044 			nrows = outer_rows * inner_rows * fkselec * jselec;
4045 			/* pselec not used */
4046 			break;
4047 		case JOIN_LEFT:
4048 			nrows = outer_rows * inner_rows * fkselec * jselec;
4049 			if (nrows < outer_rows)
4050 				nrows = outer_rows;
4051 			nrows *= pselec;
4052 			break;
4053 		case JOIN_FULL:
4054 			nrows = outer_rows * inner_rows * fkselec * jselec;
4055 			if (nrows < outer_rows)
4056 				nrows = outer_rows;
4057 			if (nrows < inner_rows)
4058 				nrows = inner_rows;
4059 			nrows *= pselec;
4060 			break;
4061 		case JOIN_SEMI:
4062 			nrows = outer_rows * fkselec * jselec;
4063 			/* pselec not used */
4064 			break;
4065 		case JOIN_ANTI:
4066 			nrows = outer_rows * (1.0 - fkselec * jselec);
4067 			nrows *= pselec;
4068 			break;
4069 		default:
4070 			/* other values not expected here */
4071 			elog(ERROR, "unrecognized join type: %d", (int) jointype);
4072 			nrows = 0;			/* keep compiler quiet */
4073 			break;
4074 	}
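
	/*
	 * Worked example (hypothetical values): for a LEFT join with
	 * outer_rows = 1000, inner_rows = 100, fkselec = 1.0, jselec = 0.0005
	 * and pselec = 0.3, the raw product 1000 * 100 * 0.0005 = 50 is clamped
	 * up to the 1000 non-nullable outer rows, and the pushed-down quals then
	 * reduce that to 1000 * 0.3 = 300 rows.
	 */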
4075 
4076 	return clamp_row_est(nrows);
4077 }
4078 
4079 /*
4080  * get_foreign_key_join_selectivity
4081  *		Estimate join selectivity for foreign-key-related clauses.
4082  *
4083  * Remove any clauses that can be matched to FK constraints from *restrictlist,
4084  * and return a substitute estimate of their selectivity.  1.0 is returned
4085  * when there are no such clauses.
4086  *
4087  * The reason for treating such clauses specially is that we can get better
4088  * estimates this way than by relying on clauselist_selectivity(), especially
4089  * for multi-column FKs where that function's assumption that the clauses are
4090  * independent falls down badly.  But even with single-column FKs, we may be
4091  * able to get a better answer when the pg_statistic stats are missing or out
4092  * of date.
4093  */
4094 static Selectivity
4095 get_foreign_key_join_selectivity(PlannerInfo *root,
4096 								 Relids outer_relids,
4097 								 Relids inner_relids,
4098 								 SpecialJoinInfo *sjinfo,
4099 								 List **restrictlist)
4100 {
4101 	Selectivity fkselec = 1.0;
4102 	JoinType	jointype = sjinfo->jointype;
4103 	List	   *worklist = *restrictlist;
4104 	ListCell   *lc;
4105 
4106 	/* Consider each FK constraint that is known to match the query */
4107 	foreach(lc, root->fkey_list)
4108 	{
4109 		ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
4110 		bool		ref_is_outer;
4111 		List	   *removedlist;
4112 		ListCell   *cell;
4113 		ListCell   *prev;
4114 		ListCell   *next;
4115 
4116 		/*
4117 		 * This FK is not relevant unless it connects a baserel on one side of
4118 		 * this join to a baserel on the other side.
4119 		 */
4120 		if (bms_is_member(fkinfo->con_relid, outer_relids) &&
4121 			bms_is_member(fkinfo->ref_relid, inner_relids))
4122 			ref_is_outer = false;
4123 		else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
4124 				 bms_is_member(fkinfo->con_relid, inner_relids))
4125 			ref_is_outer = true;
4126 		else
4127 			continue;
4128 
4129 		/*
4130 		 * If we're dealing with a semi/anti join, and the FK's referenced
4131 		 * relation is on the outside, then knowledge of the FK doesn't help
4132 		 * us figure out what we need to know (which is the fraction of outer
4133 		 * rows that have matches).  On the other hand, if the referenced rel
4134 		 * is on the inside, then all outer rows must have matches in the
4135 		 * referenced table (ignoring nulls).  But any restriction or join
4136 		 * clauses that filter that table will reduce the fraction of matches.
4137 		 * We can account for restriction clauses, but it's too hard to guess
4138 		 * how many table rows would get through a join that's inside the RHS.
4139 		 * Hence, if either case applies, punt and ignore the FK.
4140 		 */
4141 		if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
4142 			(ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
4143 			continue;
4144 
4145 		/*
4146 		 * Modify the restrictlist by removing clauses that match the FK (and
4147 		 * putting them into removedlist instead).  It seems unsafe to modify
4148 		 * the originally-passed List structure, so we make a shallow copy the
4149 		 * first time through.
4150 		 */
4151 		if (worklist == *restrictlist)
4152 			worklist = list_copy(worklist);
4153 
4154 		removedlist = NIL;
4155 		prev = NULL;
4156 		for (cell = list_head(worklist); cell; cell = next)
4157 		{
4158 			RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
4159 			bool		remove_it = false;
4160 			int			i;
4161 
4162 			next = lnext(cell);
4163 			/* Drop this clause if it matches any column of the FK */
4164 			for (i = 0; i < fkinfo->nkeys; i++)
4165 			{
4166 				if (rinfo->parent_ec)
4167 				{
4168 					/*
4169 					 * EC-derived clauses can only match by EC.  It is okay to
4170 					 * consider any clause derived from the same EC as
4171 					 * matching the FK: even if equivclass.c chose to generate
4172 					 * a clause equating some other pair of Vars, it could
4173 					 * have generated one equating the FK's Vars.  So for
4174 					 * purposes of estimation, we can act as though it did so.
4175 					 *
4176 					 * Note: checking parent_ec is a bit of a cheat because
4177 					 * there are EC-derived clauses that don't have parent_ec
4178 					 * set; but such clauses must compare expressions that
4179 					 * aren't just Vars, so they cannot match the FK anyway.
4180 					 */
4181 					if (fkinfo->eclass[i] == rinfo->parent_ec)
4182 					{
4183 						remove_it = true;
4184 						break;
4185 					}
4186 				}
4187 				else
4188 				{
4189 					/*
4190 					 * Otherwise, see if rinfo was previously matched to FK as
4191 					 * a "loose" clause.
4192 					 */
4193 					if (list_member_ptr(fkinfo->rinfos[i], rinfo))
4194 					{
4195 						remove_it = true;
4196 						break;
4197 					}
4198 				}
4199 			}
4200 			if (remove_it)
4201 			{
4202 				worklist = list_delete_cell(worklist, cell, prev);
4203 				removedlist = lappend(removedlist, rinfo);
4204 			}
4205 			else
4206 				prev = cell;
4207 		}
4208 
4209 		/*
4210 		 * If we failed to remove all the matching clauses we expected to
4211 		 * find, chicken out and ignore this FK; applying its selectivity
4212 		 * might result in double-counting.  Put any clauses we did manage to
4213 		 * remove back into the worklist.
4214 		 *
4215 		 * Since the matching clauses are known not outerjoin-delayed, they
4216 		 * should certainly have appeared in the initial joinclause list.  If
4217 		 * we didn't find them, they must have been matched to, and removed
4218 		 * by, some other FK in a previous iteration of this loop.  (A likely
4219 		 * case is that two FKs are matched to the same EC; there will be only
4220 		 * one EC-derived clause in the initial list, so the first FK will
4221 		 * consume it.)  Applying both FKs' selectivity independently risks
4222 		 * underestimating the join size; in particular, this would undo one
4223 		 * of the main things that ECs were invented for, namely to avoid
4224 		 * double-counting the selectivity of redundant equality conditions.
4225 		 * Later we might think of a reasonable way to combine the estimates,
4226 		 * but for now, just punt, since this is a fairly uncommon situation.
4227 		 */
4228 		if (list_length(removedlist) !=
4229 			(fkinfo->nmatched_ec + fkinfo->nmatched_ri))
4230 		{
4231 			worklist = list_concat(worklist, removedlist);
4232 			continue;
4233 		}
4234 
4235 		/*
4236 		 * Finally we get to the payoff: estimate selectivity using the
4237 		 * knowledge that each referencing row will match exactly one row in
4238 		 * the referenced table.
4239 		 *
4240 		 * XXX that's not true in the presence of nulls in the referencing
4241 		 * column(s), so in principle we should derate the estimate for those.
4242 		 * However (1) if there are any strict restriction clauses for the
4243 		 * referencing column(s) elsewhere in the query, derating here would
4244 		 * be double-counting the null fraction, and (2) it's not very clear
4245 		 * how to combine null fractions for multiple referencing columns. So
4246 		 * we do nothing for now about correcting for nulls.
4247 		 *
4248 		 * XXX another point here is that if either side of an FK constraint
4249 		 * is an inheritance parent, we estimate as though the constraint
4250 		 * covers all its children as well.  This is not an unreasonable
4251 		 * assumption for a referencing table, ie the user probably applied
4252 		 * identical constraints to all child tables (though perhaps we ought
4253 		 * to check that).  But it's not possible to have done that for a
4254 		 * referenced table.  Fortunately, precisely because that doesn't
4255 		 * work, it is uncommon in practice to have an FK referencing a parent
4256 		 * table.  So, at least for now, disregard inheritance here.
4257 		 */
4258 		if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
4259 		{
4260 			/*
4261 			 * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
4262 			 * referenced table is exactly the inside of the join.  The join
4263 			 * selectivity is defined as the fraction of LHS rows that have
4264 			 * matches.  The FK implies that every LHS row has a match *in the
4265 			 * referenced table*; but any restriction clauses on it will
4266 			 * reduce the number of matches.  Hence we take the join
4267 			 * selectivity as equal to the selectivity of the table's
4268 			 * restriction clauses, which is rows / tuples; but we must guard
4269 			 * against tuples == 0.
4270 			 */
4271 			RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
4272 			double		ref_tuples = Max(ref_rel->tuples, 1.0);
4273 
4274 			fkselec *= ref_rel->rows / ref_tuples;
4275 		}
4276 		else
4277 		{
4278 			/*
4279 			 * Otherwise, selectivity is exactly 1/referenced-table-size; but
4280 			 * guard against tuples == 0.  Note we should use the raw table
4281 			 * tuple count, not any estimate of its filtered or joined size.
4282 			 */
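			/*
			 * For example, a referenced table with one million tuples
			 * contributes a factor of 1/1000000 = 1.0e-6 per matched FK,
			 * independently of any restrictions on the referenced rel.
			 */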
4283 			RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
4284 			double		ref_tuples = Max(ref_rel->tuples, 1.0);
4285 
4286 			fkselec *= 1.0 / ref_tuples;
4287 		}
4288 	}
4289 
4290 	*restrictlist = worklist;
4291 	return fkselec;
4292 }
4293 
4294 /*
4295  * set_subquery_size_estimates
4296  *		Set the size estimates for a base relation that is a subquery.
4297  *
4298  * The rel's targetlist and restrictinfo list must have been constructed
4299  * already, and the Paths for the subquery must have been completed.
4300  * We look at the subquery's PlannerInfo to extract data.
4301  *
4302  * We set the same fields as set_baserel_size_estimates.
4303  */
4304 void
4305 set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4306 {
4307 	PlannerInfo *subroot = rel->subroot;
4308 	RelOptInfo *sub_final_rel;
4309 	RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
4310 	ListCell   *lc;
4311 
4312 	/* Should only be applied to base relations that are subqueries */
4313 	Assert(rel->relid > 0);
4314 	rte = planner_rt_fetch(rel->relid, root);
4315 	Assert(rte->rtekind == RTE_SUBQUERY);
4316 
4317 	/*
4318 	 * Copy raw number of output rows from subquery.  All of its paths should
4319 	 * have the same output rowcount, so just look at cheapest-total.
4320 	 */
4321 	sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
4322 	rel->tuples = sub_final_rel->cheapest_total_path->rows;
4323 
4324 	/*
4325 	 * Compute per-output-column width estimates by examining the subquery's
4326 	 * targetlist.  For any output that is a plain Var, get the width estimate
4327 	 * that was made while planning the subquery.  Otherwise, we leave it to
4328 	 * set_rel_width to fill in a datatype-based default estimate.
4329 	 */
4330 	foreach(lc, subroot->parse->targetList)
4331 	{
4332 		TargetEntry *te = (TargetEntry *) lfirst(lc);
4333 		Node	   *texpr = (Node *) te->expr;
4334 		int32		item_width = 0;
4335 
4336 		Assert(IsA(te, TargetEntry));
4337 		/* junk columns aren't visible to upper query */
4338 		if (te->resjunk)
4339 			continue;
4340 
4341 		/*
4342 		 * The subquery could be an expansion of a view that's had columns
4343 		 * added to it since the current query was parsed, so that there are
4344 		 * non-junk tlist columns in it that don't correspond to any column
4345 		 * visible at our query level.  Ignore such columns.
4346 		 */
4347 		if (te->resno < rel->min_attr || te->resno > rel->max_attr)
4348 			continue;
4349 
4350 		/*
4351 		 * XXX This currently doesn't work for subqueries containing set
4352 		 * operations, because the Vars in their tlists are bogus references
4353 		 * to the first leaf subquery, which wouldn't give the right answer
4354 		 * even if we could still get to its PlannerInfo.
4355 		 *
4356 		 * Also, the subquery could be an appendrel for which all branches are
4357 		 * known empty due to constraint exclusion, in which case
4358 		 * set_append_rel_pathlist will have left the attr_widths set to zero.
4359 		 *
4360 		 * In either case, we just leave the width estimate zero until
4361 		 * set_rel_width fixes it.
4362 		 */
4363 		if (IsA(texpr, Var) &&
4364 			subroot->parse->setOperations == NULL)
4365 		{
4366 			Var		   *var = (Var *) texpr;
4367 			RelOptInfo *subrel = find_base_rel(subroot, var->varno);
4368 
4369 			item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
4370 		}
4371 		rel->attr_widths[te->resno - rel->min_attr] = item_width;
4372 	}
4373 
4374 	/* Now estimate number of output rows, etc */
4375 	set_baserel_size_estimates(root, rel);
4376 }
4377 
4378 /*
4379  * set_function_size_estimates
4380  *		Set the size estimates for a base relation that is a function call.
4381  *
4382  * The rel's targetlist and restrictinfo list must have been constructed
4383  * already.
4384  *
4385  * We set the same fields as set_baserel_size_estimates.
4386  */
4387 void
4388 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4389 {
4390 	RangeTblEntry *rte;
4391 	ListCell   *lc;
4392 
4393 	/* Should only be applied to base relations that are functions */
4394 	Assert(rel->relid > 0);
4395 	rte = planner_rt_fetch(rel->relid, root);
4396 	Assert(rte->rtekind == RTE_FUNCTION);
4397 
4398 	/*
4399 	 * Estimate number of rows the functions will return. The rowcount of the
4400 	 * node is that of the largest function result.
4401 	 */
4402 	rel->tuples = 0;
4403 	foreach(lc, rte->functions)
4404 	{
4405 		RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
4406 		double		ntup = expression_returns_set_rows(rtfunc->funcexpr);
4407 
4408 		if (ntup > rel->tuples)
4409 			rel->tuples = ntup;
4410 	}
4411 
4412 	/* Now estimate number of output rows, etc */
4413 	set_baserel_size_estimates(root, rel);
4414 }
4415 
4416 /*
4417  * set_values_size_estimates
4418  *		Set the size estimates for a base relation that is a values list.
4419  *
4420  * The rel's targetlist and restrictinfo list must have been constructed
4421  * already.
4422  *
4423  * We set the same fields as set_baserel_size_estimates.
4424  */
4425 void
4426 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4427 {
4428 	RangeTblEntry *rte;
4429 
4430 	/* Should only be applied to base relations that are values lists */
4431 	Assert(rel->relid > 0);
4432 	rte = planner_rt_fetch(rel->relid, root);
4433 	Assert(rte->rtekind == RTE_VALUES);
4434 
4435 	/*
4436 	 * Estimate number of rows the values list will return. We know this
4437 	 * precisely based on the list length (well, barring set-returning
4438 	 * functions in list items, but that's a refinement not catered for
4439 	 * anywhere else either).
4440 	 */
4441 	rel->tuples = list_length(rte->values_lists);
4442 
4443 	/* Now estimate number of output rows, etc */
4444 	set_baserel_size_estimates(root, rel);
4445 }
4446 
4447 /*
4448  * set_cte_size_estimates
4449  *		Set the size estimates for a base relation that is a CTE reference.
4450  *
4451  * The rel's targetlist and restrictinfo list must have been constructed
4452  * already, and we need an estimate of the number of rows returned by the CTE
4453  * (if a regular CTE) or the non-recursive term (if a self-reference).
4454  *
4455  * We set the same fields as set_baserel_size_estimates.
4456  */
4457 void
4458 set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
4459 {
4460 	RangeTblEntry *rte;
4461 
4462 	/* Should only be applied to base relations that are CTE references */
4463 	Assert(rel->relid > 0);
4464 	rte = planner_rt_fetch(rel->relid, root);
4465 	Assert(rte->rtekind == RTE_CTE);
4466 
4467 	if (rte->self_reference)
4468 	{
4469 		/*
4470 		 * In a self-reference, arbitrarily assume the average worktable size
4471 		 * is about 10 times the nonrecursive term's size.
4472 		 */
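		/* E.g., a nonrecursive term estimated at 50 rows gives 500 tuples. */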
4473 		rel->tuples = 10 * cte_rows;
4474 	}
4475 	else
4476 	{
4477 		/* Otherwise just believe the CTE's rowcount estimate */
4478 		rel->tuples = cte_rows;
4479 	}
4480 
4481 	/* Now estimate number of output rows, etc */
4482 	set_baserel_size_estimates(root, rel);
4483 }
4484 
4485 /*
4486  * set_foreign_size_estimates
4487  *		Set the size estimates for a base relation that is a foreign table.
4488  *
4489  * There is not a whole lot that we can do here; the foreign-data wrapper
4490  * is responsible for producing useful estimates.  We can do a decent job
4491  * of estimating baserestrictcost, so we set that, and we also set up width
4492  * using what will be purely datatype-driven estimates from the targetlist.
4493  * There is no way to do anything sane with the rows value, so we just put
4494  * a default estimate and hope that the wrapper can improve on it.  The
4495  * wrapper's GetForeignRelSize function will be called momentarily.
4496  *
4497  * The rel's targetlist and restrictinfo list must have been constructed
4498  * already.
4499  */
4500 void
4501 set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4502 {
4503 	/* Should only be applied to base relations */
4504 	Assert(rel->relid > 0);
4505 
4506 	rel->rows = 1000;			/* entirely bogus default estimate */
4507 
4508 	cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
4509 
4510 	set_rel_width(root, rel);
4511 }
4512 
4513 
4514 /*
4515  * set_rel_width
4516  *		Set the estimated output width of a base relation.
4517  *
4518  * The estimated output width is the sum of the per-attribute width estimates
4519  * for the actually-referenced columns, plus any PHVs or other expressions
4520  * that have to be calculated at this relation.  This is the amount of data
4521  * we'd need to pass upwards in case of a sort, hash, etc.
4522  *
4523  * This function also sets reltarget->cost, so it's a bit misnamed now.
4524  *
4525  * NB: this works best on plain relations because it prefers to look at
4526  * real Vars.  For subqueries, set_subquery_size_estimates will already have
4527  * copied up whatever per-column estimates were made within the subquery,
4528  * and for other types of rels there isn't much we can do anyway.  We fall
4529  * back on (fairly stupid) datatype-based width estimates if we can't get
4530  * any better number.
4531  *
4532  * The per-attribute width estimates are cached for possible re-use while
4533  * building join relations or post-scan/join pathtargets.
4534  */
4535 static void
4536 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
4537 {
4538 	Oid			reloid = planner_rt_fetch(rel->relid, root)->relid;
4539 	int32		tuple_width = 0;
4540 	bool		have_wholerow_var = false;
4541 	ListCell   *lc;
4542 
4543 	/* Vars are assumed to have cost zero, but other exprs do not */
4544 	rel->reltarget->cost.startup = 0;
4545 	rel->reltarget->cost.per_tuple = 0;
4546 
4547 	foreach(lc, rel->reltarget->exprs)
4548 	{
4549 		Node	   *node = (Node *) lfirst(lc);
4550 
4551 		/*
4552 		 * Ordinarily, a Var in a rel's targetlist must belong to that rel;
4553 		 * but there are corner cases involving LATERAL references where that
4554 		 * isn't so.  If the Var has the wrong varno, fall through to the
4555 		 * generic case (it doesn't seem worth the trouble to be any smarter).
4556 		 */
4557 		if (IsA(node, Var) &&
4558 			((Var *) node)->varno == rel->relid)
4559 		{
4560 			Var		   *var = (Var *) node;
4561 			int			ndx;
4562 			int32		item_width;
4563 
4564 			Assert(var->varattno >= rel->min_attr);
4565 			Assert(var->varattno <= rel->max_attr);
4566 
4567 			ndx = var->varattno - rel->min_attr;
4568 
4569 			/*
4570 			 * If it's a whole-row Var, we'll deal with it below after we have
4571 			 * already cached as many attr widths as possible.
4572 			 */
4573 			if (var->varattno == 0)
4574 			{
4575 				have_wholerow_var = true;
4576 				continue;
4577 			}
4578 
4579 			/*
4580 			 * The width may have been cached already (especially if it's a
4581 			 * subquery), so don't duplicate effort.
4582 			 */
4583 			if (rel->attr_widths[ndx] > 0)
4584 			{
4585 				tuple_width += rel->attr_widths[ndx];
4586 				continue;
4587 			}
4588 
4589 			/* Try to get column width from statistics */
4590 			if (reloid != InvalidOid && var->varattno > 0)
4591 			{
4592 				item_width = get_attavgwidth(reloid, var->varattno);
4593 				if (item_width > 0)
4594 				{
4595 					rel->attr_widths[ndx] = item_width;
4596 					tuple_width += item_width;
4597 					continue;
4598 				}
4599 			}
4600 
4601 			/*
4602 			 * Not a plain relation, or can't find statistics for it. Estimate
4603 			 * using just the type info.
4604 			 */
4605 			item_width = get_typavgwidth(var->vartype, var->vartypmod);
4606 			Assert(item_width > 0);
4607 			rel->attr_widths[ndx] = item_width;
4608 			tuple_width += item_width;
4609 		}
4610 		else if (IsA(node, PlaceHolderVar))
4611 		{
4612 			/*
4613 			 * We will need to evaluate the PHV's contained expression while
4614 			 * scanning this rel, so be sure to include it in reltarget->cost.
4615 			 */
4616 			PlaceHolderVar *phv = (PlaceHolderVar *) node;
4617 			PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);
4618 			QualCost	cost;
4619 
4620 			tuple_width += phinfo->ph_width;
4621 			cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
4622 			rel->reltarget->cost.startup += cost.startup;
4623 			rel->reltarget->cost.per_tuple += cost.per_tuple;
4624 		}
4625 		else
4626 		{
4627 			/*
4628 			 * We could be looking at an expression pulled up from a subquery,
4629 			 * or a ROW() representing a whole-row child Var, etc.  Do what we
4630 			 * can using the expression type information.
4631 			 */
4632 			int32		item_width;
4633 			QualCost	cost;
4634 
4635 			item_width = get_typavgwidth(exprType(node), exprTypmod(node));
4636 			Assert(item_width > 0);
4637 			tuple_width += item_width;
4638 			/* Not entirely clear if we need to account for cost, but do so */
4639 			cost_qual_eval_node(&cost, node, root);
4640 			rel->reltarget->cost.startup += cost.startup;
4641 			rel->reltarget->cost.per_tuple += cost.per_tuple;
4642 		}
4643 	}
4644 
4645 	/*
4646 	 * If we have a whole-row reference, estimate its width as the sum of
4647 	 * per-column widths plus heap tuple header overhead.
4648 	 */
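	/*
	 * Illustrative case (hypothetical widths), per the phony-rel branch
	 * below: cached attr_widths of 4, 16 and 32 bytes sum to 52, giving a
	 * whole-row width of MAXALIGN(SizeofHeapTupleHeader) + 52, i.e.
	 * 24 + 52 = 76 bytes on a typical 64-bit build.
	 */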
4649 	if (have_wholerow_var)
4650 	{
4651 		int32		wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
4652 
4653 		if (reloid != InvalidOid)
4654 		{
4655 			/* Real relation, so estimate true tuple width */
4656 			wholerow_width += get_relation_data_width(reloid,
4657 										   rel->attr_widths - rel->min_attr);
4658 		}
4659 		else
4660 		{
4661 			/* Do what we can with info for a phony rel */
4662 			AttrNumber	i;
4663 
4664 			for (i = 1; i <= rel->max_attr; i++)
4665 				wholerow_width += rel->attr_widths[i - rel->min_attr];
4666 		}
4667 
4668 		rel->attr_widths[0 - rel->min_attr] = wholerow_width;
4669 
4670 		/*
4671 		 * Include the whole-row Var as part of the output tuple.  Yes, that
4672 		 * really is what happens at runtime.
4673 		 */
4674 		tuple_width += wholerow_width;
4675 	}
4676 
4677 	Assert(tuple_width >= 0);
4678 	rel->reltarget->width = tuple_width;
4679 }
4680 
4681 /*
4682  * set_pathtarget_cost_width
4683  *		Set the estimated eval cost and output width of a PathTarget tlist.
4684  *
4685  * As a notational convenience, returns the same PathTarget pointer passed in.
4686  *
4687  * Most, though not quite all, uses of this function occur after we've run
4688  * set_rel_width() for base relations; so we can usually obtain cached width
4689  * estimates for Vars.  If we can't, fall back on datatype-based width
4690  * estimates.  Present early-planning uses of PathTargets don't need accurate
4691  * widths badly enough to justify going to the catalogs for better data.
4692  */
4693 PathTarget *
4694 set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
4695 {
4696 	int32		tuple_width = 0;
4697 	ListCell   *lc;
4698 
4699 	/* Vars are assumed to have cost zero, but other exprs do not */
4700 	target->cost.startup = 0;
4701 	target->cost.per_tuple = 0;
4702 
4703 	foreach(lc, target->exprs)
4704 	{
4705 		Node	   *node = (Node *) lfirst(lc);
4706 
4707 		if (IsA(node, Var))
4708 		{
4709 			Var		   *var = (Var *) node;
4710 			int32		item_width;
4711 
4712 			/* We should not see any upper-level Vars here */
4713 			Assert(var->varlevelsup == 0);
4714 
4715 			/* Try to get data from RelOptInfo cache */
4716 			if (var->varno < root->simple_rel_array_size)
4717 			{
4718 				RelOptInfo *rel = root->simple_rel_array[var->varno];
4719 
4720 				if (rel != NULL &&
4721 					var->varattno >= rel->min_attr &&
4722 					var->varattno <= rel->max_attr)
4723 				{
4724 					int			ndx = var->varattno - rel->min_attr;
4725 
4726 					if (rel->attr_widths[ndx] > 0)
4727 					{
4728 						tuple_width += rel->attr_widths[ndx];
4729 						continue;
4730 					}
4731 				}
4732 			}
4733 
4734 			/*
4735 			 * No cached data available, so estimate using just the type info.
4736 			 */
4737 			item_width = get_typavgwidth(var->vartype, var->vartypmod);
4738 			Assert(item_width > 0);
4739 			tuple_width += item_width;
4740 		}
4741 		else
4742 		{
4743 			/*
4744 			 * Handle general expressions using type info.
4745 			 */
4746 			int32		item_width;
4747 			QualCost	cost;
4748 
4749 			item_width = get_typavgwidth(exprType(node), exprTypmod(node));
4750 			Assert(item_width > 0);
4751 			tuple_width += item_width;
4752 
4753 			/* Account for cost, too */
4754 			cost_qual_eval_node(&cost, node, root);
4755 			target->cost.startup += cost.startup;
4756 			target->cost.per_tuple += cost.per_tuple;
4757 		}
4758 	}
4759 
4760 	Assert(tuple_width >= 0);
4761 	target->width = tuple_width;
4762 
4763 	return target;
4764 }
4765 
4766 /*
4767  * relation_byte_size
4768  *	  Estimate the storage space in bytes for a given number of tuples
4769  *	  of a given width (size in bytes).
4770  */
4771 static double
4772 relation_byte_size(double tuples, int width)
4773 {
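	/*
	 * Worked example (illustrative numbers): 1000 tuples of width 100 come
	 * out as 1000 * (MAXALIGN(100) + MAXALIGN(SizeofHeapTupleHeader)) bytes,
	 * i.e. 1000 * (104 + 24) = 128000 bytes on a typical 64-bit build.
	 */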
4774 	return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
4775 }
4776 
4777 /*
4778  * page_size
4779  *	  Returns an estimate of the number of pages covered by a given
4780  *	  number of tuples of a given width (size in bytes).
4781  */
4782 static double
4783 page_size(double tuples, int width)
4784 {
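	/*
	 * Continuing the example above: 128000 bytes spread over the default
	 * 8192-byte blocks give ceil(128000 / 8192) = ceil(15.625) = 16 pages.
	 */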
4785 	return ceil(relation_byte_size(tuples, width) / BLCKSZ);
4786 }
4787 
4788 /*
4789  * Estimate the fraction of the work that each worker will do given the
4790  * number of workers budgeted for the path.
4791  */
4792 static double
4793 get_parallel_divisor(Path *path)
4794 {
4795 	double		parallel_divisor = path->parallel_workers;
4796 	double		leader_contribution;
4797 
4798 	/*
4799 	 * Early experience with parallel query suggests that when there is only
4800 	 * one worker, the leader often makes a very substantial contribution to
4801 	 * executing the parallel portion of the plan, but as more workers are
4802 	 * added, it does less and less, because it's busy reading tuples from the
4803 	 * workers and doing whatever non-parallel post-processing is needed.  By
4804 	 * the time we reach 4 workers, the leader no longer makes a meaningful
4805 	 * contribution.  Thus, for now, estimate that the leader spends 30% of
4806 	 * its time servicing each worker, and the remainder executing the
4807 	 * parallel plan.
4808 	 */
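	/*
	 * For example: one worker yields a divisor of 1 + 0.7 = 1.7, two workers
	 * 2 + 0.4 = 2.4, three workers 3 + 0.1 = 3.1; at four or more workers
	 * the leader term drops to zero or below and is ignored, leaving just
	 * the worker count.
	 */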
4809 	leader_contribution = 1.0 - (0.3 * path->parallel_workers);
4810 	if (leader_contribution > 0)
4811 		parallel_divisor += leader_contribution;
4812 
4813 	return parallel_divisor;
4814 }
4815