/*-------------------------------------------------------------------------
 *
 * selfuncs.c
 *	  Selectivity functions and index cost estimation functions for
 *	  standard operators and index access methods.
 *
 *	  Selectivity routines are registered in the pg_operator catalog
 *	  in the "oprrest" and "oprjoin" attributes.
 *
 *	  Index cost functions are located via the index AM's API struct,
 *	  which is obtained from the handler function registered in pg_am.
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/utils/adt/selfuncs.c
 *
 *-------------------------------------------------------------------------
 */

/*----------
 * Operator selectivity estimation functions are called to estimate the
 * selectivity of WHERE clauses whose top-level operator is their operator.
 * We divide the problem into two cases:
 *		Restriction clause estimation: the clause involves vars of just
 *			one relation.
 *		Join clause estimation: the clause involves vars of multiple rels.
 * Join selectivity estimation is far more difficult and usually less accurate
 * than restriction estimation.
 *
 * When dealing with the inner scan of a nestloop join, we consider the
 * join's joinclauses as restriction clauses for the inner relation, and
 * treat vars of the outer relation as parameters (a/k/a constants of unknown
 * values).  So, restriction estimators need to be able to accept an argument
 * telling which relation is to be treated as the variable.
 *
 * The call convention for a restriction estimator (oprrest function) is
 *
 *		Selectivity oprrest (PlannerInfo *root,
 *							 Oid operator,
 *							 List *args,
 *							 int varRelid);
 *
 * root: general information about the query (rtable and RelOptInfo lists
 * are particularly important for the estimator).
 * operator: OID of the specific operator in question.
 * args: argument list from the operator clause.
 * varRelid: if not zero, the relid (rtable index) of the relation to
 * be treated as the variable relation.  May be zero if the args list
 * is known to contain vars of only one relation.
 *
 * This is represented at the SQL level (in pg_proc) as
 *
 *		float8 oprrest (internal, oid, internal, int4);
 *
 * The result is a selectivity, that is, a fraction (0 to 1) of the rows
 * of the relation that are expected to produce a TRUE result for the
 * given operator.
 *
 * The call convention for a join estimator (oprjoin function) is similar
 * except that varRelid is not needed, and instead join information is
 * supplied:
 *
 *		Selectivity oprjoin (PlannerInfo *root,
 *							 Oid operator,
 *							 List *args,
 *							 JoinType jointype,
 *							 SpecialJoinInfo *sjinfo);
 *
 *		float8 oprjoin (internal, oid, internal, int2, internal);
 *
 * (Before Postgres 8.4, join estimators had only the first four of these
 * parameters.  That signature is still allowed, but deprecated.)  The
 * relationship between jointype and sjinfo is explained in the comments for
 * clause_selectivity() --- the short version is that jointype is usually
 * best ignored in favor of examining sjinfo.
 *
 * Join selectivity for regular inner and outer joins is defined as the
 * fraction (0 to 1) of the cross product of the relations that is expected
 * to produce a TRUE result for the given operator.  For both semi and anti
 * joins, however, the selectivity is defined as the fraction of the left-hand
 * side relation's rows that are expected to have a match (ie, at least one
 * row with a TRUE result) in the right-hand side.
 *
 * For both oprrest and oprjoin functions, the operator's input collation OID
 * (if any) is passed using the standard fmgr mechanism, so that the estimator
 * function can fetch it with PG_GET_COLLATION().  Note, however, that all
 * statistics in pg_statistic are currently built using the relevant column's
 * collation.
 *----------
 */
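
/*----------
 * As an illustration (not part of PostgreSQL itself), a minimal custom
 * restriction estimator following the convention above could look like
 * the sketch below.  The name "myopsel" and the flat 1% selectivity are
 * assumptions for the example only.  It would be declared at the SQL
 * level as "float8 myopsel(internal, oid, internal, int4)" and attached
 * to an operator with "CREATE OPERATOR ... ( ..., RESTRICT = myopsel )".
 *
 *		Datum
 *		myopsel(PG_FUNCTION_ARGS)
 *		{
 *			PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
 *			Oid			operator = PG_GETARG_OID(1);
 *			List	   *args = (List *) PG_GETARG_POINTER(2);
 *			int			varRelid = PG_GETARG_INT32(3);
 *
 *			(a real estimator would consult statistics here, as the
 *			functions in this file do)
 *			PG_RETURN_FLOAT8(0.01);
 *		}
 *----------
 */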

#include "postgres.h"

#include <ctype.h>
#include <math.h>

#include "access/brin.h"
#include "access/brin_page.h"
#include "access/gin.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/visibilitymap.h"
#include "catalog/pg_am.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_statistic.h"
#include "catalog/pg_statistic_ext.h"
#include "executor/nodeAgg.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "statistics/statistics.h"
#include "storage/bufmgr.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/date.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/index_selfuncs.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_locale.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
#include "utils/snapmgr.h"
#include "utils/spccache.h"
#include "utils/syscache.h"
#include "utils/timestamp.h"
#include "utils/typcache.h"


/* Hooks for plugins to get control when we ask for stats */
get_relation_stats_hook_type get_relation_stats_hook = NULL;
get_index_stats_hook_type get_index_stats_hook = NULL;

static double eqsel_internal(PG_FUNCTION_ARGS, bool negate);
static double eqjoinsel_inner(Oid opfuncoid, Oid collation,
							  VariableStatData *vardata1, VariableStatData *vardata2,
							  double nd1, double nd2,
							  bool isdefault1, bool isdefault2,
							  AttStatsSlot *sslot1, AttStatsSlot *sslot2,
							  Form_pg_statistic stats1, Form_pg_statistic stats2,
							  bool have_mcvs1, bool have_mcvs2);
static double eqjoinsel_semi(Oid opfuncoid, Oid collation,
							 VariableStatData *vardata1, VariableStatData *vardata2,
							 double nd1, double nd2,
							 bool isdefault1, bool isdefault2,
							 AttStatsSlot *sslot1, AttStatsSlot *sslot2,
							 Form_pg_statistic stats1, Form_pg_statistic stats2,
							 bool have_mcvs1, bool have_mcvs2,
							 RelOptInfo *inner_rel);
static bool estimate_multivariate_ndistinct(PlannerInfo *root,
											RelOptInfo *rel, List **varinfos, double *ndistinct);
static bool convert_to_scalar(Datum value, Oid valuetypid, Oid collid,
							  double *scaledvalue,
							  Datum lobound, Datum hibound, Oid boundstypid,
							  double *scaledlobound, double *scaledhibound);
static double convert_numeric_to_scalar(Datum value, Oid typid, bool *failure);
static void convert_string_to_scalar(char *value,
									 double *scaledvalue,
									 char *lobound,
									 double *scaledlobound,
									 char *hibound,
									 double *scaledhibound);
static void convert_bytea_to_scalar(Datum value,
									double *scaledvalue,
									Datum lobound,
									double *scaledlobound,
									Datum hibound,
									double *scaledhibound);
static double convert_one_string_to_scalar(char *value,
										   int rangelo, int rangehi);
static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
										  int rangelo, int rangehi);
static char *convert_string_datum(Datum value, Oid typid, Oid collid,
								  bool *failure);
static double convert_timevalue_to_scalar(Datum value, Oid typid,
										  bool *failure);
static void examine_simple_variable(PlannerInfo *root, Var *var,
									VariableStatData *vardata);
static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
							   Oid sortop, Oid collation,
							   Datum *min, Datum *max);
static void get_stats_slot_range(AttStatsSlot *sslot,
								 Oid opfuncoid, FmgrInfo *opproc,
								 Oid collation, int16 typLen, bool typByVal,
								 Datum *min, Datum *max, bool *p_have_data);
static bool get_actual_variable_range(PlannerInfo *root,
									  VariableStatData *vardata,
									  Oid sortop, Oid collation,
									  Datum *min, Datum *max);
static bool get_actual_variable_endpoint(Relation heapRel,
										 Relation indexRel,
										 ScanDirection indexscandir,
										 ScanKey scankeys,
										 int16 typLen,
										 bool typByVal,
										 TupleTableSlot *tableslot,
										 MemoryContext outercontext,
										 Datum *endpointDatum);
static RelOptInfo *find_join_input_rel(PlannerInfo *root, Relids relids);


/*
 *		eqsel			- Selectivity of "=" for any data types.
 *
 * Note: this routine is also used to estimate selectivity for some
 * operators that are not "=" but have comparable selectivity behavior,
 * such as "~=" (geometric approximate-match).  Even for "=", we must
 * keep in mind that the left and right datatypes may differ.
 */
Datum
eqsel(PG_FUNCTION_ARGS)
{
	PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, false));
}

/*
 * Common code for eqsel() and neqsel()
 */
static double
eqsel_internal(PG_FUNCTION_ARGS, bool negate)
{
	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
	Oid			operator = PG_GETARG_OID(1);
	List	   *args = (List *) PG_GETARG_POINTER(2);
	int			varRelid = PG_GETARG_INT32(3);
	Oid			collation = PG_GET_COLLATION();
	VariableStatData vardata;
	Node	   *other;
	bool		varonleft;
	double		selec;

	/*
	 * When asked about <>, we do the estimation using the corresponding =
	 * operator, then convert to <> via "1.0 - eq_selectivity - nullfrac".
	 */
	if (negate)
	{
		operator = get_negator(operator);
		if (!OidIsValid(operator))
		{
			/* Use default selectivity (should we raise an error instead?) */
			return 1.0 - DEFAULT_EQ_SEL;
		}
	}

	/*
	 * If expression is not variable = something or something = variable, then
	 * punt and return a default estimate.
	 */
	if (!get_restriction_variable(root, args, varRelid,
								  &vardata, &other, &varonleft))
		return negate ? (1.0 - DEFAULT_EQ_SEL) : DEFAULT_EQ_SEL;

	/*
	 * We can do a lot better if the something is a constant.  (Note: the
	 * Const might result from estimation rather than being a simple constant
	 * in the query.)
	 */
	if (IsA(other, Const))
		selec = var_eq_const(&vardata, operator, collation,
							 ((Const *) other)->constvalue,
							 ((Const *) other)->constisnull,
							 varonleft, negate);
	else
		selec = var_eq_non_const(&vardata, operator, collation, other,
								 varonleft, negate);

	ReleaseVariableStats(vardata);

	return selec;
}

/*
 * var_eq_const --- eqsel for var = const case
 *
 * This is exported so that some other estimation functions can use it.
 */
double
var_eq_const(VariableStatData *vardata, Oid operator, Oid collation,
			 Datum constval, bool constisnull,
			 bool varonleft, bool negate)
{
	double		selec;
	double		nullfrac = 0.0;
	bool		isdefault;
	Oid			opfuncoid;

	/*
	 * If the constant is NULL, assume operator is strict and return zero, ie,
	 * operator will never return TRUE.  (It's zero even for a negator op.)
	 */
	if (constisnull)
		return 0.0;

	/*
	 * Grab the nullfrac for use below.  Note we allow use of nullfrac
	 * regardless of security check.
	 */
	if (HeapTupleIsValid(vardata->statsTuple))
	{
		Form_pg_statistic stats;

		stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
		nullfrac = stats->stanullfrac;
	}

	/*
	 * If we matched the var to a unique index or DISTINCT clause, assume
	 * there is exactly one match regardless of anything else.  (This is
	 * slightly bogus, since the index or clause's equality operator might be
	 * different from ours, but it's much more likely to be right than
	 * ignoring the information.)
	 */
	if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
	{
		selec = 1.0 / vardata->rel->tuples;
	}
	else if (HeapTupleIsValid(vardata->statsTuple) &&
			 statistic_proc_security_check(vardata,
										   (opfuncoid = get_opcode(operator))))
	{
		AttStatsSlot sslot;
		bool		match = false;
		int			i;

		/*
		 * Is the constant "=" to any of the column's most common values?
		 * (Although the given operator may not really be "=", we will assume
		 * that seeing whether it returns TRUE is an appropriate test.  If you
		 * don't like this, maybe you shouldn't be using eqsel for your
		 * operator...)
		 */
		if (get_attstatsslot(&sslot, vardata->statsTuple,
							 STATISTIC_KIND_MCV, InvalidOid,
							 ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
		{
			LOCAL_FCINFO(fcinfo, 2);
			FmgrInfo	eqproc;

			fmgr_info(opfuncoid, &eqproc);

			/*
			 * Save a few cycles by setting up the fcinfo struct just once.
			 * Using FunctionCallInvoke directly also avoids failure if the
			 * eqproc returns NULL, though really equality functions should
			 * never do that.
			 */
			InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
									 NULL, NULL);
			fcinfo->args[0].isnull = false;
			fcinfo->args[1].isnull = false;
			/* be careful to apply operator right way 'round */
			if (varonleft)
				fcinfo->args[1].value = constval;
			else
				fcinfo->args[0].value = constval;

			for (i = 0; i < sslot.nvalues; i++)
			{
				Datum		fresult;

				if (varonleft)
					fcinfo->args[0].value = sslot.values[i];
				else
					fcinfo->args[1].value = sslot.values[i];
				fcinfo->isnull = false;
				fresult = FunctionCallInvoke(fcinfo);
				if (!fcinfo->isnull && DatumGetBool(fresult))
				{
					match = true;
					break;
				}
			}
		}
		else
		{
			/* no most-common-value info available */
			i = 0;				/* keep compiler quiet */
		}

		if (match)
		{
			/*
			 * Constant is "=" to this common value.  We know selectivity
			 * exactly (or as exactly as ANALYZE could calculate it, anyway).
			 */
			selec = sslot.numbers[i];
		}
		else
		{
			/*
			 * Comparison is against a constant that is neither NULL nor any
			 * of the common values.  Its selectivity cannot be more than
			 * this:
			 */
			double		sumcommon = 0.0;
			double		otherdistinct;

			for (i = 0; i < sslot.nnumbers; i++)
				sumcommon += sslot.numbers[i];
			selec = 1.0 - sumcommon - nullfrac;
			CLAMP_PROBABILITY(selec);

			/*
			 * and in fact it's probably a good deal less. We approximate that
			 * all the not-common values share this remaining fraction
			 * equally, so we divide by the number of other distinct values.
			 */
			otherdistinct = get_variable_numdistinct(vardata, &isdefault) -
				sslot.nnumbers;
			if (otherdistinct > 1)
				selec /= otherdistinct;

			/*
			 * Another cross-check: selectivity shouldn't be estimated as more
			 * than the least common "most common value".
			 */
			if (sslot.nnumbers > 0 && selec > sslot.numbers[sslot.nnumbers - 1])
				selec = sslot.numbers[sslot.nnumbers - 1];
		}

		free_attstatsslot(&sslot);
	}
	else
	{
		/*
		 * No ANALYZE stats available, so make a guess using estimated number
		 * of distinct values and assuming they are equally common. (The guess
		 * is unlikely to be very good, but we do know a few special cases.)
		 */
		selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
	}

	/* now adjust if we wanted <> rather than = */
	if (negate)
		selec = 1.0 - selec - nullfrac;

	/* result should be in range, but make sure... */
	CLAMP_PROBABILITY(selec);

	return selec;
}
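
/*
 * Worked example for var_eq_const (numbers are illustrative only): with
 * nullfrac = 0.1, an MCV list whose frequencies sum to sumcommon = 0.5,
 * and 101 distinct values of which 10 are MCVs, a constant that matches
 * no MCV is estimated at
 *
 *		selec = (1.0 - 0.5 - 0.1) / (101 - 10) = 0.4 / 91, about 0.0044
 *
 * which is then capped at the least common MCV's frequency.  For the
 * negated (<>) case the result would be 1.0 - 0.0044 - 0.1 = 0.8956.
 */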

/*
 * var_eq_non_const --- eqsel for var = something-other-than-const case
 *
 * This is exported so that some other estimation functions can use it.
 */
double
var_eq_non_const(VariableStatData *vardata, Oid operator, Oid collation,
				 Node *other,
				 bool varonleft, bool negate)
{
	double		selec;
	double		nullfrac = 0.0;
	bool		isdefault;

	/*
	 * Grab the nullfrac for use below.
	 */
	if (HeapTupleIsValid(vardata->statsTuple))
	{
		Form_pg_statistic stats;

		stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
		nullfrac = stats->stanullfrac;
	}

	/*
	 * If we matched the var to a unique index or DISTINCT clause, assume
	 * there is exactly one match regardless of anything else.  (This is
	 * slightly bogus, since the index or clause's equality operator might be
	 * different from ours, but it's much more likely to be right than
	 * ignoring the information.)
	 */
	if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
	{
		selec = 1.0 / vardata->rel->tuples;
	}
	else if (HeapTupleIsValid(vardata->statsTuple))
	{
		double		ndistinct;
		AttStatsSlot sslot;

		/*
		 * Search is for a value that we do not know a priori, but we will
		 * assume it is not NULL.  Estimate the selectivity as non-null
		 * fraction divided by number of distinct values, so that we get a
		 * result averaged over all possible values whether common or
		 * uncommon.  (Essentially, we are assuming that the not-yet-known
		 * comparison value is equally likely to be any of the possible
		 * values, regardless of their frequency in the table.  Is that a good
		 * idea?)
		 */
		selec = 1.0 - nullfrac;
		ndistinct = get_variable_numdistinct(vardata, &isdefault);
		if (ndistinct > 1)
			selec /= ndistinct;

		/*
		 * Cross-check: selectivity should never be estimated as more than the
		 * most common value's.
		 */
		if (get_attstatsslot(&sslot, vardata->statsTuple,
							 STATISTIC_KIND_MCV, InvalidOid,
							 ATTSTATSSLOT_NUMBERS))
		{
			if (sslot.nnumbers > 0 && selec > sslot.numbers[0])
				selec = sslot.numbers[0];
			free_attstatsslot(&sslot);
		}
	}
	else
	{
		/*
		 * No ANALYZE stats available, so make a guess using estimated number
		 * of distinct values and assuming they are equally common. (The guess
		 * is unlikely to be very good, but we do know a few special cases.)
		 */
		selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
	}

	/* now adjust if we wanted <> rather than = */
	if (negate)
		selec = 1.0 - selec - nullfrac;

	/* result should be in range, but make sure... */
	CLAMP_PROBABILITY(selec);

	return selec;
}

/*
 *		neqsel			- Selectivity of "!=" for any data types.
 *
 * This routine is also used for some operators that are not "!="
 * but have comparable selectivity behavior.  See above comments
 * for eqsel().
 */
Datum
neqsel(PG_FUNCTION_ARGS)
{
	PG_RETURN_FLOAT8((float8) eqsel_internal(fcinfo, true));
}

/*
 *	scalarineqsel		- Selectivity of "<", "<=", ">", ">=" for scalars.
 *
 * This is the guts of scalarltsel/scalarlesel/scalargtsel/scalargesel.
 * The isgt and iseq flags distinguish which of the four cases apply.
 *
 * The caller has commuted the clause, if necessary, so that we can treat
 * the variable as being on the left.  The caller must also make sure that
 * the other side of the clause is a non-null Const, and dissect that into
 * a value and datatype.  (This definition simplifies some callers that
 * want to estimate against a computed value instead of a Const node.)
 *
 * This routine works for any datatype (or pair of datatypes) known to
 * convert_to_scalar().  If it is applied to some other datatype,
 * it will return an approximate estimate based on assuming that the constant
 * value falls in the middle of the bin identified by binary search.
 */
static double
scalarineqsel(PlannerInfo *root, Oid operator, bool isgt, bool iseq,
			  Oid collation,
			  VariableStatData *vardata, Datum constval, Oid consttype)
{
	Form_pg_statistic stats;
	FmgrInfo	opproc;
	double		mcv_selec,
				hist_selec,
				sumcommon;
	double		selec;

	if (!HeapTupleIsValid(vardata->statsTuple))
	{
		/*
		 * No stats are available.  Typically this means we have to fall back
		 * on the default estimate; but if the variable is CTID then we can
		 * make an estimate based on comparing the constant to the table size.
		 */
		if (vardata->var && IsA(vardata->var, Var) &&
			((Var *) vardata->var)->varattno == SelfItemPointerAttributeNumber)
		{
			ItemPointer itemptr;
			double		block;
			double		density;

			/*
			 * If the relation's empty, we're going to include all of it.
			 * (This is mostly to avoid divide-by-zero below.)
			 */
			if (vardata->rel->pages == 0)
				return 1.0;

			itemptr = (ItemPointer) DatumGetPointer(constval);
			block = ItemPointerGetBlockNumberNoCheck(itemptr);

			/*
			 * Determine the average number of tuples per page (density).
			 *
			 * Since the last page will, on average, be only half full, we can
			 * estimate it to have half as many tuples as earlier pages.  So
			 * give it half the weight of a regular page.
			 */
			density = vardata->rel->tuples / (vardata->rel->pages - 0.5);

			/* If target is the last page, use half the density. */
			if (block >= vardata->rel->pages - 1)
				density *= 0.5;

			/*
			 * Using the average tuples per page, calculate how far into the
			 * page the itemptr is likely to be and adjust block accordingly,
			 * by adding that fraction of a whole block (but never more than a
			 * whole block, no matter how high the itemptr's offset is).  Here
			 * we are ignoring the possibility of dead-tuple line pointers,
			 * which is fairly bogus, but we lack the info to do better.
			 */
			if (density > 0.0)
			{
				OffsetNumber offset = ItemPointerGetOffsetNumberNoCheck(itemptr);

				block += Min(offset / density, 1.0);
			}

			/*
			 * Convert relative block number to selectivity.  Again, the last
			 * page has only half weight.
			 */
			selec = block / (vardata->rel->pages - 0.5);

			/*
			 * The calculation so far gave us a selectivity for the "<=" case.
			 * We'll have one fewer tuple for "<" and one additional tuple for
			 * ">=", the latter of which we'll reverse the selectivity for
			 * below, so we can simply subtract one tuple for both cases.  The
			 * cases that need this adjustment can be identified by iseq being
			 * equal to isgt.
			 */
			if (iseq == isgt && vardata->rel->tuples >= 1.0)
				selec -= (1.0 / vardata->rel->tuples);

			/* Finally, reverse the selectivity for the ">", ">=" cases. */
			if (isgt)
				selec = 1.0 - selec;

			CLAMP_PROBABILITY(selec);
			return selec;
		}

		/* no stats available, so default result */
		return DEFAULT_INEQ_SEL;
	}
	stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);

	fmgr_info(get_opcode(operator), &opproc);

	/*
	 * If we have most-common-values info, add up the fractions of the MCV
	 * entries that satisfy MCV OP CONST.  These fractions contribute directly
	 * to the result selectivity.  Also add up the total fraction represented
	 * by MCV entries.
	 */
	mcv_selec = mcv_selectivity(vardata, &opproc, collation, constval, true,
								&sumcommon);

	/*
	 * If there is a histogram, determine which bin the constant falls in, and
	 * compute the resulting contribution to selectivity.
	 */
	hist_selec = ineq_histogram_selectivity(root, vardata,
											operator, &opproc, isgt, iseq,
											collation,
											constval, consttype);

	/*
	 * Now merge the results from the MCV and histogram calculations,
	 * realizing that the histogram covers only the non-null values that are
	 * not listed in MCV.
	 */
	selec = 1.0 - stats->stanullfrac - sumcommon;

	if (hist_selec >= 0.0)
		selec *= hist_selec;
	else
	{
		/*
		 * If no histogram but there are values not accounted for by MCV,
		 * arbitrarily assume half of them will match.
		 */
		selec *= 0.5;
	}

	selec += mcv_selec;

	/* result should be in range, but make sure... */
	CLAMP_PROBABILITY(selec);

	return selec;
}
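
/*
 * Worked example of the merge step above (illustrative numbers only):
 * with stanullfrac = 0.1, MCV entries summing to sumcommon = 0.3 of
 * which mcv_selec = 0.2 satisfy the inequality, and hist_selec = 0.25
 * of the histogram population passing, the result is
 *
 *		selec = (1.0 - 0.1 - 0.3) * 0.25 + 0.2 = 0.35
 *
 * ie, the histogram fraction is scaled to the non-null, non-MCV part of
 * the population before the MCV contribution is added back in.
 */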

/*
 *	mcv_selectivity			- Examine the MCV list for selectivity estimates
 *
 * Determine the fraction of the variable's MCV population that satisfies
 * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.  Also
 * compute the fraction of the total column population represented by the MCV
 * list.  This code will work for any boolean-returning predicate operator.
 *
 * The function result is the MCV selectivity, and the fraction of the
 * total population is returned into *sumcommonp.  Zeroes are returned
 * if there is no MCV list.
 */
double
mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, Oid collation,
				Datum constval, bool varonleft,
				double *sumcommonp)
{
	double		mcv_selec,
				sumcommon;
	AttStatsSlot sslot;
	int			i;

	mcv_selec = 0.0;
	sumcommon = 0.0;

	if (HeapTupleIsValid(vardata->statsTuple) &&
		statistic_proc_security_check(vardata, opproc->fn_oid) &&
		get_attstatsslot(&sslot, vardata->statsTuple,
						 STATISTIC_KIND_MCV, InvalidOid,
						 ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
	{
		LOCAL_FCINFO(fcinfo, 2);

		/*
		 * We invoke the opproc "by hand" so that we won't fail on NULL
		 * results.  Such cases won't arise for normal comparison functions,
		 * but generic_restriction_selectivity could perhaps be used with
		 * operators that can return NULL.  A small side benefit is to not
		 * need to re-initialize the fcinfo struct from scratch each time.
		 */
		InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
								 NULL, NULL);
		fcinfo->args[0].isnull = false;
		fcinfo->args[1].isnull = false;
		/* be careful to apply operator right way 'round */
		if (varonleft)
			fcinfo->args[1].value = constval;
		else
			fcinfo->args[0].value = constval;

		for (i = 0; i < sslot.nvalues; i++)
		{
			Datum		fresult;

			if (varonleft)
				fcinfo->args[0].value = sslot.values[i];
			else
				fcinfo->args[1].value = sslot.values[i];
			fcinfo->isnull = false;
			fresult = FunctionCallInvoke(fcinfo);
			if (!fcinfo->isnull && DatumGetBool(fresult))
				mcv_selec += sslot.numbers[i];
			sumcommon += sslot.numbers[i];
		}
		free_attstatsslot(&sslot);
	}

	*sumcommonp = sumcommon;
	return mcv_selec;
}

/*
 *	histogram_selectivity	- Examine the histogram for selectivity estimates
 *
 * Determine the fraction of the variable's histogram entries that satisfy
 * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.
 *
 * This code will work for any boolean-returning predicate operator, whether
 * or not it has anything to do with the histogram sort operator.  We are
 * essentially using the histogram just as a representative sample.  However,
 * small histograms are unlikely to be all that representative, so the caller
 * should be prepared to fall back on some other estimation approach when the
 * histogram is missing or very small.  It may also be prudent to combine this
 * approach with another one when the histogram is small.
 *
 * If the actual histogram size is not at least min_hist_size, we won't bother
 * to do the calculation at all.  Also, if the n_skip parameter is > 0, we
 * ignore the first and last n_skip histogram elements, on the grounds that
 * they are outliers and hence not very representative.  Typical values for
 * these parameters are 10 and 1.
 *
 * The function result is the selectivity, or -1 if there is no histogram
 * or it's smaller than min_hist_size.
 *
 * The output parameter *hist_size receives the actual histogram size,
 * or zero if no histogram.  Callers may use this number to decide how
 * much faith to put in the function result.
 *
 * Note that the result disregards both the most-common-values (if any) and
 * null entries.  The caller is expected to combine this result with
 * statistics for those portions of the column population.  It may also be
 * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
 */
double
histogram_selectivity(VariableStatData *vardata,
					  FmgrInfo *opproc, Oid collation,
					  Datum constval, bool varonleft,
					  int min_hist_size, int n_skip,
					  int *hist_size)
{
	double		result;
	AttStatsSlot sslot;

	/* check sanity of parameters */
	Assert(n_skip >= 0);
	Assert(min_hist_size > 2 * n_skip);

	if (HeapTupleIsValid(vardata->statsTuple) &&
		statistic_proc_security_check(vardata, opproc->fn_oid) &&
		get_attstatsslot(&sslot, vardata->statsTuple,
						 STATISTIC_KIND_HISTOGRAM, InvalidOid,
						 ATTSTATSSLOT_VALUES))
	{
		*hist_size = sslot.nvalues;
		if (sslot.nvalues >= min_hist_size)
		{
			LOCAL_FCINFO(fcinfo, 2);
			int			nmatch = 0;
			int			i;

			/*
			 * We invoke the opproc "by hand" so that we won't fail on NULL
			 * results.  Such cases won't arise for normal comparison
			 * functions, but generic_restriction_selectivity could perhaps be
			 * used with operators that can return NULL.  A small side benefit
			 * is to not need to re-initialize the fcinfo struct from scratch
			 * each time.
			 */
			InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
									 NULL, NULL);
			fcinfo->args[0].isnull = false;
			fcinfo->args[1].isnull = false;
			/* be careful to apply operator right way 'round */
			if (varonleft)
				fcinfo->args[1].value = constval;
			else
				fcinfo->args[0].value = constval;

			for (i = n_skip; i < sslot.nvalues - n_skip; i++)
			{
				Datum		fresult;

				if (varonleft)
					fcinfo->args[0].value = sslot.values[i];
				else
					fcinfo->args[1].value = sslot.values[i];
				fcinfo->isnull = false;
				fresult = FunctionCallInvoke(fcinfo);
				if (!fcinfo->isnull && DatumGetBool(fresult))
					nmatch++;
			}
			result = ((double) nmatch) / ((double) (sslot.nvalues - 2 * n_skip));
		}
		else
			result = -1;
		free_attstatsslot(&sslot);
	}
	else
	{
		*hist_size = 0;
		result = -1;
	}

	return result;
}
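
/*
 * Example of the parameters recommended above: a caller passing
 * min_hist_size = 10 and n_skip = 1 against a 100-entry histogram gets
 * back nmatch / 98, since the two endpoint entries are skipped as likely
 * outliers; against a 9-entry histogram it gets -1 (with *hist_size set
 * to 9) and must fall back on some other estimate, as
 * generic_restriction_selectivity() below does.
 */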

/*
 *	generic_restriction_selectivity		- Selectivity for almost anything
 *
 * This function estimates selectivity for operators that we don't have any
 * special knowledge about, but are on data types that we collect standard
 * MCV and/or histogram statistics for.  (Additional assumptions are that
 * the operator is strict and immutable, or at least stable.)
 *
 * If we have "VAR OP CONST" or "CONST OP VAR", selectivity is estimated by
 * applying the operator to each element of the column's MCV and/or histogram
 * stats, and merging the results using the assumption that the histogram is
 * a reasonable random sample of the column's non-MCV population.  Note that
 * if the operator's semantics are related to the histogram ordering, this
 * might not be such a great assumption; other functions such as
 * scalarineqsel() are probably a better match in such cases.
 *
 * Otherwise, fall back to the default selectivity provided by the caller.
 */
double
generic_restriction_selectivity(PlannerInfo *root, Oid oproid, Oid collation,
								List *args, int varRelid,
								double default_selectivity)
{
	double		selec;
	VariableStatData vardata;
	Node	   *other;
	bool		varonleft;

	/*
	 * If expression is not variable OP something or something OP variable,
	 * then punt and return the default estimate.
	 */
	if (!get_restriction_variable(root, args, varRelid,
								  &vardata, &other, &varonleft))
		return default_selectivity;

	/*
	 * If the something is a NULL constant, assume operator is strict and
	 * return zero, ie, operator will never return TRUE.
	 */
	if (IsA(other, Const) &&
		((Const *) other)->constisnull)
	{
		ReleaseVariableStats(vardata);
		return 0.0;
	}

	if (IsA(other, Const))
	{
		/* Variable is being compared to a known non-null constant */
		Datum		constval = ((Const *) other)->constvalue;
		FmgrInfo	opproc;
		double		mcvsum;
		double		mcvsel;
		double		nullfrac;
		int			hist_size;

		fmgr_info(get_opcode(oproid), &opproc);

		/*
		 * Calculate the selectivity for the column's most common values.
		 */
		mcvsel = mcv_selectivity(&vardata, &opproc, collation,
								 constval, varonleft,
								 &mcvsum);

		/*
		 * If the histogram is large enough, see what fraction of it matches
		 * the query, and assume that's representative of the non-MCV
		 * population.  Otherwise use the default selectivity for the non-MCV
		 * population.
		 */
		selec = histogram_selectivity(&vardata, &opproc, collation,
									  constval, varonleft,
									  10, 1, &hist_size);
		if (selec < 0)
		{
			/* Nope, fall back on default */
			selec = default_selectivity;
		}
		else if (hist_size < 100)
		{
			/*
			 * For histogram sizes from 10 to 100, we combine the histogram
			 * and default selectivities, putting increasingly more trust in
			 * the histogram for larger sizes.
			 */
			double		hist_weight = hist_size / 100.0;

			selec = selec * hist_weight +
				default_selectivity * (1.0 - hist_weight);
		}

		/* In any case, don't believe extremely small or large estimates. */
		if (selec < 0.0001)
			selec = 0.0001;
		else if (selec > 0.9999)
			selec = 0.9999;

		/* Don't forget to account for nulls. */
		if (HeapTupleIsValid(vardata.statsTuple))
			nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
		else
			nullfrac = 0.0;

		/*
		 * Now merge the results from the MCV and histogram calculations,
		 * realizing that the histogram covers only the non-null values that
		 * are not listed in MCV.
		 */
		selec *= 1.0 - nullfrac - mcvsum;
		selec += mcvsel;
	}
	else
	{
		/* Comparison value is not constant, so we can't do anything */
		selec = default_selectivity;
	}

	ReleaseVariableStats(vardata);

	/* result should be in range, but make sure... */
	CLAMP_PROBABILITY(selec);

	return selec;
}
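
/*
 * Worked example of the blending above (illustrative numbers only): with
 * a 40-entry histogram yielding selec = 0.25 and a default_selectivity
 * of 0.005, the histogram weight is 40/100 = 0.4, so
 *
 *		selec = 0.25 * 0.4 + 0.005 * 0.6 = 0.103
 *
 * before the null and MCV corrections are applied.
 */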

/*
 *	ineq_histogram_selectivity	- Examine the histogram for scalarineqsel
 *
 * Determine the fraction of the variable's histogram population that
 * satisfies the inequality condition, ie, VAR < (or <=, >, >=) CONST.
 * The isgt and iseq flags distinguish which of the four cases apply.
 *
 * While opproc could be looked up from the operator OID, common callers
 * also need to call it separately, so we make the caller pass both.
 *
 * Returns -1 if there is no histogram (valid results will always be >= 0).
 *
 * Note that the result disregards both the most-common-values (if any) and
 * null entries.  The caller is expected to combine this result with
 * statistics for those portions of the column population.
 *
 * This is exported so that some other estimation functions can use it.
 */
double
ineq_histogram_selectivity(PlannerInfo *root,
						   VariableStatData *vardata,
						   Oid opoid, FmgrInfo *opproc, bool isgt, bool iseq,
						   Oid collation,
						   Datum constval, Oid consttype)
{
	double		hist_selec;
	AttStatsSlot sslot;

	hist_selec = -1.0;

	/*
	 * Someday, ANALYZE might store more than one histogram per rel/att,
	 * corresponding to more than one possible sort ordering defined for the
	 * column type.  Right now, we know there is only one, so just grab it and
	 * see if it matches the query.
	 *
	 * Note that we can't use opoid as search argument; the staop appearing in
	 * pg_statistic will be for the relevant '<' operator, but what we have
	 * might be some other inequality operator such as '>='.  (Even if opoid
	 * is a '<' operator, it could be cross-type.)  Hence we must use
	 * comparison_ops_are_compatible() to see if the operators match.
	 */
	if (HeapTupleIsValid(vardata->statsTuple) &&
		statistic_proc_security_check(vardata, opproc->fn_oid) &&
		get_attstatsslot(&sslot, vardata->statsTuple,
						 STATISTIC_KIND_HISTOGRAM, InvalidOid,
						 ATTSTATSSLOT_VALUES))
	{
		if (sslot.nvalues > 1 &&
			sslot.stacoll == collation &&
			comparison_ops_are_compatible(sslot.staop, opoid))
		{
			/*
			 * Use binary search to find the desired location, namely the
			 * right end of the histogram bin containing the comparison value,
			 * which is the leftmost entry for which the comparison operator
			 * succeeds (if isgt) or fails (if !isgt).
			 *
			 * In this loop, we pay no attention to whether the operator iseq
			 * or not; that detail will be mopped up below.  (We cannot tell,
			 * anyway, whether the operator thinks the values are equal.)
			 *
			 * If the binary search accesses the first or last histogram
			 * entry, we try to replace that endpoint with the true column min
			 * or max as found by get_actual_variable_range().  This
			 * ameliorates misestimates when the min or max is moving as a
			 * result of changes since the last ANALYZE.  Note that this could
			 * result in effectively including MCVs into the histogram that
			 * weren't there before, but we don't try to correct for that.
			 */
			double		histfrac;
			int			lobound = 0;	/* first possible slot to search */
			int			hibound = sslot.nvalues;	/* last+1 slot to search */
			bool		have_end = false;

			/*
			 * If there are only two histogram entries, we'll want up-to-date
			 * values for both.  (If there are more than two, we need at most
			 * one of them to be updated, so we deal with that within the
			 * loop.)
			 */
			if (sslot.nvalues == 2)
				have_end = get_actual_variable_range(root,
													 vardata,
													 sslot.staop,
													 collation,
													 &sslot.values[0],
													 &sslot.values[1]);

			while (lobound < hibound)
			{
				int			probe = (lobound + hibound) / 2;
				bool		ltcmp;

				/*
				 * If we find ourselves about to compare to the first or last
				 * histogram entry, first try to replace it with the actual
				 * current min or max (unless we already did so above).
				 */
				if (probe == 0 && sslot.nvalues > 2)
					have_end = get_actual_variable_range(root,
														 vardata,
														 sslot.staop,
														 collation,
														 &sslot.values[0],
														 NULL);
				else if (probe == sslot.nvalues - 1 && sslot.nvalues > 2)
					have_end = get_actual_variable_range(root,
														 vardata,
														 sslot.staop,
														 collation,
														 NULL,
														 &sslot.values[probe]);

				ltcmp = DatumGetBool(FunctionCall2Coll(opproc,
													   collation,
													   sslot.values[probe],
													   constval));
				if (isgt)
					ltcmp = !ltcmp;
				if (ltcmp)
					lobound = probe + 1;
				else
					hibound = probe;
			}

			if (lobound <= 0)
			{
				/*
				 * Constant is below lower histogram boundary.  More
				 * precisely, we have found that no entry in the histogram
				 * satisfies the inequality clause (if !isgt) or they all do
				 * (if isgt).  We estimate that that's true of the entire
				 * table, so set histfrac to 0.0 (which we'll flip to 1.0
				 * below, if isgt).
				 */
				histfrac = 0.0;
			}
			else if (lobound >= sslot.nvalues)
			{
				/*
				 * Inverse case: constant is above upper histogram boundary.
				 */
				histfrac = 1.0;
			}
			else
			{
				/* We have values[i-1] <= constant <= values[i]. */
				int			i = lobound;
				double		eq_selec = 0;
				double		val,
							high,
							low;
				double		binfrac;

				/*
				 * In the cases where we'll need it below, obtain an estimate
				 * of the selectivity of "x = constval".  We use a calculation
				 * similar to what var_eq_const() does for a non-MCV constant,
				 * ie, estimate that all distinct non-MCV values occur equally
				 * often.  But multiplication by "1.0 - sumcommon - nullfrac"
				 * will be done by our caller, so we shouldn't do that here.
				 * Therefore we can't try to clamp the estimate by reference
				 * to the least common MCV; the result would be too small.
				 *
				 * Note: since this is effectively assuming that constval
				 * isn't an MCV, it's logically dubious if constval in fact is
				 * one.  But we have to apply *some* correction for equality,
				 * and anyway we cannot tell if constval is an MCV, since we
				 * don't have a suitable equality operator at hand.
				 */
				if (i == 1 || isgt == iseq)
				{
					double		otherdistinct;
					bool		isdefault;
					AttStatsSlot mcvslot;

					/* Get estimated number of distinct values */
					otherdistinct = get_variable_numdistinct(vardata,
															 &isdefault);

					/* Subtract off the number of known MCVs */
					if (get_attstatsslot(&mcvslot, vardata->statsTuple,
										 STATISTIC_KIND_MCV, InvalidOid,
										 ATTSTATSSLOT_NUMBERS))
					{
						otherdistinct -= mcvslot.nnumbers;
						free_attstatsslot(&mcvslot);
					}

					/* If result doesn't seem sane, leave eq_selec at 0 */
					if (otherdistinct > 1)
						eq_selec = 1.0 / otherdistinct;
				}

				/*
				 * Convert the constant and the two nearest bin boundary
				 * values to a uniform comparison scale, and do a linear
				 * interpolation within this bin.
				 */
				if (convert_to_scalar(constval, consttype, collation,
									  &val,
									  sslot.values[i - 1], sslot.values[i],
									  vardata->vartype,
									  &low, &high))
				{
					if (high <= low)
					{
						/* cope if bin boundaries appear identical */
						binfrac = 0.5;
					}
					else if (val <= low)
						binfrac = 0.0;
					else if (val >= high)
						binfrac = 1.0;
					else
					{
						binfrac = (val - low) / (high - low);

						/*
						 * Watch out for the possibility that we got a NaN or
						 * Infinity from the division.  This can happen
						 * despite the previous checks, if for example "low"
						 * is -Infinity.
						 */
						if (isnan(binfrac) ||
							binfrac < 0.0 || binfrac > 1.0)
							binfrac = 0.5;
					}
				}
				else
				{
					/*
					 * Ideally we'd produce an error here, on the grounds that
					 * the given operator shouldn't have scalarXXsel
					 * registered as its selectivity func unless we can deal
					 * with its operand types.  But currently, all manner of
					 * stuff is invoking scalarXXsel, so give a default
					 * estimate until that can be fixed.
					 */
					binfrac = 0.5;
				}

				/*
				 * Now, compute the overall selectivity across the values
				 * represented by the histogram.  We have i-1 full bins and
				 * binfrac partial bin below the constant.
				 */
				histfrac = (double) (i - 1) + binfrac;
				histfrac /= (double) (sslot.nvalues - 1);

				/*
				 * At this point, histfrac is an estimate of the fraction of
				 * the population represented by the histogram that satisfies
				 * "x <= constval".  Somewhat remarkably, this statement is
				 * true regardless of which operator we were doing the probes
				 * with, so long as convert_to_scalar() delivers reasonable
				 * results.  If the probe constant is equal to some histogram
				 * entry, we would have considered the bin to the left of that
				 * entry if probing with "<" or ">=", or the bin to the right
				 * if probing with "<=" or ">"; but binfrac would have come
				 * out as 1.0 in the first case and 0.0 in the second, leading
				 * to the same histfrac in either case.  For probe constants
				 * between histogram entries, we find the same bin and get the
				 * same estimate with any operator.
				 *
				 * The fact that the estimate corresponds to "x <= constval"
				 * and not "x < constval" is because of the way that ANALYZE
				 * constructs the histogram: each entry is, effectively, the
				 * rightmost value in its sample bucket.  So selectivity
				 * values that are exact multiples of 1/(histogram_size-1)
				 * should be understood as estimates including a histogram
				 * entry plus everything to its left.
				 *
				 * However, that breaks down for the first histogram entry,
				 * which necessarily is the leftmost value in its sample
				 * bucket.  That means the first histogram bin is slightly
				 * narrower than the rest, by an amount equal to eq_selec.
				 * Another way to say that is that we want "x <= leftmost" to
				 * be estimated as eq_selec not zero.  So, if we're dealing
				 * with the first bin (i==1), rescale to make that true while
				 * adjusting the rest of that bin linearly.
				 */
				if (i == 1)
					histfrac += eq_selec * (1.0 - binfrac);

				/*
				 * "x <= constval" is good if we want an estimate for "<=" or
				 * ">", but if we are estimating for "<" or ">=", we now need
				 * to decrease the estimate by eq_selec.
				 */
				if (isgt == iseq)
					histfrac -= eq_selec;
			}

			/*
			 * Now the estimate is finished for "<" and "<=" cases.  If we are
			 * estimating for ">" or ">=", flip it.
			 */
			hist_selec = isgt ? (1.0 - histfrac) : histfrac;

			/*
			 * The histogram boundaries are only approximate to begin with,
			 * and may well be out of date anyway.  Therefore, don't believe
			 * extremely small or large selectivity estimates --- unless we
			 * got actual current endpoint values from the table, in which
			 * case just do the usual sanity clamp.  Somewhat arbitrarily, we
			 * set the cutoff for other cases at a hundredth of the histogram
			 * resolution.
			 */
			if (have_end)
				CLAMP_PROBABILITY(hist_selec);
			else
			{
				double		cutoff = 0.01 / (double) (sslot.nvalues - 1);

				if (hist_selec < cutoff)
					hist_selec = cutoff;
				else if (hist_selec > 1.0 - cutoff)
					hist_selec = 1.0 - cutoff;
			}
		}
		else if (sslot.nvalues > 1)
		{
			/*
			 * If we get here, we have a histogram but it's not sorted the way
			 * we want.  Do a brute-force search to see how many of the
			 * entries satisfy the comparison condition, and take that
			 * fraction as our estimate.  (This is identical to the inner loop
			 * of histogram_selectivity; maybe share code?)
			 */
			LOCAL_FCINFO(fcinfo, 2);
			int			nmatch = 0;

			InitFunctionCallInfoData(*fcinfo, opproc, 2, collation,
									 NULL, NULL);
			fcinfo->args[0].isnull = false;
			fcinfo->args[1].isnull = false;
			fcinfo->args[1].value = constval;
			for (int i = 0; i < sslot.nvalues; i++)
			{
				Datum		fresult;

				fcinfo->args[0].value = sslot.values[i];
				fcinfo->isnull = false;
				fresult = FunctionCallInvoke(fcinfo);
				if (!fcinfo->isnull && DatumGetBool(fresult))
					nmatch++;
			}
			hist_selec = ((double) nmatch) / ((double) sslot.nvalues);

			/*
			 * As above, clamp to a hundredth of the histogram resolution.
			 * This case is surely even less trustworthy than the normal one,
			 * so we shouldn't believe exact 0 or 1 selectivity.  (Maybe the
			 * clamp should be more restrictive in this case?)
			 */
			{
				double		cutoff = 0.01 / (double) (sslot.nvalues - 1);

				if (hist_selec < cutoff)
					hist_selec = cutoff;
				else if (hist_selec > 1.0 - cutoff)
					hist_selec = 1.0 - cutoff;
			}
		}

		free_attstatsslot(&sslot);
	}

	return hist_selec;
}
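
/*
 * Worked example of the interpolation above (illustrative numbers only):
 * for "x < 25" against a 5-entry histogram {0, 10, 20, 40, 50}, binary
 * search locates the constant in the bin [20, 40], ie i = 3, and
 *
 *		binfrac  = (25 - 20) / (40 - 20) = 0.25
 *		histfrac = ((3 - 1) + 0.25) / (5 - 1) = 0.5625
 *
 * so about 56% of the histogram population is estimated to satisfy
 * "x <= 25"; since the operator here is "<" rather than "<=", the
 * eq_selec correction then subtracts the estimated frequency of
 * x = 25 itself.
 */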

/*
 * Common wrapper function for the selectivity estimators that simply
 * invoke scalarineqsel().
 */
static Datum
scalarineqsel_wrapper(PG_FUNCTION_ARGS, bool isgt, bool iseq)
{
	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
	Oid			operator = PG_GETARG_OID(1);
	List	   *args = (List *) PG_GETARG_POINTER(2);
	int			varRelid = PG_GETARG_INT32(3);
	Oid			collation = PG_GET_COLLATION();
	VariableStatData vardata;
	Node	   *other;
	bool		varonleft;
	Datum		constval;
	Oid			consttype;
	double		selec;

	/*
	 * If expression is not variable op something or something op variable,
	 * then punt and return a default estimate.
	 */
	if (!get_restriction_variable(root, args, varRelid,
								  &vardata, &other, &varonleft))
		PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);

	/*
	 * Can't do anything useful if the something is not a constant, either.
	 */
	if (!IsA(other, Const))
	{
		ReleaseVariableStats(vardata);
		PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
	}

	/*
	 * If the constant is NULL, assume operator is strict and return zero, ie,
	 * operator will never return TRUE.
	 */
	if (((Const *) other)->constisnull)
	{
		ReleaseVariableStats(vardata);
		PG_RETURN_FLOAT8(0.0);
	}
	constval = ((Const *) other)->constvalue;
	consttype = ((Const *) other)->consttype;

	/*
	 * Force the var to be on the left to simplify logic in scalarineqsel.
	 */
	if (!varonleft)
	{
		operator = get_commutator(operator);
		if (!operator)
		{
			/* Use default selectivity (should we raise an error instead?) */
			ReleaseVariableStats(vardata);
			PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
		}
		isgt = !isgt;
	}

	/* The rest of the work is done by scalarineqsel(). */
	selec = scalarineqsel(root, operator, isgt, iseq, collation,
						  &vardata, constval, consttype);

	ReleaseVariableStats(vardata);

	PG_RETURN_FLOAT8((float8) selec);
}
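
/*
 * Example of the commutation above: given "5 > x" (so the variable is on
 * the right and isgt = true), the wrapper swaps in the commutator of ">"
 * and flips isgt, so scalarineqsel() is effectively asked about "x < 5"
 * with the variable on the left.
 */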
1463 
1464 /*
1465  *		scalarltsel		- Selectivity of "<" for scalars.
1466  */
1467 Datum
scalarltsel(PG_FUNCTION_ARGS)1468 scalarltsel(PG_FUNCTION_ARGS)
1469 {
1470 	return scalarineqsel_wrapper(fcinfo, false, false);
1471 }
1472 
1473 /*
1474  *		scalarlesel		- Selectivity of "<=" for scalars.
1475  */
1476 Datum
scalarlesel(PG_FUNCTION_ARGS)1477 scalarlesel(PG_FUNCTION_ARGS)
1478 {
1479 	return scalarineqsel_wrapper(fcinfo, false, true);
1480 }
1481 
1482 /*
1483  *		scalargtsel		- Selectivity of ">" for scalars.
1484  */
1485 Datum
scalargtsel(PG_FUNCTION_ARGS)1486 scalargtsel(PG_FUNCTION_ARGS)
1487 {
1488 	return scalarineqsel_wrapper(fcinfo, true, false);
1489 }
1490 
1491 /*
1492  *		scalargesel		- Selectivity of ">=" for scalars.
1493  */
1494 Datum
scalargesel(PG_FUNCTION_ARGS)1495 scalargesel(PG_FUNCTION_ARGS)
1496 {
1497 	return scalarineqsel_wrapper(fcinfo, true, true);
1498 }
1499 
1500 /*
1501  *		boolvarsel		- Selectivity of Boolean variable.
1502  *
1503  * This can actually be called on any boolean-valued expression.  If it
1504  * involves only Vars of the specified relation, and if there are statistics
1505  * about the Var or expression (the latter is possible if it's indexed) then
1506  * we'll produce a real estimate; otherwise it's just a default.
1507  */
1508 Selectivity
boolvarsel(PlannerInfo * root,Node * arg,int varRelid)1509 boolvarsel(PlannerInfo *root, Node *arg, int varRelid)
1510 {
1511 	VariableStatData vardata;
1512 	double		selec;
1513 
1514 	examine_variable(root, arg, varRelid, &vardata);
1515 	if (HeapTupleIsValid(vardata.statsTuple))
1516 	{
1517 		/*
1518 		 * A boolean variable V is equivalent to the clause V = 't', so we
1519 		 * compute the selectivity as if that is what we have.
1520 		 */
1521 		selec = var_eq_const(&vardata, BooleanEqualOperator, InvalidOid,
1522 							 BoolGetDatum(true), false, true, false);
1523 	}
1524 	else
1525 	{
1526 		/* Otherwise, the default estimate is 0.5 */
1527 		selec = 0.5;
1528 	}
1529 	ReleaseVariableStats(vardata);
1530 	return selec;
1531 }
1532 
1533 /*
1534  *		booltestsel		- Selectivity of BooleanTest Node.
1535  */
1536 Selectivity
1537 booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
1538 			int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1539 {
1540 	VariableStatData vardata;
1541 	double		selec;
1542 
1543 	examine_variable(root, arg, varRelid, &vardata);
1544 
1545 	if (HeapTupleIsValid(vardata.statsTuple))
1546 	{
1547 		Form_pg_statistic stats;
1548 		double		freq_null;
1549 		AttStatsSlot sslot;
1550 
1551 		stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1552 		freq_null = stats->stanullfrac;
1553 
1554 		if (get_attstatsslot(&sslot, vardata.statsTuple,
1555 							 STATISTIC_KIND_MCV, InvalidOid,
1556 							 ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS)
1557 			&& sslot.nnumbers > 0)
1558 		{
1559 			double		freq_true;
1560 			double		freq_false;
1561 
1562 			/*
1563 			 * Get first MCV frequency and derive frequency for true.
1564 			 */
1565 			if (DatumGetBool(sslot.values[0]))
1566 				freq_true = sslot.numbers[0];
1567 			else
1568 				freq_true = 1.0 - sslot.numbers[0] - freq_null;
1569 
1570 			/*
1571 			 * Next derive the frequency for false.  Then use these values
1572 			 * as appropriate to compute the selectivity for each test type.
1573 			 */
1574 			freq_false = 1.0 - freq_true - freq_null;
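
			/*
			 * Worked example with made-up statistics: if the MCV slot's
			 * first entry is 'true' with frequency 0.6 and stanullfrac is
			 * 0.1, then freq_true = 0.6 and freq_false = 1.0 - 0.6 - 0.1 =
			 * 0.3; IS_NOT_TRUE below then yields 0.4, i.e. the false rows
			 * plus the NULL rows.
			 */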
1575 
1576 			switch (booltesttype)
1577 			{
1578 				case IS_UNKNOWN:
1579 					/* select only NULL values */
1580 					selec = freq_null;
1581 					break;
1582 				case IS_NOT_UNKNOWN:
1583 					/* select non-NULL values */
1584 					selec = 1.0 - freq_null;
1585 					break;
1586 				case IS_TRUE:
1587 					/* select only TRUE values */
1588 					selec = freq_true;
1589 					break;
1590 				case IS_NOT_TRUE:
1591 					/* select non-TRUE values */
1592 					selec = 1.0 - freq_true;
1593 					break;
1594 				case IS_FALSE:
1595 					/* select only FALSE values */
1596 					selec = freq_false;
1597 					break;
1598 				case IS_NOT_FALSE:
1599 					/* select non-FALSE values */
1600 					selec = 1.0 - freq_false;
1601 					break;
1602 				default:
1603 					elog(ERROR, "unrecognized booltesttype: %d",
1604 						 (int) booltesttype);
1605 					selec = 0.0;	/* Keep compiler quiet */
1606 					break;
1607 			}
1608 
1609 			free_attstatsslot(&sslot);
1610 		}
1611 		else
1612 		{
1613 			/*
1614 			 * No most-common-value info available. Still have null fraction
1615 			 * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1616 			 * for null fraction and assume a 50-50 split of TRUE and FALSE.
1617 			 */
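			/*
			 * For instance (illustrative numbers only): with stanullfrac =
			 * 0.2, IS_TRUE and IS_FALSE are each estimated at (1.0 - 0.2) /
			 * 2.0 = 0.4, while IS_NOT_TRUE and IS_NOT_FALSE each get (0.2 +
			 * 1.0) / 2.0 = 0.6.
			 */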
1618 			switch (booltesttype)
1619 			{
1620 				case IS_UNKNOWN:
1621 					/* select only NULL values */
1622 					selec = freq_null;
1623 					break;
1624 				case IS_NOT_UNKNOWN:
1625 					/* select non-NULL values */
1626 					selec = 1.0 - freq_null;
1627 					break;
1628 				case IS_TRUE:
1629 				case IS_FALSE:
1630 					/* Assume we select half of the non-NULL values */
1631 					selec = (1.0 - freq_null) / 2.0;
1632 					break;
1633 				case IS_NOT_TRUE:
1634 				case IS_NOT_FALSE:
1635 					/* Assume we select NULLs plus half of the non-NULLs */
1636 					/* equiv. to freq_null + (1.0 - freq_null) / 2.0 */
1637 					selec = (freq_null + 1.0) / 2.0;
1638 					break;
1639 				default:
1640 					elog(ERROR, "unrecognized booltesttype: %d",
1641 						 (int) booltesttype);
1642 					selec = 0.0;	/* Keep compiler quiet */
1643 					break;
1644 			}
1645 		}
1646 	}
1647 	else
1648 	{
1649 		/*
1650 		 * If we can't get variable statistics for the argument, perhaps
1651 		 * clause_selectivity can do something with it.  We ignore the
1652 		 * possibility of a NULL value when using clause_selectivity, and just
1653 		 * assume the value is either TRUE or FALSE.
1654 		 */
1655 		switch (booltesttype)
1656 		{
1657 			case IS_UNKNOWN:
1658 				selec = DEFAULT_UNK_SEL;
1659 				break;
1660 			case IS_NOT_UNKNOWN:
1661 				selec = DEFAULT_NOT_UNK_SEL;
1662 				break;
1663 			case IS_TRUE:
1664 			case IS_NOT_FALSE:
1665 				selec = (double) clause_selectivity(root, arg,
1666 													varRelid,
1667 													jointype, sjinfo);
1668 				break;
1669 			case IS_FALSE:
1670 			case IS_NOT_TRUE:
1671 				selec = 1.0 - (double) clause_selectivity(root, arg,
1672 														  varRelid,
1673 														  jointype, sjinfo);
1674 				break;
1675 			default:
1676 				elog(ERROR, "unrecognized booltesttype: %d",
1677 					 (int) booltesttype);
1678 				selec = 0.0;	/* Keep compiler quiet */
1679 				break;
1680 		}
1681 	}
1682 
1683 	ReleaseVariableStats(vardata);
1684 
1685 	/* result should be in range, but make sure... */
1686 	CLAMP_PROBABILITY(selec);
1687 
1688 	return (Selectivity) selec;
1689 }
1690 
1691 /*
1692  *		nulltestsel		- Selectivity of NullTest Node.
1693  */
1694 Selectivity
1695 nulltestsel(PlannerInfo *root, NullTestType nulltesttype, Node *arg,
1696 			int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1697 {
1698 	VariableStatData vardata;
1699 	double		selec;
1700 
1701 	examine_variable(root, arg, varRelid, &vardata);
1702 
1703 	if (HeapTupleIsValid(vardata.statsTuple))
1704 	{
1705 		Form_pg_statistic stats;
1706 		double		freq_null;
1707 
1708 		stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1709 		freq_null = stats->stanullfrac;
1710 
1711 		switch (nulltesttype)
1712 		{
1713 			case IS_NULL:
1714 
1715 				/*
1716 				 * Use freq_null directly.
1717 				 */
1718 				selec = freq_null;
1719 				break;
1720 			case IS_NOT_NULL:
1721 
1722 				/*
1723 				 * Select not unknown (not null) values. Calculate from
1724 				 * freq_null.
1725 				 */
1726 				selec = 1.0 - freq_null;
1727 				break;
1728 			default:
1729 				elog(ERROR, "unrecognized nulltesttype: %d",
1730 					 (int) nulltesttype);
1731 				return (Selectivity) 0; /* keep compiler quiet */
1732 		}
1733 	}
1734 	else if (vardata.var && IsA(vardata.var, Var) &&
1735 			 ((Var *) vardata.var)->varattno < 0)
1736 	{
1737 		/*
1738 		 * There are no stats for system columns, but we know they are never
1739 		 * NULL.
1740 		 */
1741 		selec = (nulltesttype == IS_NULL) ? 0.0 : 1.0;
1742 	}
1743 	else
1744 	{
1745 		/*
1746 		 * No ANALYZE stats available, so make a guess
1747 		 */
1748 		switch (nulltesttype)
1749 		{
1750 			case IS_NULL:
1751 				selec = DEFAULT_UNK_SEL;
1752 				break;
1753 			case IS_NOT_NULL:
1754 				selec = DEFAULT_NOT_UNK_SEL;
1755 				break;
1756 			default:
1757 				elog(ERROR, "unrecognized nulltesttype: %d",
1758 					 (int) nulltesttype);
1759 				return (Selectivity) 0; /* keep compiler quiet */
1760 		}
1761 	}
1762 
1763 	ReleaseVariableStats(vardata);
1764 
1765 	/* result should be in range, but make sure... */
1766 	CLAMP_PROBABILITY(selec);
1767 
1768 	return (Selectivity) selec;
1769 }
1770 
1771 /*
1772  * strip_array_coercion - strip binary-compatible relabeling from an array expr
1773  *
1774  * For array values, the parser normally generates ArrayCoerceExpr conversions,
1775  * but it seems possible that RelabelType might show up.  Also, the planner
1776  * is not currently careful about collapsing stacked ArrayCoerceExpr nodes,
1777  * so we need to be ready to deal with more than one level.
1778  */
1779 static Node *
1780 strip_array_coercion(Node *node)
1781 {
1782 	for (;;)
1783 	{
1784 		if (node && IsA(node, ArrayCoerceExpr))
1785 		{
1786 			ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
1787 
1788 			/*
1789 			 * If the per-element expression is just a RelabelType on top of
1790 			 * CaseTestExpr, then we know it's a binary-compatible relabeling.
1791 			 */
1792 			if (IsA(acoerce->elemexpr, RelabelType) &&
1793 				IsA(((RelabelType *) acoerce->elemexpr)->arg, CaseTestExpr))
1794 				node = (Node *) acoerce->arg;
1795 			else
1796 				break;
1797 		}
1798 		else if (node && IsA(node, RelabelType))
1799 		{
1800 			/* We don't really expect this case, but may as well cope */
1801 			node = (Node *) ((RelabelType *) node)->arg;
1802 		}
1803 		else
1804 			break;
1805 	}
1806 	return node;
1807 }
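
/*
 * Hypothetical example of what strip_array_coercion handles: in a clause
 * like "x = ANY (arr::text[])" where arr is a varchar[] expression, the
 * parser wraps arr in an ArrayCoerceExpr whose per-element expression is a
 * RelabelType over a CaseTestExpr, varchar-to-text being binary-compatible.
 * Stripping that wrapper lets the estimators see the underlying array
 * expression directly.
 */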
1808 
1809 /*
1810  *		scalararraysel		- Selectivity of ScalarArrayOpExpr Node.
1811  */
1812 Selectivity
1813 scalararraysel(PlannerInfo *root,
1814 			   ScalarArrayOpExpr *clause,
1815 			   bool is_join_clause,
1816 			   int varRelid,
1817 			   JoinType jointype,
1818 			   SpecialJoinInfo *sjinfo)
1819 {
1820 	Oid			operator = clause->opno;
1821 	bool		useOr = clause->useOr;
1822 	bool		isEquality = false;
1823 	bool		isInequality = false;
1824 	Node	   *leftop;
1825 	Node	   *rightop;
1826 	Oid			nominal_element_type;
1827 	Oid			nominal_element_collation;
1828 	TypeCacheEntry *typentry;
1829 	RegProcedure oprsel;
1830 	FmgrInfo	oprselproc;
1831 	Selectivity s1;
1832 	Selectivity s1disjoint;
1833 
1834 	/* First, deconstruct the expression */
1835 	Assert(list_length(clause->args) == 2);
1836 	leftop = (Node *) linitial(clause->args);
1837 	rightop = (Node *) lsecond(clause->args);
1838 
1839 	/* aggressively reduce both sides to constants */
1840 	leftop = estimate_expression_value(root, leftop);
1841 	rightop = estimate_expression_value(root, rightop);
1842 
1843 	/* get nominal (after relabeling) element type of rightop */
1844 	nominal_element_type = get_base_element_type(exprType(rightop));
1845 	if (!OidIsValid(nominal_element_type))
1846 		return (Selectivity) 0.5;	/* probably shouldn't happen */
1847 	/* get nominal collation, too, for generating constants */
1848 	nominal_element_collation = exprCollation(rightop);
1849 
1850 	/* look through any binary-compatible relabeling of rightop */
1851 	rightop = strip_array_coercion(rightop);
1852 
1853 	/*
1854 	 * Detect whether the operator is the default equality or inequality
1855 	 * operator of the array element type.
1856 	 */
1857 	typentry = lookup_type_cache(nominal_element_type, TYPECACHE_EQ_OPR);
1858 	if (OidIsValid(typentry->eq_opr))
1859 	{
1860 		if (operator == typentry->eq_opr)
1861 			isEquality = true;
1862 		else if (get_negator(operator) == typentry->eq_opr)
1863 			isInequality = true;
1864 	}
1865 
1866 	/*
1867 	 * If it is equality or inequality, we might be able to estimate this as a
1868 	 * form of array containment; for instance "const = ANY(column)" can be
1869 	 * treated as "ARRAY[const] <@ column".  scalararraysel_containment tries
1870 	 * that, and returns the selectivity estimate if successful, or -1 if not.
1871 	 */
1872 	if ((isEquality || isInequality) && !is_join_clause)
1873 	{
1874 		s1 = scalararraysel_containment(root, leftop, rightop,
1875 										nominal_element_type,
1876 										isEquality, useOr, varRelid);
1877 		if (s1 >= 0.0)
1878 			return s1;
1879 	}
1880 
1881 	/*
1882 	 * Look up the underlying operator's selectivity estimator. Punt if it
1883 	 * hasn't got one.
1884 	 */
1885 	if (is_join_clause)
1886 		oprsel = get_oprjoin(operator);
1887 	else
1888 		oprsel = get_oprrest(operator);
1889 	if (!oprsel)
1890 		return (Selectivity) 0.5;
1891 	fmgr_info(oprsel, &oprselproc);
1892 
1893 	/*
1894 	 * In the array-containment check above, we must only believe that an
1895 	 * operator is equality or inequality if it is the default btree equality
1896 	 * operator (or its negator) for the element type, since those are the
1897 	 * operators that array containment will use.  But in what follows, we can
1898 	 * be a little laxer, and also believe that any operators using eqsel() or
1899 	 * neqsel() as selectivity estimator act like equality or inequality.
1900 	 */
1901 	if (oprsel == F_EQSEL || oprsel == F_EQJOINSEL)
1902 		isEquality = true;
1903 	else if (oprsel == F_NEQSEL || oprsel == F_NEQJOINSEL)
1904 		isInequality = true;
1905 
1906 	/*
1907 	 * We consider three cases:
1908 	 *
1909 	 * 1. rightop is an Array constant: deconstruct the array, apply the
1910 	 * operator's selectivity function for each array element, and merge the
1911 	 * results in the same way that clausesel.c does for AND/OR combinations.
1912 	 *
1913 	 * 2. rightop is an ARRAY[] construct: apply the operator's selectivity
1914 	 * function for each element of the ARRAY[] construct, and merge.
1915 	 *
1916 	 * 3. otherwise, make a guess ...
1917 	 */
1918 	if (rightop && IsA(rightop, Const))
1919 	{
1920 		Datum		arraydatum = ((Const *) rightop)->constvalue;
1921 		bool		arrayisnull = ((Const *) rightop)->constisnull;
1922 		ArrayType  *arrayval;
1923 		int16		elmlen;
1924 		bool		elmbyval;
1925 		char		elmalign;
1926 		int			num_elems;
1927 		Datum	   *elem_values;
1928 		bool	   *elem_nulls;
1929 		int			i;
1930 
1931 		if (arrayisnull)		/* qual can't succeed if null array */
1932 			return (Selectivity) 0.0;
1933 		arrayval = DatumGetArrayTypeP(arraydatum);
1934 		get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
1935 							 &elmlen, &elmbyval, &elmalign);
1936 		deconstruct_array(arrayval,
1937 						  ARR_ELEMTYPE(arrayval),
1938 						  elmlen, elmbyval, elmalign,
1939 						  &elem_values, &elem_nulls, &num_elems);
1940 
1941 		/*
1942 		 * For generic operators, we assume the probability of success is
1943 		 * independent for each array element.  But for "= ANY" or "<> ALL",
1944 		 * if the array elements are distinct (which'd typically be the case)
1945 		 * then the probabilities are disjoint, and we should just sum them.
1946 		 *
1947 		 * If we were being really tense we would try to confirm that the
1948 		 * elements are all distinct, but that would be expensive and it
1949 		 * doesn't seem to be worth the cycles; it would amount to penalizing
1950 		 * well-written queries in favor of poorly-written ones.  However, we
1951 		 * do protect ourselves a little bit by checking whether the
1952 		 * disjointness assumption leads to an impossible (out of range)
1953 		 * probability; if so, we fall back to the normal calculation.
1954 		 */
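		/*
		 * Worked example of the two combination rules (illustrative numbers
		 * only): for "x = ANY (ARRAY[a,b,c])" with per-element selectivity
		 * s2 = 0.01 each, the independent-OR rule gives 1 - (1 - 0.01)^3
		 * ~= 0.0297, while the disjoint rule gives 0.01 + 0.01 + 0.01 =
		 * 0.03.  The difference grows with s2, which is why the disjoint
		 * sum is preferred for equality whenever it stays in range.
		 */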
1955 		s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1956 
1957 		for (i = 0; i < num_elems; i++)
1958 		{
1959 			List	   *args;
1960 			Selectivity s2;
1961 
1962 			args = list_make2(leftop,
1963 							  makeConst(nominal_element_type,
1964 										-1,
1965 										nominal_element_collation,
1966 										elmlen,
1967 										elem_values[i],
1968 										elem_nulls[i],
1969 										elmbyval));
1970 			if (is_join_clause)
1971 				s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1972 													  clause->inputcollid,
1973 													  PointerGetDatum(root),
1974 													  ObjectIdGetDatum(operator),
1975 													  PointerGetDatum(args),
1976 													  Int16GetDatum(jointype),
1977 													  PointerGetDatum(sjinfo)));
1978 			else
1979 				s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1980 													  clause->inputcollid,
1981 													  PointerGetDatum(root),
1982 													  ObjectIdGetDatum(operator),
1983 													  PointerGetDatum(args),
1984 													  Int32GetDatum(varRelid)));
1985 
1986 			if (useOr)
1987 			{
1988 				s1 = s1 + s2 - s1 * s2;
1989 				if (isEquality)
1990 					s1disjoint += s2;
1991 			}
1992 			else
1993 			{
1994 				s1 = s1 * s2;
1995 				if (isInequality)
1996 					s1disjoint += s2 - 1.0;
1997 			}
1998 		}
1999 
2000 		/* accept disjoint-probability estimate if in range */
2001 		if ((useOr ? isEquality : isInequality) &&
2002 			s1disjoint >= 0.0 && s1disjoint <= 1.0)
2003 			s1 = s1disjoint;
2004 	}
2005 	else if (rightop && IsA(rightop, ArrayExpr) &&
2006 			 !((ArrayExpr *) rightop)->multidims)
2007 	{
2008 		ArrayExpr  *arrayexpr = (ArrayExpr *) rightop;
2009 		int16		elmlen;
2010 		bool		elmbyval;
2011 		ListCell   *l;
2012 
2013 		get_typlenbyval(arrayexpr->element_typeid,
2014 						&elmlen, &elmbyval);
2015 
2016 		/*
2017 		 * We use the assumption of disjoint probabilities here too, although
2018 		 * the odds of equal array elements are rather higher if the elements
2019 		 * are not all constants (which they won't be, else constant folding
2020 		 * would have reduced the ArrayExpr to a Const).  In this path it's
2021 		 * critical to have the sanity check on the s1disjoint estimate.
2022 		 */
2023 		s1 = s1disjoint = (useOr ? 0.0 : 1.0);
2024 
2025 		foreach(l, arrayexpr->elements)
2026 		{
2027 			Node	   *elem = (Node *) lfirst(l);
2028 			List	   *args;
2029 			Selectivity s2;
2030 
2031 			/*
2032 			 * Theoretically, if elem isn't of nominal_element_type we should
2033 			 * insert a RelabelType, but it seems unlikely that any operator
2034 			 * estimation function would really care ...
2035 			 */
2036 			args = list_make2(leftop, elem);
2037 			if (is_join_clause)
2038 				s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2039 													  clause->inputcollid,
2040 													  PointerGetDatum(root),
2041 													  ObjectIdGetDatum(operator),
2042 													  PointerGetDatum(args),
2043 													  Int16GetDatum(jointype),
2044 													  PointerGetDatum(sjinfo)));
2045 			else
2046 				s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2047 													  clause->inputcollid,
2048 													  PointerGetDatum(root),
2049 													  ObjectIdGetDatum(operator),
2050 													  PointerGetDatum(args),
2051 													  Int32GetDatum(varRelid)));
2052 
2053 			if (useOr)
2054 			{
2055 				s1 = s1 + s2 - s1 * s2;
2056 				if (isEquality)
2057 					s1disjoint += s2;
2058 			}
2059 			else
2060 			{
2061 				s1 = s1 * s2;
2062 				if (isInequality)
2063 					s1disjoint += s2 - 1.0;
2064 			}
2065 		}
2066 
2067 		/* accept disjoint-probability estimate if in range */
2068 		if ((useOr ? isEquality : isInequality) &&
2069 			s1disjoint >= 0.0 && s1disjoint <= 1.0)
2070 			s1 = s1disjoint;
2071 	}
2072 	else
2073 	{
2074 		CaseTestExpr *dummyexpr;
2075 		List	   *args;
2076 		Selectivity s2;
2077 		int			i;
2078 
2079 		/*
2080 		 * We need a dummy rightop to pass to the operator selectivity
2081 		 * routine.  It can be pretty much anything that doesn't look like a
2082 		 * constant; CaseTestExpr is a convenient choice.
2083 		 */
2084 		dummyexpr = makeNode(CaseTestExpr);
2085 		dummyexpr->typeId = nominal_element_type;
2086 		dummyexpr->typeMod = -1;
2087 		dummyexpr->collation = clause->inputcollid;
2088 		args = list_make2(leftop, dummyexpr);
2089 		if (is_join_clause)
2090 			s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2091 												  clause->inputcollid,
2092 												  PointerGetDatum(root),
2093 												  ObjectIdGetDatum(operator),
2094 												  PointerGetDatum(args),
2095 												  Int16GetDatum(jointype),
2096 												  PointerGetDatum(sjinfo)));
2097 		else
2098 			s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2099 												  clause->inputcollid,
2100 												  PointerGetDatum(root),
2101 												  ObjectIdGetDatum(operator),
2102 												  PointerGetDatum(args),
2103 												  Int32GetDatum(varRelid)));
2104 		s1 = useOr ? 0.0 : 1.0;
2105 
2106 		/*
2107 		 * Arbitrarily assume 10 elements in the eventual array value (see
2108 		 * also estimate_array_length).  We don't risk an assumption of
2109 		 * disjoint probabilities here.
2110 		 */
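		/*
		 * Illustrative numbers: if s2 = 0.3 and useOr, these ten iterations
		 * give s1 = 1 - (1 - 0.3)^10 ~= 0.972; without useOr they give s1 =
		 * 0.3^10, which is effectively zero.
		 */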
2111 		for (i = 0; i < 10; i++)
2112 		{
2113 			if (useOr)
2114 				s1 = s1 + s2 - s1 * s2;
2115 			else
2116 				s1 = s1 * s2;
2117 		}
2118 	}
2119 
2120 	/* result should be in range, but make sure... */
2121 	CLAMP_PROBABILITY(s1);
2122 
2123 	return s1;
2124 }
2125 
2126 /*
2127  * Estimate number of elements in the array yielded by an expression.
2128  *
2129  * It's important that this agree with scalararraysel.
2130  */
2131 int
2132 estimate_array_length(Node *arrayexpr)
2133 {
2134 	/* look through any binary-compatible relabeling of arrayexpr */
2135 	arrayexpr = strip_array_coercion(arrayexpr);
2136 
2137 	if (arrayexpr && IsA(arrayexpr, Const))
2138 	{
2139 		Datum		arraydatum = ((Const *) arrayexpr)->constvalue;
2140 		bool		arrayisnull = ((Const *) arrayexpr)->constisnull;
2141 		ArrayType  *arrayval;
2142 
2143 		if (arrayisnull)
2144 			return 0;
2145 		arrayval = DatumGetArrayTypeP(arraydatum);
2146 		return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
2147 	}
2148 	else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
2149 			 !((ArrayExpr *) arrayexpr)->multidims)
2150 	{
2151 		return list_length(((ArrayExpr *) arrayexpr)->elements);
2152 	}
2153 	else
2154 	{
2155 		/* default guess --- see also scalararraysel */
2156 		return 10;
2157 	}
2158 }
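
/*
 * For example, estimate_array_length returns 3 for the constant
 * '{1,2,3}'::int4[] or for the construct ARRAY[x, y, z], 0 for a NULL array
 * constant, and the default 10 for anything it cannot inspect, matching the
 * guess used in scalararraysel above.
 */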
2159 
2160 /*
2161  *		rowcomparesel		- Selectivity of RowCompareExpr Node.
2162  *
2163  * We estimate RowCompare selectivity by considering just the first (high
2164  * order) columns, which makes it equivalent to an ordinary OpExpr.  While
2165  * this estimate could be refined by considering additional columns, it
2166  * seems unlikely that we could do a lot better without multi-column
2167  * statistics.
2168  */
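/*
 * For example (illustrative clause), "(a, b) < (10, 20)" is estimated here
 * simply as "a < 10", using only the leading column's operator; the
 * remaining condition on b is ignored.
 */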
2169 Selectivity
2170 rowcomparesel(PlannerInfo *root,
2171 			  RowCompareExpr *clause,
2172 			  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
2173 {
2174 	Selectivity s1;
2175 	Oid			opno = linitial_oid(clause->opnos);
2176 	Oid			inputcollid = linitial_oid(clause->inputcollids);
2177 	List	   *opargs;
2178 	bool		is_join_clause;
2179 
2180 	/* Build equivalent arg list for single operator */
2181 	opargs = list_make2(linitial(clause->largs), linitial(clause->rargs));
2182 
2183 	/*
2184 	 * Decide if it's a join clause.  This should match clausesel.c's
2185 	 * treat_as_join_clause(), except that we intentionally consider only the
2186 	 * leading columns and not the rest of the clause.
2187 	 */
2188 	if (varRelid != 0)
2189 	{
2190 		/*
2191 		 * Caller is forcing restriction mode (eg, because we are examining an
2192 		 * inner indexscan qual).
2193 		 */
2194 		is_join_clause = false;
2195 	}
2196 	else if (sjinfo == NULL)
2197 	{
2198 		/*
2199 		 * It must be a restriction clause, since it's being evaluated at a
2200 		 * scan node.
2201 		 */
2202 		is_join_clause = false;
2203 	}
2204 	else
2205 	{
2206 		/*
2207 		 * Otherwise, it's a join if there's more than one relation used.
2208 		 */
2209 		is_join_clause = (NumRelids(root, (Node *) opargs) > 1);
2210 	}
2211 
2212 	if (is_join_clause)
2213 	{
2214 		/* Estimate selectivity for a join clause. */
2215 		s1 = join_selectivity(root, opno,
2216 							  opargs,
2217 							  inputcollid,
2218 							  jointype,
2219 							  sjinfo);
2220 	}
2221 	else
2222 	{
2223 		/* Estimate selectivity for a restriction clause. */
2224 		s1 = restriction_selectivity(root, opno,
2225 									 opargs,
2226 									 inputcollid,
2227 									 varRelid);
2228 	}
2229 
2230 	return s1;
2231 }
2232 
2233 /*
2234  *		eqjoinsel		- Join selectivity of "="
2235  */
2236 Datum
2237 eqjoinsel(PG_FUNCTION_ARGS)
2238 {
2239 	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2240 	Oid			operator = PG_GETARG_OID(1);
2241 	List	   *args = (List *) PG_GETARG_POINTER(2);
2242 
2243 #ifdef NOT_USED
2244 	JoinType	jointype = (JoinType) PG_GETARG_INT16(3);
2245 #endif
2246 	SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2247 	Oid			collation = PG_GET_COLLATION();
2248 	double		selec;
2249 	double		selec_inner;
2250 	VariableStatData vardata1;
2251 	VariableStatData vardata2;
2252 	double		nd1;
2253 	double		nd2;
2254 	bool		isdefault1;
2255 	bool		isdefault2;
2256 	Oid			opfuncoid;
2257 	AttStatsSlot sslot1;
2258 	AttStatsSlot sslot2;
2259 	Form_pg_statistic stats1 = NULL;
2260 	Form_pg_statistic stats2 = NULL;
2261 	bool		have_mcvs1 = false;
2262 	bool		have_mcvs2 = false;
2263 	bool		join_is_reversed;
2264 	RelOptInfo *inner_rel;
2265 
2266 	get_join_variables(root, args, sjinfo,
2267 					   &vardata1, &vardata2, &join_is_reversed);
2268 
2269 	nd1 = get_variable_numdistinct(&vardata1, &isdefault1);
2270 	nd2 = get_variable_numdistinct(&vardata2, &isdefault2);
2271 
2272 	opfuncoid = get_opcode(operator);
2273 
2274 	memset(&sslot1, 0, sizeof(sslot1));
2275 	memset(&sslot2, 0, sizeof(sslot2));
2276 
2277 	if (HeapTupleIsValid(vardata1.statsTuple))
2278 	{
2279 		/* note we allow use of nullfrac regardless of security check */
2280 		stats1 = (Form_pg_statistic) GETSTRUCT(vardata1.statsTuple);
2281 		if (statistic_proc_security_check(&vardata1, opfuncoid))
2282 			have_mcvs1 = get_attstatsslot(&sslot1, vardata1.statsTuple,
2283 										  STATISTIC_KIND_MCV, InvalidOid,
2284 										  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2285 	}
2286 
2287 	if (HeapTupleIsValid(vardata2.statsTuple))
2288 	{
2289 		/* note we allow use of nullfrac regardless of security check */
2290 		stats2 = (Form_pg_statistic) GETSTRUCT(vardata2.statsTuple);
2291 		if (statistic_proc_security_check(&vardata2, opfuncoid))
2292 			have_mcvs2 = get_attstatsslot(&sslot2, vardata2.statsTuple,
2293 										  STATISTIC_KIND_MCV, InvalidOid,
2294 										  ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS);
2295 	}
2296 
2297 	/* We need to compute the inner-join selectivity in all cases */
2298 	selec_inner = eqjoinsel_inner(opfuncoid, collation,
2299 								  &vardata1, &vardata2,
2300 								  nd1, nd2,
2301 								  isdefault1, isdefault2,
2302 								  &sslot1, &sslot2,
2303 								  stats1, stats2,
2304 								  have_mcvs1, have_mcvs2);
2305 
2306 	switch (sjinfo->jointype)
2307 	{
2308 		case JOIN_INNER:
2309 		case JOIN_LEFT:
2310 		case JOIN_FULL:
2311 			selec = selec_inner;
2312 			break;
2313 		case JOIN_SEMI:
2314 		case JOIN_ANTI:
2315 
2316 			/*
2317 			 * Look up the join's inner relation.  min_righthand is sufficient
2318 			 * information because neither SEMI nor ANTI joins permit any
2319 			 * reassociation into or out of their RHS, so the righthand will
2320 			 * always be exactly that set of rels.
2321 			 */
2322 			inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2323 
2324 			if (!join_is_reversed)
2325 				selec = eqjoinsel_semi(opfuncoid, collation,
2326 									   &vardata1, &vardata2,
2327 									   nd1, nd2,
2328 									   isdefault1, isdefault2,
2329 									   &sslot1, &sslot2,
2330 									   stats1, stats2,
2331 									   have_mcvs1, have_mcvs2,
2332 									   inner_rel);
2333 			else
2334 			{
2335 				Oid			commop = get_commutator(operator);
2336 				Oid			commopfuncoid = OidIsValid(commop) ? get_opcode(commop) : InvalidOid;
2337 
2338 				selec = eqjoinsel_semi(commopfuncoid, collation,
2339 									   &vardata2, &vardata1,
2340 									   nd2, nd1,
2341 									   isdefault2, isdefault1,
2342 									   &sslot2, &sslot1,
2343 									   stats2, stats1,
2344 									   have_mcvs2, have_mcvs1,
2345 									   inner_rel);
2346 			}
2347 
2348 			/*
2349 			 * We should never estimate the output of a semijoin to be more
2350 			 * rows than we estimate for an inner join with the same input
2351 			 * rels and join condition; it's obviously impossible for that to
2352 			 * happen.  The former estimate is N1 * Ssemi while the latter is
2353 			 * N1 * N2 * Sinner, so we may clamp Ssemi <= N2 * Sinner.  Doing
2354 			 * this is worthwhile because of the shakier estimation rules we
2355 			 * use in eqjoinsel_semi, particularly in cases where it has to
2356 			 * punt entirely.
2357 			 */
2358 			selec = Min(selec, inner_rel->rows * selec_inner);
2359 			break;
2360 		default:
2361 			/* other values not expected here */
2362 			elog(ERROR, "unrecognized join type: %d",
2363 				 (int) sjinfo->jointype);
2364 			selec = 0;			/* keep compiler quiet */
2365 			break;
2366 	}
2367 
2368 	free_attstatsslot(&sslot1);
2369 	free_attstatsslot(&sslot2);
2370 
2371 	ReleaseVariableStats(vardata1);
2372 	ReleaseVariableStats(vardata2);
2373 
2374 	CLAMP_PROBABILITY(selec);
2375 
2376 	PG_RETURN_FLOAT8((float8) selec);
2377 }
2378 
2379 /*
2380  * eqjoinsel_inner --- eqjoinsel for normal inner join
2381  *
2382  * We also use this for LEFT/FULL outer joins; it's not presently clear
2383  * that it's worth trying to distinguish them here.
2384  */
2385 static double
2386 eqjoinsel_inner(Oid opfuncoid, Oid collation,
2387 				VariableStatData *vardata1, VariableStatData *vardata2,
2388 				double nd1, double nd2,
2389 				bool isdefault1, bool isdefault2,
2390 				AttStatsSlot *sslot1, AttStatsSlot *sslot2,
2391 				Form_pg_statistic stats1, Form_pg_statistic stats2,
2392 				bool have_mcvs1, bool have_mcvs2)
2393 {
2394 	double		selec;
2395 
2396 	if (have_mcvs1 && have_mcvs2)
2397 	{
2398 		/*
2399 		 * We have most-common-value lists for both relations.  Run through
2400 		 * the lists to see which MCVs actually join to each other with the
2401 		 * given operator.  This allows us to determine the exact join
2402 		 * selectivity for the portion of the relations represented by the MCV
2403 		 * lists.  We still have to estimate for the remaining population, but
2404 		 * in a skewed distribution this gives us a big leg up in accuracy.
2405 		 * For motivation see the analysis in Y. Ioannidis and S.
2406 		 * Christodoulakis, "On the propagation of errors in the size of join
2407 		 * results", Technical Report 1018, Computer Science Dept., University
2408 		 * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2409 		 */
2410 		LOCAL_FCINFO(fcinfo, 2);
2411 		FmgrInfo	eqproc;
2412 		bool	   *hasmatch1;
2413 		bool	   *hasmatch2;
2414 		double		nullfrac1 = stats1->stanullfrac;
2415 		double		nullfrac2 = stats2->stanullfrac;
2416 		double		matchprodfreq,
2417 					matchfreq1,
2418 					matchfreq2,
2419 					unmatchfreq1,
2420 					unmatchfreq2,
2421 					otherfreq1,
2422 					otherfreq2,
2423 					totalsel1,
2424 					totalsel2;
2425 		int			i,
2426 					nmatches;
2427 
2428 		fmgr_info(opfuncoid, &eqproc);
2429 
2430 		/*
2431 		 * Save a few cycles by setting up the fcinfo struct just once. Using
2432 		 * FunctionCallInvoke directly also avoids failure if the eqproc
2433 		 * returns NULL, though really equality functions should never do
2434 		 * that.
2435 		 */
2436 		InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2437 								 NULL, NULL);
2438 		fcinfo->args[0].isnull = false;
2439 		fcinfo->args[1].isnull = false;
2440 
2441 		hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2442 		hasmatch2 = (bool *) palloc0(sslot2->nvalues * sizeof(bool));
2443 
2444 		/*
2445 		 * Note we assume that each MCV will match at most one member of the
2446 		 * other MCV list.  If the operator isn't really equality, there could
2447 		 * be multiple matches --- but we don't look for them, both for speed
2448 		 * and because the math wouldn't add up...
2449 		 */
2450 		matchprodfreq = 0.0;
2451 		nmatches = 0;
2452 		for (i = 0; i < sslot1->nvalues; i++)
2453 		{
2454 			int			j;
2455 
2456 			fcinfo->args[0].value = sslot1->values[i];
2457 
2458 			for (j = 0; j < sslot2->nvalues; j++)
2459 			{
2460 				Datum		fresult;
2461 
2462 				if (hasmatch2[j])
2463 					continue;
2464 				fcinfo->args[1].value = sslot2->values[j];
2465 				fcinfo->isnull = false;
2466 				fresult = FunctionCallInvoke(fcinfo);
2467 				if (!fcinfo->isnull && DatumGetBool(fresult))
2468 				{
2469 					hasmatch1[i] = hasmatch2[j] = true;
2470 					matchprodfreq += sslot1->numbers[i] * sslot2->numbers[j];
2471 					nmatches++;
2472 					break;
2473 				}
2474 			}
2475 		}
2476 		CLAMP_PROBABILITY(matchprodfreq);
2477 		/* Sum up frequencies of matched and unmatched MCVs */
2478 		matchfreq1 = unmatchfreq1 = 0.0;
2479 		for (i = 0; i < sslot1->nvalues; i++)
2480 		{
2481 			if (hasmatch1[i])
2482 				matchfreq1 += sslot1->numbers[i];
2483 			else
2484 				unmatchfreq1 += sslot1->numbers[i];
2485 		}
2486 		CLAMP_PROBABILITY(matchfreq1);
2487 		CLAMP_PROBABILITY(unmatchfreq1);
2488 		matchfreq2 = unmatchfreq2 = 0.0;
2489 		for (i = 0; i < sslot2->nvalues; i++)
2490 		{
2491 			if (hasmatch2[i])
2492 				matchfreq2 += sslot2->numbers[i];
2493 			else
2494 				unmatchfreq2 += sslot2->numbers[i];
2495 		}
2496 		CLAMP_PROBABILITY(matchfreq2);
2497 		CLAMP_PROBABILITY(unmatchfreq2);
2498 		pfree(hasmatch1);
2499 		pfree(hasmatch2);
2500 
2501 		/*
2502 		 * Compute total frequency of non-null values that are not in the MCV
2503 		 * lists.
2504 		 */
2505 		otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2506 		otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2507 		CLAMP_PROBABILITY(otherfreq1);
2508 		CLAMP_PROBABILITY(otherfreq2);
2509 
2510 		/*
2511 		 * We can estimate the total selectivity from the point of view of
2512 		 * relation 1 as: the known selectivity for matched MCVs, plus
2513 		 * unmatched MCVs that are assumed to match against random members of
2514 		 * relation 2's non-MCV population, plus non-MCV values that are
2515 		 * assumed to match against random members of relation 2's unmatched
2516 		 * MCVs plus non-MCV values.
2517 		 */
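		/*
		 * Worked example with made-up numbers: suppose matchprodfreq = 0.2,
		 * unmatchfreq1 = 0.1, otherfreq1 = 0.3, unmatchfreq2 = 0.15,
		 * otherfreq2 = 0.25, nd2 = 100, sslot2->nvalues = 20 and nmatches =
		 * 10.  Then totalsel1 = 0.2 + 0.1 * 0.25 / 80 + 0.3 * (0.25 + 0.15)
		 * / 90 ~= 0.2016.
		 */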
2518 		totalsel1 = matchprodfreq;
2519 		if (nd2 > sslot2->nvalues)
2520 			totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - sslot2->nvalues);
2521 		if (nd2 > nmatches)
2522 			totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2523 				(nd2 - nmatches);
2524 		/* Same estimate from the point of view of relation 2. */
2525 		totalsel2 = matchprodfreq;
2526 		if (nd1 > sslot1->nvalues)
2527 			totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - sslot1->nvalues);
2528 		if (nd1 > nmatches)
2529 			totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2530 				(nd1 - nmatches);
2531 
2532 		/*
2533 		 * Use the smaller of the two estimates.  This can be justified in
2534 		 * essentially the same terms as given below for the no-stats case: to
2535 		 * a first approximation, we are estimating from the point of view of
2536 		 * the relation with smaller nd.
2537 		 */
2538 		selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2539 	}
2540 	else
2541 	{
2542 		/*
2543 		 * We do not have MCV lists for both sides.  Estimate the join
2544 		 * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2545 		 * is plausible if we assume that the join operator is strict and the
2546 		 * non-null values are about equally distributed: a given non-null
2547 		 * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2548 		 * of rel2, so total join rows are at most
2549 		 * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2550 		 * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2551 		 * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2552 		 * with MIN() is an upper bound.  Using the MIN() means we estimate
2553 		 * from the point of view of the relation with smaller nd (since the
2554 		 * larger nd is determining the MIN).  It is reasonable to assume that
2555 		 * most tuples in this rel will have join partners, so the bound is
2556 		 * probably reasonably tight and should be taken as-is.
2557 		 *
2558 		 * XXX Can we be smarter if we have an MCV list for just one side? It
2559 		 * seems that if we assume equal distribution for the other side, we
2560 		 * end up with the same answer anyway.
2561 		 */
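		/*
		 * Worked example (illustrative only): with nd1 = 50, nd2 = 100 and
		 * zero null fractions, selec = 1/100 = 0.01.  For inputs of N1 =
		 * 1000 and N2 = 2000 rows that predicts 1000 * 2000 * 0.01 = 20000
		 * join rows, i.e. each rel1 row finding about N2/nd2 = 20 partners.
		 */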
2562 		double		nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2563 		double		nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2564 
2565 		selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2566 		if (nd1 > nd2)
2567 			selec /= nd1;
2568 		else
2569 			selec /= nd2;
2570 	}
2571 
2572 	return selec;
2573 }
2574 
2575 /*
2576  * eqjoinsel_semi --- eqjoinsel for semi join
2577  *
2578  * (Also used for anti join, which we are supposed to estimate the same way.)
2579  * Caller has ensured that vardata1 is the LHS variable.
2580  * Unlike eqjoinsel_inner, we have to cope with opfuncoid being InvalidOid.
2581  */
2582 static double
2583 eqjoinsel_semi(Oid opfuncoid, Oid collation,
2584 			   VariableStatData *vardata1, VariableStatData *vardata2,
2585 			   double nd1, double nd2,
2586 			   bool isdefault1, bool isdefault2,
2587 			   AttStatsSlot *sslot1, AttStatsSlot *sslot2,
2588 			   Form_pg_statistic stats1, Form_pg_statistic stats2,
2589 			   bool have_mcvs1, bool have_mcvs2,
2590 			   RelOptInfo *inner_rel)
2591 {
2592 	double		selec;
2593 
2594 	/*
2595 	 * We clamp nd2 to be not more than what we estimate the inner relation's
2596 	 * size to be.  This is intuitively somewhat reasonable since obviously
2597 	 * there can't be more than that many distinct values coming from the
2598 	 * inner rel.  The reason for the asymmetry (ie, that we don't clamp nd1
2599 	 * likewise) is that this is the only pathway by which restriction clauses
2600 	 * applied to the inner rel will affect the join result size estimate,
2601 	 * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2602 	 * only the outer rel's size.  If we clamped nd1 we'd be double-counting
2603 	 * the selectivity of outer-rel restrictions.
2604 	 *
2605 	 * We can apply this clamping both with respect to the base relation from
2606 	 * which the join variable comes (if there is just one), and to the
2607 	 * immediate inner input relation of the current join.
2608 	 *
2609 	 * If we clamp, we can treat nd2 as being a non-default estimate; it's not
2610 	 * great, maybe, but it didn't come out of nowhere either.  This is most
2611 	 * helpful when the inner relation is empty and consequently has no stats.
2612 	 */
2613 	if (vardata2->rel)
2614 	{
2615 		if (nd2 >= vardata2->rel->rows)
2616 		{
2617 			nd2 = vardata2->rel->rows;
2618 			isdefault2 = false;
2619 		}
2620 	}
2621 	if (nd2 >= inner_rel->rows)
2622 	{
2623 		nd2 = inner_rel->rows;
2624 		isdefault2 = false;
2625 	}
2626 
2627 	if (have_mcvs1 && have_mcvs2 && OidIsValid(opfuncoid))
2628 	{
2629 		/*
2630 		 * We have most-common-value lists for both relations.  Run through
2631 		 * the lists to see which MCVs actually join to each other with the
2632 		 * given operator.  This allows us to determine the exact join
2633 		 * selectivity for the portion of the relations represented by the MCV
2634 		 * lists.  We still have to estimate for the remaining population, but
2635 		 * in a skewed distribution this gives us a big leg up in accuracy.
2636 		 */
2637 		LOCAL_FCINFO(fcinfo, 2);
2638 		FmgrInfo	eqproc;
2639 		bool	   *hasmatch1;
2640 		bool	   *hasmatch2;
2641 		double		nullfrac1 = stats1->stanullfrac;
2642 		double		matchfreq1,
2643 					uncertainfrac,
2644 					uncertain;
2645 		int			i,
2646 					nmatches,
2647 					clamped_nvalues2;
2648 
2649 		/*
2650 		 * The clamping above could have resulted in nd2 being less than
2651 		 * sslot2->nvalues, in which case we assume that precisely the nd2
2652 		 * most common values in the relation will appear in the join input,
2653 		 * and so compare to only the first nd2 members of the MCV list.  Of
2654 		 * course this is frequently wrong, but it's the best bet we can make.
2655 		 */
2656 		clamped_nvalues2 = Min(sslot2->nvalues, nd2);
2657 
2658 		fmgr_info(opfuncoid, &eqproc);
2659 
2660 		/*
2661 		 * Save a few cycles by setting up the fcinfo struct just once. Using
2662 		 * FunctionCallInvoke directly also avoids failure if the eqproc
2663 		 * returns NULL, though really equality functions should never do
2664 		 * that.
2665 		 */
2666 		InitFunctionCallInfoData(*fcinfo, &eqproc, 2, collation,
2667 								 NULL, NULL);
2668 		fcinfo->args[0].isnull = false;
2669 		fcinfo->args[1].isnull = false;
2670 
2671 		hasmatch1 = (bool *) palloc0(sslot1->nvalues * sizeof(bool));
2672 		hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2673 
2674 		/*
2675 		 * Note we assume that each MCV will match at most one member of the
2676 		 * other MCV list.  If the operator isn't really equality, there could
2677 		 * be multiple matches --- but we don't look for them, both for speed
2678 		 * and because the math wouldn't add up...
2679 		 */
2680 		nmatches = 0;
2681 		for (i = 0; i < sslot1->nvalues; i++)
2682 		{
2683 			int			j;
2684 
2685 			fcinfo->args[0].value = sslot1->values[i];
2686 
2687 			for (j = 0; j < clamped_nvalues2; j++)
2688 			{
2689 				Datum		fresult;
2690 
2691 				if (hasmatch2[j])
2692 					continue;
2693 				fcinfo->args[1].value = sslot2->values[j];
2694 				fcinfo->isnull = false;
2695 				fresult = FunctionCallInvoke(fcinfo);
2696 				if (!fcinfo->isnull && DatumGetBool(fresult))
2697 				{
2698 					hasmatch1[i] = hasmatch2[j] = true;
2699 					nmatches++;
2700 					break;
2701 				}
2702 			}
2703 		}
2704 		/* Sum up frequencies of matched MCVs */
2705 		matchfreq1 = 0.0;
2706 		for (i = 0; i < sslot1->nvalues; i++)
2707 		{
2708 			if (hasmatch1[i])
2709 				matchfreq1 += sslot1->numbers[i];
2710 		}
2711 		CLAMP_PROBABILITY(matchfreq1);
2712 		pfree(hasmatch1);
2713 		pfree(hasmatch2);
2714 
2715 		/*
2716 		 * Now we need to estimate the fraction of relation 1 that has at
2717 		 * least one join partner.  We know for certain that the matched MCVs
2718 		 * do, so that gives us a lower bound, but we're really in the dark
2719 		 * about everything else.  Our crude approach is: if nd1 <= nd2 then
2720 		 * assume all non-null rel1 rows have join partners, else assume for
2721 		 * the uncertain rows that a fraction nd2/nd1 have join partners. We
2722 		 * can discount the known-matched MCVs from the distinct-values counts
2723 		 * before doing the division.
2724 		 *
2725 		 * Crude as the above is, it's completely useless if we don't have
2726 		 * reliable ndistinct values for both sides.  Hence, if either nd1 or
2727 		 * nd2 is default, punt and assume half of the uncertain rows have
2728 		 * join partners.
2729 		 */
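		/*
		 * Illustrative numbers: with nd1 = 1000, nd2 = 200 and nmatches =
		 * 50, the adjusted counts are 950 and 150, so uncertainfrac = 150 /
		 * 950 ~= 0.158; the final estimate is matchfreq1 plus about 15.8%
		 * of the uncertain fraction.
		 */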
2730 		if (!isdefault1 && !isdefault2)
2731 		{
2732 			nd1 -= nmatches;
2733 			nd2 -= nmatches;
2734 			if (nd1 <= nd2 || nd2 < 0)
2735 				uncertainfrac = 1.0;
2736 			else
2737 				uncertainfrac = nd2 / nd1;
2738 		}
2739 		else
2740 			uncertainfrac = 0.5;
2741 		uncertain = 1.0 - matchfreq1 - nullfrac1;
2742 		CLAMP_PROBABILITY(uncertain);
2743 		selec = matchfreq1 + uncertainfrac * uncertain;
2744 	}
2745 	else
2746 	{
2747 		/*
2748 		 * Without MCV lists for both sides, we can only use the heuristic
2749 		 * about nd1 vs nd2.
2750 		 */
2751 		double		nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2752 
2753 		if (!isdefault1 && !isdefault2)
2754 		{
2755 			if (nd1 <= nd2 || nd2 < 0)
2756 				selec = 1.0 - nullfrac1;
2757 			else
2758 				selec = (nd2 / nd1) * (1.0 - nullfrac1);
2759 		}
2760 		else
2761 			selec = 0.5 * (1.0 - nullfrac1);
2762 	}
2763 
2764 	return selec;
2765 }
2766 
2767 /*
2768  *		neqjoinsel		- Join selectivity of "!="
2769  */
2770 Datum
2771 neqjoinsel(PG_FUNCTION_ARGS)
2772 {
2773 	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2774 	Oid			operator = PG_GETARG_OID(1);
2775 	List	   *args = (List *) PG_GETARG_POINTER(2);
2776 	JoinType	jointype = (JoinType) PG_GETARG_INT16(3);
2777 	SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2778 	Oid			collation = PG_GET_COLLATION();
2779 	float8		result;
2780 
2781 	if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
2782 	{
2783 		/*
2784 		 * For semi-joins, if there is more than one distinct value in the RHS
2785 		 * relation then every non-null LHS row must find a row to join since
2786 		 * it can only be equal to one of them.  We'll assume that there is
2787 		 * always more than one distinct RHS value for the sake of stability,
2788 		 * though in theory we could have special cases for empty RHS
2789 		 * (selectivity = 0) and single-distinct-value RHS (selectivity =
2790 		 * fraction of LHS that has the same value as the single RHS value).
2791 		 *
2792 		 * For anti-joins, if we use the same assumption that there is more
2793 		 * than one distinct key in the RHS relation, then every non-null LHS
2794 		 * row must be suppressed by the anti-join.
2795 		 *
2796 		 * So either way, the selectivity estimate should be 1 - nullfrac.
2797 		 */
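		/*
		 * Illustrative case (hypothetical stats): for a semi- or anti-join
		 * on "a.x <> b.y" where a.x has stanullfrac = 0.05, this branch
		 * returns 1 - 0.05 = 0.95, i.e. essentially every non-null LHS row
		 * is assumed to satisfy the <> condition for some RHS row.
		 */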
2798 		VariableStatData leftvar;
2799 		VariableStatData rightvar;
2800 		bool		reversed;
2801 		HeapTuple	statsTuple;
2802 		double		nullfrac;
2803 
2804 		get_join_variables(root, args, sjinfo, &leftvar, &rightvar, &reversed);
2805 		statsTuple = reversed ? rightvar.statsTuple : leftvar.statsTuple;
2806 		if (HeapTupleIsValid(statsTuple))
2807 			nullfrac = ((Form_pg_statistic) GETSTRUCT(statsTuple))->stanullfrac;
2808 		else
2809 			nullfrac = 0.0;
2810 		ReleaseVariableStats(leftvar);
2811 		ReleaseVariableStats(rightvar);
2812 
2813 		result = 1.0 - nullfrac;
2814 	}
2815 	else
2816 	{
2817 		/*
2818 		 * We want 1 - eqjoinsel() where the equality operator is the one
2819 		 * associated with this != operator, that is, its negator.
2820 		 */
2821 		Oid			eqop = get_negator(operator);
2822 
2823 		if (eqop)
2824 		{
2825 			result =
2826 				DatumGetFloat8(DirectFunctionCall5Coll(eqjoinsel,
2827 													   collation,
2828 													   PointerGetDatum(root),
2829 													   ObjectIdGetDatum(eqop),
2830 													   PointerGetDatum(args),
2831 													   Int16GetDatum(jointype),
2832 													   PointerGetDatum(sjinfo)));
2833 		}
2834 		else
2835 		{
2836 			/* Use default selectivity (should we raise an error instead?) */
2837 			result = DEFAULT_EQ_SEL;
2838 		}
2839 		result = 1.0 - result;
2840 	}
2841 
2842 	PG_RETURN_FLOAT8(result);
2843 }
2844 
2845 /*
2846  *		scalarltjoinsel - Join selectivity of "<" for scalars
2847  */
2848 Datum
2849 scalarltjoinsel(PG_FUNCTION_ARGS)
2850 {
2851 	PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2852 }
2853 
2854 /*
2855  *		scalarlejoinsel - Join selectivity of "<=" for scalars
2856  */
2857 Datum
2858 scalarlejoinsel(PG_FUNCTION_ARGS)
2859 {
2860 	PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2861 }
2862 
2863 /*
2864  *		scalargtjoinsel - Join selectivity of ">" for scalars
2865  */
2866 Datum
2867 scalargtjoinsel(PG_FUNCTION_ARGS)
2868 {
2869 	PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2870 }
2871 
2872 /*
2873  *		scalargejoinsel - Join selectivity of ">=" for scalars
2874  */
2875 Datum
2876 scalargejoinsel(PG_FUNCTION_ARGS)
2877 {
2878 	PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2879 }
2880 
2881 
2882 /*
2883  * mergejoinscansel			- Scan selectivity of merge join.
2884  *
2885  * A merge join will stop as soon as it exhausts either input stream.
2886  * Therefore, if we can estimate the ranges of both input variables,
2887  * we can estimate how much of the input will actually be read.  This
2888  * can have a considerable impact on the cost when using indexscans.
2889  *
2890  * Also, we can estimate how much of each input has to be read before the
2891  * first join pair is found, which will affect the join's startup time.
2892  *
2893  * clause should be a clause already known to be mergejoinable.  opfamily,
2894  * strategy, and nulls_first specify the sort ordering being used.
2895  *
2896  * The outputs are:
2897  *		*leftstart is set to the fraction of the left-hand variable expected
2898  *		 to be scanned before the first join pair is found (0 to 1).
2899  *		*leftend is set to the fraction of the left-hand variable expected
2900  *		 to be scanned before the join terminates (0 to 1).
2901  *		*rightstart, *rightend similarly for the right-hand variable.
2902  */
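/*
 * Illustrative example (made-up ranges): if the left input spans 1..100 and
 * the right input spans 40..60 on the merge key, with roughly uniform data
 * about the first 40% of the left input is read before the first join pair
 * (leftstart ~= 0.4) and the join can stop after about 60% of it (leftend
 * ~= 0.6), while the right input is scanned completely (rightstart = 0.0,
 * rightend = 1.0).
 */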
2903 void
2904 mergejoinscansel(PlannerInfo *root, Node *clause,
2905 				 Oid opfamily, int strategy, bool nulls_first,
2906 				 Selectivity *leftstart, Selectivity *leftend,
2907 				 Selectivity *rightstart, Selectivity *rightend)
2908 {
2909 	Node	   *left,
2910 			   *right;
2911 	VariableStatData leftvar,
2912 				rightvar;
2913 	int			op_strategy;
2914 	Oid			op_lefttype;
2915 	Oid			op_righttype;
2916 	Oid			opno,
2917 				collation,
2918 				lsortop,
2919 				rsortop,
2920 				lstatop,
2921 				rstatop,
2922 				ltop,
2923 				leop,
2924 				revltop,
2925 				revleop;
2926 	bool		isgt;
2927 	Datum		leftmin,
2928 				leftmax,
2929 				rightmin,
2930 				rightmax;
2931 	double		selec;
2932 
2933 	/* Set default results if we can't figure anything out. */
2934 	/* XXX should default "start" fraction be a bit more than 0? */
2935 	*leftstart = *rightstart = 0.0;
2936 	*leftend = *rightend = 1.0;
2937 
2938 	/* Deconstruct the merge clause */
2939 	if (!is_opclause(clause))
2940 		return;					/* shouldn't happen */
2941 	opno = ((OpExpr *) clause)->opno;
2942 	collation = ((OpExpr *) clause)->inputcollid;
2943 	left = get_leftop((Expr *) clause);
2944 	right = get_rightop((Expr *) clause);
2945 	if (!right)
2946 		return;					/* shouldn't happen */
2947 
2948 	/* Look for stats for the inputs */
2949 	examine_variable(root, left, 0, &leftvar);
2950 	examine_variable(root, right, 0, &rightvar);
2951 
2952 	/* Extract the operator's declared left/right datatypes */
2953 	get_op_opfamily_properties(opno, opfamily, false,
2954 							   &op_strategy,
2955 							   &op_lefttype,
2956 							   &op_righttype);
2957 	Assert(op_strategy == BTEqualStrategyNumber);
2958 
2959 	/*
2960 	 * Look up the various operators we need.  If we don't find them all, it
2961 	 * probably means the opfamily is broken, but we just fail silently.
2962 	 *
2963 	 * Note: we expect that pg_statistic histograms will be sorted by the '<'
2964 	 * operator, regardless of which sort direction we are considering.
2965 	 */
2966 	switch (strategy)
2967 	{
2968 		case BTLessStrategyNumber:
2969 			isgt = false;
2970 			if (op_lefttype == op_righttype)
2971 			{
2972 				/* easy case */
2973 				ltop = get_opfamily_member(opfamily,
2974 										   op_lefttype, op_righttype,
2975 										   BTLessStrategyNumber);
2976 				leop = get_opfamily_member(opfamily,
2977 										   op_lefttype, op_righttype,
2978 										   BTLessEqualStrategyNumber);
2979 				lsortop = ltop;
2980 				rsortop = ltop;
2981 				lstatop = lsortop;
2982 				rstatop = rsortop;
2983 				revltop = ltop;
2984 				revleop = leop;
2985 			}
2986 			else
2987 			{
2988 				ltop = get_opfamily_member(opfamily,
2989 										   op_lefttype, op_righttype,
2990 										   BTLessStrategyNumber);
2991 				leop = get_opfamily_member(opfamily,
2992 										   op_lefttype, op_righttype,
2993 										   BTLessEqualStrategyNumber);
2994 				lsortop = get_opfamily_member(opfamily,
2995 											  op_lefttype, op_lefttype,
2996 											  BTLessStrategyNumber);
2997 				rsortop = get_opfamily_member(opfamily,
2998 											  op_righttype, op_righttype,
2999 											  BTLessStrategyNumber);
3000 				lstatop = lsortop;
3001 				rstatop = rsortop;
3002 				revltop = get_opfamily_member(opfamily,
3003 											  op_righttype, op_lefttype,
3004 											  BTLessStrategyNumber);
3005 				revleop = get_opfamily_member(opfamily,
3006 											  op_righttype, op_lefttype,
3007 											  BTLessEqualStrategyNumber);
3008 			}
3009 			break;
3010 		case BTGreaterStrategyNumber:
3011 			/* descending-order case */
3012 			isgt = true;
3013 			if (op_lefttype == op_righttype)
3014 			{
3015 				/* easy case */
3016 				ltop = get_opfamily_member(opfamily,
3017 										   op_lefttype, op_righttype,
3018 										   BTGreaterStrategyNumber);
3019 				leop = get_opfamily_member(opfamily,
3020 										   op_lefttype, op_righttype,
3021 										   BTGreaterEqualStrategyNumber);
3022 				lsortop = ltop;
3023 				rsortop = ltop;
3024 				lstatop = get_opfamily_member(opfamily,
3025 											  op_lefttype, op_lefttype,
3026 											  BTLessStrategyNumber);
3027 				rstatop = lstatop;
3028 				revltop = ltop;
3029 				revleop = leop;
3030 			}
3031 			else
3032 			{
3033 				ltop = get_opfamily_member(opfamily,
3034 										   op_lefttype, op_righttype,
3035 										   BTGreaterStrategyNumber);
3036 				leop = get_opfamily_member(opfamily,
3037 										   op_lefttype, op_righttype,
3038 										   BTGreaterEqualStrategyNumber);
3039 				lsortop = get_opfamily_member(opfamily,
3040 											  op_lefttype, op_lefttype,
3041 											  BTGreaterStrategyNumber);
3042 				rsortop = get_opfamily_member(opfamily,
3043 											  op_righttype, op_righttype,
3044 											  BTGreaterStrategyNumber);
3045 				lstatop = get_opfamily_member(opfamily,
3046 											  op_lefttype, op_lefttype,
3047 											  BTLessStrategyNumber);
3048 				rstatop = get_opfamily_member(opfamily,
3049 											  op_righttype, op_righttype,
3050 											  BTLessStrategyNumber);
3051 				revltop = get_opfamily_member(opfamily,
3052 											  op_righttype, op_lefttype,
3053 											  BTGreaterStrategyNumber);
3054 				revleop = get_opfamily_member(opfamily,
3055 											  op_righttype, op_lefttype,
3056 											  BTGreaterEqualStrategyNumber);
3057 			}
3058 			break;
3059 		default:
3060 			goto fail;			/* shouldn't get here */
3061 	}
3062 
3063 	if (!OidIsValid(lsortop) ||
3064 		!OidIsValid(rsortop) ||
3065 		!OidIsValid(lstatop) ||
3066 		!OidIsValid(rstatop) ||
3067 		!OidIsValid(ltop) ||
3068 		!OidIsValid(leop) ||
3069 		!OidIsValid(revltop) ||
3070 		!OidIsValid(revleop))
3071 		goto fail;				/* insufficient info in catalogs */
3072 
3073 	/* Try to get ranges of both inputs */
3074 	if (!isgt)
3075 	{
3076 		if (!get_variable_range(root, &leftvar, lstatop, collation,
3077 								&leftmin, &leftmax))
3078 			goto fail;			/* no range available from stats */
3079 		if (!get_variable_range(root, &rightvar, rstatop, collation,
3080 								&rightmin, &rightmax))
3081 			goto fail;			/* no range available from stats */
3082 	}
3083 	else
3084 	{
3085 		/* need to swap the max and min */
3086 		if (!get_variable_range(root, &leftvar, lstatop, collation,
3087 								&leftmax, &leftmin))
3088 			goto fail;			/* no range available from stats */
3089 		if (!get_variable_range(root, &rightvar, rstatop, collation,
3090 								&rightmax, &rightmin))
3091 			goto fail;			/* no range available from stats */
3092 	}
3093 
3094 	/*
3095 	 * Now, the fraction of the left variable that will be scanned is the
3096 	 * fraction that's <= the right-side maximum value.  But only believe
3097 	 * non-default estimates, else stick with our 1.0.
3098 	 */
3099 	selec = scalarineqsel(root, leop, isgt, true, collation, &leftvar,
3100 						  rightmax, op_righttype);
3101 	if (selec != DEFAULT_INEQ_SEL)
3102 		*leftend = selec;
3103 
3104 	/* And similarly for the right variable. */
3105 	selec = scalarineqsel(root, revleop, isgt, true, collation, &rightvar,
3106 						  leftmax, op_lefttype);
3107 	if (selec != DEFAULT_INEQ_SEL)
3108 		*rightend = selec;
3109 
3110 	/*
3111 	 * Only one of the two "end" fractions can really be less than 1.0;
3112 	 * believe the smaller estimate and reset the other one to exactly 1.0. If
3113 	 * we get exactly equal estimates (as can easily happen with self-joins),
3114 	 * believe neither.
3115 	 */
3116 	if (*leftend > *rightend)
3117 		*leftend = 1.0;
3118 	else if (*leftend < *rightend)
3119 		*rightend = 1.0;
3120 	else
3121 		*leftend = *rightend = 1.0;
3122 
3123 	/*
3124 	 * Also, the fraction of the left variable that will be scanned before the
3125 	 * first join pair is found is the fraction that's < the right-side
3126 	 * minimum value.  But only believe non-default estimates, else stick with
3127 	 * our own default.
3128 	 */
3129 	selec = scalarineqsel(root, ltop, isgt, false, collation, &leftvar,
3130 						  rightmin, op_righttype);
3131 	if (selec != DEFAULT_INEQ_SEL)
3132 		*leftstart = selec;
3133 
3134 	/* And similarly for the right variable. */
3135 	selec = scalarineqsel(root, revltop, isgt, false, collation, &rightvar,
3136 						  leftmin, op_lefttype);
3137 	if (selec != DEFAULT_INEQ_SEL)
3138 		*rightstart = selec;
3139 
3140 	/*
3141 	 * Only one of the two "start" fractions can really be more than zero;
3142 	 * believe the larger estimate and reset the other one to exactly 0.0. If
3143 	 * we get exactly equal estimates (as can easily happen with self-joins),
3144 	 * believe neither.
3145 	 */
3146 	if (*leftstart < *rightstart)
3147 		*leftstart = 0.0;
3148 	else if (*leftstart > *rightstart)
3149 		*rightstart = 0.0;
3150 	else
3151 		*leftstart = *rightstart = 0.0;
3152 
3153 	/*
3154 	 * If the sort order is nulls-first, we're going to have to skip over any
3155 	 * nulls too.  These would not have been counted by scalarineqsel, and we
3156 	 * can safely add in this fraction regardless of whether we believe
3157 	 * scalarineqsel's results or not.  But be sure to clamp the sum to 1.0!
3158 	 */
3159 	if (nulls_first)
3160 	{
3161 		Form_pg_statistic stats;
3162 
3163 		if (HeapTupleIsValid(leftvar.statsTuple))
3164 		{
3165 			stats = (Form_pg_statistic) GETSTRUCT(leftvar.statsTuple);
3166 			*leftstart += stats->stanullfrac;
3167 			CLAMP_PROBABILITY(*leftstart);
3168 			*leftend += stats->stanullfrac;
3169 			CLAMP_PROBABILITY(*leftend);
3170 		}
3171 		if (HeapTupleIsValid(rightvar.statsTuple))
3172 		{
3173 			stats = (Form_pg_statistic) GETSTRUCT(rightvar.statsTuple);
3174 			*rightstart += stats->stanullfrac;
3175 			CLAMP_PROBABILITY(*rightstart);
3176 			*rightend += stats->stanullfrac;
3177 			CLAMP_PROBABILITY(*rightend);
3178 		}
3179 	}
3180 
3181 	/* Disbelieve start >= end, just in case that can happen */
3182 	if (*leftstart >= *leftend)
3183 	{
3184 		*leftstart = 0.0;
3185 		*leftend = 1.0;
3186 	}
3187 	if (*rightstart >= *rightend)
3188 	{
3189 		*rightstart = 0.0;
3190 		*rightend = 1.0;
3191 	}
3192 
3193 fail:
3194 	ReleaseVariableStats(leftvar);
3195 	ReleaseVariableStats(rightvar);
3196 }
3197 
3198 
3199 /*
3200  *	matchingsel -- generic matching-operator selectivity support
3201  *
3202  * Use these for any operators that (a) are on data types for which we collect
3203  * standard statistics, and (b) have behavior for which the default estimate
3204  * (twice DEFAULT_EQ_SEL) is sane.  Typically that is good for match-like
3205  * operators.
3206  */
3207 
3208 Datum
3209 matchingsel(PG_FUNCTION_ARGS)
3210 {
3211 	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
3212 	Oid			operator = PG_GETARG_OID(1);
3213 	List	   *args = (List *) PG_GETARG_POINTER(2);
3214 	int			varRelid = PG_GETARG_INT32(3);
3215 	Oid			collation = PG_GET_COLLATION();
3216 	double		selec;
3217 
3218 	/* Use generic restriction selectivity logic. */
3219 	selec = generic_restriction_selectivity(root, operator, collation,
3220 											args, varRelid,
3221 											DEFAULT_MATCHING_SEL);
3222 
3223 	PG_RETURN_FLOAT8((float8) selec);
3224 }
3225 
3226 Datum
3227 matchingjoinsel(PG_FUNCTION_ARGS)
3228 {
3229 	/* Just punt, for the moment. */
3230 	PG_RETURN_FLOAT8(DEFAULT_MATCHING_SEL);
3231 }
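
/*
 * Illustrative sketch (not part of this file): an extension providing a
 * match-like operator could define its oprrest support function in exactly
 * the same style, just with its own default selectivity.  The function name
 * and the 0.005 default below are hypothetical.
 */
#ifdef SELFUNCS_EXAMPLES
Datum
my_matchsel(PG_FUNCTION_ARGS)
{
	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
	Oid			operator = PG_GETARG_OID(1);
	List	   *args = (List *) PG_GETARG_POINTER(2);
	int			varRelid = PG_GETARG_INT32(3);
	Oid			collation = PG_GET_COLLATION();

	/* Same pattern as matchingsel, only the default estimate differs */
	PG_RETURN_FLOAT8((float8) generic_restriction_selectivity(root, operator,
															  collation, args,
															  varRelid, 0.005));
}
#endif							/* SELFUNCS_EXAMPLES */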
3232 
3233 
3234 /*
3235  * Helper routine for estimate_num_groups: add an item to a list of
3236  * GroupVarInfos, but only if it's not known equal to any of the existing
3237  * entries.
3238  */
3239 typedef struct
3240 {
3241 	Node	   *var;			/* might be an expression, not just a Var */
3242 	RelOptInfo *rel;			/* relation it belongs to */
3243 	double		ndistinct;		/* # distinct values */
3244 	bool		isdefault;		/* true if DEFAULT_NUM_DISTINCT was used */
3245 } GroupVarInfo;
3246 
3247 static List *
3248 add_unique_group_var(PlannerInfo *root, List *varinfos,
3249 					 Node *var, VariableStatData *vardata)
3250 {
3251 	GroupVarInfo *varinfo;
3252 	double		ndistinct;
3253 	bool		isdefault;
3254 	ListCell   *lc;
3255 
3256 	ndistinct = get_variable_numdistinct(vardata, &isdefault);
3257 
3258 	foreach(lc, varinfos)
3259 	{
3260 		varinfo = (GroupVarInfo *) lfirst(lc);
3261 
3262 		/* Drop exact duplicates */
3263 		if (equal(var, varinfo->var))
3264 			return varinfos;
3265 
3266 		/*
3267 		 * Drop known-equal vars, but only if they belong to different
3268 		 * relations (see comments for estimate_num_groups)
3269 		 */
3270 		if (vardata->rel != varinfo->rel &&
3271 			exprs_known_equal(root, var, varinfo->var))
3272 		{
3273 			if (varinfo->ndistinct <= ndistinct)
3274 			{
3275 				/* Keep older item, forget new one */
3276 				return varinfos;
3277 			}
3278 			else
3279 			{
3280 				/* Delete the older item */
3281 				varinfos = foreach_delete_current(varinfos, lc);
3282 			}
3283 		}
3284 	}
3285 
3286 	varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3287 
3288 	varinfo->var = var;
3289 	varinfo->rel = vardata->rel;
3290 	varinfo->ndistinct = ndistinct;
3291 	varinfo->isdefault = isdefault;
3292 	varinfos = lappend(varinfos, varinfo);
3293 	return varinfos;
3294 }
3295 
3296 /*
3297  * estimate_num_groups		- Estimate number of groups in a grouped query
3298  *
3299  * Given a query having a GROUP BY clause, estimate how many groups there
3300  * will be --- ie, the number of distinct combinations of the GROUP BY
3301  * expressions.
3302  *
3303  * This routine is also used to estimate the number of rows emitted by
3304  * a DISTINCT filtering step; that is an isomorphic problem.  (Note:
3305  * actually, we only use it for DISTINCT when there's no grouping or
3306  * aggregation ahead of the DISTINCT.)
3307  *
3308  * Inputs:
3309  *	root - the query
3310  *	groupExprs - list of expressions being grouped by
3311  *	input_rows - number of rows estimated to arrive at the group/unique
3312  *		filter step
3313  *	pgset - NULL, or a List** pointing to a grouping set to filter the
3314  *		groupExprs against
3315  *
3316  * Outputs:
3317  *	estinfo - When passed as non-NULL, the function will set bits in the
3318  *		"flags" field in order to provide callers with additional information
3319  *		about the estimation.  Currently, we only set the SELFLAG_USED_DEFAULT
3320  *		bit if we used any default values in the estimation.
3321  *
3322  * Given the lack of any cross-correlation statistics in the system, it's
3323  * impossible to do anything really trustworthy with GROUP BY conditions
3324  * involving multiple Vars.  We should however avoid assuming the worst
3325  * case (all possible cross-product terms actually appear as groups) since
3326  * very often the grouped-by Vars are highly correlated.  Our current approach
3327  * is as follows:
3328  *	1.  Expressions yielding boolean are assumed to contribute two groups,
3329  *		independently of their content, and are ignored in the subsequent
3330  *		steps.  This is mainly because tests like "col IS NULL" break the
3331  *		heuristic used in step 2 especially badly.
3332  *	2.  Reduce the given expressions to a list of unique Vars used.  For
3333  *		example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
3334  *		It is clearly correct not to count the same Var more than once.
3335  *		It is also reasonable to treat f(x) the same as x: f() cannot
3336  *		increase the number of distinct values (unless it is volatile,
3337  *		which we consider unlikely for grouping), but it probably won't
3338  *		reduce the number of distinct values much either.
3339  *		As a special case, if a GROUP BY expression can be matched to an
3340  *		expressional index for which we have statistics, then we treat the
3341  *		whole expression as though it were just a Var.
3342  *	3.  If the list contains Vars of different relations that are known equal
3343  *		due to equivalence classes, then drop all but one of the Vars from each
3344  *		known-equal set, keeping the one with smallest estimated # of values
3345  *		(since the extra values of the others can't appear in joined rows).
3346  *		Note the reason we only consider Vars of different relations is that
3347  *		if we considered ones of the same rel, we'd be double-counting the
3348  *		restriction selectivity of the equality in the next step.
3349  *	4.  For Vars within a single source rel, we multiply together the numbers
3350  *		of values, clamp to the number of rows in the rel (divided by 10 if
3351  *		more than one Var), and then multiply by a factor based on the
3352  *		selectivity of the restriction clauses for that rel.  When there's
3353  *		more than one Var, the initial product is probably too high (it's the
3354  *		worst case) but clamping to a fraction of the rel's rows seems to be a
3355  *		helpful heuristic for not letting the estimate get out of hand.  (The
3356  *		factor of 10 is derived from pre-Postgres-7.4 practice.)  The factor
3357  *		we multiply by to adjust for the restriction selectivity assumes that
3358  *		the restriction clauses are independent of the grouping, which may not
3359  *		be a valid assumption, but it's hard to do better.
3360  *	5.  If there are Vars from multiple rels, we repeat step 4 for each such
3361  *		rel, and multiply the results together.
3362  * Note that rels not containing grouped Vars are ignored completely, as are
3363  * join clauses.  Such rels cannot increase the number of groups, and we
3364  * assume such clauses do not reduce the number either (somewhat bogus,
3365  * but we don't have the info to do better).
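 *
 * Illustrative example: given GROUP BY a, a + b on a single rel with
 * ndistinct(a) = 100 and ndistinct(b) = 50, step 2 reduces the list to
 * {a, b}; step 4 then takes 100 * 50 = 5000, clamps it to rel->tuples / 10
 * (but never below the larger per-Var ndistinct), and applies the
 * restriction-selectivity adjustment.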
3366  */
3367 double
3368 estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
3369 					List **pgset, EstimationInfo *estinfo)
3370 {
3371 	List	   *varinfos = NIL;
3372 	double		srf_multiplier = 1.0;
3373 	double		numdistinct;
3374 	ListCell   *l;
3375 	int			i;
3376 
3377 	/* Zero the estinfo output parameter, if non-NULL */
3378 	if (estinfo != NULL)
3379 		memset(estinfo, 0, sizeof(EstimationInfo));
3380 
3381 	/*
3382 	 * We don't ever want to return an estimate of zero groups, as that tends
3383 	 * to lead to division-by-zero and other unpleasantness.  The input_rows
3384 	 * estimate is usually already at least 1, but clamp it just in case it
3385 	 * isn't.
3386 	 */
3387 	input_rows = clamp_row_est(input_rows);
3388 
3389 	/*
3390 	 * If no grouping columns, there's exactly one group.  (This can't happen
3391 	 * for normal cases with GROUP BY or DISTINCT, but it is possible for
3392 	 * corner cases with set operations.)
3393 	 */
3394 	if (groupExprs == NIL || (pgset && list_length(*pgset) < 1))
3395 		return 1.0;
3396 
3397 	/*
3398 	 * Count groups derived from boolean grouping expressions.  For other
3399 	 * expressions, find the unique Vars used, treating an expression as a Var
3400 	 * if we can find stats for it.  For each one, record the statistical
3401 	 * estimate of number of distinct values (total in its table, without
3402 	 * regard for filtering).
3403 	 */
3404 	numdistinct = 1.0;
3405 
3406 	i = 0;
3407 	foreach(l, groupExprs)
3408 	{
3409 		Node	   *groupexpr = (Node *) lfirst(l);
3410 		double		this_srf_multiplier;
3411 		VariableStatData vardata;
3412 		List	   *varshere;
3413 		ListCell   *l2;
3414 
3415 		/* is expression in this grouping set? */
3416 		if (pgset && !list_member_int(*pgset, i++))
3417 			continue;
3418 
3419 		/*
3420 		 * Set-returning functions in grouping columns are a bit problematic.
3421 		 * The code below will effectively ignore their SRF nature and come up
3422 		 * with a numdistinct estimate as though they were scalar functions.
3423 		 * We compensate by scaling up the end result by the largest SRF
3424 		 * rowcount estimate.  (This will be an overestimate if the SRF
3425 		 * produces multiple copies of any output value, but it seems best to
3426 		 * assume the SRF's outputs are distinct.  In any case, it's probably
3427 		 * pointless to worry too much about this without much better
3428 		 * estimates for SRF output rowcounts than we have today.)
3429 		 */
3430 		this_srf_multiplier = expression_returns_set_rows(root, groupexpr);
3431 		if (srf_multiplier < this_srf_multiplier)
3432 			srf_multiplier = this_srf_multiplier;
3433 
3434 		/* Short-circuit for expressions returning boolean */
3435 		if (exprType(groupexpr) == BOOLOID)
3436 		{
3437 			numdistinct *= 2.0;
3438 			continue;
3439 		}
3440 
3441 		/*
3442 		 * If examine_variable is able to deduce anything about the GROUP BY
3443 		 * expression, treat it as a single variable even if it's really more
3444 		 * complicated.
3445 		 *
3446 		 * XXX This has the consequence that if there's a statistics object on
3447 		 * the expression, we don't split it into individual Vars. This
3448 		 * affects our selection of statistics in
3449 		 * estimate_multivariate_ndistinct, because it's probably better to
3450 		 * use a more accurate estimate for each expression and treat them as
3451 		 * independent, than to combine estimates for the extracted variables
3452 		 * when we don't know how those relate to the expressions.
3453 		 */
3454 		examine_variable(root, groupexpr, 0, &vardata);
3455 		if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3456 		{
3457 			varinfos = add_unique_group_var(root, varinfos,
3458 											groupexpr, &vardata);
3459 			ReleaseVariableStats(vardata);
3460 			continue;
3461 		}
3462 		ReleaseVariableStats(vardata);
3463 
3464 		/*
3465 		 * Else pull out the component Vars.  Handle PlaceHolderVars by
3466 		 * recursing into their arguments (effectively assuming that the
3467 		 * PlaceHolderVar doesn't change the number of groups, which boils
3468 		 * down to ignoring the possible addition of nulls to the result set).
3469 		 */
3470 		varshere = pull_var_clause(groupexpr,
3471 								   PVC_RECURSE_AGGREGATES |
3472 								   PVC_RECURSE_WINDOWFUNCS |
3473 								   PVC_RECURSE_PLACEHOLDERS);
3474 
3475 		/*
3476 		 * If we find any variable-free GROUP BY item, then either it is a
3477 		 * constant (and we can ignore it) or it contains a volatile function;
3478 		 * in the latter case we punt and assume that each input row will
3479 		 * yield a distinct group.
3480 		 */
3481 		if (varshere == NIL)
3482 		{
3483 			if (contain_volatile_functions(groupexpr))
3484 				return input_rows;
3485 			continue;
3486 		}
3487 
3488 		/*
3489 		 * Else add variables to varinfos list
3490 		 */
3491 		foreach(l2, varshere)
3492 		{
3493 			Node	   *var = (Node *) lfirst(l2);
3494 
3495 			examine_variable(root, var, 0, &vardata);
3496 			varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3497 			ReleaseVariableStats(vardata);
3498 		}
3499 	}
3500 
3501 	/*
3502 	 * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3503 	 * list.
3504 	 */
3505 	if (varinfos == NIL)
3506 	{
3507 		/* Apply SRF multiplier as we would do in the long path */
3508 		numdistinct *= srf_multiplier;
3509 		/* Round off */
3510 		numdistinct = ceil(numdistinct);
3511 		/* Guard against out-of-range answers */
3512 		if (numdistinct > input_rows)
3513 			numdistinct = input_rows;
3514 		if (numdistinct < 1.0)
3515 			numdistinct = 1.0;
3516 		return numdistinct;
3517 	}
3518 
3519 	/*
3520 	 * Group Vars by relation and estimate total numdistinct.
3521 	 *
3522 	 * For each iteration of the outer loop, we process the frontmost Var in
3523 	 * varinfos, plus all other Vars in the same relation.  We leave those
3524 	 * Vars out of the newvarinfos list used for the next iteration.  This is
3525 	 * the easiest way to group Vars of the same rel together.
3526 	 */
3527 	do
3528 	{
3529 		GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3530 		RelOptInfo *rel = varinfo1->rel;
3531 		double		reldistinct = 1;
3532 		double		relmaxndistinct = reldistinct;
3533 		int			relvarcount = 0;
3534 		List	   *newvarinfos = NIL;
3535 		List	   *relvarinfos = NIL;
3536 
3537 		/*
3538 		 * Split the list of varinfos in two - one for the current rel, one
3539 		 * for remaining Vars on other rels.
3540 		 */
3541 		relvarinfos = lappend(relvarinfos, varinfo1);
3542 		for_each_from(l, varinfos, 1)
3543 		{
3544 			GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3545 
3546 			if (varinfo2->rel == varinfo1->rel)
3547 			{
3548 				/* varinfos on current rel */
3549 				relvarinfos = lappend(relvarinfos, varinfo2);
3550 			}
3551 			else
3552 			{
3553 				/* not time to process varinfo2 yet */
3554 				newvarinfos = lappend(newvarinfos, varinfo2);
3555 			}
3556 		}
3557 
3558 		/*
3559 		 * Get the numdistinct estimate for the Vars of this rel.  We
3560 		 * iteratively search for multivariate n-distinct with maximum number
3561 		 * of vars; assuming that each var group is independent of the others,
3562 		 * we multiply them together.  Any remaining relvarinfos after no more
3563 		 * multivariate matches are found are assumed independent too, so
3564 		 * their individual ndistinct estimates are multiplied also.
3565 		 *
3566 		 * While iterating, count how many separate numdistinct values we
3567 	 * apply.  We apply a fudge factor below, but only if we multiplied
3568 	 * more than one such value.
3569 		 */
3570 		while (relvarinfos)
3571 		{
3572 			double		mvndistinct;
3573 
3574 			if (estimate_multivariate_ndistinct(root, rel, &relvarinfos,
3575 												&mvndistinct))
3576 			{
3577 				reldistinct *= mvndistinct;
3578 				if (relmaxndistinct < mvndistinct)
3579 					relmaxndistinct = mvndistinct;
3580 				relvarcount++;
3581 			}
3582 			else
3583 			{
3584 				foreach(l, relvarinfos)
3585 				{
3586 					GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3587 
3588 					reldistinct *= varinfo2->ndistinct;
3589 					if (relmaxndistinct < varinfo2->ndistinct)
3590 						relmaxndistinct = varinfo2->ndistinct;
3591 					relvarcount++;
3592 
3593 					/*
3594 					 * If varinfo2's isdefault flag is set, we must also set
3595 					 * the SELFLAG_USED_DEFAULT bit in the EstimationInfo.
3596 					 */
3597 					if (estinfo != NULL && varinfo2->isdefault)
3598 						estinfo->flags |= SELFLAG_USED_DEFAULT;
3600 				}
3601 
3602 				/* we're done with this relation */
3603 				relvarinfos = NIL;
3604 			}
3605 		}
3606 
3607 		/*
3608 		 * Sanity check --- don't divide by zero if empty relation.
3609 		 */
3610 		Assert(IS_SIMPLE_REL(rel));
3611 		if (rel->tuples > 0)
3612 		{
3613 			/*
3614 			 * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3615 			 * fudge factor is because the Vars are probably correlated but we
3616 			 * don't know by how much.  We should never clamp to less than the
3617 			 * largest ndistinct value for any of the Vars, though, since
3618 			 * there will surely be at least that many groups.
3619 			 */
3620 			double		clamp = rel->tuples;
3621 
3622 			if (relvarcount > 1)
3623 			{
3624 				clamp *= 0.1;
3625 				if (clamp < relmaxndistinct)
3626 				{
3627 					clamp = relmaxndistinct;
3628 					/* for sanity in case some ndistinct is too large: */
3629 					if (clamp > rel->tuples)
3630 						clamp = rel->tuples;
3631 				}
3632 			}
3633 			if (reldistinct > clamp)
3634 				reldistinct = clamp;
3635 
3636 			/*
3637 			 * Update the estimate based on the restriction selectivity,
3638 			 * guarding against division by zero when reldistinct is zero.
3639 			 * Also skip this if we know that we are returning all rows.
3640 			 */
3641 			if (reldistinct > 0 && rel->rows < rel->tuples)
3642 			{
3643 				/*
3644 				 * Given a table containing N rows with n distinct values in a
3645 				 * uniform distribution, if we select p rows at random then
3646 				 * the expected number of distinct values selected is
3647 				 *
3648 				 * n * (1 - product((N-N/n-i)/(N-i), i=0..p-1))
3649 				 *
3650 				 * = n * (1 - (N-N/n)! / (N-N/n-p)! * (N-p)! / N!)
3651 				 *
3652 				 * See "Approximating block accesses in database
3653 				 * organizations", S. B. Yao, Communications of the ACM,
3654 				 * Volume 20 Issue 4, April 1977 Pages 260-261.
3655 				 *
3656 				 * Alternatively, re-arranging the terms from the factorials,
3657 				 * this may be written as
3658 				 *
3659 				 * n * (1 - product((N-p-i)/(N-i), i=0..N/n-1))
3660 				 *
3661 				 * This form of the formula is more efficient to compute in
3662 				 * the common case where p is larger than N/n.  Additionally,
3663 				 * as pointed out by Dell'Era, if i << N for all terms in the
3664 				 * product, it can be approximated by
3665 				 *
3666 				 * n * (1 - ((N-p)/N)^(N/n))
3667 				 *
3668 				 * See "Expected distinct values when selecting from a bag
3669 				 * without replacement", Alberto Dell'Era,
3670 				 * http://www.adellera.it/investigations/distinct_balls/.
3671 				 *
3672 				 * The condition i << N is equivalent to n >> 1, so this is a
3673 				 * good approximation when the number of distinct values in
3674 				 * the table is large.  It turns out that this formula also
3675 				 * works well even when n is small.
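				 *
				 * Worked example (illustrative numbers): with N = 1000
				 * tuples, n = 100 distinct values, and p = 100 selected rows,
				 * the approximation gives 100 * (1 - (900/1000)^10) ~= 65
				 * expected distinct values.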
3676 				 */
3677 				reldistinct *=
3678 					(1 - pow((rel->tuples - rel->rows) / rel->tuples,
3679 							 rel->tuples / reldistinct));
3680 			}
3681 			reldistinct = clamp_row_est(reldistinct);
3682 
3683 			/*
3684 			 * Update estimate of total distinct groups.
3685 			 */
3686 			numdistinct *= reldistinct;
3687 		}
3688 
3689 		varinfos = newvarinfos;
3690 	} while (varinfos != NIL);
3691 
3692 	/* Now we can account for the effects of any SRFs */
3693 	numdistinct *= srf_multiplier;
3694 
3695 	/* Round off */
3696 	numdistinct = ceil(numdistinct);
3697 
3698 	/* Guard against out-of-range answers */
3699 	if (numdistinct > input_rows)
3700 		numdistinct = input_rows;
3701 	if (numdistinct < 1.0)
3702 		numdistinct = 1.0;
3703 
3704 	return numdistinct;
3705 }
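
/*
 * Usage sketch (hypothetical caller, not part of this file): estimating the
 * number of groups for a GROUP BY, and noting whether any default ndistinct
 * estimates were involved.
 */
#ifdef SELFUNCS_EXAMPLES
static double
example_estimate_groups(PlannerInfo *root, List *groupExprs, double input_rows)
{
	EstimationInfo estinfo;
	double		ngroups;

	ngroups = estimate_num_groups(root, groupExprs, input_rows,
								  NULL /* no grouping sets */ , &estinfo);

	/* A caller might treat the estimate as less trustworthy here */
	if (estinfo.flags & SELFLAG_USED_DEFAULT)
		ngroups = Min(ngroups, input_rows / 2);

	return ngroups;
}
#endif							/* SELFUNCS_EXAMPLES */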
3706 
3707 /*
3708  * Estimate hash bucket statistics when the specified expression is used
3709  * as a hash key for the given number of buckets.
3710  *
3711  * This attempts to determine two values:
3712  *
3713  * 1. The frequency of the most common value of the expression (returns
3714  * zero into *mcv_freq if we can't get that).
3715  *
3716  * 2. The "bucketsize fraction", ie, average number of entries in a bucket
3717  * divided by total tuples in relation.
3718  *
3719  * XXX This is really pretty bogus since we're effectively assuming that the
3720  * distribution of hash keys will be the same after applying restriction
3721  * clauses as it was in the underlying relation.  However, we are not nearly
3722  * smart enough to figure out how the restrict clauses might change the
3723  * distribution, so this will have to do for now.
3724  *
3725  * We are passed the number of buckets the executor will use for the given
3726  * input relation.  If the data were perfectly distributed, with the same
3727  * number of tuples going into each available bucket, then the bucketsize
3728  * fraction would be 1/nbuckets.  But this happy state of affairs will occur
3729  * only if (a) there are at least nbuckets distinct data values, and (b)
3730  * we have a not-too-skewed data distribution.  Otherwise the buckets will
3731  * be nonuniformly occupied.  If the other relation in the join has a key
3732  * distribution similar to this one's, then the most-loaded buckets are
3733  * exactly those that will be probed most often.  Therefore, the "average"
3734  * bucket size for costing purposes should really be taken as something close
3735  * to the "worst case" bucket size.  We try to estimate this by adjusting the
3736  * fraction if there are too few distinct data values, and then scaling up
3737  * by the ratio of the most common value's frequency to the average frequency.
3738  *
3739  * If no statistics are available, use a default estimate of 0.1.  This will
3740  * discourage use of a hash rather strongly if the inner relation is large,
3741  * which is what we want.  We do not want to hash unless we know that the
3742  * inner rel is well-dispersed (or the alternatives seem much worse).
3743  *
3744  * The caller should also check that the mcv_freq is not so large that the
3745  * most common value would by itself require an impractically large bucket.
3746  * In a hash join, the executor can split buckets if they get too big, but
3747  * obviously that doesn't help for a bucket that contains many duplicates of
3748  * the same value.
3749  */
3750 void
3751 estimate_hash_bucket_stats(PlannerInfo *root, Node *hashkey, double nbuckets,
3752 						   Selectivity *mcv_freq,
3753 						   Selectivity *bucketsize_frac)
3754 {
3755 	VariableStatData vardata;
3756 	double		estfract,
3757 				ndistinct,
3758 				stanullfrac,
3759 				avgfreq;
3760 	bool		isdefault;
3761 	AttStatsSlot sslot;
3762 
3763 	examine_variable(root, hashkey, 0, &vardata);
3764 
3765 	/* Look up the frequency of the most common value, if available */
3766 	*mcv_freq = 0.0;
3767 
3768 	if (HeapTupleIsValid(vardata.statsTuple))
3769 	{
3770 		if (get_attstatsslot(&sslot, vardata.statsTuple,
3771 							 STATISTIC_KIND_MCV, InvalidOid,
3772 							 ATTSTATSSLOT_NUMBERS))
3773 		{
3774 			/*
3775 			 * The first MCV stat is for the most common value.
3776 			 */
3777 			if (sslot.nnumbers > 0)
3778 				*mcv_freq = sslot.numbers[0];
3779 			free_attstatsslot(&sslot);
3780 		}
3781 	}
3782 
3783 	/* Get number of distinct values */
3784 	ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3785 
3786 	/*
3787 	 * If ndistinct isn't real, punt.  We normally return 0.1, but if the
3788 	 * mcv_freq is known to be even higher than that, use it instead.
3789 	 */
3790 	if (isdefault)
3791 	{
3792 		*bucketsize_frac = (Selectivity) Max(0.1, *mcv_freq);
3793 		ReleaseVariableStats(vardata);
3794 		return;
3795 	}
3796 
3797 	/* Get fraction that are null */
3798 	if (HeapTupleIsValid(vardata.statsTuple))
3799 	{
3800 		Form_pg_statistic stats;
3801 
3802 		stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3803 		stanullfrac = stats->stanullfrac;
3804 	}
3805 	else
3806 		stanullfrac = 0.0;
3807 
3808 	/* Compute avg freq of all distinct data values in raw relation */
3809 	avgfreq = (1.0 - stanullfrac) / ndistinct;
3810 
3811 	/*
3812 	 * Adjust ndistinct to account for restriction clauses.  Observe we are
3813 	 * assuming that the data distribution is affected uniformly by the
3814 	 * restriction clauses!
3815 	 *
3816 	 * XXX Possibly better way, but much more expensive: multiply by
3817 	 * selectivity of rel's restriction clauses that mention the target Var.
3818 	 */
3819 	if (vardata.rel && vardata.rel->tuples > 0)
3820 	{
3821 		ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3822 		ndistinct = clamp_row_est(ndistinct);
3823 	}
3824 
3825 	/*
3826 	 * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3827 	 * number of buckets is less than the expected number of distinct values;
3828 	 * otherwise it is 1/ndistinct.
3829 	 */
3830 	if (ndistinct > nbuckets)
3831 		estfract = 1.0 / nbuckets;
3832 	else
3833 		estfract = 1.0 / ndistinct;
3834 
3835 	/*
3836 	 * Adjust estimated bucketsize upward to account for skewed distribution.
3837 	 */
3838 	if (avgfreq > 0.0 && *mcv_freq > avgfreq)
3839 		estfract *= *mcv_freq / avgfreq;
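	/*
	 * Illustrative arithmetic (hypothetical stats): with ndistinct = 50,
	 * nbuckets = 1024, stanullfrac = 0, and mcv_freq = 0.2, we start from
	 * estfract = 1/50 = 0.02 and scale by 0.2 / 0.02 = 10, giving a
	 * worst-case-leaning bucketsize fraction of 0.2.
	 */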
3840 
3841 	/*
3842 	 * Clamp bucketsize to sane range (the above adjustment could easily
3843 	 * produce an out-of-range result).  We set the lower bound a little above
3844 	 * zero, since zero isn't a very sane result.
3845 	 */
3846 	if (estfract < 1.0e-6)
3847 		estfract = 1.0e-6;
3848 	else if (estfract > 1.0)
3849 		estfract = 1.0;
3850 
3851 	*bucketsize_frac = (Selectivity) estfract;
3852 
3853 	ReleaseVariableStats(vardata);
3854 }
3855 
3856 /*
3857  * estimate_hashagg_tablesize
3858  *	  estimate the number of bytes that a hash aggregate hashtable will
3859  *	  require based on the agg_costs, path width and number of groups.
3860  *
3861  * We return the result as "double" to forestall any possible overflow
3862  * problem in the multiplication by dNumGroups.
3863  *
3864  * XXX this may be over-estimating the size now that hashagg knows to omit
3865  * unneeded columns from the hashtable.  Also for mixed-mode grouping sets,
3866  * grouping columns not in the hashed set are counted here even though hashagg
3867  * won't store them.  Is this a problem?
3868  */
3869 double
3870 estimate_hashagg_tablesize(PlannerInfo *root, Path *path,
3871 						   const AggClauseCosts *agg_costs, double dNumGroups)
3872 {
3873 	Size		hashentrysize;
3874 
3875 	hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
3876 										path->pathtarget->width,
3877 										agg_costs->transitionSpace);
3878 
3879 	/*
3880 	 * Note that this disregards the effect of fill-factor and growth policy
3881 	 * of the hash table.  That's probably ok, given that the default
3882 	 * fill-factor is relatively high.  It'd be hard to meaningfully factor in
3883 	 * "double-in-size" growth policies here.
3884 	 */
3885 	return hashentrysize * dNumGroups;
3886 }
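
/*
 * Illustrative arithmetic: with a (hypothetical) hash entry size of 56 bytes
 * and dNumGroups = 1e6, the estimate is about 56 MB, which callers can then
 * compare against the memory budget allowed for hash aggregation.
 */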
3887 
3888 
3889 /*-------------------------------------------------------------------------
3890  *
3891  * Support routines
3892  *
3893  *-------------------------------------------------------------------------
3894  */
3895 
3896 /*
3897  * Find applicable ndistinct statistics for the given list of VarInfos (which
3898  * must all belong to the given rel), and update *ndistinct to the estimate of
3899  * the MVNDistinctItem that best matches.  If a match is found, *varinfos is
3900  * updated to remove the matched varinfos.
3901  *
3902  * Varinfos that aren't for simple Vars are ignored.
3903  *
3904  * Return true if we're able to find a match, false otherwise.
3905  */
3906 static bool
3907 estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
3908 								List **varinfos, double *ndistinct)
3909 {
3910 	ListCell   *lc;
3911 	int			nmatches_vars;
3912 	int			nmatches_exprs;
3913 	Oid			statOid = InvalidOid;
3914 	MVNDistinct *stats;
3915 	StatisticExtInfo *matched_info = NULL;
3916 
3917 	/* bail out immediately if the table has no extended statistics */
3918 	if (!rel->statlist)
3919 		return false;
3920 
3921 	/* look for the ndistinct statistics object matching the most vars */
3922 	nmatches_vars = 0;			/* we require at least two matches */
3923 	nmatches_exprs = 0;
3924 	foreach(lc, rel->statlist)
3925 	{
3926 		ListCell   *lc2;
3927 		StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc);
3928 		int			nshared_vars = 0;
3929 		int			nshared_exprs = 0;
3930 
3931 		/* skip statistics of other kinds */
3932 		if (info->kind != STATS_EXT_NDISTINCT)
3933 			continue;
3934 
3935 		/*
3936 		 * Determine how many expressions (and variables in non-matched
3937 		 * expressions) match. We'll then use these numbers to pick the
3938 		 * statistics object that best matches the clauses.
3939 		 */
3940 		foreach(lc2, *varinfos)
3941 		{
3942 			ListCell   *lc3;
3943 			GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc2);
3944 			AttrNumber	attnum;
3945 
3946 			Assert(varinfo->rel == rel);
3947 
3948 			/* simple Var, search in statistics keys directly */
3949 			if (IsA(varinfo->var, Var))
3950 			{
3951 				attnum = ((Var *) varinfo->var)->varattno;
3952 
3953 				/*
3954 				 * Ignore system attributes - we don't support statistics on
3955 				 * them, so can't match them (and it'd fail as the values are
3956 				 * negative).
3957 				 */
3958 				if (!AttrNumberIsForUserDefinedAttr(attnum))
3959 					continue;
3960 
3961 				if (bms_is_member(attnum, info->keys))
3962 					nshared_vars++;
3963 
3964 				continue;
3965 			}
3966 
3967 			/* expression - see if it's in the statistics object */
3968 			foreach(lc3, info->exprs)
3969 			{
3970 				Node	   *expr = (Node *) lfirst(lc3);
3971 
3972 				if (equal(varinfo->var, expr))
3973 				{
3974 					nshared_exprs++;
3975 					break;
3976 				}
3977 			}
3978 		}
3979 
3980 		if (nshared_vars + nshared_exprs < 2)
3981 			continue;
3982 
3983 		/*
3984 		 * Does this statistics object match more columns than the currently
3985 		 * best object?  If so, use this one instead.
3986 		 *
3987 		 * XXX This should break ties using the name of the object, or
3988 		 * something like that, to make the outcome stable.
3989 		 */
3990 		if (nshared_exprs > nmatches_exprs ||
3991 			(nshared_exprs == nmatches_exprs && nshared_vars > nmatches_vars))
3992 		{
3993 			statOid = info->statOid;
3994 			nmatches_vars = nshared_vars;
3995 			nmatches_exprs = nshared_exprs;
3996 			matched_info = info;
3997 		}
3998 	}
3999 
4000 	/* No match? */
4001 	if (statOid == InvalidOid)
4002 		return false;
4003 
4004 	Assert(nmatches_vars + nmatches_exprs > 1);
4005 
4006 	stats = statext_ndistinct_load(statOid);
4007 
4008 	/*
4009 	 * If we have a match, search it for the specific item that matches (there
4010 	 * must be one), and construct the output values.
4011 	 */
4012 	if (stats)
4013 	{
4014 		int			i;
4015 		List	   *newlist = NIL;
4016 		MVNDistinctItem *item = NULL;
4017 		ListCell   *lc2;
4018 		Bitmapset  *matched = NULL;
4019 		AttrNumber	attnum_offset;
4020 
4021 		/*
4022 		 * How much do we need to offset the attnums?  If there are no
4023 		 * expressions, no offset is needed.  Otherwise offset by enough to
4024 		 * map the lowest attnum (which is minus the number of expressions) to 1.
4025 		 */
4026 		if (matched_info->exprs)
4027 			attnum_offset = (list_length(matched_info->exprs) + 1);
4028 		else
4029 			attnum_offset = 0;
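		/*
		 * For example (illustrative): with 2 expressions, attnum_offset = 3;
		 * expression #0 (attnum -1) maps to 2, expression #1 (attnum -2)
		 * maps to 1, and a plain column with attnum 1 maps to 4 --- all
		 * positive, as required for bitmapset membership.
		 */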
4030 
4031 		/* see what actually matched */
4032 		foreach(lc2, *varinfos)
4033 		{
4034 			ListCell   *lc3;
4035 			int			idx;
4036 			bool		found = false;
4037 
4038 			GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc2);
4039 
4040 			/*
4041 			 * Process a simple Var by matching it to the statistics keys
4042 			 * directly.  If it instead matches an expression, we'll try
4043 			 * that later.
4044 			 */
4045 			if (IsA(varinfo->var, Var))
4046 			{
4047 				AttrNumber	attnum = ((Var *) varinfo->var)->varattno;
4048 
4049 				/*
4050 				 * Ignore expressions on system attributes. Can't rely on the
4051 				 * bms check for negative values.
4052 				 */
4053 				if (!AttrNumberIsForUserDefinedAttr(attnum))
4054 					continue;
4055 
4056 				/* Is the variable covered by the statistics object? */
4057 				if (!bms_is_member(attnum, matched_info->keys))
4058 					continue;
4059 
4060 				attnum = attnum + attnum_offset;
4061 
4062 				/* ensure sufficient offset */
4063 				Assert(AttrNumberIsForUserDefinedAttr(attnum));
4064 
4065 				matched = bms_add_member(matched, attnum);
4066 
4067 				found = true;
4068 			}
4069 
4070 			/*
4071 			 * XXX Maybe we should allow searching the expressions even if we
4072 			 * found an attribute matching the expression? That would handle
4073 			 * trivial expressions like "(a)" but it seems fairly useless.
4074 			 */
4075 			if (found)
4076 				continue;
4077 
4078 			/* expression - see if it's in the statistics object */
4079 			idx = 0;
4080 			foreach(lc3, matched_info->exprs)
4081 			{
4082 				Node	   *expr = (Node *) lfirst(lc3);
4083 
4084 				if (equal(varinfo->var, expr))
4085 				{
4086 					AttrNumber	attnum = -(idx + 1);
4087 
4088 					attnum = attnum + attnum_offset;
4089 
4090 					/* ensure sufficient offset */
4091 					Assert(AttrNumberIsForUserDefinedAttr(attnum));
4092 
4093 					matched = bms_add_member(matched, attnum);
4094 
4095 					/* there should be just one matching expression */
4096 					break;
4097 				}
4098 
4099 				idx++;
4100 			}
4101 		}
4102 
4103 		/* Find the specific item that exactly matches the combination */
4104 		for (i = 0; i < stats->nitems; i++)
4105 		{
4106 			int			j;
4107 			MVNDistinctItem *tmpitem = &stats->items[i];
4108 
4109 			if (tmpitem->nattributes != bms_num_members(matched))
4110 				continue;
4111 
4112 			/* assume it's the right item */
4113 			item = tmpitem;
4114 
4115 			/* check that all item attributes/expressions fit the match */
4116 			for (j = 0; j < tmpitem->nattributes; j++)
4117 			{
4118 				AttrNumber	attnum = tmpitem->attributes[j];
4119 
4120 				/*
4121 				 * Thanks to how we constructed the matched bitmap above, we
4122 				 * can just offset all attnums the same way.
4123 				 */
4124 				attnum = attnum + attnum_offset;
4125 
4126 				if (!bms_is_member(attnum, matched))
4127 				{
4128 					/* nah, it's not this item */
4129 					item = NULL;
4130 					break;
4131 				}
4132 			}
4133 
4134 			/*
4135 			 * If the item has all the matched attributes, we know it's the
4136 			 * right one --- there can't be a better one matching more.
4137 			 */
4138 			if (item)
4139 				break;
4140 		}
4141 
4142 		/*
4143 		 * Make sure we found an item. There has to be one, because ndistinct
4144 		 * statistics includes all combinations of attributes.
4145 		 */
4146 		if (!item)
4147 			elog(ERROR, "corrupt MVNDistinct entry");
4148 
4149 		/* Form the output varinfo list, keeping only unmatched ones */
4150 		foreach(lc, *varinfos)
4151 		{
4152 			GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
4153 			ListCell   *lc3;
4154 			bool		found = false;
4155 
4156 			/*
4157 			 * Let's look at plain variables first, because it's the most
4158 			 * common case and the check is quite cheap.  We can simply get
4159 			 * the attnum and check (with an offset) the matched bitmap.
4160 			 */
4161 			if (IsA(varinfo->var, Var))
4162 			{
4163 				AttrNumber	attnum = ((Var *) varinfo->var)->varattno;
4164 
4165 				/*
4166 				 * If it's a system attribute, we're done. We don't support
4167 				 * extended statistics on system attributes, so it's clearly
4168 				 * not matched. Just keep the expression and continue.
4169 				 */
4170 				if (!AttrNumberIsForUserDefinedAttr(attnum))
4171 				{
4172 					newlist = lappend(newlist, varinfo);
4173 					continue;
4174 				}
4175 
4176 				/* apply the same offset as above */
4177 				attnum += attnum_offset;
4178 
4179 				/* if it's not matched, keep the varinfo */
4180 				if (!bms_is_member(attnum, matched))
4181 					newlist = lappend(newlist, varinfo);
4182 
4183 				/* The rest of the loop deals with complex expressions. */
4184 				continue;
4185 			}
4186 
4187 			/*
4188 			 * Process complex expressions, not just simple Vars.
4189 			 *
4190 			 * First, we search for an exact match of an expression.  If we
4191 			 * find one, we can just discard the whole GroupVarInfo, with all
4192 			 * the variables we extracted from it.
4193 			 *
4194 			 * Otherwise we inspect the individual vars, and try matching
4195 			 * them to variables in the item.
4196 			 */
4197 			foreach(lc3, matched_info->exprs)
4198 			{
4199 				Node	   *expr = (Node *) lfirst(lc3);
4200 
4201 				if (equal(varinfo->var, expr))
4202 				{
4203 					found = true;
4204 					break;
4205 				}
4206 			}
4207 
4208 			/* found exact match, skip */
4209 			if (found)
4210 				continue;
4211 
4212 			newlist = lappend(newlist, varinfo);
4213 		}
4214 
4215 		*varinfos = newlist;
4216 		*ndistinct = item->ndistinct;
4217 		return true;
4218 	}
4219 
4220 	return false;
4221 }
4222 
4223 /*
4224  * convert_to_scalar
4225  *	  Convert non-NULL values of the indicated types to the comparison
4226  *	  scale needed by scalarineqsel().
4227  *	  Returns "true" if successful.
4228  *
4229  * XXX this routine is a hack: ideally we should look up the conversion
4230  * subroutines in pg_type.
4231  *
4232  * All numeric datatypes are simply converted to their equivalent
4233  * "double" values.  (NUMERIC values that are outside the range of "double"
4234  * are clamped to +/- HUGE_VAL.)
4235  *
4236  * String datatypes are converted by convert_string_to_scalar(),
4237  * which is explained below.  The reason why this routine deals with
4238  * three values at a time, not just one, is that we need it for strings.
4239  *
4240  * The bytea datatype is just enough different from strings that it has
4241  * to be treated separately.
4242  *
4243  * The several datatypes representing absolute times are all converted
4244  * to Timestamp, which is actually an int64, and then we promote that to
4245  * a double.  Note this will give correct results even for the "special"
4246  * values of Timestamp, since those are chosen to compare correctly;
4247  * see timestamp_cmp.
4248  *
4249  * The several datatypes representing relative times (intervals) are all
4250  * converted to measurements expressed in seconds.
4251  */
4252 static bool
4253 convert_to_scalar(Datum value, Oid valuetypid, Oid collid, double *scaledvalue,
4254 				  Datum lobound, Datum hibound, Oid boundstypid,
4255 				  double *scaledlobound, double *scaledhibound)
4256 {
4257 	bool		failure = false;
4258 
4259 	/*
4260 	 * Both the valuetypid and the boundstypid should exactly match the
4261 	 * declared input type(s) of the operator we are invoked for.  However,
4262 	 * extensions might try to use scalarineqsel as estimator for operators
4263 	 * with input type(s) we don't handle here; in such cases, we want to
4264 	 * return false, not fail.  In any case, we mustn't assume that valuetypid
4265 	 * and boundstypid are identical.
4266 	 *
4267 	 * XXX The histogram we are interpolating between points of could belong
4268 	 * to a column that's only binary-compatible with the declared type. In
4269 	 * essence we are assuming that the semantics of binary-compatible types
4270 	 * are enough alike that we can use a histogram generated with one type's
4271 	 * operators to estimate selectivity for the other's.  This is outright
4272 	 * wrong in some cases --- in particular signed versus unsigned
4273 	 * interpretation could trip us up.  But it's useful enough in the
4274 	 * majority of cases that we do it anyway.  Should think about more
4275 	 * rigorous ways to do it.
4276 	 */
4277 	switch (valuetypid)
4278 	{
4279 			/*
4280 			 * Built-in numeric types
4281 			 */
4282 		case BOOLOID:
4283 		case INT2OID:
4284 		case INT4OID:
4285 		case INT8OID:
4286 		case FLOAT4OID:
4287 		case FLOAT8OID:
4288 		case NUMERICOID:
4289 		case OIDOID:
4290 		case REGPROCOID:
4291 		case REGPROCEDUREOID:
4292 		case REGOPEROID:
4293 		case REGOPERATOROID:
4294 		case REGCLASSOID:
4295 		case REGTYPEOID:
4296 		case REGCONFIGOID:
4297 		case REGDICTIONARYOID:
4298 		case REGROLEOID:
4299 		case REGNAMESPACEOID:
4300 			*scaledvalue = convert_numeric_to_scalar(value, valuetypid,
4301 													 &failure);
4302 			*scaledlobound = convert_numeric_to_scalar(lobound, boundstypid,
4303 													   &failure);
4304 			*scaledhibound = convert_numeric_to_scalar(hibound, boundstypid,
4305 													   &failure);
4306 			return !failure;
4307 
4308 			/*
4309 			 * Built-in string types
4310 			 */
4311 		case CHAROID:
4312 		case BPCHAROID:
4313 		case VARCHAROID:
4314 		case TEXTOID:
4315 		case NAMEOID:
4316 			{
4317 				char	   *valstr = convert_string_datum(value, valuetypid,
4318 														  collid, &failure);
4319 				char	   *lostr = convert_string_datum(lobound, boundstypid,
4320 														 collid, &failure);
4321 				char	   *histr = convert_string_datum(hibound, boundstypid,
4322 														 collid, &failure);
4323 
4324 				/*
4325 				 * Bail out if any of the values is not of string type.  We
4326 				 * might leak converted strings for the other value(s), but
4327 				 * that's not worth troubling over.
4328 				 */
4329 				if (failure)
4330 					return false;
4331 
4332 				convert_string_to_scalar(valstr, scaledvalue,
4333 										 lostr, scaledlobound,
4334 										 histr, scaledhibound);
4335 				pfree(valstr);
4336 				pfree(lostr);
4337 				pfree(histr);
4338 				return true;
4339 			}
4340 
4341 			/*
4342 			 * Built-in bytea type
4343 			 */
4344 		case BYTEAOID:
4345 			{
4346 				/* We only support bytea vs bytea comparison */
4347 				if (boundstypid != BYTEAOID)
4348 					return false;
4349 				convert_bytea_to_scalar(value, scaledvalue,
4350 										lobound, scaledlobound,
4351 										hibound, scaledhibound);
4352 				return true;
4353 			}
4354 
4355 			/*
4356 			 * Built-in time types
4357 			 */
4358 		case TIMESTAMPOID:
4359 		case TIMESTAMPTZOID:
4360 		case DATEOID:
4361 		case INTERVALOID:
4362 		case TIMEOID:
4363 		case TIMETZOID:
4364 			*scaledvalue = convert_timevalue_to_scalar(value, valuetypid,
4365 													   &failure);
4366 			*scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid,
4367 														 &failure);
4368 			*scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid,
4369 														 &failure);
4370 			return !failure;
4371 
4372 			/*
4373 			 * Built-in network types
4374 			 */
4375 		case INETOID:
4376 		case CIDROID:
4377 		case MACADDROID:
4378 		case MACADDR8OID:
4379 			*scaledvalue = convert_network_to_scalar(value, valuetypid,
4380 													 &failure);
4381 			*scaledlobound = convert_network_to_scalar(lobound, boundstypid,
4382 													   &failure);
4383 			*scaledhibound = convert_network_to_scalar(hibound, boundstypid,
4384 													   &failure);
4385 			return !failure;
4386 	}
4387 	/* Don't know how to convert */
4388 	*scaledvalue = *scaledlobound = *scaledhibound = 0;
4389 	return false;
4390 }
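
/*
 * For example, a value of an extension-defined type (not listed above) simply
 * fails the conversion: we return false, and the histogram interpolation code
 * in ineq_histogram_selectivity falls back to assuming the probe value lies
 * in the middle of its bin.
 */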
4391 
4392 /*
4393  * Do convert_to_scalar()'s work for any numeric data type.
4394  *
4395  * On failure (e.g., unsupported typid), set *failure to true;
4396  * otherwise, that variable is not changed.
4397  */
4398 static double
4399 convert_numeric_to_scalar(Datum value, Oid typid, bool *failure)
4400 {
4401 	switch (typid)
4402 	{
4403 		case BOOLOID:
4404 			return (double) DatumGetBool(value);
4405 		case INT2OID:
4406 			return (double) DatumGetInt16(value);
4407 		case INT4OID:
4408 			return (double) DatumGetInt32(value);
4409 		case INT8OID:
4410 			return (double) DatumGetInt64(value);
4411 		case FLOAT4OID:
4412 			return (double) DatumGetFloat4(value);
4413 		case FLOAT8OID:
4414 			return (double) DatumGetFloat8(value);
4415 		case NUMERICOID:
4416 			/* Note: out-of-range values will be clamped to +-HUGE_VAL */
4417 			return (double)
4418 				DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
4419 												   value));
4420 		case OIDOID:
4421 		case REGPROCOID:
4422 		case REGPROCEDUREOID:
4423 		case REGOPEROID:
4424 		case REGOPERATOROID:
4425 		case REGCLASSOID:
4426 		case REGTYPEOID:
4427 		case REGCONFIGOID:
4428 		case REGDICTIONARYOID:
4429 		case REGROLEOID:
4430 		case REGNAMESPACEOID:
4431 			/* we can treat OIDs as integers... */
4432 			return (double) DatumGetObjectId(value);
4433 	}
4434 
4435 	*failure = true;
4436 	return 0;
4437 }
4438 
4439 /*
4440  * Do convert_to_scalar()'s work for any character-string data type.
4441  *
4442  * String datatypes are converted to a scale that ranges from 0 to 1,
4443  * where we visualize the bytes of the string as fractional digits.
4444  *
4445  * We do not want the base to be 256, however, since that tends to
4446  * generate inflated selectivity estimates; few databases will have
4447  * occurrences of all 256 possible byte values at each position.
4448  * Instead, use the smallest and largest byte values seen in the bounds
4449  * as the estimated range for each byte, after some fudging to deal with
4450  * the fact that we probably aren't going to see the full range that way.
4451  *
4452  * An additional refinement is that we discard any common prefix of the
4453  * three strings before computing the scaled values.  This allows us to
4454  * "zoom in" when we encounter a narrow data range.  An example is a phone
4455  * number database where all the values begin with the same area code.
4456  * (Actually, the bounds will be adjacent histogram-bin-boundary values,
4457  * so this is more likely to happen than you might think.)
4458  */
4459 static void
4460 convert_string_to_scalar(char *value,
4461 						 double *scaledvalue,
4462 						 char *lobound,
4463 						 double *scaledlobound,
4464 						 char *hibound,
4465 						 double *scaledhibound)
4466 {
4467 	int			rangelo,
4468 				rangehi;
4469 	char	   *sptr;
4470 
4471 	rangelo = rangehi = (unsigned char) hibound[0];
4472 	for (sptr = lobound; *sptr; sptr++)
4473 	{
4474 		if (rangelo > (unsigned char) *sptr)
4475 			rangelo = (unsigned char) *sptr;
4476 		if (rangehi < (unsigned char) *sptr)
4477 			rangehi = (unsigned char) *sptr;
4478 	}
4479 	for (sptr = hibound; *sptr; sptr++)
4480 	{
4481 		if (rangelo > (unsigned char) *sptr)
4482 			rangelo = (unsigned char) *sptr;
4483 		if (rangehi < (unsigned char) *sptr)
4484 			rangehi = (unsigned char) *sptr;
4485 	}
4486 	/* If range includes any upper-case ASCII chars, make it include all */
4487 	if (rangelo <= 'Z' && rangehi >= 'A')
4488 	{
4489 		if (rangelo > 'A')
4490 			rangelo = 'A';
4491 		if (rangehi < 'Z')
4492 			rangehi = 'Z';
4493 	}
4494 	/* Ditto lower-case */
4495 	if (rangelo <= 'z' && rangehi >= 'a')
4496 	{
4497 		if (rangelo > 'a')
4498 			rangelo = 'a';
4499 		if (rangehi < 'z')
4500 			rangehi = 'z';
4501 	}
4502 	/* Ditto digits */
4503 	if (rangelo <= '9' && rangehi >= '0')
4504 	{
4505 		if (rangelo > '0')
4506 			rangelo = '0';
4507 		if (rangehi < '9')
4508 			rangehi = '9';
4509 	}
4510 
4511 	/*
4512 	 * If the range includes fewer than 10 chars, assume we haven't got
4513 	 * enough data, and make it include the regular ASCII set.
4514 	 */
4515 	if (rangehi - rangelo < 9)
4516 	{
4517 		rangelo = ' ';
4518 		rangehi = 127;
4519 	}
4520 
4521 	/*
4522 	 * Now strip any common prefix of the three strings.
4523 	 */
4524 	while (*lobound)
4525 	{
4526 		if (*lobound != *hibound || *lobound != *value)
4527 			break;
4528 		lobound++, hibound++, value++;
4529 	}
4530 
4531 	/*
4532 	 * Now we can do the conversions.
4533 	 */
4534 	*scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
4535 	*scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
4536 	*scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
4537 }
4538 
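/*
 * Convert one string to the 0-to-1 scale, treating its bytes as fractional
 * digits in base (rangehi - rangelo + 1).  Worked example (illustrative):
 * with a digits-only range, rangelo = '0' and rangehi = '9', the base is 10
 * and the string "42" yields 4/10 + 2/100 = 0.42.
 */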
4539 static double
4540 convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
4541 {
4542 	int			slen = strlen(value);
4543 	double		num,
4544 				denom,
4545 				base;
4546 
4547 	if (slen <= 0)
4548 		return 0.0;				/* empty string has scalar value 0 */
4549 
4550 	/*
4551 	 * There seems little point in considering more than a dozen bytes from
4552 	 * the string.  Since base is at least 10, that will give us nominal
4553 	 * resolution of at least 12 decimal digits, which is surely far more
4554 	 * precision than this estimation technique has got anyway (especially in
4555 	 * non-C locales).  Also, even with the maximum possible base of 256, this
4556 	 * ensures denom cannot grow larger than 256^13 = 2.03e31, which will not
4557 	 * overflow on any known machine.
4558 	 */
4559 	if (slen > 12)
4560 		slen = 12;
4561 
4562 	/* Convert initial characters to fraction */
4563 	base = rangehi - rangelo + 1;
4564 	num = 0.0;
4565 	denom = base;
4566 	while (slen-- > 0)
4567 	{
4568 		int			ch = (unsigned char) *value++;
4569 
4570 		if (ch < rangelo)
4571 			ch = rangelo - 1;
4572 		else if (ch > rangehi)
4573 			ch = rangehi + 1;
4574 		num += ((double) (ch - rangelo)) / denom;
4575 		denom *= base;
4576 	}
4577 
4578 	return num;
4579 }
4580 
4581 /*
4582  * Convert a string-type Datum into a palloc'd, null-terminated string.
4583  *
4584  * On failure (e.g., unsupported typid), set *failure to true;
4585  * otherwise, that variable is not changed.  (We'll return NULL on failure.)
4586  *
4587  * When using a non-C locale, we must pass the string through strxfrm()
4588  * before continuing, so as to generate correct locale-specific results.
4589  */
4590 static char *
4591 convert_string_datum(Datum value, Oid typid, Oid collid, bool *failure)
4592 {
4593 	char	   *val;
4594 
4595 	switch (typid)
4596 	{
4597 		case CHAROID:
4598 			val = (char *) palloc(2);
4599 			val[0] = DatumGetChar(value);
4600 			val[1] = '\0';
4601 			break;
4602 		case BPCHAROID:
4603 		case VARCHAROID:
4604 		case TEXTOID:
4605 			val = TextDatumGetCString(value);
4606 			break;
4607 		case NAMEOID:
4608 			{
4609 				NameData   *nm = (NameData *) DatumGetPointer(value);
4610 
4611 				val = pstrdup(NameStr(*nm));
4612 				break;
4613 			}
4614 		default:
4615 			*failure = true;
4616 			return NULL;
4617 	}
4618 
4619 	if (!lc_collate_is_c(collid))
4620 	{
4621 		char	   *xfrmstr;
4622 		size_t		xfrmlen;
4623 		size_t		xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
4624 
4625 		/*
4626 		 * XXX: We could guess at a suitable output buffer size and only call
4627 		 * strxfrm twice if our guess is too small.
4628 		 *
4629 		 * XXX: strxfrm doesn't support UTF-8 encoding on Win32; it can
4630 		 * return bogus data or set an error.  This is not really a problem
4631 		 * unless it crashes, since it will only cause an estimation error,
4632 		 * nothing fatal.
4633 		 */
4634 		xfrmlen = strxfrm(NULL, val, 0);
4635 #ifdef WIN32
4636 
4637 		/*
4638 		 * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
4639 		 * of trying to allocate this much memory (and fail), just return the
4640 		 * original string unmodified as if we were in the C locale.
4641 		 */
4642 		if (xfrmlen == INT_MAX)
4643 			return val;
4644 #endif
4645 		xfrmstr = (char *) palloc(xfrmlen + 1);
4646 		xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
4647 
4648 		/*
4649 		 * Some systems (e.g., glibc) can return a smaller value from the
4650 		 * second call than the first; thus the Assert must be <= not ==.
4651 		 */
4652 		Assert(xfrmlen2 <= xfrmlen);
4653 		pfree(val);
4654 		val = xfrmstr;
4655 	}
4656 
4657 	return val;
4658 }
4659 
4660 /*
4661  * Do convert_to_scalar()'s work for any bytea data type.
4662  *
4663  * Very similar to convert_string_to_scalar except we can't assume
4664  * null-termination and therefore pass explicit lengths around.
4665  *
4666  * Also, assumptions about likely "normal" ranges of characters have been
4667  * removed - a data range of 0..255 is always used, for now.  (Perhaps
4668  * someday we will add information about actual byte data range to
4669  * pg_statistic.)
4670  */
4671 static void
4672 convert_bytea_to_scalar(Datum value,
4673 						double *scaledvalue,
4674 						Datum lobound,
4675 						double *scaledlobound,
4676 						Datum hibound,
4677 						double *scaledhibound)
4678 {
4679 	bytea	   *valuep = DatumGetByteaPP(value);
4680 	bytea	   *loboundp = DatumGetByteaPP(lobound);
4681 	bytea	   *hiboundp = DatumGetByteaPP(hibound);
4682 	int			rangelo,
4683 				rangehi,
4684 				valuelen = VARSIZE_ANY_EXHDR(valuep),
4685 				loboundlen = VARSIZE_ANY_EXHDR(loboundp),
4686 				hiboundlen = VARSIZE_ANY_EXHDR(hiboundp),
4687 				i,
4688 				minlen;
4689 	unsigned char *valstr = (unsigned char *) VARDATA_ANY(valuep);
4690 	unsigned char *lostr = (unsigned char *) VARDATA_ANY(loboundp);
4691 	unsigned char *histr = (unsigned char *) VARDATA_ANY(hiboundp);
4692 
4693 	/*
4694 	 * Assume bytea data is uniformly distributed across all byte values.
4695 	 */
4696 	rangelo = 0;
4697 	rangehi = 255;
4698 
4699 	/*
4700 	 * Now strip any common prefix of the three strings.
4701 	 */
4702 	minlen = Min(Min(valuelen, loboundlen), hiboundlen);
4703 	for (i = 0; i < minlen; i++)
4704 	{
4705 		if (*lostr != *histr || *lostr != *valstr)
4706 			break;
4707 		lostr++, histr++, valstr++;
4708 		loboundlen--, hiboundlen--, valuelen--;
4709 	}
4710 
4711 	/*
4712 	 * Now we can do the conversions.
4713 	 */
4714 	*scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
4715 	*scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
4716 	*scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
4717 }
4718 
4719 static double
4720 convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
4721 							int rangelo, int rangehi)
4722 {
4723 	double		num,
4724 				denom,
4725 				base;
4726 
4727 	if (valuelen <= 0)
4728 		return 0.0;				/* empty string has scalar value 0 */
4729 
4730 	/*
4731 	 * Since base is 256, need not consider more than about 10 chars (even
4732 	 * this many seems like overkill)
4733 	 */
4734 	if (valuelen > 10)
4735 		valuelen = 10;
4736 
4737 	/* Convert initial characters to fraction */
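	/*
	 * Worked example (illustrative): input bytes {0x80, 0x40} yield
	 * num = 128/256 + 64/65536 = 0.5009765625; i.e., the bytes act as
	 * base-256 digits of a fraction in [0, 1).
	 */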
4738 	base = rangehi - rangelo + 1;
4739 	num = 0.0;
4740 	denom = base;
4741 	while (valuelen-- > 0)
4742 	{
4743 		int			ch = *value++;
4744 
4745 		if (ch < rangelo)
4746 			ch = rangelo - 1;
4747 		else if (ch > rangehi)
4748 			ch = rangehi + 1;
4749 		num += ((double) (ch - rangelo)) / denom;
4750 		denom *= base;
4751 	}
4752 
4753 	return num;
4754 }
4755 
4756 /*
4757  * Do convert_to_scalar()'s work for any timevalue data type.
4758  *
4759  * On failure (e.g., unsupported typid), set *failure to true;
4760  * otherwise, that variable is not changed.
4761  */
4762 static double
4763 convert_timevalue_to_scalar(Datum value, Oid typid, bool *failure)
4764 {
4765 	switch (typid)
4766 	{
4767 		case TIMESTAMPOID:
4768 			return DatumGetTimestamp(value);
4769 		case TIMESTAMPTZOID:
4770 			return DatumGetTimestampTz(value);
4771 		case DATEOID:
4772 			return date2timestamp_no_overflow(DatumGetDateADT(value));
4773 		case INTERVALOID:
4774 			{
4775 				Interval   *interval = DatumGetIntervalP(value);
4776 
4777 				/*
4778 				 * Convert the month part of Interval to days using assumed
4779 				 * average month length of 365.25/12.0 days.  Not too
4780 				 * accurate, but plenty good enough for our purposes.
4781 				 */
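				/*
				 * E.g. (illustrative): '1 mon' becomes 365.25/12 = 30.4375
				 * days, or about 2.6298e12 microseconds on this scale.
				 */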
4782 				return interval->time + interval->day * (double) USECS_PER_DAY +
4783 					interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
4784 			}
4785 		case TIMEOID:
4786 			return DatumGetTimeADT(value);
4787 		case TIMETZOID:
4788 			{
4789 				TimeTzADT  *timetz = DatumGetTimeTzADTP(value);
4790 
4791 				/* use GMT-equivalent time */
4792 				return (double) (timetz->time + (timetz->zone * 1000000.0));
4793 			}
4794 	}
4795 
4796 	*failure = true;
4797 	return 0;
4798 }
4799 
4800 
4801 /*
4802  * get_restriction_variable
4803  *		Examine the args of a restriction clause to see if it's of the
4804  *		form (variable op pseudoconstant) or (pseudoconstant op variable),
4805  *		where "variable" could be either a Var or an expression in vars of a
4806  *		single relation.  If so, extract information about the variable,
4807  *		and also indicate which side it was on and the other argument.
4808  *
4809  * Inputs:
4810  *	root: the planner info
4811  *	args: clause argument list
4812  *	varRelid: see specs for restriction selectivity functions
4813  *
4814  * Outputs: (these are valid only if true is returned)
4815  *	*vardata: gets information about variable (see examine_variable)
4816  *	*other: gets other clause argument, aggressively reduced to a constant
4817  *	*varonleft: set true if variable is on the left, false if on the right
4818  *
4819  * Returns true if a variable is identified, otherwise false.
4820  *
4821  * Note: if there are Vars on both sides of the clause, we must fail, because
4822  * callers are expecting that the other side will act like a pseudoconstant.
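 *
 * Illustrative usage sketch (this mirrors the pattern in estimators such as
 * eqsel; DEFAULT_EQ_SEL is the fallback that estimator actually uses):
 *
 *		if (!get_restriction_variable(root, args, varRelid,
 *									  &vardata, &other, &varonleft))
 *			return DEFAULT_EQ_SEL;
 *		... inspect vardata and the pseudoconstant "other" ...
 *		ReleaseVariableStats(vardata);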
4823  */
4824 bool
4825 get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
4826 						 VariableStatData *vardata, Node **other,
4827 						 bool *varonleft)
4828 {
4829 	Node	   *left,
4830 			   *right;
4831 	VariableStatData rdata;
4832 
4833 	/* Fail if not a binary opclause (probably shouldn't happen) */
4834 	if (list_length(args) != 2)
4835 		return false;
4836 
4837 	left = (Node *) linitial(args);
4838 	right = (Node *) lsecond(args);
4839 
4840 	/*
4841 	 * Examine both sides.  Note that when varRelid is nonzero, Vars of other
4842 	 * relations will be treated as pseudoconstants.
4843 	 */
4844 	examine_variable(root, left, varRelid, vardata);
4845 	examine_variable(root, right, varRelid, &rdata);
4846 
4847 	/*
4848 	 * If one side is a variable and the other not, we win.
4849 	 */
4850 	if (vardata->rel && rdata.rel == NULL)
4851 	{
4852 		*varonleft = true;
4853 		*other = estimate_expression_value(root, rdata.var);
4854 		/* Assume we need no ReleaseVariableStats(rdata) here */
4855 		return true;
4856 	}
4857 
4858 	if (vardata->rel == NULL && rdata.rel)
4859 	{
4860 		*varonleft = false;
4861 		*other = estimate_expression_value(root, vardata->var);
4862 		/* Assume we need no ReleaseVariableStats(*vardata) here */
4863 		*vardata = rdata;
4864 		return true;
4865 	}
4866 
4867 	/* Oops, clause has wrong structure (probably var op var) */
4868 	ReleaseVariableStats(*vardata);
4869 	ReleaseVariableStats(rdata);
4870 
4871 	return false;
4872 }
4873 
4874 /*
4875  * get_join_variables
4876  *		Apply examine_variable() to each side of a join clause.
4877  *		Also, attempt to identify whether the join clause has the same
4878  *		or reversed sense compared to the SpecialJoinInfo.
4879  *
4880  * We consider the join clause "normal" if it is "lhs_var OP rhs_var",
4881  * or "reversed" if it is "rhs_var OP lhs_var".  In complicated cases
4882  * where we can't tell for sure, we default to assuming it's normal.
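 *
 * Example (illustrative): given "a LEFT JOIN b ON b.y = a.x", var1 (b.y)
 * falls within sjinfo->syn_righthand, so the clause is reported as
 * reversed.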
4883  */
4884 void
4885 get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo,
4886 				   VariableStatData *vardata1, VariableStatData *vardata2,
4887 				   bool *join_is_reversed)
4888 {
4889 	Node	   *left,
4890 			   *right;
4891 
4892 	if (list_length(args) != 2)
4893 		elog(ERROR, "join operator should take two arguments");
4894 
4895 	left = (Node *) linitial(args);
4896 	right = (Node *) lsecond(args);
4897 
4898 	examine_variable(root, left, 0, vardata1);
4899 	examine_variable(root, right, 0, vardata2);
4900 
4901 	if (vardata1->rel &&
4902 		bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4903 		*join_is_reversed = true;	/* var1 is on RHS */
4904 	else if (vardata2->rel &&
4905 			 bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4906 		*join_is_reversed = true;	/* var2 is on LHS */
4907 	else
4908 		*join_is_reversed = false;
4909 }
4910 
4911 /* statext_expressions_load copies the tuple, so just pfree it. */
4912 static void
4913 ReleaseDummy(HeapTuple tuple)
4914 {
4915 	pfree(tuple);
4916 }
4917 
4918 /*
4919  * examine_variable
4920  *		Try to look up statistical data about an expression.
4921  *		Fill in a VariableStatData struct to describe the expression.
4922  *
4923  * Inputs:
4924  *	root: the planner info
4925  *	node: the expression tree to examine
4926  *	varRelid: see specs for restriction selectivity functions
4927  *
4928  * Outputs: *vardata is filled as follows:
4929  *	var: the input expression (with any binary relabeling stripped, if
4930  *		it is or contains a variable; but otherwise the type is preserved)
4931  *	rel: RelOptInfo for relation containing variable; NULL if expression
4932  *		contains no Vars (NOTE this could point to a RelOptInfo of a
4933  *		subquery, not one in the current query).
4934  *	statsTuple: the pg_statistic entry for the variable, if one exists;
4935  *		otherwise NULL.
4936  *	freefunc: pointer to a function to release statsTuple with.
4937  *	vartype: exposed type of the expression; this should always match
4938  *		the declared input type of the operator we are estimating for.
4939  *	atttype, atttypmod: actual type/typmod of the "var" expression.  This is
4940  *		commonly the same as the exposed type of the variable argument,
4941  *		but can be different in binary-compatible-type cases.
4942  *	isunique: true if we were able to match the var to a unique index or a
4943  *		single-column DISTINCT clause, implying its values are unique for
4944  *		this query.  (Caution: this should be trusted for statistical
4945  *		purposes only, since we do not check indimmediate nor verify that
4946  *		the exact same definition of equality applies.)
4947  *	acl_ok: true if current user has permission to read the column(s)
4948  *		underlying the pg_statistic entry.  This is consulted by
4949  *		statistic_proc_security_check().
4950  *
4951  * Caller is responsible for doing ReleaseVariableStats() before exiting.
4952  */
4953 void
4954 examine_variable(PlannerInfo *root, Node *node, int varRelid,
4955 				 VariableStatData *vardata)
4956 {
4957 	Node	   *basenode;
4958 	Relids		varnos;
4959 	RelOptInfo *onerel;
4960 
4961 	/* Make sure we don't return dangling pointers in vardata */
4962 	MemSet(vardata, 0, sizeof(VariableStatData));
4963 
4964 	/* Save the exposed type of the expression */
4965 	vardata->vartype = exprType(node);
4966 
4967 	/* Look inside any binary-compatible relabeling */
4968 
4969 	if (IsA(node, RelabelType))
4970 		basenode = (Node *) ((RelabelType *) node)->arg;
4971 	else
4972 		basenode = node;
4973 
4974 	/* Fast path for a simple Var */
4975 
4976 	if (IsA(basenode, Var) &&
4977 		(varRelid == 0 || varRelid == ((Var *) basenode)->varno))
4978 	{
4979 		Var		   *var = (Var *) basenode;
4980 
4981 		/* Set up result fields other than the stats tuple */
4982 		vardata->var = basenode;	/* return Var without relabeling */
4983 		vardata->rel = find_base_rel(root, var->varno);
4984 		vardata->atttype = var->vartype;
4985 		vardata->atttypmod = var->vartypmod;
4986 		vardata->isunique = has_unique_index(vardata->rel, var->varattno);
4987 
4988 		/* Try to locate some stats */
4989 		examine_simple_variable(root, var, vardata);
4990 
4991 		return;
4992 	}
4993 
4994 	/*
4995 	 * Okay, it's a more complicated expression.  Determine variable
4996 	 * membership.  Note that when varRelid isn't zero, only vars of that
4997 	 * relation are considered "real" vars.
4998 	 */
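	/*
	 * For instance (illustrative), "lower(t.name)" yields the singleton set
	 * {t}, whereas "t1.a + t2.b" yields a two-member set and is handled as a
	 * join variable below.
	 */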
4999 	varnos = pull_varnos(root, basenode);
5000 
5001 	onerel = NULL;
5002 
5003 	switch (bms_membership(varnos))
5004 	{
5005 		case BMS_EMPTY_SET:
5006 			/* No Vars at all ... must be pseudo-constant clause */
5007 			break;
5008 		case BMS_SINGLETON:
5009 			if (varRelid == 0 || bms_is_member(varRelid, varnos))
5010 			{
5011 				onerel = find_base_rel(root,
5012 									   (varRelid ? varRelid : bms_singleton_member(varnos)));
5013 				vardata->rel = onerel;
5014 				node = basenode;	/* strip any relabeling */
5015 			}
5016 			/* else treat it as a constant */
5017 			break;
5018 		case BMS_MULTIPLE:
5019 			if (varRelid == 0)
5020 			{
5021 				/* treat it as a variable of a join relation */
5022 				vardata->rel = find_join_rel(root, varnos);
5023 				node = basenode;	/* strip any relabeling */
5024 			}
5025 			else if (bms_is_member(varRelid, varnos))
5026 			{
5027 				/* ignore the vars belonging to other relations */
5028 				vardata->rel = find_base_rel(root, varRelid);
5029 				node = basenode;	/* strip any relabeling */
5030 				/* note: no point in expressional-index search here */
5031 			}
5032 			/* else treat it as a constant */
5033 			break;
5034 	}
5035 
5036 	bms_free(varnos);
5037 
5038 	vardata->var = node;
5039 	vardata->atttype = exprType(node);
5040 	vardata->atttypmod = exprTypmod(node);
5041 
5042 	if (onerel)
5043 	{
5044 		/*
5045 		 * We have an expression in vars of a single relation.  Try to match
5046 		 * it to expressional index columns, in hopes of finding some
5047 		 * statistics.
5048 		 *
5049 		 * Note that we consider all index columns including INCLUDE columns,
5050 		 * since there could be stats for such columns.  But the test for
5051 		 * uniqueness needs to be warier.
5052 		 *
5053 		 * XXX it's conceivable that there are multiple matches with different
5054 		 * index opfamilies; if so, we need to pick one that matches the
5055 		 * operator we are estimating for.  FIXME later.
5056 		 */
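		/*
		 * Illustrative example: with CREATE INDEX ON t (lower(name)), a
		 * clause on lower(name) can use the pg_statistic row that ANALYZE
		 * maintains for that index column.
		 */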
5057 		ListCell   *ilist;
5058 		ListCell   *slist;
5059 
5060 		foreach(ilist, onerel->indexlist)
5061 		{
5062 			IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
5063 			ListCell   *indexpr_item;
5064 			int			pos;
5065 
5066 			indexpr_item = list_head(index->indexprs);
5067 			if (indexpr_item == NULL)
5068 				continue;		/* no expressions here... */
5069 
5070 			for (pos = 0; pos < index->ncolumns; pos++)
5071 			{
5072 				if (index->indexkeys[pos] == 0)
5073 				{
5074 					Node	   *indexkey;
5075 
5076 					if (indexpr_item == NULL)
5077 						elog(ERROR, "too few entries in indexprs list");
5078 					indexkey = (Node *) lfirst(indexpr_item);
5079 					if (indexkey && IsA(indexkey, RelabelType))
5080 						indexkey = (Node *) ((RelabelType *) indexkey)->arg;
5081 					if (equal(node, indexkey))
5082 					{
5083 						/*
5084 						 * Found a match ... is it a unique index? Tests here
5085 						 * should match has_unique_index().
5086 						 */
5087 						if (index->unique &&
5088 							index->nkeycolumns == 1 &&
5089 							pos == 0 &&
5090 							(index->indpred == NIL || index->predOK))
5091 							vardata->isunique = true;
5092 
5093 						/*
5094 						 * Has it got stats?  We only consider stats for
5095 						 * non-partial indexes, since partial indexes probably
5096 						 * don't reflect whole-relation statistics; the above
5097 						 * check for uniqueness is the only info we take from
5098 						 * a partial index.
5099 						 *
5100 						 * An index stats hook, however, must make its own
5101 						 * decisions about what to do with partial indexes.
5102 						 */
5103 						if (get_index_stats_hook &&
5104 							(*get_index_stats_hook) (root, index->indexoid,
5105 													 pos + 1, vardata))
5106 						{
5107 							/*
5108 							 * The hook took control of acquiring a stats
5109 							 * tuple.  If it did supply a tuple, it'd better
5110 							 * have supplied a freefunc.
5111 							 */
5112 							if (HeapTupleIsValid(vardata->statsTuple) &&
5113 								!vardata->freefunc)
5114 								elog(ERROR, "no function provided to release variable stats with");
5115 						}
5116 						else if (index->indpred == NIL)
5117 						{
5118 							vardata->statsTuple =
5119 								SearchSysCache3(STATRELATTINH,
5120 												ObjectIdGetDatum(index->indexoid),
5121 												Int16GetDatum(pos + 1),
5122 												BoolGetDatum(false));
5123 							vardata->freefunc = ReleaseSysCache;
5124 
5125 							if (HeapTupleIsValid(vardata->statsTuple))
5126 							{
5127 								/* Get index's table for permission check */
5128 								RangeTblEntry *rte;
5129 								Oid			userid;
5130 
5131 								rte = planner_rt_fetch(index->rel->relid, root);
5132 								Assert(rte->rtekind == RTE_RELATION);
5133 
5134 								/*
5135 								 * Use checkAsUser if it's set, in case we're
5136 								 * accessing the table via a view.
5137 								 */
5138 								userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5139 
5140 								/*
5141 								 * For simplicity, we insist on the whole
5142 								 * table being selectable, rather than trying
5143 								 * to identify which column(s) the index
5144 								 * depends on.  Also require all rows to be
5145 								 * selectable --- there must be no
5146 								 * securityQuals from security barrier views
5147 								 * or RLS policies.
5148 								 */
5149 								vardata->acl_ok =
5150 									rte->securityQuals == NIL &&
5151 									(pg_class_aclcheck(rte->relid, userid,
5152 													   ACL_SELECT) == ACLCHECK_OK);
5153 
5154 								/*
5155 								 * If the user doesn't have permissions to
5156 								 * access an inheritance child relation, check
5157 								 * the permissions of the table actually
5158 								 * mentioned in the query, since most likely
5159 								 * the user does have that permission.  Note
5160 								 * that whole-table select privilege on the
5161 								 * parent doesn't quite guarantee that the
5162 								 * user could read all columns of the child.
5163 								 * But in practice it's unlikely that any
5164 								 * interesting security violation could result
5165 								 * from allowing access to the expression
5166 								 * index's stats, so we allow it anyway.  See
5167 								 * similar code in examine_simple_variable()
5168 								 * for additional comments.
5169 								 */
5170 								if (!vardata->acl_ok &&
5171 									root->append_rel_array != NULL)
5172 								{
5173 									AppendRelInfo *appinfo;
5174 									Index		varno = index->rel->relid;
5175 
5176 									appinfo = root->append_rel_array[varno];
5177 									while (appinfo &&
5178 										   planner_rt_fetch(appinfo->parent_relid,
5179 															root)->rtekind == RTE_RELATION)
5180 									{
5181 										varno = appinfo->parent_relid;
5182 										appinfo = root->append_rel_array[varno];
5183 									}
5184 									if (varno != index->rel->relid)
5185 									{
5186 										/* Repeat access check on this rel */
5187 										rte = planner_rt_fetch(varno, root);
5188 										Assert(rte->rtekind == RTE_RELATION);
5189 
5190 										userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5191 
5192 										vardata->acl_ok =
5193 											rte->securityQuals == NIL &&
5194 											(pg_class_aclcheck(rte->relid,
5195 															   userid,
5196 															   ACL_SELECT) == ACLCHECK_OK);
5197 									}
5198 								}
5199 							}
5200 							else
5201 							{
5202 								/* suppress leakproofness checks later */
5203 								vardata->acl_ok = true;
5204 							}
5205 						}
5206 						if (vardata->statsTuple)
5207 							break;
5208 					}
5209 					indexpr_item = lnext(index->indexprs, indexpr_item);
5210 				}
5211 			}
5212 			if (vardata->statsTuple)
5213 				break;
5214 		}
5215 
5216 		/*
5217 		 * Search extended statistics for one with a matching expression.
5218 		 * There might be multiple ones, so just grab the first one. In the
5219 		 * future, we might consider the statistics target (and pick the most
5220 		 * accurate statistics) and maybe some other parameters.
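		 *
		 * (Illustrative example: CREATE STATISTICS s ON (lower(name)) FROM t
		 * creates per-expression stats that a clause on lower(name) can use
		 * here.)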
5221 		 */
5222 		foreach(slist, onerel->statlist)
5223 		{
5224 			StatisticExtInfo *info = (StatisticExtInfo *) lfirst(slist);
5225 			ListCell   *expr_item;
5226 			int			pos;
5227 
5228 			/*
5229 			 * Stop once we've found statistics for the expression (either
5230 			 * from extended stats, or for an index in the preceding loop).
5231 			 */
5232 			if (vardata->statsTuple)
5233 				break;
5234 
5235 			/* skip stats without per-expression stats */
5236 			if (info->kind != STATS_EXT_EXPRESSIONS)
5237 				continue;
5238 
5239 			pos = 0;
5240 			foreach(expr_item, info->exprs)
5241 			{
5242 				Node	   *expr = (Node *) lfirst(expr_item);
5243 
5244 				Assert(expr);
5245 
5246 				/* strip RelabelType before comparing it */
5247 				if (expr && IsA(expr, RelabelType))
5248 					expr = (Node *) ((RelabelType *) expr)->arg;
5249 
5250 				/* found a match, see if we can extract pg_statistic row */
5251 				if (equal(node, expr))
5252 				{
5253 					HeapTuple	t = statext_expressions_load(info->statOid, pos);
5254 
5255 					/* Get statistics object's table for permission check */
5256 					RangeTblEntry *rte;
5257 					Oid			userid;
5258 
5259 					vardata->statsTuple = t;
5260 
5261 					/*
5262 					 * XXX Not sure if we should cache the tuple somewhere.
5263 					 * Now we just create a new copy every time.
5264 					 */
5265 					vardata->freefunc = ReleaseDummy;
5266 
5267 					rte = planner_rt_fetch(onerel->relid, root);
5268 					Assert(rte->rtekind == RTE_RELATION);
5269 
5270 					/*
5271 					 * Use checkAsUser if it's set, in case we're accessing
5272 					 * the table via a view.
5273 					 */
5274 					userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5275 
5276 					/*
5277 					 * For simplicity, we insist on the whole table being
5278 					 * selectable, rather than trying to identify which
5279 					 * column(s) the statistics object depends on.  Also
5280 					 * require all rows to be selectable --- there must be no
5281 					 * securityQuals from security barrier views or RLS
5282 					 * policies.
5283 					 */
5284 					vardata->acl_ok =
5285 						rte->securityQuals == NIL &&
5286 						(pg_class_aclcheck(rte->relid, userid,
5287 										   ACL_SELECT) == ACLCHECK_OK);
5288 
5289 					/*
5290 					 * If the user doesn't have permissions to access an
5291 					 * inheritance child relation, check the permissions of
5292 					 * the table actually mentioned in the query, since most
5293 					 * likely the user does have that permission.  Note that
5294 					 * whole-table select privilege on the parent doesn't
5295 					 * quite guarantee that the user could read all columns of
5296 					 * the child. But in practice it's unlikely that any
5297 					 * interesting security violation could result from
5298 					 * allowing access to the expression stats, so we allow it
5299 					 * anyway.  See similar code in examine_simple_variable()
5300 					 * for additional comments.
5301 					 */
5302 					if (!vardata->acl_ok &&
5303 						root->append_rel_array != NULL)
5304 					{
5305 						AppendRelInfo *appinfo;
5306 						Index		varno = onerel->relid;
5307 
5308 						appinfo = root->append_rel_array[varno];
5309 						while (appinfo &&
5310 							   planner_rt_fetch(appinfo->parent_relid,
5311 												root)->rtekind == RTE_RELATION)
5312 						{
5313 							varno = appinfo->parent_relid;
5314 							appinfo = root->append_rel_array[varno];
5315 						}
5316 						if (varno != onerel->relid)
5317 						{
5318 							/* Repeat access check on this rel */
5319 							rte = planner_rt_fetch(varno, root);
5320 							Assert(rte->rtekind == RTE_RELATION);
5321 
5322 							userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5323 
5324 							vardata->acl_ok =
5325 								rte->securityQuals == NIL &&
5326 								(pg_class_aclcheck(rte->relid,
5327 												   userid,
5328 												   ACL_SELECT) == ACLCHECK_OK);
5329 						}
5330 					}
5331 
5332 					break;
5333 				}
5334 
5335 				pos++;
5336 			}
5337 		}
5338 	}
5339 }
5340 
5341 /*
5342  * examine_simple_variable
5343  *		Handle a simple Var for examine_variable
5344  *
5345  * This is split out as a subroutine so that we can recurse to deal with
5346  * Vars referencing subqueries.
5347  *
5348  * We already filled in all the fields of *vardata except for the stats tuple.
5349  */
5350 static void
5351 examine_simple_variable(PlannerInfo *root, Var *var,
5352 						VariableStatData *vardata)
5353 {
5354 	RangeTblEntry *rte = root->simple_rte_array[var->varno];
5355 
5356 	Assert(IsA(rte, RangeTblEntry));
5357 
5358 	if (get_relation_stats_hook &&
5359 		(*get_relation_stats_hook) (root, rte, var->varattno, vardata))
5360 	{
5361 		/*
5362 		 * The hook took control of acquiring a stats tuple.  If it did supply
5363 		 * a tuple, it'd better have supplied a freefunc.
5364 		 */
5365 		if (HeapTupleIsValid(vardata->statsTuple) &&
5366 			!vardata->freefunc)
5367 			elog(ERROR, "no function provided to release variable stats with");
5368 	}
5369 	else if (rte->rtekind == RTE_RELATION)
5370 	{
5371 		/*
5372 		 * Plain table or parent of an inheritance appendrel, so look up the
5373 		 * column in pg_statistic
5374 		 */
5375 		vardata->statsTuple = SearchSysCache3(STATRELATTINH,
5376 											  ObjectIdGetDatum(rte->relid),
5377 											  Int16GetDatum(var->varattno),
5378 											  BoolGetDatum(rte->inh));
5379 		vardata->freefunc = ReleaseSysCache;
5380 
5381 		if (HeapTupleIsValid(vardata->statsTuple))
5382 		{
5383 			Oid			userid;
5384 
5385 			/*
5386 			 * Check if user has permission to read this column.  We require
5387 			 * all rows to be accessible, so there must be no securityQuals
5388 			 * from security barrier views or RLS policies.  Use checkAsUser
5389 			 * if it's set, in case we're accessing the table via a view.
5390 			 */
5391 			userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5392 
5393 			vardata->acl_ok =
5394 				rte->securityQuals == NIL &&
5395 				((pg_class_aclcheck(rte->relid, userid,
5396 									ACL_SELECT) == ACLCHECK_OK) ||
5397 				 (pg_attribute_aclcheck(rte->relid, var->varattno, userid,
5398 										ACL_SELECT) == ACLCHECK_OK));
5399 
5400 			/*
5401 			 * If the user doesn't have permissions to access an inheritance
5402 			 * child relation or specifically this attribute, check the
5403 			 * permissions of the table/column actually mentioned in the
5404 			 * query, since most likely the user does have that permission
5405 			 * (else the query will fail at runtime), and if the user can read
5406 			 * the column there then they can get the values of the child table
5407 			 * too.  To do that, we must find out which of the root parent's
5408 			 * attributes the child relation's attribute corresponds to.
5409 			 */
5410 			if (!vardata->acl_ok && var->varattno > 0 &&
5411 				root->append_rel_array != NULL)
5412 			{
5413 				AppendRelInfo *appinfo;
5414 				Index		varno = var->varno;
5415 				int			varattno = var->varattno;
5416 				bool		found = false;
5417 
5418 				appinfo = root->append_rel_array[varno];
5419 
5420 				/*
5421 				 * Partitions are mapped to their immediate parent, not the
5422 				 * root parent, so must be ready to walk up multiple
5423 				 * AppendRelInfos.  But stop if we hit a parent that is not
5424 				 * RTE_RELATION --- that's a flattened UNION ALL subquery, not
5425 				 * an inheritance parent.
5426 				 */
5427 				while (appinfo &&
5428 					   planner_rt_fetch(appinfo->parent_relid,
5429 										root)->rtekind == RTE_RELATION)
5430 				{
5431 					int			parent_varattno;
5432 
5433 					found = false;
5434 					if (varattno <= 0 || varattno > appinfo->num_child_cols)
5435 						break;	/* safety check */
5436 					parent_varattno = appinfo->parent_colnos[varattno - 1];
5437 					if (parent_varattno == 0)
5438 						break;	/* Var is local to child */
5439 
5440 					varno = appinfo->parent_relid;
5441 					varattno = parent_varattno;
5442 					found = true;
5443 
5444 					/* If the parent is itself a child, continue up. */
5445 					appinfo = root->append_rel_array[varno];
5446 				}
5447 
5448 				/*
5449 				 * In rare cases, the Var may be local to the child table, in
5450 				 * which case, we've got to live with having no access to this
5451 				 * column's stats.
5452 				 */
5453 				if (!found)
5454 					return;
5455 
5456 				/* Repeat the access check on this parent rel & column */
5457 				rte = planner_rt_fetch(varno, root);
5458 				Assert(rte->rtekind == RTE_RELATION);
5459 
5460 				userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
5461 
5462 				vardata->acl_ok =
5463 					rte->securityQuals == NIL &&
5464 					((pg_class_aclcheck(rte->relid, userid,
5465 										ACL_SELECT) == ACLCHECK_OK) ||
5466 					 (pg_attribute_aclcheck(rte->relid, varattno, userid,
5467 											ACL_SELECT) == ACLCHECK_OK));
5468 			}
5469 		}
5470 		else
5471 		{
5472 			/* suppress any possible leakproofness checks later */
5473 			vardata->acl_ok = true;
5474 		}
5475 	}
5476 	else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
5477 	{
5478 		/*
5479 		 * Plain subquery (not one that was converted to an appendrel).
5480 		 */
5481 		Query	   *subquery = rte->subquery;
5482 		RelOptInfo *rel;
5483 		TargetEntry *ste;
5484 
5485 		/*
5486 		 * Punt if it's a whole-row var rather than a plain column reference.
5487 		 */
5488 		if (var->varattno == InvalidAttrNumber)
5489 			return;
5490 
5491 		/*
5492 		 * Punt if subquery uses set operations or GROUP BY, as these will
5493 		 * mash underlying columns' stats beyond recognition.  (Set ops are
5494 		 * particularly nasty; if we forged ahead, we would return stats
5495 		 * relevant to only the leftmost subselect...)	DISTINCT is also
5496 		 * problematic, but we check that later because there is a possibility
5497 		 * of learning something even with it.
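		 *
		 * For example (illustrative): for "(SELECT x, sum(y) AS sy FROM t
		 * GROUP BY x) ss", the stats of t.y say little about ss.sy, so we
		 * give up here.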
5498 		 */
5499 		if (subquery->setOperations ||
5500 			subquery->groupClause ||
5501 			subquery->groupingSets)
5502 			return;
5503 
5504 		/*
5505 		 * OK, fetch RelOptInfo for subquery.  Note that we don't change the
5506 		 * rel returned in vardata, since caller expects it to be a rel of the
5507 		 * caller's query level.  Because we might already be recursing, we
5508 		 * can't use that rel pointer either, but have to look up the Var's
5509 		 * rel afresh.
5510 		 */
5511 		rel = find_base_rel(root, var->varno);
5512 
5513 		/* If the subquery hasn't been planned yet, we have to punt */
5514 		if (rel->subroot == NULL)
5515 			return;
5516 		Assert(IsA(rel->subroot, PlannerInfo));
5517 
5518 		/*
5519 		 * Switch our attention to the subquery as mangled by the planner. It
5520 		 * was okay to look at the pre-planning version for the tests above,
5521 		 * but now we need a Var that will refer to the subroot's live
5522 		 * RelOptInfos.  For instance, if any subquery pullup happened during
5523 		 * planning, Vars in the targetlist might have gotten replaced, and we
5524 		 * need to see the replacement expressions.
5525 		 */
5526 		subquery = rel->subroot->parse;
5527 		Assert(IsA(subquery, Query));
5528 
5529 		/* Get the subquery output expression referenced by the upper Var */
5530 		ste = get_tle_by_resno(subquery->targetList, var->varattno);
5531 		if (ste == NULL || ste->resjunk)
5532 			elog(ERROR, "subquery %s does not have attribute %d",
5533 				 rte->eref->aliasname, var->varattno);
5534 		var = (Var *) ste->expr;
5535 
5536 		/*
5537 		 * If subquery uses DISTINCT, we can't make use of any stats for the
5538 		 * variable ... but, if it's the only DISTINCT column, we are entitled
5539 		 * to consider it unique.  We do the test this way so that it works
5540 		 * for cases involving DISTINCT ON.
5541 		 */
5542 		if (subquery->distinctClause)
5543 		{
5544 			if (list_length(subquery->distinctClause) == 1 &&
5545 				targetIsInSortList(ste, InvalidOid, subquery->distinctClause))
5546 				vardata->isunique = true;
5547 			/* cannot go further */
5548 			return;
5549 		}
5550 
5551 		/*
5552 		 * If the sub-query originated from a view with the security_barrier
5553 		 * attribute, we must not look at the variable's statistics, though it
5554 		 * seems all right to notice the existence of a DISTINCT clause. So
5555 		 * stop here.
5556 		 *
5557 		 * This is probably a harsher restriction than necessary; it's
5558 		 * certainly OK for the selectivity estimator (which is a C function,
5559 		 * and therefore omnipotent anyway) to look at the statistics.  But
5560 		 * many selectivity estimators will happily *invoke the operator
5561 		 * function* to try to work out a good estimate --- and that's not OK.
5562 		 * So for now, don't dig down for stats.
5563 		 */
5564 		if (rte->security_barrier)
5565 			return;
5566 
5567 		/* Can only handle a simple Var of subquery's query level */
5568 		if (var && IsA(var, Var) &&
5569 			var->varlevelsup == 0)
5570 		{
5571 			/*
5572 			 * OK, recurse into the subquery.  Note that the original setting
5573 			 * of vardata->isunique (which will surely be false) is left
5574 			 * unchanged in this situation.  That's what we want, since even
5575 			 * if the underlying column is unique, the subquery may have
5576 			 * joined to other tables in a way that creates duplicates.
5577 			 */
5578 			examine_simple_variable(rel->subroot, var, vardata);
5579 		}
5580 	}
5581 	else
5582 	{
5583 		/*
5584 		 * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE.  (We
5585 		 * won't see RTE_JOIN here because join alias Vars have already been
5586 		 * flattened.)	There's not much we can do with function outputs, but
5587 		 * maybe someday try to be smarter about VALUES and/or CTEs.
5588 		 */
5589 	}
5590 }
5591 
5592 /*
5593  * Check whether it is permitted to call func_oid passing some of the
5594  * pg_statistic data in vardata.  We allow this either if the user has SELECT
5595  * privileges on the table or column underlying the pg_statistic data or if
5596  * the function is marked leak-proof.
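 *
 * (For example, texteq is marked leak-proof while most pattern-matching
 * procedures are not, so whether stats may be fed to the function can
 * depend on which operator is being estimated.)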
5597  */
5598 bool
5599 statistic_proc_security_check(VariableStatData *vardata, Oid func_oid)
5600 {
5601 	if (vardata->acl_ok)
5602 		return true;
5603 
5604 	if (!OidIsValid(func_oid))
5605 		return false;
5606 
5607 	if (get_func_leakproof(func_oid))
5608 		return true;
5609 
5610 	ereport(DEBUG2,
5611 			(errmsg_internal("not using statistics because function \"%s\" is not leak-proof",
5612 							 get_func_name(func_oid))));
5613 	return false;
5614 }
5615 
5616 /*
5617  * get_variable_numdistinct
5618  *	  Estimate the number of distinct values of a variable.
5619  *
5620  * vardata: results of examine_variable
5621  * *isdefault: set to true if the result is a default rather than based on
5622  * anything meaningful.
5623  *
5624  * NB: be careful to produce a positive integral result, since callers may
5625  * compare the result to exact integer counts, or might divide by it.
5626  */
5627 double
5628 get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
5629 {
5630 	double		stadistinct;
5631 	double		stanullfrac = 0.0;
5632 	double		ntuples;
5633 
5634 	*isdefault = false;
5635 
5636 	/*
5637 	 * Determine the stadistinct value to use.  There are cases where we can
5638 	 * get an estimate even without a pg_statistic entry, or can get a better
5639 	 * value than is in pg_statistic.  Grab stanullfrac too if we can find it
5640 	 * (otherwise, assume no nulls, for lack of any better idea).
5641 	 */
5642 	if (HeapTupleIsValid(vardata->statsTuple))
5643 	{
5644 		/* Use the pg_statistic entry */
5645 		Form_pg_statistic stats;
5646 
5647 		stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
5648 		stadistinct = stats->stadistinct;
5649 		stanullfrac = stats->stanullfrac;
5650 	}
5651 	else if (vardata->vartype == BOOLOID)
5652 	{
5653 		/*
5654 		 * Special-case boolean columns: presumably, two distinct values.
5655 		 *
5656 		 * Are there any other datatypes we should wire in special estimates
5657 		 * for?
5658 		 */
5659 		stadistinct = 2.0;
5660 	}
5661 	else if (vardata->rel && vardata->rel->rtekind == RTE_VALUES)
5662 	{
5663 		/*
5664 		 * If the Var represents a column of a VALUES RTE, assume it's unique.
5665 		 * This could of course be very wrong, but it should tend to be true
5666 		 * in well-written queries.  We could consider examining the VALUES'
5667 		 * contents to get some real statistics; but that only works if the
5668 		 * entries are all constants, and it would be pretty expensive anyway.
5669 		 */
5670 		stadistinct = -1.0;		/* unique (and all non null) */
5671 	}
5672 	else
5673 	{
5674 		/*
5675 		 * We don't keep statistics for system columns, but in some cases we
5676 		 * can infer distinctness anyway.
5677 		 */
5678 		if (vardata->var && IsA(vardata->var, Var))
5679 		{
5680 			switch (((Var *) vardata->var)->varattno)
5681 			{
5682 				case SelfItemPointerAttributeNumber:
5683 					stadistinct = -1.0; /* unique (and all non null) */
5684 					break;
5685 				case TableOidAttributeNumber:
5686 					stadistinct = 1.0;	/* only 1 value */
5687 					break;
5688 				default:
5689 					stadistinct = 0.0;	/* means "unknown" */
5690 					break;
5691 			}
5692 		}
5693 		else
5694 			stadistinct = 0.0;	/* means "unknown" */
5695 
5696 		/*
5697 		 * XXX consider using estimate_num_groups on expressions?
5698 		 */
5699 	}
5700 
5701 	/*
5702 	 * If there is a unique index or DISTINCT clause for the variable, assume
5703 	 * it is unique no matter what pg_statistic says; the statistics could be
5704 	 * out of date, or we might have found a partial unique index that proves
5705 	 * the var is unique for this query.  However, we'd better still believe
5706 	 * the null-fraction statistic.
5707 	 */
5708 	if (vardata->isunique)
5709 		stadistinct = -1.0 * (1.0 - stanullfrac);
5710 
5711 	/*
5712 	 * If we had an absolute estimate, use that.
5713 	 */
5714 	if (stadistinct > 0.0)
5715 		return clamp_row_est(stadistinct);
5716 
5717 	/*
5718 	 * Otherwise we need to get the relation size; punt if not available.
5719 	 */
5720 	if (vardata->rel == NULL)
5721 	{
5722 		*isdefault = true;
5723 		return DEFAULT_NUM_DISTINCT;
5724 	}
5725 	ntuples = vardata->rel->tuples;
5726 	if (ntuples <= 0.0)
5727 	{
5728 		*isdefault = true;
5729 		return DEFAULT_NUM_DISTINCT;
5730 	}
5731 
5732 	/*
5733 	 * If we had a relative estimate, use that.
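	 * (E.g., stadistinct = -0.2 with 1e6 tuples means 200000 distinct
	 * values, before clamping.)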
5734 	 */
5735 	if (stadistinct < 0.0)
5736 		return clamp_row_est(-stadistinct * ntuples);
5737 
5738 	/*
5739 	 * With no data, estimate ndistinct = ntuples if the table is small, else
5740 	 * use default.  We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
5741 	 * that the behavior isn't discontinuous.
5742 	 */
5743 	if (ntuples < DEFAULT_NUM_DISTINCT)
5744 		return clamp_row_est(ntuples);
5745 
5746 	*isdefault = true;
5747 	return DEFAULT_NUM_DISTINCT;
5748 }
5749 
5750 /*
5751  * get_variable_range
5752  *		Estimate the minimum and maximum value of the specified variable.
5753  *		If successful, store values in *min and *max, and return true.
5754  *		If no data available, return false.
5755  *
5756  * sortop is the "<" comparison operator to use.  This should generally
5757  * be "<" not ">", as only the former is likely to be found in pg_statistic.
5758  * The collation must be specified too.
5759  */
5760 static bool
5761 get_variable_range(PlannerInfo *root, VariableStatData *vardata,
5762 				   Oid sortop, Oid collation,
5763 				   Datum *min, Datum *max)
5764 {
5765 	Datum		tmin = 0;
5766 	Datum		tmax = 0;
5767 	bool		have_data = false;
5768 	int16		typLen;
5769 	bool		typByVal;
5770 	Oid			opfuncoid;
5771 	FmgrInfo	opproc;
5772 	AttStatsSlot sslot;
5773 
5774 	/*
5775 	 * XXX It's very tempting to try to use the actual column min and max, if
5776 	 * we can get them relatively-cheaply with an index probe.  However, since
5777 	 * this function is called many times during join planning, that could
5778 	 * have unpleasant effects on planning speed.  Need more investigation
5779 	 * before enabling this.
5780 	 */
5781 #ifdef NOT_USED
5782 	if (get_actual_variable_range(root, vardata, sortop, collation, min, max))
5783 		return true;
5784 #endif
5785 
5786 	if (!HeapTupleIsValid(vardata->statsTuple))
5787 	{
5788 		/* no stats available, so default result */
5789 		return false;
5790 	}
5791 
5792 	/*
5793 	 * If we can't apply the sortop to the stats data, just fail.  In
5794 	 * principle, if there's a histogram and no MCVs, we could return the
5795 	 * histogram endpoints without ever applying the sortop ... but it's
5796 	 * probably not worth trying, because whatever the caller wants to do with
5797 	 * the endpoints would likely fail the security check too.
5798 	 */
5799 	if (!statistic_proc_security_check(vardata,
5800 									   (opfuncoid = get_opcode(sortop))))
5801 		return false;
5802 
5803 	opproc.fn_oid = InvalidOid; /* mark this as not looked up yet */
5804 
5805 	get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5806 
5807 	/*
5808 	 * If there is a histogram with the ordering we want, grab the first and
5809 	 * last values.
5810 	 */
5811 	if (get_attstatsslot(&sslot, vardata->statsTuple,
5812 						 STATISTIC_KIND_HISTOGRAM, sortop,
5813 						 ATTSTATSSLOT_VALUES))
5814 	{
5815 		if (sslot.stacoll == collation && sslot.nvalues > 0)
5816 		{
5817 			tmin = datumCopy(sslot.values[0], typByVal, typLen);
5818 			tmax = datumCopy(sslot.values[sslot.nvalues - 1], typByVal, typLen);
5819 			have_data = true;
5820 		}
5821 		free_attstatsslot(&sslot);
5822 	}
5823 
5824 	/*
5825 	 * Otherwise, if there is a histogram with some other ordering, scan it
5826 	 * and get the min and max values according to the ordering we want.  This
5827 	 * of course may not find values that are really extremal according to our
5828 	 * ordering, but it beats ignoring available data.
5829 	 */
5830 	if (!have_data &&
5831 		get_attstatsslot(&sslot, vardata->statsTuple,
5832 						 STATISTIC_KIND_HISTOGRAM, InvalidOid,
5833 						 ATTSTATSSLOT_VALUES))
5834 	{
5835 		get_stats_slot_range(&sslot, opfuncoid, &opproc,
5836 							 collation, typLen, typByVal,
5837 							 &tmin, &tmax, &have_data);
5838 		free_attstatsslot(&sslot);
5839 	}
5840 
5841 	/*
5842 	 * If we have most-common-values info, look for extreme MCVs.  This is
5843 	 * needed even if we also have a histogram, since the histogram excludes
5844 	 * the MCVs.  However, if we *only* have MCVs and no histogram, we should
5845 	 * be pretty wary of deciding that that is a full representation of the
5846 	 * data.  Proceed only if the MCVs represent the whole table (to within
5847 	 * roundoff error).
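	 * (Illustrative example: a boolean-like column whose MCV frequencies
	 * plus the null fraction sum to ~1.0 passes this test, so its MCV list
	 * bounds the whole data range.)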
5848 	 */
5849 	if (get_attstatsslot(&sslot, vardata->statsTuple,
5850 						 STATISTIC_KIND_MCV, InvalidOid,
5851 						 have_data ? ATTSTATSSLOT_VALUES :
5852 						 (ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS)))
5853 	{
5854 		bool		use_mcvs = have_data;
5855 
5856 		if (!have_data)
5857 		{
5858 			double		sumcommon = 0.0;
5859 			double		nullfrac;
5860 			int			i;
5861 
5862 			for (i = 0; i < sslot.nnumbers; i++)
5863 				sumcommon += sslot.numbers[i];
5864 			nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata->statsTuple))->stanullfrac;
5865 			if (sumcommon + nullfrac > 0.99999)
5866 				use_mcvs = true;
5867 		}
5868 
5869 		if (use_mcvs)
5870 			get_stats_slot_range(&sslot, opfuncoid, &opproc,
5871 								 collation, typLen, typByVal,
5872 								 &tmin, &tmax, &have_data);
5873 		free_attstatsslot(&sslot);
5874 	}
5875 
5876 	*min = tmin;
5877 	*max = tmax;
5878 	return have_data;
5879 }
5880 
5881 /*
5882  * get_stats_slot_range: scan sslot for min/max values
5883  *
5884  * Subroutine for get_variable_range: update min/max/have_data according
5885  * to what we find in the statistics array.
5886  */
5887 static void
5888 get_stats_slot_range(AttStatsSlot *sslot, Oid opfuncoid, FmgrInfo *opproc,
5889 					 Oid collation, int16 typLen, bool typByVal,
5890 					 Datum *min, Datum *max, bool *p_have_data)
5891 {
5892 	Datum		tmin = *min;
5893 	Datum		tmax = *max;
5894 	bool		have_data = *p_have_data;
5895 	bool		found_tmin = false;
5896 	bool		found_tmax = false;
5897 
5898 	/* Look up the comparison function, if we didn't already do so */
5899 	if (opproc->fn_oid != opfuncoid)
5900 		fmgr_info(opfuncoid, opproc);
5901 
5902 	/* Scan all the slot's values */
5903 	for (int i = 0; i < sslot->nvalues; i++)
5904 	{
5905 		if (!have_data)
5906 		{
5907 			tmin = tmax = sslot->values[i];
5908 			found_tmin = found_tmax = true;
5909 			*p_have_data = have_data = true;
5910 			continue;
5911 		}
5912 		if (DatumGetBool(FunctionCall2Coll(opproc,
5913 										   collation,
5914 										   sslot->values[i], tmin)))
5915 		{
5916 			tmin = sslot->values[i];
5917 			found_tmin = true;
5918 		}
5919 		if (DatumGetBool(FunctionCall2Coll(opproc,
5920 										   collation,
5921 										   tmax, sslot->values[i])))
5922 		{
5923 			tmax = sslot->values[i];
5924 			found_tmax = true;
5925 		}
5926 	}
5927 
5928 	/*
5929 	 * Copy the slot's values, if we found new extreme values.
5930 	 */
5931 	if (found_tmin)
5932 		*min = datumCopy(tmin, typByVal, typLen);
5933 	if (found_tmax)
5934 		*max = datumCopy(tmax, typByVal, typLen);
5935 }
5936 
5937 
5938 /*
5939  * get_actual_variable_range
5940  *		Attempt to identify the current *actual* minimum and/or maximum
5941  *		of the specified variable, by looking for a suitable btree index
5942  *		and fetching its low and/or high values.
5943  *		If successful, store values in *min and *max, and return true.
5944  *		(Either pointer can be NULL if that endpoint isn't needed.)
5945  *		If no data available, return false.
5946  *
5947  * sortop is the "<" comparison operator to use.
5948  * collation is the required collation.
5949  */
5950 static bool
5951 get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
5952 						  Oid sortop, Oid collation,
5953 						  Datum *min, Datum *max)
5954 {
5955 	bool		have_data = false;
5956 	RelOptInfo *rel = vardata->rel;
5957 	RangeTblEntry *rte;
5958 	ListCell   *lc;
5959 
5960 	/* No hope if no relation or it doesn't have indexes */
5961 	if (rel == NULL || rel->indexlist == NIL)
5962 		return false;
5963 	/* If it has indexes it must be a plain relation */
5964 	rte = root->simple_rte_array[rel->relid];
5965 	Assert(rte->rtekind == RTE_RELATION);
5966 
5967 	/* Search through the indexes to see if any match our problem */
5968 	foreach(lc, rel->indexlist)
5969 	{
5970 		IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
5971 		ScanDirection indexscandir;
5972 
5973 		/* Ignore non-btree indexes */
5974 		if (index->relam != BTREE_AM_OID)
5975 			continue;
5976 
5977 		/*
5978 		 * Ignore partial indexes --- we only want stats that cover the entire
5979 		 * relation.
5980 		 */
5981 		if (index->indpred != NIL)
5982 			continue;
5983 
5984 		/*
5985 		 * The index list might include hypothetical indexes inserted by a
5986 		 * get_relation_info hook --- don't try to access them.
5987 		 */
5988 		if (index->hypothetical)
5989 			continue;
5990 
5991 		/*
5992 		 * The first index column must match the desired variable, sortop, and
5993 		 * collation --- but we can use a descending-order index.
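		 * (For instance, to find the minimum under a "<" sortop using an
		 * index on "x DESC", we simply scan that index backward.)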
5994 		 */
5995 		if (collation != index->indexcollations[0])
5996 			continue;			/* test first 'cause it's cheapest */
5997 		if (!match_index_to_operand(vardata->var, 0, index))
5998 			continue;
5999 		switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
6000 		{
6001 			case BTLessStrategyNumber:
6002 				if (index->reverse_sort[0])
6003 					indexscandir = BackwardScanDirection;
6004 				else
6005 					indexscandir = ForwardScanDirection;
6006 				break;
6007 			case BTGreaterStrategyNumber:
6008 				if (index->reverse_sort[0])
6009 					indexscandir = ForwardScanDirection;
6010 				else
6011 					indexscandir = BackwardScanDirection;
6012 				break;
6013 			default:
6014 				/* index doesn't match the sortop */
6015 				continue;
6016 		}
6017 
6018 		/*
6019 		 * Found a suitable index to extract data from.  Set up some data that
6020 		 * can be used by both invocations of get_actual_variable_endpoint.
6021 		 */
6022 		{
6023 			MemoryContext tmpcontext;
6024 			MemoryContext oldcontext;
6025 			Relation	heapRel;
6026 			Relation	indexRel;
6027 			TupleTableSlot *slot;
6028 			int16		typLen;
6029 			bool		typByVal;
6030 			ScanKeyData scankeys[1];
6031 
6032 			/* Make sure any cruft gets recycled when we're done */
6033 			tmpcontext = AllocSetContextCreate(CurrentMemoryContext,
6034 											   "get_actual_variable_range workspace",
6035 											   ALLOCSET_DEFAULT_SIZES);
6036 			oldcontext = MemoryContextSwitchTo(tmpcontext);
6037 
6038 			/*
6039 			 * Open the table and index so we can read from them.  We should
6040 			 * already have some type of lock on each.
6041 			 */
6042 			heapRel = table_open(rte->relid, NoLock);
6043 			indexRel = index_open(index->indexoid, NoLock);
6044 
6045 			/* build some stuff needed for indexscan execution */
6046 			slot = table_slot_create(heapRel, NULL);
6047 			get_typlenbyval(vardata->atttype, &typLen, &typByVal);
6048 
6049 			/* set up an IS NOT NULL scan key so that we ignore nulls */
6050 			ScanKeyEntryInitialize(&scankeys[0],
6051 								   SK_ISNULL | SK_SEARCHNOTNULL,
6052 								   1,	/* index col to scan */
6053 								   InvalidStrategy, /* no strategy */
6054 								   InvalidOid,	/* no strategy subtype */
6055 								   InvalidOid,	/* no collation */
6056 								   InvalidOid,	/* no reg proc for this */
6057 								   (Datum) 0);	/* constant */
6058 
6059 			/* If min is requested ... */
6060 			if (min)
6061 			{
6062 				have_data = get_actual_variable_endpoint(heapRel,
6063 														 indexRel,
6064 														 indexscandir,
6065 														 scankeys,
6066 														 typLen,
6067 														 typByVal,
6068 														 slot,
6069 														 oldcontext,
6070 														 min);
6071 			}
6072 			else
6073 			{
6074 				/* If min not requested, assume index is nonempty */
6075 				have_data = true;
6076 			}
6077 
6078 			/* If max is requested, and we didn't find the index is empty */
6079 			if (max && have_data)
6080 			{
6081 				/* scan in the opposite direction; all else is the same */
6082 				have_data = get_actual_variable_endpoint(heapRel,
6083 														 indexRel,
6084 														 -indexscandir,
6085 														 scankeys,
6086 														 typLen,
6087 														 typByVal,
6088 														 slot,
6089 														 oldcontext,
6090 														 max);
6091 			}
6092 
6093 			/* Clean everything up */
6094 			ExecDropSingleTupleTableSlot(slot);
6095 
6096 			index_close(indexRel, NoLock);
6097 			table_close(heapRel, NoLock);
6098 
6099 			MemoryContextSwitchTo(oldcontext);
6100 			MemoryContextDelete(tmpcontext);
6101 
6102 			/* And we're done */
6103 			break;
6104 		}
6105 	}
6106 
6107 	return have_data;
6108 }
6109 
6110 /*
6111  * Get one endpoint datum (min or max depending on indexscandir) from the
6112  * specified index.  Return true if successful, false if index is empty.
6113  * On success, endpoint value is stored to *endpointDatum (and copied into
6114  * outercontext).
6115  *
6116  * scankeys is a 1-element scankey array set up to reject nulls.
6117  * typLen/typByVal describe the datatype of the index's first column.
6118  * tableslot is a slot suitable to hold table tuples, in case we need
6119  * to probe the heap.
6120  * (We could compute these values locally, but that would mean computing them
6121  * twice when get_actual_variable_range needs both the min and the max.)
6122  */
6123 static bool
6124 get_actual_variable_endpoint(Relation heapRel,
6125 							 Relation indexRel,
6126 							 ScanDirection indexscandir,
6127 							 ScanKey scankeys,
6128 							 int16 typLen,
6129 							 bool typByVal,
6130 							 TupleTableSlot *tableslot,
6131 							 MemoryContext outercontext,
6132 							 Datum *endpointDatum)
6133 {
6134 	bool		have_data = false;
6135 	SnapshotData SnapshotNonVacuumable;
6136 	IndexScanDesc index_scan;
6137 	Buffer		vmbuffer = InvalidBuffer;
6138 	ItemPointer tid;
6139 	Datum		values[INDEX_MAX_KEYS];
6140 	bool		isnull[INDEX_MAX_KEYS];
6141 	MemoryContext oldcontext;
6142 
6143 	/*
6144 	 * We use the index-only-scan machinery for this.  With mostly-static
6145 	 * tables that's a win because it avoids a heap visit.  It's also a win
6146 	 * for dynamic data, but the reason is less obvious; read on for details.
6147 	 *
6148 	 * In principle, we should scan the index with our current active
6149 	 * snapshot, which is the best approximation we've got to what the query
6150 	 * will see when executed.  But that won't be exact if a new snap is taken
6151 	 * before running the query, and it can be very expensive if a lot of
6152 	 * recently-dead or uncommitted rows exist at the beginning or end of the
6153 	 * index (because we'll laboriously fetch each one and reject it).
6154 	 * Instead, we use SnapshotNonVacuumable.  That will accept recently-dead
6155 	 * and uncommitted rows as well as normal visible rows.  On the other
6156 	 * hand, it will reject known-dead rows, and thus not give a bogus answer
6157 	 * when the extreme value has been deleted (unless the deletion was quite
6158 	 * recent); that case motivates not using SnapshotAny here.
6159 	 *
6160 	 * A crucial point here is that SnapshotNonVacuumable, with
6161 	 * GlobalVisTestFor(heapRel) as horizon, yields the inverse of the
6162 	 * condition that the indexscan will use to decide that index entries are
6163 	 * killable (see heap_hot_search_buffer()).  Therefore, if the snapshot
6164 	 * rejects a tuple (or more precisely, all tuples of a HOT chain) and we
6165 	 * have to continue scanning past it, we know that the indexscan will mark
6166 	 * that index entry killed.  That means that the next
6167 	 * get_actual_variable_endpoint() call will not have to re-consider that
6168 	 * index entry.  In this way we avoid repetitive work when this function
6169 	 * is used a lot during planning.
6170 	 *
6171 	 * But using SnapshotNonVacuumable creates a hazard of its own.  In a
6172 	 * recently-created index, some index entries may point at "broken" HOT
6173 	 * chains in which not all the tuple versions contain data matching the
6174 	 * index entry.  The live tuple version(s) certainly do match the index,
6175 	 * but SnapshotNonVacuumable can accept recently-dead tuple versions that
6176 	 * don't match.  Hence, if we took data from the selected heap tuple, we
6177 	 * might get a bogus answer that's not close to the index extremal value,
6178 	 * or could even be NULL.  We avoid this hazard because we take the data
6179 	 * from the index entry not the heap.
6180 	 */
6181 	InitNonVacuumableSnapshot(SnapshotNonVacuumable,
6182 							  GlobalVisTestFor(heapRel));
6183 
6184 	index_scan = index_beginscan(heapRel, indexRel,
6185 								 &SnapshotNonVacuumable,
6186 								 1, 0);
6187 	/* Set it up for index-only scan */
6188 	index_scan->xs_want_itup = true;
6189 	index_rescan(index_scan, scankeys, 1, NULL, 0);
6190 
6191 	/* Fetch first/next tuple in specified direction */
6192 	while ((tid = index_getnext_tid(index_scan, indexscandir)) != NULL)
6193 	{
6194 		if (!VM_ALL_VISIBLE(heapRel,
6195 							ItemPointerGetBlockNumber(tid),
6196 							&vmbuffer))
6197 		{
6198 			/* Rats, we have to visit the heap to check visibility */
6199 			if (!index_fetch_heap(index_scan, tableslot))
6200 				continue;		/* no visible tuple, try next index entry */
6201 
6202 			/* We don't actually need the heap tuple for anything */
6203 			ExecClearTuple(tableslot);
6204 
6205 			/*
6206 			 * We don't care whether there's more than one visible tuple in
6207 			 * the HOT chain; if any are visible, that's good enough.
6208 			 */
6209 		}
6210 
6211 		/*
6212 		 * We expect that btree will return data in IndexTuple not HeapTuple
6213 		 * format.  It's not lossy either.
6214 		 */
6215 		if (!index_scan->xs_itup)
6216 			elog(ERROR, "no data returned for index-only scan");
6217 		if (index_scan->xs_recheck)
6218 			elog(ERROR, "unexpected recheck indication from btree");
6219 
6220 		/* OK to deconstruct the index tuple */
6221 		index_deform_tuple(index_scan->xs_itup,
6222 						   index_scan->xs_itupdesc,
6223 						   values, isnull);
6224 
6225 		/* Shouldn't have got a null, but be careful */
6226 		if (isnull[0])
6227 			elog(ERROR, "found unexpected null value in index \"%s\"",
6228 				 RelationGetRelationName(indexRel));
6229 
6230 		/* Copy the index column value out to caller's context */
6231 		oldcontext = MemoryContextSwitchTo(outercontext);
6232 		*endpointDatum = datumCopy(values[0], typByVal, typLen);
6233 		MemoryContextSwitchTo(oldcontext);
6234 		have_data = true;
6235 		break;
6236 	}
6237 
6238 	if (vmbuffer != InvalidBuffer)
6239 		ReleaseBuffer(vmbuffer);
6240 	index_endscan(index_scan);
6241 
6242 	return have_data;
6243 }
6244 
6245 /*
6246  * find_join_input_rel
6247  *		Look up the input relation for a join.
6248  *
6249  * We assume that the input relation's RelOptInfo must have been constructed
6250  * already.
6251  */
6252 static RelOptInfo *
6253 find_join_input_rel(PlannerInfo *root, Relids relids)
6254 {
6255 	RelOptInfo *rel = NULL;
6256 
6257 	switch (bms_membership(relids))
6258 	{
6259 		case BMS_EMPTY_SET:
6260 			/* should not happen */
6261 			break;
6262 		case BMS_SINGLETON:
6263 			rel = find_base_rel(root, bms_singleton_member(relids));
6264 			break;
6265 		case BMS_MULTIPLE:
6266 			rel = find_join_rel(root, relids);
6267 			break;
6268 	}
6269 
6270 	if (rel == NULL)
6271 		elog(ERROR, "could not find RelOptInfo for given relids");
6272 
6273 	return rel;
6274 }
6275 
6276 
6277 /*-------------------------------------------------------------------------
6278  *
6279  * Index cost estimation functions
6280  *
6281  *-------------------------------------------------------------------------
6282  */
6283 
6284 /*
6285  * Extract the actual indexquals (as RestrictInfos) from an IndexClause list
6286  */
6287 List *
get_quals_from_indexclauses(List * indexclauses)6288 get_quals_from_indexclauses(List *indexclauses)
6289 {
6290 	List	   *result = NIL;
6291 	ListCell   *lc;
6292 
6293 	foreach(lc, indexclauses)
6294 	{
6295 		IndexClause *iclause = lfirst_node(IndexClause, lc);
6296 		ListCell   *lc2;
6297 
6298 		foreach(lc2, iclause->indexquals)
6299 		{
6300 			RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
6301 
6302 			result = lappend(result, rinfo);
6303 		}
6304 	}
6305 	return result;
6306 }
6307 
6308 /*
6309  * Compute the total evaluation cost of the comparison operands in a list
6310  * of index qual expressions.  Since we know these will be evaluated just
6311  * once per scan, there's no need to distinguish startup from per-row cost.
6312  *
6313  * This can be used either on the result of get_quals_from_indexclauses(),
6314  * or directly on an indexorderbys list.  In both cases, we expect that the
6315  * index key expression is on the left side of binary clauses.
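 *
 * For example, given a hypothetical qual "x = $1 + 1", it is the cost of
 * evaluating the expression "$1 + 1" that is charged here; that expression
 * is evaluated once per scan when the runtime keys are set up, not once
 * per row.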
 */
Cost
index_other_operands_eval_cost(PlannerInfo *root, List *indexquals)
{
	Cost		qual_arg_cost = 0;
	ListCell   *lc;

	foreach(lc, indexquals)
	{
		Expr	   *clause = (Expr *) lfirst(lc);
		Node	   *other_operand;
		QualCost	index_qual_cost;

		/*
		 * Index quals will have RestrictInfos, indexorderbys won't.  Look
		 * through RestrictInfo if present.
		 */
		if (IsA(clause, RestrictInfo))
			clause = ((RestrictInfo *) clause)->clause;

		if (IsA(clause, OpExpr))
		{
			OpExpr	   *op = (OpExpr *) clause;

			other_operand = (Node *) lsecond(op->args);
		}
		else if (IsA(clause, RowCompareExpr))
		{
			RowCompareExpr *rc = (RowCompareExpr *) clause;

			other_operand = (Node *) rc->rargs;
		}
		else if (IsA(clause, ScalarArrayOpExpr))
		{
			ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;

			other_operand = (Node *) lsecond(saop->args);
		}
		else if (IsA(clause, NullTest))
		{
			other_operand = NULL;
		}
		else
		{
			elog(ERROR, "unsupported indexqual type: %d",
				 (int) nodeTag(clause));
			other_operand = NULL;	/* keep compiler quiet */
		}

		cost_qual_eval_node(&index_qual_cost, other_operand, root);
		qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
	}
	return qual_arg_cost;
}

void
genericcostestimate(PlannerInfo *root,
					IndexPath *path,
					double loop_count,
					GenericCosts *costs)
{
	IndexOptInfo *index = path->indexinfo;
	List	   *indexQuals = get_quals_from_indexclauses(path->indexclauses);
	List	   *indexOrderBys = path->indexorderbys;
	Cost		indexStartupCost;
	Cost		indexTotalCost;
	Selectivity indexSelectivity;
	double		indexCorrelation;
	double		numIndexPages;
	double		numIndexTuples;
	double		spc_random_page_cost;
	double		num_sa_scans;
	double		num_outer_scans;
	double		num_scans;
	double		qual_op_cost;
	double		qual_arg_cost;
	List	   *selectivityQuals;
	ListCell   *l;

	/*
	 * If the index is partial, AND the index predicate with the explicitly
	 * given indexquals to produce a more accurate idea of the index
	 * selectivity.
	 */
	selectivityQuals = add_predicate_to_index_quals(index, indexQuals);

	/*
	 * Check for ScalarArrayOpExpr index quals, and estimate the number of
	 * index scans that will be performed.
	 */
	num_sa_scans = 1;
	foreach(l, indexQuals)
	{
		RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);

		if (IsA(rinfo->clause, ScalarArrayOpExpr))
		{
			ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
			int			alength = estimate_array_length(lsecond(saop->args));

			if (alength > 1)
				num_sa_scans *= alength;
		}
	}
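
	/*
	 * For example (hypothetical quals): "x = ANY ('{1,2,3}')" together with
	 * "y = ANY ('{10,20}')" would give num_sa_scans = 3 * 2 = 6, since each
	 * combination of array elements induces its own descent of the index.
	 */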

	/* Estimate the fraction of main-table tuples that will be visited */
	indexSelectivity = clauselist_selectivity(root, selectivityQuals,
											  index->rel->relid,
											  JOIN_INNER,
											  NULL);

	/*
	 * If caller didn't give us an estimate, estimate the number of index
	 * tuples that will be visited.  We do it in this rather peculiar-looking
	 * way in order to get the right answer for partial indexes.
	 */
	numIndexTuples = costs->numIndexTuples;
	if (numIndexTuples <= 0.0)
	{
		numIndexTuples = indexSelectivity * index->rel->tuples;

		/*
		 * The above calculation counts all the tuples visited across all
		 * scans induced by ScalarArrayOpExpr nodes.  We want to consider the
		 * average per-indexscan number, so adjust.  This is a handy place to
		 * round to integer, too.  (If caller supplied tuple estimate, it's
		 * responsible for handling these considerations.)
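		 *
		 * For instance, with hypothetical numbers: a selectivity of 0.004
		 * over 1,000,000 heap tuples gives 4000 tuples visited in total; if
		 * num_sa_scans were 4, that would be rint(4000 / 4) = 1000 tuples
		 * per scan.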
		 */
		numIndexTuples = rint(numIndexTuples / num_sa_scans);
	}

	/*
	 * We can bound the number of tuples by the index size in any case. Also,
	 * always estimate at least one tuple is touched, even when
	 * indexSelectivity estimate is tiny.
	 */
	if (numIndexTuples > index->tuples)
		numIndexTuples = index->tuples;
	if (numIndexTuples < 1.0)
		numIndexTuples = 1.0;

	/*
	 * Estimate the number of index pages that will be retrieved.
	 *
	 * We use the simplistic method of taking a pro-rata fraction of the total
	 * number of index pages.  In effect, this counts only leaf pages and not
	 * any overhead such as index metapage or upper tree levels.
	 *
	 * In practice access to upper index levels is often nearly free because
	 * those tend to stay in cache under load; moreover, the cost involved is
	 * highly dependent on index type.  We therefore ignore such costs here
	 * and leave it to the caller to add a suitable charge if needed.
	 */
	if (index->pages > 1 && index->tuples > 1)
		numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
	else
		numIndexPages = 1.0;
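
	/*
	 * For example (hypothetical numbers): visiting 250 of an index's
	 * 1,000,000 tuples spread over 10,000 pages gives
	 * ceil(250 * 10000 / 1000000) = ceil(2.5) = 3 leaf pages.
	 */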

	/* fetch estimated page cost for tablespace containing index */
	get_tablespace_page_costs(index->reltablespace,
							  &spc_random_page_cost,
							  NULL);

	/*
	 * Now compute the disk access costs.
	 *
	 * The above calculations are all per-index-scan.  However, if we are in a
	 * nestloop inner scan, we can expect the scan to be repeated (with
	 * different search keys) for each row of the outer relation.  Likewise,
	 * ScalarArrayOpExpr quals result in multiple index scans.  This creates
	 * the potential for cache effects to reduce the number of disk page
	 * fetches needed.  We want to estimate the average per-scan I/O cost in
	 * the presence of caching.
	 *
	 * We use the Mackert-Lohman formula (see costsize.c for details) to
	 * estimate the total number of page fetches that occur.  While this
	 * wasn't what it was designed for, it seems a reasonable model anyway.
	 * Note that we are counting pages not tuples anymore, so we take N = T =
	 * index size, as if there were one "tuple" per page.
	 */
	num_outer_scans = loop_count;
	num_scans = num_sa_scans * num_outer_scans;

	if (num_scans > 1)
	{
		double		pages_fetched;

		/* total page fetches ignoring cache effects */
		pages_fetched = numIndexPages * num_scans;

		/* use Mackert and Lohman formula to adjust for cache effects */
		pages_fetched = index_pages_fetched(pages_fetched,
											index->pages,
											(double) index->pages,
											root);

		/*
		 * Now compute the total disk access cost, and then report a pro-rated
		 * share for each outer scan.  (Don't pro-rate for ScalarArrayOpExpr,
		 * since that's internal to the indexscan.)
		 */
		indexTotalCost = (pages_fetched * spc_random_page_cost)
			/ num_outer_scans;
	}
	else
	{
		/*
		 * For a single index scan, we just charge spc_random_page_cost per
		 * page touched.
		 */
		indexTotalCost = numIndexPages * spc_random_page_cost;
	}
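
	/*
	 * To illustrate with hypothetical numbers: numIndexPages = 5,
	 * num_sa_scans = 4 and loop_count = 10 give 5 * 40 = 200 raw page
	 * fetches; index_pages_fetched() then discounts the re-reads that are
	 * likely cache hits, and the resulting cost is divided by 10 (not 40),
	 * since the SA scans are internal to each indexscan.
	 */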

	/*
	 * CPU cost: any complex expressions in the indexquals will need to be
	 * evaluated once at the start of the scan to reduce them to runtime keys
	 * to pass to the index AM (see nodeIndexscan.c).  We model the per-tuple
	 * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
	 * indexqual operator.  Because we have numIndexTuples as a per-scan
	 * number, we have to multiply by num_sa_scans to get the correct result
	 * for ScalarArrayOpExpr cases.  Similarly add in costs for any index
	 * ORDER BY expressions.
	 *
	 * Note: this neglects the possible costs of rechecking lossy operators.
	 * Detecting that that might be needed seems more expensive than it's
	 * worth, though, considering all the other inaccuracies here ...
	 */
	qual_arg_cost = index_other_operands_eval_cost(root, indexQuals) +
		index_other_operands_eval_cost(root, indexOrderBys);
	qual_op_cost = cpu_operator_cost *
		(list_length(indexQuals) + list_length(indexOrderBys));

	indexStartupCost = qual_arg_cost;
	indexTotalCost += qual_arg_cost;
	indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);

	/*
	 * Generic assumption about index correlation: there isn't any.
	 */
	indexCorrelation = 0.0;

	/*
	 * Return everything to caller.
	 */
	costs->indexStartupCost = indexStartupCost;
	costs->indexTotalCost = indexTotalCost;
	costs->indexSelectivity = indexSelectivity;
	costs->indexCorrelation = indexCorrelation;
	costs->numIndexPages = numIndexPages;
	costs->numIndexTuples = numIndexTuples;
	costs->spc_random_page_cost = spc_random_page_cost;
	costs->num_sa_scans = num_sa_scans;
}

/*
 * If the index is partial, add its predicate to the given qual list.
 *
 * ANDing the index predicate with the explicitly given indexquals produces
 * a more accurate idea of the index's selectivity.  However, we need to be
 * careful not to insert redundant clauses, because clauselist_selectivity()
 * is easily fooled into computing a too-low selectivity estimate.  Our
 * approach is to add only the predicate clause(s) that cannot be proven to
 * be implied by the given indexquals.  This successfully handles cases such
 * as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
 * There are many other cases where we won't detect redundancy, leading to a
 * too-low selectivity estimate, which will bias the system in favor of using
 * partial indexes where possible.  That is not necessarily bad though.
 *
 * Note that indexQuals contains RestrictInfo nodes while the indpred
 * does not, so the output list will be mixed.  This is OK for both
 * predicate_implied_by() and clauselist_selectivity(), but might be
 * problematic if the result were passed to other things.
 */
List *
add_predicate_to_index_quals(IndexOptInfo *index, List *indexQuals)
{
	List	   *predExtraQuals = NIL;
	ListCell   *lc;

	if (index->indpred == NIL)
		return indexQuals;

	foreach(lc, index->indpred)
	{
		Node	   *predQual = (Node *) lfirst(lc);
		List	   *oneQual = list_make1(predQual);

		if (!predicate_implied_by(oneQual, indexQuals, false))
			predExtraQuals = list_concat(predExtraQuals, oneQual);
	}
	return list_concat(predExtraQuals, indexQuals);
}


void
btcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
			   Cost *indexStartupCost, Cost *indexTotalCost,
			   Selectivity *indexSelectivity, double *indexCorrelation,
			   double *indexPages)
{
	IndexOptInfo *index = path->indexinfo;
	GenericCosts costs;
	Oid			relid;
	AttrNumber	colnum;
	VariableStatData vardata;
	double		numIndexTuples;
	Cost		descentCost;
	List	   *indexBoundQuals;
	int			indexcol;
	bool		eqQualHere;
	bool		found_saop;
	bool		found_is_null_op;
	double		num_sa_scans;
	ListCell   *lc;

	/*
	 * For a btree scan, only leading '=' quals plus inequality quals for the
	 * immediately next attribute contribute to index selectivity (these are
	 * the "boundary quals" that determine the starting and stopping points of
	 * the index scan).  Additional quals can suppress visits to the heap, so
	 * it's OK to count them in indexSelectivity, but they should not count
	 * for estimating numIndexTuples.  So we must examine the given indexquals
	 * to find out which ones count as boundary quals.  We rely on the
	 * knowledge that they are given in index column order.
	 *
	 * For a RowCompareExpr, we consider only the first column, just as
	 * rowcomparesel() does.
	 *
	 * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
	 * index scans not one, but the ScalarArrayOpExpr's operator can be
	 * considered to act the same as it normally does.
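	 *
	 * For example, with a hypothetical index on (a, b, c) and quals
	 * "a = 1 AND b > 5 AND c = 10", the boundary quals are "a = 1" and
	 * "b > 5"; since there is no '=' qual on b, the qual on c does not
	 * contribute to numIndexTuples (though it still counts toward
	 * indexSelectivity).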
	 */
	indexBoundQuals = NIL;
	indexcol = 0;
	eqQualHere = false;
	found_saop = false;
	found_is_null_op = false;
	num_sa_scans = 1;
	foreach(lc, path->indexclauses)
	{
		IndexClause *iclause = lfirst_node(IndexClause, lc);
		ListCell   *lc2;

		if (indexcol != iclause->indexcol)
		{
			/* Beginning of a new column's quals */
			if (!eqQualHere)
				break;			/* done if no '=' qual for indexcol */
			eqQualHere = false;
			indexcol++;
			if (indexcol != iclause->indexcol)
				break;			/* no quals at all for indexcol */
		}

		/* Examine each indexqual associated with this index clause */
		foreach(lc2, iclause->indexquals)
		{
			RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
			Expr	   *clause = rinfo->clause;
			Oid			clause_op = InvalidOid;
			int			op_strategy;

			if (IsA(clause, OpExpr))
			{
				OpExpr	   *op = (OpExpr *) clause;

				clause_op = op->opno;
			}
			else if (IsA(clause, RowCompareExpr))
			{
				RowCompareExpr *rc = (RowCompareExpr *) clause;

				clause_op = linitial_oid(rc->opnos);
			}
			else if (IsA(clause, ScalarArrayOpExpr))
			{
				ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
				Node	   *other_operand = (Node *) lsecond(saop->args);
				int			alength = estimate_array_length(other_operand);

				clause_op = saop->opno;
				found_saop = true;
				/* count number of SA scans induced by indexBoundQuals only */
				if (alength > 1)
					num_sa_scans *= alength;
			}
			else if (IsA(clause, NullTest))
			{
				NullTest   *nt = (NullTest *) clause;

				if (nt->nulltesttype == IS_NULL)
				{
					found_is_null_op = true;
					/* IS NULL is like = for selectivity purposes */
					eqQualHere = true;
				}
			}
			else
				elog(ERROR, "unsupported indexqual type: %d",
					 (int) nodeTag(clause));

			/* check for equality operator */
			if (OidIsValid(clause_op))
			{
				op_strategy = get_op_opfamily_strategy(clause_op,
													   index->opfamily[indexcol]);
				Assert(op_strategy != 0);	/* not a member of opfamily?? */
				if (op_strategy == BTEqualStrategyNumber)
					eqQualHere = true;
			}

			indexBoundQuals = lappend(indexBoundQuals, rinfo);
		}
	}

	/*
	 * If index is unique and we found an '=' clause for each column, we can
	 * just assume numIndexTuples = 1 and skip the expensive
	 * clauselist_selectivity calculations.  However, a ScalarArrayOp or
	 * NullTest invalidates that theory, even though it sets eqQualHere.
	 */
	if (index->unique &&
		indexcol == index->nkeycolumns - 1 &&
		eqQualHere &&
		!found_saop &&
		!found_is_null_op)
		numIndexTuples = 1.0;
	else
	{
		List	   *selectivityQuals;
		Selectivity btreeSelectivity;

		/*
		 * If the index is partial, AND the index predicate with the
		 * index-bound quals to produce a more accurate idea of the number of
		 * rows covered by the bound conditions.
		 */
		selectivityQuals = add_predicate_to_index_quals(index, indexBoundQuals);

		btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
												  index->rel->relid,
												  JOIN_INNER,
												  NULL);
		numIndexTuples = btreeSelectivity * index->rel->tuples;

		/*
		 * As in genericcostestimate(), we have to adjust for any
		 * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
		 * to integer.
		 */
		numIndexTuples = rint(numIndexTuples / num_sa_scans);
	}

	/*
	 * Now do generic index cost estimation.
	 */
	MemSet(&costs, 0, sizeof(costs));
	costs.numIndexTuples = numIndexTuples;

	genericcostestimate(root, path, loop_count, &costs);

	/*
	 * Add a CPU-cost component to represent the costs of initial btree
	 * descent.  We don't charge any I/O cost for touching upper btree levels,
	 * since they tend to stay in cache, but we still have to do about log2(N)
	 * comparisons to descend a btree of N leaf tuples.  We charge one
	 * cpu_operator_cost per comparison.
	 *
	 * If there are ScalarArrayOpExprs, charge this once per SA scan.  The
	 * ones after the first one are not startup cost so far as the overall
	 * plan is concerned, so add them only to "total" cost.
	 */
	if (index->tuples > 1)		/* avoid computing log(0) */
	{
		descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
		costs.indexStartupCost += descentCost;
		costs.indexTotalCost += costs.num_sa_scans * descentCost;
	}
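
	/*
	 * For example, a hypothetical index of 1,000,000 tuples yields
	 * ceil(log2(1000000)) = 20 comparisons, so descentCost is
	 * 20 * cpu_operator_cost; it is charged once to startup cost and
	 * num_sa_scans times to total cost.
	 */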

	/*
	 * Even though we're not charging I/O cost for touching upper btree pages,
	 * it's still reasonable to charge some CPU cost per page descended
	 * through.  Moreover, if we had no such charge at all, bloated indexes
	 * would appear to have the same search cost as unbloated ones, at least
	 * in cases where only a single leaf page is expected to be visited.  This
	 * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
	 * touched.  The number of such pages is btree tree height plus one (ie,
	 * we charge for the leaf page too).  As above, charge once per SA scan.
	 */
	descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
	costs.indexStartupCost += descentCost;
	costs.indexTotalCost += costs.num_sa_scans * descentCost;
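
	/*
	 * So a hypothetical btree of height 2 is charged
	 * (2 + 1) * 50.0 * cpu_operator_cost = 150 * cpu_operator_cost here.
	 */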

	/*
	 * If we can get an estimate of the first column's ordering correlation C
	 * from pg_statistic, estimate the index correlation as C for a
	 * single-column index, or C * 0.75 for multiple columns. (The idea here
	 * is that multiple columns dilute the importance of the first column's
	 * ordering, but don't negate it entirely.  Before 8.0 we divided the
	 * correlation by the number of columns, but that seems too strong.)
	 */
	MemSet(&vardata, 0, sizeof(vardata));

	if (index->indexkeys[0] != 0)
	{
		/* Simple variable --- look to stats for the underlying table */
		RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);

		Assert(rte->rtekind == RTE_RELATION);
		relid = rte->relid;
		Assert(relid != InvalidOid);
		colnum = index->indexkeys[0];

		if (get_relation_stats_hook &&
			(*get_relation_stats_hook) (root, rte, colnum, &vardata))
		{
			/*
			 * The hook took control of acquiring a stats tuple.  If it did
			 * supply a tuple, it'd better have supplied a freefunc.
			 */
			if (HeapTupleIsValid(vardata.statsTuple) &&
				!vardata.freefunc)
				elog(ERROR, "no function provided to release variable stats with");
		}
		else
		{
			vardata.statsTuple = SearchSysCache3(STATRELATTINH,
												 ObjectIdGetDatum(relid),
												 Int16GetDatum(colnum),
												 BoolGetDatum(rte->inh));
			vardata.freefunc = ReleaseSysCache;
		}
	}
	else
	{
		/* Expression --- maybe there are stats for the index itself */
		relid = index->indexoid;
		colnum = 1;

		if (get_index_stats_hook &&
			(*get_index_stats_hook) (root, relid, colnum, &vardata))
		{
			/*
			 * The hook took control of acquiring a stats tuple.  If it did
			 * supply a tuple, it'd better have supplied a freefunc.
			 */
			if (HeapTupleIsValid(vardata.statsTuple) &&
				!vardata.freefunc)
				elog(ERROR, "no function provided to release variable stats with");
		}
		else
		{
			vardata.statsTuple = SearchSysCache3(STATRELATTINH,
												 ObjectIdGetDatum(relid),
												 Int16GetDatum(colnum),
												 BoolGetDatum(false));
			vardata.freefunc = ReleaseSysCache;
		}
	}

	if (HeapTupleIsValid(vardata.statsTuple))
	{
		Oid			sortop;
		AttStatsSlot sslot;

		sortop = get_opfamily_member(index->opfamily[0],
									 index->opcintype[0],
									 index->opcintype[0],
									 BTLessStrategyNumber);
		if (OidIsValid(sortop) &&
			get_attstatsslot(&sslot, vardata.statsTuple,
							 STATISTIC_KIND_CORRELATION, sortop,
							 ATTSTATSSLOT_NUMBERS))
		{
			double		varCorrelation;

			Assert(sslot.nnumbers == 1);
			varCorrelation = sslot.numbers[0];

			if (index->reverse_sort[0])
				varCorrelation = -varCorrelation;

			if (index->nkeycolumns > 1)
				costs.indexCorrelation = varCorrelation * 0.75;
			else
				costs.indexCorrelation = varCorrelation;

			free_attstatsslot(&sslot);
		}
	}

	ReleaseVariableStats(vardata);

	*indexStartupCost = costs.indexStartupCost;
	*indexTotalCost = costs.indexTotalCost;
	*indexSelectivity = costs.indexSelectivity;
	*indexCorrelation = costs.indexCorrelation;
	*indexPages = costs.numIndexPages;
}

void
hashcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
				 Cost *indexStartupCost, Cost *indexTotalCost,
				 Selectivity *indexSelectivity, double *indexCorrelation,
				 double *indexPages)
{
	GenericCosts costs;

	MemSet(&costs, 0, sizeof(costs));

	genericcostestimate(root, path, loop_count, &costs);

	/*
	 * A hash index has no descent costs as such, since the index AM can go
	 * directly to the target bucket after computing the hash value.  There
	 * are a couple of other hash-specific costs that we could conceivably add
	 * here, though:
	 *
	 * Ideally we'd charge spc_random_page_cost for each page in the target
	 * bucket, not just the numIndexPages pages that genericcostestimate
	 * thought we'd visit.  However in most cases we don't know which bucket
	 * that will be.  There's no point in considering the average bucket size
	 * because the hash AM makes sure that's always one page.
	 *
	 * Likewise, we could consider charging some CPU for each index tuple in
	 * the bucket, if we knew how many there were.  But the per-tuple cost is
	 * just a hash value comparison, not a general datatype-dependent
	 * comparison, so any such charge ought to be quite a bit less than
	 * cpu_operator_cost; which makes it probably not worth worrying about.
	 *
	 * A bigger issue is that chance hash-value collisions will result in
	 * wasted probes into the heap.  We don't currently attempt to model this
	 * cost on the grounds that it's rare, but maybe it's not rare enough.
	 * (Any fix for this ought to consider the generic lossy-operator problem,
	 * though; it's not entirely hash-specific.)
	 */

	*indexStartupCost = costs.indexStartupCost;
	*indexTotalCost = costs.indexTotalCost;
	*indexSelectivity = costs.indexSelectivity;
	*indexCorrelation = costs.indexCorrelation;
	*indexPages = costs.numIndexPages;
}

void
gistcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
				 Cost *indexStartupCost, Cost *indexTotalCost,
				 Selectivity *indexSelectivity, double *indexCorrelation,
				 double *indexPages)
{
	IndexOptInfo *index = path->indexinfo;
	GenericCosts costs;
	Cost		descentCost;

	MemSet(&costs, 0, sizeof(costs));

	genericcostestimate(root, path, loop_count, &costs);

	/*
	 * We model index descent costs similarly to those for btree, but to do
	 * that we first need an idea of the tree height.  We somewhat arbitrarily
	 * assume that the fanout is 100, meaning the tree height is at most
	 * log100(index->pages).
	 *
	 * Although this computation isn't really expensive enough to require
	 * caching, we might as well use index->tree_height to cache it.
	 */
	if (index->tree_height < 0) /* unknown? */
	{
		if (index->pages > 1)	/* avoid computing log(0) */
			index->tree_height = (int) (log(index->pages) / log(100.0));
		else
			index->tree_height = 0;
	}
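
	/*
	 * For example, a hypothetical 50,000-page GiST index gets
	 * tree_height = (int) (log(50000) / log(100)) = 2 under the assumed
	 * fanout of 100.
	 */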

	/*
	 * Add a CPU-cost component to represent the costs of initial descent. We
	 * just use log(N) here not log2(N) since the branching factor isn't
	 * necessarily two anyway.  As for btree, charge once per SA scan.
	 */
	if (index->tuples > 1)		/* avoid computing log(0) */
	{
		descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
		costs.indexStartupCost += descentCost;
		costs.indexTotalCost += costs.num_sa_scans * descentCost;
	}

	/*
	 * Likewise add a per-page charge, calculated the same as for btrees.
	 */
	descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
	costs.indexStartupCost += descentCost;
	costs.indexTotalCost += costs.num_sa_scans * descentCost;

	*indexStartupCost = costs.indexStartupCost;
	*indexTotalCost = costs.indexTotalCost;
	*indexSelectivity = costs.indexSelectivity;
	*indexCorrelation = costs.indexCorrelation;
	*indexPages = costs.numIndexPages;
}

void
spgcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
				Cost *indexStartupCost, Cost *indexTotalCost,
				Selectivity *indexSelectivity, double *indexCorrelation,
				double *indexPages)
{
	IndexOptInfo *index = path->indexinfo;
	GenericCosts costs;
	Cost		descentCost;

	MemSet(&costs, 0, sizeof(costs));

	genericcostestimate(root, path, loop_count, &costs);

	/*
	 * We model index descent costs similarly to those for btree, but to do
	 * that we first need an idea of the tree height.  We somewhat arbitrarily
	 * assume that the fanout is 100, meaning the tree height is at most
	 * log100(index->pages).
	 *
	 * Although this computation isn't really expensive enough to require
	 * caching, we might as well use index->tree_height to cache it.
	 */
	if (index->tree_height < 0) /* unknown? */
	{
		if (index->pages > 1)	/* avoid computing log(0) */
			index->tree_height = (int) (log(index->pages) / log(100.0));
		else
			index->tree_height = 0;
	}

	/*
	 * Add a CPU-cost component to represent the costs of initial descent. We
	 * just use log(N) here not log2(N) since the branching factor isn't
	 * necessarily two anyway.  As for btree, charge once per SA scan.
	 */
	if (index->tuples > 1)		/* avoid computing log(0) */
	{
		descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
		costs.indexStartupCost += descentCost;
		costs.indexTotalCost += costs.num_sa_scans * descentCost;
	}

	/*
	 * Likewise add a per-page charge, calculated the same as for btrees.
	 */
	descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
	costs.indexStartupCost += descentCost;
	costs.indexTotalCost += costs.num_sa_scans * descentCost;

	*indexStartupCost = costs.indexStartupCost;
	*indexTotalCost = costs.indexTotalCost;
	*indexSelectivity = costs.indexSelectivity;
	*indexCorrelation = costs.indexCorrelation;
	*indexPages = costs.numIndexPages;
}


/*
 * Support routines for gincostestimate
 */

typedef struct
{
	bool		attHasFullScan[INDEX_MAX_KEYS];
	bool		attHasNormalScan[INDEX_MAX_KEYS];
	double		partialEntries;
	double		exactEntries;
	double		searchEntries;
	double		arrayScans;
} GinQualCounts;

/*
 * Estimate the number of index terms that need to be searched for while
 * testing the given GIN query, and increment the counts in *counts
 * appropriately.  If the query is unsatisfiable, return false.
 */
static bool
gincost_pattern(IndexOptInfo *index, int indexcol,
				Oid clause_op, Datum query,
				GinQualCounts *counts)
{
	FmgrInfo	flinfo;
	Oid			extractProcOid;
	Oid			collation;
	int			strategy_op;
	Oid			lefttype,
				righttype;
	int32		nentries = 0;
	bool	   *partial_matches = NULL;
	Pointer    *extra_data = NULL;
	bool	   *nullFlags = NULL;
	int32		searchMode = GIN_SEARCH_MODE_DEFAULT;
	int32		i;

	Assert(indexcol < index->nkeycolumns);

	/*
	 * Get the operator's strategy number and declared input data types within
	 * the index opfamily.  (We don't need the latter, but we use
	 * get_op_opfamily_properties because it will throw error if it fails to
	 * find a matching pg_amop entry.)
	 */
	get_op_opfamily_properties(clause_op, index->opfamily[indexcol], false,
							   &strategy_op, &lefttype, &righttype);

	/*
	 * GIN always uses the "default" support functions, which are those with
	 * lefttype == righttype == the opclass' opcintype (see
	 * IndexSupportInitialize in relcache.c).
	 */
	extractProcOid = get_opfamily_proc(index->opfamily[indexcol],
									   index->opcintype[indexcol],
									   index->opcintype[indexcol],
									   GIN_EXTRACTQUERY_PROC);

	if (!OidIsValid(extractProcOid))
	{
		/* should not happen; throw same error as index_getprocinfo */
		elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
			 GIN_EXTRACTQUERY_PROC, indexcol + 1,
			 get_rel_name(index->indexoid));
	}

	/*
	 * Choose collation to pass to extractProc (should match initGinState).
	 */
	if (OidIsValid(index->indexcollations[indexcol]))
		collation = index->indexcollations[indexcol];
	else
		collation = DEFAULT_COLLATION_OID;

	fmgr_info(extractProcOid, &flinfo);

	set_fn_opclass_options(&flinfo, index->opclassoptions[indexcol]);

	FunctionCall7Coll(&flinfo,
					  collation,
					  query,
					  PointerGetDatum(&nentries),
					  UInt16GetDatum(strategy_op),
					  PointerGetDatum(&partial_matches),
					  PointerGetDatum(&extra_data),
					  PointerGetDatum(&nullFlags),
					  PointerGetDatum(&searchMode));

	if (nentries <= 0 && searchMode == GIN_SEARCH_MODE_DEFAULT)
	{
		/* No match is possible */
		return false;
	}

	for (i = 0; i < nentries; i++)
	{
		/*
		 * For a partial match we don't have any information with which to
		 * estimate the number of matched entries in the index, so just
		 * estimate it as 100.
		 */
		if (partial_matches && partial_matches[i])
			counts->partialEntries += 100;
		else
			counts->exactEntries++;

		counts->searchEntries++;
	}

	if (searchMode == GIN_SEARCH_MODE_DEFAULT)
	{
		counts->attHasNormalScan[indexcol] = true;
	}
	else if (searchMode == GIN_SEARCH_MODE_INCLUDE_EMPTY)
	{
		/* Treat "include empty" like an exact-match item */
		counts->attHasNormalScan[indexcol] = true;
		counts->exactEntries++;
		counts->searchEntries++;
	}
	else
	{
		/* It's GIN_SEARCH_MODE_ALL */
		counts->attHasFullScan[indexcol] = true;
	}

	return true;
}

/*
 * Estimate the number of index terms that need to be searched for while
 * testing the given GIN index clause, and increment the counts in *counts
 * appropriately.  If the query is unsatisfiable, return false.
 */
static bool
gincost_opexpr(PlannerInfo *root,
			   IndexOptInfo *index,
			   int indexcol,
			   OpExpr *clause,
			   GinQualCounts *counts)
{
	Oid			clause_op = clause->opno;
	Node	   *operand = (Node *) lsecond(clause->args);

	/* aggressively reduce to a constant, and look through relabeling */
	operand = estimate_expression_value(root, operand);

	if (IsA(operand, RelabelType))
		operand = (Node *) ((RelabelType *) operand)->arg;
	/*
	 * It's impossible to call the extractQuery method for an unknown
	 * operand, so unless the operand is a Const we can't do much; just
	 * assume there will be one ordinary search entry from the operand at
	 * runtime.
	 */
	if (!IsA(operand, Const))
	{
		counts->exactEntries++;
		counts->searchEntries++;
		return true;
	}

	/* If Const is null, there can be no matches */
	if (((Const *) operand)->constisnull)
		return false;

	/* Otherwise, apply extractQuery and get the actual term counts */
	return gincost_pattern(index, indexcol, clause_op,
						   ((Const *) operand)->constvalue,
						   counts);
}

/*
 * Estimate the number of index terms that need to be searched for while
 * testing the given GIN index clause, and increment the counts in *counts
 * appropriately.  If the query is unsatisfiable, return false.
 *
 * A ScalarArrayOpExpr will give rise to N separate indexscans at runtime,
 * each of which involves one value from the RHS array, plus all the
 * non-array quals (if any).  To model this, we average the counts across
 * the RHS elements, and add the averages to the counts in *counts (which
 * correspond to per-indexscan costs).  We also multiply counts->arrayScans
 * by N, causing gincostestimate to scale up its estimates accordingly.
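 *
 * For example, a hypothetical qual of the form "x op ANY (ARRAY[a, b, c])"
 * whose three elements are all satisfiable adds one-third of each element's
 * entry counts to *counts, and multiplies counts->arrayScans by 3.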
 */
static bool
gincost_scalararrayopexpr(PlannerInfo *root,
						  IndexOptInfo *index,
						  int indexcol,
						  ScalarArrayOpExpr *clause,
						  double numIndexEntries,
						  GinQualCounts *counts)
{
	Oid			clause_op = clause->opno;
	Node	   *rightop = (Node *) lsecond(clause->args);
	ArrayType  *arrayval;
	int16		elmlen;
	bool		elmbyval;
	char		elmalign;
	int			numElems;
	Datum	   *elemValues;
	bool	   *elemNulls;
	GinQualCounts arraycounts;
	int			numPossible = 0;
	int			i;

	Assert(clause->useOr);

	/* aggressively reduce to a constant, and look through relabeling */
	rightop = estimate_expression_value(root, rightop);

	if (IsA(rightop, RelabelType))
		rightop = (Node *) ((RelabelType *) rightop)->arg;
	/*
	 * It's impossible to call the extractQuery method for an unknown
	 * operand, so unless the operand is a Const we can't do much; just
	 * assume there will be one ordinary search entry from each array entry
	 * at runtime, and fall back on a probably-bad estimate of the number of
	 * array entries.
	 */
	if (!IsA(rightop, Const))
	{
		counts->exactEntries++;
		counts->searchEntries++;
		counts->arrayScans *= estimate_array_length(rightop);
		return true;
	}

	/* If Const is null, there can be no matches */
	if (((Const *) rightop)->constisnull)
		return false;

	/* Otherwise, extract the array elements and iterate over them */
	arrayval = DatumGetArrayTypeP(((Const *) rightop)->constvalue);
	get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
						 &elmlen, &elmbyval, &elmalign);
	deconstruct_array(arrayval,
					  ARR_ELEMTYPE(arrayval),
					  elmlen, elmbyval, elmalign,
					  &elemValues, &elemNulls, &numElems);

	memset(&arraycounts, 0, sizeof(arraycounts));

	for (i = 0; i < numElems; i++)
	{
		GinQualCounts elemcounts;

		/* NULL can't match anything, so ignore, as the executor will */
		if (elemNulls[i])
			continue;

		/* Otherwise, apply extractQuery and get the actual term counts */
		memset(&elemcounts, 0, sizeof(elemcounts));

		if (gincost_pattern(index, indexcol, clause_op, elemValues[i],
							&elemcounts))
		{
			/* We ignore array elements that are unsatisfiable patterns */
			numPossible++;

			if (elemcounts.attHasFullScan[indexcol] &&
				!elemcounts.attHasNormalScan[indexcol])
			{
				/*
				 * Full index scan will be required.  We treat this as if
				 * every key in the index had been listed in the query; is
				 * that reasonable?
				 */
				elemcounts.partialEntries = 0;
				elemcounts.exactEntries = numIndexEntries;
				elemcounts.searchEntries = numIndexEntries;
			}
			arraycounts.partialEntries += elemcounts.partialEntries;
			arraycounts.exactEntries += elemcounts.exactEntries;
			arraycounts.searchEntries += elemcounts.searchEntries;
		}
	}

	if (numPossible == 0)
	{
		/* No satisfiable patterns in the array */
		return false;
	}

	/*
	 * Now add the averages to the global counts.  This will give us an
	 * estimate of the average number of terms searched for in each indexscan,
	 * including contributions from both array and non-array quals.
	 */
	counts->partialEntries += arraycounts.partialEntries / numPossible;
	counts->exactEntries += arraycounts.exactEntries / numPossible;
	counts->searchEntries += arraycounts.searchEntries / numPossible;

	counts->arrayScans *= numPossible;

	return true;
}

/*
 * GIN has search behavior completely different from other index types
 */
void
gincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
				Cost *indexStartupCost, Cost *indexTotalCost,
				Selectivity *indexSelectivity, double *indexCorrelation,
				double *indexPages)
{
	IndexOptInfo *index = path->indexinfo;
	List	   *indexQuals = get_quals_from_indexclauses(path->indexclauses);
	List	   *selectivityQuals;
	double		numPages = index->pages,
				numTuples = index->tuples;
	double		numEntryPages,
				numDataPages,
				numPendingPages,
				numEntries;
	GinQualCounts counts;
	bool		matchPossible;
	bool		fullIndexScan;
	double		partialScale;
	double		entryPagesFetched,
				dataPagesFetched,
				dataPagesFetchedBySel;
	double		qual_op_cost,
				qual_arg_cost,
				spc_random_page_cost,
				outer_scans;
	Relation	indexRel;
	GinStatsData ginStats;
	ListCell   *lc;
	int			i;

	/*
	 * Obtain statistical information from the meta page, if possible.  Else
	 * set ginStats to zeroes, and we'll cope below.
	 */
	if (!index->hypothetical)
	{
		/* Lock should have already been obtained in plancat.c */
		indexRel = index_open(index->indexoid, NoLock);
		ginGetStats(indexRel, &ginStats);
		index_close(indexRel, NoLock);
	}
	else
	{
		memset(&ginStats, 0, sizeof(ginStats));
	}

	/*
	 * Assuming we got valid (nonzero) stats at all, nPendingPages can be
	 * trusted, but the other fields are data as of the last VACUUM.  We can
	 * scale them up to account for growth since then, but that method only
	 * goes so far; in the worst case, the stats might be for a completely
	 * empty index, and scaling them will produce pretty bogus numbers.
	 * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
	 * it's grown more than that, fall back to estimating things only from the
	 * assumed-accurate index size.  But we'll trust nPendingPages in any case
	 * so long as it's not clearly insane, ie, more than the index size.
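	 *
	 * For instance, with hypothetical numbers: if the index is now 1200
	 * pages but nTotalPages was 1000 at the last VACUUM (within the 4X
	 * cutoff, since 1000 > 1200 / 4), we scale nEntryPages, nDataPages and
	 * nEntries by 1200 / 1000 = 1.2 below.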
	 */
	if (ginStats.nPendingPages < numPages)
		numPendingPages = ginStats.nPendingPages;
	else
		numPendingPages = 0;

	if (numPages > 0 && ginStats.nTotalPages <= numPages &&
		ginStats.nTotalPages > numPages / 4 &&
		ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
	{
		/*
		 * OK, the stats seem close enough to sane to be trusted.  But we
		 * still need to scale them by the ratio numPages / nTotalPages to
		 * account for growth since the last VACUUM.
		 */
		double		scale = numPages / ginStats.nTotalPages;

		numEntryPages = ceil(ginStats.nEntryPages * scale);
		numDataPages = ceil(ginStats.nDataPages * scale);
		numEntries = ceil(ginStats.nEntries * scale);
		/* ensure we didn't round up too much */
		numEntryPages = Min(numEntryPages, numPages - numPendingPages);
		numDataPages = Min(numDataPages,
						   numPages - numPendingPages - numEntryPages);
	}
	else
	{
		/*
		 * We might get here because it's a hypothetical index, or an index
		 * created pre-9.1 and never vacuumed since upgrading (in which case
		 * its stats would read as zeroes), or just because it's grown too
		 * much since the last VACUUM for us to put our faith in scaling.
		 *
		 * Invent some plausible internal statistics based on the index page
		 * count (and clamp that to at least 10 pages, just in case).  We
		 * estimate that 90% of the index is entry pages, and the rest is data
		 * pages.  Estimate 100 entries per entry page; this is rather bogus
		 * since it'll depend on the size of the keys, but it's more robust
		 * than trying to predict the number of entries per heap tuple.
		 */
		numPages = Max(numPages, 10);
		numEntryPages = floor((numPages - numPendingPages) * 0.90);
		numDataPages = numPages - numPendingPages - numEntryPages;
		numEntries = floor(numEntryPages * 100);
	}

	/* In an empty index, numEntries could be zero.  Avoid divide-by-zero */
	if (numEntries < 1)
		numEntries = 1;

	/*
	 * If the index is partial, AND the index predicate with the index-bound
	 * quals to produce a more accurate idea of the number of rows covered by
	 * the bound conditions.
	 */
	selectivityQuals = add_predicate_to_index_quals(index, indexQuals);

	/* Estimate the fraction of main-table tuples that will be visited */
	*indexSelectivity = clauselist_selectivity(root, selectivityQuals,
											   index->rel->relid,
											   JOIN_INNER,
											   NULL);

	/* fetch estimated page cost for tablespace containing index */
	get_tablespace_page_costs(index->reltablespace,
							  &spc_random_page_cost,
							  NULL);

	/*
	 * Generic assumption about index correlation: there isn't any.
	 */
	*indexCorrelation = 0.0;

	/*
	 * Examine quals to estimate number of search entries & partial matches
	 */
	memset(&counts, 0, sizeof(counts));
	counts.arrayScans = 1;
	matchPossible = true;

	foreach(lc, path->indexclauses)
	{
		IndexClause *iclause = lfirst_node(IndexClause, lc);
		ListCell   *lc2;

		foreach(lc2, iclause->indexquals)
		{
			RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
			Expr	   *clause = rinfo->clause;

			if (IsA(clause, OpExpr))
			{
				matchPossible = gincost_opexpr(root,
											   index,
											   iclause->indexcol,
											   (OpExpr *) clause,
											   &counts);
				if (!matchPossible)
					break;
			}
			else if (IsA(clause, ScalarArrayOpExpr))
			{
				matchPossible = gincost_scalararrayopexpr(root,
														  index,
														  iclause->indexcol,
														  (ScalarArrayOpExpr *) clause,
														  numEntries,
														  &counts);
				if (!matchPossible)
					break;
			}
			else
			{
				/* shouldn't be anything else for a GIN index */
				elog(ERROR, "unsupported GIN indexqual type: %d",
					 (int) nodeTag(clause));
			}
		}
	}

	/* Fall out if there were any provably-unsatisfiable quals */
	if (!matchPossible)
	{
		*indexStartupCost = 0;
		*indexTotalCost = 0;
		*indexSelectivity = 0;
		return;
	}

	/*
	 * If an attribute has a full scan and at the same time doesn't have a
	 * normal scan, then we'll have to scan all non-null entries of that
	 * attribute.  Currently, we don't have per-attribute statistics for GIN.
	 * Thus, we must assume the whole GIN index has to be scanned in this
	 * case.
	 */
	fullIndexScan = false;
	for (i = 0; i < index->nkeycolumns; i++)
	{
		if (counts.attHasFullScan[i] && !counts.attHasNormalScan[i])
		{
			fullIndexScan = true;
			break;
		}
	}

	if (fullIndexScan || indexQuals == NIL)
	{
		/*
		 * Full index scan will be required.  We treat this as if every key in
		 * the index had been listed in the query; is that reasonable?
		 */
		counts.partialEntries = 0;
		counts.exactEntries = numEntries;
		counts.searchEntries = numEntries;
	}

	/* Will we have more than one iteration of a nestloop scan? */
	outer_scans = loop_count;

	/*
	 * Compute the cost to begin the scan; first of all, account for the
	 * pending list.
	 */
	entryPagesFetched = numPendingPages;

	/*
	 * Estimate the number of entry pages read.  We need to perform
	 * counts.searchEntries searches.  Use a power function as it should be,
	 * though the number of tuples on leaf pages is usually much greater.
	 * This includes all searches of the entry tree, including the search for
	 * the first entry in the partial-match algorithm.
	 */
	entryPagesFetched += ceil(counts.searchEntries * rint(pow(numEntryPages, 0.15)));
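
	/*
	 * For example (hypothetical numbers): with 4 search entries and 500
	 * entry pages, 500^0.15 is about 2.5, so rint() gives 3 pages per
	 * search, or ceil(4 * 3) = 12 entry pages in all.
	 */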

	/*
	 * Add an estimate of the entry pages read by the partial-match
	 * algorithm, which is a scan over the leaf pages of the entry tree.  We
	 * don't have any useful stats here, so estimate it as a proportion.
	 * Because counts.partialEntries is really pretty bogus (see code above),
	 * it's possible that it is more than numEntries; clamp the proportion to
	 * ensure sanity.
	 */
	partialScale = counts.partialEntries / numEntries;
	partialScale = Min(partialScale, 1.0);

	entryPagesFetched += ceil(numEntryPages * partialScale);

	/*
	 * The partial-match algorithm reads all data pages before doing the
	 * actual scan, so it's a startup cost.  Again, we don't have any useful
	 * stats here, so estimate it as a proportion.
	 */
	dataPagesFetched = ceil(numDataPages * partialScale);

	/*
	 * Calculate cache effects if more than one scan due to nestloops or array
	 * quals.  The result is pro-rated per nestloop scan, but the array qual
	 * factor shouldn't be pro-rated (compare genericcostestimate).
	 */
	if (outer_scans > 1 || counts.arrayScans > 1)
	{
		entryPagesFetched *= outer_scans * counts.arrayScans;
		entryPagesFetched = index_pages_fetched(entryPagesFetched,
												(BlockNumber) numEntryPages,
												numEntryPages, root);
		entryPagesFetched /= outer_scans;
		dataPagesFetched *= outer_scans * counts.arrayScans;
		dataPagesFetched = index_pages_fetched(dataPagesFetched,
											   (BlockNumber) numDataPages,
											   numDataPages, root);
		dataPagesFetched /= outer_scans;
	}

	/*
	 * Here we use random page cost because logically-close pages could be far
	 * apart on disk.
	 */
	*indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;

	/*
	 * Now compute the number of data pages fetched during the scan.
	 *
	 * We assume that every entry has the same number of items, and that there
	 * is no overlap between them. (XXX: tsvector and array opclasses collect
	 * statistics on the frequency of individual keys; it would be nice to use
	 * those here.)
	 */
	dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);

	/*
	 * If there is a lot of overlap among the entries, in particular if one of
	 * the entries is very frequent, the above calculation can grossly
	 * under-estimate.  As a simple cross-check, calculate a lower bound based
	 * on the overall selectivity of the quals.  At a minimum, we must read
	 * one item pointer for each matching entry.
	 *
	 * The width of each item pointer varies, based on the level of
	 * compression.  We don't have statistics on that, but an average of
	 * around 3 bytes per item is fairly typical.
	 */
	dataPagesFetchedBySel = ceil(*indexSelectivity *
								 (numTuples / (BLCKSZ / 3)));
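
	/*
	 * For example (hypothetical numbers): with BLCKSZ = 8192 we assume about
	 * 2730 item pointers per page, so a selectivity of 0.01 over 1,000,000
	 * tuples implies at least ceil(10000 / 2730) = 4 data pages.
	 */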
	if (dataPagesFetchedBySel > dataPagesFetched)
		dataPagesFetched = dataPagesFetchedBySel;

	/* Account for cache effects, the same as above */
	if (outer_scans > 1 || counts.arrayScans > 1)
	{
		dataPagesFetched *= outer_scans * counts.arrayScans;
		dataPagesFetched = index_pages_fetched(dataPagesFetched,
											   (BlockNumber) numDataPages,
											   numDataPages, root);
		dataPagesFetched /= outer_scans;
	}

	/* And apply random_page_cost as the cost per page */
	*indexTotalCost = *indexStartupCost +
		dataPagesFetched * spc_random_page_cost;

	/*
	 * Add on index qual eval costs, much as in genericcostestimate.  But we
	 * can disregard indexorderbys, since GIN doesn't support those.
	 */
	qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);
	qual_op_cost = cpu_operator_cost * list_length(indexQuals);

	*indexStartupCost += qual_arg_cost;
	*indexTotalCost += qual_arg_cost;
	*indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
	*indexPages = dataPagesFetched;
}

/*
 * BRIN has search behavior completely different from other index types
 */
void
brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
				 Cost *indexStartupCost, Cost *indexTotalCost,
				 Selectivity *indexSelectivity, double *indexCorrelation,
				 double *indexPages)
{
	IndexOptInfo *index = path->indexinfo;
	List	   *indexQuals = get_quals_from_indexclauses(path->indexclauses);
	double		numPages = index->pages;
	RelOptInfo *baserel = index->rel;
	RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
	Cost		spc_seq_page_cost;
	Cost		spc_random_page_cost;
	double		qual_arg_cost;
	double		qualSelectivity;
	BrinStatsData statsData;
	double		indexRanges;
	double		minimalRanges;
	double		estimatedRanges;
	double		selec;
	Relation	indexRel;
	ListCell   *l;
	VariableStatData vardata;

	Assert(rte->rtekind == RTE_RELATION);

	/* fetch estimated page cost for the tablespace containing the index */
	get_tablespace_page_costs(index->reltablespace,
							  &spc_random_page_cost,
							  &spc_seq_page_cost);

	/*
	 * Obtain some data from the index itself, if possible.  Otherwise invent
	 * some plausible internal statistics based on the relation page count.
	 */
	if (!index->hypothetical)
	{
		/*
		 * A lock should have already been obtained on the index in plancat.c.
		 */
		indexRel = index_open(index->indexoid, NoLock);
		brinGetStats(indexRel, &statsData);
		index_close(indexRel, NoLock);

		/* work out the actual number of ranges in the index */
		indexRanges = Max(ceil((double) baserel->pages /
							   statsData.pagesPerRange), 1.0);
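
		/*
		 * For example (hypothetical numbers): 10000 heap pages at 128 pages
		 * per range gives ceil(10000 / 128) = 79 ranges.
		 */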
7752 	}
7753 	else
7754 	{
7755 		/*
7756 		 * Assume default number of pages per range, and estimate the number
7757 		 * of ranges based on that.
7758 		 */
7759 		indexRanges = Max(ceil((double) baserel->pages /
7760 							   BRIN_DEFAULT_PAGES_PER_RANGE), 1.0);
7761 
7762 		statsData.pagesPerRange = BRIN_DEFAULT_PAGES_PER_RANGE;
7763 		statsData.revmapNumPages = (indexRanges / REVMAP_PAGE_MAXITEMS) + 1;
7764 	}

	/*
	 * Compute index correlation
	 *
	 * Because we can use all index quals equally when scanning, we can use
	 * the largest correlation (in absolute value) among columns used by the
	 * query.  Start at zero, the worst possible case.  If we cannot find any
	 * correlation statistics, we will keep it as 0.
	 */
	*indexCorrelation = 0;

	foreach(l, path->indexclauses)
	{
		IndexClause *iclause = lfirst_node(IndexClause, l);
		AttrNumber	attnum = index->indexkeys[iclause->indexcol];

		/* attempt to look up stats in the relation for this index column */
		if (attnum != 0)
		{
			/* Simple variable -- look to stats for the underlying table */
			if (get_relation_stats_hook &&
				(*get_relation_stats_hook) (root, rte, attnum, &vardata))
			{
				/*
				 * The hook took control of acquiring a stats tuple.  If it
				 * did supply a tuple, it'd better have supplied a freefunc.
				 */
				if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc)
					elog(ERROR,
						 "no function provided to release variable stats with");
			}
			else
			{
				vardata.statsTuple =
					SearchSysCache3(STATRELATTINH,
									ObjectIdGetDatum(rte->relid),
									Int16GetDatum(attnum),
									BoolGetDatum(false));
				vardata.freefunc = ReleaseSysCache;
			}
		}
		else
		{
			/*
			 * Looks like we've found an expression column in the index.
			 * Let's see if there are any stats for it.
			 */

			/* get the attnum from the 0-based index. */
			attnum = iclause->indexcol + 1;

			if (get_index_stats_hook &&
				(*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
			{
				/*
				 * The hook took control of acquiring a stats tuple.  If it
				 * did supply a tuple, it'd better have supplied a freefunc.
				 */
				if (HeapTupleIsValid(vardata.statsTuple) &&
					!vardata.freefunc)
					elog(ERROR, "no function provided to release variable stats with");
			}
			else
			{
				vardata.statsTuple = SearchSysCache3(STATRELATTINH,
													 ObjectIdGetDatum(index->indexoid),
													 Int16GetDatum(attnum),
													 BoolGetDatum(false));
				vardata.freefunc = ReleaseSysCache;
			}
		}

		if (HeapTupleIsValid(vardata.statsTuple))
		{
			AttStatsSlot sslot;

			if (get_attstatsslot(&sslot, vardata.statsTuple,
								 STATISTIC_KIND_CORRELATION, InvalidOid,
								 ATTSTATSSLOT_NUMBERS))
			{
				double		varCorrelation = 0.0;

				if (sslot.nnumbers > 0)
					varCorrelation = Abs(sslot.numbers[0]);

				if (varCorrelation > *indexCorrelation)
					*indexCorrelation = varCorrelation;

				free_attstatsslot(&sslot);
			}
		}

		ReleaseVariableStats(vardata);
	}

	qualSelectivity = clauselist_selectivity(root, indexQuals,
											 baserel->relid,
											 JOIN_INNER, NULL);

	/*
	 * Now calculate the minimum possible number of ranges we could match if
	 * all of the rows were in perfect order in the table's heap.
	 */
	minimalRanges = ceil(indexRanges * qualSelectivity);
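
	/*
	 * Editor's illustration (hypothetical numbers, not from the source):
	 * with indexRanges = 782 and qualSelectivity = 0.01, minimalRanges =
	 * ceil(782 * 0.01) = 8 -- the best case, in which all matching rows are
	 * packed into the fewest possible ranges.
	 */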

	/*
	 * Now estimate the number of ranges that we'll touch by using the
	 * indexCorrelation from the stats.  Be careful not to divide by zero
	 * (note we're using the absolute value of the correlation).
	 */
	if (*indexCorrelation < 1.0e-10)
		estimatedRanges = indexRanges;
	else
		estimatedRanges = Min(minimalRanges / *indexCorrelation, indexRanges);

	/* we expect to visit this portion of the table */
	selec = estimatedRanges / indexRanges;

	CLAMP_PROBABILITY(selec);

	*indexSelectivity = selec;
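
	/*
	 * Editor's illustration, continuing the hypothetical numbers above: with
	 * minimalRanges = 8 and *indexCorrelation = 0.25, estimatedRanges =
	 * Min(8 / 0.25, 782) = 32, giving selec = 32 / 782, roughly 0.041.  A
	 * weaker correlation pushes the estimate toward scanning every range.
	 */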

	/*
	 * Compute the index qual costs, much as in genericcostestimate, to add to
	 * the index costs.  We can disregard indexorderbys, since BRIN doesn't
	 * support those.
	 */
	qual_arg_cost = index_other_operands_eval_cost(root, indexQuals);

	/*
	 * Compute the startup cost as the cost to read the whole revmap
	 * sequentially, including the cost to execute the index quals.
	 */
	*indexStartupCost =
		spc_seq_page_cost * statsData.revmapNumPages * loop_count;
	*indexStartupCost += qual_arg_cost;
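
	/*
	 * Editor's illustration (hypothetical numbers, not from the source):
	 * with a 2-page revmap, the default seq_page_cost = 1.0, loop_count = 1,
	 * and qual_arg_cost = 0, the startup cost is 1.0 * 2 * 1 = 2.0.
	 */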

	/*
	 * To read a BRIN index there might be a bit of back and forth over
	 * regular pages, as the revmap might point to them out of sequential
	 * order; calculate the total cost as reading the whole index in random
	 * order.
	 */
	*indexTotalCost = *indexStartupCost +
		spc_random_page_cost * (numPages - statsData.revmapNumPages) * loop_count;

	/*
	 * Charge a small amount per range tuple that we expect to match.  This
	 * is meant to reflect the costs of manipulating the bitmap.  The BRIN
	 * scan will set a bit for each page in the range when we find a matching
	 * range, so we must multiply the charge by the number of pages in the
	 * range.
	 */
	*indexTotalCost += 0.1 * cpu_operator_cost * estimatedRanges *
		statsData.pagesPerRange;
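
	/*
	 * Editor's illustration, continuing the hypothetical numbers above: with
	 * estimatedRanges = 32, pagesPerRange = 128, and the default
	 * cpu_operator_cost = 0.0025, this bitmap-manipulation charge is
	 * 0.1 * 0.0025 * 32 * 128 = 1.024 cost units.
	 */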

	*indexPages = index->pages;
}