1 /*-------------------------------------------------------------------------
2  *
3  * analyze.c
4  *	  the Postgres statistics generator
5  *
6  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *	  src/backend/commands/analyze.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include <math.h>
18 
19 #include "access/multixact.h"
20 #include "access/sysattr.h"
21 #include "access/transam.h"
22 #include "access/tupconvert.h"
test(S s,typename S::difference_type pos,It first,It last,S expected)23 #include "access/tuptoaster.h"
24 #include "access/visibilitymap.h"
25 #include "access/xact.h"
26 #include "catalog/catalog.h"
27 #include "catalog/index.h"
28 #include "catalog/indexing.h"
29 #include "catalog/pg_collation.h"
30 #include "catalog/pg_inherits.h"
31 #include "catalog/pg_namespace.h"
32 #include "catalog/pg_statistic_ext.h"
33 #include "commands/dbcommands.h"
34 #include "commands/tablecmds.h"
35 #include "commands/vacuum.h"
36 #include "executor/executor.h"
37 #include "foreign/fdwapi.h"
38 #include "miscadmin.h"
39 #include "nodes/nodeFuncs.h"
40 #include "parser/parse_oper.h"
41 #include "parser/parse_relation.h"
42 #include "pgstat.h"
43 #include "postmaster/autovacuum.h"
44 #include "statistics/extended_stats_internal.h"
45 #include "statistics/statistics.h"
46 #include "storage/bufmgr.h"
47 #include "storage/lmgr.h"
48 #include "storage/proc.h"
49 #include "storage/procarray.h"
50 #include "utils/acl.h"
51 #include "utils/attoptcache.h"
52 #include "utils/builtins.h"
53 #include "utils/datum.h"
54 #include "utils/fmgroids.h"
55 #include "utils/guc.h"
56 #include "utils/lsyscache.h"
57 #include "utils/memutils.h"
58 #include "utils/pg_rusage.h"
59 #include "utils/sampling.h"
60 #include "utils/sortsupport.h"
61 #include "utils/syscache.h"
62 #include "utils/timestamp.h"
63 #include "utils/tqual.h"
64 
65 
66 /* Per-index data for ANALYZE */
67 typedef struct AnlIndexData
68 {
69 	IndexInfo  *indexInfo;		/* BuildIndexInfo result */
70 	double		tupleFract;		/* fraction of rows for partial index */
71 	VacAttrStats **vacattrstats;	/* index attrs to analyze */
72 	int			attr_cnt;
73 } AnlIndexData;
74 
75 
76 /* Default statistics target (GUC parameter) */
77 int			default_statistics_target = 100;
78 
79 /* A few variables that don't seem worth passing around as parameters */
80 static MemoryContext anl_context = NULL;
81 static BufferAccessStrategy vac_strategy;
82 
83 
84 static void do_analyze_rel(Relation onerel, int options,
85 			   VacuumParams *params, List *va_cols,
86 			   AcquireSampleRowsFunc acquirefunc, BlockNumber relpages,
87 			   bool inh, bool in_outer_xact, int elevel);
88 static void compute_index_stats(Relation onerel, double totalrows,
89 					AnlIndexData *indexdata, int nindexes,
90 					HeapTuple *rows, int numrows,
91 					MemoryContext col_context);
92 static VacAttrStats *examine_attribute(Relation onerel, int attnum,
93 				  Node *index_expr);
94 static int acquire_sample_rows(Relation onerel, int elevel,
95 					HeapTuple *rows, int targrows,
96 					double *totalrows, double *totaldeadrows);
97 static int	compare_rows(const void *a, const void *b);
98 static int acquire_inherited_sample_rows(Relation onerel, int elevel,
99 							  HeapTuple *rows, int targrows,
100 							  double *totalrows, double *totaldeadrows);
101 static void update_attstats(Oid relid, bool inh,
102 				int natts, VacAttrStats **vacattrstats);
103 static Datum std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
104 static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
105 
106 
107 /*
108  *	analyze_rel() -- analyze one relation
109  *
110  * relid identifies the relation to analyze.  If relation is supplied, use
111  * the name therein for reporting any failure to open/lock the rel; do not
112  * use it once we've successfully opened the rel, since it might be stale.
113  */
114 void
115 analyze_rel(Oid relid, RangeVar *relation, int options,
116 			VacuumParams *params, List *va_cols, bool in_outer_xact,
117 			BufferAccessStrategy bstrategy)
118 {
119 	Relation	onerel;
120 	int			elevel;
121 	AcquireSampleRowsFunc acquirefunc = NULL;
122 	BlockNumber relpages = 0;
123 	bool		rel_lock = true;
124 
125 	/* Select logging level */
126 	if (options & VACOPT_VERBOSE)
127 		elevel = INFO;
128 	else
129 		elevel = DEBUG2;
130 
131 	/* Set up static variables */
132 	vac_strategy = bstrategy;
133 
134 	/*
135 	 * Check for user-requested abort.
136 	 */
137 	CHECK_FOR_INTERRUPTS();
138 
139 	/*
140 	 * Open the relation, getting ShareUpdateExclusiveLock to ensure that two
141 	 * ANALYZEs don't run on it concurrently.  (This also locks out a
142 	 * concurrent VACUUM, which doesn't matter much at the moment but might
143 	 * matter if we ever try to accumulate stats on dead tuples.) If the rel
144 	 * has been dropped since we last saw it, we don't need to process it.
145 	 */
146 	if (!(options & VACOPT_NOWAIT))
147 		onerel = try_relation_open(relid, ShareUpdateExclusiveLock);
148 	else if (ConditionalLockRelationOid(relid, ShareUpdateExclusiveLock))
149 		onerel = try_relation_open(relid, NoLock);
150 	else
151 	{
152 		onerel = NULL;
153 		rel_lock = false;
154 	}
155 
156 	/*
157 	 * If we failed to open or lock the relation, emit a log message before
158 	 * exiting.
159 	 */
160 	if (!onerel)
161 	{
162 		/*
163 		 * If the RangeVar is not defined, we do not have enough information
164 		 * to provide a meaningful log statement.  Chances are that
165 		 * analyze_rel's caller has intentionally not provided this
166 		 * information so that this logging is skipped, anyway.
167 		 */
168 		if (relation == NULL)
169 			return;
170 
171 		/*
172 		 * Determine the log level.  For autovacuum logs, we emit a LOG if
173 		 * log_autovacuum_min_duration is not disabled.  For manual ANALYZE,
174 		 * we emit a WARNING to match the log statements in the permissions
175 		 * checks.
176 		 */
177 		if (!IsAutoVacuumWorkerProcess())
178 			elevel = WARNING;
179 		else if (params->log_min_duration >= 0)
180 			elevel = LOG;
181 		else
182 			return;
183 
184 		if (!rel_lock)
185 			ereport(elevel,
186 					(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
187 					 errmsg("skipping analyze of \"%s\" --- lock not available",
188 							relation->relname)));
189 		else
190 			ereport(elevel,
191 					(errcode(ERRCODE_UNDEFINED_TABLE),
192 					 errmsg("skipping analyze of \"%s\" --- relation no longer exists",
193 							relation->relname)));
194 
195 		return;
196 	}
197 
198 	/*
199 	 * Check permissions --- this should match vacuum's check!
200 	 */
201 	if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) ||
202 		  (pg_database_ownercheck(MyDatabaseId, GetUserId()) && !onerel->rd_rel->relisshared)))
203 	{
204 		/* No need for a WARNING if we already complained during VACUUM */
205 		if (!(options & VACOPT_VACUUM))
206 		{
207 			if (onerel->rd_rel->relisshared)
208 				ereport(WARNING,
209 						(errmsg("skipping \"%s\" --- only superuser can analyze it",
210 								RelationGetRelationName(onerel))));
211 			else if (onerel->rd_rel->relnamespace == PG_CATALOG_NAMESPACE)
212 				ereport(WARNING,
213 						(errmsg("skipping \"%s\" --- only superuser or database owner can analyze it",
214 								RelationGetRelationName(onerel))));
215 			else
216 				ereport(WARNING,
217 						(errmsg("skipping \"%s\" --- only table or database owner can analyze it",
218 								RelationGetRelationName(onerel))));
219 		}
220 		relation_close(onerel, ShareUpdateExclusiveLock);
221 		return;
222 	}
223 
224 	/*
225 	 * Silently ignore tables that are temp tables of other backends ---
226 	 * trying to analyze these is rather pointless, since their contents are
227 	 * probably not up-to-date on disk.  (We don't throw a warning here; it
228 	 * would just lead to chatter during a database-wide ANALYZE.)
229 	 */
230 	if (RELATION_IS_OTHER_TEMP(onerel))
231 	{
232 		relation_close(onerel, ShareUpdateExclusiveLock);
233 		return;
234 	}
235 
236 	/*
237 	 * We can ANALYZE any table except pg_statistic. See update_attstats
238 	 */
239 	if (RelationGetRelid(onerel) == StatisticRelationId)
240 	{
241 		relation_close(onerel, ShareUpdateExclusiveLock);
242 		return;
243 	}
244 
245 	/*
246 	 * Check that it's of an analyzable relkind, and set up appropriately.
247 	 */
248 	if (onerel->rd_rel->relkind == RELKIND_RELATION ||
249 		onerel->rd_rel->relkind == RELKIND_MATVIEW)
250 	{
251 		/* Regular table, so we'll use the regular row acquisition function */
252 		acquirefunc = acquire_sample_rows;
253 		/* Also get regular table's size */
254 		relpages = RelationGetNumberOfBlocks(onerel);
255 	}
256 	else if (onerel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
257 	{
258 		/*
259 		 * For a foreign table, call the FDW's hook function to see whether it
260 		 * supports analysis.
261 		 */
262 		FdwRoutine *fdwroutine;
263 		bool		ok = false;
264 
265 		fdwroutine = GetFdwRoutineForRelation(onerel, false);
266 
267 		if (fdwroutine->AnalyzeForeignTable != NULL)
268 			ok = fdwroutine->AnalyzeForeignTable(onerel,
269 												 &acquirefunc,
270 												 &relpages);
271 
272 		if (!ok)
273 		{
274 			ereport(WARNING,
275 					(errmsg("skipping \"%s\" --- cannot analyze this foreign table",
276 							RelationGetRelationName(onerel))));
277 			relation_close(onerel, ShareUpdateExclusiveLock);
278 			return;
279 		}
280 	}
281 	else if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
282 	{
283 		/*
284 		 * For partitioned tables, we want to do the recursive ANALYZE below.
285 		 */
286 	}
287 	else
288 	{
289 		/* No need for a WARNING if we already complained during VACUUM */
290 		if (!(options & VACOPT_VACUUM))
291 			ereport(WARNING,
292 					(errmsg("skipping \"%s\" --- cannot analyze non-tables or special system tables",
293 							RelationGetRelationName(onerel))));
294 		relation_close(onerel, ShareUpdateExclusiveLock);
295 		return;
296 	}
297 
298 	/*
299 	 * OK, let's do it.  First let other backends know I'm in ANALYZE.
300 	 */
301 	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
302 	MyPgXact->vacuumFlags |= PROC_IN_ANALYZE;
303 	LWLockRelease(ProcArrayLock);
304 
305 	/*
306 	 * Do the normal non-recursive ANALYZE.  We can skip this for partitioned
307 	 * tables, which don't contain any rows.
308 	 */
309 	if (onerel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
310 		do_analyze_rel(onerel, options, params, va_cols, acquirefunc,
311 					   relpages, false, in_outer_xact, elevel);
312 
313 	/*
314 	 * If there are child tables, do recursive ANALYZE.
315 	 */
316 	if (onerel->rd_rel->relhassubclass)
317 		do_analyze_rel(onerel, options, params, va_cols, acquirefunc, relpages,
318 					   true, in_outer_xact, elevel);
319 
320 	/*
321 	 * Close source relation now, but keep lock so that no one deletes it
322 	 * before we commit.  (If someone did, they'd fail to clean up the entries
323 	 * we made in pg_statistic.  Also, releasing the lock before commit would
324 	 * expose us to concurrent-update failures in update_attstats.)
325 	 */
326 	relation_close(onerel, NoLock);
327 
328 	/*
329 	 * Reset my PGXACT flag.  Note: we need this here, and not in vacuum_rel,
330 	 * because the vacuum flag is cleared by the end-of-xact code.
331 	 */
332 	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
333 	MyPgXact->vacuumFlags &= ~PROC_IN_ANALYZE;
334 	LWLockRelease(ProcArrayLock);
335 }
336 
337 /*
338  *	do_analyze_rel() -- analyze one relation, recursively or not
339  *
340  * Note that "acquirefunc" is only relevant for the non-inherited case.
341  * For the inherited case, acquire_inherited_sample_rows() determines the
342  * appropriate acquirefunc for each child table.
343  */
344 static void
345 do_analyze_rel(Relation onerel, int options, VacuumParams *params,
346 			   List *va_cols, AcquireSampleRowsFunc acquirefunc,
347 			   BlockNumber relpages, bool inh, bool in_outer_xact,
348 			   int elevel)
349 {
350 	int			attr_cnt,
351 				tcnt,
352 				i,
353 				ind;
354 	Relation   *Irel;
355 	int			nindexes;
356 	bool		hasindex;
357 	VacAttrStats **vacattrstats;
358 	AnlIndexData *indexdata;
359 	int			targrows,
360 				numrows;
361 	double		totalrows,
362 				totaldeadrows;
363 	HeapTuple  *rows;
364 	PGRUsage	ru0;
365 	TimestampTz starttime = 0;
366 	MemoryContext caller_context;
367 	Oid			save_userid;
368 	int			save_sec_context;
369 	int			save_nestlevel;
370 
371 	if (inh)
372 		ereport(elevel,
373 				(errmsg("analyzing \"%s.%s\" inheritance tree",
374 						get_namespace_name(RelationGetNamespace(onerel)),
375 						RelationGetRelationName(onerel))));
376 	else
377 		ereport(elevel,
378 				(errmsg("analyzing \"%s.%s\"",
379 						get_namespace_name(RelationGetNamespace(onerel)),
380 						RelationGetRelationName(onerel))));
381 
382 	/*
383 	 * Set up a working context so that we can easily free whatever junk gets
384 	 * created.
385 	 */
386 	anl_context = AllocSetContextCreate(CurrentMemoryContext,
387 										"Analyze",
388 										ALLOCSET_DEFAULT_SIZES);
389 	caller_context = MemoryContextSwitchTo(anl_context);
390 
391 	/*
392 	 * Switch to the table owner's userid, so that any index functions are run
393 	 * as that user.  Also lock down security-restricted operations and
394 	 * arrange to make GUC variable changes local to this command.
395 	 */
396 	GetUserIdAndSecContext(&save_userid, &save_sec_context);
397 	SetUserIdAndSecContext(onerel->rd_rel->relowner,
398 						   save_sec_context | SECURITY_RESTRICTED_OPERATION);
399 	save_nestlevel = NewGUCNestLevel();
400 
401 	/* measure elapsed time iff autovacuum logging requires it */
402 	if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
403 	{
404 		pg_rusage_init(&ru0);
405 		if (params->log_min_duration > 0)
406 			starttime = GetCurrentTimestamp();
407 	}
408 
409 	/*
410 	 * Determine which columns to analyze
411 	 *
412 	 * Note that system attributes are never analyzed, so we just reject them
413 	 * at the lookup stage.  We also reject duplicate column mentions.  (We
414 	 * could alternatively ignore duplicates, but analyzing a column twice
415 	 * won't work; we'd end up making a conflicting update in pg_statistic.)
416 	 */
417 	if (va_cols != NIL)
418 	{
419 		Bitmapset  *unique_cols = NULL;
420 		ListCell   *le;
421 
422 		vacattrstats = (VacAttrStats **) palloc(list_length(va_cols) *
423 												sizeof(VacAttrStats *));
424 		tcnt = 0;
425 		foreach(le, va_cols)
426 		{
427 			char	   *col = strVal(lfirst(le));
428 
429 			i = attnameAttNum(onerel, col, false);
430 			if (i == InvalidAttrNumber)
431 				ereport(ERROR,
432 						(errcode(ERRCODE_UNDEFINED_COLUMN),
433 						 errmsg("column \"%s\" of relation \"%s\" does not exist",
434 								col, RelationGetRelationName(onerel))));
435 			if (bms_is_member(i, unique_cols))
436 				ereport(ERROR,
437 						(errcode(ERRCODE_DUPLICATE_COLUMN),
438 						 errmsg("column \"%s\" of relation \"%s\" appears more than once",
439 								col, RelationGetRelationName(onerel))));
440 			unique_cols = bms_add_member(unique_cols, i);
441 
442 			vacattrstats[tcnt] = examine_attribute(onerel, i, NULL);
443 			if (vacattrstats[tcnt] != NULL)
444 				tcnt++;
445 		}
446 		attr_cnt = tcnt;
447 	}
448 	else
449 	{
450 		attr_cnt = onerel->rd_att->natts;
451 		vacattrstats = (VacAttrStats **)
452 			palloc(attr_cnt * sizeof(VacAttrStats *));
453 		tcnt = 0;
454 		for (i = 1; i <= attr_cnt; i++)
455 		{
456 			vacattrstats[tcnt] = examine_attribute(onerel, i, NULL);
457 			if (vacattrstats[tcnt] != NULL)
458 				tcnt++;
459 		}
460 		attr_cnt = tcnt;
461 	}
462 
463 	/*
464 	 * Open all indexes of the relation, and see if there are any analyzable
465 	 * columns in the indexes.  We do not analyze index columns if there was
466 	 * an explicit column list in the ANALYZE command, however.  If we are
467 	 * doing a recursive scan, we don't want to touch the parent's indexes at
468 	 * all.
469 	 */
470 	if (!inh)
471 		vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);
472 	else
473 	{
474 		Irel = NULL;
475 		nindexes = 0;
476 	}
477 	hasindex = (nindexes > 0);
478 	indexdata = NULL;
479 	if (hasindex)
480 	{
481 		indexdata = (AnlIndexData *) palloc0(nindexes * sizeof(AnlIndexData));
482 		for (ind = 0; ind < nindexes; ind++)
483 		{
484 			AnlIndexData *thisdata = &indexdata[ind];
485 			IndexInfo  *indexInfo;
486 
487 			thisdata->indexInfo = indexInfo = BuildIndexInfo(Irel[ind]);
488 			thisdata->tupleFract = 1.0; /* fix later if partial */
489 			if (indexInfo->ii_Expressions != NIL && va_cols == NIL)
490 			{
491 				ListCell   *indexpr_item = list_head(indexInfo->ii_Expressions);
492 
493 				thisdata->vacattrstats = (VacAttrStats **)
494 					palloc(indexInfo->ii_NumIndexAttrs * sizeof(VacAttrStats *));
495 				tcnt = 0;
496 				for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
497 				{
498 					int			keycol = indexInfo->ii_IndexAttrNumbers[i];
499 
500 					if (keycol == 0)
501 					{
502 						/* Found an index expression */
503 						Node	   *indexkey;
504 
505 						if (indexpr_item == NULL)	/* shouldn't happen */
506 							elog(ERROR, "too few entries in indexprs list");
507 						indexkey = (Node *) lfirst(indexpr_item);
508 						indexpr_item = lnext(indexpr_item);
509 						thisdata->vacattrstats[tcnt] =
510 							examine_attribute(Irel[ind], i + 1, indexkey);
511 						if (thisdata->vacattrstats[tcnt] != NULL)
512 							tcnt++;
513 					}
514 				}
515 				thisdata->attr_cnt = tcnt;
516 			}
517 		}
518 	}
519 
520 	/*
521 	 * Determine how many rows we need to sample, using the worst case from
522 	 * all analyzable columns.  We use a lower bound of 100 rows to avoid
523 	 * possible overflow in Vitter's algorithm.  (Note: that will also be the
524 	 * target in the corner case where there are no analyzable columns.)
525 	 */
526 	targrows = 100;
527 	for (i = 0; i < attr_cnt; i++)
528 	{
529 		if (targrows < vacattrstats[i]->minrows)
530 			targrows = vacattrstats[i]->minrows;
531 	}
532 	for (ind = 0; ind < nindexes; ind++)
533 	{
534 		AnlIndexData *thisdata = &indexdata[ind];
535 
536 		for (i = 0; i < thisdata->attr_cnt; i++)
537 		{
538 			if (targrows < thisdata->vacattrstats[i]->minrows)
539 				targrows = thisdata->vacattrstats[i]->minrows;
540 		}
541 	}
542 
543 	/*
544 	 * Acquire the sample rows
545 	 */
546 	rows = (HeapTuple *) palloc(targrows * sizeof(HeapTuple));
547 	if (inh)
548 		numrows = acquire_inherited_sample_rows(onerel, elevel,
549 												rows, targrows,
550 												&totalrows, &totaldeadrows);
551 	else
552 		numrows = (*acquirefunc) (onerel, elevel,
553 								  rows, targrows,
554 								  &totalrows, &totaldeadrows);
555 
556 	/*
557 	 * Compute the statistics.  Temporary results during the calculations for
558 	 * each column are stored in a child context.  The calc routines are
559 	 * responsible to make sure that whatever they store into the VacAttrStats
560 	 * structure is allocated in anl_context.
561 	 */
562 	if (numrows > 0)
563 	{
564 		MemoryContext col_context,
565 					old_context;
566 
567 		col_context = AllocSetContextCreate(anl_context,
568 											"Analyze Column",
569 											ALLOCSET_DEFAULT_SIZES);
570 		old_context = MemoryContextSwitchTo(col_context);
571 
572 		for (i = 0; i < attr_cnt; i++)
573 		{
574 			VacAttrStats *stats = vacattrstats[i];
575 			AttributeOpts *aopt;
576 
577 			stats->rows = rows;
578 			stats->tupDesc = onerel->rd_att;
579 			stats->compute_stats(stats,
580 								 std_fetch_func,
581 								 numrows,
582 								 totalrows);
583 
584 			/*
585 			 * If the appropriate flavor of the n_distinct option is
586 			 * specified, override with the corresponding value.
587 			 */
588 			aopt = get_attribute_options(onerel->rd_id, stats->attr->attnum);
589 			if (aopt != NULL)
590 			{
591 				float8		n_distinct;
592 
593 				n_distinct = inh ? aopt->n_distinct_inherited : aopt->n_distinct;
594 				if (n_distinct != 0.0)
595 					stats->stadistinct = n_distinct;
596 			}
597 
598 			MemoryContextResetAndDeleteChildren(col_context);
599 		}
600 
601 		if (hasindex)
602 			compute_index_stats(onerel, totalrows,
603 								indexdata, nindexes,
604 								rows, numrows,
605 								col_context);
606 
607 		MemoryContextSwitchTo(old_context);
608 		MemoryContextDelete(col_context);
609 
610 		/*
611 		 * Emit the completed stats rows into pg_statistic, replacing any
612 		 * previous statistics for the target columns.  (If there are stats in
613 		 * pg_statistic for columns we didn't process, we leave them alone.)
614 		 */
615 		update_attstats(RelationGetRelid(onerel), inh,
616 						attr_cnt, vacattrstats);
617 
618 		for (ind = 0; ind < nindexes; ind++)
619 		{
620 			AnlIndexData *thisdata = &indexdata[ind];
621 
622 			update_attstats(RelationGetRelid(Irel[ind]), false,
623 							thisdata->attr_cnt, thisdata->vacattrstats);
624 		}
625 
626 		/*
627 		 * Build extended statistics (if there are any).
628 		 *
629 		 * For now we only build extended statistics on individual relations,
630 		 * not for relations representing inheritance trees.
631 		 */
632 		if (!inh)
633 			BuildRelationExtStatistics(onerel, totalrows, numrows, rows,
634 									   attr_cnt, vacattrstats);
635 	}
636 
637 	/*
638 	 * Update pages/tuples stats in pg_class ... but not if we're doing
639 	 * inherited stats.
640 	 */
641 	if (!inh)
642 	{
643 		BlockNumber relallvisible;
644 
645 		visibilitymap_count(onerel, &relallvisible, NULL);
646 
647 		vac_update_relstats(onerel,
648 							relpages,
649 							totalrows,
650 							relallvisible,
651 							hasindex,
652 							InvalidTransactionId,
653 							InvalidMultiXactId,
654 							in_outer_xact);
655 	}
656 
657 	/*
658 	 * Same for indexes. Vacuum always scans all indexes, so if we're part of
659 	 * VACUUM ANALYZE, don't overwrite the accurate count already inserted by
660 	 * VACUUM.
661 	 */
662 	if (!inh && !(options & VACOPT_VACUUM))
663 	{
664 		for (ind = 0; ind < nindexes; ind++)
665 		{
666 			AnlIndexData *thisdata = &indexdata[ind];
667 			double		totalindexrows;
668 
669 			totalindexrows = ceil(thisdata->tupleFract * totalrows);
670 			vac_update_relstats(Irel[ind],
671 								RelationGetNumberOfBlocks(Irel[ind]),
672 								totalindexrows,
673 								0,
674 								false,
675 								InvalidTransactionId,
676 								InvalidMultiXactId,
677 								in_outer_xact);
678 		}
679 	}
680 
681 	/*
682 	 * Report ANALYZE to the stats collector, too.  However, if doing
683 	 * inherited stats we shouldn't report, because the stats collector only
684 	 * tracks per-table stats.  Reset the changes_since_analyze counter only
685 	 * if we analyzed all columns; otherwise, there is still work for
686 	 * auto-analyze to do.
687 	 */
688 	if (!inh)
689 		pgstat_report_analyze(onerel, totalrows, totaldeadrows,
690 							  (va_cols == NIL));
691 
692 	/* If this isn't part of VACUUM ANALYZE, let index AMs do cleanup */
693 	if (!(options & VACOPT_VACUUM))
694 	{
695 		for (ind = 0; ind < nindexes; ind++)
696 		{
697 			IndexBulkDeleteResult *stats;
698 			IndexVacuumInfo ivinfo;
699 
700 			ivinfo.index = Irel[ind];
701 			ivinfo.analyze_only = true;
702 			ivinfo.estimated_count = true;
703 			ivinfo.message_level = elevel;
704 			ivinfo.num_heap_tuples = onerel->rd_rel->reltuples;
705 			ivinfo.strategy = vac_strategy;
706 
707 			stats = index_vacuum_cleanup(&ivinfo, NULL);
708 
709 			if (stats)
710 				pfree(stats);
711 		}
712 	}
713 
714 	/* Done with indexes */
715 	vac_close_indexes(nindexes, Irel, NoLock);
716 
717 	/* Log the action if appropriate */
718 	if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
719 	{
720 		if (params->log_min_duration == 0 ||
721 			TimestampDifferenceExceeds(starttime, GetCurrentTimestamp(),
722 									   params->log_min_duration))
723 			ereport(LOG,
724 					(errmsg("automatic analyze of table \"%s.%s.%s\" system usage: %s",
725 							get_database_name(MyDatabaseId),
726 							get_namespace_name(RelationGetNamespace(onerel)),
727 							RelationGetRelationName(onerel),
728 							pg_rusage_show(&ru0))));
729 	}
730 
731 	/* Roll back any GUC changes executed by index functions */
732 	AtEOXact_GUC(false, save_nestlevel);
733 
734 	/* Restore userid and security context */
735 	SetUserIdAndSecContext(save_userid, save_sec_context);
736 
737 	/* Restore current context and release memory */
738 	MemoryContextSwitchTo(caller_context);
739 	MemoryContextDelete(anl_context);
740 	anl_context = NULL;
741 }
742 
743 /*
744  * Compute statistics about indexes of a relation
745  */
746 static void
747 compute_index_stats(Relation onerel, double totalrows,
748 					AnlIndexData *indexdata, int nindexes,
749 					HeapTuple *rows, int numrows,
750 					MemoryContext col_context)
751 {
752 	MemoryContext ind_context,
753 				old_context;
754 	Datum		values[INDEX_MAX_KEYS];
755 	bool		isnull[INDEX_MAX_KEYS];
756 	int			ind,
757 				i;
758 
759 	ind_context = AllocSetContextCreate(anl_context,
760 										"Analyze Index",
761 										ALLOCSET_DEFAULT_SIZES);
762 	old_context = MemoryContextSwitchTo(ind_context);
763 
764 	for (ind = 0; ind < nindexes; ind++)
765 	{
766 		AnlIndexData *thisdata = &indexdata[ind];
767 		IndexInfo  *indexInfo = thisdata->indexInfo;
768 		int			attr_cnt = thisdata->attr_cnt;
769 		TupleTableSlot *slot;
770 		EState	   *estate;
771 		ExprContext *econtext;
772 		ExprState  *predicate;
773 		Datum	   *exprvals;
774 		bool	   *exprnulls;
775 		int			numindexrows,
776 					tcnt,
777 					rowno;
778 		double		totalindexrows;
779 
780 		/* Ignore index if no columns to analyze and not partial */
781 		if (attr_cnt == 0 && indexInfo->ii_Predicate == NIL)
782 			continue;
783 
784 		/*
785 		 * Need an EState for evaluation of index expressions and
786 		 * partial-index predicates.  Create it in the per-index context to be
787 		 * sure it gets cleaned up at the bottom of the loop.
788 		 */
789 		estate = CreateExecutorState();
790 		econtext = GetPerTupleExprContext(estate);
791 		/* Need a slot to hold the current heap tuple, too */
792 		slot = MakeSingleTupleTableSlot(RelationGetDescr(onerel));
793 
794 		/* Arrange for econtext's scan tuple to be the tuple under test */
795 		econtext->ecxt_scantuple = slot;
796 
797 		/* Set up execution state for predicate. */
798 		predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
799 
800 		/* Compute and save index expression values */
801 		exprvals = (Datum *) palloc(numrows * attr_cnt * sizeof(Datum));
802 		exprnulls = (bool *) palloc(numrows * attr_cnt * sizeof(bool));
803 		numindexrows = 0;
804 		tcnt = 0;
805 		for (rowno = 0; rowno < numrows; rowno++)
806 		{
807 			HeapTuple	heapTuple = rows[rowno];
808 
809 			vacuum_delay_point();
810 
811 			/*
812 			 * Reset the per-tuple context each time, to reclaim any cruft
813 			 * left behind by evaluating the predicate or index expressions.
814 			 */
815 			ResetExprContext(econtext);
816 
817 			/* Set up for predicate or expression evaluation */
818 			ExecStoreTuple(heapTuple, slot, InvalidBuffer, false);
819 
820 			/* If index is partial, check predicate */
821 			if (predicate != NULL)
822 			{
823 				if (!ExecQual(predicate, econtext))
824 					continue;
825 			}
826 			numindexrows++;
827 
828 			if (attr_cnt > 0)
829 			{
830 				/*
831 				 * Evaluate the index row to compute expression values. We
832 				 * could do this by hand, but FormIndexDatum is convenient.
833 				 */
834 				FormIndexDatum(indexInfo,
835 							   slot,
836 							   estate,
837 							   values,
838 							   isnull);
839 
840 				/*
841 				 * Save just the columns we care about.  We copy the values
842 				 * into ind_context from the estate's per-tuple context.
843 				 */
844 				for (i = 0; i < attr_cnt; i++)
845 				{
846 					VacAttrStats *stats = thisdata->vacattrstats[i];
847 					int			attnum = stats->attr->attnum;
848 
849 					if (isnull[attnum - 1])
850 					{
851 						exprvals[tcnt] = (Datum) 0;
852 						exprnulls[tcnt] = true;
853 					}
854 					else
855 					{
856 						exprvals[tcnt] = datumCopy(values[attnum - 1],
857 												   stats->attrtype->typbyval,
858 												   stats->attrtype->typlen);
859 						exprnulls[tcnt] = false;
860 					}
861 					tcnt++;
862 				}
863 			}
864 		}
865 
866 		/*
867 		 * Having counted the number of rows that pass the predicate in the
868 		 * sample, we can estimate the total number of rows in the index.
869 		 */
870 		thisdata->tupleFract = (double) numindexrows / (double) numrows;
871 		totalindexrows = ceil(thisdata->tupleFract * totalrows);
872 
873 		/*
874 		 * Now we can compute the statistics for the expression columns.
875 		 */
876 		if (numindexrows > 0)
877 		{
878 			MemoryContextSwitchTo(col_context);
879 			for (i = 0; i < attr_cnt; i++)
880 			{
881 				VacAttrStats *stats = thisdata->vacattrstats[i];
882 				AttributeOpts *aopt =
883 				get_attribute_options(stats->attr->attrelid,
884 									  stats->attr->attnum);
885 
886 				stats->exprvals = exprvals + i;
887 				stats->exprnulls = exprnulls + i;
888 				stats->rowstride = attr_cnt;
889 				stats->compute_stats(stats,
890 									 ind_fetch_func,
891 									 numindexrows,
892 									 totalindexrows);
893 
894 				/*
895 				 * If the n_distinct option is specified, it overrides the
896 				 * above computation.  For indices, we always use just
897 				 * n_distinct, not n_distinct_inherited.
898 				 */
899 				if (aopt != NULL && aopt->n_distinct != 0.0)
900 					stats->stadistinct = aopt->n_distinct;
901 
902 				MemoryContextResetAndDeleteChildren(col_context);
903 			}
904 		}
905 
906 		/* And clean up */
907 		MemoryContextSwitchTo(ind_context);
908 
909 		ExecDropSingleTupleTableSlot(slot);
910 		FreeExecutorState(estate);
911 		MemoryContextResetAndDeleteChildren(ind_context);
912 	}
913 
914 	MemoryContextSwitchTo(old_context);
915 	MemoryContextDelete(ind_context);
916 }
917 
918 /*
919  * examine_attribute -- pre-analysis of a single column
920  *
921  * Determine whether the column is analyzable; if so, create and initialize
922  * a VacAttrStats struct for it.  If not, return NULL.
923  *
924  * If index_expr isn't NULL, then we're trying to analyze an expression index,
925  * and index_expr is the expression tree representing the column's data.
926  */
927 static VacAttrStats *
928 examine_attribute(Relation onerel, int attnum, Node *index_expr)
929 {
930 	Form_pg_attribute attr = TupleDescAttr(onerel->rd_att, attnum - 1);
931 	HeapTuple	typtuple;
932 	VacAttrStats *stats;
933 	int			i;
934 	bool		ok;
935 
936 	/* Never analyze dropped columns */
937 	if (attr->attisdropped)
938 		return NULL;
939 
940 	/* Don't analyze column if user has specified not to */
941 	if (attr->attstattarget == 0)
942 		return NULL;
943 
944 	/*
945 	 * Create the VacAttrStats struct.  Note that we only have a copy of the
946 	 * fixed fields of the pg_attribute tuple.
947 	 */
948 	stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats));
949 	stats->attr = (Form_pg_attribute) palloc(ATTRIBUTE_FIXED_PART_SIZE);
950 	memcpy(stats->attr, attr, ATTRIBUTE_FIXED_PART_SIZE);
951 
952 	/*
953 	 * When analyzing an expression index, believe the expression tree's type
954 	 * not the column datatype --- the latter might be the opckeytype storage
955 	 * type of the opclass, which is not interesting for our purposes.  (Note:
956 	 * if we did anything with non-expression index columns, we'd need to
957 	 * figure out where to get the correct type info from, but for now that's
958 	 * not a problem.)	It's not clear whether anyone will care about the
959 	 * typmod, but we store that too just in case.
960 	 */
961 	if (index_expr)
962 	{
963 		stats->attrtypid = exprType(index_expr);
964 		stats->attrtypmod = exprTypmod(index_expr);
965 	}
966 	else
967 	{
968 		stats->attrtypid = attr->atttypid;
969 		stats->attrtypmod = attr->atttypmod;
970 	}
971 
972 	typtuple = SearchSysCacheCopy1(TYPEOID,
973 								   ObjectIdGetDatum(stats->attrtypid));
974 	if (!HeapTupleIsValid(typtuple))
975 		elog(ERROR, "cache lookup failed for type %u", stats->attrtypid);
976 	stats->attrtype = (Form_pg_type) GETSTRUCT(typtuple);
977 	stats->anl_context = anl_context;
978 	stats->tupattnum = attnum;
979 
980 	/*
981 	 * The fields describing the stats->stavalues[n] element types default to
982 	 * the type of the data being analyzed, but the type-specific typanalyze
983 	 * function can change them if it wants to store something else.
984 	 */
985 	for (i = 0; i < STATISTIC_NUM_SLOTS; i++)
986 	{
987 		stats->statypid[i] = stats->attrtypid;
988 		stats->statyplen[i] = stats->attrtype->typlen;
989 		stats->statypbyval[i] = stats->attrtype->typbyval;
990 		stats->statypalign[i] = stats->attrtype->typalign;
991 	}
992 
993 	/*
994 	 * Call the type-specific typanalyze function.  If none is specified, use
995 	 * std_typanalyze().
996 	 */
997 	if (OidIsValid(stats->attrtype->typanalyze))
998 		ok = DatumGetBool(OidFunctionCall1(stats->attrtype->typanalyze,
999 										   PointerGetDatum(stats)));
1000 	else
1001 		ok = std_typanalyze(stats);
1002 
1003 	if (!ok || stats->compute_stats == NULL || stats->minrows <= 0)
1004 	{
1005 		heap_freetuple(typtuple);
1006 		pfree(stats->attr);
1007 		pfree(stats);
1008 		return NULL;
1009 	}
1010 
1011 	return stats;
1012 }
1013 
1014 /*
1015  * acquire_sample_rows -- acquire a random sample of rows from the table
1016  *
1017  * Selected rows are returned in the caller-allocated array rows[], which
1018  * must have at least targrows entries.
1019  * The actual number of rows selected is returned as the function result.
1020  * We also estimate the total numbers of live and dead rows in the table,
1021  * and return them into *totalrows and *totaldeadrows, respectively.
1022  *
1023  * The returned list of tuples is in order by physical position in the table.
1024  * (We will rely on this later to derive correlation estimates.)
1025  *
1026  * As of May 2004 we use a new two-stage method:  Stage one selects up
1027  * to targrows random blocks (or all blocks, if there aren't so many).
1028  * Stage two scans these blocks and uses the Vitter algorithm to create
1029  * a random sample of targrows rows (or less, if there are less in the
1030  * sample of blocks).  The two stages are executed simultaneously: each
1031  * block is processed as soon as stage one returns its number and while
1032  * the rows are read stage two controls which ones are to be inserted
1033  * into the sample.
1034  *
1035  * Although every row has an equal chance of ending up in the final
1036  * sample, this sampling method is not perfect: not every possible
1037  * sample has an equal chance of being selected.  For large relations
1038  * the number of different blocks represented by the sample tends to be
1039  * too small.  We can live with that for now.  Improvements are welcome.
1040  *
1041  * An important property of this sampling method is that because we do
1042  * look at a statistically unbiased set of blocks, we should get
1043  * unbiased estimates of the average numbers of live and dead rows per
1044  * block.  The previous sampling method put too much credence in the row
1045  * density near the start of the table.
1046  */
1047 static int
1048 acquire_sample_rows(Relation onerel, int elevel,
1049 					HeapTuple *rows, int targrows,
1050 					double *totalrows, double *totaldeadrows)
1051 {
1052 	int			numrows = 0;	/* # rows now in reservoir */
1053 	double		samplerows = 0; /* total # rows collected */
1054 	double		liverows = 0;	/* # live rows seen */
1055 	double		deadrows = 0;	/* # dead rows seen */
1056 	double		rowstoskip = -1;	/* -1 means not set yet */
1057 	BlockNumber totalblocks;
1058 	TransactionId OldestXmin;
1059 	BlockSamplerData bs;
1060 	ReservoirStateData rstate;
1061 
1062 	Assert(targrows > 0);
1063 
1064 	totalblocks = RelationGetNumberOfBlocks(onerel);
1065 
1066 	/* Need a cutoff xmin for HeapTupleSatisfiesVacuum */
1067 	OldestXmin = GetOldestXmin(onerel, PROCARRAY_FLAGS_VACUUM);
1068 
1069 	/* Prepare for sampling block numbers */
1070 	BlockSampler_Init(&bs, totalblocks, targrows, random());
1071 	/* Prepare for sampling rows */
1072 	reservoir_init_selection_state(&rstate, targrows);
1073 
1074 	/* Outer loop over blocks to sample */
1075 	while (BlockSampler_HasMore(&bs))
1076 	{
1077 		BlockNumber targblock = BlockSampler_Next(&bs);
1078 		Buffer		targbuffer;
1079 		Page		targpage;
1080 		OffsetNumber targoffset,
1081 					maxoffset;
1082 
1083 		vacuum_delay_point();
1084 
1085 		/*
1086 		 * We must maintain a pin on the target page's buffer to ensure that
1087 		 * the maxoffset value stays good (else concurrent VACUUM might delete
1088 		 * tuples out from under us).  Hence, pin the page until we are done
1089 		 * looking at it.  We also choose to hold sharelock on the buffer
1090 		 * throughout --- we could release and re-acquire sharelock for each
1091 		 * tuple, but since we aren't doing much work per tuple, the extra
1092 		 * lock traffic is probably better avoided.
1093 		 */
1094 		targbuffer = ReadBufferExtended(onerel, MAIN_FORKNUM, targblock,
1095 										RBM_NORMAL, vac_strategy);
1096 		LockBuffer(targbuffer, BUFFER_LOCK_SHARE);
1097 		targpage = BufferGetPage(targbuffer);
1098 		maxoffset = PageGetMaxOffsetNumber(targpage);
1099 
1100 		/* Inner loop over all tuples on the selected page */
1101 		for (targoffset = FirstOffsetNumber; targoffset <= maxoffset; targoffset++)
1102 		{
1103 			ItemId		itemid;
1104 			HeapTupleData targtuple;
1105 			bool		sample_it = false;
1106 
1107 			itemid = PageGetItemId(targpage, targoffset);
1108 
1109 			/*
1110 			 * We ignore unused and redirect line pointers.  DEAD line
1111 			 * pointers should be counted as dead, because we need vacuum to
1112 			 * run to get rid of them.  Note that this rule agrees with the
1113 			 * way that heap_page_prune() counts things.
1114 			 */
1115 			if (!ItemIdIsNormal(itemid))
1116 			{
1117 				if (ItemIdIsDead(itemid))
1118 					deadrows += 1;
1119 				continue;
1120 			}
1121 
1122 			ItemPointerSet(&targtuple.t_self, targblock, targoffset);
1123 
1124 			targtuple.t_tableOid = RelationGetRelid(onerel);
1125 			targtuple.t_data = (HeapTupleHeader) PageGetItem(targpage, itemid);
1126 			targtuple.t_len = ItemIdGetLength(itemid);
1127 
1128 			switch (HeapTupleSatisfiesVacuum(&targtuple,
1129 											 OldestXmin,
1130 											 targbuffer))
1131 			{
1132 				case HEAPTUPLE_LIVE:
1133 					sample_it = true;
1134 					liverows += 1;
1135 					break;
1136 
1137 				case HEAPTUPLE_DEAD:
1138 				case HEAPTUPLE_RECENTLY_DEAD:
1139 					/* Count dead and recently-dead rows */
1140 					deadrows += 1;
1141 					break;
1142 
1143 				case HEAPTUPLE_INSERT_IN_PROGRESS:
1144 
1145 					/*
1146 					 * Insert-in-progress rows are not counted.  We assume
1147 					 * that when the inserting transaction commits or aborts,
1148 					 * it will send a stats message to increment the proper
1149 					 * count.  This works right only if that transaction ends
1150 					 * after we finish analyzing the table; if things happen
1151 					 * in the other order, its stats update will be
1152 					 * overwritten by ours.  However, the error will be large
1153 					 * only if the other transaction runs long enough to
1154 					 * insert many tuples, so assuming it will finish after us
1155 					 * is the safer option.
1156 					 *
1157 					 * A special case is that the inserting transaction might
1158 					 * be our own.  In this case we should count and sample
1159 					 * the row, to accommodate users who load a table and
1160 					 * analyze it in one transaction.  (pgstat_report_analyze
1161 					 * has to adjust the numbers we send to the stats
1162 					 * collector to make this come out right.)
1163 					 */
1164 					if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(targtuple.t_data)))
1165 					{
1166 						sample_it = true;
1167 						liverows += 1;
1168 					}
1169 					break;
1170 
1171 				case HEAPTUPLE_DELETE_IN_PROGRESS:
1172 
1173 					/*
1174 					 * We count and sample delete-in-progress rows the same as
1175 					 * live ones, so that the stats counters come out right if
1176 					 * the deleting transaction commits after us, per the same
1177 					 * reasoning given above.
1178 					 *
1179 					 * If the delete was done by our own transaction, however,
1180 					 * we must count the row as dead to make
1181 					 * pgstat_report_analyze's stats adjustments come out
1182 					 * right.  (Note: this works out properly when the row was
1183 					 * both inserted and deleted in our xact.)
1184 					 *
1185 					 * The net effect of these choices is that we act as
1186 					 * though an IN_PROGRESS transaction hasn't happened yet,
1187 					 * except if it is our own transaction, which we assume
1188 					 * has happened.
1189 					 *
1190 					 * This approach ensures that we behave sanely if we see
1191 					 * both the pre-image and post-image rows for a row being
1192 					 * updated by a concurrent transaction: we will sample the
1193 					 * pre-image but not the post-image.  We also get sane
1194 					 * results if the concurrent transaction never commits.
1195 					 */
1196 					if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(targtuple.t_data)))
1197 						deadrows += 1;
1198 					else
1199 					{
1200 						sample_it = true;
1201 						liverows += 1;
1202 					}
1203 					break;
1204 
1205 				default:
1206 					elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1207 					break;
1208 			}
1209 
1210 			if (sample_it)
1211 			{
1212 				/*
1213 				 * The first targrows sample rows are simply copied into the
1214 				 * reservoir. Then we start replacing tuples in the sample
1215 				 * until we reach the end of the relation.  This algorithm is
1216 				 * from Jeff Vitter's paper (see full citation in
1217 				 * utils/misc/sampling.c). It works by repeatedly computing
1218 				 * the number of tuples to skip before selecting a tuple,
1219 				 * which replaces a randomly chosen element of the reservoir
1220 				 * (current set of tuples).  At all times the reservoir is a
1221 				 * true random sample of the tuples we've passed over so far,
1222 				 * so when we fall off the end of the relation we're done.
1223 				 */
1224 				if (numrows < targrows)
1225 					rows[numrows++] = heap_copytuple(&targtuple);
1226 				else
1227 				{
1228 					/*
1229 					 * t in Vitter's paper is the number of records already
1230 					 * processed.  If we need to compute a new S value, we
1231 					 * must use the not-yet-incremented value of samplerows as
1232 					 * t.
1233 					 */
1234 					if (rowstoskip < 0)
1235 						rowstoskip = reservoir_get_next_S(&rstate, samplerows, targrows);
1236 
1237 					if (rowstoskip <= 0)
1238 					{
1239 						/*
1240 						 * Found a suitable tuple, so save it, replacing one
1241 						 * old tuple at random
1242 						 */
1243 						int			k = (int) (targrows * sampler_random_fract(rstate.randstate));
1244 
1245 						Assert(k >= 0 && k < targrows);
1246 						heap_freetuple(rows[k]);
1247 						rows[k] = heap_copytuple(&targtuple);
1248 					}
1249 
1250 					rowstoskip -= 1;
1251 				}
1252 
1253 				samplerows += 1;
1254 			}
1255 		}
1256 
1257 		/* Now release the lock and pin on the page */
1258 		UnlockReleaseBuffer(targbuffer);
1259 	}
1260 
1261 	/*
1262 	 * If we didn't find as many tuples as we wanted then we're done. No sort
1263 	 * is needed, since they're already in order.
1264 	 *
1265 	 * Otherwise we need to sort the collected tuples by position
1266 	 * (itempointer). It's not worth worrying about corner cases where the
1267 	 * tuples are already sorted.
1268 	 */
1269 	if (numrows == targrows)
1270 		qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
1271 
1272 	/*
1273 	 * Estimate total numbers of live and dead rows in relation, extrapolating
1274 	 * on the assumption that the average tuple density in pages we didn't
1275 	 * scan is the same as in the pages we did scan.  Since what we scanned is
1276 	 * a random sample of the pages in the relation, this should be a good
1277 	 * assumption.
1278 	 */
1279 	if (bs.m > 0)
1280 	{
1281 		*totalrows = floor((liverows / bs.m) * totalblocks + 0.5);
1282 		*totaldeadrows = floor((deadrows / bs.m) * totalblocks + 0.5);
1283 	}
1284 	else
1285 	{
1286 		*totalrows = 0.0;
1287 		*totaldeadrows = 0.0;
1288 	}
1289 
1290 	/*
1291 	 * Emit some interesting relation info
1292 	 */
1293 	ereport(elevel,
1294 			(errmsg("\"%s\": scanned %d of %u pages, "
1295 					"containing %.0f live rows and %.0f dead rows; "
1296 					"%d rows in sample, %.0f estimated total rows",
1297 					RelationGetRelationName(onerel),
1298 					bs.m, totalblocks,
1299 					liverows, deadrows,
1300 					numrows, *totalrows)));
1301 
1302 	return numrows;
1303 }
1304 
1305 /*
1306  * qsort comparator for sorting rows[] array
1307  */
1308 static int
1309 compare_rows(const void *a, const void *b)
1310 {
1311 	HeapTuple	ha = *(const HeapTuple *) a;
1312 	HeapTuple	hb = *(const HeapTuple *) b;
1313 	BlockNumber ba = ItemPointerGetBlockNumber(&ha->t_self);
1314 	OffsetNumber oa = ItemPointerGetOffsetNumber(&ha->t_self);
1315 	BlockNumber bb = ItemPointerGetBlockNumber(&hb->t_self);
1316 	OffsetNumber ob = ItemPointerGetOffsetNumber(&hb->t_self);
1317 
1318 	if (ba < bb)
1319 		return -1;
1320 	if (ba > bb)
1321 		return 1;
1322 	if (oa < ob)
1323 		return -1;
1324 	if (oa > ob)
1325 		return 1;
1326 	return 0;
1327 }
1328 
1329 
1330 /*
1331  * acquire_inherited_sample_rows -- acquire sample rows from inheritance tree
1332  *
1333  * This has the same API as acquire_sample_rows, except that rows are
1334  * collected from all inheritance children as well as the specified table.
1335  * We fail and return zero if there are no inheritance children, or if all
1336  * children are foreign tables that don't support ANALYZE.
1337  */
1338 static int
1339 acquire_inherited_sample_rows(Relation onerel, int elevel,
1340 							  HeapTuple *rows, int targrows,
1341 							  double *totalrows, double *totaldeadrows)
1342 {
1343 	List	   *tableOIDs;
1344 	Relation   *rels;
1345 	AcquireSampleRowsFunc *acquirefuncs;
1346 	double	   *relblocks;
1347 	double		totalblocks;
1348 	int			numrows,
1349 				nrels,
1350 				i;
1351 	ListCell   *lc;
1352 	bool		has_child;
1353 
1354 	/*
1355 	 * Find all members of inheritance set.  We only need AccessShareLock on
1356 	 * the children.
1357 	 */
1358 	tableOIDs =
1359 		find_all_inheritors(RelationGetRelid(onerel), AccessShareLock, NULL);
1360 
1361 	/*
1362 	 * Check that there's at least one descendant, else fail.  This could
1363 	 * happen despite analyze_rel's relhassubclass check, if table once had a
1364 	 * child but no longer does.  In that case, we can clear the
1365 	 * relhassubclass field so as not to make the same mistake again later.
1366 	 * (This is safe because we hold ShareUpdateExclusiveLock.)
1367 	 */
1368 	if (list_length(tableOIDs) < 2)
1369 	{
1370 		/* CCI because we already updated the pg_class row in this command */
1371 		CommandCounterIncrement();
1372 		SetRelationHasSubclass(RelationGetRelid(onerel), false);
1373 		ereport(elevel,
1374 				(errmsg("skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree contains no child tables",
1375 						get_namespace_name(RelationGetNamespace(onerel)),
1376 						RelationGetRelationName(onerel))));
1377 		return 0;
1378 	}
1379 
1380 	/*
1381 	 * Identify acquirefuncs to use, and count blocks in all the relations.
1382 	 * The result could overflow BlockNumber, so we use double arithmetic.
1383 	 */
1384 	rels = (Relation *) palloc(list_length(tableOIDs) * sizeof(Relation));
1385 	acquirefuncs = (AcquireSampleRowsFunc *)
1386 		palloc(list_length(tableOIDs) * sizeof(AcquireSampleRowsFunc));
1387 	relblocks = (double *) palloc(list_length(tableOIDs) * sizeof(double));
1388 	totalblocks = 0;
1389 	nrels = 0;
1390 	has_child = false;
1391 	foreach(lc, tableOIDs)
1392 	{
1393 		Oid			childOID = lfirst_oid(lc);
1394 		Relation	childrel;
1395 		AcquireSampleRowsFunc acquirefunc = NULL;
1396 		BlockNumber relpages = 0;
1397 
1398 		/* We already got the needed lock */
1399 		childrel = heap_open(childOID, NoLock);
1400 
1401 		/* Ignore if temp table of another backend */
1402 		if (RELATION_IS_OTHER_TEMP(childrel))
1403 		{
1404 			/* ... but release the lock on it */
1405 			Assert(childrel != onerel);
1406 			heap_close(childrel, AccessShareLock);
1407 			continue;
1408 		}
1409 
1410 		/* Check table type (MATVIEW can't happen, but might as well allow) */
1411 		if (childrel->rd_rel->relkind == RELKIND_RELATION ||
1412 			childrel->rd_rel->relkind == RELKIND_MATVIEW)
1413 		{
1414 			/* Regular table, so use the regular row acquisition function */
1415 			acquirefunc = acquire_sample_rows;
1416 			relpages = RelationGetNumberOfBlocks(childrel);
1417 		}
1418 		else if (childrel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1419 		{
1420 			/*
1421 			 * For a foreign table, call the FDW's hook function to see
1422 			 * whether it supports analysis.
1423 			 */
1424 			FdwRoutine *fdwroutine;
1425 			bool		ok = false;
1426 
1427 			fdwroutine = GetFdwRoutineForRelation(childrel, false);
1428 
1429 			if (fdwroutine->AnalyzeForeignTable != NULL)
1430 				ok = fdwroutine->AnalyzeForeignTable(childrel,
1431 													 &acquirefunc,
1432 													 &relpages);
1433 
1434 			if (!ok)
1435 			{
1436 				/* ignore, but release the lock on it */
1437 				Assert(childrel != onerel);
1438 				heap_close(childrel, AccessShareLock);
1439 				continue;
1440 			}
1441 		}
1442 		else
1443 		{
1444 			/*
1445 			 * ignore, but release the lock on it.  don't try to unlock the
1446 			 * passed-in relation
1447 			 */
1448 			Assert(childrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE);
1449 			if (childrel != onerel)
1450 				heap_close(childrel, AccessShareLock);
1451 			else
1452 				heap_close(childrel, NoLock);
1453 			continue;
1454 		}
1455 
1456 		/* OK, we'll process this child */
1457 		has_child = true;
1458 		rels[nrels] = childrel;
1459 		acquirefuncs[nrels] = acquirefunc;
1460 		relblocks[nrels] = (double) relpages;
1461 		totalblocks += (double) relpages;
1462 		nrels++;
1463 	}
1464 
1465 	/*
1466 	 * If we don't have at least one child table to consider, fail.  If the
1467 	 * relation is a partitioned table, it's not counted as a child table.
1468 	 */
1469 	if (!has_child)
1470 	{
1471 		ereport(elevel,
1472 				(errmsg("skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree contains no analyzable child tables",
1473 						get_namespace_name(RelationGetNamespace(onerel)),
1474 						RelationGetRelationName(onerel))));
1475 		return 0;
1476 	}
1477 
1478 	/*
1479 	 * Now sample rows from each relation, proportionally to its fraction of
1480 	 * the total block count.  (This might be less than desirable if the child
1481 	 * rels have radically different free-space percentages, but it's not
1482 	 * clear that it's worth working harder.)
1483 	 */
1484 	numrows = 0;
1485 	*totalrows = 0;
1486 	*totaldeadrows = 0;
1487 	for (i = 0; i < nrels; i++)
1488 	{
1489 		Relation	childrel = rels[i];
1490 		AcquireSampleRowsFunc acquirefunc = acquirefuncs[i];
1491 		double		childblocks = relblocks[i];
1492 
1493 		if (childblocks > 0)
1494 		{
1495 			int			childtargrows;
1496 
1497 			childtargrows = (int) rint(targrows * childblocks / totalblocks);
1498 			/* Make sure we don't overrun due to roundoff error */
1499 			childtargrows = Min(childtargrows, targrows - numrows);
1500 			if (childtargrows > 0)
1501 			{
1502 				int			childrows;
1503 				double		trows,
1504 							tdrows;
1505 
1506 				/* Fetch a random sample of the child's rows */
1507 				childrows = (*acquirefunc) (childrel, elevel,
1508 											rows + numrows, childtargrows,
1509 											&trows, &tdrows);
1510 
1511 				/* We may need to convert from child's rowtype to parent's */
1512 				if (childrows > 0 &&
1513 					!equalTupleDescs(RelationGetDescr(childrel),
1514 									 RelationGetDescr(onerel)))
1515 				{
1516 					TupleConversionMap *map;
1517 
1518 					map = convert_tuples_by_name(RelationGetDescr(childrel),
1519 												 RelationGetDescr(onerel),
1520 												 gettext_noop("could not convert row type"));
1521 					if (map != NULL)
1522 					{
1523 						int			j;
1524 
1525 						for (j = 0; j < childrows; j++)
1526 						{
1527 							HeapTuple	newtup;
1528 
1529 							newtup = do_convert_tuple(rows[numrows + j], map);
1530 							heap_freetuple(rows[numrows + j]);
1531 							rows[numrows + j] = newtup;
1532 						}
1533 						free_conversion_map(map);
1534 					}
1535 				}
1536 
1537 				/* And add to counts */
1538 				numrows += childrows;
1539 				*totalrows += trows;
1540 				*totaldeadrows += tdrows;
1541 			}
1542 		}
1543 
1544 		/*
1545 		 * Note: we cannot release the child-table locks, since we may have
1546 		 * pointers to their TOAST tables in the sampled rows.
1547 		 */
1548 		heap_close(childrel, NoLock);
1549 	}
1550 
1551 	return numrows;
1552 }
1553 
1554 
1555 /*
1556  *	update_attstats() -- update attribute statistics for one relation
1557  *
1558  *		Statistics are stored in several places: the pg_class row for the
1559  *		relation has stats about the whole relation, and there is a
1560  *		pg_statistic row for each (non-system) attribute that has ever
1561  *		been analyzed.  The pg_class values are updated by VACUUM, not here.
1562  *
1563  *		pg_statistic rows are just added or updated normally.  This means
1564  *		that pg_statistic will probably contain some deleted rows at the
1565  *		completion of a vacuum cycle, unless it happens to get vacuumed last.
1566  *
1567  *		To keep things simple, we punt for pg_statistic, and don't try
1568  *		to compute or store rows for pg_statistic itself in pg_statistic.
1569  *		This could possibly be made to work, but it's not worth the trouble.
1570  *		Note analyze_rel() has seen to it that we won't come here when
1571  *		vacuuming pg_statistic itself.
1572  *
1573  *		Note: there would be a race condition here if two backends could
1574  *		ANALYZE the same table concurrently.  Presently, we lock that out
1575  *		by taking a self-exclusive lock on the relation in analyze_rel().
1576  */
1577 static void
1578 update_attstats(Oid relid, bool inh, int natts, VacAttrStats **vacattrstats)
1579 {
1580 	Relation	sd;
1581 	int			attno;
1582 
1583 	if (natts <= 0)
1584 		return;					/* nothing to do */
1585 
1586 	sd = heap_open(StatisticRelationId, RowExclusiveLock);
1587 
1588 	for (attno = 0; attno < natts; attno++)
1589 	{
1590 		VacAttrStats *stats = vacattrstats[attno];
1591 		HeapTuple	stup,
1592 					oldtup;
1593 		int			i,
1594 					k,
1595 					n;
1596 		Datum		values[Natts_pg_statistic];
1597 		bool		nulls[Natts_pg_statistic];
1598 		bool		replaces[Natts_pg_statistic];
1599 
1600 		/* Ignore attr if we weren't able to collect stats */
1601 		if (!stats->stats_valid)
1602 			continue;
1603 
1604 		/*
1605 		 * Construct a new pg_statistic tuple
1606 		 */
1607 		for (i = 0; i < Natts_pg_statistic; ++i)
1608 		{
1609 			nulls[i] = false;
1610 			replaces[i] = true;
1611 		}
1612 
1613 		values[Anum_pg_statistic_starelid - 1] = ObjectIdGetDatum(relid);
1614 		values[Anum_pg_statistic_staattnum - 1] = Int16GetDatum(stats->attr->attnum);
1615 		values[Anum_pg_statistic_stainherit - 1] = BoolGetDatum(inh);
1616 		values[Anum_pg_statistic_stanullfrac - 1] = Float4GetDatum(stats->stanullfrac);
1617 		values[Anum_pg_statistic_stawidth - 1] = Int32GetDatum(stats->stawidth);
1618 		values[Anum_pg_statistic_stadistinct - 1] = Float4GetDatum(stats->stadistinct);
1619 		i = Anum_pg_statistic_stakind1 - 1;
1620 		for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
1621 		{
1622 			values[i++] = Int16GetDatum(stats->stakind[k]); /* stakindN */
1623 		}
1624 		i = Anum_pg_statistic_staop1 - 1;
1625 		for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
1626 		{
1627 			values[i++] = ObjectIdGetDatum(stats->staop[k]);	/* staopN */
1628 		}
1629 		i = Anum_pg_statistic_stanumbers1 - 1;
1630 		for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
1631 		{
1632 			int			nnum = stats->numnumbers[k];
1633 
1634 			if (nnum > 0)
1635 			{
1636 				Datum	   *numdatums = (Datum *) palloc(nnum * sizeof(Datum));
1637 				ArrayType  *arry;
1638 
1639 				for (n = 0; n < nnum; n++)
1640 					numdatums[n] = Float4GetDatum(stats->stanumbers[k][n]);
1641 				/* XXX knows more than it should about type float4: */
1642 				arry = construct_array(numdatums, nnum,
1643 									   FLOAT4OID,
1644 									   sizeof(float4), FLOAT4PASSBYVAL, 'i');
1645 				values[i++] = PointerGetDatum(arry);	/* stanumbersN */
1646 			}
1647 			else
1648 			{
1649 				nulls[i] = true;
1650 				values[i++] = (Datum) 0;
1651 			}
1652 		}
1653 		i = Anum_pg_statistic_stavalues1 - 1;
1654 		for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
1655 		{
1656 			if (stats->numvalues[k] > 0)
1657 			{
1658 				ArrayType  *arry;
1659 
1660 				arry = construct_array(stats->stavalues[k],
1661 									   stats->numvalues[k],
1662 									   stats->statypid[k],
1663 									   stats->statyplen[k],
1664 									   stats->statypbyval[k],
1665 									   stats->statypalign[k]);
1666 				values[i++] = PointerGetDatum(arry);	/* stavaluesN */
1667 			}
1668 			else
1669 			{
1670 				nulls[i] = true;
1671 				values[i++] = (Datum) 0;
1672 			}
1673 		}
1674 
1675 		/* Is there already a pg_statistic tuple for this attribute? */
1676 		oldtup = SearchSysCache3(STATRELATTINH,
1677 								 ObjectIdGetDatum(relid),
1678 								 Int16GetDatum(stats->attr->attnum),
1679 								 BoolGetDatum(inh));
1680 
1681 		if (HeapTupleIsValid(oldtup))
1682 		{
1683 			/* Yes, replace it */
1684 			stup = heap_modify_tuple(oldtup,
1685 									 RelationGetDescr(sd),
1686 									 values,
1687 									 nulls,
1688 									 replaces);
1689 			ReleaseSysCache(oldtup);
1690 			CatalogTupleUpdate(sd, &stup->t_self, stup);
1691 		}
1692 		else
1693 		{
1694 			/* No, insert new tuple */
1695 			stup = heap_form_tuple(RelationGetDescr(sd), values, nulls);
1696 			CatalogTupleInsert(sd, stup);
1697 		}
1698 
1699 		heap_freetuple(stup);
1700 	}
1701 
1702 	heap_close(sd, RowExclusiveLock);
1703 }
1704 
1705 /*
1706  * Standard fetch function for use by compute_stats subroutines.
1707  *
1708  * This exists to provide some insulation between compute_stats routines
1709  * and the actual storage of the sample data.
1710  */
1711 static Datum
1712 std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull)
1713 {
1714 	int			attnum = stats->tupattnum;
1715 	HeapTuple	tuple = stats->rows[rownum];
1716 	TupleDesc	tupDesc = stats->tupDesc;
1717 
1718 	return heap_getattr(tuple, attnum, tupDesc, isNull);
1719 }
1720 
1721 /*
1722  * Fetch function for analyzing index expressions.
1723  *
1724  * We have not bothered to construct index tuples, instead the data is
1725  * just in Datum arrays.
1726  */
1727 static Datum
1728 ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull)
1729 {
1730 	int			i;
1731 
1732 	/* exprvals and exprnulls are already offset for proper column */
1733 	i = rownum * stats->rowstride;
1734 	*isNull = stats->exprnulls[i];
1735 	return stats->exprvals[i];
1736 }
1737 
1738 
1739 /*==========================================================================
1740  *
1741  * Code below this point represents the "standard" type-specific statistics
1742  * analysis algorithms.  This code can be replaced on a per-data-type basis
1743  * by setting a nonzero value in pg_type.typanalyze.
1744  *
1745  *==========================================================================
1746  */
1747 
1748 
1749 /*
1750  * To avoid consuming too much memory during analysis and/or too much space
1751  * in the resulting pg_statistic rows, we ignore varlena datums that are wider
1752  * than WIDTH_THRESHOLD (after detoasting!).  This is legitimate for MCV
1753  * and distinct-value calculations since a wide value is unlikely to be
1754  * duplicated at all, much less be a most-common value.  For the same reason,
1755  * ignoring wide values will not affect our estimates of histogram bin
1756  * boundaries very much.
1757  */
1758 #define WIDTH_THRESHOLD  1024
1759 
1760 #define swapInt(a,b)	do {int _tmp; _tmp=a; a=b; b=_tmp;} while(0)
1761 #define swapDatum(a,b)	do {Datum _tmp; _tmp=a; a=b; b=_tmp;} while(0)
1762 
1763 /*
1764  * Extra information used by the default analysis routines
1765  */
1766 typedef struct
1767 {
1768 	int			count;			/* # of duplicates */
1769 	int			first;			/* values[] index of first occurrence */
1770 } ScalarMCVItem;
1771 
1772 typedef struct
1773 {
1774 	SortSupport ssup;
1775 	int		   *tupnoLink;
1776 } CompareScalarsContext;
1777 
1778 
1779 static void compute_trivial_stats(VacAttrStatsP stats,
1780 					  AnalyzeAttrFetchFunc fetchfunc,
1781 					  int samplerows,
1782 					  double totalrows);
1783 static void compute_distinct_stats(VacAttrStatsP stats,
1784 					   AnalyzeAttrFetchFunc fetchfunc,
1785 					   int samplerows,
1786 					   double totalrows);
1787 static void compute_scalar_stats(VacAttrStatsP stats,
1788 					 AnalyzeAttrFetchFunc fetchfunc,
1789 					 int samplerows,
1790 					 double totalrows);
1791 static int	compare_scalars(const void *a, const void *b, void *arg);
1792 static int	compare_mcvs(const void *a, const void *b);
1793 static int analyze_mcv_list(int *mcv_counts,
1794 				 int num_mcv,
1795 				 double stadistinct,
1796 				 double stanullfrac,
1797 				 int samplerows,
1798 				 double totalrows);
1799 
1800 
1801 /*
1802  * std_typanalyze -- the default type-specific typanalyze function
1803  */
1804 bool
1805 std_typanalyze(VacAttrStats *stats)
1806 {
1807 	Form_pg_attribute attr = stats->attr;
1808 	Oid			ltopr;
1809 	Oid			eqopr;
1810 	StdAnalyzeData *mystats;
1811 
1812 	/* If the attstattarget column is negative, use the default value */
1813 	/* NB: it is okay to scribble on stats->attr since it's a copy */
1814 	if (attr->attstattarget < 0)
1815 		attr->attstattarget = default_statistics_target;
1816 
1817 	/* Look for default "<" and "=" operators for column's type */
1818 	get_sort_group_operators(stats->attrtypid,
1819 							 false, false, false,
1820 							 &ltopr, &eqopr, NULL,
1821 							 NULL);
1822 
1823 	/* Save the operator info for compute_stats routines */
1824 	mystats = (StdAnalyzeData *) palloc(sizeof(StdAnalyzeData));
1825 	mystats->eqopr = eqopr;
1826 	mystats->eqfunc = OidIsValid(eqopr) ? get_opcode(eqopr) : InvalidOid;
1827 	mystats->ltopr = ltopr;
1828 	stats->extra_data = mystats;
1829 
1830 	/*
1831 	 * Determine which standard statistics algorithm to use
1832 	 */
1833 	if (OidIsValid(eqopr) && OidIsValid(ltopr))
1834 	{
1835 		/* Seems to be a scalar datatype */
1836 		stats->compute_stats = compute_scalar_stats;
1837 		/*--------------------
1838 		 * The following choice of minrows is based on the paper
1839 		 * "Random sampling for histogram construction: how much is enough?"
1840 		 * by Surajit Chaudhuri, Rajeev Motwani and Vivek Narasayya, in
1841 		 * Proceedings of ACM SIGMOD International Conference on Management
1842 		 * of Data, 1998, Pages 436-447.  Their Corollary 1 to Theorem 5
1843 		 * says that for table size n, histogram size k, maximum relative
1844 		 * error in bin size f, and error probability gamma, the minimum
1845 		 * random sample size is
1846 		 *		r = 4 * k * ln(2*n/gamma) / f^2
1847 		 * Taking f = 0.5, gamma = 0.01, n = 10^6 rows, we obtain
1848 		 *		r = 305.82 * k
1849 		 * Note that because of the log function, the dependence on n is
1850 		 * quite weak; even at n = 10^12, a 300*k sample gives <= 0.66
1851 		 * bin size error with probability 0.99.  So there's no real need to
1852 		 * scale for n, which is a good thing because we don't necessarily
1853 		 * know it at this point.
1854 		 *--------------------
1855 		 */
1856 		stats->minrows = 300 * attr->attstattarget;
1857 	}
1858 	else if (OidIsValid(eqopr))
1859 	{
1860 		/* We can still recognize distinct values */
1861 		stats->compute_stats = compute_distinct_stats;
1862 		/* Might as well use the same minrows as above */
1863 		stats->minrows = 300 * attr->attstattarget;
1864 	}
1865 	else
1866 	{
1867 		/* Can't do much but the trivial stuff */
1868 		stats->compute_stats = compute_trivial_stats;
1869 		/* Might as well use the same minrows as above */
1870 		stats->minrows = 300 * attr->attstattarget;
1871 	}
1872 
1873 	return true;
1874 }
1875 
1876 
1877 /*
1878  *	compute_trivial_stats() -- compute very basic column statistics
1879  *
1880  *	We use this when we cannot find a hash "=" operator for the datatype.
1881  *
1882  *	We determine the fraction of non-null rows and the average datum width.
1883  */
1884 static void
1885 compute_trivial_stats(VacAttrStatsP stats,
1886 					  AnalyzeAttrFetchFunc fetchfunc,
1887 					  int samplerows,
1888 					  double totalrows)
1889 {
1890 	int			i;
1891 	int			null_cnt = 0;
1892 	int			nonnull_cnt = 0;
1893 	double		total_width = 0;
1894 	bool		is_varlena = (!stats->attrtype->typbyval &&
1895 							  stats->attrtype->typlen == -1);
1896 	bool		is_varwidth = (!stats->attrtype->typbyval &&
1897 							   stats->attrtype->typlen < 0);
1898 
1899 	for (i = 0; i < samplerows; i++)
1900 	{
1901 		Datum		value;
1902 		bool		isnull;
1903 
1904 		vacuum_delay_point();
1905 
1906 		value = fetchfunc(stats, i, &isnull);
1907 
1908 		/* Check for null/nonnull */
1909 		if (isnull)
1910 		{
1911 			null_cnt++;
1912 			continue;
1913 		}
1914 		nonnull_cnt++;
1915 
1916 		/*
1917 		 * If it's a variable-width field, add up widths for average width
1918 		 * calculation.  Note that if the value is toasted, we use the toasted
1919 		 * width.  We don't bother with this calculation if it's a fixed-width
1920 		 * type.
1921 		 */
1922 		if (is_varlena)
1923 		{
1924 			total_width += VARSIZE_ANY(DatumGetPointer(value));
1925 		}
1926 		else if (is_varwidth)
1927 		{
1928 			/* must be cstring */
1929 			total_width += strlen(DatumGetCString(value)) + 1;
1930 		}
1931 	}
1932 
1933 	/* We can only compute average width if we found some non-null values. */
1934 	if (nonnull_cnt > 0)
1935 	{
1936 		stats->stats_valid = true;
1937 		/* Do the simple null-frac and width stats */
1938 		stats->stanullfrac = (double) null_cnt / (double) samplerows;
1939 		if (is_varwidth)
1940 			stats->stawidth = total_width / (double) nonnull_cnt;
1941 		else
1942 			stats->stawidth = stats->attrtype->typlen;
1943 		stats->stadistinct = 0.0;	/* "unknown" */
1944 	}
1945 	else if (null_cnt > 0)
1946 	{
1947 		/* We found only nulls; assume the column is entirely null */
1948 		stats->stats_valid = true;
1949 		stats->stanullfrac = 1.0;
1950 		if (is_varwidth)
1951 			stats->stawidth = 0;	/* "unknown" */
1952 		else
1953 			stats->stawidth = stats->attrtype->typlen;
1954 		stats->stadistinct = 0.0;	/* "unknown" */
1955 	}
1956 }
1957 
1958 
1959 /*
1960  *	compute_distinct_stats() -- compute column statistics including ndistinct
1961  *
1962  *	We use this when we can find only an "=" operator for the datatype.
1963  *
1964  *	We determine the fraction of non-null rows, the average width, the
1965  *	most common values, and the (estimated) number of distinct values.
1966  *
1967  *	The most common values are determined by brute force: we keep a list
1968  *	of previously seen values, ordered by number of times seen, as we scan
1969  *	the samples.  A newly seen value is inserted just after the last
1970  *	multiply-seen value, causing the bottommost (oldest) singly-seen value
1971  *	to drop off the list.  The accuracy of this method, and also its cost,
1972  *	depend mainly on the length of the list we are willing to keep.
1973  */
1974 static void
1975 compute_distinct_stats(VacAttrStatsP stats,
1976 					   AnalyzeAttrFetchFunc fetchfunc,
1977 					   int samplerows,
1978 					   double totalrows)
1979 {
1980 	int			i;
1981 	int			null_cnt = 0;
1982 	int			nonnull_cnt = 0;
1983 	int			toowide_cnt = 0;
1984 	double		total_width = 0;
1985 	bool		is_varlena = (!stats->attrtype->typbyval &&
1986 							  stats->attrtype->typlen == -1);
1987 	bool		is_varwidth = (!stats->attrtype->typbyval &&
1988 							   stats->attrtype->typlen < 0);
1989 	FmgrInfo	f_cmpeq;
1990 	typedef struct
1991 	{
1992 		Datum		value;
1993 		int			count;
1994 	} TrackItem;
1995 	TrackItem  *track;
1996 	int			track_cnt,
1997 				track_max;
1998 	int			num_mcv = stats->attr->attstattarget;
1999 	StdAnalyzeData *mystats = (StdAnalyzeData *) stats->extra_data;
2000 
2001 	/*
2002 	 * We track up to 2*n values for an n-element MCV list; but at least 10
2003 	 */
2004 	track_max = 2 * num_mcv;
2005 	if (track_max < 10)
2006 		track_max = 10;
2007 	track = (TrackItem *) palloc(track_max * sizeof(TrackItem));
2008 	track_cnt = 0;
2009 
2010 	fmgr_info(mystats->eqfunc, &f_cmpeq);
2011 
2012 	for (i = 0; i < samplerows; i++)
2013 	{
2014 		Datum		value;
2015 		bool		isnull;
2016 		bool		match;
2017 		int			firstcount1,
2018 					j;
2019 
2020 		vacuum_delay_point();
2021 
2022 		value = fetchfunc(stats, i, &isnull);
2023 
2024 		/* Check for null/nonnull */
2025 		if (isnull)
2026 		{
2027 			null_cnt++;
2028 			continue;
2029 		}
2030 		nonnull_cnt++;
2031 
2032 		/*
2033 		 * If it's a variable-width field, add up widths for average width
2034 		 * calculation.  Note that if the value is toasted, we use the toasted
2035 		 * width.  We don't bother with this calculation if it's a fixed-width
2036 		 * type.
2037 		 */
2038 		if (is_varlena)
2039 		{
2040 			total_width += VARSIZE_ANY(DatumGetPointer(value));
2041 
2042 			/*
2043 			 * If the value is toasted, we want to detoast it just once to
2044 			 * avoid repeated detoastings and resultant excess memory usage
2045 			 * during the comparisons.  Also, check to see if the value is
2046 			 * excessively wide, and if so don't detoast at all --- just
2047 			 * ignore the value.
2048 			 */
2049 			if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
2050 			{
2051 				toowide_cnt++;
2052 				continue;
2053 			}
2054 			value = PointerGetDatum(PG_DETOAST_DATUM(value));
2055 		}
2056 		else if (is_varwidth)
2057 		{
2058 			/* must be cstring */
2059 			total_width += strlen(DatumGetCString(value)) + 1;
2060 		}
2061 
2062 		/*
2063 		 * See if the value matches anything we're already tracking.
2064 		 */
2065 		match = false;
2066 		firstcount1 = track_cnt;
2067 		for (j = 0; j < track_cnt; j++)
2068 		{
2069 			/* We always use the default collation for statistics */
2070 			if (DatumGetBool(FunctionCall2Coll(&f_cmpeq,
2071 											   DEFAULT_COLLATION_OID,
2072 											   value, track[j].value)))
2073 			{
2074 				match = true;
2075 				break;
2076 			}
2077 			if (j < firstcount1 && track[j].count == 1)
2078 				firstcount1 = j;
2079 		}
2080 
2081 		if (match)
2082 		{
2083 			/* Found a match */
2084 			track[j].count++;
2085 			/* This value may now need to "bubble up" in the track list */
2086 			while (j > 0 && track[j].count > track[j - 1].count)
2087 			{
2088 				swapDatum(track[j].value, track[j - 1].value);
2089 				swapInt(track[j].count, track[j - 1].count);
2090 				j--;
2091 			}
2092 		}
2093 		else
2094 		{
2095 			/* No match.  Insert at head of count-1 list */
2096 			if (track_cnt < track_max)
2097 				track_cnt++;
2098 			for (j = track_cnt - 1; j > firstcount1; j--)
2099 			{
2100 				track[j].value = track[j - 1].value;
2101 				track[j].count = track[j - 1].count;
2102 			}
2103 			if (firstcount1 < track_cnt)
2104 			{
2105 				track[firstcount1].value = value;
2106 				track[firstcount1].count = 1;
2107 			}
2108 		}
2109 	}
2110 
2111 	/* We can only compute real stats if we found some non-null values. */
2112 	if (nonnull_cnt > 0)
2113 	{
2114 		int			nmultiple,
2115 					summultiple;
2116 
2117 		stats->stats_valid = true;
2118 		/* Do the simple null-frac and width stats */
2119 		stats->stanullfrac = (double) null_cnt / (double) samplerows;
2120 		if (is_varwidth)
2121 			stats->stawidth = total_width / (double) nonnull_cnt;
2122 		else
2123 			stats->stawidth = stats->attrtype->typlen;
2124 
2125 		/* Count the number of values we found multiple times */
2126 		summultiple = 0;
2127 		for (nmultiple = 0; nmultiple < track_cnt; nmultiple++)
2128 		{
2129 			if (track[nmultiple].count == 1)
2130 				break;
2131 			summultiple += track[nmultiple].count;
2132 		}
2133 
2134 		if (nmultiple == 0)
2135 		{
2136 			/*
2137 			 * If we found no repeated non-null values, assume it's a unique
2138 			 * column; but be sure to discount for any nulls we found.
2139 			 */
2140 			stats->stadistinct = -1.0 * (1.0 - stats->stanullfrac);
2141 		}
2142 		else if (track_cnt < track_max && toowide_cnt == 0 &&
2143 				 nmultiple == track_cnt)
2144 		{
2145 			/*
2146 			 * Our track list includes every value in the sample, and every
2147 			 * value appeared more than once.  Assume the column has just
2148 			 * these values.  (This case is meant to address columns with
2149 			 * small, fixed sets of possible values, such as boolean or enum
2150 			 * columns.  If there are any values that appear just once in the
2151 			 * sample, including too-wide values, we should assume that that's
2152 			 * not what we're dealing with.)
2153 			 */
2154 			stats->stadistinct = track_cnt;
2155 		}
2156 		else
2157 		{
2158 			/*----------
2159 			 * Estimate the number of distinct values using the estimator
2160 			 * proposed by Haas and Stokes in IBM Research Report RJ 10025:
2161 			 *		n*d / (n - f1 + f1*n/N)
2162 			 * where f1 is the number of distinct values that occurred
2163 			 * exactly once in our sample of n rows (from a total of N),
2164 			 * and d is the total number of distinct values in the sample.
2165 			 * This is their Duj1 estimator; the other estimators they
2166 			 * recommend are considerably more complex, and are numerically
2167 			 * very unstable when n is much smaller than N.
2168 			 *
2169 			 * In this calculation, we consider only non-nulls.  We used to
2170 			 * include rows with null values in the n and N counts, but that
2171 			 * leads to inaccurate answers in columns with many nulls, and
2172 			 * it's intuitively bogus anyway considering the desired result is
2173 			 * the number of distinct non-null values.
2174 			 *
2175 			 * We assume (not very reliably!) that all the multiply-occurring
2176 			 * values are reflected in the final track[] list, and the other
2177 			 * nonnull values all appeared but once.  (XXX this usually
2178 			 * results in a drastic overestimate of ndistinct.  Can we do
2179 			 * any better?)
2180 			 *----------
2181 			 */
2182 			int			f1 = nonnull_cnt - summultiple;
2183 			int			d = f1 + nmultiple;
2184 			double		n = samplerows - null_cnt;
2185 			double		N = totalrows * (1.0 - stats->stanullfrac);
2186 			double		stadistinct;
2187 
2188 			/* N == 0 shouldn't happen, but just in case ... */
2189 			if (N > 0)
2190 				stadistinct = (n * d) / ((n - f1) + f1 * n / N);
2191 			else
2192 				stadistinct = 0;
2193 
2194 			/* Clamp to sane range in case of roundoff error */
2195 			if (stadistinct < d)
2196 				stadistinct = d;
2197 			if (stadistinct > N)
2198 				stadistinct = N;
2199 			/* And round to integer */
2200 			stats->stadistinct = floor(stadistinct + 0.5);
2201 		}
2202 
2203 		/*
2204 		 * If we estimated the number of distinct values at more than 10% of
2205 		 * the total row count (a very arbitrary limit), then assume that
2206 		 * stadistinct should scale with the row count rather than be a fixed
2207 		 * value.
2208 		 */
2209 		if (stats->stadistinct > 0.1 * totalrows)
2210 			stats->stadistinct = -(stats->stadistinct / totalrows);
2211 
2212 		/*
2213 		 * Decide how many values are worth storing as most-common values. If
2214 		 * we are able to generate a complete MCV list (all the values in the
2215 		 * sample will fit, and we think these are all the ones in the table),
2216 		 * then do so.  Otherwise, store only those values that are
2217 		 * significantly more common than the values not in the list.
2218 		 *
2219 		 * Note: the first of these cases is meant to address columns with
2220 		 * small, fixed sets of possible values, such as boolean or enum
2221 		 * columns.  If we can *completely* represent the column population by
2222 		 * an MCV list that will fit into the stats target, then we should do
2223 		 * so and thus provide the planner with complete information.  But if
2224 		 * the MCV list is not complete, it's generally worth being more
2225 		 * selective, and not just filling it all the way up to the stats
2226 		 * target.
2227 		 */
2228 		if (track_cnt < track_max && toowide_cnt == 0 &&
2229 			stats->stadistinct > 0 &&
2230 			track_cnt <= num_mcv)
2231 		{
2232 			/* Track list includes all values seen, and all will fit */
2233 			num_mcv = track_cnt;
2234 		}
2235 		else
2236 		{
2237 			int		   *mcv_counts;
2238 
2239 			/* Incomplete list; decide how many values are worth keeping */
2240 			if (num_mcv > track_cnt)
2241 				num_mcv = track_cnt;
2242 
2243 			if (num_mcv > 0)
2244 			{
2245 				mcv_counts = (int *) palloc(num_mcv * sizeof(int));
2246 				for (i = 0; i < num_mcv; i++)
2247 					mcv_counts[i] = track[i].count;
2248 
2249 				num_mcv = analyze_mcv_list(mcv_counts, num_mcv,
2250 										   stats->stadistinct,
2251 										   stats->stanullfrac,
2252 										   samplerows, totalrows);
2253 			}
2254 		}
2255 
2256 		/* Generate MCV slot entry */
2257 		if (num_mcv > 0)
2258 		{
2259 			MemoryContext old_context;
2260 			Datum	   *mcv_values;
2261 			float4	   *mcv_freqs;
2262 
2263 			/* Must copy the target values into anl_context */
2264 			old_context = MemoryContextSwitchTo(stats->anl_context);
2265 			mcv_values = (Datum *) palloc(num_mcv * sizeof(Datum));
2266 			mcv_freqs = (float4 *) palloc(num_mcv * sizeof(float4));
2267 			for (i = 0; i < num_mcv; i++)
2268 			{
2269 				mcv_values[i] = datumCopy(track[i].value,
2270 										  stats->attrtype->typbyval,
2271 										  stats->attrtype->typlen);
2272 				mcv_freqs[i] = (double) track[i].count / (double) samplerows;
2273 			}
2274 			MemoryContextSwitchTo(old_context);
2275 
2276 			stats->stakind[0] = STATISTIC_KIND_MCV;
2277 			stats->staop[0] = mystats->eqopr;
2278 			stats->stanumbers[0] = mcv_freqs;
2279 			stats->numnumbers[0] = num_mcv;
2280 			stats->stavalues[0] = mcv_values;
2281 			stats->numvalues[0] = num_mcv;
2282 
2283 			/*
2284 			 * Accept the defaults for stats->statypid and others. They have
2285 			 * been set before we were called (see vacuum.h)
2286 			 */
2287 		}
2288 	}
2289 	else if (null_cnt > 0)
2290 	{
2291 		/* We found only nulls; assume the column is entirely null */
2292 		stats->stats_valid = true;
2293 		stats->stanullfrac = 1.0;
2294 		if (is_varwidth)
2295 			stats->stawidth = 0;	/* "unknown" */
2296 		else
2297 			stats->stawidth = stats->attrtype->typlen;
2298 		stats->stadistinct = 0.0;	/* "unknown" */
2299 	}
2300 
2301 	/* We don't need to bother cleaning up any of our temporary palloc's */
2302 }
2303 
2304 
2305 /*
2306  *	compute_scalar_stats() -- compute column statistics
2307  *
2308  *	We use this when we can find "=" and "<" operators for the datatype.
2309  *
2310  *	We determine the fraction of non-null rows, the average width, the
2311  *	most common values, the (estimated) number of distinct values, the
2312  *	distribution histogram, and the correlation of physical to logical order.
2313  *
2314  *	The desired stats can be determined fairly easily after sorting the
2315  *	data values into order.
2316  */
2317 static void
2318 compute_scalar_stats(VacAttrStatsP stats,
2319 					 AnalyzeAttrFetchFunc fetchfunc,
2320 					 int samplerows,
2321 					 double totalrows)
2322 {
2323 	int			i;
2324 	int			null_cnt = 0;
2325 	int			nonnull_cnt = 0;
2326 	int			toowide_cnt = 0;
2327 	double		total_width = 0;
2328 	bool		is_varlena = (!stats->attrtype->typbyval &&
2329 							  stats->attrtype->typlen == -1);
2330 	bool		is_varwidth = (!stats->attrtype->typbyval &&
2331 							   stats->attrtype->typlen < 0);
2332 	double		corr_xysum;
2333 	SortSupportData ssup;
2334 	ScalarItem *values;
2335 	int			values_cnt = 0;
2336 	int		   *tupnoLink;
2337 	ScalarMCVItem *track;
2338 	int			track_cnt = 0;
2339 	int			num_mcv = stats->attr->attstattarget;
2340 	int			num_bins = stats->attr->attstattarget;
2341 	StdAnalyzeData *mystats = (StdAnalyzeData *) stats->extra_data;
2342 
2343 	values = (ScalarItem *) palloc(samplerows * sizeof(ScalarItem));
2344 	tupnoLink = (int *) palloc(samplerows * sizeof(int));
2345 	track = (ScalarMCVItem *) palloc(num_mcv * sizeof(ScalarMCVItem));
2346 
2347 	memset(&ssup, 0, sizeof(ssup));
2348 	ssup.ssup_cxt = CurrentMemoryContext;
2349 	/* We always use the default collation for statistics */
2350 	ssup.ssup_collation = DEFAULT_COLLATION_OID;
2351 	ssup.ssup_nulls_first = false;
2352 
2353 	/*
2354 	 * For now, don't perform abbreviated key conversion, because full values
2355 	 * are required for MCV slot generation.  Supporting that optimization
2356 	 * would necessitate teaching compare_scalars() to call a tie-breaker.
2357 	 */
2358 	ssup.abbreviate = false;
2359 
2360 	PrepareSortSupportFromOrderingOp(mystats->ltopr, &ssup);
2361 
2362 	/* Initial scan to find sortable values */
2363 	for (i = 0; i < samplerows; i++)
2364 	{
2365 		Datum		value;
2366 		bool		isnull;
2367 
2368 		vacuum_delay_point();
2369 
2370 		value = fetchfunc(stats, i, &isnull);
2371 
2372 		/* Check for null/nonnull */
2373 		if (isnull)
2374 		{
2375 			null_cnt++;
2376 			continue;
2377 		}
2378 		nonnull_cnt++;
2379 
2380 		/*
2381 		 * If it's a variable-width field, add up widths for average width
2382 		 * calculation.  Note that if the value is toasted, we use the toasted
2383 		 * width.  We don't bother with this calculation if it's a fixed-width
2384 		 * type.
2385 		 */
2386 		if (is_varlena)
2387 		{
2388 			total_width += VARSIZE_ANY(DatumGetPointer(value));
2389 
2390 			/*
2391 			 * If the value is toasted, we want to detoast it just once to
2392 			 * avoid repeated detoastings and resultant excess memory usage
2393 			 * during the comparisons.  Also, check to see if the value is
2394 			 * excessively wide, and if so don't detoast at all --- just
2395 			 * ignore the value.
2396 			 */
2397 			if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
2398 			{
2399 				toowide_cnt++;
2400 				continue;
2401 			}
2402 			value = PointerGetDatum(PG_DETOAST_DATUM(value));
2403 		}
2404 		else if (is_varwidth)
2405 		{
2406 			/* must be cstring */
2407 			total_width += strlen(DatumGetCString(value)) + 1;
2408 		}
2409 
2410 		/* Add it to the list to be sorted */
2411 		values[values_cnt].value = value;
2412 		values[values_cnt].tupno = values_cnt;
2413 		tupnoLink[values_cnt] = values_cnt;
2414 		values_cnt++;
2415 	}
2416 
2417 	/* We can only compute real stats if we found some sortable values. */
2418 	if (values_cnt > 0)
2419 	{
2420 		int			ndistinct,	/* # distinct values in sample */
2421 					nmultiple,	/* # that appear multiple times */
2422 					num_hist,
2423 					dups_cnt;
2424 		int			slot_idx = 0;
2425 		CompareScalarsContext cxt;
2426 
2427 		/* Sort the collected values */
2428 		cxt.ssup = &ssup;
2429 		cxt.tupnoLink = tupnoLink;
2430 		qsort_arg((void *) values, values_cnt, sizeof(ScalarItem),
2431 				  compare_scalars, (void *) &cxt);
2432 
2433 		/*
2434 		 * Now scan the values in order, find the most common ones, and also
2435 		 * accumulate ordering-correlation statistics.
2436 		 *
2437 		 * To determine which are most common, we first have to count the
2438 		 * number of duplicates of each value.  The duplicates are adjacent in
2439 		 * the sorted list, so a brute-force approach is to compare successive
2440 		 * datum values until we find two that are not equal. However, that
2441 		 * requires N-1 invocations of the datum comparison routine, which are
2442 		 * completely redundant with work that was done during the sort.  (The
2443 		 * sort algorithm must at some point have compared each pair of items
2444 		 * that are adjacent in the sorted order; otherwise it could not know
2445 		 * that it's ordered the pair correctly.) We exploit this by having
2446 		 * compare_scalars remember the highest tupno index that each
2447 		 * ScalarItem has been found equal to.  At the end of the sort, a
2448 		 * ScalarItem's tupnoLink will still point to itself if and only if it
2449 		 * is the last item of its group of duplicates (since the group will
2450 		 * be ordered by tupno).
2451 		 */
2452 		corr_xysum = 0;
2453 		ndistinct = 0;
2454 		nmultiple = 0;
2455 		dups_cnt = 0;
2456 		for (i = 0; i < values_cnt; i++)
2457 		{
2458 			int			tupno = values[i].tupno;
2459 
2460 			corr_xysum += ((double) i) * ((double) tupno);
2461 			dups_cnt++;
2462 			if (tupnoLink[tupno] == tupno)
2463 			{
2464 				/* Reached end of duplicates of this value */
2465 				ndistinct++;
2466 				if (dups_cnt > 1)
2467 				{
2468 					nmultiple++;
2469 					if (track_cnt < num_mcv ||
2470 						dups_cnt > track[track_cnt - 1].count)
2471 					{
2472 						/*
2473 						 * Found a new item for the mcv list; find its
2474 						 * position, bubbling down old items if needed. Loop
2475 						 * invariant is that j points at an empty/ replaceable
2476 						 * slot.
2477 						 */
2478 						int			j;
2479 
2480 						if (track_cnt < num_mcv)
2481 							track_cnt++;
2482 						for (j = track_cnt - 1; j > 0; j--)
2483 						{
2484 							if (dups_cnt <= track[j - 1].count)
2485 								break;
2486 							track[j].count = track[j - 1].count;
2487 							track[j].first = track[j - 1].first;
2488 						}
2489 						track[j].count = dups_cnt;
2490 						track[j].first = i + 1 - dups_cnt;
2491 					}
2492 				}
2493 				dups_cnt = 0;
2494 			}
2495 		}
2496 
2497 		stats->stats_valid = true;
2498 		/* Do the simple null-frac and width stats */
2499 		stats->stanullfrac = (double) null_cnt / (double) samplerows;
2500 		if (is_varwidth)
2501 			stats->stawidth = total_width / (double) nonnull_cnt;
2502 		else
2503 			stats->stawidth = stats->attrtype->typlen;
2504 
2505 		if (nmultiple == 0)
2506 		{
2507 			/*
2508 			 * If we found no repeated non-null values, assume it's a unique
2509 			 * column; but be sure to discount for any nulls we found.
2510 			 */
2511 			stats->stadistinct = -1.0 * (1.0 - stats->stanullfrac);
2512 		}
2513 		else if (toowide_cnt == 0 && nmultiple == ndistinct)
2514 		{
2515 			/*
2516 			 * Every value in the sample appeared more than once.  Assume the
2517 			 * column has just these values.  (This case is meant to address
2518 			 * columns with small, fixed sets of possible values, such as
2519 			 * boolean or enum columns.  If there are any values that appear
2520 			 * just once in the sample, including too-wide values, we should
2521 			 * assume that that's not what we're dealing with.)
2522 			 */
2523 			stats->stadistinct = ndistinct;
2524 		}
2525 		else
2526 		{
2527 			/*----------
2528 			 * Estimate the number of distinct values using the estimator
2529 			 * proposed by Haas and Stokes in IBM Research Report RJ 10025:
2530 			 *		n*d / (n - f1 + f1*n/N)
2531 			 * where f1 is the number of distinct values that occurred
2532 			 * exactly once in our sample of n rows (from a total of N),
2533 			 * and d is the total number of distinct values in the sample.
2534 			 * This is their Duj1 estimator; the other estimators they
2535 			 * recommend are considerably more complex, and are numerically
2536 			 * very unstable when n is much smaller than N.
2537 			 *
2538 			 * In this calculation, we consider only non-nulls.  We used to
2539 			 * include rows with null values in the n and N counts, but that
2540 			 * leads to inaccurate answers in columns with many nulls, and
2541 			 * it's intuitively bogus anyway considering the desired result is
2542 			 * the number of distinct non-null values.
2543 			 *
2544 			 * Overwidth values are assumed to have been distinct.
2545 			 *----------
2546 			 */
2547 			int			f1 = ndistinct - nmultiple + toowide_cnt;
2548 			int			d = f1 + nmultiple;
2549 			double		n = samplerows - null_cnt;
2550 			double		N = totalrows * (1.0 - stats->stanullfrac);
2551 			double		stadistinct;
2552 
2553 			/* N == 0 shouldn't happen, but just in case ... */
2554 			if (N > 0)
2555 				stadistinct = (n * d) / ((n - f1) + f1 * n / N);
2556 			else
2557 				stadistinct = 0;
2558 
2559 			/* Clamp to sane range in case of roundoff error */
2560 			if (stadistinct < d)
2561 				stadistinct = d;
2562 			if (stadistinct > N)
2563 				stadistinct = N;
2564 			/* And round to integer */
2565 			stats->stadistinct = floor(stadistinct + 0.5);
2566 		}
2567 
2568 		/*
2569 		 * If we estimated the number of distinct values at more than 10% of
2570 		 * the total row count (a very arbitrary limit), then assume that
2571 		 * stadistinct should scale with the row count rather than be a fixed
2572 		 * value.
2573 		 */
2574 		if (stats->stadistinct > 0.1 * totalrows)
2575 			stats->stadistinct = -(stats->stadistinct / totalrows);
2576 
2577 		/*
2578 		 * Decide how many values are worth storing as most-common values. If
2579 		 * we are able to generate a complete MCV list (all the values in the
2580 		 * sample will fit, and we think these are all the ones in the table),
2581 		 * then do so.  Otherwise, store only those values that are
2582 		 * significantly more common than the values not in the list.
2583 		 *
2584 		 * Note: the first of these cases is meant to address columns with
2585 		 * small, fixed sets of possible values, such as boolean or enum
2586 		 * columns.  If we can *completely* represent the column population by
2587 		 * an MCV list that will fit into the stats target, then we should do
2588 		 * so and thus provide the planner with complete information.  But if
2589 		 * the MCV list is not complete, it's generally worth being more
2590 		 * selective, and not just filling it all the way up to the stats
2591 		 * target.
2592 		 */
2593 		if (track_cnt == ndistinct && toowide_cnt == 0 &&
2594 			stats->stadistinct > 0 &&
2595 			track_cnt <= num_mcv)
2596 		{
2597 			/* Track list includes all values seen, and all will fit */
2598 			num_mcv = track_cnt;
2599 		}
2600 		else
2601 		{
2602 			int		   *mcv_counts;
2603 
2604 			/* Incomplete list; decide how many values are worth keeping */
2605 			if (num_mcv > track_cnt)
2606 				num_mcv = track_cnt;
2607 
2608 			if (num_mcv > 0)
2609 			{
2610 				mcv_counts = (int *) palloc(num_mcv * sizeof(int));
2611 				for (i = 0; i < num_mcv; i++)
2612 					mcv_counts[i] = track[i].count;
2613 
2614 				num_mcv = analyze_mcv_list(mcv_counts, num_mcv,
2615 										   stats->stadistinct,
2616 										   stats->stanullfrac,
2617 										   samplerows, totalrows);
2618 			}
2619 		}
2620 
2621 		/* Generate MCV slot entry */
2622 		if (num_mcv > 0)
2623 		{
2624 			MemoryContext old_context;
2625 			Datum	   *mcv_values;
2626 			float4	   *mcv_freqs;
2627 
2628 			/* Must copy the target values into anl_context */
2629 			old_context = MemoryContextSwitchTo(stats->anl_context);
2630 			mcv_values = (Datum *) palloc(num_mcv * sizeof(Datum));
2631 			mcv_freqs = (float4 *) palloc(num_mcv * sizeof(float4));
2632 			for (i = 0; i < num_mcv; i++)
2633 			{
2634 				mcv_values[i] = datumCopy(values[track[i].first].value,
2635 										  stats->attrtype->typbyval,
2636 										  stats->attrtype->typlen);
2637 				mcv_freqs[i] = (double) track[i].count / (double) samplerows;
2638 			}
2639 			MemoryContextSwitchTo(old_context);
2640 
2641 			stats->stakind[slot_idx] = STATISTIC_KIND_MCV;
2642 			stats->staop[slot_idx] = mystats->eqopr;
2643 			stats->stanumbers[slot_idx] = mcv_freqs;
2644 			stats->numnumbers[slot_idx] = num_mcv;
2645 			stats->stavalues[slot_idx] = mcv_values;
2646 			stats->numvalues[slot_idx] = num_mcv;
2647 
2648 			/*
2649 			 * Accept the defaults for stats->statypid and others. They have
2650 			 * been set before we were called (see vacuum.h)
2651 			 */
2652 			slot_idx++;
2653 		}
2654 
2655 		/*
2656 		 * Generate a histogram slot entry if there are at least two distinct
2657 		 * values not accounted for in the MCV list.  (This ensures the
2658 		 * histogram won't collapse to empty or a singleton.)
2659 		 */
2660 		num_hist = ndistinct - num_mcv;
2661 		if (num_hist > num_bins)
2662 			num_hist = num_bins + 1;
2663 		if (num_hist >= 2)
2664 		{
2665 			MemoryContext old_context;
2666 			Datum	   *hist_values;
2667 			int			nvals;
2668 			int			pos,
2669 						posfrac,
2670 						delta,
2671 						deltafrac;
2672 
2673 			/* Sort the MCV items into position order to speed next loop */
2674 			qsort((void *) track, num_mcv,
2675 				  sizeof(ScalarMCVItem), compare_mcvs);
2676 
2677 			/*
2678 			 * Collapse out the MCV items from the values[] array.
2679 			 *
2680 			 * Note we destroy the values[] array here... but we don't need it
2681 			 * for anything more.  We do, however, still need values_cnt.
2682 			 * nvals will be the number of remaining entries in values[].
2683 			 */
2684 			if (num_mcv > 0)
2685 			{
2686 				int			src,
2687 							dest;
2688 				int			j;
2689 
2690 				src = dest = 0;
2691 				j = 0;			/* index of next interesting MCV item */
2692 				while (src < values_cnt)
2693 				{
2694 					int			ncopy;
2695 
2696 					if (j < num_mcv)
2697 					{
2698 						int			first = track[j].first;
2699 
2700 						if (src >= first)
2701 						{
2702 							/* advance past this MCV item */
2703 							src = first + track[j].count;
2704 							j++;
2705 							continue;
2706 						}
2707 						ncopy = first - src;
2708 					}
2709 					else
2710 						ncopy = values_cnt - src;
2711 					memmove(&values[dest], &values[src],
2712 							ncopy * sizeof(ScalarItem));
2713 					src += ncopy;
2714 					dest += ncopy;
2715 				}
2716 				nvals = dest;
2717 			}
2718 			else
2719 				nvals = values_cnt;
2720 			Assert(nvals >= num_hist);
2721 
2722 			/* Must copy the target values into anl_context */
2723 			old_context = MemoryContextSwitchTo(stats->anl_context);
2724 			hist_values = (Datum *) palloc(num_hist * sizeof(Datum));
2725 
2726 			/*
2727 			 * The object of this loop is to copy the first and last values[]
2728 			 * entries along with evenly-spaced values in between.  So the
2729 			 * i'th value is values[(i * (nvals - 1)) / (num_hist - 1)].  But
2730 			 * computing that subscript directly risks integer overflow when
2731 			 * the stats target is more than a couple thousand.  Instead we
2732 			 * add (nvals - 1) / (num_hist - 1) to pos at each step, tracking
2733 			 * the integral and fractional parts of the sum separately.
2734 			 */
2735 			delta = (nvals - 1) / (num_hist - 1);
2736 			deltafrac = (nvals - 1) % (num_hist - 1);
2737 			pos = posfrac = 0;
2738 
2739 			for (i = 0; i < num_hist; i++)
2740 			{
2741 				hist_values[i] = datumCopy(values[pos].value,
2742 										   stats->attrtype->typbyval,
2743 										   stats->attrtype->typlen);
2744 				pos += delta;
2745 				posfrac += deltafrac;
2746 				if (posfrac >= (num_hist - 1))
2747 				{
2748 					/* fractional part exceeds 1, carry to integer part */
2749 					pos++;
2750 					posfrac -= (num_hist - 1);
2751 				}
2752 			}
2753 
2754 			MemoryContextSwitchTo(old_context);
2755 
2756 			stats->stakind[slot_idx] = STATISTIC_KIND_HISTOGRAM;
2757 			stats->staop[slot_idx] = mystats->ltopr;
2758 			stats->stavalues[slot_idx] = hist_values;
2759 			stats->numvalues[slot_idx] = num_hist;
2760 
2761 			/*
2762 			 * Accept the defaults for stats->statypid and others. They have
2763 			 * been set before we were called (see vacuum.h)
2764 			 */
2765 			slot_idx++;
2766 		}
2767 
2768 		/* Generate a correlation entry if there are multiple values */
2769 		if (values_cnt > 1)
2770 		{
2771 			MemoryContext old_context;
2772 			float4	   *corrs;
2773 			double		corr_xsum,
2774 						corr_x2sum;
2775 
2776 			/* Must copy the target values into anl_context */
2777 			old_context = MemoryContextSwitchTo(stats->anl_context);
2778 			corrs = (float4 *) palloc(sizeof(float4));
2779 			MemoryContextSwitchTo(old_context);
2780 
2781 			/*----------
2782 			 * Since we know the x and y value sets are both
2783 			 *		0, 1, ..., values_cnt-1
2784 			 * we have sum(x) = sum(y) =
2785 			 *		(values_cnt-1)*values_cnt / 2
2786 			 * and sum(x^2) = sum(y^2) =
2787 			 *		(values_cnt-1)*values_cnt*(2*values_cnt-1) / 6.
2788 			 *----------
2789 			 */
2790 			corr_xsum = ((double) (values_cnt - 1)) *
2791 				((double) values_cnt) / 2.0;
2792 			corr_x2sum = ((double) (values_cnt - 1)) *
2793 				((double) values_cnt) * (double) (2 * values_cnt - 1) / 6.0;
2794 
2795 			/* And the correlation coefficient reduces to */
2796 			corrs[0] = (values_cnt * corr_xysum - corr_xsum * corr_xsum) /
2797 				(values_cnt * corr_x2sum - corr_xsum * corr_xsum);
2798 
2799 			stats->stakind[slot_idx] = STATISTIC_KIND_CORRELATION;
2800 			stats->staop[slot_idx] = mystats->ltopr;
2801 			stats->stanumbers[slot_idx] = corrs;
2802 			stats->numnumbers[slot_idx] = 1;
2803 			slot_idx++;
2804 		}
2805 	}
2806 	else if (nonnull_cnt > 0)
2807 	{
2808 		/* We found some non-null values, but they were all too wide */
2809 		Assert(nonnull_cnt == toowide_cnt);
2810 		stats->stats_valid = true;
2811 		/* Do the simple null-frac and width stats */
2812 		stats->stanullfrac = (double) null_cnt / (double) samplerows;
2813 		if (is_varwidth)
2814 			stats->stawidth = total_width / (double) nonnull_cnt;
2815 		else
2816 			stats->stawidth = stats->attrtype->typlen;
2817 		/* Assume all too-wide values are distinct, so it's a unique column */
2818 		stats->stadistinct = -1.0 * (1.0 - stats->stanullfrac);
2819 	}
2820 	else if (null_cnt > 0)
2821 	{
2822 		/* We found only nulls; assume the column is entirely null */
2823 		stats->stats_valid = true;
2824 		stats->stanullfrac = 1.0;
2825 		if (is_varwidth)
2826 			stats->stawidth = 0;	/* "unknown" */
2827 		else
2828 			stats->stawidth = stats->attrtype->typlen;
2829 		stats->stadistinct = 0.0;	/* "unknown" */
2830 	}
2831 
2832 	/* We don't need to bother cleaning up any of our temporary palloc's */
2833 }
2834 
2835 /*
2836  * qsort_arg comparator for sorting ScalarItems
2837  *
2838  * Aside from sorting the items, we update the tupnoLink[] array
2839  * whenever two ScalarItems are found to contain equal datums.  The array
2840  * is indexed by tupno; for each ScalarItem, it contains the highest
2841  * tupno that that item's datum has been found to be equal to.  This allows
2842  * us to avoid additional comparisons in compute_scalar_stats().
2843  */
2844 static int
2845 compare_scalars(const void *a, const void *b, void *arg)
2846 {
2847 	Datum		da = ((const ScalarItem *) a)->value;
2848 	int			ta = ((const ScalarItem *) a)->tupno;
2849 	Datum		db = ((const ScalarItem *) b)->value;
2850 	int			tb = ((const ScalarItem *) b)->tupno;
2851 	CompareScalarsContext *cxt = (CompareScalarsContext *) arg;
2852 	int			compare;
2853 
2854 	compare = ApplySortComparator(da, false, db, false, cxt->ssup);
2855 	if (compare != 0)
2856 		return compare;
2857 
2858 	/*
2859 	 * The two datums are equal, so update cxt->tupnoLink[].
2860 	 */
2861 	if (cxt->tupnoLink[ta] < tb)
2862 		cxt->tupnoLink[ta] = tb;
2863 	if (cxt->tupnoLink[tb] < ta)
2864 		cxt->tupnoLink[tb] = ta;
2865 
2866 	/*
2867 	 * For equal datums, sort by tupno
2868 	 */
2869 	return ta - tb;
2870 }
2871 
2872 /*
2873  * qsort comparator for sorting ScalarMCVItems by position
2874  */
2875 static int
2876 compare_mcvs(const void *a, const void *b)
2877 {
2878 	int			da = ((const ScalarMCVItem *) a)->first;
2879 	int			db = ((const ScalarMCVItem *) b)->first;
2880 
2881 	return da - db;
2882 }
2883 
2884 /*
2885  * Analyze the list of common values in the sample and decide how many are
2886  * worth storing in the table's MCV list.
2887  *
2888  * mcv_counts is assumed to be a list of the counts of the most common values
2889  * seen in the sample, starting with the most common.  The return value is the
2890  * number that are significantly more common than the values not in the list,
2891  * and which are therefore deemed worth storing in the table's MCV list.
2892  */
2893 static int
2894 analyze_mcv_list(int *mcv_counts,
2895 				 int num_mcv,
2896 				 double stadistinct,
2897 				 double stanullfrac,
2898 				 int samplerows,
2899 				 double totalrows)
2900 {
2901 	double		ndistinct_table;
2902 	double		sumcount;
2903 	int			i;
2904 
2905 	/*
2906 	 * If the entire table was sampled, keep the whole list.  This also
2907 	 * protects us against division by zero in the code below.
2908 	 */
2909 	if (samplerows == totalrows || totalrows <= 1.0)
2910 		return num_mcv;
2911 
2912 	/* Re-extract the estimated number of distinct nonnull values in table */
2913 	ndistinct_table = stadistinct;
2914 	if (ndistinct_table < 0)
2915 		ndistinct_table = -ndistinct_table * totalrows;
2916 
2917 	/*
2918 	 * Exclude the least common values from the MCV list, if they are not
2919 	 * significantly more common than the estimated selectivity they would
2920 	 * have if they weren't in the list.  All non-MCV values are assumed to be
2921 	 * equally common, after taking into account the frequencies of all the
2922 	 * the values in the MCV list and the number of nulls (c.f. eqsel()).
2923 	 *
2924 	 * Here sumcount tracks the total count of all but the last (least common)
2925 	 * value in the MCV list, allowing us to determine the effect of excluding
2926 	 * that value from the list.
2927 	 *
2928 	 * Note that we deliberately do this by removing values from the full
2929 	 * list, rather than starting with an empty list and adding values,
2930 	 * because the latter approach can fail to add any values if all the most
2931 	 * common values have around the same frequency and make up the majority
2932 	 * of the table, so that the overall average frequency of all values is
2933 	 * roughly the same as that of the common values.  This would lead to any
2934 	 * uncommon values being significantly overestimated.
2935 	 */
2936 	sumcount = 0.0;
2937 	for (i = 0; i < num_mcv - 1; i++)
2938 		sumcount += mcv_counts[i];
2939 
2940 	while (num_mcv > 0)
2941 	{
2942 		double		selec,
2943 					otherdistinct,
2944 					N,
2945 					n,
2946 					K,
2947 					variance,
2948 					stddev;
2949 
2950 		/*
2951 		 * Estimated selectivity the least common value would have if it
2952 		 * wasn't in the MCV list (c.f. eqsel()).
2953 		 */
2954 		selec = 1.0 - sumcount / samplerows - stanullfrac;
2955 		if (selec < 0.0)
2956 			selec = 0.0;
2957 		if (selec > 1.0)
2958 			selec = 1.0;
2959 		otherdistinct = ndistinct_table - (num_mcv - 1);
2960 		if (otherdistinct > 1)
2961 			selec /= otherdistinct;
2962 
2963 		/*
2964 		 * If the value is kept in the MCV list, its population frequency is
2965 		 * assumed to equal its sample frequency.  We use the lower end of a
2966 		 * textbook continuity-corrected Wald-type confidence interval to
2967 		 * determine if that is significantly more common than the non-MCV
2968 		 * frequency --- specifically we assume the population frequency is
2969 		 * highly likely to be within around 2 standard errors of the sample
2970 		 * frequency, which equates to an interval of 2 standard deviations
2971 		 * either side of the sample count, plus an additional 0.5 for the
2972 		 * continuity correction.  Since we are sampling without replacement,
2973 		 * this is a hypergeometric distribution.
2974 		 *
2975 		 * XXX: Empirically, this approach seems to work quite well, but it
2976 		 * may be worth considering more advanced techniques for estimating
2977 		 * the confidence interval of the hypergeometric distribution.
2978 		 */
2979 		N = totalrows;
2980 		n = samplerows;
2981 		K = N * mcv_counts[num_mcv - 1] / n;
2982 		variance = n * K * (N - K) * (N - n) / (N * N * (N - 1));
2983 		stddev = sqrt(variance);
2984 
2985 		if (mcv_counts[num_mcv - 1] > selec * samplerows + 2 * stddev + 0.5)
2986 		{
2987 			/*
2988 			 * The value is significantly more common than the non-MCV
2989 			 * selectivity would suggest.  Keep it, and all the other more
2990 			 * common values in the list.
2991 			 */
2992 			break;
2993 		}
2994 		else
2995 		{
2996 			/* Discard this value and consider the next least common value */
2997 			num_mcv--;
2998 			if (num_mcv == 0)
2999 				break;
3000 			sumcount -= mcv_counts[num_mcv - 1];
3001 		}
3002 	}
3003 	return num_mcv;
3004 }
3005