1 /*-------------------------------------------------------------------------
2  *
3  * plannodes.h
4  *	  definitions for query plan nodes
5  *
6  *
7  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * src/include/nodes/plannodes.h
11  *
12  *-------------------------------------------------------------------------
13  */
14 #ifndef PLANNODES_H
15 #define PLANNODES_H
16 
17 #include "access/sdir.h"
18 #include "access/stratnum.h"
19 #include "lib/stringinfo.h"
20 #include "nodes/bitmapset.h"
21 #include "nodes/lockoptions.h"
22 #include "nodes/primnodes.h"
23 
24 
25 /* ----------------------------------------------------------------
26  *						node definitions
27  * ----------------------------------------------------------------
28  */
29 
/* ----------------
 *		PlannedStmt node
 *
 * The output of the planner is a Plan tree headed by a PlannedStmt node.
 * PlannedStmt holds the "one time" information needed by the executor.
 *
 * For simplicity in APIs, we also wrap utility statements in PlannedStmt
 * nodes; in such cases, commandType == CMD_UTILITY, the statement itself
 * is in the utilityStmt field, and the rest of the struct is mostly dummy.
 * (We do use canSetTag, stmt_location, stmt_len, and possibly queryId.)
 * ----------------
 */
typedef struct PlannedStmt
{
	NodeTag		type;

	CmdType		commandType;	/* select|insert|update|delete|utility */

	uint64		queryId;		/* query identifier (copied from Query) */

	bool		hasReturning;	/* is it insert|update|delete RETURNING? */

	bool		hasModifyingCTE;	/* has insert|update|delete in WITH? */

	bool		canSetTag;		/* do I set the command result tag? */

	bool		transientPlan;	/* redo plan when TransactionXmin changes? */

	bool		dependsOnRole;	/* is plan specific to current role? */

	bool		parallelModeNeeded; /* parallel mode required to execute? */

	int			jitFlags;		/* which forms of JIT should be performed */

	struct Plan *planTree;		/* tree of Plan nodes */

	List	   *rtable;			/* list of RangeTblEntry nodes */

	/* rtable indexes of target relations for INSERT/UPDATE/DELETE */
	List	   *resultRelations;	/* integer list of RT indexes, or NIL */

	/*
	 * rtable indexes of partitioned table roots that are UPDATE/DELETE
	 * targets; needed for trigger firing.
	 */
	List	   *rootResultRelations;

	List	   *appendRelations;	/* list of AppendRelInfo nodes */

	List	   *subplans;		/* Plan trees for SubPlan expressions; note
								 * that some could be NULL */

	Bitmapset  *rewindPlanIDs;	/* indices of subplans that require REWIND */

	List	   *rowMarks;		/* a list of PlanRowMark's */

	List	   *relationOids;	/* OIDs of relations the plan depends on */

	List	   *invalItems;		/* other dependencies, as PlanInvalItems */

	List	   *paramExecTypes; /* type OIDs for PARAM_EXEC Params */

	Node	   *utilityStmt;	/* non-null if this is utility stmt */

	/* statement location in source string (copied from Query) */
	int			stmt_location;	/* start location, or -1 if unknown */
	int			stmt_len;		/* length in bytes; 0 means "rest of string" */
} PlannedStmt;
98 
/*
 * macro for fetching the Plan associated with a SubPlan node
 *
 * Note: SubPlan plan_ids are 1-based, hence the "- 1" when indexing into
 * the PlannedStmt's subplans list.
 */
#define exec_subplan_get_plan(plannedstmt, subplan) \
	((Plan *) list_nth((plannedstmt)->subplans, (subplan)->plan_id - 1))
102 
103 
/* ----------------
 *		Plan node
 *
 * All plan nodes "derive" from the Plan structure by having the
 * Plan structure as the first field.  This ensures that everything works
 * when nodes are cast to Plan's.  (node pointers are frequently cast to Plan*
 * when passed around generically in the executor)
 *
 * We never actually instantiate any Plan nodes; this is just the common
 * abstract superclass for all Plan-type nodes.
 * ----------------
 */
typedef struct Plan
{
	NodeTag		type;

	/*
	 * estimated execution costs for plan (see costsize.c for more info)
	 */
	Cost		startup_cost;	/* cost expended before fetching any tuples */
	Cost		total_cost;		/* total cost (assuming all tuples fetched) */

	/*
	 * planner's estimate of result size of this plan step
	 */
	double		plan_rows;		/* number of rows plan is expected to emit */
	int			plan_width;		/* average row width in bytes */

	/*
	 * information needed for parallel query
	 */
	bool		parallel_aware; /* engage parallel-aware logic? */
	bool		parallel_safe;	/* OK to use as part of parallel plan? */

	/*
	 * Common structural data for all Plan types.
	 */
	int			plan_node_id;	/* unique across entire final plan tree */
	List	   *targetlist;		/* target list to be computed at this node */
	List	   *qual;			/* implicitly-ANDed qual conditions */
	struct Plan *lefttree;		/* input plan tree(s) */
	struct Plan *righttree;		/* (see innerPlan/outerPlan macros below) */
	List	   *initPlan;		/* Init Plan nodes (un-correlated expr
								 * subselects) */

	/*
	 * Information for management of parameter-change-driven rescanning
	 *
	 * extParam includes the paramIDs of all external PARAM_EXEC params
	 * affecting this plan node or its children.  setParam params from the
	 * node's initPlans are not included, but their extParams are.
	 *
	 * allParam includes all the extParam paramIDs, plus the IDs of local
	 * params that affect the node (i.e., the setParams of its initplans).
	 * These are _all_ the PARAM_EXEC params that affect this node.
	 */
	Bitmapset  *extParam;
	Bitmapset  *allParam;
} Plan;
163 
/* ----------------
 *	these are defined to avoid confusion problems with "left"
 *	and "right" and "inner" and "outer".  The convention is that
 *	the "left" plan is the "outer" plan and the "right" plan is
 *	the inner plan, but these make the code more readable.
 * ----------------
 */
#define innerPlan(node)			(((Plan *)(node))->righttree)
#define outerPlan(node)			(((Plan *)(node))->lefttree)
173 
174 
/* ----------------
 *	 Result node -
 *		If no outer plan, evaluate a variable-free targetlist.
 *		If outer plan, return tuples from outer plan (after a level of
 *		projection as shown by targetlist).
 *
 * If resconstantqual isn't NULL, it represents a one-time qualification
 * test (i.e., one that doesn't depend on any variables from the outer plan,
 * so needs to be evaluated only once).
 * ----------------
 */
typedef struct Result
{
	Plan		plan;
	Node	   *resconstantqual;	/* one-time qual, or NULL (see above) */
} Result;
191 
/* ----------------
 *	 ProjectSet node -
 *		Apply a projection that includes set-returning functions to the
 *		output tuples of the outer plan.
 *
 * No fields beyond the common Plan ones; the SRF-bearing projection is
 * found in plan.targetlist.
 * ----------------
 */
typedef struct ProjectSet
{
	Plan		plan;
} ProjectSet;
202 
/* ----------------
 *	 ModifyTable node -
 *		Apply rows produced by subplan(s) to result table(s),
 *		by inserting, updating, or deleting.
 *
 * If the originally named target table is a partitioned table, both
 * nominalRelation and rootRelation contain the RT index of the partition
 * root, which is not otherwise mentioned in the plan.  Otherwise rootRelation
 * is zero.  However, nominalRelation will always be set, as it's the rel that
 * EXPLAIN should claim is the INSERT/UPDATE/DELETE target.
 *
 * Note that rowMarks and epqParam are presumed to be valid for all the
 * subplan(s); they can't contain any info that varies across subplans.
 * ----------------
 */
typedef struct ModifyTable
{
	Plan		plan;
	CmdType		operation;		/* INSERT, UPDATE, or DELETE */
	bool		canSetTag;		/* do we set the command tag/es_processed? */
	Index		nominalRelation;	/* Parent RT index for use of EXPLAIN */
	Index		rootRelation;	/* Root RT index, if target is partitioned */
	bool		partColsUpdated;	/* some part key in hierarchy updated */
	List	   *resultRelations;	/* integer list of RT indexes */
	int			resultRelIndex; /* index of first resultRel in plan's list */
	int			rootResultRelIndex; /* index of the partitioned table root */
	List	   *plans;			/* plan(s) producing source data */
	List	   *withCheckOptionLists;	/* per-target-table WCO lists */
	List	   *returningLists; /* per-target-table RETURNING tlists */
	List	   *fdwPrivLists;	/* per-target-table FDW private data lists */
	Bitmapset  *fdwDirectModifyPlans;	/* indices of FDW DM plans */
	List	   *rowMarks;		/* PlanRowMarks (non-locking only) */
	int			epqParam;		/* ID of Param for EvalPlanQual re-eval */
	OnConflictAction onConflictAction;	/* ON CONFLICT action */
	List	   *arbiterIndexes; /* List of ON CONFLICT arbiter index OIDs  */
	List	   *onConflictSet;	/* SET for INSERT ON CONFLICT DO UPDATE */
	Node	   *onConflictWhere;	/* WHERE for ON CONFLICT UPDATE */
	Index		exclRelRTI;		/* RTI of the EXCLUDED pseudo relation */
	List	   *exclRelTlist;	/* tlist of the EXCLUDED pseudo relation */
} ModifyTable;
243 
struct PartitionPruneInfo;		/* forward reference to struct below */

/* ----------------
 *	 Append node -
 *		Generate the concatenation of the results of sub-plans.
 * ----------------
 */
typedef struct Append
{
	Plan		plan;
	Bitmapset  *apprelids;		/* RTIs of appendrel(s) formed by this node */
	List	   *appendplans;	/* list of sub-plans to concatenate */

	/*
	 * All 'appendplans' preceding this index are non-partial plans. All
	 * 'appendplans' from this index onwards are partial plans.
	 */
	int			first_partial_plan;

	/* Info for run-time subplan pruning; NULL if we're not doing that */
	struct PartitionPruneInfo *part_prune_info;
} Append;
266 
/* ----------------
 *	 MergeAppend node -
 *		Merge the results of pre-sorted sub-plans to preserve the ordering.
 * ----------------
 */
typedef struct MergeAppend
{
	Plan		plan;
	Bitmapset  *apprelids;		/* RTIs of appendrel(s) formed by this node */
	List	   *mergeplans;		/* list of pre-sorted sub-plans to merge */
	/* these fields are just like the sort-key info in struct Sort: */
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
	/* Info for run-time subplan pruning; NULL if we're not doing that */
	struct PartitionPruneInfo *part_prune_info;
} MergeAppend;
286 
/* ----------------
 *	RecursiveUnion node -
 *		Generate a recursive union of two subplans.
 *
 * The "outer" subplan is always the non-recursive term, and the "inner"
 * subplan is the recursive term.
 * ----------------
 */
typedef struct RecursiveUnion
{
	Plan		plan;
	int			wtParam;		/* ID of Param representing work table */
	/* Remaining fields are zero/null in UNION ALL case */
	int			numCols;		/* number of columns to check for
								 * duplicate-ness */
	AttrNumber *dupColIdx;		/* their indexes in the target list */
	Oid		   *dupOperators;	/* equality operators to compare with */
	Oid		   *dupCollations;	/* collations for the comparisons */
	long		numGroups;		/* estimated number of groups in input */
} RecursiveUnion;
307 
/* ----------------
 *	 BitmapAnd node -
 *		Generate the intersection of the results of sub-plans.
 *
 * The subplans must be of types that yield tuple bitmaps.  The targetlist
 * and qual fields of the plan are unused and are always NIL.
 * ----------------
 */
typedef struct BitmapAnd
{
	Plan		plan;
	List	   *bitmapplans;	/* list of bitmap-yielding sub-plans */
} BitmapAnd;
321 
/* ----------------
 *	 BitmapOr node -
 *		Generate the union of the results of sub-plans.
 *
 * The subplans must be of types that yield tuple bitmaps.  The targetlist
 * and qual fields of the plan are unused and are always NIL.
 * ----------------
 */
typedef struct BitmapOr
{
	Plan		plan;
	bool		isshared;		/* create shared bitmap if set (cf. same
								 * field in BitmapIndexScan) */
	List	   *bitmapplans;	/* list of bitmap-yielding sub-plans */
} BitmapOr;
336 
/*
 * ==========
 * Scan nodes
 * ==========
 *
 * Scan is the abstract base for all scan-type plan nodes; scanrelid
 * identifies the relation being scanned.
 */
typedef struct Scan
{
	Plan		plan;
	Index		scanrelid;		/* relid is index into the range table */
} Scan;
347 
/* ----------------
 *		sequential scan node
 *
 * A SeqScan needs no fields beyond the common Scan ones.
 * ----------------
 */
typedef Scan SeqScan;
353 
/* ----------------
 *		table sample scan node
 * ----------------
 */
typedef struct SampleScan
{
	Scan		scan;
	/* use struct pointer to avoid including parsenodes.h here */
	struct TableSampleClause *tablesample;	/* TABLESAMPLE parameters */
} SampleScan;
364 
/* ----------------
 *		index scan node
 *
 * indexqualorig is an implicitly-ANDed list of index qual expressions, each
 * in the same form it appeared in the query WHERE condition.  Each should
 * be of the form (indexkey OP comparisonval) or (comparisonval OP indexkey).
 * The indexkey is a Var or expression referencing column(s) of the index's
 * base table.  The comparisonval might be any expression, but it won't use
 * any columns of the base table.  The expressions are ordered by index
 * column position (but items referencing the same index column can appear
 * in any order).  indexqualorig is used at runtime only if we have to recheck
 * a lossy indexqual.
 *
 * indexqual has the same form, but the expressions have been commuted if
 * necessary to put the indexkeys on the left, and the indexkeys are replaced
 * by Var nodes identifying the index columns (their varno is INDEX_VAR and
 * their varattno is the index column number).
 *
 * indexorderbyorig is similarly the original form of any ORDER BY expressions
 * that are being implemented by the index, while indexorderby is modified to
 * have index column Vars on the left-hand side.  Here, multiple expressions
 * must appear in exactly the ORDER BY order, and this is not necessarily the
 * index column order.  Only the expressions are provided, not the auxiliary
 * sort-order information from the ORDER BY SortGroupClauses; it's assumed
 * that the sort ordering is fully determinable from the top-level operators.
 * indexorderbyorig is used at runtime to recheck the ordering, if the index
 * cannot calculate an accurate ordering.  It is also needed for EXPLAIN.
 *
 * indexorderbyops is a list of the OIDs of the operators used to sort the
 * ORDER BY expressions.  This is used together with indexorderbyorig to
 * recheck ordering at run time.  (Note that indexorderby, indexorderbyorig,
 * and indexorderbyops are used for amcanorderbyop cases, not amcanorder.)
 *
 * indexorderdir specifies the scan ordering, for indexscans on amcanorder
 * indexes (for other indexes it should be "don't care").
 * ----------------
 */
typedef struct IndexScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	List	   *indexqual;		/* list of index quals (usually OpExprs) */
	List	   *indexqualorig;	/* the same in original form */
	List	   *indexorderby;	/* list of index ORDER BY exprs */
	List	   *indexorderbyorig;	/* the same in original form */
	List	   *indexorderbyops;	/* OIDs of sort ops for ORDER BY exprs */
	ScanDirection indexorderdir;	/* forward or backward or don't care */
} IndexScan;
413 
/* ----------------
 *		index-only scan node
 *
 * IndexOnlyScan is very similar to IndexScan, but it specifies an
 * index-only scan, in which the data comes from the index not the heap.
 * Because of this, *all* Vars in the plan node's targetlist, qual, and
 * index expressions reference index columns and have varno = INDEX_VAR.
 * Hence we do not need separate indexqualorig and indexorderbyorig lists,
 * since their contents would be equivalent to indexqual and indexorderby.
 *
 * To help EXPLAIN interpret the index Vars for display, we provide
 * indextlist, which represents the contents of the index as a targetlist
 * with one TLE per index column.  Vars appearing in this list reference
 * the base table, and this is the only field in the plan node that may
 * contain such Vars.
 * ----------------
 */
typedef struct IndexOnlyScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	List	   *indexqual;		/* list of index quals (usually OpExprs) */
	List	   *indexorderby;	/* list of index ORDER BY exprs */
	List	   *indextlist;		/* TargetEntry list describing index's cols */
	ScanDirection indexorderdir;	/* forward or backward or don't care */
} IndexOnlyScan;
440 
/* ----------------
 *		bitmap index scan node
 *
 * BitmapIndexScan delivers a bitmap of potential tuple locations;
 * it does not access the heap itself.  The bitmap is used by an
 * ancestor BitmapHeapScan node, possibly after passing through
 * intermediate BitmapAnd and/or BitmapOr nodes to combine it with
 * the results of other BitmapIndexScans.
 *
 * The fields have the same meanings as for IndexScan, except we don't
 * store a direction flag because direction is uninteresting.
 *
 * In a BitmapIndexScan plan node, the targetlist and qual fields are
 * not used and are always NIL.  The indexqualorig field is unused at
 * run time too, but is saved for the benefit of EXPLAIN.
 * ----------------
 */
typedef struct BitmapIndexScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	bool		isshared;		/* Create shared bitmap if set */
	List	   *indexqual;		/* list of index quals (OpExprs) */
	List	   *indexqualorig;	/* the same in original form */
} BitmapIndexScan;
466 
/* ----------------
 *		bitmap sequential scan node
 *
 * This needs a copy of the qual conditions being used by the input index
 * scans because there are various cases where we need to recheck the quals;
 * for example, when the bitmap is lossy about the specific rows on a page
 * that meet the index condition.
 * ----------------
 */
typedef struct BitmapHeapScan
{
	Scan		scan;
	List	   *bitmapqualorig; /* index quals, in standard expr form */
} BitmapHeapScan;
481 
/* ----------------
 *		tid scan node
 *
 * tidquals is an implicitly OR'ed list of qual expressions of the form
 * "CTID = pseudoconstant", or "CTID = ANY(pseudoconstant_array)",
 * or a CurrentOfExpr for the relation.
 * ----------------
 */
typedef struct TidScan
{
	Scan		scan;
	List	   *tidquals;		/* qual(s) involving CTID = something */
} TidScan;
495 
/* ----------------
 *		subquery scan node
 *
 * SubqueryScan is for scanning the output of a sub-query in the range table.
 * We often need an extra plan node above the sub-query's plan to perform
 * expression evaluations (which we can't push into the sub-query without
 * risking changing its semantics).  Although we are not scanning a physical
 * relation, we make this a descendant of Scan anyway for code-sharing
 * purposes.
 *
 * Note: we store the sub-plan in the type-specific subplan field, not in
 * the generic lefttree field as you might expect.  This is because we do
 * not want plan-tree-traversal routines to recurse into the subplan without
 * knowing that they are changing Query contexts.
 * ----------------
 */
typedef struct SubqueryScan
{
	Scan		scan;
	Plan	   *subplan;		/* sub-query's plan tree (see note above) */
} SubqueryScan;
517 
/* ----------------
 *		FunctionScan node
 *
 * Scans the result(s) of function call(s) appearing in a range table.
 * ----------------
 */
typedef struct FunctionScan
{
	Scan		scan;
	List	   *functions;		/* list of RangeTblFunction nodes */
	bool		funcordinality; /* WITH ORDINALITY */
} FunctionScan;
528 
/* ----------------
 *		ValuesScan node
 *
 * Scans a VALUES list appearing in a range table.
 * ----------------
 */
typedef struct ValuesScan
{
	Scan		scan;
	List	   *values_lists;	/* list of expression lists */
} ValuesScan;
538 
/* ----------------
 *		TableFunc scan node
 * ----------------
 */
typedef struct TableFuncScan
{
	Scan		scan;
	TableFunc  *tablefunc;		/* table function node */
} TableFuncScan;
548 
/* ----------------
 *		CteScan node
 *
 * Scans the output of a common table expression (WITH query).
 * ----------------
 */
typedef struct CteScan
{
	Scan		scan;
	int			ctePlanId;		/* ID of init SubPlan for CTE */
	int			cteParam;		/* ID of Param representing CTE output */
} CteScan;
559 
/* ----------------
 *		NamedTuplestoreScan node
 * ----------------
 */
typedef struct NamedTuplestoreScan
{
	Scan		scan;
	char	   *enrname;		/* Name given to Ephemeral Named Relation */
} NamedTuplestoreScan;
569 
/* ----------------
 *		WorkTableScan node
 *
 * Scans the work table of a RecursiveUnion (see wtParam there).
 * ----------------
 */
typedef struct WorkTableScan
{
	Scan		scan;
	int			wtParam;		/* ID of Param representing work table */
} WorkTableScan;
579 
/* ----------------
 *		ForeignScan node
 *
 * fdw_exprs and fdw_private are both under the control of the foreign-data
 * wrapper, but fdw_exprs is presumed to contain expression trees and will
 * be post-processed accordingly by the planner; fdw_private won't be.
 * Note that everything in both lists must be copiable by copyObject().
 * One way to store an arbitrary blob of bytes is to represent it as a bytea
 * Const.  Usually, though, you'll be better off choosing a representation
 * that can be dumped usefully by nodeToString().
 *
 * fdw_scan_tlist is a targetlist describing the contents of the scan tuple
 * returned by the FDW; it can be NIL if the scan tuple matches the declared
 * rowtype of the foreign table, which is the normal case for a simple foreign
 * table scan.  (If the plan node represents a foreign join, fdw_scan_tlist
 * is required since there is no rowtype available from the system catalogs.)
 * When fdw_scan_tlist is provided, Vars in the node's tlist and quals must
 * have varno INDEX_VAR, and their varattnos correspond to resnos in the
 * fdw_scan_tlist (which are also column numbers in the actual scan tuple).
 * fdw_scan_tlist is never actually executed; it just holds expression trees
 * describing what is in the scan tuple's columns.
 *
 * fdw_recheck_quals should contain any quals which the core system passed to
 * the FDW but which were not added to scan.plan.qual; that is, it should
 * contain the quals being checked remotely.  This is needed for correct
 * behavior during EvalPlanQual rechecks.
 *
 * When the plan node represents a foreign join, scan.scanrelid is zero and
 * fs_relids must be consulted to identify the join relation.  (fs_relids
 * is valid for simple scans as well, but will always match scan.scanrelid.)
 * ----------------
 */
typedef struct ForeignScan
{
	Scan		scan;
	CmdType		operation;		/* SELECT/INSERT/UPDATE/DELETE */
	Oid			fs_server;		/* OID of foreign server */
	List	   *fdw_exprs;		/* expressions that FDW may evaluate */
	List	   *fdw_private;	/* private data for FDW */
	List	   *fdw_scan_tlist; /* optional tlist describing scan tuple */
	List	   *fdw_recheck_quals;	/* original quals not in scan.plan.qual */
	Bitmapset  *fs_relids;		/* RTIs generated by this scan */
	bool		fsSystemCol;	/* true if any "system column" is needed */
} ForeignScan;
624 
/* ----------------
 *	   CustomScan node
 *
 * The comments for ForeignScan's fdw_exprs, fdw_private, fdw_scan_tlist,
 * and fs_relids fields apply equally to CustomScan's custom_exprs,
 * custom_private, custom_scan_tlist, and custom_relids fields.  The
 * convention of setting scan.scanrelid to zero for joins applies as well.
 *
 * Note that since Plan trees can be copied, custom scan providers *must*
 * fit all plan data they need into those fields; embedding CustomScan in
 * a larger struct will not work.
 * ----------------
 */
struct CustomScanMethods;

typedef struct CustomScan
{
	Scan		scan;
	uint32		flags;			/* mask of CUSTOMPATH_* flags, see
								 * nodes/extensible.h */
	List	   *custom_plans;	/* list of Plan nodes, if any */
	List	   *custom_exprs;	/* expressions that custom code may evaluate */
	List	   *custom_private; /* private data for custom code */
	List	   *custom_scan_tlist;	/* optional tlist describing scan tuple */
	Bitmapset  *custom_relids;	/* RTIs generated by this scan */
	const struct CustomScanMethods *methods;	/* provider's callback table
												 * (see nodes/extensible.h) */
} CustomScan;
652 
/*
 * ==========
 * Join nodes
 * ==========
 */

/* ----------------
 *		Join node
 *
 * jointype:	rule for joining tuples from left and right subtrees
 * inner_unique each outer tuple can match to no more than one inner tuple
 * joinqual:	qual conditions that came from JOIN/ON or JOIN/USING
 *				(plan.qual contains conditions that came from WHERE)
 *
 * When jointype is INNER, joinqual and plan.qual are semantically
 * interchangeable.  For OUTER jointypes, the two are *not* interchangeable;
 * only joinqual is used to determine whether a match has been found for
 * the purpose of deciding whether to generate null-extended tuples.
 * (But plan.qual is still applied before actually returning a tuple.)
 * For an outer join, only joinquals are allowed to be used as the merge
 * or hash condition of a merge or hash join.
 *
 * inner_unique is set if the joinquals are such that no more than one inner
 * tuple could match any given outer tuple.  This allows the executor to
 * skip searching for additional matches.  (This must be provable from just
 * the joinquals, ignoring plan.qual, due to where the executor tests it.)
 * ----------------
 */
typedef struct Join
{
	Plan		plan;
	JoinType	jointype;		/* rule for joining tuples (see above) */
	bool		inner_unique;	/* at most one inner match per outer tuple? */
	List	   *joinqual;		/* JOIN quals (in addition to plan.qual) */
} Join;
688 
/* ----------------
 *		nest loop join node
 *
 * The nestParams list identifies any executor Params that must be passed
 * into execution of the inner subplan carrying values from the current row
 * of the outer subplan.  Currently we restrict these values to be simple
 * Vars, but perhaps someday that'd be worth relaxing.  (Note: during plan
 * creation, the paramval can actually be a PlaceHolderVar expression; but it
 * must be a Var with varno OUTER_VAR by the time it gets to the executor.)
 * ----------------
 */
typedef struct NestLoop
{
	Join		join;
	List	   *nestParams;		/* list of NestLoopParam nodes */
} NestLoop;
705 
/* one outer-row value passed into the inner side of a NestLoop */
typedef struct NestLoopParam
{
	NodeTag		type;
	int			paramno;		/* number of the PARAM_EXEC Param to set */
	Var		   *paramval;		/* outer-relation Var to assign to Param */
} NestLoopParam;
712 
/* ----------------
 *		merge join node
 *
 * The expected ordering of each mergeable column is described by a btree
 * opfamily OID, a collation OID, a direction (BTLessStrategyNumber or
 * BTGreaterStrategyNumber) and a nulls-first flag.  Note that the two sides
 * of each mergeclause may be of different datatypes, but they are ordered the
 * same way according to the common opfamily and collation.  The operator in
 * each mergeclause must be an equality operator of the indicated opfamily.
 * ----------------
 */
typedef struct MergeJoin
{
	Join		join;
	bool		skip_mark_restore;	/* Can we skip mark/restore calls? */
	List	   *mergeclauses;	/* mergeclauses as expression trees */
	/* these are arrays, but have the same length as the mergeclauses list: */
	Oid		   *mergeFamilies;	/* per-clause OIDs of btree opfamilies */
	Oid		   *mergeCollations;	/* per-clause OIDs of collations */
	int		   *mergeStrategies;	/* per-clause ordering (ASC or DESC) */
	bool	   *mergeNullsFirst;	/* per-clause nulls ordering */
} MergeJoin;
735 
/* ----------------
 *		hash join node
 * ----------------
 */
typedef struct HashJoin
{
	Join		join;
	List	   *hashclauses;	/* hash-join clauses as expression trees */
	/* these lists parallel hashclauses, one entry per clause: */
	List	   *hashoperators;	/* per-clause hash equality operator OIDs */
	List	   *hashcollations; /* per-clause collation OIDs */

	/*
	 * List of expressions to be hashed for tuples from the outer plan, to
	 * perform lookups in the hashtable over the inner plan.
	 */
	List	   *hashkeys;
} HashJoin;
753 
/* ----------------
 *		materialization node
 *
 * No fields beyond the common Plan ones; materializes the output of its
 * outer plan.
 * ----------------
 */
typedef struct Material
{
	Plan		plan;
} Material;
762 
/* ----------------
 *		sort node
 * ----------------
 */
typedef struct Sort
{
	Plan		plan;
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
} Sort;
776 
/* ----------------
 *		incremental sort node
 *
 * Like Sort, but the input is already sorted on the first nPresortedCols
 * of the sort keys.
 * ----------------
 */
typedef struct IncrementalSort
{
	Sort		sort;
	int			nPresortedCols; /* number of presorted columns */
} IncrementalSort;
786 
/* ---------------
 *	 group node -
 *		Used for queries with GROUP BY (but no aggregates) specified.
 *		The input must be presorted according to the grouping columns.
 * ---------------
 */
typedef struct Group
{
	Plan		plan;
	int			numCols;		/* number of grouping columns */
	AttrNumber *grpColIdx;		/* their indexes in the target list */
	Oid		   *grpOperators;	/* equality operators to compare with */
	Oid		   *grpCollations;	/* collations for the comparisons */
} Group;
801 
/* ---------------
 *		aggregate node
 *
 * An Agg node implements plain or grouped aggregation.  For grouped
 * aggregation, we can work with presorted input or unsorted input;
 * the latter strategy uses an internal hashtable.
 *
 * Notice the lack of any direct info about the aggregate functions to be
 * computed.  They are found by scanning the node's tlist and quals during
 * executor startup.  (It is possible that there are no aggregate functions;
 * this could happen if they get optimized away by constant-folding, or if
 * we are using the Agg node to implement hash-based grouping.)
 * ---------------
 */
typedef struct Agg
{
	Plan		plan;
	AggStrategy aggstrategy;	/* basic strategy, see nodes.h */
	AggSplit	aggsplit;		/* agg-splitting mode, see nodes.h */
	int			numCols;		/* number of grouping columns */
	AttrNumber *grpColIdx;		/* their indexes in the target list */
	Oid		   *grpOperators;	/* equality operators to compare with */
	Oid		   *grpCollations;	/* collations for the comparisons */
	long		numGroups;		/* estimated number of groups in input */
	uint64		transitionSpace;	/* for pass-by-ref transition data */
	Bitmapset  *aggParams;		/* IDs of Params used in Aggref inputs */
	/* Note: planner provides numGroups & aggParams only in HASHED/MIXED case */
	List	   *groupingSets;	/* grouping sets to use */
	List	   *chain;			/* chained Agg/Sort nodes */
} Agg;
832 
833 /* ----------------
834  *		window aggregate node
835  * ----------------
836  */
typedef struct WindowAgg
{
	Plan		plan;
	Index		winref;			/* ID referenced by window functions */
	int			partNumCols;	/* number of columns in partition clause */
	AttrNumber *partColIdx;		/* their indexes in the target list */
	Oid		   *partOperators;	/* equality operators for partition columns */
	Oid		   *partCollations; /* collations for partition columns */
	int			ordNumCols;		/* number of columns in ordering clause */
	AttrNumber *ordColIdx;		/* their indexes in the target list */
	Oid		   *ordOperators;	/* equality operators for ordering columns */
	Oid		   *ordCollations;	/* collations for ordering columns */
	int			frameOptions;	/* frame_clause options, see WindowDef */
	Node	   *startOffset;	/* expression for starting bound, if any */
	Node	   *endOffset;		/* expression for ending bound, if any */
	/* these fields are used only with RANGE offset PRECEDING/FOLLOWING: */
	Oid			startInRangeFunc;	/* in_range function for startOffset */
	Oid			endInRangeFunc; /* in_range function for endOffset */
	Oid			inRangeColl;	/* collation for in_range tests */
	bool		inRangeAsc;		/* use ASC sort order for in_range tests? */
	bool		inRangeNullsFirst;	/* nulls sort first for in_range tests? */
} WindowAgg;
859 
860 /* ----------------
861  *		unique node
862  * ----------------
863  */
/*
 * NOTE(review): like Group above, Unique presumably expects its input to be
 * presorted on the checked columns so duplicates arrive adjacently — confirm.
 */
typedef struct Unique
{
	Plan		plan;
	int			numCols;		/* number of columns to check for uniqueness */
	AttrNumber *uniqColIdx;		/* their indexes in the target list */
	Oid		   *uniqOperators;	/* equality operators to compare with */
	Oid		   *uniqCollations; /* collations for equality comparisons */
} Unique;
872 
873 /* ------------
874  *		gather node
875  *
876  * Note: rescan_param is the ID of a PARAM_EXEC parameter slot.  That slot
877  * will never actually contain a value, but the Gather node must flag it as
878  * having changed whenever it is rescanned.  The child parallel-aware scan
879  * nodes are marked as depending on that parameter, so that the rescan
880  * machinery is aware that their output is likely to change across rescans.
881  * In some cases we don't need a rescan Param, so rescan_param is set to -1.
882  * ------------
883  */
typedef struct Gather
{
	Plan		plan;
	int			num_workers;	/* planned number of worker processes */
	int			rescan_param;	/* ID of Param that signals a rescan, or -1 */
	bool		single_copy;	/* don't execute plan more than once */
	bool		invisible;		/* suppress EXPLAIN display (for testing)? */
	Bitmapset  *initParam;		/* param IDs of initplans that are referred to
								 * at this Gather node or in its child nodes */
} Gather;
894 
895 /* ------------
896  *		gather merge node
897  * ------------
898  */
typedef struct GatherMerge
{
	Plan		plan;
	int			num_workers;	/* planned number of worker processes */
	int			rescan_param;	/* ID of Param that signals a rescan, or -1 */
	/* remaining fields are just like the sort-key info in struct Sort */
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
	Bitmapset  *initParam;		/* param IDs of initplans that are referred to
								 * at this node or in its child nodes */
} GatherMerge;
913 
914 /* ----------------
915  *		hash build node
916  *
917  * If the executor is supposed to try to apply skew join optimization, then
918  * skewTable/skewColumn/skewInherit identify the outer relation's join key
919  * column, from which the relevant MCV statistics can be fetched.
920  * ----------------
921  */
typedef struct Hash
{
	Plan		plan;

	/*
	 * List of expressions to be hashed for tuples from Hash's outer plan,
	 * needed to put them into the hashtable.
	 */
	List	   *hashkeys;		/* hash keys for the hashjoin condition */
	/* skew-optimization info; see the header comment above this struct */
	Oid			skewTable;		/* outer join key's table OID, or InvalidOid */
	AttrNumber	skewColumn;		/* outer join key's column #, or zero */
	bool		skewInherit;	/* is outer join rel an inheritance tree? */
	/* all other info is in the parent HashJoin node */
	double		rows_total;		/* estimate of total rows if parallel_aware */
} Hash;
937 
938 /* ----------------
939  *		setop node
940  * ----------------
941  */
typedef struct SetOp
{
	Plan		plan;
	SetOpCmd	cmd;			/* what to do, see nodes.h */
	SetOpStrategy strategy;		/* how to do it, see nodes.h */
	int			numCols;		/* number of columns to check for
								 * duplicate-ness */
	AttrNumber *dupColIdx;		/* their indexes in the target list */
	Oid		   *dupOperators;	/* equality operators to compare with */
	Oid		   *dupCollations;	/* collations for the duplicate-check columns */
	AttrNumber	flagColIdx;		/* where is the flag column, if any */
	int			firstFlag;		/* flag value for first input relation */
	long		numGroups;		/* estimated number of groups in input */
} SetOp;
956 
957 /* ----------------
958  *		lock-rows node
959  *
960  * rowMarks identifies the rels to be locked by this node; it should be
961  * a subset of the rowMarks listed in the top-level PlannedStmt.
962  * epqParam is a Param that all scan nodes below this one must depend on.
963  * It is used to force re-evaluation of the plan during EvalPlanQual.
964  * ----------------
965  */
typedef struct LockRows
{
	Plan		plan;
	List	   *rowMarks;		/* a list of PlanRowMark's (see below) */
	int			epqParam;		/* ID of Param for EvalPlanQual re-eval */
} LockRows;
972 
973 /* ----------------
974  *		limit node
975  *
976  * Note: as of Postgres 8.2, the offset and count expressions are expected
977  * to yield int8, rather than int4 as before.
978  * ----------------
979  */
typedef struct Limit
{
	Plan		plan;
	Node	   *limitOffset;	/* OFFSET parameter, or NULL if none */
	Node	   *limitCount;		/* COUNT parameter, or NULL if none */
	LimitOption limitOption;	/* limit type */
	int			uniqNumCols;	/* number of columns to check for similarity */
	AttrNumber *uniqColIdx;		/* their indexes in the target list */
	Oid		   *uniqOperators;	/* equality operators to compare with */
	Oid		   *uniqCollations; /* collations for equality comparisons */
} Limit;
991 
992 
993 /*
994  * RowMarkType -
995  *	  enums for types of row-marking operations
996  *
997  * The first four of these values represent different lock strengths that
998  * we can take on tuples according to SELECT FOR [KEY] UPDATE/SHARE requests.
999  * We support these on regular tables, as well as on foreign tables whose FDWs
1000  * report support for late locking.  For other foreign tables, any locking
1001  * that might be done for such requests must happen during the initial row
1002  * fetch; their FDWs provide no mechanism for going back to lock a row later.
1003  * This means that the semantics will be a bit different than for a local
1004  * table; in particular we are likely to lock more rows than would be locked
1005  * locally, since remote rows will be locked even if they then fail
1006  * locally-checked restriction or join quals.  However, the prospect of
1007  * doing a separate remote query to lock each selected row is usually pretty
1008  * unappealing, so early locking remains a credible design choice for FDWs.
1009  *
1010  * When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we have to uniquely
1011  * identify all the source rows, not only those from the target relations, so
1012  * that we can perform EvalPlanQual rechecking at need.  For plain tables we
1013  * can just fetch the TID, much as for a target relation; this case is
1014  * represented by ROW_MARK_REFERENCE.  Otherwise (for example for VALUES or
1015  * FUNCTION scans) we have to copy the whole row value.  ROW_MARK_COPY is
1016  * pretty inefficient, since most of the time we'll never need the data; but
1017  * fortunately the overhead is usually not performance-critical in practice.
1018  * By default we use ROW_MARK_COPY for foreign tables, but if the FDW has
1019  * a concept of rowid it can request to use ROW_MARK_REFERENCE instead.
1020  * (Again, this probably doesn't make sense if a physical remote fetch is
1021  * needed, but for FDWs that map to local storage it might be credible.)
1022  */
typedef enum RowMarkType
{
	ROW_MARK_EXCLUSIVE,			/* obtain exclusive tuple lock */
	ROW_MARK_NOKEYEXCLUSIVE,	/* obtain no-key exclusive tuple lock */
	ROW_MARK_SHARE,				/* obtain shared tuple lock */
	ROW_MARK_KEYSHARE,			/* obtain keyshare tuple lock */
	ROW_MARK_REFERENCE,			/* just fetch the TID, don't lock it */
	ROW_MARK_COPY				/* physically copy the row value */
} RowMarkType;

/* true if the mark type takes a tuple lock (the first four enum values) */
#define RowMarkRequiresRowShareLock(marktype)  ((marktype) <= ROW_MARK_KEYSHARE)
1034 
1035 /*
1036  * PlanRowMark -
1037  *	   plan-time representation of FOR [KEY] UPDATE/SHARE clauses
1038  *
1039  * When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we create a separate
1040  * PlanRowMark node for each non-target relation in the query.  Relations that
1041  * are not specified as FOR UPDATE/SHARE are marked ROW_MARK_REFERENCE (if
1042  * regular tables or supported foreign tables) or ROW_MARK_COPY (if not).
1043  *
1044  * Initially all PlanRowMarks have rti == prti and isParent == false.
1045  * When the planner discovers that a relation is the root of an inheritance
1046  * tree, it sets isParent true, and adds an additional PlanRowMark to the
1047  * list for each child relation (including the target rel itself in its role
 * as a child).  isParent is also set to true for the partitioned child
 * relations, which, like the root parent, are not scanned.  The child
1050  * entries have rti == child rel's RT index and prti == parent's RT index,
1051  * and can therefore be recognized as children by the fact that prti != rti.
1052  * The parent's allMarkTypes field gets the OR of (1<<markType) across all
1053  * its children (this definition allows children to use different markTypes).
1054  *
1055  * The planner also adds resjunk output columns to the plan that carry
1056  * information sufficient to identify the locked or fetched rows.  When
1057  * markType != ROW_MARK_COPY, these columns are named
1058  *		tableoid%u			OID of table
1059  *		ctid%u				TID of row
1060  * The tableoid column is only present for an inheritance hierarchy.
1061  * When markType == ROW_MARK_COPY, there is instead a single column named
1062  *		wholerow%u			whole-row value of relation
1063  * (An inheritance hierarchy could have all three resjunk output columns,
1064  * if some children use a different markType than others.)
1065  * In all three cases, %u represents the rowmark ID number (rowmarkId).
1066  * This number is unique within a plan tree, except that child relation
1067  * entries copy their parent's rowmarkId.  (Assigning unique numbers
1068  * means we needn't renumber rowmarkIds when flattening subqueries, which
1069  * would require finding and renaming the resjunk columns as well.)
1070  * Note this means that all tables in an inheritance hierarchy share the
1071  * same resjunk column names.  However, in an inherited UPDATE/DELETE the
1072  * columns could have different physical column numbers in each subplan.
1073  */
typedef struct PlanRowMark
{
	NodeTag		type;
	Index		rti;			/* range table index of markable relation */
	Index		prti;			/* range table index of parent relation */
	Index		rowmarkId;		/* unique identifier for resjunk columns */
	RowMarkType markType;		/* see RowMarkType enum above */
	int			allMarkTypes;	/* OR of (1<<markType) for all children */
	LockClauseStrength strength;	/* LockingClause's strength, or LCS_NONE */
	LockWaitPolicy waitPolicy;	/* NOWAIT and SKIP LOCKED options */
	bool		isParent;		/* true if this is a "dummy" parent entry */
} PlanRowMark;
1086 
1087 
1088 /*
1089  * Node types to represent partition pruning information.
1090  */
1091 
1092 /*
1093  * PartitionPruneInfo - Details required to allow the executor to prune
1094  * partitions.
1095  *
1096  * Here we store mapping details to allow translation of a partitioned table's
1097  * index as returned by the partition pruning code into subplan indexes for
1098  * plan types which support arbitrary numbers of subplans, such as Append.
1099  * We also store various details to tell the executor when it should be
1100  * performing partition pruning.
1101  *
1102  * Each PartitionedRelPruneInfo describes the partitioning rules for a single
1103  * partitioned table (a/k/a level of partitioning).  Since a partitioning
1104  * hierarchy could contain multiple levels, we represent it by a List of
1105  * PartitionedRelPruneInfos, where the first entry represents the topmost
1106  * partitioned table and additional entries represent non-leaf child
1107  * partitions, ordered such that parents appear before their children.
1108  * Then, since an Append-type node could have multiple partitioning
1109  * hierarchies among its children, we have an unordered List of those Lists.
1110  *
1111  * prune_infos			List of Lists containing PartitionedRelPruneInfo nodes,
1112  *						one sublist per run-time-prunable partition hierarchy
1113  *						appearing in the parent plan node's subplans.
1114  * other_subplans		Indexes of any subplans that are not accounted for
1115  *						by any of the PartitionedRelPruneInfo nodes in
1116  *						"prune_infos".  These subplans must not be pruned.
1117  */
typedef struct PartitionPruneInfo
{
	NodeTag		type;
	List	   *prune_infos;	/* List of Lists of PartitionedRelPruneInfo,
								 * one sublist per partition hierarchy */
	Bitmapset  *other_subplans; /* subplan indexes not accounted for by
								 * prune_infos; these must never be pruned */
} PartitionPruneInfo;
1124 
1125 /*
1126  * PartitionedRelPruneInfo - Details required to allow the executor to prune
1127  * partitions for a single partitioned table.
1128  *
1129  * subplan_map[] and subpart_map[] are indexed by partition index of the
1130  * partitioned table referenced by 'rtindex', the partition index being the
1131  * order that the partitions are defined in the table's PartitionDesc.  For a
1132  * leaf partition p, subplan_map[p] contains the zero-based index of the
1133  * partition's subplan in the parent plan's subplan list; it is -1 if the
1134  * partition is non-leaf or has been pruned.  For a non-leaf partition p,
1135  * subpart_map[p] contains the zero-based index of that sub-partition's
1136  * PartitionedRelPruneInfo in the hierarchy's PartitionedRelPruneInfo list;
1137  * it is -1 if the partition is a leaf or has been pruned.  Note that subplan
1138  * indexes, as stored in 'subplan_map', are global across the parent plan
1139  * node, but partition indexes are valid only within a particular hierarchy.
1140  * relid_map[p] contains the partition's OID, or 0 if the partition was pruned.
1141  */
typedef struct PartitionedRelPruneInfo
{
	NodeTag		type;
	Index		rtindex;		/* RT index of partition rel for this level */
	Bitmapset  *present_parts;	/* indexes of all partitions for which
								 * subplans or subparts are present */
	int			nparts;			/* length of the following arrays: */
	int		   *subplan_map;	/* subplan index by partition index, or -1 */
	int		   *subpart_map;	/* subpart index by partition index, or -1 */
	Oid		   *relid_map;		/* relation OID by partition index, or 0 */

	/*
	 * initial_pruning_steps shows how to prune during executor startup (i.e.,
	 * without use of any PARAM_EXEC Params); it is NIL if no startup pruning
	 * is required.  exec_pruning_steps shows how to prune with PARAM_EXEC
	 * Params; it is NIL if no per-scan pruning is required.
	 */
	List	   *initial_pruning_steps;	/* List of PartitionPruneStep */
	List	   *exec_pruning_steps; /* List of PartitionPruneStep */
	Bitmapset  *execparamids;	/* All PARAM_EXEC Param IDs in
								 * exec_pruning_steps */
} PartitionedRelPruneInfo;
1164 
1165 /*
1166  * Abstract Node type for partition pruning steps (there are no concrete
1167  * Nodes of this type).
1168  *
1169  * step_id is the global identifier of the step within its pruning context.
1170  */
typedef struct PartitionPruneStep
{
	NodeTag		type;
	int			step_id;		/* global identifier of the step within its
								 * pruning context */
} PartitionPruneStep;
1176 
1177 /*
1178  * PartitionPruneStepOp - Information to prune using a set of mutually AND'd
1179  *							OpExpr clauses
1180  *
1181  * This contains information extracted from up to partnatts OpExpr clauses,
1182  * where partnatts is the number of partition key columns.  'opstrategy' is the
1183  * strategy of the operator in the clause matched to the last partition key.
1184  * 'exprs' contains expressions which comprise the lookup key to be passed to
1185  * the partition bound search function.  'cmpfns' contains the OIDs of
1186  * comparison functions used to compare aforementioned expressions with
1187  * partition bounds.  Both 'exprs' and 'cmpfns' contain the same number of
1188  * items, up to partnatts items.
1189  *
1190  * Once we find the offset of a partition bound using the lookup key, we
1191  * determine which partitions to include in the result based on the value of
1192  * 'opstrategy'.  For example, if it were equality, we'd return just the
1193  * partition that would contain that key or a set of partitions if the key
1194  * didn't consist of all partitioning columns.  For non-equality strategies,
1195  * we'd need to include other partitions as appropriate.
1196  *
1197  * 'nullkeys' is the set containing the offset of the partition keys (0 to
1198  * partnatts - 1) that were matched to an IS NULL clause.  This is only
1199  * considered for hash partitioning as we need to pass which keys are null
1200  * to the hash partition bound search function.  It is never possible to
1201  * have an expression be present in 'exprs' for a given partition key and
1202  * the corresponding bit set in 'nullkeys'.
1203  */
typedef struct PartitionPruneStepOp
{
	PartitionPruneStep step;

	StrategyNumber opstrategy;	/* strategy of the operator matched to the
								 * last partition key */
	List	   *exprs;			/* lookup-key expressions, one per matched
								 * partition key (up to partnatts items) */
	List	   *cmpfns;			/* OIDs of comparison functions, parallel to
								 * exprs */
	Bitmapset  *nullkeys;		/* offsets (0 .. partnatts-1) of partition
								 * keys matched to an IS NULL clause; used
								 * only for hash partitioning */
} PartitionPruneStepOp;
1213 
1214 /*
1215  * PartitionPruneStepCombine - Information to prune using a BoolExpr clause
1216  *
1217  * For BoolExpr clauses, we combine the set of partitions determined for each
1218  * of the argument clauses.
1219  */
typedef enum PartitionPruneCombineOp
{
	PARTPRUNE_COMBINE_UNION,	/* union the partition sets of the sources */
	PARTPRUNE_COMBINE_INTERSECT /* intersect the partition sets */
} PartitionPruneCombineOp;
1225 
typedef struct PartitionPruneStepCombine
{
	PartitionPruneStep step;

	PartitionPruneCombineOp combineOp;	/* how to combine the source steps */
	List	   *source_stepids; /* step IDs of the steps being combined */
} PartitionPruneStepCombine;
1233 
1234 
1235 /*
1236  * Plan invalidation info
1237  *
1238  * We track the objects on which a PlannedStmt depends in two ways:
1239  * relations are recorded as a simple list of OIDs, and everything else
1240  * is represented as a list of PlanInvalItems.  A PlanInvalItem is designed
1241  * to be used with the syscache invalidation mechanism, so it identifies a
1242  * system catalog entry by cache ID and hash value.
1243  */
typedef struct PlanInvalItem
{
	NodeTag		type;
	/* identifies a system catalog entry by cache ID and hash value */
	int			cacheId;		/* a syscache ID, see utils/syscache.h */
	uint32		hashValue;		/* hash value of object's cache lookup key */
} PlanInvalItem;
1250 
1251 #endif							/* PLANNODES_H */
1252