1 /*-------------------------------------------------------------------------
2  *
3  * plannodes.h
4  *	  definitions for query plan nodes
5  *
6  *
7  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * src/include/nodes/plannodes.h
11  *
12  *-------------------------------------------------------------------------
13  */
14 #ifndef PLANNODES_H
15 #define PLANNODES_H
16 
17 #include "access/sdir.h"
18 #include "access/stratnum.h"
19 #include "lib/stringinfo.h"
20 #include "nodes/bitmapset.h"
21 #include "nodes/lockoptions.h"
22 #include "nodes/primnodes.h"
23 
24 
25 /* ----------------------------------------------------------------
26  *						node definitions
27  * ----------------------------------------------------------------
28  */
29 
/* ----------------
 *		PlannedStmt node
 *
 * The output of the planner is a Plan tree headed by a PlannedStmt node.
 * PlannedStmt holds the "one time" information needed by the executor.
 *
 * For simplicity in APIs, we also wrap utility statements in PlannedStmt
 * nodes; in such cases, commandType == CMD_UTILITY, the statement itself
 * is in the utilityStmt field, and the rest of the struct is mostly dummy.
 * (We do use canSetTag, stmt_location, stmt_len, and possibly queryId.)
 * ----------------
 */
typedef struct PlannedStmt
{
	NodeTag		type;

	CmdType		commandType;	/* select|insert|update|delete|utility */

	uint64		queryId;		/* query identifier (copied from Query) */

	bool		hasReturning;	/* is it insert|update|delete RETURNING? */

	bool		hasModifyingCTE;	/* has insert|update|delete in WITH? */

	bool		canSetTag;		/* do I set the command result tag? */

	bool		transientPlan;	/* redo plan when TransactionXmin changes? */

	bool		dependsOnRole;	/* is plan specific to current role? */

	bool		parallelModeNeeded; /* parallel mode required to execute? */

	int			jitFlags;		/* which forms of JIT should be performed */

	struct Plan *planTree;		/* root of the tree of Plan nodes */

	List	   *rtable;			/* list of RangeTblEntry nodes */

	/* rtable indexes of target relations for INSERT/UPDATE/DELETE */
	List	   *resultRelations;	/* integer list of RT indexes, or NIL */

	List	   *appendRelations;	/* list of AppendRelInfo nodes */

	List	   *subplans;		/* Plan trees for SubPlan expressions, indexed
								 * by SubPlan plan_id; note that some could be
								 * NULL */

	Bitmapset  *rewindPlanIDs;	/* indices of subplans that require REWIND */

	List	   *rowMarks;		/* a list of PlanRowMark's */

	List	   *relationOids;	/* OIDs of relations the plan depends on */

	List	   *invalItems;		/* other dependencies, as PlanInvalItems */

	List	   *paramExecTypes; /* type OIDs for PARAM_EXEC Params */

	Node	   *utilityStmt;	/* non-null if this is utility stmt */

	/* statement location in source string (copied from Query) */
	int			stmt_location;	/* start location, or -1 if unknown */
	int			stmt_len;		/* length in bytes; 0 means "rest of string" */
} PlannedStmt;
92 
/*
 * Macro for fetching the Plan associated with a SubPlan node.
 * (plan_id values are 1-based, while List positions are 0-based, hence -1.)
 */
#define exec_subplan_get_plan(plannedstmt, subplan) \
	((Plan *) list_nth((plannedstmt)->subplans, (subplan)->plan_id - 1))
96 
97 
/* ----------------
 *		Plan node
 *
 * All plan nodes "derive" from the Plan structure by having the
 * Plan structure as the first field.  This ensures that everything works
 * when nodes are cast to Plan's.  (node pointers are frequently cast to Plan*
 * when passed around generically in the executor)
 *
 * We never actually instantiate any Plan nodes; this is just the common
 * abstract superclass for all Plan-type nodes.
 * ----------------
 */
typedef struct Plan
{
	NodeTag		type;

	/*
	 * estimated execution costs for plan (see costsize.c for more info)
	 */
	Cost		startup_cost;	/* cost expended before fetching any tuples */
	Cost		total_cost;		/* total cost (assuming all tuples fetched) */

	/*
	 * planner's estimate of result size of this plan step
	 */
	double		plan_rows;		/* number of rows plan is expected to emit */
	int			plan_width;		/* average row width in bytes */

	/*
	 * information needed for parallel query
	 */
	bool		parallel_aware; /* engage parallel-aware logic? */
	bool		parallel_safe;	/* OK to use as part of parallel plan? */

	/*
	 * information needed for asynchronous execution
	 */
	bool		async_capable;	/* engage asynchronous-capable logic? */

	/*
	 * Common structural data for all Plan types.
	 */
	int			plan_node_id;	/* unique across entire final plan tree */
	List	   *targetlist;		/* target list to be computed at this node */
	List	   *qual;			/* implicitly-ANDed qual conditions */
	struct Plan *lefttree;		/* input plan tree(s) */
	struct Plan *righttree;		/* (see innerPlan/outerPlan macros below) */
	List	   *initPlan;		/* Init Plan nodes (un-correlated expr
								 * subselects) */

	/*
	 * Information for management of parameter-change-driven rescanning
	 *
	 * extParam includes the paramIDs of all external PARAM_EXEC params
	 * affecting this plan node or its children.  setParam params from the
	 * node's initPlans are not included, but their extParams are.
	 *
	 * allParam includes all the extParam paramIDs, plus the IDs of local
	 * params that affect the node (i.e., the setParams of its initplans).
	 * These are _all_ the PARAM_EXEC params that affect this node.
	 */
	Bitmapset  *extParam;
	Bitmapset  *allParam;
} Plan;
162 
/* ----------------
 *	these are defined to avoid confusion problems with "left"
 *	and "right" and "inner" and "outer".  The convention is that
 *	the "left" plan is the "outer" plan and the "right" plan is
 *	the inner plan, but these make the code more readable.
 *	(They simply access the lefttree/righttree fields of Plan.)
 * ----------------
 */
#define innerPlan(node)			(((Plan *)(node))->righttree)
#define outerPlan(node)			(((Plan *)(node))->lefttree)
172 
173 
/* ----------------
 *	 Result node -
 *		If no outer plan, evaluate a variable-free targetlist.
 *		If outer plan, return tuples from outer plan (after a level of
 *		projection as shown by targetlist).
 *
 * If resconstantqual isn't NULL, it represents a one-time qualification
 * test (i.e., one that doesn't depend on any variables from the outer plan,
 * so needs to be evaluated only once).
 * ----------------
 */
typedef struct Result
{
	Plan		plan;
	Node	   *resconstantqual;	/* one-time qual, or NULL (see above) */
} Result;
190 
/* ----------------
 *	 ProjectSet node -
 *		Apply a projection that includes set-returning functions to the
 *		output tuples of the outer plan.
 *
 * No fields beyond the common Plan ones are needed; the set-returning
 * projection itself lives in plan.targetlist.
 * ----------------
 */
typedef struct ProjectSet
{
	Plan		plan;
} ProjectSet;
201 
/* ----------------
 *	 ModifyTable node -
 *		Apply rows produced by outer plan to result table(s),
 *		by inserting, updating, or deleting.
 *
 * If the originally named target table is a partitioned table, both
 * nominalRelation and rootRelation contain the RT index of the partition
 * root, which is not otherwise mentioned in the plan.  Otherwise rootRelation
 * is zero.  However, nominalRelation will always be set, as it's the rel that
 * EXPLAIN should claim is the INSERT/UPDATE/DELETE target.
 *
 * Note that rowMarks and epqParam are presumed to be valid for all the
 * table(s); they can't contain any info that varies across tables.
 * ----------------
 */
typedef struct ModifyTable
{
	Plan		plan;
	CmdType		operation;		/* INSERT, UPDATE, or DELETE */
	bool		canSetTag;		/* do we set the command tag/es_processed? */
	Index		nominalRelation;	/* Parent RT index for use of EXPLAIN */
	Index		rootRelation;	/* Root RT index, if target is partitioned */
	bool		partColsUpdated;	/* some part key in hierarchy updated? */
	List	   *resultRelations;	/* integer list of RT indexes */
	List	   *updateColnosLists;	/* per-target-table update_colnos lists */
	List	   *withCheckOptionLists;	/* per-target-table WCO lists */
	List	   *returningLists; /* per-target-table RETURNING tlists */
	List	   *fdwPrivLists;	/* per-target-table FDW private data lists */
	Bitmapset  *fdwDirectModifyPlans;	/* indices of FDW DM plans */
	List	   *rowMarks;		/* PlanRowMarks (non-locking only) */
	int			epqParam;		/* ID of Param for EvalPlanQual re-eval */
	OnConflictAction onConflictAction;	/* ON CONFLICT action */
	List	   *arbiterIndexes; /* List of ON CONFLICT arbiter index OIDs  */
	List	   *onConflictSet;	/* INSERT ON CONFLICT DO UPDATE targetlist */
	List	   *onConflictCols; /* target column numbers for onConflictSet */
	Node	   *onConflictWhere;	/* WHERE for ON CONFLICT UPDATE */
	Index		exclRelRTI;		/* RTI of the EXCLUDED pseudo relation */
	List	   *exclRelTlist;	/* tlist of the EXCLUDED pseudo relation */
} ModifyTable;
241 
struct PartitionPruneInfo;		/* forward reference to struct below */

/* ----------------
 *	 Append node -
 *		Generate the concatenation of the results of sub-plans.
 * ----------------
 */
typedef struct Append
{
	Plan		plan;
	Bitmapset  *apprelids;		/* RTIs of appendrel(s) formed by this node */
	List	   *appendplans;	/* list of sub-Plan nodes to concatenate */
	int			nasyncplans;	/* # of asynchronous plans */

	/*
	 * All 'appendplans' preceding this index are non-partial plans. All
	 * 'appendplans' from this index onwards are partial plans.
	 */
	int			first_partial_plan;

	/* Info for run-time subplan pruning; NULL if we're not doing that */
	struct PartitionPruneInfo *part_prune_info;
} Append;
265 
/* ----------------
 *	 MergeAppend node -
 *		Merge the results of pre-sorted sub-plans to preserve the ordering.
 * ----------------
 */
typedef struct MergeAppend
{
	Plan		plan;
	Bitmapset  *apprelids;		/* RTIs of appendrel(s) formed by this node */
	List	   *mergeplans;		/* list of pre-sorted sub-Plan nodes */
	/* these fields are just like the sort-key info in struct Sort: */
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
	/* Info for run-time subplan pruning; NULL if we're not doing that */
	struct PartitionPruneInfo *part_prune_info;
} MergeAppend;
285 
/* ----------------
 *	RecursiveUnion node -
 *		Generate a recursive union of two subplans.
 *
 * The "outer" subplan is always the non-recursive term, and the "inner"
 * subplan is the recursive term.
 * ----------------
 */
typedef struct RecursiveUnion
{
	Plan		plan;
	int			wtParam;		/* ID of Param representing work table */
	/* Remaining fields are zero/null in UNION ALL case */
	int			numCols;		/* number of columns to check for
								 * duplicate-ness */
	AttrNumber *dupColIdx;		/* their indexes in the target list */
	Oid		   *dupOperators;	/* equality operators to compare with */
	Oid		   *dupCollations;	/* collations for the comparisons */
	long		numGroups;		/* estimated number of groups in input */
} RecursiveUnion;
306 
/* ----------------
 *	 BitmapAnd node -
 *		Generate the intersection of the results of sub-plans.
 *
 * The subplans must be of types that yield tuple bitmaps.  The targetlist
 * and qual fields of the plan are unused and are always NIL.
 * ----------------
 */
typedef struct BitmapAnd
{
	Plan		plan;
	List	   *bitmapplans;	/* list of bitmap-yielding sub-Plan nodes */
} BitmapAnd;
320 
/* ----------------
 *	 BitmapOr node -
 *		Generate the union of the results of sub-plans.
 *
 * The subplans must be of types that yield tuple bitmaps.  The targetlist
 * and qual fields of the plan are unused and are always NIL.
 * ----------------
 */
typedef struct BitmapOr
{
	Plan		plan;
	bool		isshared;		/* create shared bitmap if set */
	List	   *bitmapplans;	/* list of bitmap-yielding sub-Plan nodes */
} BitmapOr;
335 
/*
 * ==========
 * Scan nodes
 *
 * Scan is the abstract superclass of all nodes that scan a single range-
 * table entry; scanrelid identifies the relation being scanned.
 * ==========
 */
typedef struct Scan
{
	Plan		plan;
	Index		scanrelid;		/* relid is index into the range table */
} Scan;
346 
/* ----------------
 *		sequential scan node
 *
 * A plain heap scan needs no fields beyond the common Scan ones.
 * ----------------
 */
typedef Scan SeqScan;
352 
/* ----------------
 *		table sample scan node
 * ----------------
 */
typedef struct SampleScan
{
	Scan		scan;
	/* use struct pointer to avoid including parsenodes.h here */
	struct TableSampleClause *tablesample;	/* TABLESAMPLE method and args */
} SampleScan;
363 
/* ----------------
 *		index scan node
 *
 * indexqualorig is an implicitly-ANDed list of index qual expressions, each
 * in the same form it appeared in the query WHERE condition.  Each should
 * be of the form (indexkey OP comparisonval) or (comparisonval OP indexkey).
 * The indexkey is a Var or expression referencing column(s) of the index's
 * base table.  The comparisonval might be any expression, but it won't use
 * any columns of the base table.  The expressions are ordered by index
 * column position (but items referencing the same index column can appear
 * in any order).  indexqualorig is used at runtime only if we have to recheck
 * a lossy indexqual.
 *
 * indexqual has the same form, but the expressions have been commuted if
 * necessary to put the indexkeys on the left, and the indexkeys are replaced
 * by Var nodes identifying the index columns (their varno is INDEX_VAR and
 * their varattno is the index column number).
 *
 * indexorderbyorig is similarly the original form of any ORDER BY expressions
 * that are being implemented by the index, while indexorderby is modified to
 * have index column Vars on the left-hand side.  Here, multiple expressions
 * must appear in exactly the ORDER BY order, and this is not necessarily the
 * index column order.  Only the expressions are provided, not the auxiliary
 * sort-order information from the ORDER BY SortGroupClauses; it's assumed
 * that the sort ordering is fully determinable from the top-level operators.
 * indexorderbyorig is used at runtime to recheck the ordering, if the index
 * cannot calculate an accurate ordering.  It is also needed for EXPLAIN.
 *
 * indexorderbyops is a list of the OIDs of the operators used to sort the
 * ORDER BY expressions.  This is used together with indexorderbyorig to
 * recheck ordering at run time.  (Note that indexorderby, indexorderbyorig,
 * and indexorderbyops are used for amcanorderbyop cases, not amcanorder.)
 *
 * indexorderdir specifies the scan ordering, for indexscans on amcanorder
 * indexes (for other indexes it should be "don't care").
 * ----------------
 */
typedef struct IndexScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	List	   *indexqual;		/* list of index quals (usually OpExprs) */
	List	   *indexqualorig;	/* the same in original form */
	List	   *indexorderby;	/* list of index ORDER BY exprs */
	List	   *indexorderbyorig;	/* the same in original form */
	List	   *indexorderbyops;	/* OIDs of sort ops for ORDER BY exprs */
	ScanDirection indexorderdir;	/* forward or backward or don't care */
} IndexScan;
412 
/* ----------------
 *		index-only scan node
 *
 * IndexOnlyScan is very similar to IndexScan, but it specifies an
 * index-only scan, in which the data comes from the index not the heap.
 * Because of this, *all* Vars in the plan node's targetlist, qual, and
 * index expressions reference index columns and have varno = INDEX_VAR.
 * Hence we do not need separate indexqualorig and indexorderbyorig lists,
 * since their contents would be equivalent to indexqual and indexorderby.
 *
 * To help EXPLAIN interpret the index Vars for display, we provide
 * indextlist, which represents the contents of the index as a targetlist
 * with one TLE per index column.  Vars appearing in this list reference
 * the base table, and this is the only field in the plan node that may
 * contain such Vars.
 * ----------------
 */
typedef struct IndexOnlyScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	List	   *indexqual;		/* list of index quals (usually OpExprs) */
	List	   *indexorderby;	/* list of index ORDER BY exprs */
	List	   *indextlist;		/* TargetEntry list describing index's cols */
	ScanDirection indexorderdir;	/* forward or backward or don't care */
} IndexOnlyScan;
439 
/* ----------------
 *		bitmap index scan node
 *
 * BitmapIndexScan delivers a bitmap of potential tuple locations;
 * it does not access the heap itself.  The bitmap is used by an
 * ancestor BitmapHeapScan node, possibly after passing through
 * intermediate BitmapAnd and/or BitmapOr nodes to combine it with
 * the results of other BitmapIndexScans.
 *
 * The fields have the same meanings as for IndexScan, except we don't
 * store a direction flag because direction is uninteresting.
 *
 * In a BitmapIndexScan plan node, the targetlist and qual fields are
 * not used and are always NIL.  The indexqualorig field is unused at
 * run time too, but is saved for the benefit of EXPLAIN.
 * ----------------
 */
typedef struct BitmapIndexScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	bool		isshared;		/* Create shared bitmap if set */
	List	   *indexqual;		/* list of index quals (OpExprs) */
	List	   *indexqualorig;	/* the same in original form */
} BitmapIndexScan;
465 
/* ----------------
 *		bitmap sequential scan node
 *
 * This needs a copy of the qual conditions being used by the input index
 * scans because there are various cases where we need to recheck the quals;
 * for example, when the bitmap is lossy about the specific rows on a page
 * that meet the index condition.
 * ----------------
 */
typedef struct BitmapHeapScan
{
	Scan		scan;
	List	   *bitmapqualorig; /* index quals, in standard expr form */
} BitmapHeapScan;
480 
/* ----------------
 *		tid scan node
 *
 * tidquals is an implicitly OR'ed list of qual expressions of the form
 * "CTID = pseudoconstant", or "CTID = ANY(pseudoconstant_array)",
 * or a CurrentOfExpr for the relation.
 * ----------------
 */
typedef struct TidScan
{
	Scan		scan;
	List	   *tidquals;		/* qual(s) involving CTID = something */
} TidScan;
494 
/* ----------------
 *		tid range scan node
 *
 * tidrangequals is an implicitly AND'ed list of qual expressions of the form
 * "CTID relop pseudoconstant", where relop is one of >,>=,<,<=.
 * ----------------
 */
typedef struct TidRangeScan
{
	Scan		scan;
	List	   *tidrangequals;	/* qual(s) involving CTID op something */
} TidRangeScan;
507 
/* ----------------
 *		subquery scan node
 *
 * SubqueryScan is for scanning the output of a sub-query in the range table.
 * We often need an extra plan node above the sub-query's plan to perform
 * expression evaluations (which we can't push into the sub-query without
 * risking changing its semantics).  Although we are not scanning a physical
 * relation, we make this a descendant of Scan anyway for code-sharing
 * purposes.
 *
 * Note: we store the sub-plan in the type-specific subplan field, not in
 * the generic lefttree field as you might expect.  This is because we do
 * not want plan-tree-traversal routines to recurse into the subplan without
 * knowing that they are changing Query contexts.
 * ----------------
 */
typedef struct SubqueryScan
{
	Scan		scan;
	Plan	   *subplan;		/* the sub-query's plan tree (see above) */
} SubqueryScan;
529 
/* ----------------
 *		FunctionScan node
 *
 * Scans the result(s) of function call(s) appearing in FROM.
 * ----------------
 */
typedef struct FunctionScan
{
	Scan		scan;
	List	   *functions;		/* list of RangeTblFunction nodes */
	bool		funcordinality; /* WITH ORDINALITY */
} FunctionScan;
540 
/* ----------------
 *		ValuesScan node
 *
 * Scans the rows of a VALUES list.
 * ----------------
 */
typedef struct ValuesScan
{
	Scan		scan;
	List	   *values_lists;	/* list of expression lists, one per row */
} ValuesScan;
550 
/* ----------------
 *		TableFunc scan node
 * ----------------
 */
typedef struct TableFuncScan
{
	Scan		scan;
	TableFunc  *tablefunc;		/* table function node */
} TableFuncScan;
560 
/* ----------------
 *		CteScan node
 *
 * Scans the output of a non-recursive common table expression.
 * ----------------
 */
typedef struct CteScan
{
	Scan		scan;
	int			ctePlanId;		/* ID of init SubPlan for CTE */
	int			cteParam;		/* ID of Param representing CTE output */
} CteScan;
571 
/* ----------------
 *		NamedTuplestoreScan node
 * ----------------
 */
typedef struct NamedTuplestoreScan
{
	Scan		scan;
	char	   *enrname;		/* Name given to Ephemeral Named Relation */
} NamedTuplestoreScan;
581 
/* ----------------
 *		WorkTableScan node
 *
 * Scans the work table of a RecursiveUnion (see wtParam there).
 * ----------------
 */
typedef struct WorkTableScan
{
	Scan		scan;
	int			wtParam;		/* ID of Param representing work table */
} WorkTableScan;
591 
/* ----------------
 *		ForeignScan node
 *
 * fdw_exprs and fdw_private are both under the control of the foreign-data
 * wrapper, but fdw_exprs is presumed to contain expression trees and will
 * be post-processed accordingly by the planner; fdw_private won't be.
 * Note that everything in both lists must be copiable by copyObject().
 * One way to store an arbitrary blob of bytes is to represent it as a bytea
 * Const.  Usually, though, you'll be better off choosing a representation
 * that can be dumped usefully by nodeToString().
 *
 * fdw_scan_tlist is a targetlist describing the contents of the scan tuple
 * returned by the FDW; it can be NIL if the scan tuple matches the declared
 * rowtype of the foreign table, which is the normal case for a simple foreign
 * table scan.  (If the plan node represents a foreign join, fdw_scan_tlist
 * is required since there is no rowtype available from the system catalogs.)
 * When fdw_scan_tlist is provided, Vars in the node's tlist and quals must
 * have varno INDEX_VAR, and their varattnos correspond to resnos in the
 * fdw_scan_tlist (which are also column numbers in the actual scan tuple).
 * fdw_scan_tlist is never actually executed; it just holds expression trees
 * describing what is in the scan tuple's columns.
 *
 * fdw_recheck_quals should contain any quals which the core system passed to
 * the FDW but which were not added to scan.plan.qual; that is, it should
 * contain the quals being checked remotely.  This is needed for correct
 * behavior during EvalPlanQual rechecks.
 *
 * When the plan node represents a foreign join, scan.scanrelid is zero and
 * fs_relids must be consulted to identify the join relation.  (fs_relids
 * is valid for simple scans as well, but will always match scan.scanrelid.)
 *
 * If the FDW's PlanDirectModify() callback decides to repurpose a ForeignScan
 * node to perform the UPDATE or DELETE operation directly in the remote
 * server, it sets 'operation' and 'resultRelation' to identify the operation
 * type and target relation.  Note that these fields are only set if the
 * modification is performed *fully* remotely; otherwise, the modification is
 * driven by a local ModifyTable node and 'operation' is left to CMD_SELECT.
 * ----------------
 */
typedef struct ForeignScan
{
	Scan		scan;
	CmdType		operation;		/* SELECT/INSERT/UPDATE/DELETE */
	Index		resultRelation; /* direct modification target's RT index
								 * (see note above about when this is set) */
	Oid			fs_server;		/* OID of foreign server */
	List	   *fdw_exprs;		/* expressions that FDW may evaluate */
	List	   *fdw_private;	/* private data for FDW */
	List	   *fdw_scan_tlist; /* optional tlist describing scan tuple */
	List	   *fdw_recheck_quals;	/* original quals not in scan.plan.qual */
	Bitmapset  *fs_relids;		/* RTIs generated by this scan */
	bool		fsSystemCol;	/* true if any "system column" is needed */
} ForeignScan;
644 
/* ----------------
 *	   CustomScan node
 *
 * The comments for ForeignScan's fdw_exprs, fdw_private, fdw_scan_tlist,
 * and fs_relids fields apply equally to CustomScan's custom_exprs,
 * custom_private, custom_scan_tlist, and custom_relids fields.  The
 * convention of setting scan.scanrelid to zero for joins applies as well.
 *
 * Note that since Plan trees can be copied, custom scan providers *must*
 * fit all plan data they need into those fields; embedding CustomScan in
 * a larger struct will not work.
 * ----------------
 */
struct CustomScanMethods;		/* avoid including extensible.h here */

typedef struct CustomScan
{
	Scan		scan;
	uint32		flags;			/* mask of CUSTOMPATH_* flags, see
								 * nodes/extensible.h */
	List	   *custom_plans;	/* list of Plan nodes, if any */
	List	   *custom_exprs;	/* expressions that custom code may evaluate */
	List	   *custom_private; /* private data for custom code */
	List	   *custom_scan_tlist;	/* optional tlist describing scan tuple */
	Bitmapset  *custom_relids;	/* RTIs generated by this scan */
	const struct CustomScanMethods *methods;	/* provider callbacks */
} CustomScan;
672 
673 /*
674  * ==========
675  * Join nodes
676  * ==========
677  */
678 
/* ----------------
 *		Join node
 *
 * jointype:	rule for joining tuples from left and right subtrees
 * inner_unique each outer tuple can match to no more than one inner tuple
 * joinqual:	qual conditions that came from JOIN/ON or JOIN/USING
 *				(plan.qual contains conditions that came from WHERE)
 *
 * When jointype is INNER, joinqual and plan.qual are semantically
 * interchangeable.  For OUTER jointypes, the two are *not* interchangeable;
 * only joinqual is used to determine whether a match has been found for
 * the purpose of deciding whether to generate null-extended tuples.
 * (But plan.qual is still applied before actually returning a tuple.)
 * For an outer join, only joinquals are allowed to be used as the merge
 * or hash condition of a merge or hash join.
 *
 * inner_unique is set if the joinquals are such that no more than one inner
 * tuple could match any given outer tuple.  This allows the executor to
 * skip searching for additional matches.  (This must be provable from just
 * the joinquals, ignoring plan.qual, due to where the executor tests it.)
 * ----------------
 */
typedef struct Join
{
	Plan		plan;
	JoinType	jointype;		/* rule for joining the two subtrees */
	bool		inner_unique;	/* at most one inner match per outer tuple? */
	List	   *joinqual;		/* JOIN quals (in addition to plan.qual) */
} Join;
708 
/* ----------------
 *		nest loop join node
 *
 * The nestParams list identifies any executor Params that must be passed
 * into execution of the inner subplan carrying values from the current row
 * of the outer subplan.  Currently we restrict these values to be simple
 * Vars, but perhaps someday that'd be worth relaxing.  (Note: during plan
 * creation, the paramval can actually be a PlaceHolderVar expression; but it
 * must be a Var with varno OUTER_VAR by the time it gets to the executor.)
 * ----------------
 */
typedef struct NestLoop
{
	Join		join;
	List	   *nestParams;		/* list of NestLoopParam nodes */
} NestLoop;
725 
/* One outer-row value passed into a NestLoop's inner subplan; see above. */
typedef struct NestLoopParam
{
	NodeTag		type;
	int			paramno;		/* number of the PARAM_EXEC Param to set */
	Var		   *paramval;		/* outer-relation Var to assign to Param */
} NestLoopParam;
732 
/* ----------------
 *		merge join node
 *
 * The expected ordering of each mergeable column is described by a btree
 * opfamily OID, a collation OID, a direction (BTLessStrategyNumber or
 * BTGreaterStrategyNumber) and a nulls-first flag.  Note that the two sides
 * of each mergeclause may be of different datatypes, but they are ordered the
 * same way according to the common opfamily and collation.  The operator in
 * each mergeclause must be an equality operator of the indicated opfamily.
 * ----------------
 */
typedef struct MergeJoin
{
	Join		join;
	bool		skip_mark_restore;	/* Can we skip mark/restore calls? */
	List	   *mergeclauses;	/* mergeclauses as expression trees */
	/* these are arrays, but have the same length as the mergeclauses list: */
	Oid		   *mergeFamilies;	/* per-clause OIDs of btree opfamilies */
	Oid		   *mergeCollations;	/* per-clause OIDs of collations */
	int		   *mergeStrategies;	/* per-clause ordering (ASC or DESC) */
	bool	   *mergeNullsFirst;	/* per-clause nulls ordering */
} MergeJoin;
755 
/* ----------------
 *		hash join node
 *
 * The three parallel lists below describe the hash clauses: the clause
 * expressions themselves plus, per clause, the operator and collation
 * to hash/compare with.
 * ----------------
 */
typedef struct HashJoin
{
	Join		join;
	List	   *hashclauses;	/* hash join clauses as expression trees */
	List	   *hashoperators;	/* per-clause hash operator OIDs */
	List	   *hashcollations;	/* per-clause collation OIDs */

	/*
	 * List of expressions to be hashed for tuples from the outer plan, to
	 * perform lookups in the hashtable over the inner plan.
	 */
	List	   *hashkeys;
} HashJoin;
773 
/* ----------------
 *		materialization node
 *
 * Buffers the output of its subplan so it can be rescanned cheaply;
 * no fields beyond the common Plan ones are needed.
 * ----------------
 */
typedef struct Material
{
	Plan		plan;
} Material;
782 
/* ----------------
 *		memoize node
 *
 * Caches the results of its subplan, keyed by parameter values.
 * ----------------
 */
typedef struct Memoize
{
	Plan		plan;

	int			numKeys;		/* size of the two arrays below */

	Oid		   *hashOperators;	/* hash operators for each key */
	Oid		   *collations;		/* collations for each key (parallel to
								 * hashOperators; the previous "cache keys"
								 * annotation here was misleading) */
	List	   *param_exprs;	/* exprs containing parameters */
	bool		singlerow;		/* true if the cache entry should be marked as
								 * complete after we store the first tuple in
								 * it. */
	uint32		est_entries;	/* The maximum number of entries that the
								 * planner expects will fit in the cache, or 0
								 * if unknown */
} Memoize;
803 
/* ----------------
 *		sort node
 *
 * The four arrays below are parallel, with numCols entries each.
 * ----------------
 */
typedef struct Sort
{
	Plan		plan;
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
} Sort;
817 
/* ----------------
 *		incremental sort node
 *
 * Like Sort, but the input is already sorted on a prefix of the sort keys.
 * ----------------
 */
typedef struct IncrementalSort
{
	Sort		sort;
	int			nPresortedCols; /* number of presorted columns */
} IncrementalSort;
827 
/* ---------------
 *	 group node -
 *		Used for queries with GROUP BY (but no aggregates) specified.
 *		The input must be presorted according to the grouping columns.
 * ---------------
 */
typedef struct Group
{
	Plan		plan;
	int			numCols;		/* number of grouping columns */
	AttrNumber *grpColIdx;		/* their indexes in the target list */
	Oid		   *grpOperators;	/* equality operators to compare with */
	Oid		   *grpCollations;	/* collations for the comparisons */
} Group;
842 
843 /* ---------------
844  *		aggregate node
845  *
846  * An Agg node implements plain or grouped aggregation.  For grouped
847  * aggregation, we can work with presorted input or unsorted input;
848  * the latter strategy uses an internal hashtable.
849  *
850  * Notice the lack of any direct info about the aggregate functions to be
851  * computed.  They are found by scanning the node's tlist and quals during
852  * executor startup.  (It is possible that there are no aggregate functions;
853  * this could happen if they get optimized away by constant-folding, or if
854  * we are using the Agg node to implement hash-based grouping.)
855  * ---------------
856  */
typedef struct Agg
{
	Plan		plan;
	AggStrategy aggstrategy;	/* basic strategy, see nodes.h */
	AggSplit	aggsplit;		/* agg-splitting mode, see nodes.h */
	int			numCols;		/* number of grouping columns */
	AttrNumber *grpColIdx;		/* their indexes in the target list */
	Oid		   *grpOperators;	/* equality operators to compare with */
	Oid		   *grpCollations;	/* collations for equality comparisons */
	long		numGroups;		/* estimated number of groups in input */
	uint64		transitionSpace;	/* for pass-by-ref transition data */
	Bitmapset  *aggParams;		/* IDs of Params used in Aggref inputs */
	/* Note: planner provides numGroups & aggParams only in HASHED/MIXED case */
	List	   *groupingSets;	/* grouping sets to use */
	List	   *chain;			/* chained Agg/Sort nodes */
} Agg;
873 
874 /* ----------------
875  *		window aggregate node
876  * ----------------
877  */
typedef struct WindowAgg
{
	Plan		plan;
	Index		winref;			/* ID referenced by window functions */
	int			partNumCols;	/* number of columns in partition clause */
	/* arrays of partNumCols entries describing the partition columns: */
	AttrNumber *partColIdx;		/* their indexes in the target list */
	Oid		   *partOperators;	/* equality operators for partition columns */
	Oid		   *partCollations; /* collations for partition columns */
	int			ordNumCols;		/* number of columns in ordering clause */
	/* arrays of ordNumCols entries describing the ordering columns: */
	AttrNumber *ordColIdx;		/* their indexes in the target list */
	Oid		   *ordOperators;	/* equality operators for ordering columns */
	Oid		   *ordCollations;	/* collations for ordering columns */
	int			frameOptions;	/* frame_clause options, see WindowDef */
	Node	   *startOffset;	/* expression for starting bound, if any */
	Node	   *endOffset;		/* expression for ending bound, if any */
	/* these fields are used with RANGE offset PRECEDING/FOLLOWING: */
	Oid			startInRangeFunc;	/* in_range function for startOffset */
	Oid			endInRangeFunc; /* in_range function for endOffset */
	Oid			inRangeColl;	/* collation for in_range tests */
	bool		inRangeAsc;		/* use ASC sort order for in_range tests? */
	bool		inRangeNullsFirst;	/* nulls sort first for in_range tests? */
} WindowAgg;
900 
901 /* ----------------
902  *		unique node
903  * ----------------
904  */
typedef struct Unique
{
	Plan		plan;
	int			numCols;		/* number of columns to check for uniqueness */
	/* the three parallel arrays below each contain numCols entries: */
	AttrNumber *uniqColIdx;		/* their indexes in the target list */
	Oid		   *uniqOperators;	/* equality operators to compare with */
	Oid		   *uniqCollations; /* collations for equality comparisons */
} Unique;
913 
914 /* ------------
915  *		gather node
916  *
917  * Note: rescan_param is the ID of a PARAM_EXEC parameter slot.  That slot
918  * will never actually contain a value, but the Gather node must flag it as
919  * having changed whenever it is rescanned.  The child parallel-aware scan
920  * nodes are marked as depending on that parameter, so that the rescan
921  * machinery is aware that their output is likely to change across rescans.
922  * In some cases we don't need a rescan Param, so rescan_param is set to -1.
923  * ------------
924  */
typedef struct Gather
{
	Plan		plan;
	int			num_workers;	/* planned number of worker processes */
	int			rescan_param;	/* ID of Param that signals a rescan, or -1 */
	bool		single_copy;	/* don't execute plan more than once */
	bool		invisible;		/* suppress EXPLAIN display (for testing)? */
	Bitmapset  *initParam;		/* param IDs of initplans which are referred
								 * to at this Gather node or one of its child
								 * nodes */
} Gather;
935 
936 /* ------------
937  *		gather merge node
938  * ------------
939  */
typedef struct GatherMerge
{
	Plan		plan;
	int			num_workers;	/* planned number of worker processes */
	int			rescan_param;	/* ID of Param that signals a rescan, or -1 */
	/* remaining fields are just like the sort-key info in struct Sort */
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
	Bitmapset  *initParam;		/* param IDs of initplans which are referred
								 * to at this GatherMerge node or one of its
								 * child nodes */
} GatherMerge;
954 
955 /* ----------------
956  *		hash build node
957  *
958  * If the executor is supposed to try to apply skew join optimization, then
959  * skewTable/skewColumn/skewInherit identify the outer relation's join key
960  * column, from which the relevant MCV statistics can be fetched.
961  * ----------------
962  */
typedef struct Hash
{
	Plan		plan;

	/*
	 * List of expressions to be hashed for tuples from Hash's outer plan,
	 * needed to put them into the hashtable.
	 */
	List	   *hashkeys;		/* hash keys for the hashjoin condition */
	Oid			skewTable;		/* outer join key's table OID, or InvalidOid */
	AttrNumber	skewColumn;		/* outer join key's column #, or zero */
	bool		skewInherit;	/* is outer join rel an inheritance tree? */
	/* all other info is in the parent HashJoin node */
	double		rows_total;		/* estimated total rows if parallel_aware */
} Hash;
978 
979 /* ----------------
980  *		setop node
981  * ----------------
982  */
typedef struct SetOp
{
	Plan		plan;
	SetOpCmd	cmd;			/* what to do, see nodes.h */
	SetOpStrategy strategy;		/* how to do it, see nodes.h */
	int			numCols;		/* number of columns to check for
								 * duplicate-ness */
	AttrNumber *dupColIdx;		/* their indexes in the target list */
	Oid		   *dupOperators;	/* equality operators to compare with */
	Oid		   *dupCollations;	/* collations for equality comparisons */
	AttrNumber	flagColIdx;		/* where is the flag column, if any */
	int			firstFlag;		/* flag value for first input relation */
	long		numGroups;		/* estimated number of groups in input */
} SetOp;
997 
998 /* ----------------
999  *		lock-rows node
1000  *
1001  * rowMarks identifies the rels to be locked by this node; it should be
1002  * a subset of the rowMarks listed in the top-level PlannedStmt.
1003  * epqParam is a Param that all scan nodes below this one must depend on.
1004  * It is used to force re-evaluation of the plan during EvalPlanQual.
1005  * ----------------
1006  */
typedef struct LockRows
{
	Plan		plan;
	List	   *rowMarks;		/* a list of PlanRowMark nodes */
	int			epqParam;		/* ID of Param for EvalPlanQual re-eval */
} LockRows;
1013 
1014 /* ----------------
1015  *		limit node
1016  *
1017  * Note: as of Postgres 8.2, the offset and count expressions are expected
1018  * to yield int8, rather than int4 as before.
1019  * ----------------
1020  */
typedef struct Limit
{
	Plan		plan;
	Node	   *limitOffset;	/* OFFSET parameter, or NULL if none */
	Node	   *limitCount;		/* COUNT parameter, or NULL if none */
	LimitOption limitOption;	/* limit type */
	/*
	 * The fields below describe how to detect rows "similar" to the last
	 * returned row; presumably used only when limitOption requests WITH
	 * TIES behavior — confirm against nodeLimit.c.
	 */
	int			uniqNumCols;	/* number of columns to check for similarity */
	AttrNumber *uniqColIdx;		/* their indexes in the target list */
	Oid		   *uniqOperators;	/* equality operators to compare with */
	Oid		   *uniqCollations; /* collations for equality comparisons */
} Limit;
1032 
1033 
1034 /*
1035  * RowMarkType -
1036  *	  enums for types of row-marking operations
1037  *
1038  * The first four of these values represent different lock strengths that
1039  * we can take on tuples according to SELECT FOR [KEY] UPDATE/SHARE requests.
1040  * We support these on regular tables, as well as on foreign tables whose FDWs
1041  * report support for late locking.  For other foreign tables, any locking
1042  * that might be done for such requests must happen during the initial row
1043  * fetch; their FDWs provide no mechanism for going back to lock a row later.
1044  * This means that the semantics will be a bit different than for a local
1045  * table; in particular we are likely to lock more rows than would be locked
1046  * locally, since remote rows will be locked even if they then fail
1047  * locally-checked restriction or join quals.  However, the prospect of
1048  * doing a separate remote query to lock each selected row is usually pretty
1049  * unappealing, so early locking remains a credible design choice for FDWs.
1050  *
1051  * When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we have to uniquely
1052  * identify all the source rows, not only those from the target relations, so
1053  * that we can perform EvalPlanQual rechecking at need.  For plain tables we
1054  * can just fetch the TID, much as for a target relation; this case is
1055  * represented by ROW_MARK_REFERENCE.  Otherwise (for example for VALUES or
1056  * FUNCTION scans) we have to copy the whole row value.  ROW_MARK_COPY is
1057  * pretty inefficient, since most of the time we'll never need the data; but
1058  * fortunately the overhead is usually not performance-critical in practice.
1059  * By default we use ROW_MARK_COPY for foreign tables, but if the FDW has
1060  * a concept of rowid it can request to use ROW_MARK_REFERENCE instead.
1061  * (Again, this probably doesn't make sense if a physical remote fetch is
1062  * needed, but for FDWs that map to local storage it might be credible.)
1063  */
typedef enum RowMarkType
{
	ROW_MARK_EXCLUSIVE,			/* obtain exclusive tuple lock */
	ROW_MARK_NOKEYEXCLUSIVE,	/* obtain no-key exclusive tuple lock */
	ROW_MARK_SHARE,				/* obtain shared tuple lock */
	ROW_MARK_KEYSHARE,			/* obtain keyshare tuple lock */
	/* the four values above are the lock-strength marks; their ordering
	 * matters to RowMarkRequiresRowShareLock() */
	ROW_MARK_REFERENCE,			/* just fetch the TID, don't lock it */
	ROW_MARK_COPY				/* physically copy the row value */
} RowMarkType;
1073 
/* Does this mark type represent one of the four tuple-lock strengths above? */
#define RowMarkRequiresRowShareLock(marktype)  ((marktype) <= ROW_MARK_KEYSHARE)
1075 
1076 /*
1077  * PlanRowMark -
1078  *	   plan-time representation of FOR [KEY] UPDATE/SHARE clauses
1079  *
1080  * When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we create a separate
1081  * PlanRowMark node for each non-target relation in the query.  Relations that
1082  * are not specified as FOR UPDATE/SHARE are marked ROW_MARK_REFERENCE (if
1083  * regular tables or supported foreign tables) or ROW_MARK_COPY (if not).
1084  *
1085  * Initially all PlanRowMarks have rti == prti and isParent == false.
1086  * When the planner discovers that a relation is the root of an inheritance
1087  * tree, it sets isParent true, and adds an additional PlanRowMark to the
1088  * list for each child relation (including the target rel itself in its role
1089  * as a child, if it is not a partitioned table).  Any non-leaf partitioned
1090  * child relations will also have entries with isParent = true.  The child
1091  * entries have rti == child rel's RT index and prti == top parent's RT index,
1092  * and can therefore be recognized as children by the fact that prti != rti.
1093  * The parent's allMarkTypes field gets the OR of (1<<markType) across all
1094  * its children (this definition allows children to use different markTypes).
1095  *
1096  * The planner also adds resjunk output columns to the plan that carry
1097  * information sufficient to identify the locked or fetched rows.  When
1098  * markType != ROW_MARK_COPY, these columns are named
1099  *		tableoid%u			OID of table
1100  *		ctid%u				TID of row
1101  * The tableoid column is only present for an inheritance hierarchy.
1102  * When markType == ROW_MARK_COPY, there is instead a single column named
1103  *		wholerow%u			whole-row value of relation
1104  * (An inheritance hierarchy could have all three resjunk output columns,
1105  * if some children use a different markType than others.)
1106  * In all three cases, %u represents the rowmark ID number (rowmarkId).
1107  * This number is unique within a plan tree, except that child relation
1108  * entries copy their parent's rowmarkId.  (Assigning unique numbers
1109  * means we needn't renumber rowmarkIds when flattening subqueries, which
1110  * would require finding and renaming the resjunk columns as well.)
1111  * Note this means that all tables in an inheritance hierarchy share the
1112  * same resjunk column names.
1113  */
typedef struct PlanRowMark
{
	NodeTag		type;
	Index		rti;			/* range table index of markable relation */
	Index		prti;			/* range table index of parent relation
								 * (== rti except in child entries; see
								 * comments above) */
	Index		rowmarkId;		/* unique identifier for resjunk columns */
	RowMarkType markType;		/* see enum above */
	int			allMarkTypes;	/* OR of (1<<markType) for all children */
	LockClauseStrength strength;	/* LockingClause's strength, or LCS_NONE */
	LockWaitPolicy waitPolicy;	/* NOWAIT and SKIP LOCKED options */
	bool		isParent;		/* true if this is a "dummy" parent entry */
} PlanRowMark;
1126 
1127 
1128 /*
1129  * Node types to represent partition pruning information.
1130  */
1131 
1132 /*
1133  * PartitionPruneInfo - Details required to allow the executor to prune
1134  * partitions.
1135  *
1136  * Here we store mapping details to allow translation of a partitioned table's
1137  * index as returned by the partition pruning code into subplan indexes for
1138  * plan types which support arbitrary numbers of subplans, such as Append.
1139  * We also store various details to tell the executor when it should be
1140  * performing partition pruning.
1141  *
1142  * Each PartitionedRelPruneInfo describes the partitioning rules for a single
1143  * partitioned table (a/k/a level of partitioning).  Since a partitioning
1144  * hierarchy could contain multiple levels, we represent it by a List of
1145  * PartitionedRelPruneInfos, where the first entry represents the topmost
1146  * partitioned table and additional entries represent non-leaf child
1147  * partitions, ordered such that parents appear before their children.
1148  * Then, since an Append-type node could have multiple partitioning
1149  * hierarchies among its children, we have an unordered List of those Lists.
1150  *
1151  * prune_infos			List of Lists containing PartitionedRelPruneInfo nodes,
1152  *						one sublist per run-time-prunable partition hierarchy
1153  *						appearing in the parent plan node's subplans.
1154  * other_subplans		Indexes of any subplans that are not accounted for
1155  *						by any of the PartitionedRelPruneInfo nodes in
1156  *						"prune_infos".  These subplans must not be pruned.
1157  */
typedef struct PartitionPruneInfo
{
	NodeTag		type;
	List	   *prune_infos;	/* List of Lists of PartitionedRelPruneInfo,
								 * one sublist per partition hierarchy; see
								 * comments above */
	Bitmapset  *other_subplans;	/* indexes of subplans not accounted for by
								 * prune_infos; these must not be pruned */
} PartitionPruneInfo;
1164 
1165 /*
1166  * PartitionedRelPruneInfo - Details required to allow the executor to prune
1167  * partitions for a single partitioned table.
1168  *
1169  * subplan_map[] and subpart_map[] are indexed by partition index of the
1170  * partitioned table referenced by 'rtindex', the partition index being the
1171  * order that the partitions are defined in the table's PartitionDesc.  For a
1172  * leaf partition p, subplan_map[p] contains the zero-based index of the
1173  * partition's subplan in the parent plan's subplan list; it is -1 if the
1174  * partition is non-leaf or has been pruned.  For a non-leaf partition p,
1175  * subpart_map[p] contains the zero-based index of that sub-partition's
1176  * PartitionedRelPruneInfo in the hierarchy's PartitionedRelPruneInfo list;
1177  * it is -1 if the partition is a leaf or has been pruned.  Note that subplan
1178  * indexes, as stored in 'subplan_map', are global across the parent plan
1179  * node, but partition indexes are valid only within a particular hierarchy.
1180  * relid_map[p] contains the partition's OID, or 0 if the partition was pruned.
1181  */
typedef struct PartitionedRelPruneInfo
{
	NodeTag		type;
	Index		rtindex;		/* RT index of partition rel for this level */
	Bitmapset  *present_parts;	/* Indexes of all partitions for which
								 * subplans or subparts are present */
	int			nparts;			/* Length of the following arrays: */
	int		   *subplan_map;	/* subplan index by partition index, or -1 */
	int		   *subpart_map;	/* subpart index by partition index, or -1 */
	Oid		   *relid_map;		/* relation OID by partition index, or 0 */

	/*
	 * initial_pruning_steps shows how to prune during executor startup (i.e.,
	 * without use of any PARAM_EXEC Params); it is NIL if no startup pruning
	 * is required.  exec_pruning_steps shows how to prune with PARAM_EXEC
	 * Params; it is NIL if no per-scan pruning is required.
	 */
	List	   *initial_pruning_steps;	/* List of PartitionPruneStep */
	List	   *exec_pruning_steps; /* List of PartitionPruneStep */
	Bitmapset  *execparamids;	/* All PARAM_EXEC Param IDs in
								 * exec_pruning_steps */
} PartitionedRelPruneInfo;
1204 
1205 /*
1206  * Abstract Node type for partition pruning steps (there are no concrete
1207  * Nodes of this type).
1208  *
1209  * step_id is the global identifier of the step within its pruning context.
1210  */
typedef struct PartitionPruneStep
{
	NodeTag		type;
	int			step_id;		/* global ID of this step within its pruning
								 * context */
} PartitionPruneStep;
1216 
1217 /*
1218  * PartitionPruneStepOp - Information to prune using a set of mutually ANDed
1219  *							OpExpr clauses
1220  *
1221  * This contains information extracted from up to partnatts OpExpr clauses,
1222  * where partnatts is the number of partition key columns.  'opstrategy' is the
1223  * strategy of the operator in the clause matched to the last partition key.
1224  * 'exprs' contains expressions which comprise the lookup key to be passed to
1225  * the partition bound search function.  'cmpfns' contains the OIDs of
1226  * comparison functions used to compare aforementioned expressions with
1227  * partition bounds.  Both 'exprs' and 'cmpfns' contain the same number of
1228  * items, up to partnatts items.
1229  *
1230  * Once we find the offset of a partition bound using the lookup key, we
1231  * determine which partitions to include in the result based on the value of
1232  * 'opstrategy'.  For example, if it were equality, we'd return just the
1233  * partition that would contain that key or a set of partitions if the key
1234  * didn't consist of all partitioning columns.  For non-equality strategies,
1235  * we'd need to include other partitions as appropriate.
1236  *
1237  * 'nullkeys' is the set containing the offset of the partition keys (0 to
1238  * partnatts - 1) that were matched to an IS NULL clause.  This is only
1239  * considered for hash partitioning as we need to pass which keys are null
1240  * to the hash partition bound search function.  It is never possible to
1241  * have an expression be present in 'exprs' for a given partition key and
1242  * the corresponding bit set in 'nullkeys'.
1243  */
typedef struct PartitionPruneStepOp
{
	PartitionPruneStep step;

	StrategyNumber opstrategy;	/* strategy of the operator matched to the
								 * last partition key */
	List	   *exprs;			/* lookup-key expressions, one per matched
								 * partition key (up to partnatts entries) */
	List	   *cmpfns;			/* OIDs of comparison functions, parallel to
								 * exprs */
	Bitmapset  *nullkeys;		/* offsets of partition keys matched to an
								 * IS NULL clause */
} PartitionPruneStepOp;
1253 
1254 /*
1255  * PartitionPruneStepCombine - Information to prune using a BoolExpr clause
1256  *
1257  * For BoolExpr clauses, we combine the set of partitions determined for each
1258  * of the argument clauses.
1259  */
typedef enum PartitionPruneCombineOp
{
	PARTPRUNE_COMBINE_UNION,	/* combine source results by set union */
	PARTPRUNE_COMBINE_INTERSECT /* combine source results by intersection */
} PartitionPruneCombineOp;
1265 
typedef struct PartitionPruneStepCombine
{
	PartitionPruneStep step;

	PartitionPruneCombineOp combineOp;	/* how to combine the source steps'
										 * results */
	List	   *source_stepids;	/* step IDs whose results are combined */
} PartitionPruneStepCombine;
1273 
1274 
1275 /*
1276  * Plan invalidation info
1277  *
1278  * We track the objects on which a PlannedStmt depends in two ways:
1279  * relations are recorded as a simple list of OIDs, and everything else
1280  * is represented as a list of PlanInvalItems.  A PlanInvalItem is designed
1281  * to be used with the syscache invalidation mechanism, so it identifies a
1282  * system catalog entry by cache ID and hash value.
1283  */
typedef struct PlanInvalItem
{
	NodeTag		type;
	/* identifies one syscache entry that the plan depends on: */
	int			cacheId;		/* a syscache ID, see utils/syscache.h */
	uint32		hashValue;		/* hash value of object's cache lookup key */
} PlanInvalItem;
1290 
1291 #endif							/* PLANNODES_H */
1292