/*-------------------------------------------------------------------------
 *
 * plannodes.h
 *	  definitions for query plan nodes
 *
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/nodes/plannodes.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef PLANNODES_H
#define PLANNODES_H

#include "access/sdir.h"
#include "lib/stringinfo.h"
#include "nodes/bitmapset.h"
#include "nodes/lockoptions.h"
#include "nodes/primnodes.h"


/* ----------------------------------------------------------------
 *						node definitions
 * ----------------------------------------------------------------
 */

/* ----------------
 *		PlannedStmt node
 *
 * The output of the planner is a Plan tree headed by a PlannedStmt node.
 * PlannedStmt holds the "one time" information needed by the executor.
 *
 * For simplicity in APIs, we also wrap utility statements in PlannedStmt
 * nodes; in such cases, commandType == CMD_UTILITY, the statement itself
 * is in the utilityStmt field, and the rest of the struct is mostly dummy.
 * (We do use canSetTag, stmt_location, stmt_len, and possibly queryId.)
 * ----------------
 */
typedef struct PlannedStmt
{
	NodeTag		type;

	CmdType		commandType;	/* select|insert|update|delete|utility */

	uint32		queryId;		/* query identifier (copied from Query) */

	bool		hasReturning;	/* is it insert|update|delete RETURNING? */

	bool		hasModifyingCTE;	/* has insert|update|delete in WITH? */

	bool		canSetTag;		/* do I set the command result tag? */

	bool		transientPlan;	/* redo plan when TransactionXmin changes? */

	bool		dependsOnRole;	/* is plan specific to current role? */

	bool		parallelModeNeeded; /* parallel mode required to execute? */

	struct Plan *planTree;		/* tree of Plan nodes */

	List	   *rtable;			/* list of RangeTblEntry nodes */

	/* rtable indexes of target relations for INSERT/UPDATE/DELETE */
	List	   *resultRelations;	/* integer list of RT indexes, or NIL */

	/*
	 * rtable indexes of non-leaf target relations for UPDATE/DELETE on all
	 * the partitioned tables mentioned in the query.
	 */
	List	   *nonleafResultRelations;

	/*
	 * rtable indexes of root target relations for UPDATE/DELETE; this list
	 * maintains a subset of the RT indexes in nonleafResultRelations,
	 * indicating the roots of the respective partition hierarchies.
	 */
	List	   *rootResultRelations;

	List	   *subplans;		/* Plan trees for SubPlan expressions; note
								 * that some could be NULL */

	Bitmapset  *rewindPlanIDs;	/* indices of subplans that require REWIND */

	List	   *rowMarks;		/* a list of PlanRowMark's */

	List	   *relationOids;	/* OIDs of relations the plan depends on */

	List	   *invalItems;		/* other dependencies, as PlanInvalItems */

	int			nParamExec;		/* number of PARAM_EXEC Params used */

	Node	   *utilityStmt;	/* non-null if this is utility stmt */

	/* statement location in source string (copied from Query) */
	int			stmt_location;	/* start location, or -1 if unknown */
	int			stmt_len;		/* length in bytes; 0 means "rest of string" */
} PlannedStmt;

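/*
 * Illustrative sketch only (not part of the original header): how code that
 * receives a PlannedStmt might branch on commandType.  Utility statements
 * carry their statement tree in utilityStmt and have no useful planTree;
 * optimizable statements are executed by walking planTree against rtable.
 * The two callee names below are hypothetical placeholders.
 *
 *	static void
 *	dispatch_planned_stmt(PlannedStmt *pstmt)
 *	{
 *		if (pstmt->commandType == CMD_UTILITY)
 *			handle_utility(pstmt->utilityStmt);			// hypothetical helper
 *		else
 *			execute_plan(pstmt->planTree, pstmt->rtable);	// hypothetical helper
 *	}
 */
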
/* macro for fetching the Plan associated with a SubPlan node */
#define exec_subplan_get_plan(plannedstmt, subplan) \
	((Plan *) list_nth((plannedstmt)->subplans, (subplan)->plan_id - 1))

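/*
 * Illustrative sketch only: fetching the Plan tree that corresponds to a
 * SubPlan expression, and checking whether that subplan was marked as
 * needing REWIND.  This just exercises exec_subplan_get_plan() and the
 * rewindPlanIDs bitmapset above; the surrounding function is a hypothetical
 * example, not executor code.
 *
 *	static Plan *
 *	fetch_subplan(PlannedStmt *pstmt, SubPlan *subplan, bool *needs_rewind)
 *	{
 *		Plan	   *plan = exec_subplan_get_plan(pstmt, subplan);
 *
 *		// plan_id is 1-based, matching the bit numbers in rewindPlanIDs
 *		*needs_rewind = bms_is_member(subplan->plan_id, pstmt->rewindPlanIDs);
 *		return plan;
 *	}
 */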

/* ----------------
 *		Plan node
 *
 * All plan nodes "derive" from the Plan structure by having the
 * Plan structure as the first field.  This ensures that everything works
 * when nodes are cast to Plan's.  (node pointers are frequently cast to Plan*
 * when passed around generically in the executor)
 *
 * We never actually instantiate any Plan nodes; this is just the common
 * abstract superclass for all Plan-type nodes.
 * ----------------
 */
typedef struct Plan
{
	NodeTag		type;

	/*
	 * estimated execution costs for plan (see costsize.c for more info)
	 */
	Cost		startup_cost;	/* cost expended before fetching any tuples */
	Cost		total_cost;		/* total cost (assuming all tuples fetched) */

	/*
	 * planner's estimate of result size of this plan step
	 */
	double		plan_rows;		/* number of rows plan is expected to emit */
	int			plan_width;		/* average row width in bytes */

	/*
	 * information needed for parallel query
	 */
	bool		parallel_aware; /* engage parallel-aware logic? */
	bool		parallel_safe;	/* OK to use as part of parallel plan? */

	/*
	 * Common structural data for all Plan types.
	 */
	int			plan_node_id;	/* unique across entire final plan tree */
	List	   *targetlist;		/* target list to be computed at this node */
	List	   *qual;			/* implicitly-ANDed qual conditions */
	struct Plan *lefttree;		/* input plan tree(s) */
	struct Plan *righttree;
	List	   *initPlan;		/* Init Plan nodes (un-correlated expr
								 * subselects) */

	/*
	 * Information for management of parameter-change-driven rescanning
	 *
	 * extParam includes the paramIDs of all external PARAM_EXEC params
	 * affecting this plan node or its children.  setParam params from the
	 * node's initPlans are not included, but their extParams are.
	 *
	 * allParam includes all the extParam paramIDs, plus the IDs of local
	 * params that affect the node (i.e., the setParams of its initplans).
	 * These are _all_ the PARAM_EXEC params that affect this node.
	 */
	Bitmapset  *extParam;
	Bitmapset  *allParam;
} Plan;

/* ----------------
 *	these macros are defined to avoid confusion over "left" and "right"
 *	versus "inner" and "outer".  The convention is that the "left" plan is
 *	the "outer" plan and the "right" plan is the "inner" plan, but these
 *	names make the code more readable.
 * ----------------
 */
#define innerPlan(node)			(((Plan *)(node))->righttree)
#define outerPlan(node)			(((Plan *)(node))->lefttree)

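/*
 * Illustrative sketch only: a minimal recursive walk over a Plan tree using
 * the lefttree/righttree links (equivalently the outerPlan()/innerPlan()
 * macros above).  Real tree walkers must also descend into node-specific
 * substructure such as Append.appendplans or SubqueryScan.subplan; this
 * sketch deliberately ignores those cases.
 *
 *	static int
 *	plan_tree_depth(const Plan *plan)
 *	{
 *		int			outer_depth;
 *		int			inner_depth;
 *
 *		if (plan == NULL)
 *			return 0;
 *		outer_depth = plan_tree_depth(plan->lefttree);
 *		inner_depth = plan_tree_depth(plan->righttree);
 *		return 1 + (outer_depth > inner_depth ? outer_depth : inner_depth);
 *	}
 */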

/* ----------------
 *	 Result node -
 *		If no outer plan, evaluate a variable-free targetlist.
 *		If outer plan, return tuples from outer plan (after a level of
 *		projection as shown by targetlist).
 *
 * If resconstantqual isn't NULL, it represents a one-time qualification
 * test (i.e., one that doesn't depend on any variables from the outer plan,
 * so needs to be evaluated only once).
 * ----------------
 */
typedef struct Result
{
	Plan		plan;
	Node	   *resconstantqual;
} Result;

/* ----------------
 *	 ProjectSet node -
 *		Apply a projection that includes set-returning functions to the
 *		output tuples of the outer plan.
 * ----------------
 */
typedef struct ProjectSet
{
	Plan		plan;
} ProjectSet;

/* ----------------
 *	 ModifyTable node -
 *		Apply rows produced by subplan(s) to result table(s),
 *		by inserting, updating, or deleting.
 *
 * Note that rowMarks and epqParam are presumed to be valid for all the
 * subplan(s); they can't contain any info that varies across subplans.
 * ----------------
 */
typedef struct ModifyTable
{
	Plan		plan;
	CmdType		operation;		/* INSERT, UPDATE, or DELETE */
	bool		canSetTag;		/* do we set the command tag/es_processed? */
	Index		nominalRelation;	/* Parent RT index for use of EXPLAIN */
	/* RT indexes of non-leaf tables in a partition tree */
	List	   *partitioned_rels;
	List	   *resultRelations;	/* integer list of RT indexes */
	int			resultRelIndex; /* index of first resultRel in plan's list */
	int			rootResultRelIndex; /* index of the partitioned table root */
	List	   *plans;			/* plan(s) producing source data */
	List	   *withCheckOptionLists;	/* per-target-table WCO lists */
	List	   *returningLists; /* per-target-table RETURNING tlists */
	List	   *fdwPrivLists;	/* per-target-table FDW private data lists */
	Bitmapset  *fdwDirectModifyPlans;	/* indices of FDW DM plans */
	List	   *rowMarks;		/* PlanRowMarks (non-locking only) */
	int			epqParam;		/* ID of Param for EvalPlanQual re-eval */
	OnConflictAction onConflictAction;	/* ON CONFLICT action */
	List	   *arbiterIndexes; /* List of ON CONFLICT arbiter index OIDs  */
	List	   *onConflictSet;	/* SET for INSERT ON CONFLICT DO UPDATE */
	Node	   *onConflictWhere;	/* WHERE for ON CONFLICT UPDATE */
	Index		exclRelRTI;		/* RTI of the EXCLUDED pseudo relation */
	List	   *exclRelTlist;	/* tlist of the EXCLUDED pseudo relation */
} ModifyTable;

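/*
 * Illustrative sketch only: walking a ModifyTable node's target relations.
 * resultRelations is an integer list of range-table indexes, and
 * returningLists (when not NIL) is a parallel list holding one RETURNING
 * targetlist per target relation.  The per-relation callee is hypothetical.
 *
 *	static void
 *	report_target_rels(ModifyTable *node)
 *	{
 *		ListCell   *lc;
 *		int			i = 0;
 *
 *		foreach(lc, node->resultRelations)
 *		{
 *			int			rti = lfirst_int(lc);
 *			List	   *rlist = NIL;
 *
 *			if (node->returningLists != NIL)
 *				rlist = (List *) list_nth(node->returningLists, i);
 *			note_target_rel(rti, rlist);	// hypothetical helper
 *			i++;
 *		}
 *	}
 */
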
/* ----------------
 *	 Append node -
 *		Generate the concatenation of the results of sub-plans.
 * ----------------
 */
typedef struct Append
{
	Plan		plan;
	/* RT indexes of non-leaf tables in a partition tree */
	List	   *partitioned_rels;
	List	   *appendplans;
} Append;

/* ----------------
 *	 MergeAppend node -
 *		Merge the results of pre-sorted sub-plans to preserve the ordering.
 * ----------------
 */
typedef struct MergeAppend
{
	Plan		plan;
	/* RT indexes of non-leaf tables in a partition tree */
	List	   *partitioned_rels;
	List	   *mergeplans;
	/* remaining fields are just like the sort-key info in struct Sort */
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
} MergeAppend;

/* ----------------
 *	RecursiveUnion node -
 *		Generate a recursive union of two subplans.
 *
 * The "outer" subplan is always the non-recursive term, and the "inner"
 * subplan is the recursive term.
 * ----------------
 */
typedef struct RecursiveUnion
{
	Plan		plan;
	int			wtParam;		/* ID of Param representing work table */
	/* Remaining fields are zero/null in UNION ALL case */
	int			numCols;		/* number of columns to check for
								 * duplicate-ness */
	AttrNumber *dupColIdx;		/* their indexes in the target list */
	Oid		   *dupOperators;	/* equality operators to compare with */
	long		numGroups;		/* estimated number of groups in input */
} RecursiveUnion;

/* ----------------
 *	 BitmapAnd node -
 *		Generate the intersection of the results of sub-plans.
 *
 * The subplans must be of types that yield tuple bitmaps.  The targetlist
 * and qual fields of the plan are unused and are always NIL.
 * ----------------
 */
typedef struct BitmapAnd
{
	Plan		plan;
	List	   *bitmapplans;
} BitmapAnd;

/* ----------------
 *	 BitmapOr node -
 *		Generate the union of the results of sub-plans.
 *
 * The subplans must be of types that yield tuple bitmaps.  The targetlist
 * and qual fields of the plan are unused and are always NIL.
 * ----------------
 */
typedef struct BitmapOr
{
	Plan		plan;
	bool		isshared;
	List	   *bitmapplans;
} BitmapOr;

/*
 * ==========
 * Scan nodes
 * ==========
 */
typedef struct Scan
{
	Plan		plan;
	Index		scanrelid;		/* relid is index into the range table */
} Scan;

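/*
 * Illustrative sketch only: scanrelid is a 1-based index into the
 * PlannedStmt's rtable, so the RangeTblEntry for a scan can be fetched with
 * list_nth (or with the rt_fetch() macro from parser/parsetree.h, which does
 * the same thing).  RangeTblEntry itself lives in parsenodes.h, hence the
 * bare struct pointer here.
 *
 *	static struct RangeTblEntry *
 *	scan_get_rte(Scan *scan, PlannedStmt *pstmt)
 *	{
 *		// convert the 1-based scanrelid to a 0-based list position
 *		return (struct RangeTblEntry *) list_nth(pstmt->rtable,
 *												 scan->scanrelid - 1);
 *	}
 */
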
/* ----------------
 *		sequential scan node
 * ----------------
 */
typedef Scan SeqScan;

/* ----------------
 *		table sample scan node
 * ----------------
 */
typedef struct SampleScan
{
	Scan		scan;
	/* use struct pointer to avoid including parsenodes.h here */
	struct TableSampleClause *tablesample;
} SampleScan;

/* ----------------
 *		index scan node
 *
 * indexqualorig is an implicitly-ANDed list of index qual expressions, each
 * in the same form it appeared in the query WHERE condition.  Each should
 * be of the form (indexkey OP comparisonval) or (comparisonval OP indexkey).
 * The indexkey is a Var or expression referencing column(s) of the index's
 * base table.  The comparisonval might be any expression, but it won't use
 * any columns of the base table.  The expressions are ordered by index
 * column position (but items referencing the same index column can appear
 * in any order).  indexqualorig is used at runtime only if we have to recheck
 * a lossy indexqual.
 *
 * indexqual has the same form, but the expressions have been commuted if
 * necessary to put the indexkeys on the left, and the indexkeys are replaced
 * by Var nodes identifying the index columns (their varno is INDEX_VAR and
 * their varattno is the index column number).
 *
 * indexorderbyorig is similarly the original form of any ORDER BY expressions
 * that are being implemented by the index, while indexorderby is modified to
 * have index column Vars on the left-hand side.  Here, multiple expressions
 * must appear in exactly the ORDER BY order, and this is not necessarily the
 * index column order.  Only the expressions are provided, not the auxiliary
 * sort-order information from the ORDER BY SortGroupClauses; it's assumed
 * that the sort ordering is fully determinable from the top-level operators.
 * indexorderbyorig is used at runtime to recheck the ordering, if the index
 * cannot calculate an accurate ordering.  It is also needed for EXPLAIN.
 *
 * indexorderbyops is a list of the OIDs of the operators used to sort the
 * ORDER BY expressions.  This is used together with indexorderbyorig to
 * recheck ordering at run time.  (Note that indexorderby, indexorderbyorig,
 * and indexorderbyops are used for amcanorderbyop cases, not amcanorder.)
 *
 * indexorderdir specifies the scan ordering, for indexscans on amcanorder
 * indexes (for other indexes it should be "don't care").
 * ----------------
 */
typedef struct IndexScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	List	   *indexqual;		/* list of index quals (usually OpExprs) */
	List	   *indexqualorig;	/* the same in original form */
	List	   *indexorderby;	/* list of index ORDER BY exprs */
	List	   *indexorderbyorig;	/* the same in original form */
	List	   *indexorderbyops;	/* OIDs of sort ops for ORDER BY exprs */
	ScanDirection indexorderdir;	/* forward or backward or don't care */
} IndexScan;

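/*
 * Illustrative sketch only: in the processed indexqual, each indexkey has
 * been replaced by a Var whose varno is INDEX_VAR and whose varattno is the
 * index column number (both INDEX_VAR and Var come from primnodes.h), so
 * code examining such a qual can recognize the index side of a clause along
 * these lines:
 *
 *	static bool
 *	is_index_column_var(Node *node, AttrNumber *indexcol)
 *	{
 *		if (node && IsA(node, Var) &&
 *			((Var *) node)->varno == INDEX_VAR)
 *		{
 *			*indexcol = ((Var *) node)->varattno;	// index column number
 *			return true;
 *		}
 *		return false;
 *	}
 */
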
/* ----------------
 *		index-only scan node
 *
 * IndexOnlyScan is very similar to IndexScan, but it specifies an
 * index-only scan, in which the data comes from the index not the heap.
 * Because of this, *all* Vars in the plan node's targetlist, qual, and
 * index expressions reference index columns and have varno = INDEX_VAR.
 * Hence we do not need separate indexqualorig and indexorderbyorig lists,
 * since their contents would be equivalent to indexqual and indexorderby.
 *
 * To help EXPLAIN interpret the index Vars for display, we provide
 * indextlist, which represents the contents of the index as a targetlist
 * with one TLE per index column.  Vars appearing in this list reference
 * the base table, and this is the only field in the plan node that may
 * contain such Vars.
 * ----------------
 */
typedef struct IndexOnlyScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	List	   *indexqual;		/* list of index quals (usually OpExprs) */
	List	   *indexorderby;	/* list of index ORDER BY exprs */
	List	   *indextlist;		/* TargetEntry list describing index's cols */
	ScanDirection indexorderdir;	/* forward or backward or don't care */
} IndexOnlyScan;

/* ----------------
 *		bitmap index scan node
 *
 * BitmapIndexScan delivers a bitmap of potential tuple locations;
 * it does not access the heap itself.  The bitmap is used by an
 * ancestor BitmapHeapScan node, possibly after passing through
 * intermediate BitmapAnd and/or BitmapOr nodes to combine it with
 * the results of other BitmapIndexScans.
 *
 * The fields have the same meanings as for IndexScan, except we don't
 * store a direction flag because direction is uninteresting.
 *
 * In a BitmapIndexScan plan node, the targetlist and qual fields are
 * not used and are always NIL.  The indexqualorig field is unused at
 * run time too, but is saved for the benefit of EXPLAIN.
 * ----------------
 */
typedef struct BitmapIndexScan
{
	Scan		scan;
	Oid			indexid;		/* OID of index to scan */
	bool		isshared;		/* Create shared bitmap if set */
	List	   *indexqual;		/* list of index quals (OpExprs) */
	List	   *indexqualorig;	/* the same in original form */
} BitmapIndexScan;

/* ----------------
 *		bitmap sequential scan node
 *
 * This needs a copy of the qual conditions being used by the input index
 * scans because there are various cases where we need to recheck the quals;
 * for example, when the bitmap is lossy about the specific rows on a page
 * that meet the index condition.
 * ----------------
 */
typedef struct BitmapHeapScan
{
	Scan		scan;
	List	   *bitmapqualorig; /* index quals, in standard expr form */
} BitmapHeapScan;

/* ----------------
 *		tid scan node
 *
 * tidquals is an implicitly OR'ed list of qual expressions of the form
 * "CTID = pseudoconstant" or "CTID = ANY(pseudoconstant_array)".
 * ----------------
 */
typedef struct TidScan
{
	Scan		scan;
	List	   *tidquals;		/* qual(s) involving CTID = something */
} TidScan;

/* ----------------
 *		subquery scan node
 *
 * SubqueryScan is for scanning the output of a sub-query in the range table.
 * We often need an extra plan node above the sub-query's plan to perform
 * expression evaluations (which we can't push into the sub-query without
 * risking changing its semantics).  Although we are not scanning a physical
 * relation, we make this a descendant of Scan anyway for code-sharing
 * purposes.
 *
 * Note: we store the sub-plan in the type-specific subplan field, not in
 * the generic lefttree field as you might expect.  This is because we do
 * not want plan-tree-traversal routines to recurse into the subplan without
 * knowing that they are changing Query contexts.
 * ----------------
 */
typedef struct SubqueryScan
{
	Scan		scan;
	Plan	   *subplan;
} SubqueryScan;

/* ----------------
 *		FunctionScan node
 * ----------------
 */
typedef struct FunctionScan
{
	Scan		scan;
	List	   *functions;		/* list of RangeTblFunction nodes */
	bool		funcordinality; /* WITH ORDINALITY */
} FunctionScan;

/* ----------------
 *		ValuesScan node
 * ----------------
 */
typedef struct ValuesScan
{
	Scan		scan;
	List	   *values_lists;	/* list of expression lists */
} ValuesScan;

/* ----------------
 *		TableFunc scan node
 * ----------------
 */
typedef struct TableFuncScan
{
	Scan		scan;
	TableFunc  *tablefunc;		/* table function node */
} TableFuncScan;

/* ----------------
 *		CteScan node
 * ----------------
 */
typedef struct CteScan
{
	Scan		scan;
	int			ctePlanId;		/* ID of init SubPlan for CTE */
	int			cteParam;		/* ID of Param representing CTE output */
} CteScan;

/* ----------------
 *		NamedTuplestoreScan node
 * ----------------
 */
typedef struct NamedTuplestoreScan
{
	Scan		scan;
	char	   *enrname;		/* Name given to Ephemeral Named Relation */
} NamedTuplestoreScan;

/* ----------------
 *		WorkTableScan node
 * ----------------
 */
typedef struct WorkTableScan
{
	Scan		scan;
	int			wtParam;		/* ID of Param representing work table */
} WorkTableScan;

/* ----------------
 *		ForeignScan node
 *
 * fdw_exprs and fdw_private are both under the control of the foreign-data
 * wrapper, but fdw_exprs is presumed to contain expression trees and will
 * be post-processed accordingly by the planner; fdw_private won't be.
 * Note that everything in both lists must be copiable by copyObject().
 * One way to store an arbitrary blob of bytes is to represent it as a bytea
 * Const.  Usually, though, you'll be better off choosing a representation
 * that can be dumped usefully by nodeToString().
 *
 * fdw_scan_tlist is a targetlist describing the contents of the scan tuple
 * returned by the FDW; it can be NIL if the scan tuple matches the declared
 * rowtype of the foreign table, which is the normal case for a simple foreign
 * table scan.  (If the plan node represents a foreign join, fdw_scan_tlist
 * is required since there is no rowtype available from the system catalogs.)
 * When fdw_scan_tlist is provided, Vars in the node's tlist and quals must
 * have varno INDEX_VAR, and their varattnos correspond to resnos in the
 * fdw_scan_tlist (which are also column numbers in the actual scan tuple).
 * fdw_scan_tlist is never actually executed; it just holds expression trees
 * describing what is in the scan tuple's columns.
 *
 * fdw_recheck_quals should contain any quals which the core system passed to
 * the FDW but which were not added to scan.plan.qual; that is, it should
 * contain the quals being checked remotely.  This is needed for correct
 * behavior during EvalPlanQual rechecks.
 *
 * When the plan node represents a foreign join, scan.scanrelid is zero and
 * fs_relids must be consulted to identify the join relation.  (fs_relids
 * is valid for simple scans as well, but will always match scan.scanrelid.)
 * ----------------
 */
typedef struct ForeignScan
{
	Scan		scan;
	CmdType		operation;		/* SELECT/INSERT/UPDATE/DELETE */
	Oid			fs_server;		/* OID of foreign server */
	List	   *fdw_exprs;		/* expressions that FDW may evaluate */
	List	   *fdw_private;	/* private data for FDW */
	List	   *fdw_scan_tlist; /* optional tlist describing scan tuple */
	List	   *fdw_recheck_quals;	/* original quals not in scan.plan.qual */
	Bitmapset  *fs_relids;		/* RTIs generated by this scan */
	bool		fsSystemCol;	/* true if any "system column" is needed */
} ForeignScan;

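/*
 * Illustrative sketch only: one way an FDW might package data into
 * fdw_private at plan time.  Everything placed there must survive
 * copyObject() and nodeToString(), so plain node types such as the String
 * and Integer Values built by makeString()/makeInteger() (from
 * nodes/value.h) are a common choice.  The field layout and the variable
 * names below are purely an example, not a convention imposed by the core
 * system.
 *
 *	fscan->fdw_private = list_make2(makeString(pstrdup(remote_sql)),
 *									makeInteger(fetch_size));
 *
 * At execution time the FDW can unpack the same list positionally, for
 * instance with linitial() and lsecond().
 */
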
/* ----------------
 *	   CustomScan node
 *
 * The comments for ForeignScan's fdw_exprs, fdw_private, fdw_scan_tlist,
 * and fs_relids fields apply equally to CustomScan's custom_exprs,
 * custom_private, custom_scan_tlist, and custom_relids fields.  The
 * convention of setting scan.scanrelid to zero for joins applies as well.
 *
 * Note that since Plan trees can be copied, custom scan providers *must*
 * fit all plan data they need into those fields; embedding CustomScan in
 * a larger struct will not work.
 * ----------------
 */
struct CustomScanMethods;

typedef struct CustomScan
{
	Scan		scan;
	uint32		flags;			/* mask of CUSTOMPATH_* flags, see
								 * nodes/extensible.h */
	List	   *custom_plans;	/* list of Plan nodes, if any */
	List	   *custom_exprs;	/* expressions that custom code may evaluate */
	List	   *custom_private; /* private data for custom code */
	List	   *custom_scan_tlist;	/* optional tlist describing scan tuple */
	Bitmapset  *custom_relids;	/* RTIs generated by this scan */
	const struct CustomScanMethods *methods;
} CustomScan;

/*
 * ==========
 * Join nodes
 * ==========
 */

/* ----------------
 *		Join node
 *
 * jointype:	rule for joining tuples from left and right subtrees
 * inner_unique: each outer tuple can match to no more than one inner tuple
 * joinqual:	qual conditions that came from JOIN/ON or JOIN/USING
 *				(plan.qual contains conditions that came from WHERE)
 *
 * When jointype is INNER, joinqual and plan.qual are semantically
 * interchangeable.  For OUTER jointypes, the two are *not* interchangeable;
 * only joinqual is used to determine whether a match has been found for
 * the purpose of deciding whether to generate null-extended tuples.
 * (But plan.qual is still applied before actually returning a tuple.)
 * For an outer join, only joinquals are allowed to be used as the merge
 * or hash condition of a merge or hash join.
 *
 * inner_unique is set if the joinquals are such that no more than one inner
 * tuple could match any given outer tuple.  This allows the executor to
 * skip searching for additional matches.  (This must be provable from just
 * the joinquals, ignoring plan.qual, due to where the executor tests it.)
 * ----------------
 */
typedef struct Join
{
	Plan		plan;
	JoinType	jointype;
	bool		inner_unique;
	List	   *joinqual;		/* JOIN quals (in addition to plan.qual) */
} Join;

/* ----------------
 *		nest loop join node
 *
 * The nestParams list identifies any executor Params that must be passed
 * into execution of the inner subplan carrying values from the current row
 * of the outer subplan.  Currently we restrict these values to be simple
 * Vars, but perhaps someday that'd be worth relaxing.  (Note: during plan
 * creation, the paramval can actually be a PlaceHolderVar expression; but it
 * must be a Var with varno OUTER_VAR by the time it gets to the executor.)
 * ----------------
 */
typedef struct NestLoop
{
	Join		join;
	List	   *nestParams;		/* list of NestLoopParam nodes */
} NestLoop;

typedef struct NestLoopParam
{
	NodeTag		type;
	int			paramno;		/* number of the PARAM_EXEC Param to set */
	Var		   *paramval;		/* outer-relation Var to assign to Param */
} NestLoopParam;

/* ----------------
 *		merge join node
 *
 * The expected ordering of each mergeable column is described by a btree
 * opfamily OID, a collation OID, a direction (BTLessStrategyNumber or
 * BTGreaterStrategyNumber) and a nulls-first flag.  Note that the two sides
 * of each mergeclause may be of different datatypes, but they are ordered the
 * same way according to the common opfamily and collation.  The operator in
 * each mergeclause must be an equality operator of the indicated opfamily.
 * ----------------
 */
typedef struct MergeJoin
{
	Join		join;
	bool		skip_mark_restore;	/* Can we skip mark/restore calls? */
	List	   *mergeclauses;	/* mergeclauses as expression trees */
	/* these are arrays, but have the same length as the mergeclauses list: */
	Oid		   *mergeFamilies;	/* per-clause OIDs of btree opfamilies */
	Oid		   *mergeCollations;	/* per-clause OIDs of collations */
	int		   *mergeStrategies;	/* per-clause ordering (ASC or DESC) */
	bool	   *mergeNullsFirst;	/* per-clause nulls ordering */
} MergeJoin;

/* ----------------
 *		hash join node
 * ----------------
 */
typedef struct HashJoin
{
	Join		join;
	List	   *hashclauses;
} HashJoin;

/* ----------------
 *		materialization node
 * ----------------
 */
typedef struct Material
{
	Plan		plan;
} Material;

/* ----------------
 *		sort node
 * ----------------
 */
typedef struct Sort
{
	Plan		plan;
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
} Sort;

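/*
 * Illustrative sketch only: the sort-key fields above are parallel arrays of
 * length numCols, a layout shared with MergeAppend and GatherMerge.  A
 * reader iterates them together; the per-column callee is hypothetical.
 *
 *	static void
 *	describe_sort_keys(Sort *sort)
 *	{
 *		int			i;
 *
 *		for (i = 0; i < sort->numCols; i++)
 *			describe_sort_key(sort->sortColIdx[i],		// tlist resno
 *							  sort->sortOperators[i],	// sort operator OID
 *							  sort->collations[i],		// collation OID
 *							  sort->nullsFirst[i]);		// NULLS FIRST?
 *	}
 */
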
/* ---------------
 *	 group node -
 *		Used for queries with GROUP BY (but no aggregates) specified.
 *		The input must be presorted according to the grouping columns.
 * ---------------
 */
typedef struct Group
{
	Plan		plan;
	int			numCols;		/* number of grouping columns */
	AttrNumber *grpColIdx;		/* their indexes in the target list */
	Oid		   *grpOperators;	/* equality operators to compare with */
} Group;

/* ---------------
 *		aggregate node
 *
 * An Agg node implements plain or grouped aggregation.  For grouped
 * aggregation, we can work with presorted input or unsorted input;
 * the latter strategy uses an internal hashtable.
 *
 * Notice the lack of any direct info about the aggregate functions to be
 * computed.  They are found by scanning the node's tlist and quals during
 * executor startup.  (It is possible that there are no aggregate functions;
 * this could happen if they get optimized away by constant-folding, or if
 * we are using the Agg node to implement hash-based grouping.)
 * ---------------
 */
typedef struct Agg
{
	Plan		plan;
	AggStrategy aggstrategy;	/* basic strategy, see nodes.h */
	AggSplit	aggsplit;		/* agg-splitting mode, see nodes.h */
	int			numCols;		/* number of grouping columns */
	AttrNumber *grpColIdx;		/* their indexes in the target list */
	Oid		   *grpOperators;	/* equality operators to compare with */
	long		numGroups;		/* estimated number of groups in input */
	Bitmapset  *aggParams;		/* IDs of Params used in Aggref inputs */
	/* Note: planner provides numGroups & aggParams only in HASHED/MIXED case */
	List	   *groupingSets;	/* grouping sets to use */
	List	   *chain;			/* chained Agg/Sort nodes */
} Agg;

/* ----------------
 *		window aggregate node
 * ----------------
 */
typedef struct WindowAgg
{
	Plan		plan;
	Index		winref;			/* ID referenced by window functions */
	int			partNumCols;	/* number of columns in partition clause */
	AttrNumber *partColIdx;		/* their indexes in the target list */
	Oid		   *partOperators;	/* equality operators for partition columns */
	int			ordNumCols;		/* number of columns in ordering clause */
	AttrNumber *ordColIdx;		/* their indexes in the target list */
	Oid		   *ordOperators;	/* equality operators for ordering columns */
	int			frameOptions;	/* frame_clause options, see WindowDef */
	Node	   *startOffset;	/* expression for starting bound, if any */
	Node	   *endOffset;		/* expression for ending bound, if any */
} WindowAgg;

/* ----------------
 *		unique node
 * ----------------
 */
typedef struct Unique
{
	Plan		plan;
	int			numCols;		/* number of columns to check for uniqueness */
	AttrNumber *uniqColIdx;		/* their indexes in the target list */
	Oid		   *uniqOperators;	/* equality operators to compare with */
} Unique;

/* ------------
 *		gather node
 *
 * Note: rescan_param is the ID of a PARAM_EXEC parameter slot.  That slot
 * will never actually contain a value, but the Gather node must flag it as
 * having changed whenever it is rescanned.  The child parallel-aware scan
 * nodes are marked as depending on that parameter, so that the rescan
 * machinery is aware that their output is likely to change across rescans.
 * In some cases we don't need a rescan Param, so rescan_param is set to -1.
 * ------------
 */
typedef struct Gather
{
	Plan		plan;
	int			num_workers;	/* planned number of worker processes */
	int			rescan_param;	/* ID of Param that signals a rescan, or -1 */
	bool		single_copy;	/* don't execute plan more than once */
	bool		invisible;		/* suppress EXPLAIN display (for testing)? */
} Gather;

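/*
 * Illustrative sketch only: when a Gather node is rescanned it does not put
 * a value into rescan_param; it merely reports that parameter as changed so
 * that parallel-aware child scans, which list it among their parameter
 * dependencies, know to rebuild their shared state.  In executor terms that
 * amounts to something like
 *
 *	node->ps.chgParam = bms_add_member(node->ps.chgParam,
 *									   gather->rescan_param);
 *
 * where "node" would be the node's executor state (see execnodes.h) and
 * bms_add_member comes from nodes/bitmapset.h.
 */
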
/* ------------
 *		gather merge node
 * ------------
 */
typedef struct GatherMerge
{
	Plan		plan;
	int			num_workers;	/* planned number of worker processes */
	int			rescan_param;	/* ID of Param that signals a rescan, or -1 */
	/* remaining fields are just like the sort-key info in struct Sort */
	int			numCols;		/* number of sort-key columns */
	AttrNumber *sortColIdx;		/* their indexes in the target list */
	Oid		   *sortOperators;	/* OIDs of operators to sort them by */
	Oid		   *collations;		/* OIDs of collations */
	bool	   *nullsFirst;		/* NULLS FIRST/LAST directions */
} GatherMerge;

/* ----------------
 *		hash build node
 *
 * If the executor is supposed to try to apply skew join optimization, then
 * skewTable/skewColumn/skewInherit identify the outer relation's join key
 * column, from which the relevant MCV statistics can be fetched.
 * ----------------
 */
typedef struct Hash
{
	Plan		plan;
	Oid			skewTable;		/* outer join key's table OID, or InvalidOid */
	AttrNumber	skewColumn;		/* outer join key's column #, or zero */
	bool		skewInherit;	/* is outer join rel an inheritance tree? */
	/* all other info is in the parent HashJoin node */
} Hash;

/* ----------------
 *		setop node
 * ----------------
 */
typedef struct SetOp
{
	Plan		plan;
	SetOpCmd	cmd;			/* what to do, see nodes.h */
	SetOpStrategy strategy;		/* how to do it, see nodes.h */
	int			numCols;		/* number of columns to check for
								 * duplicate-ness */
	AttrNumber *dupColIdx;		/* their indexes in the target list */
	Oid		   *dupOperators;	/* equality operators to compare with */
	AttrNumber	flagColIdx;		/* where is the flag column, if any */
	int			firstFlag;		/* flag value for first input relation */
	long		numGroups;		/* estimated number of groups in input */
} SetOp;

/* ----------------
 *		lock-rows node
 *
 * rowMarks identifies the rels to be locked by this node; it should be
 * a subset of the rowMarks listed in the top-level PlannedStmt.
 * epqParam is a Param that all scan nodes below this one must depend on.
 * It is used to force re-evaluation of the plan during EvalPlanQual.
 * ----------------
 */
typedef struct LockRows
{
	Plan		plan;
	List	   *rowMarks;		/* a list of PlanRowMark's */
	int			epqParam;		/* ID of Param for EvalPlanQual re-eval */
} LockRows;

/* ----------------
 *		limit node
 *
 * Note: as of Postgres 8.2, the offset and count expressions are expected
 * to yield int8, rather than int4 as before.
 * ----------------
 */
typedef struct Limit
{
	Plan		plan;
	Node	   *limitOffset;	/* OFFSET parameter, or NULL if none */
	Node	   *limitCount;		/* COUNT parameter, or NULL if none */
} Limit;

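/*
 * Illustrative sketch only: because limitOffset and limitCount yield int8,
 * code that evaluates them should extract the result with DatumGetInt64()
 * rather than DatumGetInt32().  The evaluation step is elided here; "val"
 * stands for the already-computed Datum and "isNull" for its null flag, and
 * a NULL result is conventionally read as "no offset" / "no limit".
 *
 *	int64		offset = 0;
 *
 *	if (!isNull)
 *		offset = DatumGetInt64(val);	// int8, not int4, since 8.2
 */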

/*
 * RowMarkType -
 *	  enums for types of row-marking operations
 *
 * The first four of these values represent different lock strengths that
 * we can take on tuples according to SELECT FOR [KEY] UPDATE/SHARE requests.
 * We support these on regular tables, as well as on foreign tables whose FDWs
 * report support for late locking.  For other foreign tables, any locking
 * that might be done for such requests must happen during the initial row
 * fetch; their FDWs provide no mechanism for going back to lock a row later.
 * This means that the semantics will be a bit different than for a local
 * table; in particular we are likely to lock more rows than would be locked
 * locally, since remote rows will be locked even if they then fail
 * locally-checked restriction or join quals.  However, the prospect of
 * doing a separate remote query to lock each selected row is usually pretty
 * unappealing, so early locking remains a credible design choice for FDWs.
 *
 * When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we have to uniquely
 * identify all the source rows, not only those from the target relations, so
 * that we can perform EvalPlanQual rechecking at need.  For plain tables we
 * can just fetch the TID, much as for a target relation; this case is
 * represented by ROW_MARK_REFERENCE.  Otherwise (for example for VALUES or
 * FUNCTION scans) we have to copy the whole row value.  ROW_MARK_COPY is
 * pretty inefficient, since most of the time we'll never need the data; but
 * fortunately the overhead is usually not performance-critical in practice.
 * By default we use ROW_MARK_COPY for foreign tables, but if the FDW has
 * a concept of rowid it can request to use ROW_MARK_REFERENCE instead.
 * (Again, this probably doesn't make sense if a physical remote fetch is
 * needed, but for FDWs that map to local storage it might be credible.)
 */
typedef enum RowMarkType
{
	ROW_MARK_EXCLUSIVE,			/* obtain exclusive tuple lock */
	ROW_MARK_NOKEYEXCLUSIVE,	/* obtain no-key exclusive tuple lock */
	ROW_MARK_SHARE,				/* obtain shared tuple lock */
	ROW_MARK_KEYSHARE,			/* obtain keyshare tuple lock */
	ROW_MARK_REFERENCE,			/* just fetch the TID, don't lock it */
	ROW_MARK_COPY				/* physically copy the row value */
} RowMarkType;

#define RowMarkRequiresRowShareLock(marktype)  ((marktype) <= ROW_MARK_KEYSHARE)

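/*
 * Illustrative sketch only: RowMarkRequiresRowShareLock() distinguishes the
 * four true locking marks from ROW_MARK_REFERENCE and ROW_MARK_COPY, which
 * merely identify rows.  For example, given the markType of one of the
 * PlanRowMarks defined below, a caller deciding what table-level lock a
 * marked relation needs could write:
 *
 *	LOCKMODE	lockmode;
 *
 *	if (RowMarkRequiresRowShareLock(rc->markType))
 *		lockmode = RowShareLock;		// FOR [KEY] UPDATE/SHARE variants
 *	else
 *		lockmode = AccessShareLock;		// reference/copy marks only read
 *
 * (LOCKMODE and the lock-level constants come from storage/lockdefs.h.)
 */
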
/*
 * PlanRowMark -
 *	   plan-time representation of FOR [KEY] UPDATE/SHARE clauses
 *
 * When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we create a separate
 * PlanRowMark node for each non-target relation in the query.  Relations that
 * are not specified as FOR UPDATE/SHARE are marked ROW_MARK_REFERENCE (if
 * regular tables or supported foreign tables) or ROW_MARK_COPY (if not).
 *
 * Initially all PlanRowMarks have rti == prti and isParent == false.
 * When the planner discovers that a relation is the root of an inheritance
 * tree, it sets isParent true, and adds an additional PlanRowMark to the
 * list for each child relation (including the target rel itself in its role
 * as a child).  isParent is also set to true for the partitioned child
 * relations, which, like the root parent, are not scanned.  The child
 * entries have rti == child rel's RT index and prti == parent's RT index,
 * and can therefore be recognized as children by the fact that prti != rti.
 * The parent's allMarkTypes field gets the OR of (1<<markType) across all
 * its children (this definition allows children to use different markTypes).
 *
 * The planner also adds resjunk output columns to the plan that carry
 * information sufficient to identify the locked or fetched rows.  When
 * markType != ROW_MARK_COPY, these columns are named
 *		tableoid%u			OID of table
 *		ctid%u				TID of row
 * The tableoid column is only present for an inheritance hierarchy.
 * When markType == ROW_MARK_COPY, there is instead a single column named
 *		wholerow%u			whole-row value of relation
 * (An inheritance hierarchy could have all three resjunk output columns,
 * if some children use a different markType than others.)
 * In all three cases, %u represents the rowmark ID number (rowmarkId).
 * This number is unique within a plan tree, except that child relation
 * entries copy their parent's rowmarkId.  (Assigning unique numbers
 * means we needn't renumber rowmarkIds when flattening subqueries, which
 * would require finding and renaming the resjunk columns as well.)
 * Note this means that all tables in an inheritance hierarchy share the
 * same resjunk column names.  However, in an inherited UPDATE/DELETE the
 * columns could have different physical column numbers in each subplan.
 */
typedef struct PlanRowMark
{
	NodeTag		type;
	Index		rti;			/* range table index of markable relation */
	Index		prti;			/* range table index of parent relation */
	Index		rowmarkId;		/* unique identifier for resjunk columns */
	RowMarkType markType;		/* see enum above */
	int			allMarkTypes;	/* OR of (1<<markType) for all children */
	LockClauseStrength strength;	/* LockingClause's strength, or LCS_NONE */
	LockWaitPolicy waitPolicy;	/* NOWAIT and SKIP LOCKED options */
	bool		isParent;		/* true if this is a "dummy" parent entry */
} PlanRowMark;

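/*
 * Illustrative sketch only: the resjunk column names described above are
 * built by formatting rowmarkId into the fixed prefixes, along these lines
 * (this mirrors what the planner's targetlist preprocessing does):
 *
 *	char		resname[32];
 *
 *	if (rc->markType == ROW_MARK_COPY)
 *		snprintf(resname, sizeof(resname), "wholerow%u", rc->rowmarkId);
 *	else
 *		snprintf(resname, sizeof(resname), "ctid%u", rc->rowmarkId);
 *
 * The executor later locates these columns by the same names, which is why
 * child relations must reuse their parent's rowmarkId.
 */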

/*
 * Plan invalidation info
 *
 * We track the objects on which a PlannedStmt depends in two ways:
 * relations are recorded as a simple list of OIDs, and everything else
 * is represented as a list of PlanInvalItems.  A PlanInvalItem is designed
 * to be used with the syscache invalidation mechanism, so it identifies a
 * system catalog entry by cache ID and hash value.
 */
typedef struct PlanInvalItem
{
	NodeTag		type;
	int			cacheId;		/* a syscache ID, see utils/syscache.h */
	uint32		hashValue;		/* hash value of object's cache lookup key */
} PlanInvalItem;

#endif							/* PLANNODES_H */