1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *	  top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *	ExecutorStart()
8  *	ExecutorRun()
9  *	ExecutorFinish()
10  *	ExecutorEnd()
11  *
12  *	These four procedures are the external interface to the executor.
13  *	In each case, the query descriptor is required as an argument.
14  *
15  *	ExecutorStart must be called at the beginning of execution of any
16  *	query plan and ExecutorEnd must always be called at the end of
17  *	execution of a plan (unless it is aborted due to error).
18  *
19  *	ExecutorRun accepts direction and count arguments that specify whether
20  *	the plan is to be executed forwards, backwards, and for how many tuples.
21  *	In some cases ExecutorRun may be called multiple times to process all
22  *	the tuples for a plan.  It is also acceptable to stop short of executing
23  *	the whole plan (but only if it is a SELECT).
24  *
25  *	ExecutorFinish must be called after the final ExecutorRun call and
26  *	before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
27  *	which should also omit ExecutorRun.
28  *
29  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
30  * Portions Copyright (c) 1994, Regents of the University of California
31  *
32  *
33  * IDENTIFICATION
34  *	  src/backend/executor/execMain.c
35  *
36  *-------------------------------------------------------------------------
37  */
38 #include "postgres.h"
39 
40 #include "access/heapam.h"
41 #include "access/htup_details.h"
42 #include "access/sysattr.h"
43 #include "access/tableam.h"
44 #include "access/transam.h"
45 #include "access/xact.h"
46 #include "catalog/namespace.h"
47 #include "catalog/pg_publication.h"
48 #include "commands/matview.h"
49 #include "commands/trigger.h"
50 #include "executor/execdebug.h"
51 #include "executor/nodeSubplan.h"
52 #include "foreign/fdwapi.h"
53 #include "jit/jit.h"
54 #include "mb/pg_wchar.h"
55 #include "miscadmin.h"
56 #include "parser/parsetree.h"
57 #include "storage/bufmgr.h"
58 #include "storage/lmgr.h"
59 #include "tcop/utility.h"
60 #include "utils/acl.h"
61 #include "utils/lsyscache.h"
62 #include "utils/memutils.h"
63 #include "utils/partcache.h"
64 #include "utils/rls.h"
65 #include "utils/ruleutils.h"
66 #include "utils/snapmgr.h"
67 
68 
/*
 * Hooks for plugins to get control in ExecutorStart/Run/Finish/End.
 * A plugin that installs one of these normally calls the corresponding
 * standard_Executor* function from within its hook.
 */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* Hook for plugin to get control in ExecCheckRTPerms() */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
						bool use_parallel_mode,
						CmdType operation,
						bool sendTuples,
						uint64 numberTuples,
						ScanDirection direction,
						DestReceiver *dest,
						bool execute_once);
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
									  Bitmapset *modifiedCols,
									  AclMode requiredPerms);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static char *ExecBuildSlotValueDescription(Oid reloid,
										   TupleTableSlot *slot,
										   TupleDesc tupdesc,
										   Bitmapset *modifiedCols,
										   int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, Plan *planTree);

/* end of local decls */
104 
105 
106 /* ----------------------------------------------------------------
107  *		ExecutorStart
108  *
109  *		This routine must be called at the beginning of any execution of any
110  *		query plan
111  *
112  * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
113  * only because some places use QueryDescs for utility commands).  The tupDesc
114  * field of the QueryDesc is filled in to describe the tuples that will be
115  * returned, and the internal fields (estate and planstate) are set up.
116  *
117  * eflags contains flag bits as described in executor.h.
118  *
119  * NB: the CurrentMemoryContext when this is called will become the parent
120  * of the per-query context used for this Executor invocation.
121  *
122  * We provide a function hook variable that lets loadable plugins
123  * get control when ExecutorStart is called.  Such a plugin would
124  * normally call standard_ExecutorStart().
125  *
126  * ----------------------------------------------------------------
127  */
128 void
ExecutorStart(QueryDesc * queryDesc,int eflags)129 ExecutorStart(QueryDesc *queryDesc, int eflags)
130 {
131 	if (ExecutorStart_hook)
132 		(*ExecutorStart_hook) (queryDesc, eflags);
133 	else
134 		standard_ExecutorStart(queryDesc, eflags);
135 }
136 
/*
 * standard_ExecutorStart
 *		Default implementation of ExecutorStart.
 *
 * Performs read-only/parallel-mode safety checks, builds the per-query
 * EState, chooses a command ID for output tuples when the query can write,
 * registers the query's snapshots, sets up the AFTER-trigger statement
 * context (unless suppressed), and finally initializes the plan state tree
 * via InitPlan.  All startup allocation happens in the new per-query
 * memory context.
 */
void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks: queryDesc must not be started already */
	Assert(queryDesc != NULL);
	Assert(queryDesc->estate == NULL);

	/*
	 * If the transaction is read-only, we need to check if any writes are
	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
	 *
	 * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE
	 * would require (a) storing the combocid hash in shared memory, rather
	 * than synchronizing it just once at the start of parallelism, and (b) an
	 * alternative to heap_update()'s reliance on xmax for mutual exclusion.
	 * INSERT may have no such troubles, but we forbid it to simplify the
	 * checks.
	 *
	 * We have lower-level defenses in CommandCounterIncrement and elsewhere
	 * against performing unsafe operations in parallel mode, but this gives a
	 * more user-friendly error message.
	 */
	if ((XactReadOnly || IsInParallelMode()) &&
		!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecCheckXactReadOnly(queryDesc->plannedstmt);

	/*
	 * Build EState, switch into per-query memory context for startup.
	 */
	estate = CreateExecutorState();
	queryDesc->estate = estate;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Fill in external parameters, if any, from queryDesc; and allocate
	 * workspace for internal parameters
	 */
	estate->es_param_list_info = queryDesc->params;

	if (queryDesc->plannedstmt->paramExecTypes != NIL)
	{
		int			nParamExec;

		nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(nParamExec * sizeof(ParamExecData));
	}

	/* source text is needed e.g. for error reporting */
	estate->es_sourceText = queryDesc->sourceText;

	/*
	 * Fill in the query environment, if any, from queryDesc.
	 */
	estate->es_queryEnv = queryDesc->queryEnv;

	/*
	 * If non-read-only query, set the command ID to mark output tuples with
	 */
	switch (queryDesc->operation)
	{
		case CMD_SELECT:

			/*
			 * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
			 * tuples
			 */
			if (queryDesc->plannedstmt->rowMarks != NIL ||
				queryDesc->plannedstmt->hasModifyingCTE)
				estate->es_output_cid = GetCurrentCommandId(true);

			/*
			 * A SELECT without modifying CTEs can't possibly queue triggers,
			 * so force skip-triggers mode. This is just a marginal efficiency
			 * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
			 * all that expensive, but we might as well do it.
			 */
			if (!queryDesc->plannedstmt->hasModifyingCTE)
				eflags |= EXEC_FLAG_SKIP_TRIGGERS;
			break;

		case CMD_INSERT:
		case CMD_DELETE:
		case CMD_UPDATE:
			estate->es_output_cid = GetCurrentCommandId(true);
			break;

		default:
			elog(ERROR, "unrecognized operation code: %d",
				 (int) queryDesc->operation);
			break;
	}

	/*
	 * Copy other important information into the EState.  The snapshots are
	 * registered so they stay valid for the life of this executor instance;
	 * standard_ExecutorEnd unregisters them.
	 */
	estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
	estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
	estate->es_top_eflags = eflags;
	estate->es_instrument = queryDesc->instrument_options;
	estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;

	/*
	 * Set up an AFTER-trigger statement context, unless told not to, or
	 * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
	 */
	if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
		AfterTriggerBeginQuery();

	/*
	 * Initialize the plan state tree
	 */
	InitPlan(queryDesc, eflags);

	MemoryContextSwitchTo(oldcontext);
}
256 
257 /* ----------------------------------------------------------------
258  *		ExecutorRun
259  *
260  *		This is the main routine of the executor module. It accepts
261  *		the query descriptor from the traffic cop and executes the
262  *		query plan.
263  *
264  *		ExecutorStart must have been called already.
265  *
266  *		If direction is NoMovementScanDirection then nothing is done
267  *		except to start up/shut down the destination.  Otherwise,
268  *		we retrieve up to 'count' tuples in the specified direction.
269  *
270  *		Note: count = 0 is interpreted as no portal limit, i.e., run to
271  *		completion.  Also note that the count limit is only applied to
272  *		retrieved tuples, not for instance to those inserted/updated/deleted
273  *		by a ModifyTable plan node.
274  *
275  *		There is no return value, but output tuples (if any) are sent to
276  *		the destination receiver specified in the QueryDesc; and the number
277  *		of tuples processed at the top level can be found in
278  *		estate->es_processed.
279  *
280  *		We provide a function hook variable that lets loadable plugins
281  *		get control when ExecutorRun is called.  Such a plugin would
282  *		normally call standard_ExecutorRun().
283  *
284  * ----------------------------------------------------------------
285  */
286 void
ExecutorRun(QueryDesc * queryDesc,ScanDirection direction,uint64 count,bool execute_once)287 ExecutorRun(QueryDesc *queryDesc,
288 			ScanDirection direction, uint64 count,
289 			bool execute_once)
290 {
291 	if (ExecutorRun_hook)
292 		(*ExecutorRun_hook) (queryDesc, direction, count, execute_once);
293 	else
294 		standard_ExecutorRun(queryDesc, direction, count, execute_once);
295 }
296 
/*
 * standard_ExecutorRun
 *		Default implementation of ExecutorRun.
 *
 * Starts up the destination receiver if tuples will be emitted (SELECT, or
 * any query with RETURNING), drives the plan via ExecutePlan unless the
 * direction is NoMovementScanDirection, then shuts the receiver down again.
 * Overall runtime is accumulated into queryDesc->totaltime when
 * instrumentation is enabled.
 */
void
standard_ExecutorRun(QueryDesc *queryDesc,
					 ScanDirection direction, uint64 count, bool execute_once)
{
	EState	   *estate;
	CmdType		operation;
	DestReceiver *dest;
	bool		sendTuples;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/*
	 * extract information from the query descriptor
	 */
	operation = queryDesc->operation;
	dest = queryDesc->dest;

	/*
	 * startup tuple receiver, if we will be emitting tuples
	 */
	estate->es_processed = 0;

	/* tuples go to the receiver for SELECT or any query with RETURNING */
	sendTuples = (operation == CMD_SELECT ||
				  queryDesc->plannedstmt->hasReturning);

	if (sendTuples)
		dest->rStartup(dest, operation, queryDesc->tupDesc);

	/*
	 * run plan
	 */
	if (!ScanDirectionIsNoMovement(direction))
	{
		/* a query flagged for single execution may not be run twice */
		if (execute_once && queryDesc->already_executed)
			elog(ERROR, "can't re-execute query flagged for single execution");
		queryDesc->already_executed = true;

		ExecutePlan(estate,
					queryDesc->planstate,
					queryDesc->plannedstmt->parallelModeNeeded,
					operation,
					sendTuples,
					count,
					direction,
					dest,
					execute_once);
	}

	/*
	 * shutdown tuple receiver, if we started it
	 */
	if (sendTuples)
		dest->rShutdown(dest);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, estate->es_processed);

	MemoryContextSwitchTo(oldcontext);
}
372 
373 /* ----------------------------------------------------------------
374  *		ExecutorFinish
375  *
376  *		This routine must be called after the last ExecutorRun call.
377  *		It performs cleanup such as firing AFTER triggers.  It is
378  *		separate from ExecutorEnd because EXPLAIN ANALYZE needs to
379  *		include these actions in the total runtime.
380  *
381  *		We provide a function hook variable that lets loadable plugins
382  *		get control when ExecutorFinish is called.  Such a plugin would
383  *		normally call standard_ExecutorFinish().
384  *
385  * ----------------------------------------------------------------
386  */
387 void
ExecutorFinish(QueryDesc * queryDesc)388 ExecutorFinish(QueryDesc *queryDesc)
389 {
390 	if (ExecutorFinish_hook)
391 		(*ExecutorFinish_hook) (queryDesc);
392 	else
393 		standard_ExecutorFinish(queryDesc);
394 }
395 
/*
 * standard_ExecutorFinish
 *		Default implementation of ExecutorFinish.
 *
 * Runs ModifyTable nodes to completion and fires queued AFTER triggers
 * (unless EXEC_FLAG_SKIP_TRIGGERS was given), accumulating the time into
 * queryDesc->totaltime when instrumentation is enabled.  Marks the EState
 * finished so standard_ExecutorEnd can assert correct call ordering.
 */
void
standard_ExecutorFinish(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/* This should be run once and only once per Executor instance */
	Assert(!estate->es_finished);

	/* Switch into per-query memory context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/* Run ModifyTable nodes to completion */
	ExecPostprocessPlan(estate);

	/* Execute queued AFTER triggers, unless told not to */
	if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
		AfterTriggerEndQuery(estate);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, 0);

	MemoryContextSwitchTo(oldcontext);

	/* set the flag only after all of the above succeeded */
	estate->es_finished = true;
}
434 
435 /* ----------------------------------------------------------------
436  *		ExecutorEnd
437  *
438  *		This routine must be called at the end of execution of any
439  *		query plan
440  *
441  *		We provide a function hook variable that lets loadable plugins
442  *		get control when ExecutorEnd is called.  Such a plugin would
443  *		normally call standard_ExecutorEnd().
444  *
445  * ----------------------------------------------------------------
446  */
447 void
ExecutorEnd(QueryDesc * queryDesc)448 ExecutorEnd(QueryDesc *queryDesc)
449 {
450 	if (ExecutorEnd_hook)
451 		(*ExecutorEnd_hook) (queryDesc);
452 	else
453 		standard_ExecutorEnd(queryDesc);
454 }
455 
/*
 * standard_ExecutorEnd
 *		Default implementation of ExecutorEnd.
 *
 * Shuts down the plan state tree, releases the snapshots registered in
 * standard_ExecutorStart, and frees the EState together with its per-query
 * memory context.  Afterwards the QueryDesc's executor-related fields are
 * reset to NULL since they pointed into the freed context.
 */
void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
	 * Assert is needed because ExecutorFinish is new as of 9.1, and callers
	 * might forget to call it.
	 */
	Assert(estate->es_finished ||
		   (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/*
	 * Switch into per-query memory context to run ExecEndPlan
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndPlan(queryDesc->planstate, estate);

	/* do away with our snapshots */
	UnregisterSnapshot(estate->es_snapshot);
	UnregisterSnapshot(estate->es_crosscheck_snapshot);

	/*
	 * Must switch out of context before destroying it
	 */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Release EState and per-query memory context.  This should release
	 * everything the executor has allocated.
	 */
	FreeExecutorState(estate);

	/* Reset queryDesc fields that no longer point to anything */
	queryDesc->tupDesc = NULL;
	queryDesc->estate = NULL;
	queryDesc->planstate = NULL;
	queryDesc->totaltime = NULL;
}
505 
506 /* ----------------------------------------------------------------
507  *		ExecutorRewind
508  *
509  *		This routine may be called on an open queryDesc to rewind it
510  *		to the start.
511  * ----------------------------------------------------------------
512  */
513 void
ExecutorRewind(QueryDesc * queryDesc)514 ExecutorRewind(QueryDesc *queryDesc)
515 {
516 	EState	   *estate;
517 	MemoryContext oldcontext;
518 
519 	/* sanity checks */
520 	Assert(queryDesc != NULL);
521 
522 	estate = queryDesc->estate;
523 
524 	Assert(estate != NULL);
525 
526 	/* It's probably not sensible to rescan updating queries */
527 	Assert(queryDesc->operation == CMD_SELECT);
528 
529 	/*
530 	 * Switch into per-query memory context
531 	 */
532 	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
533 
534 	/*
535 	 * rescan plan
536 	 */
537 	ExecReScan(queryDesc->planstate);
538 
539 	MemoryContextSwitchTo(oldcontext);
540 }
541 
542 
543 /*
544  * ExecCheckRTPerms
545  *		Check access permissions for all relations listed in a range table.
546  *
547  * Returns true if permissions are adequate.  Otherwise, throws an appropriate
548  * error if ereport_on_violation is true, or simply returns false otherwise.
549  *
550  * Note that this does NOT address row level security policies (aka: RLS).  If
551  * rows will be returned to the user as a result of this permission check
552  * passing, then RLS also needs to be consulted (and check_enable_rls()).
553  *
554  * See rewrite/rowsecurity.c.
555  */
556 bool
ExecCheckRTPerms(List * rangeTable,bool ereport_on_violation)557 ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
558 {
559 	ListCell   *l;
560 	bool		result = true;
561 
562 	foreach(l, rangeTable)
563 	{
564 		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
565 
566 		result = ExecCheckRTEPerms(rte);
567 		if (!result)
568 		{
569 			Assert(rte->rtekind == RTE_RELATION);
570 			if (ereport_on_violation)
571 				aclcheck_error(ACLCHECK_NO_PRIV, get_relkind_objtype(get_rel_relkind(rte->relid)),
572 							   get_rel_name(rte->relid));
573 			return false;
574 		}
575 	}
576 
577 	if (ExecutorCheckPerms_hook)
578 		result = (*ExecutorCheckPerms_hook) (rangeTable,
579 											 ereport_on_violation);
580 	return result;
581 }
582 
583 /*
584  * ExecCheckRTEPerms
585  *		Check access permissions for a single RTE.
586  */
/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 *
 * Returns true if the checking user holds every permission bit in
 * rte->requiredPerms, counting both relation-level and (where applicable)
 * column-level grants; returns false otherwise.
 */
static bool
ExecCheckRTEPerms(RangeTblEntry *rte)
{
	AclMode		requiredPerms;
	AclMode		relPerms;
	AclMode		remainingPerms;
	Oid			relOid;
	Oid			userid;

	/*
	 * Only plain-relation RTEs need to be checked here.  Function RTEs are
	 * checked when the function is prepared for execution.  Join, subquery,
	 * and special RTEs need no checks.
	 */
	if (rte->rtekind != RTE_RELATION)
		return true;

	/*
	 * No work if requiredPerms is empty.
	 */
	requiredPerms = rte->requiredPerms;
	if (requiredPerms == 0)
		return true;

	relOid = rte->relid;

	/*
	 * userid to check as: current user unless we have a setuid indication.
	 *
	 * Note: GetUserId() is presently fast enough that there's no harm in
	 * calling it separately for each RTE.  If that stops being true, we could
	 * call it once in ExecCheckRTPerms and pass the userid down from there.
	 * But for now, no need for the extra clutter.
	 */
	userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * We must have *all* the requiredPerms bits, but some of the bits can be
	 * satisfied from column-level rather than relation-level permissions.
	 * First, remove any bits that are satisfied by relation permissions.
	 */
	relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
	remainingPerms = requiredPerms & ~relPerms;
	if (remainingPerms != 0)
	{
		int			col = -1;

		/*
		 * If we lack any permissions that exist only as relation permissions,
		 * we can fail straight away.
		 */
		if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
			return false;

		/*
		 * Check to see if we have the needed privileges at column level.
		 *
		 * Note: failures just report a table-level error; it would be nicer
		 * to report a column-level error if we have some but not all of the
		 * column privileges.
		 */
		if (remainingPerms & ACL_SELECT)
		{
			/*
			 * When the query doesn't explicitly reference any columns (for
			 * example, SELECT COUNT(*) FROM table), allow the query if we
			 * have SELECT on any column of the rel, as per SQL spec.
			 */
			if (bms_is_empty(rte->selectedCols))
			{
				if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
											  ACLMASK_ANY) != ACLCHECK_OK)
					return false;
			}

			while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
			{
				/* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
				AttrNumber	attno = col + FirstLowInvalidHeapAttributeNumber;

				if (attno == InvalidAttrNumber)
				{
					/* Whole-row reference, must have priv on all cols */
					if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
												  ACLMASK_ALL) != ACLCHECK_OK)
						return false;
				}
				else
				{
					if (pg_attribute_aclcheck(relOid, attno, userid,
											  ACL_SELECT) != ACLCHECK_OK)
						return false;
				}
			}
		}

		/*
		 * Basically the same for the mod columns, for both INSERT and UPDATE
		 * privilege as specified by remainingPerms.
		 */
		if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
																	  userid,
																	  rte->insertedCols,
																	  ACL_INSERT))
			return false;

		if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
																	  userid,
																	  rte->updatedCols,
																	  ACL_UPDATE))
			return false;
	}
	return true;
}
701 
702 /*
703  * ExecCheckRTEPermsModified
704  *		Check INSERT or UPDATE access permissions for a single RTE (these
705  *		are processed uniformly).
706  */
707 static bool
ExecCheckRTEPermsModified(Oid relOid,Oid userid,Bitmapset * modifiedCols,AclMode requiredPerms)708 ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
709 						  AclMode requiredPerms)
710 {
711 	int			col = -1;
712 
713 	/*
714 	 * When the query doesn't explicitly update any columns, allow the query
715 	 * if we have permission on any column of the rel.  This is to handle
716 	 * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
717 	 */
718 	if (bms_is_empty(modifiedCols))
719 	{
720 		if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
721 									  ACLMASK_ANY) != ACLCHECK_OK)
722 			return false;
723 	}
724 
725 	while ((col = bms_next_member(modifiedCols, col)) >= 0)
726 	{
727 		/* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
728 		AttrNumber	attno = col + FirstLowInvalidHeapAttributeNumber;
729 
730 		if (attno == InvalidAttrNumber)
731 		{
732 			/* whole-row reference can't happen here */
733 			elog(ERROR, "whole-row update is not implemented");
734 		}
735 		else
736 		{
737 			if (pg_attribute_aclcheck(relOid, attno, userid,
738 									  requiredPerms) != ACLCHECK_OK)
739 				return false;
740 		}
741 	}
742 	return true;
743 }
744 
745 /*
746  * Check that the query does not imply any writes to non-temp tables;
747  * unless we're in parallel mode, in which case don't even allow writes
748  * to temp tables.
749  *
750  * Note: in a Hot Standby this would need to reject writes to temp
751  * tables just as we do in parallel mode; but an HS standby can't have created
752  * any temp tables in the first place, so no need to check that.
753  */
754 static void
ExecCheckXactReadOnly(PlannedStmt * plannedstmt)755 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
756 {
757 	ListCell   *l;
758 
759 	/*
760 	 * Fail if write permissions are requested in parallel mode for table
761 	 * (temp or non-temp), otherwise fail for any non-temp table.
762 	 */
763 	foreach(l, plannedstmt->rtable)
764 	{
765 		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
766 
767 		if (rte->rtekind != RTE_RELATION)
768 			continue;
769 
770 		if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
771 			continue;
772 
773 		if (isTempNamespace(get_rel_namespace(rte->relid)))
774 			continue;
775 
776 		PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
777 	}
778 
779 	if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
780 		PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
781 }
782 
783 
784 /* ----------------------------------------------------------------
785  *		InitPlan
786  *
787  *		Initializes the query plan: open files, allocate storage
788  *		and start up the rule manager
789  * ----------------------------------------------------------------
790  */
791 static void
InitPlan(QueryDesc * queryDesc,int eflags)792 InitPlan(QueryDesc *queryDesc, int eflags)
793 {
794 	CmdType		operation = queryDesc->operation;
795 	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
796 	Plan	   *plan = plannedstmt->planTree;
797 	List	   *rangeTable = plannedstmt->rtable;
798 	EState	   *estate = queryDesc->estate;
799 	PlanState  *planstate;
800 	TupleDesc	tupType;
801 	ListCell   *l;
802 	int			i;
803 
804 	/*
805 	 * Do permissions checks
806 	 */
807 	ExecCheckRTPerms(rangeTable, true);
808 
809 	/*
810 	 * initialize the node's execution state
811 	 */
812 	ExecInitRangeTable(estate, rangeTable);
813 
814 	estate->es_plannedstmt = plannedstmt;
815 
816 	/*
817 	 * Initialize ResultRelInfo data structures, and open the result rels.
818 	 */
819 	if (plannedstmt->resultRelations)
820 	{
821 		List	   *resultRelations = plannedstmt->resultRelations;
822 		int			numResultRelations = list_length(resultRelations);
823 		ResultRelInfo *resultRelInfos;
824 		ResultRelInfo *resultRelInfo;
825 
826 		resultRelInfos = (ResultRelInfo *)
827 			palloc(numResultRelations * sizeof(ResultRelInfo));
828 		resultRelInfo = resultRelInfos;
829 		foreach(l, resultRelations)
830 		{
831 			Index		resultRelationIndex = lfirst_int(l);
832 			Relation	resultRelation;
833 
834 			resultRelation = ExecGetRangeTableRelation(estate,
835 													   resultRelationIndex);
836 			InitResultRelInfo(resultRelInfo,
837 							  resultRelation,
838 							  resultRelationIndex,
839 							  NULL,
840 							  estate->es_instrument);
841 			resultRelInfo++;
842 		}
843 		estate->es_result_relations = resultRelInfos;
844 		estate->es_num_result_relations = numResultRelations;
845 
846 		/* es_result_relation_info is NULL except when within ModifyTable */
847 		estate->es_result_relation_info = NULL;
848 
849 		/*
850 		 * In the partitioned result relation case, also build ResultRelInfos
851 		 * for all the partitioned table roots, because we will need them to
852 		 * fire statement-level triggers, if any.
853 		 */
854 		if (plannedstmt->rootResultRelations)
855 		{
856 			int			num_roots = list_length(plannedstmt->rootResultRelations);
857 
858 			resultRelInfos = (ResultRelInfo *)
859 				palloc(num_roots * sizeof(ResultRelInfo));
860 			resultRelInfo = resultRelInfos;
861 			foreach(l, plannedstmt->rootResultRelations)
862 			{
863 				Index		resultRelIndex = lfirst_int(l);
864 				Relation	resultRelDesc;
865 
866 				resultRelDesc = ExecGetRangeTableRelation(estate,
867 														  resultRelIndex);
868 				InitResultRelInfo(resultRelInfo,
869 								  resultRelDesc,
870 								  resultRelIndex,
871 								  NULL,
872 								  estate->es_instrument);
873 				resultRelInfo++;
874 			}
875 
876 			estate->es_root_result_relations = resultRelInfos;
877 			estate->es_num_root_result_relations = num_roots;
878 		}
879 		else
880 		{
881 			estate->es_root_result_relations = NULL;
882 			estate->es_num_root_result_relations = 0;
883 		}
884 	}
885 	else
886 	{
887 		/*
888 		 * if no result relation, then set state appropriately
889 		 */
890 		estate->es_result_relations = NULL;
891 		estate->es_num_result_relations = 0;
892 		estate->es_result_relation_info = NULL;
893 		estate->es_root_result_relations = NULL;
894 		estate->es_num_root_result_relations = 0;
895 	}
896 
897 	/*
898 	 * Next, build the ExecRowMark array from the PlanRowMark(s), if any.
899 	 */
900 	if (plannedstmt->rowMarks)
901 	{
902 		estate->es_rowmarks = (ExecRowMark **)
903 			palloc0(estate->es_range_table_size * sizeof(ExecRowMark *));
904 		foreach(l, plannedstmt->rowMarks)
905 		{
906 			PlanRowMark *rc = (PlanRowMark *) lfirst(l);
907 			Oid			relid;
908 			Relation	relation;
909 			ExecRowMark *erm;
910 
911 			/* ignore "parent" rowmarks; they are irrelevant at runtime */
912 			if (rc->isParent)
913 				continue;
914 
915 			/* get relation's OID (will produce InvalidOid if subquery) */
916 			relid = exec_rt_fetch(rc->rti, estate)->relid;
917 
918 			/* open relation, if we need to access it for this mark type */
919 			switch (rc->markType)
920 			{
921 				case ROW_MARK_EXCLUSIVE:
922 				case ROW_MARK_NOKEYEXCLUSIVE:
923 				case ROW_MARK_SHARE:
924 				case ROW_MARK_KEYSHARE:
925 				case ROW_MARK_REFERENCE:
926 					relation = ExecGetRangeTableRelation(estate, rc->rti);
927 					break;
928 				case ROW_MARK_COPY:
929 					/* no physical table access is required */
930 					relation = NULL;
931 					break;
932 				default:
933 					elog(ERROR, "unrecognized markType: %d", rc->markType);
934 					relation = NULL;	/* keep compiler quiet */
935 					break;
936 			}
937 
938 			/* Check that relation is a legal target for marking */
939 			if (relation)
940 				CheckValidRowMarkRel(relation, rc->markType);
941 
942 			erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
943 			erm->relation = relation;
944 			erm->relid = relid;
945 			erm->rti = rc->rti;
946 			erm->prti = rc->prti;
947 			erm->rowmarkId = rc->rowmarkId;
948 			erm->markType = rc->markType;
949 			erm->strength = rc->strength;
950 			erm->waitPolicy = rc->waitPolicy;
951 			erm->ermActive = false;
952 			ItemPointerSetInvalid(&(erm->curCtid));
953 			erm->ermExtra = NULL;
954 
955 			Assert(erm->rti > 0 && erm->rti <= estate->es_range_table_size &&
956 				   estate->es_rowmarks[erm->rti - 1] == NULL);
957 
958 			estate->es_rowmarks[erm->rti - 1] = erm;
959 		}
960 	}
961 
962 	/*
963 	 * Initialize the executor's tuple table to empty.
964 	 */
965 	estate->es_tupleTable = NIL;
966 
967 	/* signal that this EState is not used for EPQ */
968 	estate->es_epq_active = NULL;
969 
970 	/*
971 	 * Initialize private state information for each SubPlan.  We must do this
972 	 * before running ExecInitNode on the main query tree, since
973 	 * ExecInitSubPlan expects to be able to find these entries.
974 	 */
975 	Assert(estate->es_subplanstates == NIL);
976 	i = 1;						/* subplan indices count from 1 */
977 	foreach(l, plannedstmt->subplans)
978 	{
979 		Plan	   *subplan = (Plan *) lfirst(l);
980 		PlanState  *subplanstate;
981 		int			sp_eflags;
982 
983 		/*
984 		 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
985 		 * it is a parameterless subplan (not initplan), we suggest that it be
986 		 * prepared to handle REWIND efficiently; otherwise there is no need.
987 		 */
988 		sp_eflags = eflags
989 			& (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
990 		if (bms_is_member(i, plannedstmt->rewindPlanIDs))
991 			sp_eflags |= EXEC_FLAG_REWIND;
992 
993 		subplanstate = ExecInitNode(subplan, estate, sp_eflags);
994 
995 		estate->es_subplanstates = lappend(estate->es_subplanstates,
996 										   subplanstate);
997 
998 		i++;
999 	}
1000 
1001 	/*
1002 	 * Initialize the private state information for all the nodes in the query
1003 	 * tree.  This opens files, allocates storage and leaves us ready to start
1004 	 * processing tuples.
1005 	 */
1006 	planstate = ExecInitNode(plan, estate, eflags);
1007 
1008 	/*
1009 	 * Get the tuple descriptor describing the type of tuples to return.
1010 	 */
1011 	tupType = ExecGetResultType(planstate);
1012 
1013 	/*
1014 	 * Initialize the junk filter if needed.  SELECT queries need a filter if
1015 	 * there are any junk attrs in the top-level tlist.
1016 	 */
1017 	if (operation == CMD_SELECT)
1018 	{
1019 		bool		junk_filter_needed = false;
1020 		ListCell   *tlist;
1021 
1022 		foreach(tlist, plan->targetlist)
1023 		{
1024 			TargetEntry *tle = (TargetEntry *) lfirst(tlist);
1025 
1026 			if (tle->resjunk)
1027 			{
1028 				junk_filter_needed = true;
1029 				break;
1030 			}
1031 		}
1032 
1033 		if (junk_filter_needed)
1034 		{
1035 			JunkFilter *j;
1036 			TupleTableSlot *slot;
1037 
1038 			slot = ExecInitExtraTupleSlot(estate, NULL, &TTSOpsVirtual);
1039 			j = ExecInitJunkFilter(planstate->plan->targetlist,
1040 								   slot);
1041 			estate->es_junkFilter = j;
1042 
1043 			/* Want to return the cleaned tuple type */
1044 			tupType = j->jf_cleanTupType;
1045 		}
1046 	}
1047 
1048 	queryDesc->tupDesc = tupType;
1049 	queryDesc->planstate = planstate;
1050 }
1051 
/*
 * Check that a proposed result relation is a legal target for the operation
 *
 * Generally the parser and/or planner should have noticed any such mistake
 * already, but let's make sure.
 *
 * On an illegal target this does not return: it reports the problem via
 * ereport(ERROR).  A legal target simply falls through.
 *
 * Note: when changing this function, you probably also need to look at
 * CheckValidRowMarkRel.
 */
void
CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation)
{
	Relation	resultRel = resultRelInfo->ri_RelationDesc;
	TriggerDesc *trigDesc = resultRel->trigdesc;
	FdwRoutine *fdwroutine;

	switch (resultRel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* Ordinary and partitioned tables are always modifiable ... */
			CheckCmdReplicaIdentity(resultRel, operation);
			break;
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_VIEW:

			/*
			 * Okay only if there's a suitable INSTEAD OF trigger.  Messages
			 * here should match rewriteHandler.c's rewriteTargetView and
			 * RewriteQuery, except that we omit errdetail because we haven't
			 * got the information handy (and given that we really shouldn't
			 * get here anyway, it's not worth great exertion to get).
			 */
			switch (operation)
			{
				case CMD_INSERT:
					if (!trigDesc || !trigDesc->trig_insert_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot insert into view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
					break;
				case CMD_UPDATE:
					if (!trigDesc || !trigDesc->trig_update_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot update view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
					break;
				case CMD_DELETE:
					if (!trigDesc || !trigDesc->trig_delete_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot delete from view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		case RELKIND_MATVIEW:
			/* Matviews are writable only during REFRESH-style maintenance */
			if (!MatViewIncrementalMaintenanceIsEnabled())
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot change materialized view \"%s\"",
								RelationGetRelationName(resultRel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = resultRelInfo->ri_FdwRoutine;
			switch (operation)
			{
				case CMD_INSERT:
					/* first, the FDW must implement the callback at all ... */
					if (fdwroutine->ExecForeignInsert == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot insert into foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					/* ... and, if it reports updatability, allow this command */
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow inserts",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_UPDATE:
					if (fdwroutine->ExecForeignUpdate == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot update foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow updates",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_DELETE:
					if (fdwroutine->ExecForeignDelete == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot delete from foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow deletes",
										RelationGetRelationName(resultRel))));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
	}
}
1190 
1191 /*
1192  * Check that a proposed rowmark target relation is a legal target
1193  *
1194  * In most cases parser and/or planner should have noticed this already, but
1195  * they don't cover all cases.
1196  */
1197 static void
CheckValidRowMarkRel(Relation rel,RowMarkType markType)1198 CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1199 {
1200 	FdwRoutine *fdwroutine;
1201 
1202 	switch (rel->rd_rel->relkind)
1203 	{
1204 		case RELKIND_RELATION:
1205 		case RELKIND_PARTITIONED_TABLE:
1206 			/* OK */
1207 			break;
1208 		case RELKIND_SEQUENCE:
1209 			/* Must disallow this because we don't vacuum sequences */
1210 			ereport(ERROR,
1211 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
1212 					 errmsg("cannot lock rows in sequence \"%s\"",
1213 							RelationGetRelationName(rel))));
1214 			break;
1215 		case RELKIND_TOASTVALUE:
1216 			/* We could allow this, but there seems no good reason to */
1217 			ereport(ERROR,
1218 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
1219 					 errmsg("cannot lock rows in TOAST relation \"%s\"",
1220 							RelationGetRelationName(rel))));
1221 			break;
1222 		case RELKIND_VIEW:
1223 			/* Should not get here; planner should have expanded the view */
1224 			ereport(ERROR,
1225 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
1226 					 errmsg("cannot lock rows in view \"%s\"",
1227 							RelationGetRelationName(rel))));
1228 			break;
1229 		case RELKIND_MATVIEW:
1230 			/* Allow referencing a matview, but not actual locking clauses */
1231 			if (markType != ROW_MARK_REFERENCE)
1232 				ereport(ERROR,
1233 						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
1234 						 errmsg("cannot lock rows in materialized view \"%s\"",
1235 								RelationGetRelationName(rel))));
1236 			break;
1237 		case RELKIND_FOREIGN_TABLE:
1238 			/* Okay only if the FDW supports it */
1239 			fdwroutine = GetFdwRoutineForRelation(rel, false);
1240 			if (fdwroutine->RefetchForeignRow == NULL)
1241 				ereport(ERROR,
1242 						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1243 						 errmsg("cannot lock rows in foreign table \"%s\"",
1244 								RelationGetRelationName(rel))));
1245 			break;
1246 		default:
1247 			ereport(ERROR,
1248 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
1249 					 errmsg("cannot lock rows in relation \"%s\"",
1250 							RelationGetRelationName(rel))));
1251 			break;
1252 	}
1253 }
1254 
/*
 * Initialize ResultRelInfo data for one result relation
 *
 * resultRelInfo: caller-provided struct to fill (fully overwritten here)
 * resultRelationDesc: already-opened target relation
 * resultRelationIndex: range table index of the target (0 for "dummy"
 *		entries such as trigger target relations)
 * partition_root_rri: ResultRelInfo of the partition root, if this is a
 *		routing target; else NULL
 * instrument_options: if nonzero, allocate per-trigger instrumentation
 *
 * Caution: before Postgres 9.1, this function included the relkind checking
 * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
 * appropriate.  Be sure callers cover those needs.
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
				  Relation resultRelationDesc,
				  Index resultRelationIndex,
				  ResultRelInfo *partition_root_rri,
				  int instrument_options)
{
	List	   *partition_check = NIL;

	/* Start from a clean slate; every field is assigned explicitly below. */
	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	/* Index info is filled in later by ExecOpenIndices, if the caller does so */
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
	{
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		/* Per-trigger caches, filled lazily as triggers are fired */
		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0(n * sizeof(FmgrInfo));
		resultRelInfo->ri_TrigWhenExprs = (ExprState **)
			palloc0(n * sizeof(ExprState *));
		if (instrument_options)
			resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
	}
	else
	{
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigWhenExprs = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;
	}
	if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
	else
		resultRelInfo->ri_FdwRoutine = NULL;

	/* The following fields are set later if needed */
	resultRelInfo->ri_FdwState = NULL;
	resultRelInfo->ri_usesFdwDirectModify = false;
	resultRelInfo->ri_ConstraintExprs = NULL;
	resultRelInfo->ri_GeneratedExprs = NULL;
	resultRelInfo->ri_junkFilter = NULL;
	resultRelInfo->ri_projectReturning = NULL;
	resultRelInfo->ri_onConflictArbiterIndexes = NIL;
	resultRelInfo->ri_onConflict = NULL;
	resultRelInfo->ri_ReturningSlot = NULL;
	resultRelInfo->ri_TrigOldSlot = NULL;
	resultRelInfo->ri_TrigNewSlot = NULL;

	/*
	 * Partition constraint, which also includes the partition constraint of
	 * all the ancestors that are partitions.  Note that it will be checked
	 * even in the case of tuple-routing where this table is the target leaf
	 * partition, if there any BR triggers defined on the table.  Although
	 * tuple-routing implicitly preserves the partition constraint of the
	 * target partition for a given row, the BR triggers may change the row
	 * such that the constraint is no longer satisfied, which we must fail for
	 * by checking it explicitly.
	 *
	 * If this is a partitioned table, the partition constraint (if any) of a
	 * given row will be checked just before performing tuple-routing.
	 */
	partition_check = RelationGetPartitionQual(resultRelationDesc);

	resultRelInfo->ri_PartitionCheck = partition_check;
	resultRelInfo->ri_RootResultRelInfo = partition_root_rri;
	resultRelInfo->ri_PartitionInfo = NULL; /* may be set later */
	resultRelInfo->ri_CopyMultiInsertBuffer = NULL;
}
1335 
1336 /*
1337  * ExecGetTriggerResultRel
1338  *		Get a ResultRelInfo for a trigger target relation.
1339  *
1340  * Most of the time, triggers are fired on one of the result relations of the
1341  * query, and so we can just return a member of the es_result_relations array,
1342  * or the es_root_result_relations array (if any), or the
1343  * es_tuple_routing_result_relations list (if any).  (Note: in self-join
1344  * situations there might be multiple members with the same OID; if so it
1345  * doesn't matter which one we pick.)
1346  *
1347  * However, it is sometimes necessary to fire triggers on other relations;
1348  * this happens mainly when an RI update trigger queues additional triggers
1349  * on other relations, which will be processed in the context of the outer
1350  * query.  For efficiency's sake, we want to have a ResultRelInfo for those
1351  * triggers too; that can avoid repeated re-opening of the relation.  (It
1352  * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
1353  * triggers.)  So we make additional ResultRelInfo's as needed, and save them
1354  * in es_trig_target_relations.
1355  */
1356 ResultRelInfo *
ExecGetTriggerResultRel(EState * estate,Oid relid)1357 ExecGetTriggerResultRel(EState *estate, Oid relid)
1358 {
1359 	ResultRelInfo *rInfo;
1360 	int			nr;
1361 	ListCell   *l;
1362 	Relation	rel;
1363 	MemoryContext oldcontext;
1364 
1365 	/* First, search through the query result relations */
1366 	rInfo = estate->es_result_relations;
1367 	nr = estate->es_num_result_relations;
1368 	while (nr > 0)
1369 	{
1370 		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1371 			return rInfo;
1372 		rInfo++;
1373 		nr--;
1374 	}
1375 	/* Second, search through the root result relations, if any */
1376 	rInfo = estate->es_root_result_relations;
1377 	nr = estate->es_num_root_result_relations;
1378 	while (nr > 0)
1379 	{
1380 		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1381 			return rInfo;
1382 		rInfo++;
1383 		nr--;
1384 	}
1385 
1386 	/*
1387 	 * Third, search through the result relations that were created during
1388 	 * tuple routing, if any.
1389 	 */
1390 	foreach(l, estate->es_tuple_routing_result_relations)
1391 	{
1392 		rInfo = (ResultRelInfo *) lfirst(l);
1393 		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1394 			return rInfo;
1395 	}
1396 
1397 	/* Nope, but maybe we already made an extra ResultRelInfo for it */
1398 	foreach(l, estate->es_trig_target_relations)
1399 	{
1400 		rInfo = (ResultRelInfo *) lfirst(l);
1401 		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1402 			return rInfo;
1403 	}
1404 	/* Nope, so we need a new one */
1405 
1406 	/*
1407 	 * Open the target relation's relcache entry.  We assume that an
1408 	 * appropriate lock is still held by the backend from whenever the trigger
1409 	 * event got queued, so we need take no new lock here.  Also, we need not
1410 	 * recheck the relkind, so no need for CheckValidResultRel.
1411 	 */
1412 	rel = table_open(relid, NoLock);
1413 
1414 	/*
1415 	 * Make the new entry in the right context.
1416 	 */
1417 	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1418 	rInfo = makeNode(ResultRelInfo);
1419 	InitResultRelInfo(rInfo,
1420 					  rel,
1421 					  0,		/* dummy rangetable index */
1422 					  NULL,
1423 					  estate->es_instrument);
1424 	estate->es_trig_target_relations =
1425 		lappend(estate->es_trig_target_relations, rInfo);
1426 	MemoryContextSwitchTo(oldcontext);
1427 
1428 	/*
1429 	 * Currently, we don't need any index information in ResultRelInfos used
1430 	 * only for triggers, so no need to call ExecOpenIndices.
1431 	 */
1432 
1433 	return rInfo;
1434 }
1435 
1436 /*
1437  * Close any relations that have been opened by ExecGetTriggerResultRel().
1438  */
1439 void
ExecCleanUpTriggerState(EState * estate)1440 ExecCleanUpTriggerState(EState *estate)
1441 {
1442 	ListCell   *l;
1443 
1444 	foreach(l, estate->es_trig_target_relations)
1445 	{
1446 		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1447 
1448 		/*
1449 		 * Assert this is a "dummy" ResultRelInfo, see above.  Otherwise we
1450 		 * might be issuing a duplicate close against a Relation opened by
1451 		 * ExecGetRangeTableRelation.
1452 		 */
1453 		Assert(resultRelInfo->ri_RangeTableIndex == 0);
1454 
1455 		/*
1456 		 * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for
1457 		 * these rels, we needn't call ExecCloseIndices either.
1458 		 */
1459 		Assert(resultRelInfo->ri_NumIndices == 0);
1460 
1461 		table_close(resultRelInfo->ri_RelationDesc, NoLock);
1462 	}
1463 }
1464 
1465 /* ----------------------------------------------------------------
1466  *		ExecPostprocessPlan
1467  *
1468  *		Give plan nodes a final chance to execute before shutdown
1469  * ----------------------------------------------------------------
1470  */
1471 static void
ExecPostprocessPlan(EState * estate)1472 ExecPostprocessPlan(EState *estate)
1473 {
1474 	ListCell   *lc;
1475 
1476 	/*
1477 	 * Make sure nodes run forward.
1478 	 */
1479 	estate->es_direction = ForwardScanDirection;
1480 
1481 	/*
1482 	 * Run any secondary ModifyTable nodes to completion, in case the main
1483 	 * query did not fetch all rows from them.  (We do this to ensure that
1484 	 * such nodes have predictable results.)
1485 	 */
1486 	foreach(lc, estate->es_auxmodifytables)
1487 	{
1488 		PlanState  *ps = (PlanState *) lfirst(lc);
1489 
1490 		for (;;)
1491 		{
1492 			TupleTableSlot *slot;
1493 
1494 			/* Reset the per-output-tuple exprcontext each time */
1495 			ResetPerTupleExprContext(estate);
1496 
1497 			slot = ExecProcNode(ps);
1498 
1499 			if (TupIsNull(slot))
1500 				break;
1501 		}
1502 	}
1503 }
1504 
1505 /* ----------------------------------------------------------------
1506  *		ExecEndPlan
1507  *
1508  *		Cleans up the query plan -- closes files and frees up storage
1509  *
1510  * NOTE: we are no longer very worried about freeing storage per se
1511  * in this code; FreeExecutorState should be guaranteed to release all
1512  * memory that needs to be released.  What we are worried about doing
1513  * is closing relations and dropping buffer pins.  Thus, for example,
1514  * tuple tables must be cleared or dropped to ensure pins are released.
1515  * ----------------------------------------------------------------
1516  */
1517 static void
ExecEndPlan(PlanState * planstate,EState * estate)1518 ExecEndPlan(PlanState *planstate, EState *estate)
1519 {
1520 	ResultRelInfo *resultRelInfo;
1521 	Index		num_relations;
1522 	Index		i;
1523 	ListCell   *l;
1524 
1525 	/*
1526 	 * shut down the node-type-specific query processing
1527 	 */
1528 	ExecEndNode(planstate);
1529 
1530 	/*
1531 	 * for subplans too
1532 	 */
1533 	foreach(l, estate->es_subplanstates)
1534 	{
1535 		PlanState  *subplanstate = (PlanState *) lfirst(l);
1536 
1537 		ExecEndNode(subplanstate);
1538 	}
1539 
1540 	/*
1541 	 * destroy the executor's tuple table.  Actually we only care about
1542 	 * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1543 	 * the TupleTableSlots, since the containing memory context is about to go
1544 	 * away anyway.
1545 	 */
1546 	ExecResetTupleTable(estate->es_tupleTable, false);
1547 
1548 	/*
1549 	 * close indexes of result relation(s) if any.  (Rels themselves get
1550 	 * closed next.)
1551 	 */
1552 	resultRelInfo = estate->es_result_relations;
1553 	for (i = estate->es_num_result_relations; i > 0; i--)
1554 	{
1555 		ExecCloseIndices(resultRelInfo);
1556 		resultRelInfo++;
1557 	}
1558 
1559 	/*
1560 	 * close whatever rangetable Relations have been opened.  We do not
1561 	 * release any locks we might hold on those rels.
1562 	 */
1563 	num_relations = estate->es_range_table_size;
1564 	for (i = 0; i < num_relations; i++)
1565 	{
1566 		if (estate->es_relations[i])
1567 			table_close(estate->es_relations[i], NoLock);
1568 	}
1569 
1570 	/* likewise close any trigger target relations */
1571 	ExecCleanUpTriggerState(estate);
1572 }
1573 
/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		Processes the query plan until we have retrieved 'numberTuples' tuples,
 *		moving in the specified direction.
 *
 *		Runs to completion if numberTuples is 0
 *
 * estate/planstate: executor state and initialized plan tree
 * use_parallel_mode: enter parallel mode for the duration (forced off if
 *		the plan might be executed more than once; see below)
 * operation: CMD_SELECT etc.; only CMD_SELECT counts tuples here
 * sendTuples: whether to push each tuple to 'dest'
 * numberTuples: stop after this many tuples; 0 means no limit
 * direction: scan direction for the whole run
 * execute_once: caller promises this is the only ExecutePlan call
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
			PlanState *planstate,
			bool use_parallel_mode,
			CmdType operation,
			bool sendTuples,
			uint64 numberTuples,
			ScanDirection direction,
			DestReceiver *dest,
			bool execute_once)
{
	TupleTableSlot *slot;
	uint64		current_tuple_count;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;

	/*
	 * Set the direction.
	 */
	estate->es_direction = direction;

	/*
	 * If the plan might potentially be executed multiple times, we must force
	 * it to run without parallelism, because we might exit early.
	 */
	if (!execute_once)
		use_parallel_mode = false;

	estate->es_use_parallel_mode = use_parallel_mode;
	if (use_parallel_mode)
		EnterParallelMode();

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */
	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/*
		 * Execute the plan and obtain a tuple
		 */
		slot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just end the loop...
		 */
		if (TupIsNull(slot))
			break;

		/*
		 * If we have a junk filter, then project a new tuple with the junk
		 * removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 */
		if (estate->es_junkFilter != NULL)
			slot = ExecFilterJunk(estate->es_junkFilter, slot);

		/*
		 * If we are supposed to send the tuple somewhere, do so. (In
		 * practice, this is probably always the case at this point.)
		 */
		if (sendTuples)
		{
			/*
			 * If we are not able to send the tuple, we assume the destination
			 * has closed and no more tuples can be sent. If that's the case,
			 * end the loop.
			 */
			if (!dest->receiveSlot(slot, dest))
				break;
		}

		/*
		 * Count tuples processed, if this is a SELECT.  (For other operation
		 * types, the ModifyTable plan node must count the appropriate
		 * events.)
		 */
		if (operation == CMD_SELECT)
			(estate->es_processed)++;

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}

	/*
	 * If we know we won't need to back up, we can release resources at this
	 * point.
	 */
	if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD))
		(void) ExecShutdownNode(planstate);

	if (use_parallel_mode)
		ExitParallelMode();
}
1695 
1696 
1697 /*
1698  * ExecRelCheck --- check that tuple meets constraints for result relation
1699  *
1700  * Returns NULL if OK, else name of failed check constraint
1701  */
1702 static const char *
ExecRelCheck(ResultRelInfo * resultRelInfo,TupleTableSlot * slot,EState * estate)1703 ExecRelCheck(ResultRelInfo *resultRelInfo,
1704 			 TupleTableSlot *slot, EState *estate)
1705 {
1706 	Relation	rel = resultRelInfo->ri_RelationDesc;
1707 	int			ncheck = rel->rd_att->constr->num_check;
1708 	ConstrCheck *check = rel->rd_att->constr->check;
1709 	ExprContext *econtext;
1710 	MemoryContext oldContext;
1711 	int			i;
1712 
1713 	/*
1714 	 * If first time through for this result relation, build expression
1715 	 * nodetrees for rel's constraint expressions.  Keep them in the per-query
1716 	 * memory context so they'll survive throughout the query.
1717 	 */
1718 	if (resultRelInfo->ri_ConstraintExprs == NULL)
1719 	{
1720 		oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1721 		resultRelInfo->ri_ConstraintExprs =
1722 			(ExprState **) palloc(ncheck * sizeof(ExprState *));
1723 		for (i = 0; i < ncheck; i++)
1724 		{
1725 			Expr	   *checkconstr;
1726 
1727 			checkconstr = stringToNode(check[i].ccbin);
1728 			resultRelInfo->ri_ConstraintExprs[i] =
1729 				ExecPrepareExpr(checkconstr, estate);
1730 		}
1731 		MemoryContextSwitchTo(oldContext);
1732 	}
1733 
1734 	/*
1735 	 * We will use the EState's per-tuple context for evaluating constraint
1736 	 * expressions (creating it if it's not already there).
1737 	 */
1738 	econtext = GetPerTupleExprContext(estate);
1739 
1740 	/* Arrange for econtext's scan tuple to be the tuple under test */
1741 	econtext->ecxt_scantuple = slot;
1742 
1743 	/* And evaluate the constraints */
1744 	for (i = 0; i < ncheck; i++)
1745 	{
1746 		ExprState  *checkconstr = resultRelInfo->ri_ConstraintExprs[i];
1747 
1748 		/*
1749 		 * NOTE: SQL specifies that a NULL result from a constraint expression
1750 		 * is not to be treated as a failure.  Therefore, use ExecCheck not
1751 		 * ExecQual.
1752 		 */
1753 		if (!ExecCheck(checkconstr, econtext))
1754 			return check[i].ccname;
1755 	}
1756 
1757 	/* NULL result means no error */
1758 	return NULL;
1759 }
1760 
1761 /*
1762  * ExecPartitionCheck --- check that tuple meets the partition constraint.
1763  *
1764  * Returns true if it meets the partition constraint.  If the constraint
1765  * fails and we're asked to emit to error, do so and don't return; otherwise
1766  * return false.
1767  */
1768 bool
ExecPartitionCheck(ResultRelInfo * resultRelInfo,TupleTableSlot * slot,EState * estate,bool emitError)1769 ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
1770 				   EState *estate, bool emitError)
1771 {
1772 	ExprContext *econtext;
1773 	bool		success;
1774 
1775 	/*
1776 	 * If first time through, build expression state tree for the partition
1777 	 * check expression.  Keep it in the per-query memory context so they'll
1778 	 * survive throughout the query.
1779 	 */
1780 	if (resultRelInfo->ri_PartitionCheckExpr == NULL)
1781 	{
1782 		List	   *qual = resultRelInfo->ri_PartitionCheck;
1783 
1784 		resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
1785 	}
1786 
1787 	/*
1788 	 * We will use the EState's per-tuple context for evaluating constraint
1789 	 * expressions (creating it if it's not already there).
1790 	 */
1791 	econtext = GetPerTupleExprContext(estate);
1792 
1793 	/* Arrange for econtext's scan tuple to be the tuple under test */
1794 	econtext->ecxt_scantuple = slot;
1795 
1796 	/*
1797 	 * As in case of the catalogued constraints, we treat a NULL result as
1798 	 * success here, not a failure.
1799 	 */
1800 	success = ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
1801 
1802 	/* if asked to emit error, don't actually return on failure */
1803 	if (!success && emitError)
1804 		ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1805 
1806 	return success;
1807 }
1808 
/*
 * ExecPartitionCheckEmitError - Form and emit an error message after a failed
 * partition constraint check.
 *
 * Does not return; always ends in ereport(ERROR).  The failing row is shown
 * in the error detail (when the user has permission to see it) in the root
 * table's column order, not the leaf partition's.
 */
void
ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
							TupleTableSlot *slot,
							EState *estate)
{
	Oid			root_relid;
	TupleDesc	tupdesc;
	char	   *val_desc;
	Bitmapset  *modifiedCols;

	/*
	 * If the tuple has been routed, it's been converted to the partition's
	 * rowtype, which might differ from the root table's.  We must convert it
	 * back to the root table's rowtype so that val_desc in the error message
	 * matches the input tuple.
	 */
	if (resultRelInfo->ri_RootResultRelInfo)
	{
		ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
		TupleDesc	old_tupdesc;
		AttrNumber *map;

		root_relid = RelationGetRelid(rootrel->ri_RelationDesc);
		tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);

		old_tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
		/* a reverse map: partition rowtype back to root rowtype */
		map = convert_tuples_by_name_map_if_req(old_tupdesc, tupdesc,
												gettext_noop("could not convert row type"));

		/*
		 * Partition-specific slot's tupdesc can't be changed, so allocate a
		 * new one.  (A NULL map means the rowtypes already match and no
		 * conversion is needed.)
		 */
		if (map != NULL)
			slot = execute_attr_map_slot(map, slot,
										 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
		/* report columns from the root rel's perspective */
		modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
								 ExecGetUpdatedCols(rootrel, estate));
	}
	else
	{
		/* no routing happened; report against the relation itself */
		root_relid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
		tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
		modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
								 ExecGetUpdatedCols(resultRelInfo, estate));
	}

	/* may return NULL if the user lacks permission to see the row values */
	val_desc = ExecBuildSlotValueDescription(root_relid,
											 slot,
											 tupdesc,
											 modifiedCols,
											 64);
	ereport(ERROR,
			(errcode(ERRCODE_CHECK_VIOLATION),
			 errmsg("new row for relation \"%s\" violates partition constraint",
					RelationGetRelationName(resultRelInfo->ri_RelationDesc)),
			 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
}
1872 
/*
 * ExecConstraints - check constraints of the tuple in 'slot'
 *
 * This checks the traditional NOT NULL and check constraints.
 *
 * The partition constraint is *NOT* checked; see ExecPartitionCheck for that.
 *
 * Note: 'slot' contains the tuple to check the constraints of, which may
 * have been converted from the original input tuple after tuple routing.
 * 'resultRelInfo' is the final result relation, after tuple routing.
 *
 * On any violation this raises an error (and hence does not return); on
 * success it simply returns.
 */
void
ExecConstraints(ResultRelInfo *resultRelInfo,
				TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	TupleConstr *constr = tupdesc->constr;
	Bitmapset  *modifiedCols;

	/* Caller should not invoke us unless there is something to check */
	Assert(constr || resultRelInfo->ri_PartitionCheck);

	/* First pass: per-attribute NOT NULL constraints */
	if (constr && constr->has_not_null)
	{
		int			natts = tupdesc->natts;
		int			attrChk;

		for (attrChk = 1; attrChk <= natts; attrChk++)
		{
			Form_pg_attribute att = TupleDescAttr(tupdesc, attrChk - 1);

			if (att->attnotnull && slot_attisnull(slot, attrChk))
			{
				char	   *val_desc;
				Relation	orig_rel = rel;
				TupleDesc	orig_tupdesc = RelationGetDescr(rel);

				/*
				 * If the tuple has been routed, it's been converted to the
				 * partition's rowtype, which might differ from the root
				 * table's.  We must convert it back to the root table's
				 * rowtype so that val_desc shown error message matches the
				 * input tuple.
				 */
				if (resultRelInfo->ri_RootResultRelInfo)
				{
					ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
					AttrNumber *map;

					tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
					/* a reverse map: partition attnos -> root attnos */
					map = convert_tuples_by_name_map_if_req(orig_tupdesc,
															tupdesc,
															gettext_noop("could not convert row type"));

					/*
					 * Partition-specific slot's tupdesc can't be changed, so
					 * allocate a new one.
					 */
					if (map != NULL)
						slot = execute_attr_map_slot(map, slot,
													 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
					modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
											 ExecGetUpdatedCols(rootrel, estate));
					rel = rootrel->ri_RelationDesc;
				}
				else
					modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
											 ExecGetUpdatedCols(resultRelInfo, estate));
				/* may be NULL if the user can't see the row's contents */
				val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
														 slot,
														 tupdesc,
														 modifiedCols,
														 64);

				ereport(ERROR,
						(errcode(ERRCODE_NOT_NULL_VIOLATION),
						 errmsg("null value in column \"%s\" violates not-null constraint",
								NameStr(att->attname)),
						 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
						 errtablecol(orig_rel, attrChk)));
			}
		}
	}

	/* Second pass: table CHECK constraints, evaluated by ExecRelCheck */
	if (constr && constr->num_check > 0)
	{
		const char *failed;

		if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
		{
			char	   *val_desc;
			Relation	orig_rel = rel;

			/* See the comment above. */
			if (resultRelInfo->ri_RootResultRelInfo)
			{
				ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
				TupleDesc	old_tupdesc = RelationGetDescr(rel);
				AttrNumber *map;

				tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
				/* a reverse map */
				map = convert_tuples_by_name_map_if_req(old_tupdesc,
														tupdesc,
														gettext_noop("could not convert row type"));

				/*
				 * Partition-specific slot's tupdesc can't be changed, so
				 * allocate a new one.
				 */
				if (map != NULL)
					slot = execute_attr_map_slot(map, slot,
												 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
				modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
										 ExecGetUpdatedCols(rootrel, estate));
				rel = rootrel->ri_RelationDesc;
			}
			else
				modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
										 ExecGetUpdatedCols(resultRelInfo, estate));
			val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
													 slot,
													 tupdesc,
													 modifiedCols,
													 64);
			ereport(ERROR,
					(errcode(ERRCODE_CHECK_VIOLATION),
					 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
							RelationGetRelationName(orig_rel), failed),
					 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
					 errtableconstraint(orig_rel, failed)));
		}
	}
}
2008 
/*
 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
 * of the specified kind.
 *
 * Note that this needs to be called multiple times to ensure that all kinds of
 * WITH CHECK OPTIONs are handled (both those from views which have the WITH
 * CHECK OPTION set and from row level security policies).  See ExecInsert()
 * and ExecUpdate().
 *
 * Raises an error on the first failing option of the requested 'kind';
 * returns normally if all such options pass.
 */
void
ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
					 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	ExprContext *econtext;
	ListCell   *l1,
			   *l2;

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/*
	 * Check each of the constraints.  The WCO list and the compiled
	 * expression list are parallel, hence forboth().
	 */
	forboth(l1, resultRelInfo->ri_WithCheckOptions,
			l2, resultRelInfo->ri_WithCheckOptionExprs)
	{
		WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
		ExprState  *wcoExpr = (ExprState *) lfirst(l2);

		/*
		 * Skip any WCOs which are not the kind we are looking for at this
		 * time.
		 */
		if (wco->kind != kind)
			continue;

		/*
		 * WITH CHECK OPTION checks are intended to ensure that the new tuple
		 * is visible (in the case of a view) or that it passes the
		 * 'with-check' policy (in the case of row security). If the qual
		 * evaluates to NULL or FALSE, then the new tuple won't be included in
		 * the view or doesn't pass the 'with-check' policy for the table.
		 */
		if (!ExecQual(wcoExpr, econtext))
		{
			char	   *val_desc;
			Bitmapset  *modifiedCols;

			switch (wco->kind)
			{
					/*
					 * For WITH CHECK OPTIONs coming from views, we might be
					 * able to provide the details on the row, depending on
					 * the permissions on the relation (that is, if the user
					 * could view it directly anyway).  For RLS violations, we
					 * don't include the data since we don't know if the user
					 * should be able to view the tuple as that depends on the
					 * USING policy.
					 */
				case WCO_VIEW_CHECK:
					/* See the comment in ExecConstraints(). */
					if (resultRelInfo->ri_RootResultRelInfo)
					{
						ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
						TupleDesc	old_tupdesc = RelationGetDescr(rel);
						AttrNumber *map;

						tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
						/* a reverse map: partition attnos -> root attnos */
						map = convert_tuples_by_name_map_if_req(old_tupdesc,
																tupdesc,
																gettext_noop("could not convert row type"));

						/*
						 * Partition-specific slot's tupdesc can't be changed,
						 * so allocate a new one.
						 */
						if (map != NULL)
							slot = execute_attr_map_slot(map, slot,
														 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));

						modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
												 ExecGetUpdatedCols(rootrel, estate));
						rel = rootrel->ri_RelationDesc;
					}
					else
						modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
												 ExecGetUpdatedCols(resultRelInfo, estate));
					/* NULL if the user isn't entitled to see the row */
					val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
															 slot,
															 tupdesc,
															 modifiedCols,
															 64);

					ereport(ERROR,
							(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
							 errmsg("new row violates check option for view \"%s\"",
									wco->relname),
							 val_desc ? errdetail("Failing row contains %s.",
												  val_desc) : 0));
					break;
				case WCO_RLS_INSERT_CHECK:
				case WCO_RLS_UPDATE_CHECK:
					/* RLS violations deliberately omit the row's contents */
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy for table \"%s\"",
										wco->relname)));
					break;
				case WCO_RLS_CONFLICT_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
										wco->relname)));
					break;
				default:
					elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
					break;
			}
		}
	}
}
2148 
/*
 * ExecBuildSlotValueDescription -- construct a string representing a tuple
 *
 * This is intentionally very similar to BuildIndexValueDescription, but
 * unlike that function, we truncate long field values (to at most maxfieldlen
 * bytes).  That seems necessary here since heap field values could be very
 * long, whereas index entries typically aren't so wide.
 *
 * Also, unlike the case with index entries, we need to be prepared to ignore
 * dropped columns.  We used to use the slot's tuple descriptor to decode the
 * data, but the slot's descriptor doesn't identify dropped columns, so we
 * now need to be passed the relation's descriptor.
 *
 * Note that, like BuildIndexValueDescription, if the user does not have
 * permission to view any of the columns involved, a NULL is returned.  Unlike
 * BuildIndexValueDescription, if the user has access to view a subset of the
 * column involved, that subset will be returned with a key identifying which
 * columns they are.
 *
 * The result is palloc'd in the current memory context.
 */
static char *
ExecBuildSlotValueDescription(Oid reloid,
							  TupleTableSlot *slot,
							  TupleDesc tupdesc,
							  Bitmapset *modifiedCols,
							  int maxfieldlen)
{
	StringInfoData buf;			/* "(v1, v2, ...)" value list */
	StringInfoData collist;		/* "(c1, c2, ...)" column-name list, only
								 * built when the user lacks table-level
								 * SELECT */
	bool		write_comma = false;
	bool		write_comma_collist = false;
	int			i;
	AclResult	aclresult;
	bool		table_perm = false; /* user may see every column */
	bool		any_perm = false;	/* user may see at least one column */

	/*
	 * Check if RLS is enabled and should be active for the relation; if so,
	 * then don't return anything.  Otherwise, go through normal permission
	 * checks.
	 */
	if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
		return NULL;

	initStringInfo(&buf);

	appendStringInfoChar(&buf, '(');

	/*
	 * Check if the user has permissions to see the row.  Table-level SELECT
	 * allows access to all columns.  If the user does not have table-level
	 * SELECT then we check each column and include those the user has SELECT
	 * rights on.  Additionally, we always include columns the user provided
	 * data for.
	 */
	aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
	if (aclresult != ACLCHECK_OK)
	{
		/* Set up the buffer for the column list */
		initStringInfo(&collist);
		appendStringInfoChar(&collist, '(');
	}
	else
		table_perm = any_perm = true;

	/* Make sure the tuple is fully deconstructed */
	slot_getallattrs(slot);

	for (i = 0; i < tupdesc->natts; i++)
	{
		bool		column_perm = false;
		char	   *val;
		int			vallen;
		Form_pg_attribute att = TupleDescAttr(tupdesc, i);

		/* ignore dropped columns */
		if (att->attisdropped)
			continue;

		if (!table_perm)
		{
			/*
			 * No table-level SELECT, so need to make sure they either have
			 * SELECT rights on the column or that they have provided the data
			 * for the column.  If not, omit this column from the error
			 * message.
			 */
			aclresult = pg_attribute_aclcheck(reloid, att->attnum,
											  GetUserId(), ACL_SELECT);
			if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
							  modifiedCols) || aclresult == ACLCHECK_OK)
			{
				column_perm = any_perm = true;

				if (write_comma_collist)
					appendStringInfoString(&collist, ", ");
				else
					write_comma_collist = true;

				appendStringInfoString(&collist, NameStr(att->attname));
			}
		}

		if (table_perm || column_perm)
		{
			/* render the value, using the type's output function */
			if (slot->tts_isnull[i])
				val = "null";
			else
			{
				Oid			foutoid;
				bool		typisvarlena;

				getTypeOutputInfo(att->atttypid,
								  &foutoid, &typisvarlena);
				val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
			}

			if (write_comma)
				appendStringInfoString(&buf, ", ");
			else
				write_comma = true;

			/* truncate if needed, respecting multibyte char boundaries */
			vallen = strlen(val);
			if (vallen <= maxfieldlen)
				appendStringInfoString(&buf, val);
			else
			{
				vallen = pg_mbcliplen(val, vallen, maxfieldlen);
				appendBinaryStringInfo(&buf, val, vallen);
				appendStringInfoString(&buf, "...");
			}
		}
	}

	/* If we end up with zero columns being returned, then return NULL. */
	if (!any_perm)
		return NULL;

	appendStringInfoChar(&buf, ')');

	if (!table_perm)
	{
		/* partial visibility: prefix the values with the column-name key */
		appendStringInfoString(&collist, ") = ");
		appendStringInfoString(&collist, buf.data);

		return collist.data;
	}

	return buf.data;
}
2299 
2300 
2301 /*
2302  * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2303  * given ResultRelInfo
2304  */
2305 LockTupleMode
ExecUpdateLockMode(EState * estate,ResultRelInfo * relinfo)2306 ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2307 {
2308 	Bitmapset  *keyCols;
2309 	Bitmapset  *updatedCols;
2310 
2311 	/*
2312 	 * Compute lock mode to use.  If columns that are part of the key have not
2313 	 * been modified, then we can use a weaker lock, allowing for better
2314 	 * concurrency.
2315 	 */
2316 	updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2317 	keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2318 										 INDEX_ATTR_BITMAP_KEY);
2319 
2320 	if (bms_overlap(keyCols, updatedCols))
2321 		return LockTupleExclusive;
2322 
2323 	return LockTupleNoKeyExclusive;
2324 }
2325 
2326 /*
2327  * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2328  *
2329  * If no such struct, either return NULL or throw error depending on missing_ok
2330  */
2331 ExecRowMark *
ExecFindRowMark(EState * estate,Index rti,bool missing_ok)2332 ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2333 {
2334 	if (rti > 0 && rti <= estate->es_range_table_size &&
2335 		estate->es_rowmarks != NULL)
2336 	{
2337 		ExecRowMark *erm = estate->es_rowmarks[rti - 1];
2338 
2339 		if (erm)
2340 			return erm;
2341 	}
2342 	if (!missing_ok)
2343 		elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2344 	return NULL;
2345 }
2346 
2347 /*
2348  * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2349  *
2350  * Inputs are the underlying ExecRowMark struct and the targetlist of the
2351  * input plan node (not planstate node!).  We need the latter to find out
2352  * the column numbers of the resjunk columns.
2353  */
2354 ExecAuxRowMark *
ExecBuildAuxRowMark(ExecRowMark * erm,List * targetlist)2355 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2356 {
2357 	ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2358 	char		resname[32];
2359 
2360 	aerm->rowmark = erm;
2361 
2362 	/* Look up the resjunk columns associated with this rowmark */
2363 	if (erm->markType != ROW_MARK_COPY)
2364 	{
2365 		/* need ctid for all methods other than COPY */
2366 		snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2367 		aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2368 													   resname);
2369 		if (!AttributeNumberIsValid(aerm->ctidAttNo))
2370 			elog(ERROR, "could not find junk %s column", resname);
2371 	}
2372 	else
2373 	{
2374 		/* need wholerow if COPY */
2375 		snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2376 		aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2377 														resname);
2378 		if (!AttributeNumberIsValid(aerm->wholeAttNo))
2379 			elog(ERROR, "could not find junk %s column", resname);
2380 	}
2381 
2382 	/* if child rel, need tableoid */
2383 	if (erm->rti != erm->prti)
2384 	{
2385 		snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2386 		aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2387 													   resname);
2388 		if (!AttributeNumberIsValid(aerm->toidAttNo))
2389 			elog(ERROR, "could not find junk %s column", resname);
2390 	}
2391 
2392 	return aerm;
2393 }
2394 
2395 
2396 /*
2397  * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2398  * process the updated version under READ COMMITTED rules.
2399  *
2400  * See backend/executor/README for some info about how this works.
2401  */
2402 
2403 
/*
 * Check the updated version of a tuple to see if we want to process it under
 * READ COMMITTED rules.
 *
 *	epqstate - state for EvalPlanQual rechecking
 *	relation - table containing tuple
 *	rti - rangetable index of table containing tuple
 *	inputslot - tuple for processing - this can be the slot from
 *		EvalPlanQualSlot(), for the increased efficiency.
 *
 * This tests whether the tuple in inputslot still matches the relevant
 * quals. For that result to be useful, typically the input tuple has to be
 * last row version (otherwise the result isn't particularly useful) and
 * locked (otherwise the result might be out of date). That's typically
 * achieved by using table_tuple_lock() with the
 * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
TupleTableSlot *
EvalPlanQual(EPQState *epqstate, Relation relation,
			 Index rti, TupleTableSlot *inputslot)
{
	TupleTableSlot *slot;
	TupleTableSlot *testslot;

	Assert(rti > 0);

	/*
	 * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
	 */
	EvalPlanQualBegin(epqstate);

	/*
	 * Callers will often use the EvalPlanQualSlot to store the tuple to avoid
	 * an unnecessary copy.  Only copy when the caller passed a different
	 * slot.
	 */
	testslot = EvalPlanQualSlot(epqstate, relation, rti);
	if (testslot != inputslot)
		ExecCopySlot(testslot, inputslot);

	/*
	 * Run the EPQ query.  We assume it will return at most one tuple.
	 */
	slot = EvalPlanQualNext(epqstate);

	/*
	 * If we got a tuple, force the slot to materialize the tuple so that it
	 * is not dependent on any local state in the EPQ query (in particular,
	 * it's highly likely that the slot contains references to any pass-by-ref
	 * datums that may be present in copyTuple).  As with the next step, this
	 * is to guard against early re-use of the EPQ query.
	 */
	if (!TupIsNull(slot))
		ExecMaterializeSlot(slot);

	/*
	 * Clear out the test tuple.  This is needed in case the EPQ query is
	 * re-used to test a tuple for a different relation.  (Not clear that can
	 * really happen, but let's be safe.)
	 */
	ExecClearTuple(testslot);

	return slot;
}
2470 
2471 /*
2472  * EvalPlanQualInit -- initialize during creation of a plan state node
2473  * that might need to invoke EPQ processing.
2474  *
2475  * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2476  * with EvalPlanQualSetPlan.
2477  */
2478 void
EvalPlanQualInit(EPQState * epqstate,EState * parentestate,Plan * subplan,List * auxrowmarks,int epqParam)2479 EvalPlanQualInit(EPQState *epqstate, EState *parentestate,
2480 				 Plan *subplan, List *auxrowmarks, int epqParam)
2481 {
2482 	Index		rtsize = parentestate->es_range_table_size;
2483 
2484 	/* initialize data not changing over EPQState's lifetime */
2485 	epqstate->parentestate = parentestate;
2486 	epqstate->epqParam = epqParam;
2487 
2488 	/*
2489 	 * Allocate space to reference a slot for each potential rti - do so now
2490 	 * rather than in EvalPlanQualBegin(), as done for other dynamically
2491 	 * allocated resources, so EvalPlanQualSlot() can be used to hold tuples
2492 	 * that *may* need EPQ later, without forcing the overhead of
2493 	 * EvalPlanQualBegin().
2494 	 */
2495 	epqstate->tuple_table = NIL;
2496 	epqstate->relsubs_slot = (TupleTableSlot **)
2497 		palloc0(rtsize * sizeof(TupleTableSlot *));
2498 
2499 	/* ... and remember data that EvalPlanQualBegin will need */
2500 	epqstate->plan = subplan;
2501 	epqstate->arowMarks = auxrowmarks;
2502 
2503 	/* ... and mark the EPQ state inactive */
2504 	epqstate->origslot = NULL;
2505 	epqstate->recheckestate = NULL;
2506 	epqstate->recheckplanstate = NULL;
2507 	epqstate->relsubs_rowmark = NULL;
2508 	epqstate->relsubs_done = NULL;
2509 }
2510 
2511 /*
2512  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2513  *
2514  * We need this so that ModifyTable can deal with multiple subplans.
2515  */
2516 void
EvalPlanQualSetPlan(EPQState * epqstate,Plan * subplan,List * auxrowmarks)2517 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2518 {
2519 	/* If we have a live EPQ query, shut it down */
2520 	EvalPlanQualEnd(epqstate);
2521 	/* And set/change the plan pointer */
2522 	epqstate->plan = subplan;
2523 	/* The rowmarks depend on the plan, too */
2524 	epqstate->arowMarks = auxrowmarks;
2525 }
2526 
2527 /*
2528  * Return, and create if necessary, a slot for an EPQ test tuple.
2529  *
2530  * Note this only requires EvalPlanQualInit() to have been called,
2531  * EvalPlanQualBegin() is not necessary.
2532  */
2533 TupleTableSlot *
EvalPlanQualSlot(EPQState * epqstate,Relation relation,Index rti)2534 EvalPlanQualSlot(EPQState *epqstate,
2535 				 Relation relation, Index rti)
2536 {
2537 	TupleTableSlot **slot;
2538 
2539 	Assert(relation);
2540 	Assert(rti > 0 && rti <= epqstate->parentestate->es_range_table_size);
2541 	slot = &epqstate->relsubs_slot[rti - 1];
2542 
2543 	if (*slot == NULL)
2544 	{
2545 		MemoryContext oldcontext;
2546 
2547 		oldcontext = MemoryContextSwitchTo(epqstate->parentestate->es_query_cxt);
2548 		*slot = table_slot_create(relation, &epqstate->tuple_table);
2549 		MemoryContextSwitchTo(oldcontext);
2550 	}
2551 
2552 	return *slot;
2553 }
2554 
2555 /*
2556  * Fetch the current row value for a non-locked relation, identified by rti,
2557  * that needs to be scanned by an EvalPlanQual operation.  origslot must have
2558  * been set to contain the current result row (top-level row) that we need to
2559  * recheck.  Returns true if a substitution tuple was found, false if not.
2560  */
2561 bool
EvalPlanQualFetchRowMark(EPQState * epqstate,Index rti,TupleTableSlot * slot)2562 EvalPlanQualFetchRowMark(EPQState *epqstate, Index rti, TupleTableSlot *slot)
2563 {
2564 	ExecAuxRowMark *earm = epqstate->relsubs_rowmark[rti - 1];
2565 	ExecRowMark *erm = earm->rowmark;
2566 	Datum		datum;
2567 	bool		isNull;
2568 
2569 	Assert(earm != NULL);
2570 	Assert(epqstate->origslot != NULL);
2571 
2572 	if (RowMarkRequiresRowShareLock(erm->markType))
2573 		elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2574 
2575 	/* if child rel, must check whether it produced this row */
2576 	if (erm->rti != erm->prti)
2577 	{
2578 		Oid			tableoid;
2579 
2580 		datum = ExecGetJunkAttribute(epqstate->origslot,
2581 									 earm->toidAttNo,
2582 									 &isNull);
2583 		/* non-locked rels could be on the inside of outer joins */
2584 		if (isNull)
2585 			return false;
2586 
2587 		tableoid = DatumGetObjectId(datum);
2588 
2589 		Assert(OidIsValid(erm->relid));
2590 		if (tableoid != erm->relid)
2591 		{
2592 			/* this child is inactive right now */
2593 			return false;
2594 		}
2595 	}
2596 
2597 	if (erm->markType == ROW_MARK_REFERENCE)
2598 	{
2599 		Assert(erm->relation != NULL);
2600 
2601 		/* fetch the tuple's ctid */
2602 		datum = ExecGetJunkAttribute(epqstate->origslot,
2603 									 earm->ctidAttNo,
2604 									 &isNull);
2605 		/* non-locked rels could be on the inside of outer joins */
2606 		if (isNull)
2607 			return false;
2608 
2609 		/* fetch requests on foreign tables must be passed to their FDW */
2610 		if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2611 		{
2612 			FdwRoutine *fdwroutine;
2613 			bool		updated = false;
2614 
2615 			fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2616 			/* this should have been checked already, but let's be safe */
2617 			if (fdwroutine->RefetchForeignRow == NULL)
2618 				ereport(ERROR,
2619 						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2620 						 errmsg("cannot lock rows in foreign table \"%s\"",
2621 								RelationGetRelationName(erm->relation))));
2622 
2623 			fdwroutine->RefetchForeignRow(epqstate->recheckestate,
2624 										  erm,
2625 										  datum,
2626 										  slot,
2627 										  &updated);
2628 			if (TupIsNull(slot))
2629 				elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2630 
2631 			/*
2632 			 * Ideally we'd insist on updated == false here, but that assumes
2633 			 * that FDWs can track that exactly, which they might not be able
2634 			 * to.  So just ignore the flag.
2635 			 */
2636 			return true;
2637 		}
2638 		else
2639 		{
2640 			/* ordinary table, fetch the tuple */
2641 			if (!table_tuple_fetch_row_version(erm->relation,
2642 											   (ItemPointer) DatumGetPointer(datum),
2643 											   SnapshotAny, slot))
2644 				elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2645 			return true;
2646 		}
2647 	}
2648 	else
2649 	{
2650 		Assert(erm->markType == ROW_MARK_COPY);
2651 
2652 		/* fetch the whole-row Var for the relation */
2653 		datum = ExecGetJunkAttribute(epqstate->origslot,
2654 									 earm->wholeAttNo,
2655 									 &isNull);
2656 		/* non-locked rels could be on the inside of outer joins */
2657 		if (isNull)
2658 			return false;
2659 
2660 		ExecStoreHeapTupleDatum(datum, slot);
2661 		return true;
2662 	}
2663 }
2664 
2665 /*
2666  * Fetch the next row (if any) from EvalPlanQual testing
2667  *
2668  * (In practice, there should never be more than one row...)
2669  */
2670 TupleTableSlot *
EvalPlanQualNext(EPQState * epqstate)2671 EvalPlanQualNext(EPQState *epqstate)
2672 {
2673 	MemoryContext oldcontext;
2674 	TupleTableSlot *slot;
2675 
2676 	oldcontext = MemoryContextSwitchTo(epqstate->recheckestate->es_query_cxt);
2677 	slot = ExecProcNode(epqstate->recheckplanstate);
2678 	MemoryContextSwitchTo(oldcontext);
2679 
2680 	return slot;
2681 }
2682 
/*
 * Initialize or reset an EvalPlanQual state tree
 *
 * On first call this builds the child EState and plan tree via
 * EvalPlanQualStart(); on later calls it resets the existing tree so it can
 * be rescanned with fresh parameter values.
 */
void
EvalPlanQualBegin(EPQState *epqstate)
{
	EState	   *parentestate = epqstate->parentestate;
	EState	   *recheckestate = epqstate->recheckestate;

	if (recheckestate == NULL)
	{
		/* First time through, so create a child EState */
		EvalPlanQualStart(epqstate, epqstate->plan);
	}
	else
	{
		/*
		 * We already have a suitable child EPQ tree, so just reset it.
		 */
		Index		rtsize = parentestate->es_range_table_size;
		PlanState  *rcplanstate = epqstate->recheckplanstate;

		/* forget which relations were already substituted last round */
		MemSet(epqstate->relsubs_done, 0, rtsize * sizeof(bool));

		/* Recopy current values of parent parameters */
		if (parentestate->es_plannedstmt->paramExecTypes != NIL)
		{
			int			i;

			/*
			 * Force evaluation of any InitPlan outputs that could be needed
			 * by the subplan, just in case they got reset since
			 * EvalPlanQualStart (see comments therein).
			 */
			ExecSetParamPlanMulti(rcplanstate->plan->extParam,
								  GetPerTupleExprContext(parentestate));

			i = list_length(parentestate->es_plannedstmt->paramExecTypes);

			while (--i >= 0)
			{
				/* copy value if any, but not execPlan link */
				recheckestate->es_param_exec_vals[i].value =
					parentestate->es_param_exec_vals[i].value;
				recheckestate->es_param_exec_vals[i].isnull =
					parentestate->es_param_exec_vals[i].isnull;
			}
		}

		/*
		 * Mark child plan tree as needing rescan at all scan nodes.  The
		 * first ExecProcNode will take care of actually doing the rescan.
		 */
		rcplanstate->chgParam = bms_add_member(rcplanstate->chgParam,
											   epqstate->epqParam);
	}
}
2740 
/*
 * Start execution of an EvalPlanQual plan tree.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 *
 * On return, epqstate->recheckestate and epqstate->recheckplanstate are
 * set up, and the relsubs_rowmark and relsubs_done arrays are allocated
 * and initialized (to no-rowmark / not-fetched respectively).
 */
static void
EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
{
	EState	   *parentestate = epqstate->parentestate;
	Index		rtsize = parentestate->es_range_table_size;
	EState	   *rcestate;
	MemoryContext oldcontext;
	ListCell   *l;

	epqstate->recheckestate = rcestate = CreateExecutorState();

	/* Everything allocated below lives in the child's per-query context */
	oldcontext = MemoryContextSwitchTo(rcestate->es_query_cxt);

	/* signal that this is an EState for executing EPQ */
	rcestate->es_epq_active = epqstate;

	/*
	 * Child EPQ EStates share the parent's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 *
	 * The ResultRelInfo array management is trickier than it looks.  We
	 * create fresh arrays for the child but copy all the content from the
	 * parent.  This is because it's okay for the child to share any
	 * per-relation state the parent has already created --- but if the child
	 * sets up any ResultRelInfo fields, such as its own junkfilter, that
	 * state must *not* propagate back to the parent.  (For one thing, the
	 * pointed-to data is in a memory context that won't last long enough.)
	 */
	rcestate->es_direction = ForwardScanDirection;
	rcestate->es_snapshot = parentestate->es_snapshot;
	rcestate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
	rcestate->es_range_table = parentestate->es_range_table;
	rcestate->es_range_table_array = parentestate->es_range_table_array;
	rcestate->es_range_table_size = parentestate->es_range_table_size;
	rcestate->es_relations = parentestate->es_relations;
	rcestate->es_queryEnv = parentestate->es_queryEnv;
	rcestate->es_rowmarks = parentestate->es_rowmarks;
	rcestate->es_plannedstmt = parentestate->es_plannedstmt;
	rcestate->es_junkFilter = parentestate->es_junkFilter;
	rcestate->es_output_cid = parentestate->es_output_cid;
	if (parentestate->es_num_result_relations > 0)
	{
		int			numResultRelations = parentestate->es_num_result_relations;
		int			numRootResultRels = parentestate->es_num_root_result_relations;
		ResultRelInfo *resultRelInfos;

		/* Fresh array, shared per-relation content (see comment above) */
		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		memcpy(resultRelInfos, parentestate->es_result_relations,
			   numResultRelations * sizeof(ResultRelInfo));
		rcestate->es_result_relations = resultRelInfos;
		rcestate->es_num_result_relations = numResultRelations;

		/* Also transfer partitioned root result relations. */
		if (numRootResultRels > 0)
		{
			resultRelInfos = (ResultRelInfo *)
				palloc(numRootResultRels * sizeof(ResultRelInfo));
			memcpy(resultRelInfos, parentestate->es_root_result_relations,
				   numRootResultRels * sizeof(ResultRelInfo));
			rcestate->es_root_result_relations = resultRelInfos;
			rcestate->es_num_root_result_relations = numRootResultRels;
		}
	}
	/* es_result_relation_info must NOT be copied */
	/* es_trig_target_relations must NOT be copied */
	rcestate->es_top_eflags = parentestate->es_top_eflags;
	rcestate->es_instrument = parentestate->es_instrument;
	/* es_auxmodifytables must NOT be copied */

	/*
	 * The external param list is simply shared from parent.  The internal
	 * param workspace has to be local state, but we copy the initial values
	 * from the parent, so as to have access to any param values that were
	 * already set from other parts of the parent's plan tree.
	 */
	rcestate->es_param_list_info = parentestate->es_param_list_info;
	if (parentestate->es_plannedstmt->paramExecTypes != NIL)
	{
		int			i;

		/*
		 * Force evaluation of any InitPlan outputs that could be needed by
		 * the subplan.  (With more complexity, maybe we could postpone this
		 * till the subplan actually demands them, but it doesn't seem worth
		 * the trouble; this is a corner case already, since usually the
		 * InitPlans would have been evaluated before reaching EvalPlanQual.)
		 *
		 * This will not touch output params of InitPlans that occur somewhere
		 * within the subplan tree, only those that are attached to the
		 * ModifyTable node or above it and are referenced within the subplan.
		 * That's OK though, because the planner would only attach such
		 * InitPlans to a lower-level SubqueryScan node, and EPQ execution
		 * will not descend into a SubqueryScan.
		 *
		 * The EState's per-output-tuple econtext is sufficiently short-lived
		 * for this, since it should get reset before there is any chance of
		 * doing EvalPlanQual again.
		 */
		ExecSetParamPlanMulti(planTree->extParam,
							  GetPerTupleExprContext(parentestate));

		/* now make the internal param workspace ... */
		i = list_length(parentestate->es_plannedstmt->paramExecTypes);
		rcestate->es_param_exec_vals = (ParamExecData *)
			palloc0(i * sizeof(ParamExecData));
		/* ... and copy down all values, whether really needed or not */
		while (--i >= 0)
		{
			/* copy value if any, but not execPlan link */
			rcestate->es_param_exec_vals[i].value =
				parentestate->es_param_exec_vals[i].value;
			rcestate->es_param_exec_vals[i].isnull =
				parentestate->es_param_exec_vals[i].isnull;
		}
	}

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries. Some of the
	 * SubPlans might not be used in the part of the plan tree we intend to
	 * run, but since it's not easy to tell which, we just initialize them
	 * all.
	 */
	Assert(rcestate->es_subplanstates == NIL);
	foreach(l, parentestate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, rcestate, 0);
		rcestate->es_subplanstates = lappend(rcestate->es_subplanstates,
											 subplanstate);
	}

	/*
	 * Build an RTI indexed array of rowmarks, so that
	 * EvalPlanQualFetchRowMark() can efficiently access the to be fetched
	 * rowmark.
	 */
	epqstate->relsubs_rowmark = (ExecAuxRowMark **)
		palloc0(rtsize * sizeof(ExecAuxRowMark *));
	foreach(l, epqstate->arowMarks)
	{
		ExecAuxRowMark *earm = (ExecAuxRowMark *) lfirst(l);

		/* rowmark RTIs are 1-based; array slots are 0-based */
		epqstate->relsubs_rowmark[earm->rowmark->rti - 1] = earm;
	}

	/*
	 * Initialize per-relation EPQ tuple states to not-fetched.
	 */
	epqstate->relsubs_done = (bool *)
		palloc0(rtsize * sizeof(bool));

	/*
	 * Initialize the private state information for all the nodes in the part
	 * of the plan tree we need to run.  This opens files, allocates storage
	 * and leaves us ready to start processing tuples.
	 */
	epqstate->recheckplanstate = ExecInitNode(planTree, rcestate, 0);

	MemoryContextSwitchTo(oldcontext);
}
2914 
2915 /*
2916  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
2917  * or if we are done with the current EPQ child.
2918  *
2919  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2920  * of the normal cleanup, but *not* close result relations (which we are
2921  * just sharing from the outer query).  We do, however, have to close any
2922  * trigger target relations that got opened, since those are not shared.
2923  * (There probably shouldn't be any of the latter, but just in case...)
2924  */
2925 void
EvalPlanQualEnd(EPQState * epqstate)2926 EvalPlanQualEnd(EPQState *epqstate)
2927 {
2928 	EState	   *estate = epqstate->recheckestate;
2929 	Index		rtsize;
2930 	MemoryContext oldcontext;
2931 	ListCell   *l;
2932 
2933 	rtsize = epqstate->parentestate->es_range_table_size;
2934 
2935 	/*
2936 	 * We may have a tuple table, even if EPQ wasn't started, because we allow
2937 	 * use of EvalPlanQualSlot() without calling EvalPlanQualBegin().
2938 	 */
2939 	if (epqstate->tuple_table != NIL)
2940 	{
2941 		memset(epqstate->relsubs_slot, 0,
2942 			   rtsize * sizeof(TupleTableSlot *));
2943 		ExecResetTupleTable(epqstate->tuple_table, true);
2944 		epqstate->tuple_table = NIL;
2945 	}
2946 
2947 	/* EPQ wasn't started, nothing further to do */
2948 	if (estate == NULL)
2949 		return;
2950 
2951 	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2952 
2953 	ExecEndNode(epqstate->recheckplanstate);
2954 
2955 	foreach(l, estate->es_subplanstates)
2956 	{
2957 		PlanState  *subplanstate = (PlanState *) lfirst(l);
2958 
2959 		ExecEndNode(subplanstate);
2960 	}
2961 
2962 	/* throw away the per-estate tuple table, some node may have used it */
2963 	ExecResetTupleTable(estate->es_tupleTable, false);
2964 
2965 	/* close any trigger target relations attached to this EState */
2966 	ExecCleanUpTriggerState(estate);
2967 
2968 	MemoryContextSwitchTo(oldcontext);
2969 
2970 	FreeExecutorState(estate);
2971 
2972 	/* Mark EPQState idle */
2973 	epqstate->origslot = NULL;
2974 	epqstate->recheckestate = NULL;
2975 	epqstate->recheckplanstate = NULL;
2976 	epqstate->relsubs_rowmark = NULL;
2977 	epqstate->relsubs_done = NULL;
2978 }
2979