/*-------------------------------------------------------------------------
 *
 * nodeModifyTable.c
 *	  routines to handle ModifyTable nodes.
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeModifyTable.c
 *
 *-------------------------------------------------------------------------
 */
/* INTERFACE ROUTINES
 *		ExecInitModifyTable - initialize the ModifyTable node
 *		ExecModifyTable		- retrieve the next tuple from the node
 *		ExecEndModifyTable	- shut down the ModifyTable node
 *		ExecReScanModifyTable - rescan the ModifyTable node
 *
 *	 NOTES
 *		Each ModifyTable node contains a list of one or more subplans,
 *		much like an Append node.  There is one subplan per result relation.
 *		The key reason for this is that in an inherited UPDATE command, each
 *		result relation could have a different schema (more or different
 *		columns) requiring a different plan tree to produce it.  In an
 *		inherited DELETE, all the subplans should produce the same output
 *		rowtype, but we might still find that different plans are appropriate
 *		for different child relations.
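 *
 *		As a hypothetical illustration of the inherited UPDATE case (table
 *		and column names are made up):
 *
 *			CREATE TABLE parent (f1 int, f2 text);
 *			CREATE TABLE child (f3 int) INHERITS (parent);
 *			UPDATE parent SET f2 = 'x';
 *
 *		The subplan for "child" must also produce its extra column f3, so
 *		it cannot share a plan tree with the subplan for "parent".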
 *
 *		If the query specifies RETURNING, then the ModifyTable returns a
 *		RETURNING tuple after completing each row insert, update, or delete.
 *		It must be called again to continue the operation.  Without RETURNING,
 *		we just loop within the node until all the work is done, then
 *		return NULL.  This avoids useless call/return overhead.
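 *
 *		For example, "DELETE FROM tab RETURNING *" (a hypothetical table)
 *		hands back one result tuple per call to ExecModifyTable, whereas a
 *		plain "DELETE FROM tab" performs all the deletions in a single call
 *		and then returns NULL.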
 */

#include "postgres.h"

#include "access/htup_details.h"
#include "access/xact.h"
#include "commands/trigger.h"
#include "executor/executor.h"
#include "executor/nodeModifyTable.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/tqual.h"


static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
					 ResultRelInfo *resultRelInfo,
					 ItemPointer conflictTid,
					 TupleTableSlot *planSlot,
					 TupleTableSlot *excludedSlot,
					 EState *estate,
					 bool canSetTag,
					 TupleTableSlot **returning);

/*
 * Verify that the tuples to be produced by INSERT or UPDATE match the
 * target relation's rowtype
 *
 * We do this to guard against stale plans.  If plan invalidation is
 * functioning properly then we should never get a failure here, but better
 * safe than sorry.  Note that this is called after we have obtained lock
 * on the target rel, so the rowtype can't change underneath us.
 *
 * The plan output is represented by its targetlist, because that makes
 * handling the dropped-column case easier.
 */
static void
ExecCheckPlanOutput(Relation resultRel, List *targetList)
{
	TupleDesc	resultDesc = RelationGetDescr(resultRel);
	int			attno = 0;
	ListCell   *lc;

	foreach(lc, targetList)
	{
		TargetEntry *tle = (TargetEntry *) lfirst(lc);
		Form_pg_attribute attr;

		if (tle->resjunk)
			continue;			/* ignore junk tlist items */

		if (attno >= resultDesc->natts)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("table row type and query-specified row type do not match"),
					 errdetail("Query has too many columns.")));
		attr = resultDesc->attrs[attno++];

		if (!attr->attisdropped)
		{
			/* Normal case: demand type match */
			if (exprType((Node *) tle->expr) != attr->atttypid)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("table row type and query-specified row type do not match"),
						 errdetail("Table has type %s at ordinal position %d, but query expects %s.",
								   format_type_be(attr->atttypid),
								   attno,
								   format_type_be(exprType((Node *) tle->expr)))));
		}
		else
		{
			/*
			 * For a dropped column, we can't check atttypid (it's likely 0).
			 * In any case the planner has most likely inserted an INT4 null.
			 * What we insist on is just *some* NULL constant.
			 */
			if (!IsA(tle->expr, Const) ||
				!((Const *) tle->expr)->constisnull)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("table row type and query-specified row type do not match"),
						 errdetail("Query provides a value for a dropped column at ordinal position %d.",
								   attno)));
		}
	}
	if (attno != resultDesc->natts)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("table row type and query-specified row type do not match"),
				 errdetail("Query has too few columns.")));
}

/*
 * ExecProcessReturning --- evaluate a RETURNING list
 *
 * projectReturning: RETURNING projection info for current result rel
 * tupleSlot: slot holding tuple actually inserted/updated/deleted
 * planSlot: slot holding tuple returned by top subplan node
 *
 * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
 * scan tuple.
 *
 * Returns a slot holding the result tuple
 */
static TupleTableSlot *
ExecProcessReturning(ResultRelInfo *resultRelInfo,
					 TupleTableSlot *tupleSlot,
					 TupleTableSlot *planSlot)
{
	ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
	ExprContext *econtext = projectReturning->pi_exprContext;

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous cycle.
	 */
	ResetExprContext(econtext);

	/* Make tuple and any needed join variables available to ExecProject */
	if (tupleSlot)
		econtext->ecxt_scantuple = tupleSlot;
	else
	{
		HeapTuple	tuple;

		/*
		 * RETURNING expressions might reference the tableoid column, so
		 * initialize t_tableOid before evaluating them.
		 */
		Assert(!TupIsNull(econtext->ecxt_scantuple));
		tuple = ExecMaterializeSlot(econtext->ecxt_scantuple);
		tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
	}
	econtext->ecxt_outertuple = planSlot;

	/* Compute the RETURNING expressions */
	return ExecProject(projectReturning, NULL);
}

/*
 * ExecCheckHeapTupleVisible -- verify heap tuple is visible
 *
 * It would not be consistent with the guarantees of the higher isolation
 * levels to proceed with avoiding insertion (taking speculative insertion's
 * alternative path) on the basis of another tuple that is not visible to the
 * MVCC snapshot.  Check for the need to raise a serialization failure, and
 * do so as necessary.
 */
static void
ExecCheckHeapTupleVisible(EState *estate,
						  HeapTuple tuple,
						  Buffer buffer)
{
	if (!IsolationUsesXactSnapshot())
		return;

	/*
	 * We need buffer pin and lock to call HeapTupleSatisfiesVisibility.
	 * Caller should be holding pin, but not lock.
	 */
	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	if (!HeapTupleSatisfiesVisibility(tuple, estate->es_snapshot, buffer))
	{
		/*
		 * We should not raise a serialization failure if the conflict is
		 * against a tuple inserted by our own transaction, even if it's not
		 * visible to our snapshot.  (This would happen, for example, if
		 * conflicting keys are proposed for insertion in a single command.)
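		 *
		 * A hypothetical statement exercising this, assuming "tab" has a
		 * unique column:
		 *
		 *		INSERT INTO tab VALUES (1), (1) ON CONFLICT DO NOTHING;
		 *
		 * The second row conflicts with our own just-inserted first row,
		 * which the statement snapshot cannot see.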
		 */
		if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
			ereport(ERROR,
					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
					 errmsg("could not serialize access due to concurrent update")));
	}
	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
}

/*
 * ExecCheckTIDVisible -- convenience variant of ExecCheckHeapTupleVisible()
 */
static void
ExecCheckTIDVisible(EState *estate,
					ResultRelInfo *relinfo,
					ItemPointer tid)
{
	Relation	rel = relinfo->ri_RelationDesc;
	Buffer		buffer;
	HeapTupleData tuple;

	/* Redundantly check isolation level */
	if (!IsolationUsesXactSnapshot())
		return;

	tuple.t_self = *tid;
	if (!heap_fetch(rel, SnapshotAny, &tuple, &buffer, false, NULL))
		elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
	ExecCheckHeapTupleVisible(estate, &tuple, buffer);
	ReleaseBuffer(buffer);
}

/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		For INSERT, we have to insert the tuple into the target relation
 *		and insert appropriate tuples into the index relations.
 *
 *		Returns RETURNING result if any, otherwise NULL.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecInsert(ModifyTableState *mtstate,
		   TupleTableSlot *slot,
		   TupleTableSlot *planSlot,
		   List *arbiterIndexes,
		   OnConflictAction onconflict,
		   EState *estate,
		   bool canSetTag)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	Oid			newId;
	List	   *recheckIndexes = NIL;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/*
	 * If the result relation has OIDs, force the tuple's OID to zero so that
	 * heap_insert will assign a fresh OID.  Usually the OID already will be
	 * zero at this point, but there are corner cases where the plan tree can
	 * return a tuple extracted literally from some table with the same
	 * rowtype.
	 *
	 * XXX if we ever wanted to allow users to assign their own OIDs to new
	 * rows, this'd be the place to do it.  For the moment, we make a point of
	 * doing this before calling triggers, so that a user-supplied trigger
	 * could hack the OID if desired.
	 */
	if (resultRelationDesc->rd_rel->relhasoids)
		HeapTupleSetOid(tuple, InvalidOid);

	/*
	 * BEFORE ROW INSERT Triggers.
	 *
	 * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
	 * INSERT ... ON CONFLICT statement.  We cannot check for constraint
	 * violations before firing these triggers, because they can change the
	 * values to insert.  Also, they can run arbitrary user-defined code with
	 * side-effects that we can't cancel by just not inserting the tuple.
	 */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_insert_before_row)
	{
		slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* trigger might have changed tuple */
		tuple = ExecMaterializeSlot(slot);
	}

	/* INSTEAD OF ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
	{
		slot = ExecIRInsertTriggers(estate, resultRelInfo, slot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* trigger might have changed tuple */
		tuple = ExecMaterializeSlot(slot);

		newId = InvalidOid;
	}
	else if (resultRelInfo->ri_FdwRoutine)
	{
		/*
		 * insert into foreign table: let the FDW do it
		 */
		slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
															   resultRelInfo,
															   slot,
															   planSlot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* FDW might have changed tuple */
		tuple = ExecMaterializeSlot(slot);

		/*
		 * AFTER ROW Triggers or RETURNING expressions might reference the
		 * tableoid column, so initialize t_tableOid before evaluating them.
		 */
		tuple->t_tableOid = RelationGetRelid(resultRelationDesc);

		newId = InvalidOid;
	}
	else
	{
		/*
		 * Constraints might reference the tableoid column, so initialize
		 * t_tableOid before evaluating them.
		 */
		tuple->t_tableOid = RelationGetRelid(resultRelationDesc);

		/*
		 * Check any RLS INSERT WITH CHECK policies
		 *
		 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
		 * we are looking for at this point.
		 */
		if (resultRelInfo->ri_WithCheckOptions != NIL)
			ExecWithCheckOptions(WCO_RLS_INSERT_CHECK,
								 resultRelInfo, slot, estate);

		/*
		 * Check the constraints of the tuple
		 */
		if (resultRelationDesc->rd_att->constr)
			ExecConstraints(resultRelInfo, slot, estate);

		if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
		{
			/* Perform a speculative insertion. */
			uint32		specToken;
			ItemPointerData conflictTid;
			bool		specConflict;

			/*
			 * Do a non-conclusive check for conflicts first.
			 *
			 * We're not holding any locks yet, so this doesn't guarantee that
			 * the later insert won't conflict.  But it avoids leaving behind
			 * a lot of canceled speculative insertions, if you run a lot of
			 * INSERT ON CONFLICT statements that do conflict.
			 *
			 * We loop back here if we find a conflict below, either during
			 * the pre-check, or when we re-check after inserting the tuple
			 * speculatively.
			 */
	vlock:
			specConflict = false;
			if (!ExecCheckIndexConstraints(slot, estate, &conflictTid,
										   arbiterIndexes))
			{
				/* committed conflict tuple found */
				if (onconflict == ONCONFLICT_UPDATE)
				{
					/*
					 * In case of ON CONFLICT DO UPDATE, execute the UPDATE
					 * part.  Be prepared to retry if the UPDATE fails because
					 * of another concurrent UPDATE/DELETE to the conflict
					 * tuple.
					 */
					TupleTableSlot *returning = NULL;

					if (ExecOnConflictUpdate(mtstate, resultRelInfo,
											 &conflictTid, planSlot, slot,
											 estate, canSetTag, &returning))
					{
						InstrCountFiltered2(&mtstate->ps, 1);
						return returning;
					}
					else
						goto vlock;
				}
				else
				{
					/*
					 * In case of ON CONFLICT DO NOTHING, do nothing. However,
					 * verify that the tuple is visible to the executor's MVCC
					 * snapshot at higher isolation levels.
					 */
					Assert(onconflict == ONCONFLICT_NOTHING);
					ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid);
					InstrCountFiltered2(&mtstate->ps, 1);
					return NULL;
				}
			}

			/*
			 * Before we start insertion proper, acquire our "speculative
			 * insertion lock".  Others can use that to wait for us to decide
			 * if we're going to go ahead with the insertion, instead of
			 * waiting for the whole transaction to complete.
			 */
			specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
			HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);

			/* insert the tuple, with the speculative token */
			newId = heap_insert(resultRelationDesc, tuple,
								estate->es_output_cid,
								HEAP_INSERT_SPECULATIVE,
								NULL);

			/* insert index entries for tuple */
			recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
												   estate, true, &specConflict,
												   arbiterIndexes);

			/* adjust the tuple's state accordingly */
			if (!specConflict)
				heap_finish_speculative(resultRelationDesc, tuple);
			else
				heap_abort_speculative(resultRelationDesc, tuple);

			/*
			 * Wake up anyone waiting for our decision.  They will re-check
			 * the tuple, see that it's no longer speculative, and wait on our
			 * XID as if this was a regularly inserted tuple all along.  Or if
			 * we killed the tuple, they will see it's dead, and proceed as if
			 * the tuple never existed.
			 */
			SpeculativeInsertionLockRelease(GetCurrentTransactionId());

			/*
			 * If there was a conflict, start from the beginning.  We'll do
			 * the pre-check again, which will now find the conflicting tuple
			 * (unless it aborts before we get there).
			 */
			if (specConflict)
			{
				list_free(recheckIndexes);
				goto vlock;
			}

			/* Since there was no insertion conflict, we're done */
		}
		else
		{
			/*
			 * insert the tuple normally.
			 *
			 * Note: heap_insert returns the tid (location) of the new tuple
			 * in the t_self field.
			 */
			newId = heap_insert(resultRelationDesc, tuple,
								estate->es_output_cid,
								0, NULL);

			/* insert index entries for tuple */
			if (resultRelInfo->ri_NumIndices > 0)
				recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
													   estate, false, NULL,
													   arbiterIndexes);
		}
	}

	if (canSetTag)
	{
		(estate->es_processed)++;
		estate->es_lastoid = newId;
		setLastTid(&(tuple->t_self));
	}

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes);

	list_free(recheckIndexes);

	/*
	 * Check any WITH CHECK OPTION constraints from parent views.  We are
	 * required to do this after testing all constraints and uniqueness
	 * violations per the SQL spec, so we do it after actually inserting the
	 * record into the heap and all indexes.
	 *
	 * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
	 * tuple will never be seen, if it violates the WITH CHECK OPTION.
	 *
	 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
	 * are looking for at this point.
	 */
	if (resultRelInfo->ri_WithCheckOptions != NIL)
		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		return ExecProcessReturning(resultRelInfo, slot, planSlot);

	return NULL;
}

/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed.
 *
 *		When deleting from a table, tupleid identifies the tuple to
 *		delete and oldtuple is NULL.  When deleting from a view,
 *		oldtuple is passed to the INSTEAD OF triggers and identifies
 *		what to delete, and tupleid is invalid.  When deleting from a
 *		foreign table, tupleid is invalid; the FDW has to figure out
 *		which row to delete using data from the planSlot.  oldtuple is
 *		passed to foreign table triggers; it is NULL when the foreign
 *		table has no relevant triggers.
 *
 *		Returns RETURNING result if any, otherwise NULL.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecDelete(ItemPointer tupleid,
		   HeapTuple oldtuple,
		   TupleTableSlot *planSlot,
		   EPQState *epqstate,
		   EState *estate,
		   bool canSetTag)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	HeapUpdateFailureData hufd;
	TupleTableSlot *slot = NULL;

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_delete_before_row)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
										tupleid, oldtuple);

		if (!dodelete)			/* "do nothing" */
			return NULL;
	}

	/* INSTEAD OF ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
	{
		bool		dodelete;

		Assert(oldtuple != NULL);
		dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);

		if (!dodelete)			/* "do nothing" */
			return NULL;
	}
	else if (resultRelInfo->ri_FdwRoutine)
	{
		HeapTuple	tuple;

		/*
		 * delete from foreign table: let the FDW do it
		 *
		 * We offer the trigger tuple slot as a place to store RETURNING data,
		 * although the FDW can return some other slot if it wants.  Set up
		 * the slot's tupdesc so the FDW doesn't need to do that for itself.
		 */
		slot = estate->es_trig_tuple_slot;
		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));

		slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
															   resultRelInfo,
															   slot,
															   planSlot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/*
		 * RETURNING expressions might reference the tableoid column, so
		 * initialize t_tableOid before evaluating them.
		 */
		if (slot->tts_isempty)
			ExecStoreAllNullTuple(slot);
		tuple = ExecMaterializeSlot(slot);
		tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
	}
	else
	{
		/*
		 * delete the tuple
		 *
		 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
		 * that the row to be deleted is visible to that snapshot, and throw a
		 * can't-serialize error if not. This is a special-case behavior
		 * needed for referential integrity updates in transaction-snapshot
		 * mode transactions.
		 */
ldelete:;
		result = heap_delete(resultRelationDesc, tupleid,
							 estate->es_output_cid,
							 estate->es_crosscheck_snapshot,
							 true /* wait for commit */ ,
							 &hufd);
		switch (result)
		{
			case HeapTupleSelfUpdated:

				/*
				 * The target tuple was already updated or deleted by the
				 * current command, or by a later command in the current
				 * transaction.  The former case is possible in a join DELETE
				 * where multiple tuples join to the same target tuple. This
				 * is somewhat questionable, but Postgres has always allowed
				 * it: we just ignore additional deletion attempts.
				 *
				 * The latter case arises if the tuple is modified by a
				 * command in a BEFORE trigger, or perhaps by a command in a
				 * volatile function used in the query.  In such situations we
				 * should not ignore the deletion, but it is equally unsafe to
				 * proceed.  We don't want to discard the original DELETE
				 * while keeping the triggered actions based on its deletion;
				 * and it would be no better to allow the original DELETE
				 * while discarding updates that it triggered.  The row update
				 * carries some information that might be important according
				 * to business rules; so throwing an error is the only safe
				 * course.
				 *
				 * If a trigger actually intends this type of interaction, it
				 * can re-execute the DELETE and then return NULL to cancel
				 * the outer delete.
				 */
				if (hufd.cmax != estate->es_output_cid)
					ereport(ERROR,
							(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
							 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
							 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));

				/* Else, already deleted by self; nothing to do */
				return NULL;

			case HeapTupleMayBeUpdated:
				break;

			case HeapTupleUpdated:
				if (IsolationUsesXactSnapshot())
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
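
				/*
				 * The row was concurrently updated (rather than deleted) if
				 * its ctid changed: in READ COMMITTED mode, re-verify the
				 * quals against the latest row version with EvalPlanQual
				 * before retrying the delete on that version.
				 */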
				if (!ItemPointerEquals(tupleid, &hufd.ctid))
				{
					TupleTableSlot *epqslot;

					epqslot = EvalPlanQual(estate,
										   epqstate,
										   resultRelationDesc,
										   resultRelInfo->ri_RangeTableIndex,
										   LockTupleExclusive,
										   &hufd.ctid,
										   hufd.xmax);
					if (!TupIsNull(epqslot))
					{
						*tupleid = hufd.ctid;
						goto ldelete;
					}
				}
				/* tuple already deleted; nothing to do */
				return NULL;

			default:
				elog(ERROR, "unrecognized heap_delete status: %u", result);
				return NULL;
		}

		/*
		 * Note: Normally one would think that we have to delete index tuples
		 * associated with the heap tuple now...
		 *
		 * ... but in POSTGRES, we have no need to do this because VACUUM will
		 * take care of it later.  We can't delete index tuples immediately
		 * anyway, since the tuple is still visible to other transactions.
		 */
	}

	if (canSetTag)
		(estate->es_processed)++;

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the target tuple into a slot, which means first we
		 * gotta fetch it.  We can use the trigger tuple slot.
		 */
		TupleTableSlot *rslot;
		HeapTupleData deltuple;
		Buffer		delbuffer;

		if (resultRelInfo->ri_FdwRoutine)
		{
			/* FDW must have provided a slot containing the deleted row */
			Assert(!TupIsNull(slot));
			delbuffer = InvalidBuffer;
		}
		else
		{
			slot = estate->es_trig_tuple_slot;
			if (oldtuple != NULL)
			{
				deltuple = *oldtuple;
				delbuffer = InvalidBuffer;
			}
			else
			{
				deltuple.t_self = *tupleid;
				if (!heap_fetch(resultRelationDesc, SnapshotAny,
								&deltuple, &delbuffer, false, NULL))
					elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
			}

			if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
				ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
			ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);
		}

		rslot = ExecProcessReturning(resultRelInfo, slot, planSlot);

		/*
		 * Before releasing the target tuple again, make sure rslot has a
		 * local copy of any pass-by-reference values.
		 */
		ExecMaterializeSlot(rslot);

		ExecClearTuple(slot);
		if (BufferIsValid(delbuffer))
			ReleaseBuffer(delbuffer);

		return rslot;
	}

	return NULL;
}

/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted.  This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database.
 *
 *		When updating a table, tupleid identifies the tuple to
 *		update and oldtuple is NULL.  When updating a view, oldtuple
 *		is passed to the INSTEAD OF triggers and identifies what to
 *		update, and tupleid is invalid.  When updating a foreign table,
 *		tupleid is invalid; the FDW has to figure out which row to
 *		update using data from the planSlot.  oldtuple is passed to
 *		foreign table triggers; it is NULL when the foreign table has
 *		no relevant triggers.
 *
 *		Returns RETURNING result if any, otherwise NULL.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecUpdate(ItemPointer tupleid,
		   HeapTuple oldtuple,
		   TupleTableSlot *slot,
		   TupleTableSlot *planSlot,
		   EPQState *epqstate,
		   EState *estate,
		   bool canSetTag)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	HeapUpdateFailureData hufd;
	List	   *recheckIndexes = NIL;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_update_before_row)
	{
		slot = ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
									tupleid, oldtuple, slot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* trigger might have changed tuple */
		tuple = ExecMaterializeSlot(slot);
	}

	/* INSTEAD OF ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_update_instead_row)
	{
		slot = ExecIRUpdateTriggers(estate, resultRelInfo,
									oldtuple, slot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* trigger might have changed tuple */
		tuple = ExecMaterializeSlot(slot);
	}
	else if (resultRelInfo->ri_FdwRoutine)
	{
		/*
		 * update in foreign table: let the FDW do it
		 */
		slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
															   resultRelInfo,
															   slot,
															   planSlot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* FDW might have changed tuple */
		tuple = ExecMaterializeSlot(slot);

		/*
		 * AFTER ROW Triggers or RETURNING expressions might reference the
		 * tableoid column, so initialize t_tableOid before evaluating them.
		 */
		tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
	}
	else
	{
		LockTupleMode lockmode;

		/*
		 * Constraints might reference the tableoid column, so initialize
		 * t_tableOid before evaluating them.
		 */
		tuple->t_tableOid = RelationGetRelid(resultRelationDesc);

		/*
		 * Check any RLS UPDATE WITH CHECK policies
		 *
		 * If we generate a new candidate tuple after EvalPlanQual testing, we
		 * must loop back here and recheck any RLS policies and constraints.
		 * (We don't need to redo triggers, however.  If there are any BEFORE
		 * triggers then trigger.c will have done heap_lock_tuple to lock the
		 * correct tuple, so there's no need to do them again.)
		 *
		 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
		 * we are looking for at this point.
		 */
lreplace:;
		if (resultRelInfo->ri_WithCheckOptions != NIL)
			ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
								 resultRelInfo, slot, estate);

		/*
		 * Check the constraints of the tuple
		 */
		if (resultRelationDesc->rd_att->constr)
			ExecConstraints(resultRelInfo, slot, estate);

		/*
		 * replace the heap tuple
		 *
		 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
		 * that the row to be updated is visible to that snapshot, and throw a
		 * can't-serialize error if not. This is a special-case behavior
		 * needed for referential integrity updates in transaction-snapshot
		 * mode transactions.
		 */
		result = heap_update(resultRelationDesc, tupleid, tuple,
							 estate->es_output_cid,
							 estate->es_crosscheck_snapshot,
							 true /* wait for commit */ ,
							 &hufd, &lockmode);
		switch (result)
		{
			case HeapTupleSelfUpdated:

				/*
				 * The target tuple was already updated or deleted by the
				 * current command, or by a later command in the current
				 * transaction.  The former case is possible in a join UPDATE
				 * where multiple tuples join to the same target tuple. This
				 * is pretty questionable, but Postgres has always allowed it:
				 * we just execute the first update action and ignore
				 * additional update attempts.
				 *
				 * The latter case arises if the tuple is modified by a
				 * command in a BEFORE trigger, or perhaps by a command in a
				 * volatile function used in the query.  In such situations we
				 * should not ignore the update, but it is equally unsafe to
				 * proceed.  We don't want to discard the original UPDATE
				 * while keeping the triggered actions based on it; and we
				 * have no principled way to merge this update with the
				 * previous ones.  So throwing an error is the only safe
				 * course.
				 *
				 * If a trigger actually intends this type of interaction, it
				 * can re-execute the UPDATE (assuming it can figure out how)
				 * and then return NULL to cancel the outer update.
				 */
				if (hufd.cmax != estate->es_output_cid)
					ereport(ERROR,
							(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
							 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
							 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));

				/* Else, already updated by self; nothing to do */
				return NULL;

			case HeapTupleMayBeUpdated:
				break;

			case HeapTupleUpdated:
				if (IsolationUsesXactSnapshot())
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
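
				/*
				 * As in ExecDelete, in READ COMMITTED mode re-verify the
				 * quals against the latest row version with EvalPlanQual;
				 * if they still pass, loop back and retry the update
				 * against that version.
				 */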
				if (!ItemPointerEquals(tupleid, &hufd.ctid))
				{
					TupleTableSlot *epqslot;

					epqslot = EvalPlanQual(estate,
										   epqstate,
										   resultRelationDesc,
										   resultRelInfo->ri_RangeTableIndex,
										   lockmode,
										   &hufd.ctid,
										   hufd.xmax);
					if (!TupIsNull(epqslot))
					{
						*tupleid = hufd.ctid;
						slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
						tuple = ExecMaterializeSlot(slot);
						goto lreplace;
					}
				}
				/* tuple already deleted; nothing to do */
				return NULL;

			default:
				elog(ERROR, "unrecognized heap_update status: %u", result);
				return NULL;
		}

		/*
		 * Note: instead of having to update the old index tuples associated
		 * with the heap tuple, all we do is form and insert new index tuples.
		 * This is because UPDATEs are actually DELETEs and INSERTs, and index
		 * tuple deletion is done later by VACUUM (see notes in ExecDelete).
		 * All we do here is insert new index tuples.  -cim 9/27/89
		 */

		/*
		 * insert index entries for tuple
		 *
		 * Note: heap_update returns the tid (location) of the new tuple in
		 * the t_self field.
		 *
		 * If it's a HOT update, we mustn't insert new index entries.
		 */
		if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
			recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
												   estate, false, NULL, NIL);
	}

	if (canSetTag)
		(estate->es_processed)++;

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, oldtuple, tuple,
						 recheckIndexes);

	list_free(recheckIndexes);

	/*
	 * Check any WITH CHECK OPTION constraints from parent views.  We are
	 * required to do this after testing all constraints and uniqueness
	 * violations per the SQL spec, so we do it after actually updating the
	 * record in the heap and all indexes.
	 *
	 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
	 * are looking for at this point.
	 */
	if (resultRelInfo->ri_WithCheckOptions != NIL)
		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		return ExecProcessReturning(resultRelInfo, slot, planSlot);

	return NULL;
}

/*
 * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
 *
 * Try to lock tuple for update as part of speculative insertion.  If
 * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
 * (but still lock row, even though it may not satisfy estate's
 * snapshot).
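 *
 * A hypothetical statement whose UPDATE arm is handled here:
 *
 *		INSERT INTO tab VALUES (1, 'new') ON CONFLICT (key) DO UPDATE
 *			SET val = EXCLUDED.val WHERE tab.val IS DISTINCT FROM EXCLUDED.val;
 *
 * excludedSlot carries the EXCLUDED row, and the WHERE clause arrives as
 * resultRelInfo->ri_onConflictSetWhere.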
 *
 * Returns true if we're done (with or without an update), or false if
 * the caller must retry the INSERT from scratch.
 */
static bool
ExecOnConflictUpdate(ModifyTableState *mtstate,
					 ResultRelInfo *resultRelInfo,
					 ItemPointer conflictTid,
					 TupleTableSlot *planSlot,
					 TupleTableSlot *excludedSlot,
					 EState *estate,
					 bool canSetTag,
					 TupleTableSlot **returning)
{
	ExprContext *econtext = mtstate->ps.ps_ExprContext;
	Relation	relation = resultRelInfo->ri_RelationDesc;
	List	   *onConflictSetWhere = resultRelInfo->ri_onConflictSetWhere;
	HeapTupleData tuple;
	HeapUpdateFailureData hufd;
	LockTupleMode lockmode;
	HTSU_Result test;
	Buffer		buffer;

	/* Determine lock mode to use */
	lockmode = ExecUpdateLockMode(estate, resultRelInfo);

	/*
	 * Lock tuple for update.  Don't follow updates when tuple cannot be
	 * locked without doing so.  A row locking conflict here means our
	 * previous conclusion that the tuple is conclusively committed is not
	 * true anymore.
	 */
	tuple.t_self = *conflictTid;
	test = heap_lock_tuple(relation, &tuple, estate->es_output_cid,
						   lockmode, LockWaitBlock, false, &buffer,
						   &hufd);
	switch (test)
	{
		case HeapTupleMayBeUpdated:
			/* success! */
			break;

		case HeapTupleInvisible:

			/*
			 * This can occur when a just-inserted tuple is updated again in
			 * the same command, e.g., because multiple rows with the same
			 * conflicting key values are inserted.
			 *
			 * This is somewhat similar to the ExecUpdate()
			 * HeapTupleSelfUpdated case.  We do not want to proceed because
			 * it would lead to the same row being updated a second time in
			 * some unspecified order, and in contrast to plain UPDATEs
			 * there's no historical behavior to break.
			 *
			 * It is the user's responsibility to prevent this situation from
			 * occurring.  These problems are why SQL-2003 similarly specifies
			 * that for SQL MERGE, an exception must be raised in the event of
			 * an attempt to update the same row twice.
			 */
			if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple.t_data)))
				ereport(ERROR,
						(errcode(ERRCODE_CARDINALITY_VIOLATION),
						 errmsg("ON CONFLICT DO UPDATE command cannot affect row a second time"),
						 errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));

			/* This shouldn't happen */
			elog(ERROR, "attempted to lock invisible tuple");

		case HeapTupleSelfUpdated:

			/*
			 * This state should never be reached. As a dirty snapshot is used
			 * to find conflicting tuples, speculative insertion wouldn't have
			 * seen this row to conflict with.
			 */
			elog(ERROR, "unexpected self-updated tuple");

		case HeapTupleUpdated:
			if (IsolationUsesXactSnapshot())
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));

			/*
			 * Tell caller to try again from the very start.
			 *
			 * It does not make sense to use the usual EvalPlanQual() style
			 * loop here, as the new version of the row might not conflict
			 * anymore, or the conflicting tuple has actually been deleted.
			 */
			ReleaseBuffer(buffer);
			return false;

		default:
			elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
	}

	/*
	 * Success, the tuple is locked.
	 *
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous cycle.
	 */
	ResetExprContext(econtext);

	/*
	 * Verify that the tuple is visible to our MVCC snapshot if the current
	 * isolation level mandates that.
	 *
	 * It's not sufficient to rely on the check within ExecUpdate(), as e.g.
	 * the ON CONFLICT ... WHERE clause may prevent us from reaching it.
	 *
	 * This means we only ever continue when a new command in the current
	 * transaction could see the row, even though in READ COMMITTED mode the
	 * tuple will not be visible according to the current statement's
	 * snapshot.  This is in line with the way UPDATE deals with newer tuple
	 * versions.
	 */
	ExecCheckHeapTupleVisible(estate, &tuple, buffer);

	/* Store target's existing tuple in the state's dedicated slot */
	ExecStoreTuple(&tuple, mtstate->mt_existing, buffer, false);

	/*
	 * Make tuple and any needed join variables available to ExecQual and
	 * ExecProject.  The EXCLUDED tuple is installed in ecxt_innertuple, while
	 * the target's existing tuple is installed in the scantuple.  EXCLUDED
	 * has been made to reference INNER_VAR in setrefs.c, but there is no
	 * other redirection.
	 */
	econtext->ecxt_scantuple = mtstate->mt_existing;
	econtext->ecxt_innertuple = excludedSlot;
	econtext->ecxt_outertuple = NULL;

	if (!ExecQual(onConflictSetWhere, econtext, false))
	{
		ReleaseBuffer(buffer);
		InstrCountFiltered1(&mtstate->ps, 1);
		return true;			/* done with the tuple */
	}

	if (resultRelInfo->ri_WithCheckOptions != NIL)
	{
		/*
		 * Check target's existing tuple against UPDATE-applicable USING
		 * security barrier quals (if any), enforced here as RLS checks/WCOs.
		 *
		 * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
		 * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
		 * but that's almost the extent of its special handling for ON
		 * CONFLICT DO UPDATE.
		 *
		 * The rewriter will also have associated UPDATE applicable straight
		 * RLS checks/WCOs for the benefit of the ExecUpdate() call that
		 * follows.  INSERTs and UPDATEs naturally have mutually exclusive WCO
		 * kinds, so there is no danger of spurious over-enforcement in the
		 * INSERT or UPDATE path.
		 */
		ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
							 mtstate->mt_existing,
							 mtstate->ps.state);
	}

	/* Project the new tuple version */
	ExecProject(resultRelInfo->ri_onConflictSetProj, NULL);

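	/*
	 * If a junk filter was set up for the conflict projection, run it so
	 * that any resjunk columns are stripped before the tuple is handed to
	 * ExecUpdate below.
	 */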
	if (mtstate->mt_confljunk)
		(void) ExecFilterJunk(mtstate->mt_confljunk,
							  resultRelInfo->ri_onConflictSetProj->pi_slot);

	/*
	 * Note that it is possible that the target tuple has been modified in
	 * this session, after the above heap_lock_tuple. We choose to not error
	 * out in that case, in line with ExecUpdate's treatment of similar cases.
	 * This can happen if an UPDATE is triggered from within ExecQual(),
	 * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
	 * wCTE in the ON CONFLICT's SET.
	 */

	/* Execute UPDATE with projection */
	*returning = ExecUpdate(&tuple.t_self, NULL,
							mtstate->mt_conflproj, planSlot,
							&mtstate->mt_epqstate, mtstate->ps.state,
							canSetTag);

	ReleaseBuffer(buffer);
	return true;
}


/*
 * Process BEFORE EACH STATEMENT triggers
 */
static void
fireBSTriggers(ModifyTableState *node)
{
	switch (node->operation)
	{
		case CMD_INSERT:
			ExecBSInsertTriggers(node->ps.state, node->resultRelInfo);
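			/* ON CONFLICT DO UPDATE may also update, so fire those too */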
			if (node->mt_onconflict == ONCONFLICT_UPDATE)
				ExecBSUpdateTriggers(node->ps.state,
									 node->resultRelInfo);
			break;
		case CMD_UPDATE:
			ExecBSUpdateTriggers(node->ps.state, node->resultRelInfo);
			break;
		case CMD_DELETE:
			ExecBSDeleteTriggers(node->ps.state, node->resultRelInfo);
			break;
		default:
			elog(ERROR, "unknown operation");
			break;
	}
}

/*
 * Process AFTER EACH STATEMENT triggers
 */
static void
fireASTriggers(ModifyTableState *node)
{
	switch (node->operation)
	{
		case CMD_INSERT:
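			/* Fired in the reverse order of fireBSTriggers */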
			if (node->mt_onconflict == ONCONFLICT_UPDATE)
				ExecASUpdateTriggers(node->ps.state,
									 node->resultRelInfo);
			ExecASInsertTriggers(node->ps.state, node->resultRelInfo);
			break;
		case CMD_UPDATE:
			ExecASUpdateTriggers(node->ps.state, node->resultRelInfo);
			break;
		case CMD_DELETE:
			ExecASDeleteTriggers(node->ps.state, node->resultRelInfo);
			break;
		default:
			elog(ERROR, "unknown operation");
			break;
	}
}


/* ----------------------------------------------------------------
 *	   ExecModifyTable
 *
 *		Perform table modifications as required, and return RETURNING results
 *		if needed.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecModifyTable(ModifyTableState *node)
{
	EState	   *estate = node->ps.state;
	CmdType		operation = node->operation;
	ResultRelInfo *saved_resultRelInfo;
	ResultRelInfo *resultRelInfo;
	PlanState  *subplanstate;
	JunkFilter *junkfilter;
	TupleTableSlot *slot;
	TupleTableSlot *planSlot;
	ItemPointer tupleid;
	ItemPointerData tuple_ctid;
	HeapTupleData oldtupdata;
	HeapTuple	oldtuple;

	/*
	 * This should NOT get called during EvalPlanQual; we should have passed a
	 * subplan tree to EvalPlanQual, instead.  Use a runtime test not just
	 * Assert because this condition is easy to miss in testing.  (Note:
	 * although ModifyTable should not get executed within an EvalPlanQual
	 * operation, we do have to allow it to be initialized and shut down in
	 * case it is within a CTE subplan.  Hence this test must be here, not in
	 * ExecInitModifyTable.)
	 */
	if (estate->es_epqTuple != NULL)
		elog(ERROR, "ModifyTable should not be called during EvalPlanQual");

	/*
	 * If we've already completed processing, don't try to do more.  We need
	 * this test because ExecPostprocessPlan might call us an extra time, and
	 * our subplan's nodes aren't necessarily robust against being called
	 * extra times.
	 */
	if (node->mt_done)
		return NULL;

	/*
	 * On first call, fire BEFORE STATEMENT triggers before proceeding.
	 */
	if (node->fireBSTriggers)
	{
		fireBSTriggers(node);
		node->fireBSTriggers = false;
	}

	/* Preload local variables */
	resultRelInfo = node->resultRelInfo + node->mt_whichplan;
	subplanstate = node->mt_plans[node->mt_whichplan];
	junkfilter = resultRelInfo->ri_junkFilter;

	/*
	 * es_result_relation_info must point to the currently active result
	 * relation while we are within this ModifyTable node.  Even though
	 * ModifyTable nodes can't be nested statically, they can be nested
	 * dynamically (since our subplan could include a reference to a modifying
	 * CTE).  So we have to save and restore the caller's value.
	 */
	saved_resultRelInfo = estate->es_result_relation_info;

	estate->es_result_relation_info = resultRelInfo;

	/*
	 * Fetch rows from subplan(s), and execute the required table modification
	 * for each row.
	 */
	for (;;)
	{
		/*
		 * Reset the per-output-tuple exprcontext.  This is needed because
		 * triggers expect to use that context as workspace.  It's a bit ugly
		 * to do this below the top level of the plan, however.  We might need
		 * to rethink this later.
		 */
		ResetPerTupleExprContext(estate);

		planSlot = ExecProcNode(subplanstate);

		if (TupIsNull(planSlot))
		{
			/* advance to next subplan if any */
			node->mt_whichplan++;
			if (node->mt_whichplan < node->mt_nplans)
			{
				resultRelInfo++;
				subplanstate = node->mt_plans[node->mt_whichplan];
				junkfilter = resultRelInfo->ri_junkFilter;
				estate->es_result_relation_info = resultRelInfo;
				EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan,
									node->mt_arowmarks[node->mt_whichplan]);
				continue;
			}
			else
				break;
		}

		/*
		 * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
		 * here is compute the RETURNING expressions.
		 */
		if (resultRelInfo->ri_usesFdwDirectModify)
		{
			Assert(resultRelInfo->ri_projectReturning);

			/*
			 * A scan slot containing the data that was actually inserted,
			 * updated or deleted has already been made available to
			 * ExecProcessReturning by IterateDirectModify, so no need to
			 * provide it here.
			 */
			slot = ExecProcessReturning(resultRelInfo, NULL, planSlot);

			estate->es_result_relation_info = saved_resultRelInfo;
			return slot;
		}

		EvalPlanQualSetSlot(&node->mt_epqstate, planSlot);
		slot = planSlot;

		tupleid = NULL;
		oldtuple = NULL;
		if (junkfilter != NULL)
		{
			/*
			 * extract the 'ctid' or 'wholerow' junk attribute.
			 */
			if (operation == CMD_UPDATE || operation == CMD_DELETE)
			{
				char		relkind;
				Datum		datum;
				bool		isNull;

				relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
				if (relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW)
				{
					datum = ExecGetJunkAttribute(slot,
												 junkfilter->jf_junkAttNo,
												 &isNull);
					/* shouldn't ever get a null result... */
					if (isNull)
						elog(ERROR, "ctid is NULL");

					tupleid = (ItemPointer) DatumGetPointer(datum);
					tuple_ctid = *tupleid;	/* be sure we don't free ctid!! */
					tupleid = &tuple_ctid;
				}

				/*
				 * Use the wholerow attribute, when available, to reconstruct
				 * the old relation tuple.
				 *
				 * Foreign table updates have a wholerow attribute when the
				 * relation has an AFTER ROW trigger.  Note that the wholerow
				 * attribute does not carry system columns.  Foreign table
				 * triggers miss seeing those, except that we know enough here
				 * to set t_tableOid.  Quite separately from this, the FDW may
				 * fetch its own junk attrs to identify the row.
				 *
				 * Other relevant relkinds, currently limited to views, always
				 * have a wholerow attribute.
				 */
				else if (AttributeNumberIsValid(junkfilter->jf_junkAttNo))
				{
					datum = ExecGetJunkAttribute(slot,
												 junkfilter->jf_junkAttNo,
												 &isNull);
					/* shouldn't ever get a null result... */
					if (isNull)
						elog(ERROR, "wholerow is NULL");

					oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
					oldtupdata.t_len =
						HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
					ItemPointerSetInvalid(&(oldtupdata.t_self));
					/* Historically, view triggers see invalid t_tableOid. */
					oldtupdata.t_tableOid =
						(relkind == RELKIND_VIEW) ? InvalidOid :
						RelationGetRelid(resultRelInfo->ri_RelationDesc);

					oldtuple = &oldtupdata;
				}
				else
					Assert(relkind == RELKIND_FOREIGN_TABLE);
			}

			/*
			 * apply the junkfilter if needed.
			 */
			if (operation != CMD_DELETE)
				slot = ExecFilterJunk(junkfilter, slot);
		}

		switch (operation)
		{
			case CMD_INSERT:
				slot = ExecInsert(node, slot, planSlot,
								  node->mt_arbiterindexes, node->mt_onconflict,
								  estate, node->canSetTag);
				break;
			case CMD_UPDATE:
				slot = ExecUpdate(tupleid, oldtuple, slot, planSlot,
								  &node->mt_epqstate, estate, node->canSetTag);
				break;
			case CMD_DELETE:
				slot = ExecDelete(tupleid, oldtuple, planSlot,
								  &node->mt_epqstate, estate, node->canSetTag);
				break;
			default:
				elog(ERROR, "unknown operation");
				break;
		}

		/*
		 * If we got a RETURNING result, return it to caller.  We'll continue
		 * the work on next call.
		 */
		if (slot)
		{
			estate->es_result_relation_info = saved_resultRelInfo;
			return slot;
		}
	}

	/* Restore es_result_relation_info before exiting */
	estate->es_result_relation_info = saved_resultRelInfo;

	/*
	 * We're done, but fire AFTER STATEMENT triggers before exiting.
	 */
	fireASTriggers(node);

	node->mt_done = true;

	return NULL;
}

/* ----------------------------------------------------------------
 *		ExecInitModifyTable
 * ----------------------------------------------------------------
 */
ModifyTableState *
ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
{
	ModifyTableState *mtstate;
	CmdType		operation = node->operation;
	int			nplans = list_length(node->plans);
	ResultRelInfo *saved_resultRelInfo;
	ResultRelInfo *resultRelInfo;
	TupleDesc	tupDesc;
	Plan	   *subplan;
	ListCell   *l;
	int			i;

	/* check for unsupported flags */
	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

	/*
	 * create state structure
	 */
	mtstate = makeNode(ModifyTableState);
	mtstate->ps.plan = (Plan *) node;
	mtstate->ps.state = estate;
	mtstate->ps.targetlist = NIL;	/* not actually used */

	mtstate->operation = operation;
	mtstate->canSetTag = node->canSetTag;
	mtstate->mt_done = false;

	mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
	mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex;
	mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
	mtstate->mt_nplans = nplans;
	mtstate->mt_onconflict = node->onConflictAction;
	mtstate->mt_arbiterindexes = node->arbiterIndexes;

	/* set up epqstate with dummy subplan data for the moment */
	EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
	mtstate->fireBSTriggers = true;

	/*
	 * call ExecInitNode on each of the plans to be executed and save the
	 * results into the array "mt_plans".  This is also a convenient place to
	 * verify that the proposed target relations are valid and open their
	 * indexes for insertion of new index entries.  Note we *must* set
	 * estate->es_result_relation_info correctly while we initialize each
	 * sub-plan; ExecContextForcesOids depends on that!
	 */
	saved_resultRelInfo = estate->es_result_relation_info;

	resultRelInfo = mtstate->resultRelInfo;
	i = 0;
	foreach(l, node->plans)
	{
		subplan = (Plan *) lfirst(l);

		/* Initialize the usesFdwDirectModify flag */
		resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
												 node->fdwDirectModifyPlans);

		/*
		 * Verify result relation is a valid target for the current operation
		 */
		CheckValidResultRel(resultRelInfo->ri_RelationDesc, operation);

		/*
		 * If there are indices on the result relation, open them and save
		 * descriptors in the result relation info, so that we can add new
		 * index entries for the tuples we add/update.  We need not do this
		 * for a DELETE, however, since deletion doesn't affect indexes. Also,
		 * inside an EvalPlanQual operation, the indexes might be open
		 * already, since we share the resultrel state with the original
		 * query.
		 */
		if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex &&
			operation != CMD_DELETE &&
			resultRelInfo->ri_IndexRelationDescs == NULL)
			ExecOpenIndices(resultRelInfo, mtstate->mt_onconflict != ONCONFLICT_NONE);

		/* Now init the plan for this result rel */
		estate->es_result_relation_info = resultRelInfo;
		mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags);

		/* Also let FDWs init themselves for foreign-table result rels */
		if (!resultRelInfo->ri_usesFdwDirectModify &&
1645 			resultRelInfo->ri_FdwRoutine != NULL &&
1646 			resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
1647 		{
1648 			List	   *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
1649 
1650 			resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
1651 															 resultRelInfo,
1652 															 fdw_private,
1653 															 i,
1654 															 eflags);
1655 		}
1656 
1657 		resultRelInfo++;
1658 		i++;
1659 	}
1660 
1661 	estate->es_result_relation_info = saved_resultRelInfo;
1662 
1663 	/*
1664 	 * Initialize any WITH CHECK OPTION constraints if needed.
1665 	 */
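	/*
	 * Illustrative example (not from this file): WITH CHECK OPTION
	 * constraints typically come from auto-updatable views, e.g.
	 *
	 *		CREATE VIEW pos_v AS
	 *			SELECT * FROM t WHERE x > 0
	 *			WITH CHECK OPTION;
	 *
	 * An INSERT or UPDATE through pos_v must leave each new row visible
	 * through the view (here, x > 0), or an error is raised.  The
	 * expressions initialized below implement that recheck.
	 */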
	resultRelInfo = mtstate->resultRelInfo;
	i = 0;
	foreach(l, node->withCheckOptionLists)
	{
		List	   *wcoList = (List *) lfirst(l);
		List	   *wcoExprs = NIL;
		ListCell   *ll;

		foreach(ll, wcoList)
		{
			WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
			ExprState  *wcoExpr = ExecInitExpr((Expr *) wco->qual,
											   &mtstate->ps);

			wcoExprs = lappend(wcoExprs, wcoExpr);
		}

		resultRelInfo->ri_WithCheckOptions = wcoList;
		resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
		resultRelInfo++;
		i++;
	}

	/*
	 * Initialize RETURNING projections if needed.
	 */
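	/*
	 * Illustrative example (not from this file) of a query needing this
	 * setup:
	 *
	 *		UPDATE t SET v = v + 1 WHERE k = 42 RETURNING k, v;
	 *
	 * There is one RETURNING list per result relation; each is projected
	 * over the inserted, updated, or deleted tuple to build the rows
	 * handed back to the caller.
	 */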
	if (node->returningLists)
	{
		TupleTableSlot *slot;
		ExprContext *econtext;

		/*
		 * Initialize result tuple slot and assign its rowtype using the first
		 * RETURNING list.  We assume the rest will look the same.
		 */
		tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists),
								 false);

		/* Set up a slot for the output of the RETURNING projection(s) */
		ExecInitResultTupleSlot(estate, &mtstate->ps);
		ExecAssignResultType(&mtstate->ps, tupDesc);
		slot = mtstate->ps.ps_ResultTupleSlot;

		/* Need an econtext too */
		econtext = CreateExprContext(estate);
		mtstate->ps.ps_ExprContext = econtext;

		/*
		 * Build a projection for each result rel.
		 */
		resultRelInfo = mtstate->resultRelInfo;
		foreach(l, node->returningLists)
		{
			List	   *rlist = (List *) lfirst(l);
			List	   *rliststate;

			rliststate = (List *) ExecInitExpr((Expr *) rlist, &mtstate->ps);
			resultRelInfo->ri_projectReturning =
				ExecBuildProjectionInfo(rliststate, econtext, slot,
									 resultRelInfo->ri_RelationDesc->rd_att);
			resultRelInfo++;
		}
	}
	else
	{
		/*
		 * We still must construct a dummy result tuple type, because InitPlan
		 * expects one (maybe should change that?).
		 */
		tupDesc = ExecTypeFromTL(NIL, false);
		ExecInitResultTupleSlot(estate, &mtstate->ps);
		ExecAssignResultType(&mtstate->ps, tupDesc);

		mtstate->ps.ps_ExprContext = NULL;
	}

	/*
	 * If needed, initialize the target list, projection and qual for
	 * ON CONFLICT DO UPDATE.
	 */
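	/*
	 * Illustrative example (not from this file) of the statement shape
	 * handled here:
	 *
	 *		INSERT INTO t (k, v) VALUES (1, 'new')
	 *			ON CONFLICT (k) DO UPDATE
	 *				SET v = EXCLUDED.v
	 *				WHERE t.v IS DISTINCT FROM EXCLUDED.v;
	 *
	 * mt_existing will hold the already-present conflicting row, EXCLUDED
	 * refers to the tuple proposed for insertion, and the optional WHERE
	 * clause becomes ri_onConflictSetWhere below.
	 */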
	resultRelInfo = mtstate->resultRelInfo;
	if (node->onConflictAction == ONCONFLICT_UPDATE)
	{
		ExprContext *econtext;
		ExprState  *setexpr;

		/* INSERT may only have one subplan, since inheritance is not expanded */
		Assert(nplans == 1);

		/* already exists if created by RETURNING processing above */
		if (mtstate->ps.ps_ExprContext == NULL)
			ExecAssignExprContext(estate, &mtstate->ps);

		econtext = mtstate->ps.ps_ExprContext;

		/* initialize slot for the existing tuple */
		mtstate->mt_existing = ExecInitExtraTupleSlot(mtstate->ps.state);
		ExecSetSlotDescriptor(mtstate->mt_existing,
							  resultRelInfo->ri_RelationDesc->rd_att);

		/* carried forward solely for the benefit of explain */
		mtstate->mt_excludedtlist = node->exclRelTlist;

		/* create target slot for UPDATE SET projection */
		mtstate->mt_conflproj = ExecInitExtraTupleSlot(mtstate->ps.state);
		ExecSetSlotDescriptor(mtstate->mt_conflproj,
							  resultRelInfo->ri_RelationDesc->rd_att);

		/* initialize UPDATE SET tlist expressions */
		setexpr = ExecInitExpr((Expr *) node->onConflictSet, &mtstate->ps);

		/*
		 * The onConflictSet tlist should already have been adjusted to emit
		 * the table's exact column list.
		 */
		ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
							node->onConflictSet);

		/*
		 * However, it might also contain resjunk columns, in which case we'll
		 * need a junkfilter to get rid of those.
		 */
		if (ExecCleanTargetListLength(node->onConflictSet) ==
			list_length(node->onConflictSet))
		{
			/* No junk columns, so we'll just project into mt_conflproj. */
			resultRelInfo->ri_onConflictSetProj =
				ExecBuildProjectionInfo((List *) setexpr, econtext,
										mtstate->mt_conflproj,
										resultRelInfo->ri_RelationDesc->rd_att);
			mtstate->mt_confljunk = NULL;
		}
		else
		{
			/*
			 * Project into a slot matching the tlist's output rowtype, then
			 * apply a junkfilter.
			 */
			TupleDesc	tupDesc = ExecTypeFromTL(node->onConflictSet, false);
			TupleTableSlot *ocsSlot;

			ocsSlot = ExecInitExtraTupleSlot(mtstate->ps.state);
			ExecSetSlotDescriptor(ocsSlot, tupDesc);
			resultRelInfo->ri_onConflictSetProj =
				ExecBuildProjectionInfo((List *) setexpr, econtext,
										ocsSlot,
										resultRelInfo->ri_RelationDesc->rd_att);
			mtstate->mt_confljunk =
				ExecInitJunkFilter(node->onConflictSet,
								   resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
								   mtstate->mt_conflproj);
		}

		/* build DO UPDATE WHERE clause expression */
		if (node->onConflictWhere)
		{
			ExprState  *qualexpr;

			qualexpr = ExecInitExpr((Expr *) node->onConflictWhere,
									&mtstate->ps);

			resultRelInfo->ri_onConflictSetWhere = (List *) qualexpr;
		}
	}

	/*
	 * If we have any secondary relations in an UPDATE or DELETE, they need to
	 * be treated like non-locked relations in SELECT FOR UPDATE, ie, the
	 * EvalPlanQual mechanism needs to be told about them.  Locate the
	 * relevant ExecRowMarks.
	 */
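	/*
	 * For instance (illustrative only), in
	 *
	 *		UPDATE t SET v = s.v FROM src s WHERE t.k = s.k;
	 *
	 * "src" is such a secondary relation: it is not a result relation, but
	 * EvalPlanQual must be able to refetch its rows when a concurrently
	 * updated target tuple forces the quals to be rechecked.
	 */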
	foreach(l, node->rowMarks)
	{
		PlanRowMark *rc = (PlanRowMark *) lfirst(l);
		ExecRowMark *erm;

		Assert(IsA(rc, PlanRowMark));

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		/* find ExecRowMark (same for all subplans) */
		erm = ExecFindRowMark(estate, rc->rti, false);

		/* build ExecAuxRowMark for each subplan */
		for (i = 0; i < nplans; i++)
		{
			ExecAuxRowMark *aerm;

			subplan = mtstate->mt_plans[i]->plan;
			aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
			mtstate->mt_arowmarks[i] = lappend(mtstate->mt_arowmarks[i], aerm);
		}
	}

	/* select first subplan */
	mtstate->mt_whichplan = 0;
	subplan = (Plan *) linitial(node->plans);
	EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan,
						mtstate->mt_arowmarks[0]);

	/*
	 * Initialize the junk filter(s) if needed.  INSERT queries need a filter
	 * if there are any junk attrs in the tlist.  UPDATE and DELETE always
	 * need a filter, since there's always a junk 'ctid' or 'wholerow'
	 * attribute present --- no need to look first.
	 *
	 * If there are multiple result relations, each one needs its own junk
	 * filter.  Note multiple rels are only possible for UPDATE/DELETE, so we
	 * can't be fooled by some needing a filter and some not.
	 *
	 * This section of code is also a convenient place to verify that the
	 * output of an INSERT or UPDATE matches the target table(s).
	 */
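	/*
	 * As an illustration (not from this file), the subplan for
	 *
	 *		DELETE FROM t WHERE v < 0;
	 *
	 * emits a targetlist containing a resjunk "ctid" column identifying
	 * each victim row; the junk filter built below is what extracts it,
	 * and for UPDATE it also strips such columns before the new tuple is
	 * stored.
	 */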
	{
		bool		junk_filter_needed = false;

		switch (operation)
		{
			case CMD_INSERT:
				foreach(l, subplan->targetlist)
				{
					TargetEntry *tle = (TargetEntry *) lfirst(l);

					if (tle->resjunk)
					{
						junk_filter_needed = true;
						break;
					}
				}
				break;
			case CMD_UPDATE:
			case CMD_DELETE:
				junk_filter_needed = true;
				break;
			default:
				elog(ERROR, "unknown operation");
				break;
		}

		if (junk_filter_needed)
		{
			resultRelInfo = mtstate->resultRelInfo;
			for (i = 0; i < nplans; i++)
			{
				JunkFilter *j;

				subplan = mtstate->mt_plans[i]->plan;
				if (operation == CMD_INSERT || operation == CMD_UPDATE)
					ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
										subplan->targetlist);

				j = ExecInitJunkFilter(subplan->targetlist,
									   resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
									   ExecInitExtraTupleSlot(estate));

				if (operation == CMD_UPDATE || operation == CMD_DELETE)
				{
					/* For UPDATE/DELETE, find the appropriate junk attr now */
					char		relkind;

					relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
					if (relkind == RELKIND_RELATION ||
						relkind == RELKIND_MATVIEW)
					{
						j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
						if (!AttributeNumberIsValid(j->jf_junkAttNo))
							elog(ERROR, "could not find junk ctid column");
					}
					else if (relkind == RELKIND_FOREIGN_TABLE)
					{
						/*
						 * When there is an AFTER trigger, there should be a
						 * wholerow attribute.
						 */
						j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
					}
					else
					{
						j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
						if (!AttributeNumberIsValid(j->jf_junkAttNo))
							elog(ERROR, "could not find junk wholerow column");
					}
				}

				resultRelInfo->ri_junkFilter = j;
				resultRelInfo++;
			}
		}
		else
		{
			if (operation == CMD_INSERT)
				ExecCheckPlanOutput(mtstate->resultRelInfo->ri_RelationDesc,
									subplan->targetlist);
		}
	}

	/*
	 * Set up a tuple table slot for use for trigger output tuples. In a plan
	 * containing multiple ModifyTable nodes, all can share one such slot, so
	 * we keep it in the estate.
	 */
	if (estate->es_trig_tuple_slot == NULL)
		estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate);

	/*
	 * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
	 * to estate->es_auxmodifytables so that it will be run to completion by
	 * ExecPostprocessPlan.  (It'd actually work fine to add the primary
	 * ModifyTable node too, but there's no need.)  Note the use of lcons not
	 * lappend: we need later-initialized ModifyTable nodes to be shut down
	 * before earlier ones.  This ensures that we don't throw away RETURNING
	 * rows that need to be seen by a later CTE subplan.
	 */
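	/*
	 * A sketch of the situation this ordering protects (illustrative only):
	 *
	 *		WITH ins AS (INSERT INTO a VALUES (1) RETURNING x)
	 *		INSERT INTO b SELECT x FROM ins;
	 *
	 * The INSERT into "a" runs as its own ModifyTable node whose RETURNING
	 * rows feed another subplan; shutting nodes down in reverse order of
	 * initialization keeps such rows from being thrown away before they
	 * have been consumed.
	 */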
	if (!mtstate->canSetTag)
		estate->es_auxmodifytables = lcons(mtstate,
										   estate->es_auxmodifytables);

	return mtstate;
}

/* ----------------------------------------------------------------
 *		ExecEndModifyTable
 *
 *		Shuts down the plan.
 *
 *		Returns nothing of interest.
 * ----------------------------------------------------------------
 */
void
ExecEndModifyTable(ModifyTableState *node)
{
	int			i;

	/*
	 * Allow any FDWs to shut down
	 */
	for (i = 0; i < node->mt_nplans; i++)
	{
		ResultRelInfo *resultRelInfo = node->resultRelInfo + i;

		if (!resultRelInfo->ri_usesFdwDirectModify &&
			resultRelInfo->ri_FdwRoutine != NULL &&
			resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
			resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
														   resultRelInfo);
	}

	/*
	 * Free the exprcontext
	 */
	ExecFreeExprContext(&node->ps);

	/*
	 * clean out the tuple table
	 */
	ExecClearTuple(node->ps.ps_ResultTupleSlot);

	/*
	 * Terminate EPQ execution if active
	 */
	EvalPlanQualEnd(&node->mt_epqstate);

	/*
	 * shut down subplans
	 */
	for (i = 0; i < node->mt_nplans; i++)
		ExecEndNode(node->mt_plans[i]);
}

void
ExecReScanModifyTable(ModifyTableState *node)
{
	/*
	 * Currently, we don't need to support rescan on ModifyTable nodes. The
	 * semantics of that would be a bit debatable anyway.
	 */
	elog(ERROR, "ExecReScanModifyTable is not implemented");
}