1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  *	  PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  *	  src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/heapam.h"
18 #include "access/sysattr.h"
19 #include "access/htup_details.h"
20 #include "access/xact.h"
21 #include "catalog/catalog.h"
22 #include "catalog/dependency.h"
23 #include "catalog/indexing.h"
24 #include "catalog/objectaccess.h"
25 #include "catalog/pg_constraint.h"
26 #include "catalog/pg_constraint_fn.h"
27 #include "catalog/pg_proc.h"
28 #include "catalog/pg_trigger.h"
29 #include "catalog/pg_type.h"
30 #include "commands/dbcommands.h"
31 #include "commands/defrem.h"
32 #include "commands/trigger.h"
33 #include "executor/executor.h"
34 #include "miscadmin.h"
35 #include "nodes/bitmapset.h"
36 #include "nodes/makefuncs.h"
37 #include "optimizer/clauses.h"
38 #include "optimizer/var.h"
39 #include "parser/parse_clause.h"
40 #include "parser/parse_collate.h"
41 #include "parser/parse_func.h"
42 #include "parser/parse_relation.h"
43 #include "parser/parsetree.h"
44 #include "pgstat.h"
45 #include "rewrite/rewriteManip.h"
46 #include "storage/bufmgr.h"
47 #include "storage/lmgr.h"
48 #include "tcop/utility.h"
49 #include "utils/acl.h"
50 #include "utils/builtins.h"
51 #include "utils/bytea.h"
52 #include "utils/fmgroids.h"
53 #include "utils/inval.h"
54 #include "utils/lsyscache.h"
55 #include "utils/memutils.h"
56 #include "utils/rel.h"
57 #include "utils/snapmgr.h"
58 #include "utils/syscache.h"
59 #include "utils/tqual.h"
60 #include "utils/tuplestore.h"
61 
62 
63 /* GUC variables */
64 int			SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
65 
66 /* How many levels deep into trigger execution are we? */
67 static int	MyTriggerDepth = 0;
68 
69 /*
70  * Note that similar macros also exist in executor/execMain.c.  There does not
71  * appear to be any good header to put them into, given the structures that
72  * they use, so we let them be duplicated.  Be sure to update all if one needs
73  * to be changed, however.
74  */
75 #define GetUpdatedColumns(relinfo, estate) \
76 	(rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
77 
78 /* Local function prototypes */
79 static void ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid);
80 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
81 static HeapTuple GetTupleForTrigger(EState *estate,
82 				   EPQState *epqstate,
83 				   ResultRelInfo *relinfo,
84 				   ItemPointer tid,
85 				   LockTupleMode lockmode,
86 				   TupleTableSlot **newSlot);
87 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
88 			   Trigger *trigger, TriggerEvent event,
89 			   Bitmapset *modifiedCols,
90 			   HeapTuple oldtup, HeapTuple newtup);
91 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
92 					int tgindx,
93 					FmgrInfo *finfo,
94 					Instrumentation *instr,
95 					MemoryContext per_tuple_context);
96 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
97 					  int event, bool row_trigger,
98 					  HeapTuple oldtup, HeapTuple newtup,
99 					  List *recheckIndexes, Bitmapset *modifiedCols);
100 static void AfterTriggerEnlargeQueryState(void);
101 
102 
103 /*
104  * Create a trigger.  Returns the address of the created trigger.
105  *
106  * queryString is the source text of the CREATE TRIGGER command.
107  * This must be supplied if a whenClause is specified, else it can be NULL.
108  *
109  * relOid, if nonzero, is the relation on which the trigger should be
110  * created.  If zero, the name provided in the statement will be looked up.
111  *
112  * refRelOid, if nonzero, is the relation to which the constraint trigger
113  * refers.  If zero, the constraint relation name provided in the statement
114  * will be looked up as needed.
115  *
116  * constraintOid, if nonzero, says that this trigger is being created
117  * internally to implement that constraint.  A suitable pg_depend entry will
118  * be made to link the trigger to that constraint.  constraintOid is zero when
119  * executing a user-entered CREATE TRIGGER command.  (For CREATE CONSTRAINT
120  * TRIGGER, we build a pg_constraint entry internally.)
121  *
122  * indexOid, if nonzero, is the OID of an index associated with the constraint.
123  * We do nothing with this except store it into pg_trigger.tgconstrindid.
124  *
125  * If isInternal is true then this is an internally-generated trigger.
126  * This argument sets the tgisinternal field of the pg_trigger entry, and
127  * if TRUE causes us to modify the given trigger name to ensure uniqueness.
128  *
129  * When isInternal is not true we require ACL_TRIGGER permissions on the
130  * relation, as well as ACL_EXECUTE on the trigger function.  For internal
131  * triggers the caller must apply any required permission checks.
132  *
133  * Note: can return InvalidObjectAddress if we decided to not create a trigger
134  * at all, but a foreign-key constraint.  This is a kluge for backwards
135  * compatibility.
136  */
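/*
 * Illustrative example (added commentary, not from the original sources):
 * a plain user-entered command such as
 *
 *		CREATE TRIGGER mytrig BEFORE UPDATE ON mytab
 *			FOR EACH ROW WHEN (OLD.val IS DISTINCT FROM NEW.val)
 *			EXECUTE PROCEDURE mytrigfunc();
 *
 * is expected to reach this function with relOid, refRelOid, constraintOid
 * and indexOid all invalid and isInternal = false, so the relation is looked
 * up by name and the ACL_TRIGGER/ACL_EXECUTE checks below apply.  Internal
 * callers (e.g. foreign-key constraint creation) instead pass isInternal =
 * true together with a valid constraintOid.
 */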
137 ObjectAddress
138 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
139 			  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
140 			  bool isInternal)
141 {
142 	int16		tgtype;
143 	int			ncolumns;
144 	int16	   *columns;
145 	int2vector *tgattr;
146 	Node	   *whenClause;
147 	List	   *whenRtable;
148 	char	   *qual;
149 	Datum		values[Natts_pg_trigger];
150 	bool		nulls[Natts_pg_trigger];
151 	Relation	rel;
152 	AclResult	aclresult;
153 	Relation	tgrel;
154 	SysScanDesc tgscan;
155 	ScanKeyData key;
156 	Relation	pgrel;
157 	HeapTuple	tuple;
158 	Oid			fargtypes[1];	/* dummy */
159 	Oid			funcoid;
160 	Oid			funcrettype;
161 	Oid			trigoid;
162 	char		internaltrigname[NAMEDATALEN];
163 	char	   *trigname;
164 	Oid			constrrelid = InvalidOid;
165 	ObjectAddress myself,
166 				referenced;
167 
168 	if (OidIsValid(relOid))
169 		rel = heap_open(relOid, ShareRowExclusiveLock);
170 	else
171 		rel = heap_openrv(stmt->relation, ShareRowExclusiveLock);
172 
173 	/*
174 	 * Triggers must be on tables or views, and there are additional
175 	 * relation-type-specific restrictions.
176 	 */
177 	if (rel->rd_rel->relkind == RELKIND_RELATION)
178 	{
179 		/* Tables can't have INSTEAD OF triggers */
180 		if (stmt->timing != TRIGGER_TYPE_BEFORE &&
181 			stmt->timing != TRIGGER_TYPE_AFTER)
182 			ereport(ERROR,
183 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
184 					 errmsg("\"%s\" is a table",
185 							RelationGetRelationName(rel)),
186 					 errdetail("Tables cannot have INSTEAD OF triggers.")));
187 	}
188 	else if (rel->rd_rel->relkind == RELKIND_VIEW)
189 	{
190 		/*
191 		 * Views can have INSTEAD OF triggers (which we check below are
192 		 * row-level), or statement-level BEFORE/AFTER triggers.
193 		 */
194 		if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
195 			ereport(ERROR,
196 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
197 					 errmsg("\"%s\" is a view",
198 							RelationGetRelationName(rel)),
199 					 errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
200 		/* Disallow TRUNCATE triggers on VIEWs */
201 		if (TRIGGER_FOR_TRUNCATE(stmt->events))
202 			ereport(ERROR,
203 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
204 					 errmsg("\"%s\" is a view",
205 							RelationGetRelationName(rel)),
206 					 errdetail("Views cannot have TRUNCATE triggers.")));
207 	}
208 	else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
209 	{
210 		if (stmt->timing != TRIGGER_TYPE_BEFORE &&
211 			stmt->timing != TRIGGER_TYPE_AFTER)
212 			ereport(ERROR,
213 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
214 					 errmsg("\"%s\" is a foreign table",
215 							RelationGetRelationName(rel)),
216 			  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
217 
218 		if (TRIGGER_FOR_TRUNCATE(stmt->events))
219 			ereport(ERROR,
220 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
221 					 errmsg("\"%s\" is a foreign table",
222 							RelationGetRelationName(rel)),
223 				errdetail("Foreign tables cannot have TRUNCATE triggers.")));
224 
225 		if (stmt->isconstraint)
226 			ereport(ERROR,
227 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
228 					 errmsg("\"%s\" is a foreign table",
229 							RelationGetRelationName(rel)),
230 			  errdetail("Foreign tables cannot have constraint triggers.")));
231 	}
232 	else
233 		ereport(ERROR,
234 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
235 				 errmsg("\"%s\" is not a table or view",
236 						RelationGetRelationName(rel))));
237 
238 	if (!allowSystemTableMods && IsSystemRelation(rel))
239 		ereport(ERROR,
240 				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
241 				 errmsg("permission denied: \"%s\" is a system catalog",
242 						RelationGetRelationName(rel))));
243 
244 	if (stmt->isconstraint)
245 	{
246 		/*
247 		 * We must take a lock on the target relation to protect against
248 		 * concurrent drop.  It's not clear that AccessShareLock is strong
249 		 * enough, but we certainly need at least that much... otherwise, we
250 		 * might end up creating a pg_constraint entry referencing a
251 		 * nonexistent table.
252 		 */
253 		if (OidIsValid(refRelOid))
254 		{
255 			LockRelationOid(refRelOid, AccessShareLock);
256 			constrrelid = refRelOid;
257 		}
258 		else if (stmt->constrrel != NULL)
259 			constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
260 										   false);
261 	}
262 
263 	/* permission checks */
264 	if (!isInternal)
265 	{
266 		aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
267 									  ACL_TRIGGER);
268 		if (aclresult != ACLCHECK_OK)
269 			aclcheck_error(aclresult, ACL_KIND_CLASS,
270 						   RelationGetRelationName(rel));
271 
272 		if (OidIsValid(constrrelid))
273 		{
274 			aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
275 										  ACL_TRIGGER);
276 			if (aclresult != ACLCHECK_OK)
277 				aclcheck_error(aclresult, ACL_KIND_CLASS,
278 							   get_rel_name(constrrelid));
279 		}
280 	}
281 
282 	/* Compute tgtype */
283 	TRIGGER_CLEAR_TYPE(tgtype);
284 	if (stmt->row)
285 		TRIGGER_SETT_ROW(tgtype);
286 	tgtype |= stmt->timing;
287 	tgtype |= stmt->events;
288 
289 	/* Disallow ROW-level TRUNCATE triggers */
290 	if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
291 		ereport(ERROR,
292 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
293 				 errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
294 
295 	/* INSTEAD triggers must be row-level, and can't have WHEN or columns */
296 	if (TRIGGER_FOR_INSTEAD(tgtype))
297 	{
298 		if (!TRIGGER_FOR_ROW(tgtype))
299 			ereport(ERROR,
300 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
301 					 errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
302 		if (stmt->whenClause)
303 			ereport(ERROR,
304 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
305 				 errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
306 		if (stmt->columns != NIL)
307 			ereport(ERROR,
308 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
309 					 errmsg("INSTEAD OF triggers cannot have column lists")));
310 	}
311 
312 	/*
313 	 * Parse the WHEN clause, if any
314 	 */
315 	if (stmt->whenClause)
316 	{
317 		ParseState *pstate;
318 		RangeTblEntry *rte;
319 		List	   *varList;
320 		ListCell   *lc;
321 
322 		/* Set up a pstate to parse with */
323 		pstate = make_parsestate(NULL);
324 		pstate->p_sourcetext = queryString;
325 
326 		/*
327 		 * Set up RTEs for OLD and NEW references.
328 		 *
329 		 * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
330 		 */
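		/*
		 * (Added illustration: in a clause such as
		 * WHEN (OLD.balance IS DISTINCT FROM NEW.balance), the reference to
		 * OLD.balance becomes a Var with varno PRS2_OLD_VARNO (1) and the
		 * reference to NEW.balance a Var with varno PRS2_NEW_VARNO (2);
		 * the switch on var->varno below depends on that numbering.)
		 */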
331 		rte = addRangeTableEntryForRelation(pstate, rel,
332 											makeAlias("old", NIL),
333 											false, false);
334 		addRTEtoQuery(pstate, rte, false, true, true);
335 		rte = addRangeTableEntryForRelation(pstate, rel,
336 											makeAlias("new", NIL),
337 											false, false);
338 		addRTEtoQuery(pstate, rte, false, true, true);
339 
340 		/* Transform expression.  Copy to be sure we don't modify original */
341 		whenClause = transformWhereClause(pstate,
342 										  copyObject(stmt->whenClause),
343 										  EXPR_KIND_TRIGGER_WHEN,
344 										  "WHEN");
345 		/* we have to fix its collations too */
346 		assign_expr_collations(pstate, whenClause);
347 
348 		/*
349 		 * Check for disallowed references to OLD/NEW.
350 		 *
351 		 * NB: pull_var_clause is okay here only because we don't allow
352 		 * subselects in WHEN clauses; it would fail to examine the contents
353 		 * of subselects.
354 		 */
355 		varList = pull_var_clause(whenClause, 0);
356 		foreach(lc, varList)
357 		{
358 			Var		   *var = (Var *) lfirst(lc);
359 
360 			switch (var->varno)
361 			{
362 				case PRS2_OLD_VARNO:
363 					if (!TRIGGER_FOR_ROW(tgtype))
364 						ereport(ERROR,
365 								(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
366 								 errmsg("statement trigger's WHEN condition cannot reference column values"),
367 								 parser_errposition(pstate, var->location)));
368 					if (TRIGGER_FOR_INSERT(tgtype))
369 						ereport(ERROR,
370 								(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
371 								 errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
372 								 parser_errposition(pstate, var->location)));
373 					/* system columns are okay here */
374 					break;
375 				case PRS2_NEW_VARNO:
376 					if (!TRIGGER_FOR_ROW(tgtype))
377 						ereport(ERROR,
378 								(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
379 								 errmsg("statement trigger's WHEN condition cannot reference column values"),
380 								 parser_errposition(pstate, var->location)));
381 					if (TRIGGER_FOR_DELETE(tgtype))
382 						ereport(ERROR,
383 								(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
384 								 errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
385 								 parser_errposition(pstate, var->location)));
386 					if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
387 						ereport(ERROR,
388 								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
389 								 errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
390 								 parser_errposition(pstate, var->location)));
391 					break;
392 				default:
393 					/* can't happen without add_missing_from, so just elog */
394 					elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
395 					break;
396 			}
397 		}
398 
399 		/* we'll need the rtable for recordDependencyOnExpr */
400 		whenRtable = pstate->p_rtable;
401 
402 		qual = nodeToString(whenClause);
403 
404 		free_parsestate(pstate);
405 	}
406 	else
407 	{
408 		whenClause = NULL;
409 		whenRtable = NIL;
410 		qual = NULL;
411 	}
412 
413 	/*
414 	 * Find and validate the trigger function.
415 	 */
416 	funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false);
417 	if (!isInternal)
418 	{
419 		aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
420 		if (aclresult != ACLCHECK_OK)
421 			aclcheck_error(aclresult, ACL_KIND_PROC,
422 						   NameListToString(stmt->funcname));
423 	}
424 	funcrettype = get_func_rettype(funcoid);
425 	if (funcrettype != TRIGGEROID)
426 	{
427 		/*
428 		 * We allow OPAQUE just so we can load old dump files.  When we see a
429 		 * trigger function declared OPAQUE, change it to TRIGGER.
430 		 */
431 		if (funcrettype == OPAQUEOID)
432 		{
433 			ereport(WARNING,
434 					(errmsg("changing return type of function %s from \"opaque\" to \"trigger\"",
435 							NameListToString(stmt->funcname))));
436 			SetFunctionReturnType(funcoid, TRIGGEROID);
437 		}
438 		else
439 			ereport(ERROR,
440 					(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
441 					 errmsg("function %s must return type %s",
442 							NameListToString(stmt->funcname), "trigger")));
443 	}
444 
445 	/*
446 	 * If the command is a user-entered CREATE CONSTRAINT TRIGGER command that
447 	 * references one of the built-in RI_FKey trigger functions, assume it is
448 	 * from a dump of a pre-7.3 foreign key constraint, and take steps to
449 	 * convert this legacy representation into a regular foreign key
450 	 * constraint.  Ugly, but necessary for loading old dump files.
451 	 */
452 	if (stmt->isconstraint && !isInternal &&
453 		list_length(stmt->args) >= 6 &&
454 		(list_length(stmt->args) % 2) == 0 &&
455 		RI_FKey_trigger_type(funcoid) != RI_TRIGGER_NONE)
456 	{
457 		/* Keep lock on target rel until end of xact */
458 		heap_close(rel, NoLock);
459 
460 		ConvertTriggerToFK(stmt, funcoid);
461 
462 		return InvalidObjectAddress;
463 	}
464 
465 	/*
466 	 * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
467 	 * corresponding pg_constraint entry.
468 	 */
469 	if (stmt->isconstraint && !OidIsValid(constraintOid))
470 	{
471 		/* Internal callers should have made their own constraints */
472 		Assert(!isInternal);
473 		constraintOid = CreateConstraintEntry(stmt->trigname,
474 											  RelationGetNamespace(rel),
475 											  CONSTRAINT_TRIGGER,
476 											  stmt->deferrable,
477 											  stmt->initdeferred,
478 											  true,
479 											  RelationGetRelid(rel),
480 											  NULL,		/* no conkey */
481 											  0,
482 											  InvalidOid,		/* no domain */
483 											  InvalidOid,		/* no index */
484 											  InvalidOid,		/* no foreign key */
485 											  NULL,
486 											  NULL,
487 											  NULL,
488 											  NULL,
489 											  0,
490 											  ' ',
491 											  ' ',
492 											  ' ',
493 											  NULL,		/* no exclusion */
494 											  NULL,		/* no check constraint */
495 											  NULL,
496 											  NULL,
497 											  true,		/* islocal */
498 											  0,		/* inhcount */
499 											  true,		/* isnoinherit */
500 											  isInternal);		/* is_internal */
501 	}
502 
503 	/*
504 	 * Generate the trigger's OID now, so that we can use it in the name if
505 	 * needed.
506 	 */
507 	tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
508 
509 	trigoid = GetNewOid(tgrel);
510 
511 	/*
512 	 * If trigger is internally generated, modify the provided trigger name to
513 	 * ensure uniqueness by appending the trigger OID.  (Callers will usually
514 	 * supply a simple constant trigger name in these cases.)
515 	 */
516 	if (isInternal)
517 	{
518 		snprintf(internaltrigname, sizeof(internaltrigname),
519 				 "%s_%u", stmt->trigname, trigoid);
520 		trigname = internaltrigname;
521 	}
522 	else
523 	{
524 		/* user-defined trigger; use the specified trigger name as-is */
525 		trigname = stmt->trigname;
526 	}
527 
528 	/*
529 	 * Scan pg_trigger for existing triggers on relation.  We do this only to
530 	 * give a nice error message if there's already a trigger of the same
531 	 * name.  (The unique index on tgrelid/tgname would complain anyway.) We
532 	 * can skip this for internally generated triggers, since the name
533 	 * modification above should be sufficient.
534 	 *
535 	 * NOTE that this is cool only because we have ShareRowExclusiveLock on
536 	 * the relation, so the trigger set won't be changing underneath us.
537 	 */
538 	if (!isInternal)
539 	{
540 		ScanKeyInit(&key,
541 					Anum_pg_trigger_tgrelid,
542 					BTEqualStrategyNumber, F_OIDEQ,
543 					ObjectIdGetDatum(RelationGetRelid(rel)));
544 		tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
545 									NULL, 1, &key);
546 		while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
547 		{
548 			Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
549 
550 			if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
551 				ereport(ERROR,
552 						(errcode(ERRCODE_DUPLICATE_OBJECT),
553 				  errmsg("trigger \"%s\" for relation \"%s\" already exists",
554 						 trigname, RelationGetRelationName(rel))));
555 		}
556 		systable_endscan(tgscan);
557 	}
558 
559 	/*
560 	 * Build the new pg_trigger tuple.
561 	 */
562 	memset(nulls, false, sizeof(nulls));
563 
564 	values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
565 	values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
566 												  CStringGetDatum(trigname));
567 	values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
568 	values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
569 	values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(TRIGGER_FIRES_ON_ORIGIN);
570 	values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
571 	values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
572 	values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
573 	values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
574 	values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
575 	values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
576 
577 	if (stmt->args)
578 	{
579 		ListCell   *le;
580 		char	   *args;
581 		int16		nargs = list_length(stmt->args);
582 		int			len = 0;
583 
584 		foreach(le, stmt->args)
585 		{
586 			char	   *ar = strVal(lfirst(le));
587 
588 			len += strlen(ar) + 4;
589 			for (; *ar; ar++)
590 			{
591 				if (*ar == '\\')
592 					len++;
593 			}
594 		}
595 		args = (char *) palloc(len + 1);
596 		args[0] = '\0';
597 		foreach(le, stmt->args)
598 		{
599 			char	   *s = strVal(lfirst(le));
600 			char	   *d = args + strlen(args);
601 
602 			while (*s)
603 			{
604 				if (*s == '\\')
605 					*d++ = '\\';
606 				*d++ = *s++;
607 			}
608 			strcpy(d, "\\000");
609 		}
610 		values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
611 		values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
612 													  CStringGetDatum(args));
613 	}
614 	else
615 	{
616 		values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
617 		values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
618 														CStringGetDatum(""));
619 	}
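	/*
	 * (Added illustration: given trigger arguments 'a\b' and 'c', the loop
	 * above produces the C string  a\\b\000c\000  -- each backslash doubled
	 * and the four characters "\000" appended after every argument.  byteain
	 * then de-escapes that into the bytea payload  a \ b NUL c NUL, i.e. a
	 * NUL-separated argument list, which is the form RelationBuildTriggers()
	 * later splits back into the individual argument strings.)
	 */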
620 
621 	/* build column number array if it's a column-specific trigger */
622 	ncolumns = list_length(stmt->columns);
623 	if (ncolumns == 0)
624 		columns = NULL;
625 	else
626 	{
627 		ListCell   *cell;
628 		int			i = 0;
629 
630 		columns = (int16 *) palloc(ncolumns * sizeof(int16));
631 		foreach(cell, stmt->columns)
632 		{
633 			char	   *name = strVal(lfirst(cell));
634 			int16		attnum;
635 			int			j;
636 
637 			/* Lookup column name.  System columns are not allowed */
638 			attnum = attnameAttNum(rel, name, false);
639 			if (attnum == InvalidAttrNumber)
640 				ereport(ERROR,
641 						(errcode(ERRCODE_UNDEFINED_COLUMN),
642 					errmsg("column \"%s\" of relation \"%s\" does not exist",
643 						   name, RelationGetRelationName(rel))));
644 
645 			/* Check for duplicates */
646 			for (j = i - 1; j >= 0; j--)
647 			{
648 				if (columns[j] == attnum)
649 					ereport(ERROR,
650 							(errcode(ERRCODE_DUPLICATE_COLUMN),
651 							 errmsg("column \"%s\" specified more than once",
652 									name)));
653 			}
654 
655 			columns[i++] = attnum;
656 		}
657 	}
658 	tgattr = buildint2vector(columns, ncolumns);
659 	values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
660 
661 	/* set tgqual if trigger has WHEN clause */
662 	if (qual)
663 		values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
664 	else
665 		nulls[Anum_pg_trigger_tgqual - 1] = true;
666 
667 	tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
668 
669 	/* force tuple to have the desired OID */
670 	HeapTupleSetOid(tuple, trigoid);
671 
672 	/*
673 	 * Insert tuple into pg_trigger.
674 	 */
675 	simple_heap_insert(tgrel, tuple);
676 
677 	CatalogUpdateIndexes(tgrel, tuple);
678 
679 	heap_freetuple(tuple);
680 	heap_close(tgrel, RowExclusiveLock);
681 
682 	pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
683 	pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
684 	pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
685 
686 	/*
687 	 * Update relation's pg_class entry.  Crucial side-effect: other backends
688 	 * (and this one too!) are sent SI message to make them rebuild relcache
689 	 * entries.
690 	 */
691 	pgrel = heap_open(RelationRelationId, RowExclusiveLock);
692 	tuple = SearchSysCacheCopy1(RELOID,
693 								ObjectIdGetDatum(RelationGetRelid(rel)));
694 	if (!HeapTupleIsValid(tuple))
695 		elog(ERROR, "cache lookup failed for relation %u",
696 			 RelationGetRelid(rel));
697 
698 	((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
699 
700 	simple_heap_update(pgrel, &tuple->t_self, tuple);
701 
702 	CatalogUpdateIndexes(pgrel, tuple);
703 
704 	heap_freetuple(tuple);
705 	heap_close(pgrel, RowExclusiveLock);
706 
707 	/*
708 	 * We used to try to update the rel's relcache entry here, but that's
709 	 * fairly pointless since it will happen as a byproduct of the upcoming
710 	 * CommandCounterIncrement...
711 	 */
712 
713 	/*
714 	 * Record dependencies for trigger.  Always place a normal dependency on
715 	 * the function.
716 	 */
717 	myself.classId = TriggerRelationId;
718 	myself.objectId = trigoid;
719 	myself.objectSubId = 0;
720 
721 	referenced.classId = ProcedureRelationId;
722 	referenced.objectId = funcoid;
723 	referenced.objectSubId = 0;
724 	recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
725 
726 	if (isInternal && OidIsValid(constraintOid))
727 	{
728 		/*
729 		 * Internally-generated trigger for a constraint, so make it an
730 		 * internal dependency of the constraint.  We can skip depending on
731 		 * the relation(s), as there'll be an indirect dependency via the
732 		 * constraint.
733 		 */
734 		referenced.classId = ConstraintRelationId;
735 		referenced.objectId = constraintOid;
736 		referenced.objectSubId = 0;
737 		recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
738 	}
739 	else
740 	{
741 		/*
742 		 * User CREATE TRIGGER, so place dependencies.  We make trigger be
743 		 * auto-dropped if its relation is dropped or if the FK relation is
744 		 * dropped.  (Auto drop is compatible with our pre-7.3 behavior.)
745 		 */
746 		referenced.classId = RelationRelationId;
747 		referenced.objectId = RelationGetRelid(rel);
748 		referenced.objectSubId = 0;
749 		recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
750 		if (OidIsValid(constrrelid))
751 		{
752 			referenced.classId = RelationRelationId;
753 			referenced.objectId = constrrelid;
754 			referenced.objectSubId = 0;
755 			recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
756 		}
757 		/* Not possible to have an index dependency in this case */
758 		Assert(!OidIsValid(indexOid));
759 
760 		/*
761 		 * If it's a user-specified constraint trigger, make the constraint
762 		 * internally dependent on the trigger instead of vice versa.
763 		 */
764 		if (OidIsValid(constraintOid))
765 		{
766 			referenced.classId = ConstraintRelationId;
767 			referenced.objectId = constraintOid;
768 			referenced.objectSubId = 0;
769 			recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
770 		}
771 	}
772 
773 	/* If column-specific trigger, add normal dependencies on columns */
774 	if (columns != NULL)
775 	{
776 		int			i;
777 
778 		referenced.classId = RelationRelationId;
779 		referenced.objectId = RelationGetRelid(rel);
780 		for (i = 0; i < ncolumns; i++)
781 		{
782 			referenced.objectSubId = columns[i];
783 			recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
784 		}
785 	}
786 
787 	/*
788 	 * If it has a WHEN clause, add dependencies on objects mentioned in the
789 	 * expression (eg, functions, as well as any columns used).
790 	 */
791 	if (whenClause != NULL)
792 		recordDependencyOnExpr(&myself, whenClause, whenRtable,
793 							   DEPENDENCY_NORMAL);
794 
795 	/* Post creation hook for new trigger */
796 	InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
797 								  isInternal);
798 
799 	/* Keep lock on target rel until end of xact */
800 	heap_close(rel, NoLock);
801 
802 	return myself;
803 }
804 
805 
806 /*
807  * Convert legacy (pre-7.3) CREATE CONSTRAINT TRIGGER commands into
808  * full-fledged foreign key constraints.
809  *
810  * The conversion is complex because a pre-7.3 foreign key involved three
811  * separate triggers, which were reported separately in dumps.  While the
812  * single trigger on the referencing table adds no new information, we need
813  * to know the trigger functions of both of the triggers on the referenced
814  * table to build the constraint declaration.  Also, due to lack of proper
815  * dependency checking pre-7.3, it is possible that the source database had
816  * an incomplete set of triggers resulting in an only partially enforced
817  * FK constraint.  (This would happen if one of the tables had been dropped
818  * and re-created, but only if the DB had been affected by a 7.0 pg_dump bug
819  * that caused loss of tgconstrrelid information.)	We choose to translate to
820  * an FK constraint only when we've seen all three triggers of a set.  This is
821  * implemented by storing unmatched items in a list in TopMemoryContext.
822  * We match triggers together by comparing the trigger arguments (which
823  * include constraint name, table and column names, so should be good enough).
824  */
825 typedef struct
826 {
827 	List	   *args;			/* list of (T_String) Values or NIL */
828 	Oid			funcoids[3];	/* OIDs of trigger functions */
829 	/* The three function OIDs are stored in the order update, delete, child */
830 } OldTriggerInfo;
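/*
 * Illustrative example (added commentary; the names are hypothetical): a
 * pre-7.3 dump of a foreign key from fktab(fcol) to pktab(pcol) contains
 * three CREATE CONSTRAINT TRIGGER commands -- one on fktab using one of the
 * RI_FKey_check functions, plus an UPDATE and a DELETE trigger on pktab
 * (e.g. RI_FKey_noaction_upd and RI_FKey_noaction_del) -- all carrying the
 * same argument list ('<unnamed>', 'fktab', 'pktab', 'UNSPECIFIED', 'fcol',
 * 'pcol').  Only after all three have been seen does the code below issue
 * the equivalent of
 *
 *		ALTER TABLE fktab ADD FOREIGN KEY (fcol) REFERENCES pktab (pcol);
 */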
831 
832 static void
833 ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid)
834 {
835 	static List *info_list = NIL;
836 
837 	static const char *const funcdescr[3] = {
838 		gettext_noop("Found referenced table's UPDATE trigger."),
839 		gettext_noop("Found referenced table's DELETE trigger."),
840 		gettext_noop("Found referencing table's trigger.")
841 	};
842 
843 	char	   *constr_name;
844 	char	   *fk_table_name;
845 	char	   *pk_table_name;
846 	char		fk_matchtype = FKCONSTR_MATCH_SIMPLE;
847 	List	   *fk_attrs = NIL;
848 	List	   *pk_attrs = NIL;
849 	StringInfoData buf;
850 	int			funcnum;
851 	OldTriggerInfo *info = NULL;
852 	ListCell   *l;
853 	int			i;
854 
855 	/* Parse out the trigger arguments */
856 	constr_name = strVal(linitial(stmt->args));
857 	fk_table_name = strVal(lsecond(stmt->args));
858 	pk_table_name = strVal(lthird(stmt->args));
859 	i = 0;
860 	foreach(l, stmt->args)
861 	{
862 		Value	   *arg = (Value *) lfirst(l);
863 
864 		i++;
865 		if (i < 4)				/* skip constraint and table names */
866 			continue;
867 		if (i == 4)				/* handle match type */
868 		{
869 			if (strcmp(strVal(arg), "FULL") == 0)
870 				fk_matchtype = FKCONSTR_MATCH_FULL;
871 			else
872 				fk_matchtype = FKCONSTR_MATCH_SIMPLE;
873 			continue;
874 		}
875 		if (i % 2)
876 			fk_attrs = lappend(fk_attrs, arg);
877 		else
878 			pk_attrs = lappend(pk_attrs, arg);
879 	}
880 
881 	/* Prepare description of constraint for use in messages */
882 	initStringInfo(&buf);
883 	appendStringInfo(&buf, "FOREIGN KEY %s(",
884 					 quote_identifier(fk_table_name));
885 	i = 0;
886 	foreach(l, fk_attrs)
887 	{
888 		Value	   *arg = (Value *) lfirst(l);
889 
890 		if (i++ > 0)
891 			appendStringInfoChar(&buf, ',');
892 		appendStringInfoString(&buf, quote_identifier(strVal(arg)));
893 	}
894 	appendStringInfo(&buf, ") REFERENCES %s(",
895 					 quote_identifier(pk_table_name));
896 	i = 0;
897 	foreach(l, pk_attrs)
898 	{
899 		Value	   *arg = (Value *) lfirst(l);
900 
901 		if (i++ > 0)
902 			appendStringInfoChar(&buf, ',');
903 		appendStringInfoString(&buf, quote_identifier(strVal(arg)));
904 	}
905 	appendStringInfoChar(&buf, ')');
906 
907 	/* Identify class of trigger --- update, delete, or referencing-table */
908 	switch (funcoid)
909 	{
910 		case F_RI_FKEY_CASCADE_UPD:
911 		case F_RI_FKEY_RESTRICT_UPD:
912 		case F_RI_FKEY_SETNULL_UPD:
913 		case F_RI_FKEY_SETDEFAULT_UPD:
914 		case F_RI_FKEY_NOACTION_UPD:
915 			funcnum = 0;
916 			break;
917 
918 		case F_RI_FKEY_CASCADE_DEL:
919 		case F_RI_FKEY_RESTRICT_DEL:
920 		case F_RI_FKEY_SETNULL_DEL:
921 		case F_RI_FKEY_SETDEFAULT_DEL:
922 		case F_RI_FKEY_NOACTION_DEL:
923 			funcnum = 1;
924 			break;
925 
926 		default:
927 			funcnum = 2;
928 			break;
929 	}
930 
931 	/* See if we have a match to this trigger */
932 	foreach(l, info_list)
933 	{
934 		info = (OldTriggerInfo *) lfirst(l);
935 		if (info->funcoids[funcnum] == InvalidOid &&
936 			equal(info->args, stmt->args))
937 		{
938 			info->funcoids[funcnum] = funcoid;
939 			break;
940 		}
941 	}
942 
943 	if (l == NULL)
944 	{
945 		/* First trigger of set, so create a new list entry */
946 		MemoryContext oldContext;
947 
948 		ereport(NOTICE,
949 		(errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
950 				constr_name, buf.data),
951 		 errdetail_internal("%s", _(funcdescr[funcnum]))));
952 		oldContext = MemoryContextSwitchTo(TopMemoryContext);
953 		info = (OldTriggerInfo *) palloc0(sizeof(OldTriggerInfo));
954 		info->args = copyObject(stmt->args);
955 		info->funcoids[funcnum] = funcoid;
956 		info_list = lappend(info_list, info);
957 		MemoryContextSwitchTo(oldContext);
958 	}
959 	else if (info->funcoids[0] == InvalidOid ||
960 			 info->funcoids[1] == InvalidOid ||
961 			 info->funcoids[2] == InvalidOid)
962 	{
963 		/* Second trigger of set */
964 		ereport(NOTICE,
965 		(errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
966 				constr_name, buf.data),
967 		 errdetail_internal("%s", _(funcdescr[funcnum]))));
968 	}
969 	else
970 	{
971 		/* OK, we have a set, so make the FK constraint ALTER TABLE cmd */
972 		AlterTableStmt *atstmt = makeNode(AlterTableStmt);
973 		AlterTableCmd *atcmd = makeNode(AlterTableCmd);
974 		Constraint *fkcon = makeNode(Constraint);
975 
976 		ereport(NOTICE,
977 				(errmsg("converting trigger group into constraint \"%s\" %s",
978 						constr_name, buf.data),
979 				 errdetail_internal("%s", _(funcdescr[funcnum]))));
980 		fkcon->contype = CONSTR_FOREIGN;
981 		fkcon->location = -1;
982 		if (funcnum == 2)
983 		{
984 			/* This trigger is on the FK table */
985 			atstmt->relation = stmt->relation;
986 			if (stmt->constrrel)
987 				fkcon->pktable = stmt->constrrel;
988 			else
989 			{
990 				/* Work around ancient pg_dump bug that omitted constrrel */
991 				fkcon->pktable = makeRangeVar(NULL, pk_table_name, -1);
992 			}
993 		}
994 		else
995 		{
996 			/* This trigger is on the PK table */
997 			fkcon->pktable = stmt->relation;
998 			if (stmt->constrrel)
999 				atstmt->relation = stmt->constrrel;
1000 			else
1001 			{
1002 				/* Work around ancient pg_dump bug that omitted constrrel */
1003 				atstmt->relation = makeRangeVar(NULL, fk_table_name, -1);
1004 			}
1005 		}
1006 		atstmt->cmds = list_make1(atcmd);
1007 		atstmt->relkind = OBJECT_TABLE;
1008 		atcmd->subtype = AT_AddConstraint;
1009 		atcmd->def = (Node *) fkcon;
1010 		if (strcmp(constr_name, "<unnamed>") == 0)
1011 			fkcon->conname = NULL;
1012 		else
1013 			fkcon->conname = constr_name;
1014 		fkcon->fk_attrs = fk_attrs;
1015 		fkcon->pk_attrs = pk_attrs;
1016 		fkcon->fk_matchtype = fk_matchtype;
1017 		switch (info->funcoids[0])
1018 		{
1019 			case F_RI_FKEY_NOACTION_UPD:
1020 				fkcon->fk_upd_action = FKCONSTR_ACTION_NOACTION;
1021 				break;
1022 			case F_RI_FKEY_CASCADE_UPD:
1023 				fkcon->fk_upd_action = FKCONSTR_ACTION_CASCADE;
1024 				break;
1025 			case F_RI_FKEY_RESTRICT_UPD:
1026 				fkcon->fk_upd_action = FKCONSTR_ACTION_RESTRICT;
1027 				break;
1028 			case F_RI_FKEY_SETNULL_UPD:
1029 				fkcon->fk_upd_action = FKCONSTR_ACTION_SETNULL;
1030 				break;
1031 			case F_RI_FKEY_SETDEFAULT_UPD:
1032 				fkcon->fk_upd_action = FKCONSTR_ACTION_SETDEFAULT;
1033 				break;
1034 			default:
1035 				/* can't get here because of earlier checks */
1036 				elog(ERROR, "confused about RI update function");
1037 		}
1038 		switch (info->funcoids[1])
1039 		{
1040 			case F_RI_FKEY_NOACTION_DEL:
1041 				fkcon->fk_del_action = FKCONSTR_ACTION_NOACTION;
1042 				break;
1043 			case F_RI_FKEY_CASCADE_DEL:
1044 				fkcon->fk_del_action = FKCONSTR_ACTION_CASCADE;
1045 				break;
1046 			case F_RI_FKEY_RESTRICT_DEL:
1047 				fkcon->fk_del_action = FKCONSTR_ACTION_RESTRICT;
1048 				break;
1049 			case F_RI_FKEY_SETNULL_DEL:
1050 				fkcon->fk_del_action = FKCONSTR_ACTION_SETNULL;
1051 				break;
1052 			case F_RI_FKEY_SETDEFAULT_DEL:
1053 				fkcon->fk_del_action = FKCONSTR_ACTION_SETDEFAULT;
1054 				break;
1055 			default:
1056 				/* can't get here because of earlier checks */
1057 				elog(ERROR, "confused about RI delete function");
1058 		}
1059 		fkcon->deferrable = stmt->deferrable;
1060 		fkcon->initdeferred = stmt->initdeferred;
1061 		fkcon->skip_validation = false;
1062 		fkcon->initially_valid = true;
1063 
1064 		/* ... and execute it */
1065 		ProcessUtility((Node *) atstmt,
1066 					   "(generated ALTER TABLE ADD FOREIGN KEY command)",
1067 					   PROCESS_UTILITY_SUBCOMMAND, NULL,
1068 					   None_Receiver, NULL);
1069 
1070 		/* Remove the matched item from the list */
1071 		info_list = list_delete_ptr(info_list, info);
1072 		pfree(info);
1073 		/* We leak the copied args ... not worth worrying about */
1074 	}
1075 }
1076 
1077 /*
1078  * Guts of trigger deletion.
1079  */
1080 void
1081 RemoveTriggerById(Oid trigOid)
1082 {
1083 	Relation	tgrel;
1084 	SysScanDesc tgscan;
1085 	ScanKeyData skey[1];
1086 	HeapTuple	tup;
1087 	Oid			relid;
1088 	Relation	rel;
1089 
1090 	tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1091 
1092 	/*
1093 	 * Find the trigger to delete.
1094 	 */
1095 	ScanKeyInit(&skey[0],
1096 				ObjectIdAttributeNumber,
1097 				BTEqualStrategyNumber, F_OIDEQ,
1098 				ObjectIdGetDatum(trigOid));
1099 
1100 	tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1101 								NULL, 1, skey);
1102 
1103 	tup = systable_getnext(tgscan);
1104 	if (!HeapTupleIsValid(tup))
1105 		elog(ERROR, "could not find tuple for trigger %u", trigOid);
1106 
1107 	/*
1108 	 * Open and exclusive-lock the relation the trigger belongs to.
1109 	 */
1110 	relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1111 
1112 	rel = heap_open(relid, AccessExclusiveLock);
1113 
1114 	if (rel->rd_rel->relkind != RELKIND_RELATION &&
1115 		rel->rd_rel->relkind != RELKIND_VIEW &&
1116 		rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE)
1117 		ereport(ERROR,
1118 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
1119 				 errmsg("\"%s\" is not a table, view, or foreign table",
1120 						RelationGetRelationName(rel))));
1121 
1122 	if (!allowSystemTableMods && IsSystemRelation(rel))
1123 		ereport(ERROR,
1124 				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1125 				 errmsg("permission denied: \"%s\" is a system catalog",
1126 						RelationGetRelationName(rel))));
1127 
1128 	/*
1129 	 * Delete the pg_trigger tuple.
1130 	 */
1131 	simple_heap_delete(tgrel, &tup->t_self);
1132 
1133 	systable_endscan(tgscan);
1134 	heap_close(tgrel, RowExclusiveLock);
1135 
1136 	/*
1137 	 * We do not bother to try to determine whether any other triggers remain,
1138 	 * which would be needed in order to decide whether it's safe to clear the
1139 	 * relation's relhastriggers.  (In any case, there might be a concurrent
1140 	 * process adding new triggers.)  Instead, just force a relcache inval to
1141 	 * make other backends (and this one too!) rebuild their relcache entries.
1142 	 * There's no great harm in leaving relhastriggers true even if there are
1143 	 * no triggers left.
1144 	 */
1145 	CacheInvalidateRelcache(rel);
1146 
1147 	/* Keep lock on trigger's rel until end of xact */
1148 	heap_close(rel, NoLock);
1149 }
1150 
1151 /*
1152  * get_trigger_oid - Look up a trigger by name to find its OID.
1153  *
1154  * If missing_ok is false, throw an error if trigger not found.  If
1155  * true, just return InvalidOid.
1156  */
1157 Oid
1158 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1159 {
1160 	Relation	tgrel;
1161 	ScanKeyData skey[2];
1162 	SysScanDesc tgscan;
1163 	HeapTuple	tup;
1164 	Oid			oid;
1165 
1166 	/*
1167 	 * Find the trigger, verify permissions, set up object address
1168 	 */
1169 	tgrel = heap_open(TriggerRelationId, AccessShareLock);
1170 
1171 	ScanKeyInit(&skey[0],
1172 				Anum_pg_trigger_tgrelid,
1173 				BTEqualStrategyNumber, F_OIDEQ,
1174 				ObjectIdGetDatum(relid));
1175 	ScanKeyInit(&skey[1],
1176 				Anum_pg_trigger_tgname,
1177 				BTEqualStrategyNumber, F_NAMEEQ,
1178 				CStringGetDatum(trigname));
1179 
1180 	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1181 								NULL, 2, skey);
1182 
1183 	tup = systable_getnext(tgscan);
1184 
1185 	if (!HeapTupleIsValid(tup))
1186 	{
1187 		if (!missing_ok)
1188 			ereport(ERROR,
1189 					(errcode(ERRCODE_UNDEFINED_OBJECT),
1190 					 errmsg("trigger \"%s\" for table \"%s\" does not exist",
1191 							trigname, get_rel_name(relid))));
1192 		oid = InvalidOid;
1193 	}
1194 	else
1195 	{
1196 		oid = HeapTupleGetOid(tup);
1197 	}
1198 
1199 	systable_endscan(tgscan);
1200 	heap_close(tgrel, AccessShareLock);
1201 	return oid;
1202 }
1203 
1204 /*
1205  * Perform permissions and integrity checks before acquiring a relation lock.
1206  */
1207 static void
1208 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1209 								 void *arg)
1210 {
1211 	HeapTuple	tuple;
1212 	Form_pg_class form;
1213 
1214 	tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1215 	if (!HeapTupleIsValid(tuple))
1216 		return;					/* concurrently dropped */
1217 	form = (Form_pg_class) GETSTRUCT(tuple);
1218 
1219 	/* only tables and views can have triggers */
1220 	if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1221 		form->relkind != RELKIND_FOREIGN_TABLE)
1222 		ereport(ERROR,
1223 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
1224 				 errmsg("\"%s\" is not a table, view, or foreign table",
1225 						rv->relname)));
1226 
1227 	/* you must own the table to rename one of its triggers */
1228 	if (!pg_class_ownercheck(relid, GetUserId()))
1229 		aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname);
1230 	if (!allowSystemTableMods && IsSystemClass(relid, form))
1231 		ereport(ERROR,
1232 				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1233 				 errmsg("permission denied: \"%s\" is a system catalog",
1234 						rv->relname)));
1235 
1236 	ReleaseSysCache(tuple);
1237 }
1238 
1239 /*
1240  *		renametrig		- changes the name of a trigger on a relation
1241  *
1242  *		trigger name is changed in trigger catalog.
1243  *		No record of the previous name is kept.
1244  *
1245  *		get proper relrelation from relation catalog (if not arg)
1246  *		scan trigger catalog
1247  *				for name conflict (within rel)
1248  *				for original trigger (if not arg)
1249  *		modify tgname in trigger tuple
1250  *		update row in catalog
1251  */
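/*
 *		(Added note: this is the handler for, e.g.,
 *			ALTER TRIGGER oldname ON sometab RENAME TO newname;
 *		with stmt->subname = "oldname" and stmt->newname = "newname".)
 */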
1252 ObjectAddress
1253 renametrig(RenameStmt *stmt)
1254 {
1255 	Oid			tgoid;
1256 	Relation	targetrel;
1257 	Relation	tgrel;
1258 	HeapTuple	tuple;
1259 	SysScanDesc tgscan;
1260 	ScanKeyData key[2];
1261 	Oid			relid;
1262 	ObjectAddress address;
1263 
1264 	/*
1265 	 * Look up name, check permissions, and acquire lock (which we will NOT
1266 	 * release until end of transaction).
1267 	 */
1268 	relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1269 									 false, false,
1270 									 RangeVarCallbackForRenameTrigger,
1271 									 NULL);
1272 
1273 	/* Have lock already, so just need to build relcache entry. */
1274 	targetrel = relation_open(relid, NoLock);
1275 
1276 	/*
1277 	 * Scan pg_trigger twice for existing triggers on relation.  We do this in
1278 	 * order to ensure a trigger does not exist with newname (The unique index
1279 	 * on tgrelid/tgname would complain anyway) and to ensure a trigger does
1280 	 * exist with oldname.
1281 	 *
1282 	 * NOTE that this is cool only because we have AccessExclusiveLock on the
1283 	 * relation, so the trigger set won't be changing underneath us.
1284 	 */
1285 	tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1286 
1287 	/*
1288 	 * First pass -- look for name conflict
1289 	 */
1290 	ScanKeyInit(&key[0],
1291 				Anum_pg_trigger_tgrelid,
1292 				BTEqualStrategyNumber, F_OIDEQ,
1293 				ObjectIdGetDatum(relid));
1294 	ScanKeyInit(&key[1],
1295 				Anum_pg_trigger_tgname,
1296 				BTEqualStrategyNumber, F_NAMEEQ,
1297 				PointerGetDatum(stmt->newname));
1298 	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1299 								NULL, 2, key);
1300 	if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1301 		ereport(ERROR,
1302 				(errcode(ERRCODE_DUPLICATE_OBJECT),
1303 				 errmsg("trigger \"%s\" for relation \"%s\" already exists",
1304 						stmt->newname, RelationGetRelationName(targetrel))));
1305 	systable_endscan(tgscan);
1306 
1307 	/*
1308 	 * Second pass -- look for trigger existing with oldname and update
1309 	 */
1310 	ScanKeyInit(&key[0],
1311 				Anum_pg_trigger_tgrelid,
1312 				BTEqualStrategyNumber, F_OIDEQ,
1313 				ObjectIdGetDatum(relid));
1314 	ScanKeyInit(&key[1],
1315 				Anum_pg_trigger_tgname,
1316 				BTEqualStrategyNumber, F_NAMEEQ,
1317 				PointerGetDatum(stmt->subname));
1318 	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1319 								NULL, 2, key);
1320 	if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1321 	{
1322 		tgoid = HeapTupleGetOid(tuple);
1323 
1324 		/*
1325 		 * Update pg_trigger tuple with new tgname.
1326 		 */
1327 		tuple = heap_copytuple(tuple);	/* need a modifiable copy */
1328 
1329 		namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname,
1330 				   stmt->newname);
1331 
1332 		simple_heap_update(tgrel, &tuple->t_self, tuple);
1333 
1334 		/* keep system catalog indexes current */
1335 		CatalogUpdateIndexes(tgrel, tuple);
1336 
1337 		InvokeObjectPostAlterHook(TriggerRelationId,
1338 								  HeapTupleGetOid(tuple), 0);
1339 
1340 		/*
1341 		 * Invalidate relation's relcache entry so that other backends (and
1342 		 * this one too!) are sent SI message to make them rebuild relcache
1343 		 * entries.  (Ideally this should happen automatically...)
1344 		 */
1345 		CacheInvalidateRelcache(targetrel);
1346 	}
1347 	else
1348 	{
1349 		ereport(ERROR,
1350 				(errcode(ERRCODE_UNDEFINED_OBJECT),
1351 				 errmsg("trigger \"%s\" for table \"%s\" does not exist",
1352 						stmt->subname, RelationGetRelationName(targetrel))));
1353 	}
1354 
1355 	ObjectAddressSet(address, TriggerRelationId, tgoid);
1356 
1357 	systable_endscan(tgscan);
1358 
1359 	heap_close(tgrel, RowExclusiveLock);
1360 
1361 	/*
1362 	 * Close rel, but keep exclusive lock!
1363 	 */
1364 	relation_close(targetrel, NoLock);
1365 
1366 	return address;
1367 }
1368 
1369 
1370 /*
1371  * EnableDisableTrigger()
1372  *
1373  *	Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1374  *	to change 'tgenabled' field for the specified trigger(s)
1375  *
1376  * rel: relation to process (caller must hold suitable lock on it)
1377  * tgname: trigger to process, or NULL to scan all triggers
1378  * fires_when: new value for tgenabled field. In addition to generic
1379  *			   enablement/disablement, this also defines when the trigger
1380  *			   should be fired in session replication roles.
1381  * skip_system: if true, skip "system" triggers (constraint triggers)
1382  *
1383  * Caller should have checked permissions for the table; here we also
1384  * enforce that superuser privilege is required to alter the state of
1385  * system triggers
1386  */
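/*
 * (Added illustration: "ALTER TABLE tab DISABLE TRIGGER mytrig" arrives here
 * with fires_when = TRIGGER_DISABLED, while "ALTER TABLE tab ENABLE REPLICA
 * TRIGGER mytrig" passes TRIGGER_FIRES_ON_REPLICA; the ENABLE/DISABLE
 * TRIGGER ALL and USER forms pass tgname = NULL, with skip_system selecting
 * whether constraint triggers are touched.)
 */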
1387 void
1388 EnableDisableTrigger(Relation rel, const char *tgname,
1389 					 char fires_when, bool skip_system)
1390 {
1391 	Relation	tgrel;
1392 	int			nkeys;
1393 	ScanKeyData keys[2];
1394 	SysScanDesc tgscan;
1395 	HeapTuple	tuple;
1396 	bool		found;
1397 	bool		changed;
1398 
1399 	/* Scan the relevant entries in pg_trigger */
1400 	tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1401 
1402 	ScanKeyInit(&keys[0],
1403 				Anum_pg_trigger_tgrelid,
1404 				BTEqualStrategyNumber, F_OIDEQ,
1405 				ObjectIdGetDatum(RelationGetRelid(rel)));
1406 	if (tgname)
1407 	{
1408 		ScanKeyInit(&keys[1],
1409 					Anum_pg_trigger_tgname,
1410 					BTEqualStrategyNumber, F_NAMEEQ,
1411 					CStringGetDatum(tgname));
1412 		nkeys = 2;
1413 	}
1414 	else
1415 		nkeys = 1;
1416 
1417 	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1418 								NULL, nkeys, keys);
1419 
1420 	found = changed = false;
1421 
1422 	while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1423 	{
1424 		Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1425 
1426 		if (oldtrig->tgisinternal)
1427 		{
1428 			/* system trigger ... ok to process? */
1429 			if (skip_system)
1430 				continue;
1431 			if (!superuser())
1432 				ereport(ERROR,
1433 						(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1434 					  errmsg("permission denied: \"%s\" is a system trigger",
1435 							 NameStr(oldtrig->tgname))));
1436 		}
1437 
1438 		found = true;
1439 
1440 		if (oldtrig->tgenabled != fires_when)
1441 		{
1442 			/* need to change this one ... make a copy to scribble on */
1443 			HeapTuple	newtup = heap_copytuple(tuple);
1444 			Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1445 
1446 			newtrig->tgenabled = fires_when;
1447 
1448 			simple_heap_update(tgrel, &newtup->t_self, newtup);
1449 
1450 			/* Keep catalog indexes current */
1451 			CatalogUpdateIndexes(tgrel, newtup);
1452 
1453 			heap_freetuple(newtup);
1454 
1455 			changed = true;
1456 		}
1457 
1458 		InvokeObjectPostAlterHook(TriggerRelationId,
1459 								  HeapTupleGetOid(tuple), 0);
1460 	}
1461 
1462 	systable_endscan(tgscan);
1463 
1464 	heap_close(tgrel, RowExclusiveLock);
1465 
1466 	if (tgname && !found)
1467 		ereport(ERROR,
1468 				(errcode(ERRCODE_UNDEFINED_OBJECT),
1469 				 errmsg("trigger \"%s\" for table \"%s\" does not exist",
1470 						tgname, RelationGetRelationName(rel))));
1471 
1472 	/*
1473 	 * If we changed anything, broadcast a SI inval message to force each
1474 	 * backend (including our own!) to rebuild relation's relcache entry.
1475 	 * Otherwise they will fail to apply the change promptly.
1476 	 */
1477 	if (changed)
1478 		CacheInvalidateRelcache(rel);
1479 }
1480 
1481 
1482 /*
1483  * Build trigger data to attach to the given relcache entry.
1484  *
1485  * Note that trigger data attached to a relcache entry must be stored in
1486  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1487  * But we should be running in a less long-lived working context.  To avoid
1488  * leaking cache memory if this routine fails partway through, we build a
1489  * temporary TriggerDesc in working memory and then copy the completed
1490  * structure into cache memory.
1491  */
1492 void
1493 RelationBuildTriggers(Relation relation)
1494 {
1495 	TriggerDesc *trigdesc;
1496 	int			numtrigs;
1497 	int			maxtrigs;
1498 	Trigger    *triggers;
1499 	Relation	tgrel;
1500 	ScanKeyData skey;
1501 	SysScanDesc tgscan;
1502 	HeapTuple	htup;
1503 	MemoryContext oldContext;
1504 	int			i;
1505 
1506 	/*
1507 	 * Allocate a working array to hold the triggers (the array is extended if
1508 	 * necessary)
1509 	 */
1510 	maxtrigs = 16;
1511 	triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1512 	numtrigs = 0;
1513 
1514 	/*
1515 	 * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1516 	 * be reading the triggers in name order, except possibly during
1517 	 * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1518 	 * ensures that triggers will be fired in name order.
1519 	 */
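	/*
	 * (Added example: two AFTER INSERT triggers named "a_audit" and
	 * "b_notify" on the same table are therefore returned here, and later
	 * fired, in that name order.)
	 */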
1520 	ScanKeyInit(&skey,
1521 				Anum_pg_trigger_tgrelid,
1522 				BTEqualStrategyNumber, F_OIDEQ,
1523 				ObjectIdGetDatum(RelationGetRelid(relation)));
1524 
1525 	tgrel = heap_open(TriggerRelationId, AccessShareLock);
1526 	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1527 								NULL, 1, &skey);
1528 
1529 	while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1530 	{
1531 		Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1532 		Trigger    *build;
1533 		Datum		datum;
1534 		bool		isnull;
1535 
1536 		if (numtrigs >= maxtrigs)
1537 		{
1538 			maxtrigs *= 2;
1539 			triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1540 		}
1541 		build = &(triggers[numtrigs]);
1542 
1543 		build->tgoid = HeapTupleGetOid(htup);
1544 		build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1545 										 NameGetDatum(&pg_trigger->tgname)));
1546 		build->tgfoid = pg_trigger->tgfoid;
1547 		build->tgtype = pg_trigger->tgtype;
1548 		build->tgenabled = pg_trigger->tgenabled;
1549 		build->tgisinternal = pg_trigger->tgisinternal;
1550 		build->tgconstrrelid = pg_trigger->tgconstrrelid;
1551 		build->tgconstrindid = pg_trigger->tgconstrindid;
1552 		build->tgconstraint = pg_trigger->tgconstraint;
1553 		build->tgdeferrable = pg_trigger->tgdeferrable;
1554 		build->tginitdeferred = pg_trigger->tginitdeferred;
1555 		build->tgnargs = pg_trigger->tgnargs;
1556 		/* tgattr is first var-width field, so OK to access directly */
1557 		build->tgnattr = pg_trigger->tgattr.dim1;
1558 		if (build->tgnattr > 0)
1559 		{
1560 			build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1561 			memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1562 				   build->tgnattr * sizeof(int16));
1563 		}
1564 		else
1565 			build->tgattr = NULL;
1566 		if (build->tgnargs > 0)
1567 		{
1568 			bytea	   *val;
1569 			char	   *p;
1570 
1571 			val = DatumGetByteaP(fastgetattr(htup,
1572 											 Anum_pg_trigger_tgargs,
1573 											 tgrel->rd_att, &isnull));
1574 			if (isnull)
1575 				elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1576 					 RelationGetRelationName(relation));
1577 			p = (char *) VARDATA(val);
1578 			build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1579 			for (i = 0; i < build->tgnargs; i++)
1580 			{
1581 				build->tgargs[i] = pstrdup(p);
1582 				p += strlen(p) + 1;
1583 			}
1584 		}
1585 		else
1586 			build->tgargs = NULL;
1587 		datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1588 							tgrel->rd_att, &isnull);
1589 		if (!isnull)
1590 			build->tgqual = TextDatumGetCString(datum);
1591 		else
1592 			build->tgqual = NULL;
1593 
1594 		numtrigs++;
1595 	}
1596 
1597 	systable_endscan(tgscan);
1598 	heap_close(tgrel, AccessShareLock);
1599 
1600 	/* There might not be any triggers */
1601 	if (numtrigs == 0)
1602 	{
1603 		pfree(triggers);
1604 		return;
1605 	}
1606 
1607 	/* Build trigdesc */
1608 	trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1609 	trigdesc->triggers = triggers;
1610 	trigdesc->numtriggers = numtrigs;
1611 	for (i = 0; i < numtrigs; i++)
1612 		SetTriggerFlags(trigdesc, &(triggers[i]));
1613 
1614 	/* Copy completed trigdesc into cache storage */
1615 	oldContext = MemoryContextSwitchTo(CacheMemoryContext);
1616 	relation->trigdesc = CopyTriggerDesc(trigdesc);
1617 	MemoryContextSwitchTo(oldContext);
1618 
1619 	/* Release working memory */
1620 	FreeTriggerDesc(trigdesc);
1621 }
1622 
1623 /*
1624  * Update the TriggerDesc's hint flags to include the specified trigger
1625  */
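/*
 * (Added illustration: a single trigger created with
 * "... BEFORE INSERT OR UPDATE ON tab FOR EACH ROW ..." sets both
 * trig_insert_before_row and trig_update_before_row below, letting callers
 * test one boolean flag instead of rescanning the whole trigger array.)
 */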
1626 static void
1627 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
1628 {
1629 	int16		tgtype = trigger->tgtype;
1630 
1631 	trigdesc->trig_insert_before_row |=
1632 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1633 							 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1634 	trigdesc->trig_insert_after_row |=
1635 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1636 							 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1637 	trigdesc->trig_insert_instead_row |=
1638 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1639 							 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
1640 	trigdesc->trig_insert_before_statement |=
1641 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1642 							 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1643 	trigdesc->trig_insert_after_statement |=
1644 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1645 							 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1646 	trigdesc->trig_update_before_row |=
1647 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1648 							 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1649 	trigdesc->trig_update_after_row |=
1650 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1651 							 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1652 	trigdesc->trig_update_instead_row |=
1653 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1654 							 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
1655 	trigdesc->trig_update_before_statement |=
1656 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1657 							 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1658 	trigdesc->trig_update_after_statement |=
1659 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1660 							 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1661 	trigdesc->trig_delete_before_row |=
1662 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1663 							 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1664 	trigdesc->trig_delete_after_row |=
1665 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1666 							 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1667 	trigdesc->trig_delete_instead_row |=
1668 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1669 							 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
1670 	trigdesc->trig_delete_before_statement |=
1671 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1672 							 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1673 	trigdesc->trig_delete_after_statement |=
1674 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1675 							 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1676 	/* there are no row-level truncate triggers */
1677 	trigdesc->trig_truncate_before_statement |=
1678 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1679 							 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
1680 	trigdesc->trig_truncate_after_statement |=
1681 		TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1682 							 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
1683 }
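
/*
 * Illustrative note (editorial, not original to this module): callers use
 * these hint flags to skip trigger processing cheaply, for example
 *
 *		if (trigdesc && trigdesc->trig_insert_after_row)
 *			... queue the row-level AFTER INSERT events ...
 *
 * which is the pattern the ExecAR*Triggers functions below follow.
 */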
1684 
1685 /*
1686  * Copy a TriggerDesc data structure.
1687  *
1688  * The copy is allocated in the current memory context.
1689  */
1690 TriggerDesc *
1691 CopyTriggerDesc(TriggerDesc *trigdesc)
1692 {
1693 	TriggerDesc *newdesc;
1694 	Trigger    *trigger;
1695 	int			i;
1696 
1697 	if (trigdesc == NULL || trigdesc->numtriggers <= 0)
1698 		return NULL;
1699 
1700 	newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
1701 	memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
1702 
1703 	trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
1704 	memcpy(trigger, trigdesc->triggers,
1705 		   trigdesc->numtriggers * sizeof(Trigger));
1706 	newdesc->triggers = trigger;
1707 
1708 	for (i = 0; i < trigdesc->numtriggers; i++)
1709 	{
1710 		trigger->tgname = pstrdup(trigger->tgname);
1711 		if (trigger->tgnattr > 0)
1712 		{
1713 			int16	   *newattr;
1714 
1715 			newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
1716 			memcpy(newattr, trigger->tgattr,
1717 				   trigger->tgnattr * sizeof(int16));
1718 			trigger->tgattr = newattr;
1719 		}
1720 		if (trigger->tgnargs > 0)
1721 		{
1722 			char	  **newargs;
1723 			int16		j;
1724 
1725 			newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
1726 			for (j = 0; j < trigger->tgnargs; j++)
1727 				newargs[j] = pstrdup(trigger->tgargs[j]);
1728 			trigger->tgargs = newargs;
1729 		}
1730 		if (trigger->tgqual)
1731 			trigger->tgqual = pstrdup(trigger->tgqual);
1732 		trigger++;
1733 	}
1734 
1735 	return newdesc;
1736 }
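
/*
 * Usage sketch (illustrative, mirroring RelationBuildTriggers above): to put
 * the copy somewhere long-lived, switch memory contexts around the call and
 * release the working copy afterwards:
 *
 *		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 *		relation->trigdesc = CopyTriggerDesc(trigdesc);
 *		MemoryContextSwitchTo(oldcxt);
 *		FreeTriggerDesc(trigdesc);
 */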
1737 
1738 /*
1739  * Free a TriggerDesc data structure.
1740  */
1741 void
1742 FreeTriggerDesc(TriggerDesc *trigdesc)
1743 {
1744 	Trigger    *trigger;
1745 	int			i;
1746 
1747 	if (trigdesc == NULL)
1748 		return;
1749 
1750 	trigger = trigdesc->triggers;
1751 	for (i = 0; i < trigdesc->numtriggers; i++)
1752 	{
1753 		pfree(trigger->tgname);
1754 		if (trigger->tgnattr > 0)
1755 			pfree(trigger->tgattr);
1756 		if (trigger->tgnargs > 0)
1757 		{
1758 			while (--(trigger->tgnargs) >= 0)
1759 				pfree(trigger->tgargs[trigger->tgnargs]);
1760 			pfree(trigger->tgargs);
1761 		}
1762 		if (trigger->tgqual)
1763 			pfree(trigger->tgqual);
1764 		trigger++;
1765 	}
1766 	pfree(trigdesc->triggers);
1767 	pfree(trigdesc);
1768 }
1769 
1770 /*
1771  * Compare two TriggerDesc structures for logical equality.
1772  */
1773 #ifdef NOT_USED
1774 bool
1775 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
1776 {
1777 	int			i,
1778 				j;
1779 
1780 	/*
1781 	 * We need not examine the hint flags, just the trigger array itself; if
1782 	 * we have the same triggers with the same types, the flags should match.
1783 	 *
1784 	 * As of 7.3 we assume trigger set ordering is significant in the
1785 	 * comparison; so we just compare corresponding slots of the two sets.
1786 	 *
1787 	 * Note: comparing the stringToNode forms of the WHEN clauses means that
1788 	 * parse column locations will affect the result.  This is okay as long as
1789 	 * this function is only used for detecting exact equality, as for example
1790 	 * in checking for staleness of a cache entry.
1791 	 */
1792 	if (trigdesc1 != NULL)
1793 	{
1794 		if (trigdesc2 == NULL)
1795 			return false;
1796 		if (trigdesc1->numtriggers != trigdesc2->numtriggers)
1797 			return false;
1798 		for (i = 0; i < trigdesc1->numtriggers; i++)
1799 		{
1800 			Trigger    *trig1 = trigdesc1->triggers + i;
1801 			Trigger    *trig2 = trigdesc2->triggers + i;
1802 
1803 			if (trig1->tgoid != trig2->tgoid)
1804 				return false;
1805 			if (strcmp(trig1->tgname, trig2->tgname) != 0)
1806 				return false;
1807 			if (trig1->tgfoid != trig2->tgfoid)
1808 				return false;
1809 			if (trig1->tgtype != trig2->tgtype)
1810 				return false;
1811 			if (trig1->tgenabled != trig2->tgenabled)
1812 				return false;
1813 			if (trig1->tgisinternal != trig2->tgisinternal)
1814 				return false;
1815 			if (trig1->tgconstrrelid != trig2->tgconstrrelid)
1816 				return false;
1817 			if (trig1->tgconstrindid != trig2->tgconstrindid)
1818 				return false;
1819 			if (trig1->tgconstraint != trig2->tgconstraint)
1820 				return false;
1821 			if (trig1->tgdeferrable != trig2->tgdeferrable)
1822 				return false;
1823 			if (trig1->tginitdeferred != trig2->tginitdeferred)
1824 				return false;
1825 			if (trig1->tgnargs != trig2->tgnargs)
1826 				return false;
1827 			if (trig1->tgnattr != trig2->tgnattr)
1828 				return false;
1829 			if (trig1->tgnattr > 0 &&
1830 				memcmp(trig1->tgattr, trig2->tgattr,
1831 					   trig1->tgnattr * sizeof(int16)) != 0)
1832 				return false;
1833 			for (j = 0; j < trig1->tgnargs; j++)
1834 				if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
1835 					return false;
1836 			if (trig1->tgqual == NULL && trig2->tgqual == NULL)
1837 				 /* ok */ ;
1838 			else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
1839 				return false;
1840 			else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
1841 				return false;
1842 		}
1843 	}
1844 	else if (trigdesc2 != NULL)
1845 		return false;
1846 	return true;
1847 }
1848 #endif   /* NOT_USED */
1849 
1850 /*
1851  * Call a trigger function.
1852  *
1853  *		trigdata: trigger descriptor.
1854  *		tgindx: trigger's index in finfo and instr arrays.
1855  *		finfo: array of cached trigger function call information.
1856  *		instr: optional array of EXPLAIN ANALYZE instrumentation state.
1857  *		per_tuple_context: memory context to execute the function in.
1858  *
1859  * Returns the tuple (or NULL) as returned by the function.
1860  */
1861 static HeapTuple
1862 ExecCallTriggerFunc(TriggerData *trigdata,
1863 					int tgindx,
1864 					FmgrInfo *finfo,
1865 					Instrumentation *instr,
1866 					MemoryContext per_tuple_context)
1867 {
1868 	FunctionCallInfoData fcinfo;
1869 	PgStat_FunctionCallUsage fcusage;
1870 	Datum		result;
1871 	MemoryContext oldContext;
1872 
1873 	finfo += tgindx;
1874 
1875 	/*
1876 	 * We cache fmgr lookup info, to avoid making the lookup again on each
1877 	 * call.
1878 	 */
1879 	if (finfo->fn_oid == InvalidOid)
1880 		fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
1881 
1882 	Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
1883 
1884 	/*
1885 	 * If doing EXPLAIN ANALYZE, start charging time to this trigger.
1886 	 */
1887 	if (instr)
1888 		InstrStartNode(instr + tgindx);
1889 
1890 	/*
1891 	 * Do the function evaluation in the per-tuple memory context, so that
1892 	 * leaked memory will be reclaimed once per tuple. Note in particular that
1893 	 * any new tuple created by the trigger function will live till the end of
1894 	 * the tuple cycle.
1895 	 */
1896 	oldContext = MemoryContextSwitchTo(per_tuple_context);
1897 
1898 	/*
1899 	 * Call the function, passing no arguments but setting a context.
1900 	 */
1901 	InitFunctionCallInfoData(fcinfo, finfo, 0,
1902 							 InvalidOid, (Node *) trigdata, NULL);
1903 
1904 	pgstat_init_function_usage(&fcinfo, &fcusage);
1905 
1906 	MyTriggerDepth++;
1907 	PG_TRY();
1908 	{
1909 		result = FunctionCallInvoke(&fcinfo);
1910 	}
1911 	PG_CATCH();
1912 	{
1913 		MyTriggerDepth--;
1914 		PG_RE_THROW();
1915 	}
1916 	PG_END_TRY();
1917 	MyTriggerDepth--;
1918 
1919 	pgstat_end_function_usage(&fcusage, true);
1920 
1921 	MemoryContextSwitchTo(oldContext);
1922 
1923 	/*
1924 	 * Trigger protocol allows function to return a null pointer, but NOT to
1925 	 * set the isnull result flag.
1926 	 */
1927 	if (fcinfo.isnull)
1928 		ereport(ERROR,
1929 				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
1930 				 errmsg("trigger function %u returned null value",
1931 						fcinfo.flinfo->fn_oid)));
1932 
1933 	/*
1934 	 * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
1935 	 * one "tuple returned" (really the number of firings).
1936 	 */
1937 	if (instr)
1938 		InstrStopNode(instr + tgindx, 1);
1939 
1940 	return (HeapTuple) DatumGetPointer(result);
1941 }
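
/*
 * For reference, a minimal C trigger function obeying the protocol above
 * could be shaped like this (illustrative sketch only; "my_trigger_func" is
 * a hypothetical name, not part of this module):
 *
 *		PG_FUNCTION_INFO_V1(my_trigger_func);
 *
 *		Datum
 *		my_trigger_func(PG_FUNCTION_ARGS)
 *		{
 *			TriggerData *trigdata = (TriggerData *) fcinfo->context;
 *
 *			if (!CALLED_AS_TRIGGER(fcinfo))
 *				elog(ERROR, "not called by trigger manager");
 *
 *			return PointerGetDatum(trigdata->tg_trigtuple);
 *		}
 *
 * Returning the passed-in tuple lets the operation proceed unchanged, while
 * returning PointerGetDatum(NULL), without setting the isnull flag, is how a
 * BEFORE ROW trigger requests "do nothing".
 */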
1942 
1943 void
1944 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
1945 {
1946 	TriggerDesc *trigdesc;
1947 	int			i;
1948 	TriggerData LocTriggerData;
1949 
1950 	trigdesc = relinfo->ri_TrigDesc;
1951 
1952 	if (trigdesc == NULL)
1953 		return;
1954 	if (!trigdesc->trig_insert_before_statement)
1955 		return;
1956 
1957 	LocTriggerData.type = T_TriggerData;
1958 	LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
1959 		TRIGGER_EVENT_BEFORE;
1960 	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
1961 	LocTriggerData.tg_trigtuple = NULL;
1962 	LocTriggerData.tg_newtuple = NULL;
1963 	LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
1964 	LocTriggerData.tg_newtuplebuf = InvalidBuffer;
1965 	for (i = 0; i < trigdesc->numtriggers; i++)
1966 	{
1967 		Trigger    *trigger = &trigdesc->triggers[i];
1968 		HeapTuple	newtuple;
1969 
1970 		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
1971 								  TRIGGER_TYPE_STATEMENT,
1972 								  TRIGGER_TYPE_BEFORE,
1973 								  TRIGGER_TYPE_INSERT))
1974 			continue;
1975 		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
1976 							NULL, NULL, NULL))
1977 			continue;
1978 
1979 		LocTriggerData.tg_trigger = trigger;
1980 		newtuple = ExecCallTriggerFunc(&LocTriggerData,
1981 									   i,
1982 									   relinfo->ri_TrigFunctions,
1983 									   relinfo->ri_TrigInstrument,
1984 									   GetPerTupleMemoryContext(estate));
1985 
1986 		if (newtuple)
1987 			ereport(ERROR,
1988 					(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
1989 				  errmsg("BEFORE STATEMENT trigger cannot return a value")));
1990 	}
1991 }
1992 
1993 void
1994 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo)
1995 {
1996 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
1997 
1998 	if (trigdesc && trigdesc->trig_insert_after_statement)
1999 		AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2000 							  false, NULL, NULL, NIL, NULL);
2001 }
2002 
2003 TupleTableSlot *
2004 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2005 					 TupleTableSlot *slot)
2006 {
2007 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2008 	HeapTuple	slottuple = ExecMaterializeSlot(slot);
2009 	HeapTuple	newtuple = slottuple;
2010 	HeapTuple	oldtuple;
2011 	TriggerData LocTriggerData;
2012 	int			i;
2013 
2014 	LocTriggerData.type = T_TriggerData;
2015 	LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2016 		TRIGGER_EVENT_ROW |
2017 		TRIGGER_EVENT_BEFORE;
2018 	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2019 	LocTriggerData.tg_newtuple = NULL;
2020 	LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2021 	for (i = 0; i < trigdesc->numtriggers; i++)
2022 	{
2023 		Trigger    *trigger = &trigdesc->triggers[i];
2024 
2025 		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2026 								  TRIGGER_TYPE_ROW,
2027 								  TRIGGER_TYPE_BEFORE,
2028 								  TRIGGER_TYPE_INSERT))
2029 			continue;
2030 		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2031 							NULL, NULL, newtuple))
2032 			continue;
2033 
2034 		LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2035 		LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2036 		LocTriggerData.tg_trigger = trigger;
2037 		newtuple = ExecCallTriggerFunc(&LocTriggerData,
2038 									   i,
2039 									   relinfo->ri_TrigFunctions,
2040 									   relinfo->ri_TrigInstrument,
2041 									   GetPerTupleMemoryContext(estate));
2042 		if (oldtuple != newtuple && oldtuple != slottuple)
2043 			heap_freetuple(oldtuple);
2044 		if (newtuple == NULL)
2045 			return NULL;		/* "do nothing" */
2046 	}
2047 
2048 	if (newtuple != slottuple)
2049 	{
2050 		/*
2051 		 * Return the modified tuple using the es_trig_tuple_slot.  We assume
2052 		 * the tuple was allocated in per-tuple memory context, and therefore
2053 		 * will go away by itself. The tuple table slot should not try to
2054 		 * clear it.
2055 		 */
2056 		TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2057 		TupleDesc	tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2058 
2059 		if (newslot->tts_tupleDescriptor != tupdesc)
2060 			ExecSetSlotDescriptor(newslot, tupdesc);
2061 		ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2062 		slot = newslot;
2063 	}
2064 	return slot;
2065 }
2066 
2067 void
2068 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2069 					 HeapTuple trigtuple, List *recheckIndexes)
2070 {
2071 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2072 
2073 	if (trigdesc && trigdesc->trig_insert_after_row)
2074 		AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2075 							  true, NULL, trigtuple, recheckIndexes, NULL);
2076 }
2077 
2078 TupleTableSlot *
2079 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2080 					 TupleTableSlot *slot)
2081 {
2082 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2083 	HeapTuple	slottuple = ExecMaterializeSlot(slot);
2084 	HeapTuple	newtuple = slottuple;
2085 	HeapTuple	oldtuple;
2086 	TriggerData LocTriggerData;
2087 	int			i;
2088 
2089 	LocTriggerData.type = T_TriggerData;
2090 	LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2091 		TRIGGER_EVENT_ROW |
2092 		TRIGGER_EVENT_INSTEAD;
2093 	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2094 	LocTriggerData.tg_newtuple = NULL;
2095 	LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2096 	for (i = 0; i < trigdesc->numtriggers; i++)
2097 	{
2098 		Trigger    *trigger = &trigdesc->triggers[i];
2099 
2100 		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2101 								  TRIGGER_TYPE_ROW,
2102 								  TRIGGER_TYPE_INSTEAD,
2103 								  TRIGGER_TYPE_INSERT))
2104 			continue;
2105 		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2106 							NULL, NULL, newtuple))
2107 			continue;
2108 
2109 		LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2110 		LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2111 		LocTriggerData.tg_trigger = trigger;
2112 		newtuple = ExecCallTriggerFunc(&LocTriggerData,
2113 									   i,
2114 									   relinfo->ri_TrigFunctions,
2115 									   relinfo->ri_TrigInstrument,
2116 									   GetPerTupleMemoryContext(estate));
2117 		if (oldtuple != newtuple && oldtuple != slottuple)
2118 			heap_freetuple(oldtuple);
2119 		if (newtuple == NULL)
2120 			return NULL;		/* "do nothing" */
2121 	}
2122 
2123 	if (newtuple != slottuple)
2124 	{
2125 		/*
2126 		 * Return the modified tuple using the es_trig_tuple_slot.  We assume
2127 		 * the tuple was allocated in per-tuple memory context, and therefore
2128 		 * will go away by itself. The tuple table slot should not try to
2129 		 * clear it.
2130 		 */
2131 		TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2132 		TupleDesc	tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2133 
2134 		if (newslot->tts_tupleDescriptor != tupdesc)
2135 			ExecSetSlotDescriptor(newslot, tupdesc);
2136 		ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2137 		slot = newslot;
2138 	}
2139 	return slot;
2140 }
2141 
2142 void
2143 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2144 {
2145 	TriggerDesc *trigdesc;
2146 	int			i;
2147 	TriggerData LocTriggerData;
2148 
2149 	trigdesc = relinfo->ri_TrigDesc;
2150 
2151 	if (trigdesc == NULL)
2152 		return;
2153 	if (!trigdesc->trig_delete_before_statement)
2154 		return;
2155 
2156 	LocTriggerData.type = T_TriggerData;
2157 	LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2158 		TRIGGER_EVENT_BEFORE;
2159 	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2160 	LocTriggerData.tg_trigtuple = NULL;
2161 	LocTriggerData.tg_newtuple = NULL;
2162 	LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2163 	LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2164 	for (i = 0; i < trigdesc->numtriggers; i++)
2165 	{
2166 		Trigger    *trigger = &trigdesc->triggers[i];
2167 		HeapTuple	newtuple;
2168 
2169 		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2170 								  TRIGGER_TYPE_STATEMENT,
2171 								  TRIGGER_TYPE_BEFORE,
2172 								  TRIGGER_TYPE_DELETE))
2173 			continue;
2174 		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2175 							NULL, NULL, NULL))
2176 			continue;
2177 
2178 		LocTriggerData.tg_trigger = trigger;
2179 		newtuple = ExecCallTriggerFunc(&LocTriggerData,
2180 									   i,
2181 									   relinfo->ri_TrigFunctions,
2182 									   relinfo->ri_TrigInstrument,
2183 									   GetPerTupleMemoryContext(estate));
2184 
2185 		if (newtuple)
2186 			ereport(ERROR,
2187 					(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2188 				  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2189 	}
2190 }
2191 
2192 void
2193 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2194 {
2195 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2196 
2197 	if (trigdesc && trigdesc->trig_delete_after_statement)
2198 		AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2199 							  false, NULL, NULL, NIL, NULL);
2200 }
2201 
2202 bool
2203 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2204 					 ResultRelInfo *relinfo,
2205 					 ItemPointer tupleid,
2206 					 HeapTuple fdw_trigtuple)
2207 {
2208 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2209 	bool		result = true;
2210 	TriggerData LocTriggerData;
2211 	HeapTuple	trigtuple;
2212 	HeapTuple	newtuple;
2213 	TupleTableSlot *newSlot;
2214 	int			i;
2215 
2216 	Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2217 	if (fdw_trigtuple == NULL)
2218 	{
2219 		trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2220 									   LockTupleExclusive, &newSlot);
2221 		if (trigtuple == NULL)
2222 			return false;
2223 	}
2224 	else
2225 		trigtuple = fdw_trigtuple;
2226 
2227 	LocTriggerData.type = T_TriggerData;
2228 	LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2229 		TRIGGER_EVENT_ROW |
2230 		TRIGGER_EVENT_BEFORE;
2231 	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2232 	LocTriggerData.tg_newtuple = NULL;
2233 	LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2234 	for (i = 0; i < trigdesc->numtriggers; i++)
2235 	{
2236 		Trigger    *trigger = &trigdesc->triggers[i];
2237 
2238 		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2239 								  TRIGGER_TYPE_ROW,
2240 								  TRIGGER_TYPE_BEFORE,
2241 								  TRIGGER_TYPE_DELETE))
2242 			continue;
2243 		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2244 							NULL, trigtuple, NULL))
2245 			continue;
2246 
2247 		LocTriggerData.tg_trigtuple = trigtuple;
2248 		LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2249 		LocTriggerData.tg_trigger = trigger;
2250 		newtuple = ExecCallTriggerFunc(&LocTriggerData,
2251 									   i,
2252 									   relinfo->ri_TrigFunctions,
2253 									   relinfo->ri_TrigInstrument,
2254 									   GetPerTupleMemoryContext(estate));
2255 		if (newtuple == NULL)
2256 		{
2257 			result = false;		/* tell caller to suppress delete */
2258 			break;
2259 		}
2260 		if (newtuple != trigtuple)
2261 			heap_freetuple(newtuple);
2262 	}
2263 	if (trigtuple != fdw_trigtuple)
2264 		heap_freetuple(trigtuple);
2265 
2266 	return result;
2267 }
2268 
2269 void
2270 ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2271 					 ItemPointer tupleid,
2272 					 HeapTuple fdw_trigtuple)
2273 {
2274 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2275 
2276 	if (trigdesc && trigdesc->trig_delete_after_row)
2277 	{
2278 		HeapTuple	trigtuple;
2279 
2280 		Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2281 		if (fdw_trigtuple == NULL)
2282 			trigtuple = GetTupleForTrigger(estate,
2283 										   NULL,
2284 										   relinfo,
2285 										   tupleid,
2286 										   LockTupleExclusive,
2287 										   NULL);
2288 		else
2289 			trigtuple = fdw_trigtuple;
2290 
2291 		AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2292 							  true, trigtuple, NULL, NIL, NULL);
2293 		if (trigtuple != fdw_trigtuple)
2294 			heap_freetuple(trigtuple);
2295 	}
2296 }
2297 
2298 bool
2299 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2300 					 HeapTuple trigtuple)
2301 {
2302 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2303 	TriggerData LocTriggerData;
2304 	HeapTuple	rettuple;
2305 	int			i;
2306 
2307 	LocTriggerData.type = T_TriggerData;
2308 	LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2309 		TRIGGER_EVENT_ROW |
2310 		TRIGGER_EVENT_INSTEAD;
2311 	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2312 	LocTriggerData.tg_newtuple = NULL;
2313 	LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2314 	for (i = 0; i < trigdesc->numtriggers; i++)
2315 	{
2316 		Trigger    *trigger = &trigdesc->triggers[i];
2317 
2318 		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2319 								  TRIGGER_TYPE_ROW,
2320 								  TRIGGER_TYPE_INSTEAD,
2321 								  TRIGGER_TYPE_DELETE))
2322 			continue;
2323 		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2324 							NULL, trigtuple, NULL))
2325 			continue;
2326 
2327 		LocTriggerData.tg_trigtuple = trigtuple;
2328 		LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2329 		LocTriggerData.tg_trigger = trigger;
2330 		rettuple = ExecCallTriggerFunc(&LocTriggerData,
2331 									   i,
2332 									   relinfo->ri_TrigFunctions,
2333 									   relinfo->ri_TrigInstrument,
2334 									   GetPerTupleMemoryContext(estate));
2335 		if (rettuple == NULL)
2336 			return false;		/* Delete was suppressed */
2337 		if (rettuple != trigtuple)
2338 			heap_freetuple(rettuple);
2339 	}
2340 	return true;
2341 }
2342 
2343 void
2344 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2345 {
2346 	TriggerDesc *trigdesc;
2347 	int			i;
2348 	TriggerData LocTriggerData;
2349 	Bitmapset  *updatedCols;
2350 
2351 	trigdesc = relinfo->ri_TrigDesc;
2352 
2353 	if (trigdesc == NULL)
2354 		return;
2355 	if (!trigdesc->trig_update_before_statement)
2356 		return;
2357 
2358 	updatedCols = GetUpdatedColumns(relinfo, estate);
2359 
2360 	LocTriggerData.type = T_TriggerData;
2361 	LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2362 		TRIGGER_EVENT_BEFORE;
2363 	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2364 	LocTriggerData.tg_trigtuple = NULL;
2365 	LocTriggerData.tg_newtuple = NULL;
2366 	LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2367 	LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2368 	for (i = 0; i < trigdesc->numtriggers; i++)
2369 	{
2370 		Trigger    *trigger = &trigdesc->triggers[i];
2371 		HeapTuple	newtuple;
2372 
2373 		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2374 								  TRIGGER_TYPE_STATEMENT,
2375 								  TRIGGER_TYPE_BEFORE,
2376 								  TRIGGER_TYPE_UPDATE))
2377 			continue;
2378 		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2379 							updatedCols, NULL, NULL))
2380 			continue;
2381 
2382 		LocTriggerData.tg_trigger = trigger;
2383 		newtuple = ExecCallTriggerFunc(&LocTriggerData,
2384 									   i,
2385 									   relinfo->ri_TrigFunctions,
2386 									   relinfo->ri_TrigInstrument,
2387 									   GetPerTupleMemoryContext(estate));
2388 
2389 		if (newtuple)
2390 			ereport(ERROR,
2391 					(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2392 				  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2393 	}
2394 }
2395 
2396 void
2397 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2398 {
2399 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2400 
2401 	if (trigdesc && trigdesc->trig_update_after_statement)
2402 		AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2403 							  false, NULL, NULL, NIL,
2404 							  GetUpdatedColumns(relinfo, estate));
2405 }
2406 
2407 TupleTableSlot *
2408 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2409 					 ResultRelInfo *relinfo,
2410 					 ItemPointer tupleid,
2411 					 HeapTuple fdw_trigtuple,
2412 					 TupleTableSlot *slot)
2413 {
2414 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2415 	HeapTuple	slottuple = ExecMaterializeSlot(slot);
2416 	HeapTuple	newtuple = slottuple;
2417 	TriggerData LocTriggerData;
2418 	HeapTuple	trigtuple;
2419 	HeapTuple	oldtuple;
2420 	TupleTableSlot *newSlot;
2421 	int			i;
2422 	Bitmapset  *updatedCols;
2423 	LockTupleMode lockmode;
2424 
2425 	/* Determine lock mode to use */
2426 	lockmode = ExecUpdateLockMode(estate, relinfo);
2427 
2428 	Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2429 	if (fdw_trigtuple == NULL)
2430 	{
2431 		/* get a copy of the on-disk tuple we are planning to update */
2432 		trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2433 									   lockmode, &newSlot);
2434 		if (trigtuple == NULL)
2435 			return NULL;		/* cancel the update action */
2436 	}
2437 	else
2438 	{
2439 		trigtuple = fdw_trigtuple;
2440 		newSlot = NULL;
2441 	}
2442 
2443 	/*
2444 	 * In READ COMMITTED isolation level it's possible that the target tuple
2445 	 * was changed by a concurrent update.  In that case we have a raw subplan
2446 	 * output tuple in newSlot, and need to run it through the junk filter to
2447 	 * produce an insertable tuple.
2448 	 *
2449 	 * Caution: more than likely, the passed-in slot is the same as the
2450 	 * junkfilter's output slot, so we are clobbering the original value of
2451 	 * slottuple by doing the filtering.  This is OK since neither we nor our
2452 	 * caller have any more interest in the prior contents of that slot.
2453 	 */
2454 	if (newSlot != NULL)
2455 	{
2456 		slot = ExecFilterJunk(relinfo->ri_junkFilter, newSlot);
2457 		slottuple = ExecMaterializeSlot(slot);
2458 		newtuple = slottuple;
2459 	}
2460 
2461 
2462 	LocTriggerData.type = T_TriggerData;
2463 	LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2464 		TRIGGER_EVENT_ROW |
2465 		TRIGGER_EVENT_BEFORE;
2466 	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2467 	updatedCols = GetUpdatedColumns(relinfo, estate);
2468 	for (i = 0; i < trigdesc->numtriggers; i++)
2469 	{
2470 		Trigger    *trigger = &trigdesc->triggers[i];
2471 
2472 		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2473 								  TRIGGER_TYPE_ROW,
2474 								  TRIGGER_TYPE_BEFORE,
2475 								  TRIGGER_TYPE_UPDATE))
2476 			continue;
2477 		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2478 							updatedCols, trigtuple, newtuple))
2479 			continue;
2480 
2481 		LocTriggerData.tg_trigtuple = trigtuple;
2482 		LocTriggerData.tg_newtuple = oldtuple = newtuple;
2483 		LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2484 		LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2485 		LocTriggerData.tg_trigger = trigger;
2486 		newtuple = ExecCallTriggerFunc(&LocTriggerData,
2487 									   i,
2488 									   relinfo->ri_TrigFunctions,
2489 									   relinfo->ri_TrigInstrument,
2490 									   GetPerTupleMemoryContext(estate));
2491 		if (oldtuple != newtuple &&
2492 			oldtuple != slottuple &&
2493 			oldtuple != trigtuple)
2494 			heap_freetuple(oldtuple);
2495 		if (newtuple == NULL)
2496 		{
2497 			if (trigtuple != fdw_trigtuple)
2498 				heap_freetuple(trigtuple);
2499 			return NULL;		/* "do nothing" */
2500 		}
2501 	}
2502 	if (trigtuple != fdw_trigtuple && trigtuple != newtuple)
2503 		heap_freetuple(trigtuple);
2504 
2505 	if (newtuple != slottuple)
2506 	{
2507 		/*
2508 		 * Return the modified tuple using the es_trig_tuple_slot.  We assume
2509 		 * the tuple was allocated in per-tuple memory context, and therefore
2510 		 * will go away by itself. The tuple table slot should not try to
2511 		 * clear it.
2512 		 */
2513 		TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2514 		TupleDesc	tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2515 
2516 		if (newslot->tts_tupleDescriptor != tupdesc)
2517 			ExecSetSlotDescriptor(newslot, tupdesc);
2518 		ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2519 		slot = newslot;
2520 	}
2521 	return slot;
2522 }
2523 
2524 void
2525 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2526 					 ItemPointer tupleid,
2527 					 HeapTuple fdw_trigtuple,
2528 					 HeapTuple newtuple,
2529 					 List *recheckIndexes)
2530 {
2531 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2532 
2533 	if (trigdesc && trigdesc->trig_update_after_row)
2534 	{
2535 		HeapTuple	trigtuple;
2536 
2537 		Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2538 		if (fdw_trigtuple == NULL)
2539 			trigtuple = GetTupleForTrigger(estate,
2540 										   NULL,
2541 										   relinfo,
2542 										   tupleid,
2543 										   LockTupleExclusive,
2544 										   NULL);
2545 		else
2546 			trigtuple = fdw_trigtuple;
2547 
2548 		AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2549 							  true, trigtuple, newtuple, recheckIndexes,
2550 							  GetUpdatedColumns(relinfo, estate));
2551 		if (trigtuple != fdw_trigtuple)
2552 			heap_freetuple(trigtuple);
2553 	}
2554 }
2555 
2556 TupleTableSlot *
2557 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2558 					 HeapTuple trigtuple, TupleTableSlot *slot)
2559 {
2560 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2561 	HeapTuple	slottuple = ExecMaterializeSlot(slot);
2562 	HeapTuple	newtuple = slottuple;
2563 	TriggerData LocTriggerData;
2564 	HeapTuple	oldtuple;
2565 	int			i;
2566 
2567 	LocTriggerData.type = T_TriggerData;
2568 	LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2569 		TRIGGER_EVENT_ROW |
2570 		TRIGGER_EVENT_INSTEAD;
2571 	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2572 	for (i = 0; i < trigdesc->numtriggers; i++)
2573 	{
2574 		Trigger    *trigger = &trigdesc->triggers[i];
2575 
2576 		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2577 								  TRIGGER_TYPE_ROW,
2578 								  TRIGGER_TYPE_INSTEAD,
2579 								  TRIGGER_TYPE_UPDATE))
2580 			continue;
2581 		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2582 							NULL, trigtuple, newtuple))
2583 			continue;
2584 
2585 		LocTriggerData.tg_trigtuple = trigtuple;
2586 		LocTriggerData.tg_newtuple = oldtuple = newtuple;
2587 		LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2588 		LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2589 		LocTriggerData.tg_trigger = trigger;
2590 		newtuple = ExecCallTriggerFunc(&LocTriggerData,
2591 									   i,
2592 									   relinfo->ri_TrigFunctions,
2593 									   relinfo->ri_TrigInstrument,
2594 									   GetPerTupleMemoryContext(estate));
2595 		if (oldtuple != newtuple && oldtuple != slottuple)
2596 			heap_freetuple(oldtuple);
2597 		if (newtuple == NULL)
2598 			return NULL;		/* "do nothing" */
2599 	}
2600 
2601 	if (newtuple != slottuple)
2602 	{
2603 		/*
2604 		 * Return the modified tuple using the es_trig_tuple_slot.  We assume
2605 		 * the tuple was allocated in per-tuple memory context, and therefore
2606 		 * will go away by itself. The tuple table slot should not try to
2607 		 * clear it.
2608 		 */
2609 		TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2610 		TupleDesc	tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2611 
2612 		if (newslot->tts_tupleDescriptor != tupdesc)
2613 			ExecSetSlotDescriptor(newslot, tupdesc);
2614 		ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2615 		slot = newslot;
2616 	}
2617 	return slot;
2618 }
2619 
2620 void
2621 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2622 {
2623 	TriggerDesc *trigdesc;
2624 	int			i;
2625 	TriggerData LocTriggerData;
2626 
2627 	trigdesc = relinfo->ri_TrigDesc;
2628 
2629 	if (trigdesc == NULL)
2630 		return;
2631 	if (!trigdesc->trig_truncate_before_statement)
2632 		return;
2633 
2634 	LocTriggerData.type = T_TriggerData;
2635 	LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
2636 		TRIGGER_EVENT_BEFORE;
2637 	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2638 	LocTriggerData.tg_trigtuple = NULL;
2639 	LocTriggerData.tg_newtuple = NULL;
2640 	LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2641 	LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2642 	for (i = 0; i < trigdesc->numtriggers; i++)
2643 	{
2644 		Trigger    *trigger = &trigdesc->triggers[i];
2645 		HeapTuple	newtuple;
2646 
2647 		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2648 								  TRIGGER_TYPE_STATEMENT,
2649 								  TRIGGER_TYPE_BEFORE,
2650 								  TRIGGER_TYPE_TRUNCATE))
2651 			continue;
2652 		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2653 							NULL, NULL, NULL))
2654 			continue;
2655 
2656 		LocTriggerData.tg_trigger = trigger;
2657 		newtuple = ExecCallTriggerFunc(&LocTriggerData,
2658 									   i,
2659 									   relinfo->ri_TrigFunctions,
2660 									   relinfo->ri_TrigInstrument,
2661 									   GetPerTupleMemoryContext(estate));
2662 
2663 		if (newtuple)
2664 			ereport(ERROR,
2665 					(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2666 				  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2667 	}
2668 }
2669 
2670 void
2671 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2672 {
2673 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2674 
2675 	if (trigdesc && trigdesc->trig_truncate_after_statement)
2676 		AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_TRUNCATE,
2677 							  false, NULL, NULL, NIL, NULL);
2678 }
2679 
2680 
2681 static HeapTuple
2682 GetTupleForTrigger(EState *estate,
2683 				   EPQState *epqstate,
2684 				   ResultRelInfo *relinfo,
2685 				   ItemPointer tid,
2686 				   LockTupleMode lockmode,
2687 				   TupleTableSlot **newSlot)
2688 {
2689 	Relation	relation = relinfo->ri_RelationDesc;
2690 	HeapTupleData tuple;
2691 	HeapTuple	result;
2692 	Buffer		buffer;
2693 
2694 	if (newSlot != NULL)
2695 	{
2696 		HTSU_Result test;
2697 		HeapUpdateFailureData hufd;
2698 
2699 		*newSlot = NULL;
2700 
2701 		/* caller must pass an epqstate if EvalPlanQual is possible */
2702 		Assert(epqstate != NULL);
2703 
2704 		/*
2705 		 * lock tuple for update
2706 		 */
2707 ltrmark:;
2708 		tuple.t_self = *tid;
2709 		test = heap_lock_tuple(relation, &tuple,
2710 							   estate->es_output_cid,
2711 							   lockmode, LockWaitBlock,
2712 							   false, &buffer, &hufd);
2713 		switch (test)
2714 		{
2715 			case HeapTupleSelfUpdated:
2716 
2717 				/*
2718 				 * The target tuple was already updated or deleted by the
2719 				 * current command, or by a later command in the current
2720 				 * transaction.  We ignore the tuple in the former case, and
2721 				 * throw error in the latter case, for the same reasons
2722 				 * enumerated in ExecUpdate and ExecDelete in
2723 				 * nodeModifyTable.c.
2724 				 */
2725 				if (hufd.cmax != estate->es_output_cid)
2726 					ereport(ERROR,
2727 							(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2728 							 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2729 							 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2730 
2731 				/* treat it as deleted; do not process */
2732 				ReleaseBuffer(buffer);
2733 				return NULL;
2734 
2735 			case HeapTupleMayBeUpdated:
2736 				break;
2737 
2738 			case HeapTupleUpdated:
2739 				ReleaseBuffer(buffer);
2740 				if (IsolationUsesXactSnapshot())
2741 					ereport(ERROR,
2742 							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2743 							 errmsg("could not serialize access due to concurrent update")));
2744 				if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2745 				{
2746 					/* it was updated, so look at the updated version */
2747 					TupleTableSlot *epqslot;
2748 
2749 					epqslot = EvalPlanQual(estate,
2750 										   epqstate,
2751 										   relation,
2752 										   relinfo->ri_RangeTableIndex,
2753 										   lockmode,
2754 										   &hufd.ctid,
2755 										   hufd.xmax);
2756 					if (!TupIsNull(epqslot))
2757 					{
2758 						*tid = hufd.ctid;
2759 						*newSlot = epqslot;
2760 
2761 						/*
2762 						 * EvalPlanQual already locked the tuple, but we
2763 						 * re-call heap_lock_tuple anyway as an easy way of
2764 						 * re-fetching the correct tuple.  Speed is hardly a
2765 						 * criterion in this path anyhow.
2766 						 */
2767 						goto ltrmark;
2768 					}
2769 				}
2770 
2771 				/*
2772 				 * If the tuple was deleted, or EvalPlanQual failed for the
2773 				 * updated tuple, we must not process this tuple!
2774 				 */
2775 				return NULL;
2776 
2777 			case HeapTupleInvisible:
2778 				elog(ERROR, "attempted to lock invisible tuple");
2779 
2780 			default:
2781 				ReleaseBuffer(buffer);
2782 				elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
2783 				return NULL;	/* keep compiler quiet */
2784 		}
2785 	}
2786 	else
2787 	{
2788 		Page		page;
2789 		ItemId		lp;
2790 
2791 		buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2792 
2793 		/*
2794 		 * Although we already know this tuple is valid, we must lock the
2795 		 * buffer to ensure that no one has a buffer cleanup lock; otherwise
2796 		 * they might move the tuple while we try to copy it.  But we can
2797 		 * release the lock before actually doing the heap_copytuple call,
2798 		 * since holding pin is sufficient to prevent anyone from getting a
2799 		 * cleanup lock they don't already hold.
2800 		 */
2801 		LockBuffer(buffer, BUFFER_LOCK_SHARE);
2802 
2803 		page = BufferGetPage(buffer);
2804 		lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2805 
2806 		Assert(ItemIdIsNormal(lp));
2807 
2808 		tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2809 		tuple.t_len = ItemIdGetLength(lp);
2810 		tuple.t_self = *tid;
2811 		tuple.t_tableOid = RelationGetRelid(relation);
2812 
2813 		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2814 	}
2815 
2816 	result = heap_copytuple(&tuple);
2817 	ReleaseBuffer(buffer);
2818 
2819 	return result;
2820 }
2821 
2822 /*
2823  * Is trigger enabled to fire?
2824  */
2825 static bool
2826 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
2827 			   Trigger *trigger, TriggerEvent event,
2828 			   Bitmapset *modifiedCols,
2829 			   HeapTuple oldtup, HeapTuple newtup)
2830 {
2831 	/* Check replication-role-dependent enable state */
2832 	if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
2833 	{
2834 		if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
2835 			trigger->tgenabled == TRIGGER_DISABLED)
2836 			return false;
2837 	}
2838 	else	/* ORIGIN or LOCAL role */
2839 	{
2840 		if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
2841 			trigger->tgenabled == TRIGGER_DISABLED)
2842 			return false;
2843 	}
2844 
2845 	/*
2846 	 * Check for column-specific trigger (only possible for UPDATE, and in
2847 	 * fact we *must* ignore tgattr for other event types)
2848 	 */
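	/*
	 * (Illustrative: tgattr holds the column numbers from a trigger's
	 * "UPDATE OF column, ..." list, e.g. a hypothetical
	 * "CREATE TRIGGER ... AFTER UPDATE OF price ON products ...";
	 * such a trigger should fire only when at least one listed column
	 * appears in the UPDATE's set of modified columns.)
	 */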
2849 	if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
2850 	{
2851 		int			i;
2852 		bool		modified;
2853 
2854 		modified = false;
2855 		for (i = 0; i < trigger->tgnattr; i++)
2856 		{
2857 			if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
2858 							  modifiedCols))
2859 			{
2860 				modified = true;
2861 				break;
2862 			}
2863 		}
2864 		if (!modified)
2865 			return false;
2866 	}
2867 
2868 	/* Check for WHEN clause */
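	/*
	 * (Illustrative: tgqual is the stored node-string form of a trigger's
	 * WHEN expression, e.g. from a hypothetical
	 * "CREATE TRIGGER ... FOR EACH ROW
	 *  WHEN (OLD.balance IS DISTINCT FROM NEW.balance) EXECUTE PROCEDURE ...")
	 */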
2869 	if (trigger->tgqual)
2870 	{
2871 		TupleDesc	tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2872 		List	  **predicate;
2873 		ExprContext *econtext;
2874 		TupleTableSlot *oldslot = NULL;
2875 		TupleTableSlot *newslot = NULL;
2876 		MemoryContext oldContext;
2877 		int			i;
2878 
2879 		Assert(estate != NULL);
2880 
2881 		/*
2882 		 * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
2883 		 * matching element of relinfo->ri_TrigWhenExprs[]
2884 		 */
2885 		i = trigger - relinfo->ri_TrigDesc->triggers;
2886 		predicate = &relinfo->ri_TrigWhenExprs[i];
2887 
2888 		/*
2889 		 * If first time through for this WHEN expression, build expression
2890 		 * nodetrees for it.  Keep them in the per-query memory context so
2891 		 * they'll survive throughout the query.
2892 		 */
2893 		if (*predicate == NIL)
2894 		{
2895 			Node	   *tgqual;
2896 
2897 			oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
2898 			tgqual = stringToNode(trigger->tgqual);
2899 			/* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
2900 			ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
2901 			ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
2902 			/* ExecQual wants implicit-AND form */
2903 			tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
2904 			*predicate = (List *) ExecPrepareExpr((Expr *) tgqual, estate);
2905 			MemoryContextSwitchTo(oldContext);
2906 		}
2907 
2908 		/*
2909 		 * We will use the EState's per-tuple context for evaluating WHEN
2910 		 * expressions (creating it if it's not already there).
2911 		 */
2912 		econtext = GetPerTupleExprContext(estate);
2913 
2914 		/*
2915 		 * Put OLD and NEW tuples into tupleslots for expression evaluation.
2916 		 * These slots can be shared across the whole estate, but be careful
2917 		 * that they have the current resultrel's tupdesc.
2918 		 */
2919 		if (HeapTupleIsValid(oldtup))
2920 		{
2921 			if (estate->es_trig_oldtup_slot == NULL)
2922 			{
2923 				oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
2924 				estate->es_trig_oldtup_slot = ExecInitExtraTupleSlot(estate);
2925 				MemoryContextSwitchTo(oldContext);
2926 			}
2927 			oldslot = estate->es_trig_oldtup_slot;
2928 			if (oldslot->tts_tupleDescriptor != tupdesc)
2929 				ExecSetSlotDescriptor(oldslot, tupdesc);
2930 			ExecStoreTuple(oldtup, oldslot, InvalidBuffer, false);
2931 		}
2932 		if (HeapTupleIsValid(newtup))
2933 		{
2934 			if (estate->es_trig_newtup_slot == NULL)
2935 			{
2936 				oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
2937 				estate->es_trig_newtup_slot = ExecInitExtraTupleSlot(estate);
2938 				MemoryContextSwitchTo(oldContext);
2939 			}
2940 			newslot = estate->es_trig_newtup_slot;
2941 			if (newslot->tts_tupleDescriptor != tupdesc)
2942 				ExecSetSlotDescriptor(newslot, tupdesc);
2943 			ExecStoreTuple(newtup, newslot, InvalidBuffer, false);
2944 		}
2945 
2946 		/*
2947 		 * Finally evaluate the expression, making the old and/or new tuples
2948 		 * available as INNER_VAR/OUTER_VAR respectively.
2949 		 */
2950 		econtext->ecxt_innertuple = oldslot;
2951 		econtext->ecxt_outertuple = newslot;
2952 		if (!ExecQual(*predicate, econtext, false))
2953 			return false;
2954 	}
2955 
2956 	return true;
2957 }
2958 
2959 
2960 /* ----------
2961  * After-trigger stuff
2962  *
2963  * The AfterTriggersData struct holds data about pending AFTER trigger events
2964  * during the current transaction tree.  (BEFORE triggers are fired
2965  * immediately so we don't need any persistent state about them.)  The struct
2966  * and most of its subsidiary data are kept in TopTransactionContext; however
2967  * the individual event records are kept in a separate sub-context.  This is
2968  * done mainly so that it's easy to tell from a memory context dump how much
2969  * space is being eaten by trigger events.
2970  *
2971  * Because the list of pending events can grow large, we go to some
2972  * considerable effort to minimize per-event memory consumption.  The event
2973  * records are grouped into chunks and common data for similar events in the
2974  * same chunk is only stored once.
2975  *
2976  * XXX We need to be able to save the per-event data in a file if it grows too
2977  * large.
2978  * ----------
2979  */
2980 
2981 /* Per-trigger SET CONSTRAINT status */
2982 typedef struct SetConstraintTriggerData
2983 {
2984 	Oid			sct_tgoid;
2985 	bool		sct_tgisdeferred;
2986 } SetConstraintTriggerData;
2987 
2988 typedef struct SetConstraintTriggerData *SetConstraintTrigger;
2989 
2990 /*
2991  * SET CONSTRAINT intra-transaction status.
2992  *
2993  * We make this a single palloc'd object so it can be copied and freed easily.
2994  *
2995  * all_isset and all_isdeferred are used to keep track
2996  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
2997  *
2998  * trigstates[] stores per-trigger tgisdeferred settings.
2999  */
3000 typedef struct SetConstraintStateData
3001 {
3002 	bool		all_isset;
3003 	bool		all_isdeferred;
3004 	int			numstates;		/* number of trigstates[] entries in use */
3005 	int			numalloc;		/* allocated size of trigstates[] */
3006 	SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3007 } SetConstraintStateData;
3008 
3009 typedef SetConstraintStateData *SetConstraintState;
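
/*
 * Illustrative note: this state records the effects of commands such as
 *
 *		SET CONSTRAINTS ALL DEFERRED;
 *		SET CONSTRAINTS my_constraint IMMEDIATE;
 *
 * The ALL form sets all_isset/all_isdeferred; the named form adds a
 * trigstates[] entry for each trigger implementing that constraint.
 * ("my_constraint" is a made-up example name.)
 */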
3010 
3011 
3012 /*
3013  * Per-trigger-event data
3014  *
3015  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3016  * status bits and up to two tuple CTIDs.  Each event record also has an
3017  * associated AfterTriggerSharedData that is shared across all instances of
3018  * similar events within a "chunk".
3019  *
3020  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3021  * fields.  Updates of regular tables use two; inserts and deletes of regular
3022  * tables use one; foreign tables always use zero and save the tuple(s) to a
3023  * tuplestore.  AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3024  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3025  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3026  * tuple(s).  This permits storing tuples once regardless of the number of
3027  * row-level triggers on a foreign table.
3028  *
3029  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3030  * require no ctid field.  We lack the flag bit space to neatly represent that
3031  * distinct case, and it seems unlikely to be worth much trouble.
3032  *
3033  * Note: ats_firing_id is initially zero and is set to something else when
3034  * AFTER_TRIGGER_IN_PROGRESS is set.  It indicates which trigger firing
3035  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3036  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3037  * because all instances of the same type of event in a given event list will
3038  * be fired at the same time, if they were queued between the same firing
3039  * cycles.  So we need only ensure that ats_firing_id is zero when attaching
3040  * a new event to an existing AfterTriggerSharedData record.
3041  */
3042 typedef uint32 TriggerFlags;
3043 
3044 #define AFTER_TRIGGER_OFFSET			0x0FFFFFFF		/* must be low-order
3045 														 * bits */
3046 #define AFTER_TRIGGER_DONE				0x10000000
3047 #define AFTER_TRIGGER_IN_PROGRESS		0x20000000
3048 /* bits describing the size and tuple sources of this event */
3049 #define AFTER_TRIGGER_FDW_REUSE			0x00000000
3050 #define AFTER_TRIGGER_FDW_FETCH			0x80000000
3051 #define AFTER_TRIGGER_1CTID				0x40000000
3052 #define AFTER_TRIGGER_2CTID				0xC0000000
3053 #define AFTER_TRIGGER_TUP_BITS			0xC0000000
3054 
3055 typedef struct AfterTriggerSharedData *AfterTriggerShared;
3056 
3057 typedef struct AfterTriggerSharedData
3058 {
3059 	TriggerEvent ats_event;		/* event type indicator, see trigger.h */
3060 	Oid			ats_tgoid;		/* the trigger's ID */
3061 	Oid			ats_relid;		/* the relation it's on */
3062 	CommandId	ats_firing_id;	/* ID for firing cycle */
3063 } AfterTriggerSharedData;
3064 
3065 typedef struct AfterTriggerEventData *AfterTriggerEvent;
3066 
3067 typedef struct AfterTriggerEventData
3068 {
3069 	TriggerFlags ate_flags;		/* status bits and offset to shared data */
3070 	ItemPointerData ate_ctid1;	/* inserted, deleted, or old updated tuple */
3071 	ItemPointerData ate_ctid2;	/* new updated tuple */
3072 } AfterTriggerEventData;
3073 
3074 /* AfterTriggerEventData, minus ate_ctid2 */
3075 typedef struct AfterTriggerEventDataOneCtid
3076 {
3077 	TriggerFlags ate_flags;		/* status bits and offset to shared data */
3078 	ItemPointerData ate_ctid1;	/* inserted, deleted, or old updated tuple */
3079 }	AfterTriggerEventDataOneCtid;
3080 
3081 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
3082 typedef struct AfterTriggerEventDataZeroCtids
3083 {
3084 	TriggerFlags ate_flags;		/* status bits and offset to shared data */
3085 }	AfterTriggerEventDataZeroCtids;
3086 
3087 #define SizeofTriggerEvent(evt) \
3088 	(((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3089 	 sizeof(AfterTriggerEventData) : \
3090 		((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3091 		sizeof(AfterTriggerEventDataOneCtid) : \
3092 			sizeof(AfterTriggerEventDataZeroCtids))
3093 
3094 #define GetTriggerSharedData(evt) \
3095 	((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3096 
3097 /*
3098  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3099  * larger chunks (a slightly more sophisticated version of an expansible
3100  * array).  The space between CHUNK_DATA_START and freeptr is occupied by
3101  * AfterTriggerEventData records; the space between endfree and endptr is
3102  * occupied by AfterTriggerSharedData records.
3103  */
3104 typedef struct AfterTriggerEventChunk
3105 {
3106 	struct AfterTriggerEventChunk *next;		/* list link */
3107 	char	   *freeptr;		/* start of free space in chunk */
3108 	char	   *endfree;		/* end of free space in chunk */
3109 	char	   *endptr;			/* end of chunk */
3110 	/* event data follows here */
3111 } AfterTriggerEventChunk;
3112 
3113 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
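
/*
 * Illustrative layout of a single chunk, per the comment above:
 *
 *	+--------+------------------+--------------+----------------------+
 *	| header | event records -> |  free space  | <- shared-data records
 *	+--------+------------------+--------------+----------------------+
 *	^chunk   ^CHUNK_DATA_START  ^freeptr       ^endfree               ^endptr
 */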
3114 
3115 /* A list of events */
3116 typedef struct AfterTriggerEventList
3117 {
3118 	AfterTriggerEventChunk *head;
3119 	AfterTriggerEventChunk *tail;
3120 	char	   *tailfree;		/* freeptr of tail chunk */
3121 } AfterTriggerEventList;
3122 
3123 /* Macros to help in iterating over a list of events */
3124 #define for_each_chunk(cptr, evtlist) \
3125 	for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3126 #define for_each_event(eptr, cptr) \
3127 	for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3128 		 (char *) eptr < (cptr)->freeptr; \
3129 		 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3130 /* Use this if no special per-chunk processing is needed */
3131 #define for_each_event_chunk(eptr, cptr, evtlist) \
3132 	for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
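
/*
 * Typical use of these macros (illustrative sketch, mirroring how the event
 * lists are scanned elsewhere in this module):
 *
 *		AfterTriggerEventChunk *chunk;
 *		AfterTriggerEvent event;
 *
 *		for_each_event_chunk(event, chunk, afterTriggers.events)
 *		{
 *			AfterTriggerShared evtshared = GetTriggerSharedData(event);
 *
 *			... inspect event->ate_flags and evtshared->ats_tgoid ...
 *		}
 */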
3133 
3134 
3135 /*
3136  * All per-transaction data for the AFTER TRIGGERS module.
3137  *
3138  * AfterTriggersData has the following fields:
3139  *
3140  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3141  * We mark firable events with the current firing cycle's ID so that we can
3142  * tell which ones to work on.  This ensures sane behavior if a trigger
3143  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3144  * only fire those events that weren't already scheduled for firing.
3145  *
3146  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3147  * This is saved and restored across failed subtransactions.
3148  *
3149  * events is the current list of deferred events.  This is global across
3150  * all subtransactions of the current transaction.  In a subtransaction
3151  * abort, we know that the events added by the subtransaction are at the
3152  * end of the list, so it is relatively easy to discard them.  The event
3153  * list chunks themselves are stored in event_cxt.
3154  *
3155  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3156  * (-1 when the stack is empty).
3157  *
3158  * query_stack[query_depth] is a list of AFTER trigger events queued by the
3159  * current query (and the query_stack entries below it are lists of trigger
3160  * events queued by calling queries).  None of these are valid until the
3161  * matching AfterTriggerEndQuery call occurs.  At that point we fire
3162  * immediate-mode triggers, and append any deferred events to the main events
3163  * list.
3164  *
3165  * fdw_tuplestores[query_depth] is a tuplestore containing the foreign tuples
3166  * needed for the current query.
3167  *
3168  * maxquerydepth is just the allocated length of query_stack and
3169  * fdw_tuplestores.
3170  *
3171  * state_stack is a stack of pointers to saved copies of the SET CONSTRAINTS
3172  * state data; each subtransaction level that modifies that state first
3173  * saves a copy, which we use to restore the state if we abort.
3174  *
3175  * events_stack is a stack of copies of the events head/tail pointers,
3176  * which we use to restore those values during subtransaction abort.
3177  *
3178  * depth_stack is a stack of copies of subtransaction-start-time query_depth,
3179  * which we similarly use to clean up at subtransaction abort.
3180  *
3181  * firing_stack is a stack of copies of subtransaction-start-time
3182  * firing_counter.  We use this to recognize which deferred triggers were
3183  * fired (or marked for firing) within an aborted subtransaction.
3184  *
3185  * We use GetCurrentTransactionNestLevel() to determine the correct array
3186  * index in these stacks.  maxtransdepth is the number of allocated entries in
3187  * each stack.  (By not keeping our own stack pointer, we can avoid trouble
3188  * in cases where errors during subxact abort cause multiple invocations
3189  * of AfterTriggerEndSubXact() at the same nesting depth.)
3190  */
3191 typedef struct AfterTriggersData
3192 {
3193 	CommandId	firing_counter; /* next firing ID to assign */
3194 	SetConstraintState state;	/* the active SET CONSTRAINTS state */
3195 	AfterTriggerEventList events;		/* deferred-event list */
3196 	int			query_depth;	/* current query list index */
3197 	AfterTriggerEventList *query_stack; /* events pending from each query */
3198 	Tuplestorestate **fdw_tuplestores;	/* foreign tuples from each query */
3199 	int			maxquerydepth;	/* allocated len of above array */
3200 	MemoryContext event_cxt;	/* memory context for events, if any */
3201 
3202 	/* these fields are just for resetting at subtrans abort: */
3203 
3204 	SetConstraintState *state_stack;	/* stacked SET CONSTRAINTS states */
3205 	AfterTriggerEventList *events_stack;		/* stacked list pointers */
3206 	int		   *depth_stack;	/* stacked query_depths */
3207 	CommandId  *firing_stack;	/* stacked firing_counters */
3208 	int			maxtransdepth;	/* allocated len of above arrays */
3209 } AfterTriggersData;
3210 
3211 static AfterTriggersData afterTriggers;
3212 
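/*
 * Hedged example of the firing_counter interplay described above (table and
 * constraint names are hypothetical, not taken from this file):
 *
 *		BEGIN;
 *		SET CONSTRAINTS ALL DEFERRED;
 *		INSERT INTO child_tbl VALUES (...);		-- queues deferred RI checks
 *		SET CONSTRAINTS ALL IMMEDIATE;			-- fires them, stamped with the
 *												--   current firing ID
 *		COMMIT;
 *
 * If one of those trigger functions itself runs SET CONSTRAINTS, the inner
 * invocation only picks up events not already stamped with an earlier
 * firing ID, which is the sane-behavior guarantee noted above.
 */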
3213 static void AfterTriggerExecute(AfterTriggerEvent event,
3214 					Relation rel, TriggerDesc *trigdesc,
3215 					FmgrInfo *finfo,
3216 					Instrumentation *instr,
3217 					MemoryContext per_tuple_context,
3218 					TupleTableSlot *trig_tuple_slot1,
3219 					TupleTableSlot *trig_tuple_slot2);
3220 static SetConstraintState SetConstraintStateCreate(int numalloc);
3221 static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
3222 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3223 						  Oid tgoid, bool tgisdeferred);
3224 
3225 
3226 /*
3227  * Gets the current query fdw tuplestore and initializes it if necessary
3228  */
3229 static Tuplestorestate *
3230 GetCurrentFDWTuplestore(void)
3231 {
3232 	Tuplestorestate *ret;
3233 
3234 	ret = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
3235 	if (ret == NULL)
3236 	{
3237 		MemoryContext oldcxt;
3238 		ResourceOwner saveResourceOwner;
3239 
3240 		/*
3241 		 * Make the tuplestore valid until end of transaction.  This is the
3242 		 * allocation lifespan of the associated events list, but we really
3243 		 * only need it until AfterTriggerEndQuery().
3244 		 */
3245 		oldcxt = MemoryContextSwitchTo(TopTransactionContext);
3246 		saveResourceOwner = CurrentResourceOwner;
3247 		PG_TRY();
3248 		{
3249 			CurrentResourceOwner = TopTransactionResourceOwner;
3250 			ret = tuplestore_begin_heap(false, false, work_mem);
3251 		}
3252 		PG_CATCH();
3253 		{
3254 			CurrentResourceOwner = saveResourceOwner;
3255 			PG_RE_THROW();
3256 		}
3257 		PG_END_TRY();
3258 		CurrentResourceOwner = saveResourceOwner;
3259 		MemoryContextSwitchTo(oldcxt);
3260 
3261 		afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = ret;
3262 	}
3263 
3264 	return ret;
3265 }
3266 
3267 /* ----------
3268  * afterTriggerCheckState()
3269  *
3270  *	Returns true if the trigger event is actually in state DEFERRED.
3271  * ----------
3272  */
3273 static bool
3274 afterTriggerCheckState(AfterTriggerShared evtshared)
3275 {
3276 	Oid			tgoid = evtshared->ats_tgoid;
3277 	SetConstraintState state = afterTriggers.state;
3278 	int			i;
3279 
3280 	/*
3281 	 * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3282 	 * constraints declared NOT DEFERRABLE), the state is always false.
3283 	 */
3284 	if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3285 		return false;
3286 
3287 	/*
3288 	 * If constraint state exists, SET CONSTRAINTS might have been executed
3289 	 * either for this trigger or for all triggers.
3290 	 */
3291 	if (state != NULL)
3292 	{
3293 		/* Check for SET CONSTRAINTS for this specific trigger. */
3294 		for (i = 0; i < state->numstates; i++)
3295 		{
3296 			if (state->trigstates[i].sct_tgoid == tgoid)
3297 				return state->trigstates[i].sct_tgisdeferred;
3298 		}
3299 
3300 		/* Check for SET CONSTRAINTS ALL. */
3301 		if (state->all_isset)
3302 			return state->all_isdeferred;
3303 	}
3304 
3305 	/*
3306 	 * Otherwise return the default state for the trigger.
3307 	 */
3308 	return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3309 }
3310 
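/*
 * Illustrative example for afterTriggerCheckState() above (a sketch; the
 * constraint name is hypothetical):
 *
 *		SET CONSTRAINTS ALL IMMEDIATE;
 *		SET CONSTRAINTS my_fk DEFERRED;
 *
 * For the trigger backing "my_fk", the per-trigger entry is found first and
 * wins, so the function returns true (deferred).  For any other deferrable
 * trigger, the ALL setting applies and it returns false.  With no SET
 * CONSTRAINTS at all, the constraint's INITIALLY DEFERRED/IMMEDIATE default
 * decides.
 */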
3311 
3312 /* ----------
3313  * afterTriggerAddEvent()
3314  *
3315  *	Add a new trigger event to the specified queue.
3316  *	The passed-in event data is copied.
3317  * ----------
3318  */
3319 static void
3320 afterTriggerAddEvent(AfterTriggerEventList *events,
3321 					 AfterTriggerEvent event, AfterTriggerShared evtshared)
3322 {
3323 	Size		eventsize = SizeofTriggerEvent(event);
3324 	Size		needed = eventsize + sizeof(AfterTriggerSharedData);
3325 	AfterTriggerEventChunk *chunk;
3326 	AfterTriggerShared newshared;
3327 	AfterTriggerEvent newevent;
3328 
3329 	/*
3330 	 * If empty list or not enough room in the tail chunk, make a new chunk.
3331 	 * We assume here that a new shared record will always be needed.
3332 	 */
3333 	chunk = events->tail;
3334 	if (chunk == NULL ||
3335 		chunk->endfree - chunk->freeptr < needed)
3336 	{
3337 		Size		chunksize;
3338 
3339 		/* Create event context if we didn't already */
3340 		if (afterTriggers.event_cxt == NULL)
3341 			afterTriggers.event_cxt =
3342 				AllocSetContextCreate(TopTransactionContext,
3343 									  "AfterTriggerEvents",
3344 									  ALLOCSET_DEFAULT_SIZES);
3345 
3346 		/*
3347 		 * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3348 		 * These numbers are fairly arbitrary, though there is a hard limit at
3349 		 * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3350 		 * shared records using the available space in ate_flags.  Another
3351 		 * constraint is that if the chunk size gets too huge, the search loop
3352 		 * below would get slow given a (not too common) usage pattern with
3353 		 * many distinct event types in a chunk.  Therefore, we double the
3354 		 * preceding chunk size only if there weren't too many shared records
3355 		 * in the preceding chunk; otherwise we halve it.  This gives us some
3356 		 * ability to adapt to the actual usage pattern of the current query
3357 		 * while still having large chunk sizes in typical usage.  All chunk
3358 		 * sizes used should be MAXALIGN multiples, to ensure that the shared
3359 		 * records will be aligned safely.
3360 		 */
3361 #define MIN_CHUNK_SIZE 1024
3362 #define MAX_CHUNK_SIZE (1024*1024)
3363 
3364 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3365 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3366 #endif
3367 
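		/*
		 * Worked example of the sizing heuristic above (illustrative only):
		 * the first chunk is 1KB; if it fills up while holding relatively
		 * few distinct shared records, the next chunk is 2KB, then 4KB, and
		 * so on up to the 1MB cap.  If a 4KB chunk instead ends up with more
		 * than 100 shared records, the next chunk drops back to 2KB.
		 */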
3368 		if (chunk == NULL)
3369 			chunksize = MIN_CHUNK_SIZE;
3370 		else
3371 		{
3372 			/* preceding chunk size... */
3373 			chunksize = chunk->endptr - (char *) chunk;
3374 			/* check number of shared records in preceding chunk */
3375 			if ((chunk->endptr - chunk->endfree) <=
3376 				(100 * sizeof(AfterTriggerSharedData)))
3377 				chunksize *= 2; /* okay, double it */
3378 			else
3379 				chunksize /= 2; /* too many shared records */
3380 			chunksize = Min(chunksize, MAX_CHUNK_SIZE);
3381 		}
3382 		chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
3383 		chunk->next = NULL;
3384 		chunk->freeptr = CHUNK_DATA_START(chunk);
3385 		chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
3386 		Assert(chunk->endfree - chunk->freeptr >= needed);
3387 
3388 		if (events->head == NULL)
3389 			events->head = chunk;
3390 		else
3391 			events->tail->next = chunk;
3392 		events->tail = chunk;
3393 		/* events->tailfree is now out of sync, but we'll fix it below */
3394 	}
3395 
3396 	/*
3397 	 * Try to locate a matching shared-data record already in the chunk. If
3398 	 * none, make a new one.
3399 	 */
3400 	for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
3401 		 (char *) newshared >= chunk->endfree;
3402 		 newshared--)
3403 	{
3404 		if (newshared->ats_tgoid == evtshared->ats_tgoid &&
3405 			newshared->ats_relid == evtshared->ats_relid &&
3406 			newshared->ats_event == evtshared->ats_event &&
3407 			newshared->ats_firing_id == 0)
3408 			break;
3409 	}
3410 	if ((char *) newshared < chunk->endfree)
3411 	{
3412 		*newshared = *evtshared;
3413 		newshared->ats_firing_id = 0;	/* just to be sure */
3414 		chunk->endfree = (char *) newshared;
3415 	}
3416 
3417 	/* Insert the data */
3418 	newevent = (AfterTriggerEvent) chunk->freeptr;
3419 	memcpy(newevent, event, eventsize);
3420 	/* ... and link the new event to its shared record */
3421 	newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
3422 	newevent->ate_flags |= (char *) newshared - (char *) newevent;
3423 
3424 	chunk->freeptr += eventsize;
3425 	events->tailfree = chunk->freeptr;
3426 }
3427 
3428 /* ----------
3429  * afterTriggerFreeEventList()
3430  *
3431  *	Free all the event storage in the given list.
3432  * ----------
3433  */
3434 static void
3435 afterTriggerFreeEventList(AfterTriggerEventList *events)
3436 {
3437 	AfterTriggerEventChunk *chunk;
3438 
3439 	while ((chunk = events->head) != NULL)
3440 	{
3441 		events->head = chunk->next;
3442 		pfree(chunk);
3443 	}
3444 	events->tail = NULL;
3445 	events->tailfree = NULL;
3446 }
3447 
3448 /* ----------
3449  * afterTriggerRestoreEventList()
3450  *
3451  *	Restore an event list to its prior length, removing all the events
3452  *	added since it had the value old_events.
3453  * ----------
3454  */
3455 static void
3456 afterTriggerRestoreEventList(AfterTriggerEventList *events,
3457 							 const AfterTriggerEventList *old_events)
3458 {
3459 	AfterTriggerEventChunk *chunk;
3460 	AfterTriggerEventChunk *next_chunk;
3461 
3462 	if (old_events->tail == NULL)
3463 	{
3464 		/* restoring to a completely empty state, so free everything */
3465 		afterTriggerFreeEventList(events);
3466 	}
3467 	else
3468 	{
3469 		*events = *old_events;
3470 		/* free any chunks after the last one we want to keep */
3471 		for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3472 		{
3473 			next_chunk = chunk->next;
3474 			pfree(chunk);
3475 		}
3476 		/* and clean up the tail chunk to be the right length */
3477 		events->tail->next = NULL;
3478 		events->tail->freeptr = events->tailfree;
3479 
3480 		/*
3481 		 * We don't make any effort to remove now-unused shared data records.
3482 		 * They might still be useful, anyway.
3483 		 */
3484 	}
3485 }
3486 
3487 /* ----------
3488  * afterTriggerDeleteHeadEventChunk()
3489  *
3490  *	Remove the first chunk of events from the given event list.
3491  * ----------
3492  */
3493 static void
3494 afterTriggerDeleteHeadEventChunk(AfterTriggerEventList *events)
3495 {
3496 	AfterTriggerEventChunk *target = events->head;
3497 
3498 	Assert(target && target->next);
3499 
3500 	events->head = target->next;
3501 	pfree(target);
3502 }
3503 
3504 
3505 /* ----------
3506  * AfterTriggerExecute()
3507  *
3508  *	Fetch the required tuples back from the heap and fire one
3509  *	single trigger function.
3510  *
3511  *	Frequently, this will be fired many times in a row for triggers of
3512  *	a single relation.  Therefore, we cache the open relation and provide
3513  *	fmgr lookup cache space at the caller level.  (For triggers fired at
3514  *	the end of a query, we can even piggyback on the executor's state.)
3515  *
3516  *	event: event currently being fired.
3517  *	rel: open relation for event.
3518  *	trigdesc: working copy of rel's trigger info.
3519  *	finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
3520  *	instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
3521  *		or NULL if no instrumentation is wanted.
3522  *	per_tuple_context: memory context to call trigger function in.
3523  *	trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
3524  *	trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
3525  * ----------
3526  */
3527 static void
3528 AfterTriggerExecute(AfterTriggerEvent event,
3529 					Relation rel, TriggerDesc *trigdesc,
3530 					FmgrInfo *finfo, Instrumentation *instr,
3531 					MemoryContext per_tuple_context,
3532 					TupleTableSlot *trig_tuple_slot1,
3533 					TupleTableSlot *trig_tuple_slot2)
3534 {
3535 	AfterTriggerShared evtshared = GetTriggerSharedData(event);
3536 	Oid			tgoid = evtshared->ats_tgoid;
3537 	TriggerData LocTriggerData;
3538 	HeapTupleData tuple1;
3539 	HeapTupleData tuple2;
3540 	HeapTuple	rettuple;
3541 	Buffer		buffer1 = InvalidBuffer;
3542 	Buffer		buffer2 = InvalidBuffer;
3543 	int			tgindx;
3544 
3545 	/*
3546 	 * Locate trigger in trigdesc.
3547 	 */
3548 	LocTriggerData.tg_trigger = NULL;
3549 	for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
3550 	{
3551 		if (trigdesc->triggers[tgindx].tgoid == tgoid)
3552 		{
3553 			LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
3554 			break;
3555 		}
3556 	}
3557 	if (LocTriggerData.tg_trigger == NULL)
3558 		elog(ERROR, "could not find trigger %u", tgoid);
3559 
3560 	/*
3561 	 * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
3562 	 * to include time spent re-fetching tuples in the trigger cost.
3563 	 */
3564 	if (instr)
3565 		InstrStartNode(instr + tgindx);
3566 
3567 	/*
3568 	 * Fetch the required tuple(s).
3569 	 */
3570 	switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
3571 	{
3572 		case AFTER_TRIGGER_FDW_FETCH:
3573 			{
3574 				Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
3575 
3576 				if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
3577 											 trig_tuple_slot1))
3578 					elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3579 
3580 				if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3581 					TRIGGER_EVENT_UPDATE &&
3582 					!tuplestore_gettupleslot(fdw_tuplestore, true, false,
3583 											 trig_tuple_slot2))
3584 					elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3585 			}
3586 			/* fall through */
3587 		case AFTER_TRIGGER_FDW_REUSE:
3588 
3589 			/*
3590 			 * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple()
3591 			 * ensures that tg_trigtuple does not reference tuplestore memory.
3592 			 * (It is formally possible for the trigger function to queue
3593 			 * trigger events that add to the same tuplestore, which can push
3594 			 * other tuples out of memory.)  The distinction is academic,
3595 			 * because we start with a minimal tuple that ExecFetchSlotTuple()
3596 			 * must materialize anyway.
3597 			 */
3598 			LocTriggerData.tg_trigtuple =
3599 				ExecMaterializeSlot(trig_tuple_slot1);
3600 			LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3601 
3602 			LocTriggerData.tg_newtuple =
3603 				((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3604 				 TRIGGER_EVENT_UPDATE) ?
3605 				ExecMaterializeSlot(trig_tuple_slot2) : NULL;
3606 			LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3607 
3608 			break;
3609 
3610 		default:
3611 			if (ItemPointerIsValid(&(event->ate_ctid1)))
3612 			{
3613 				ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
3614 				if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer1, false, NULL))
3615 					elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3616 				LocTriggerData.tg_trigtuple = &tuple1;
3617 				LocTriggerData.tg_trigtuplebuf = buffer1;
3618 			}
3619 			else
3620 			{
3621 				LocTriggerData.tg_trigtuple = NULL;
3622 				LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3623 			}
3624 
3625 			/* don't touch ctid2 if not there */
3626 			if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
3627 				AFTER_TRIGGER_2CTID &&
3628 				ItemPointerIsValid(&(event->ate_ctid2)))
3629 			{
3630 				ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
3631 				if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer2, false, NULL))
3632 					elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3633 				LocTriggerData.tg_newtuple = &tuple2;
3634 				LocTriggerData.tg_newtuplebuf = buffer2;
3635 			}
3636 			else
3637 			{
3638 				LocTriggerData.tg_newtuple = NULL;
3639 				LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3640 			}
3641 	}
3642 
3643 	/*
3644 	 * Setup the remaining trigger information
3645 	 */
3646 	LocTriggerData.type = T_TriggerData;
3647 	LocTriggerData.tg_event =
3648 		evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
3649 	LocTriggerData.tg_relation = rel;
3650 
3651 	MemoryContextReset(per_tuple_context);
3652 
3653 	/*
3654 	 * Call the trigger and throw away any possibly returned updated tuple.
3655 	 * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
3656 	 */
3657 	rettuple = ExecCallTriggerFunc(&LocTriggerData,
3658 								   tgindx,
3659 								   finfo,
3660 								   NULL,
3661 								   per_tuple_context);
3662 	if (rettuple != NULL &&
3663 		rettuple != LocTriggerData.tg_trigtuple &&
3664 		rettuple != LocTriggerData.tg_newtuple)
3665 		heap_freetuple(rettuple);
3666 
3667 	/*
3668 	 * Release buffers
3669 	 */
3670 	if (buffer1 != InvalidBuffer)
3671 		ReleaseBuffer(buffer1);
3672 	if (buffer2 != InvalidBuffer)
3673 		ReleaseBuffer(buffer2);
3674 
3675 	/*
3676 	 * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
3677 	 * one "tuple returned" (really the number of firings).
3678 	 */
3679 	if (instr)
3680 		InstrStopNode(instr + tgindx, 1);
3681 }
3682 
3683 
3684 /*
3685  * afterTriggerMarkEvents()
3686  *
3687  *	Scan the given event list for not yet invoked events.  Mark the ones
3688  *	that can be invoked now with the current firing ID.
3689  *
3690  *	If move_list isn't NULL, events that are not to be invoked now are
3691  *	transferred to move_list.
3692  *
3693  *	When immediate_only is TRUE, do not invoke currently-deferred triggers.
3694  *	(This will be FALSE only at main transaction exit.)
3695  *
3696  *	Returns TRUE if any invokable events were found.
3697  */
3698 static bool
3699 afterTriggerMarkEvents(AfterTriggerEventList *events,
3700 					   AfterTriggerEventList *move_list,
3701 					   bool immediate_only)
3702 {
3703 	bool		found = false;
3704 	bool		deferred_found = false;
3705 	AfterTriggerEvent event;
3706 	AfterTriggerEventChunk *chunk;
3707 
3708 	for_each_event_chunk(event, chunk, *events)
3709 	{
3710 		AfterTriggerShared evtshared = GetTriggerSharedData(event);
3711 		bool		defer_it = false;
3712 
3713 		if (!(event->ate_flags &
3714 			  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
3715 		{
3716 			/*
3717 			 * This trigger hasn't been called or scheduled yet. Check if we
3718 			 * should call it now.
3719 			 */
3720 			if (immediate_only && afterTriggerCheckState(evtshared))
3721 			{
3722 				defer_it = true;
3723 			}
3724 			else
3725 			{
3726 				/*
3727 				 * Mark it as to be fired in this firing cycle.
3728 				 */
3729 				evtshared->ats_firing_id = afterTriggers.firing_counter;
3730 				event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
3731 				found = true;
3732 			}
3733 		}
3734 
3735 		/*
3736 		 * If it's deferred, move it to move_list, if requested.
3737 		 */
3738 		if (defer_it && move_list != NULL)
3739 		{
3740 			deferred_found = true;
3741 			/* add it to move_list */
3742 			afterTriggerAddEvent(move_list, event, evtshared);
3743 			/* mark original copy "done" so we don't do it again */
3744 			event->ate_flags |= AFTER_TRIGGER_DONE;
3745 		}
3746 	}
3747 
3748 	/*
3749 	 * We could allow deferred triggers if, before the end of the
3750 	 * security-restricted operation, we were to verify that a SET CONSTRAINTS
3751 	 * ... IMMEDIATE has fired all such triggers.  For now, don't bother.
3752 	 */
3753 	if (deferred_found && InSecurityRestrictedOperation())
3754 		ereport(ERROR,
3755 				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
3756 				 errmsg("cannot fire deferred trigger within security-restricted operation")));
3757 
3758 	return found;
3759 }
3760 
3761 /*
3762  * afterTriggerInvokeEvents()
3763  *
3764  *	Scan the given event list for events that are marked as to be fired
3765  *	in the current firing cycle, and fire them.
3766  *
3767  *	If estate isn't NULL, we use its result relation info to avoid repeated
3768  *	openings and closing of trigger target relations.  If it is NULL, we
3769  *	make one locally to cache the info in case there are multiple trigger
3770  *	events per rel.
3771  *
3772  *	When delete_ok is TRUE, it's safe to delete fully-processed events.
3773  *	(We are not very tense about that: we simply reset a chunk to be empty
3774  *	if all its events got fired.  The objective here is just to avoid useless
3775  *	rescanning of events when a trigger queues new events during transaction
3776  *	end, so it's not necessary to worry much about the case where only
3777  *	some events are fired.)
3778  *
3779  *	Returns TRUE if no unfired events remain in the list (this allows us
3780  *	to avoid repeating afterTriggerMarkEvents).
3781  */
3782 static bool
3783 afterTriggerInvokeEvents(AfterTriggerEventList *events,
3784 						 CommandId firing_id,
3785 						 EState *estate,
3786 						 bool delete_ok)
3787 {
3788 	bool		all_fired = true;
3789 	AfterTriggerEventChunk *chunk;
3790 	MemoryContext per_tuple_context;
3791 	bool		local_estate = false;
3792 	Relation	rel = NULL;
3793 	TriggerDesc *trigdesc = NULL;
3794 	FmgrInfo   *finfo = NULL;
3795 	Instrumentation *instr = NULL;
3796 	TupleTableSlot *slot1 = NULL,
3797 			   *slot2 = NULL;
3798 
3799 	/* Make a local EState if need be */
3800 	if (estate == NULL)
3801 	{
3802 		estate = CreateExecutorState();
3803 		local_estate = true;
3804 	}
3805 
3806 	/* Make a per-tuple memory context for trigger function calls */
3807 	per_tuple_context =
3808 		AllocSetContextCreate(CurrentMemoryContext,
3809 							  "AfterTriggerTupleContext",
3810 							  ALLOCSET_DEFAULT_SIZES);
3811 
3812 	for_each_chunk(chunk, *events)
3813 	{
3814 		AfterTriggerEvent event;
3815 		bool		all_fired_in_chunk = true;
3816 
3817 		for_each_event(event, chunk)
3818 		{
3819 			AfterTriggerShared evtshared = GetTriggerSharedData(event);
3820 
3821 			/*
3822 			 * Is it one for me to fire?
3823 			 */
3824 			if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
3825 				evtshared->ats_firing_id == firing_id)
3826 			{
3827 				/*
3828 				 * So let's fire it... but first, find the correct relation if
3829 				 * this is not the same relation as before.
3830 				 */
3831 				if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
3832 				{
3833 					ResultRelInfo *rInfo;
3834 
3835 					rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
3836 					rel = rInfo->ri_RelationDesc;
3837 					trigdesc = rInfo->ri_TrigDesc;
3838 					finfo = rInfo->ri_TrigFunctions;
3839 					instr = rInfo->ri_TrigInstrument;
3840 					if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
3841 					{
3842 						if (slot1 != NULL)
3843 						{
3844 							ExecDropSingleTupleTableSlot(slot1);
3845 							ExecDropSingleTupleTableSlot(slot2);
3846 						}
3847 						slot1 = MakeSingleTupleTableSlot(rel->rd_att);
3848 						slot2 = MakeSingleTupleTableSlot(rel->rd_att);
3849 					}
3850 					if (trigdesc == NULL)		/* should not happen */
3851 						elog(ERROR, "relation %u has no triggers",
3852 							 evtshared->ats_relid);
3853 				}
3854 
3855 				/*
3856 				 * Fire it.  Note that the AFTER_TRIGGER_IN_PROGRESS flag is
3857 				 * still set, so recursive examinations of the event list
3858 				 * won't try to re-fire it.
3859 				 */
3860 				AfterTriggerExecute(event, rel, trigdesc, finfo, instr,
3861 									per_tuple_context, slot1, slot2);
3862 
3863 				/*
3864 				 * Mark the event as done.
3865 				 */
3866 				event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
3867 				event->ate_flags |= AFTER_TRIGGER_DONE;
3868 			}
3869 			else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
3870 			{
3871 				/* something remains to be done */
3872 				all_fired = all_fired_in_chunk = false;
3873 			}
3874 		}
3875 
3876 		/* Clear the chunk if delete_ok and nothing left of interest */
3877 		if (delete_ok && all_fired_in_chunk)
3878 		{
3879 			chunk->freeptr = CHUNK_DATA_START(chunk);
3880 			chunk->endfree = chunk->endptr;
3881 
3882 			/*
3883 			 * If it's the last chunk, we must sync the event list's tailfree too.  Note
3884 			 * that delete_ok must NOT be passed as true if there could be
3885 			 * additional AfterTriggerEventList values pointing at this event
3886 			 * list, since we'd fail to fix their copies of tailfree.
3887 			 */
3888 			if (chunk == events->tail)
3889 				events->tailfree = chunk->freeptr;
3890 		}
3891 	}
3892 	if (slot1 != NULL)
3893 	{
3894 		ExecDropSingleTupleTableSlot(slot1);
3895 		ExecDropSingleTupleTableSlot(slot2);
3896 	}
3897 
3898 	/* Release working resources */
3899 	MemoryContextDelete(per_tuple_context);
3900 
3901 	if (local_estate)
3902 	{
3903 		ListCell   *l;
3904 
3905 		foreach(l, estate->es_trig_target_relations)
3906 		{
3907 			ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
3908 
3909 			/* Close indices and then the relation itself */
3910 			ExecCloseIndices(resultRelInfo);
3911 			heap_close(resultRelInfo->ri_RelationDesc, NoLock);
3912 		}
3913 		FreeExecutorState(estate);
3914 	}
3915 
3916 	return all_fired;
3917 }
3918 
3919 
3920 /* ----------
3921  * AfterTriggerBeginXact()
3922  *
3923  *	Called at transaction start (either BEGIN or implicit for single
3924  *	statement outside of transaction block).
3925  * ----------
3926  */
3927 void
3928 AfterTriggerBeginXact(void)
3929 {
3930 	/*
3931 	 * Initialize after-trigger state structure to empty
3932 	 */
3933 	afterTriggers.firing_counter = (CommandId) 1;		/* mustn't be 0 */
3934 	afterTriggers.query_depth = -1;
3935 
3936 	/*
3937 	 * Verify that there is no leftover state remaining.  If these assertions
3938 	 * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
3939 	 * up properly.
3940 	 */
3941 	Assert(afterTriggers.state == NULL);
3942 	Assert(afterTriggers.query_stack == NULL);
3943 	Assert(afterTriggers.fdw_tuplestores == NULL);
3944 	Assert(afterTriggers.maxquerydepth == 0);
3945 	Assert(afterTriggers.event_cxt == NULL);
3946 	Assert(afterTriggers.events.head == NULL);
3947 	Assert(afterTriggers.state_stack == NULL);
3948 	Assert(afterTriggers.events_stack == NULL);
3949 	Assert(afterTriggers.depth_stack == NULL);
3950 	Assert(afterTriggers.firing_stack == NULL);
3951 	Assert(afterTriggers.maxtransdepth == 0);
3952 }
3953 
3954 
3955 /* ----------
3956  * AfterTriggerBeginQuery()
3957  *
3958  *	Called just before we start processing a single query within a
3959  *	transaction (or subtransaction).  Most of the real work gets deferred
3960  *	until somebody actually tries to queue a trigger event.
3961  * ----------
3962  */
3963 void
3964 AfterTriggerBeginQuery(void)
3965 {
3966 	/* Increase the query stack depth */
3967 	afterTriggers.query_depth++;
3968 }
3969 
3970 
3971 /* ----------
3972  * AfterTriggerEndQuery()
3973  *
3974  *	Called after one query has been completely processed. At this time
3975  *	we invoke all AFTER IMMEDIATE trigger events queued by the query, and
3976  *	transfer deferred trigger events to the global deferred-trigger list.
3977  *
3978  *	Note that this must be called BEFORE closing down the executor
3979  *	with ExecutorEnd, because we make use of the EState's info about
3980  *	target relations.  Normally it is called from ExecutorFinish.
3981  * ----------
3982  */
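/*
 * Illustrative call sequence (a sketch only; the real call sites live in the
 * executor, and as noted above AfterTriggerEndQuery is normally reached from
 * ExecutorFinish; "queryDesc" is a hypothetical variable name):
 *
 *		AfterTriggerBeginQuery();
 *		... run the query, queuing AFTER trigger events ...
 *		AfterTriggerEndQuery(estate);	-- fire immediate events, move
 *										--   deferred ones to the main list
 *		ExecutorEnd(queryDesc);			-- only after AfterTriggerEndQuery
 */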
3983 void
3984 AfterTriggerEndQuery(EState *estate)
3985 {
3986 	AfterTriggerEventList *events;
3987 	Tuplestorestate *fdw_tuplestore;
3988 
3989 	/* Must be inside a query, too */
3990 	Assert(afterTriggers.query_depth >= 0);
3991 
3992 	/*
3993 	 * If we never even got as far as initializing the event stack, there
3994 	 * certainly won't be any events, so exit quickly.
3995 	 */
3996 	if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
3997 	{
3998 		afterTriggers.query_depth--;
3999 		return;
4000 	}
4001 
4002 	/*
4003 	 * Process all immediate-mode triggers queued by the query, and move the
4004 	 * deferred ones to the main list of deferred events.
4005 	 *
4006 	 * Notice that we decide which ones will be fired, and put the deferred
4007 	 * ones on the main list, before anything is actually fired.  This ensures
4008 	 * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4009 	 * IMMEDIATE: all events we have decided to defer will be available for it
4010 	 * to fire.
4011 	 *
4012 	 * We loop in case a trigger queues more events at the same query level.
4013 	 * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4014 	 * will instead fire any triggers in a dedicated query level.  Foreign key
4015 	 * enforcement triggers do add to the current query level, thanks to their
4016 	 * passing fire_triggers = false to SPI_execute_snapshot().  Other
4017 	 * C-language triggers might do likewise.
4018 	 *
4019 	 * If we find no firable events, we don't have to increment
4020 	 * firing_counter.
4021 	 */
4022 	events = &afterTriggers.query_stack[afterTriggers.query_depth];
4023 
4024 	for (;;)
4025 	{
4026 		if (afterTriggerMarkEvents(events, &afterTriggers.events, true))
4027 		{
4028 			CommandId	firing_id = afterTriggers.firing_counter++;
4029 			AfterTriggerEventChunk *oldtail = events->tail;
4030 
4031 			if (afterTriggerInvokeEvents(events, firing_id, estate, false))
4032 				break;			/* all fired */
4033 
4034 			/*
4035 			 * Firing a trigger could result in query_stack being repalloc'd,
4036 			 * so we must recalculate ptr after each afterTriggerInvokeEvents
4037 			 * call.  Furthermore, it's unsafe to pass delete_ok = true here,
4038 			 * because that could cause afterTriggerInvokeEvents to try to
4039 			 * access *events after the stack has been repalloc'd.
4040 			 */
4041 			events = &afterTriggers.query_stack[afterTriggers.query_depth];
4042 
4043 			/*
4044 			 * We'll need to scan the events list again.  To reduce the cost
4045 			 * of doing so, get rid of completely-fired chunks.  We know that
4046 			 * all events were marked IN_PROGRESS or DONE at the conclusion of
4047 			 * afterTriggerMarkEvents, so any still-interesting events must
4048 			 * have been added after that, and so must be in the chunk that
4049 			 * was then the tail chunk, or in later chunks.  So, zap all
4050 			 * chunks before oldtail.  This is approximately the same set of
4051 			 * events we would have gotten rid of by passing delete_ok = true.
4052 			 */
4053 			Assert(oldtail != NULL);
4054 			while (events->head != oldtail)
4055 				afterTriggerDeleteHeadEventChunk(events);
4056 		}
4057 		else
4058 			break;
4059 	}
4060 
4061 	/* Release query-local storage for events, including tuplestore if any */
4062 	fdw_tuplestore = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4063 	if (fdw_tuplestore)
4064 	{
4065 		tuplestore_end(fdw_tuplestore);
4066 		afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4067 	}
4068 	afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4069 
4070 	afterTriggers.query_depth--;
4071 }
4072 
4073 
4074 /* ----------
4075  * AfterTriggerFireDeferred()
4076  *
4077  *	Called just before the current transaction is committed. At this
4078  *	time we invoke all pending DEFERRED triggers.
4079  *
4080  *	It is possible for other modules to queue additional deferred triggers
4081  *	during pre-commit processing; therefore xact.c may have to call this
4082  *	multiple times.
4083  * ----------
4084  */
4085 void
4086 AfterTriggerFireDeferred(void)
4087 {
4088 	AfterTriggerEventList *events;
4089 	bool		snap_pushed = false;
4090 
4091 	/* Must not be inside a query */
4092 	Assert(afterTriggers.query_depth == -1);
4093 
4094 	/*
4095 	 * If there are any triggers to fire, make sure we have set a snapshot for
4096 	 * them to use.  (Since PortalRunUtility doesn't set a snap for COMMIT, we
4097 	 * can't assume ActiveSnapshot is valid on entry.)
4098 	 */
4099 	events = &afterTriggers.events;
4100 	if (events->head != NULL)
4101 	{
4102 		PushActiveSnapshot(GetTransactionSnapshot());
4103 		snap_pushed = true;
4104 	}
4105 
4106 	/*
4107 	 * Run all the remaining triggers.  Loop until they are all gone, in case
4108 	 * some trigger queues more for us to do.
4109 	 */
4110 	while (afterTriggerMarkEvents(events, NULL, false))
4111 	{
4112 		CommandId	firing_id = afterTriggers.firing_counter++;
4113 
4114 		if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4115 			break;				/* all fired */
4116 	}
4117 
4118 	/*
4119 	 * We don't bother freeing the event list, since it will go away anyway
4120 	 * (and more efficiently than via pfree) in AfterTriggerEndXact.
4121 	 */
4122 
4123 	if (snap_pushed)
4124 		PopActiveSnapshot();
4125 }
4126 
4127 
4128 /* ----------
4129  * AfterTriggerEndXact()
4130  *
4131  *	The current transaction is finishing.
4132  *
4133  *	Any unfired triggers are canceled so we simply throw
4134  *	away anything we know.
4135  *
4136  *	Note: it is possible for this to be called repeatedly in case of
4137  *	error during transaction abort; therefore, do not complain if
4138  *	already closed down.
4139  * ----------
4140  */
4141 void
4142 AfterTriggerEndXact(bool isCommit)
4143 {
4144 	/*
4145 	 * Forget the pending-events list.
4146 	 *
4147 	 * Since all the info is in TopTransactionContext or children thereof, we
4148 	 * don't really need to do anything to reclaim memory.  However, the
4149 	 * pending-events list could be large, and so it's useful to discard it as
4150 	 * soon as possible --- especially if we are aborting because we ran out
4151 	 * of memory for the list!
4152 	 */
4153 	if (afterTriggers.event_cxt)
4154 	{
4155 		MemoryContextDelete(afterTriggers.event_cxt);
4156 		afterTriggers.event_cxt = NULL;
4157 		afterTriggers.events.head = NULL;
4158 		afterTriggers.events.tail = NULL;
4159 		afterTriggers.events.tailfree = NULL;
4160 	}
4161 
4162 	/*
4163 	 * Forget any subtransaction state as well.  Since this can't be very
4164 	 * large, we let the eventual reset of TopTransactionContext free the
4165 	 * memory instead of doing it here.
4166 	 */
4167 	afterTriggers.state_stack = NULL;
4168 	afterTriggers.events_stack = NULL;
4169 	afterTriggers.depth_stack = NULL;
4170 	afterTriggers.firing_stack = NULL;
4171 	afterTriggers.maxtransdepth = 0;
4172 
4173 
4174 	/*
4175 	 * Forget the query stack and constraint-related state information.  As
4176 	 * with the subtransaction state information, we don't bother freeing the
4177 	 * memory here.
4178 	 */
4179 	afterTriggers.query_stack = NULL;
4180 	afterTriggers.fdw_tuplestores = NULL;
4181 	afterTriggers.maxquerydepth = 0;
4182 	afterTriggers.state = NULL;
4183 
4184 	/* No more afterTriggers manipulation until next transaction starts. */
4185 	afterTriggers.query_depth = -1;
4186 }
4187 
4188 /*
4189  * AfterTriggerBeginSubXact()
4190  *
4191  *	Start a subtransaction.
4192  */
4193 void
4194 AfterTriggerBeginSubXact(void)
4195 {
4196 	int			my_level = GetCurrentTransactionNestLevel();
4197 
4198 	/*
4199 	 * Allocate more space in the stacks if needed.  (Note: because the
4200 	 * minimum nest level of a subtransaction is 2, we waste the first couple
4201 	 * entries of each array; not worth the notational effort to avoid it.)
4202 	 */
4203 	while (my_level >= afterTriggers.maxtransdepth)
4204 	{
4205 		if (afterTriggers.maxtransdepth == 0)
4206 		{
4207 			MemoryContext old_cxt;
4208 
4209 			old_cxt = MemoryContextSwitchTo(TopTransactionContext);
4210 
4211 #define DEFTRIG_INITALLOC 8
4212 			afterTriggers.state_stack = (SetConstraintState *)
4213 				palloc(DEFTRIG_INITALLOC * sizeof(SetConstraintState));
4214 			afterTriggers.events_stack = (AfterTriggerEventList *)
4215 				palloc(DEFTRIG_INITALLOC * sizeof(AfterTriggerEventList));
4216 			afterTriggers.depth_stack = (int *)
4217 				palloc(DEFTRIG_INITALLOC * sizeof(int));
4218 			afterTriggers.firing_stack = (CommandId *)
4219 				palloc(DEFTRIG_INITALLOC * sizeof(CommandId));
4220 			afterTriggers.maxtransdepth = DEFTRIG_INITALLOC;
4221 
4222 			MemoryContextSwitchTo(old_cxt);
4223 		}
4224 		else
4225 		{
4226 			/* repalloc will keep the stacks in the same context */
4227 			int			new_alloc = afterTriggers.maxtransdepth * 2;
4228 
4229 			afterTriggers.state_stack = (SetConstraintState *)
4230 				repalloc(afterTriggers.state_stack,
4231 						 new_alloc * sizeof(SetConstraintState));
4232 			afterTriggers.events_stack = (AfterTriggerEventList *)
4233 				repalloc(afterTriggers.events_stack,
4234 						 new_alloc * sizeof(AfterTriggerEventList));
4235 			afterTriggers.depth_stack = (int *)
4236 				repalloc(afterTriggers.depth_stack,
4237 						 new_alloc * sizeof(int));
4238 			afterTriggers.firing_stack = (CommandId *)
4239 				repalloc(afterTriggers.firing_stack,
4240 						 new_alloc * sizeof(CommandId));
4241 			afterTriggers.maxtransdepth = new_alloc;
4242 		}
4243 	}
4244 
4245 	/*
4246 	 * Push the current information into the stack.  The SET CONSTRAINTS state
4247 	 * is not saved until/unless changed.  Likewise, we don't make a
4248 	 * per-subtransaction event context until needed.
4249 	 */
4250 	afterTriggers.state_stack[my_level] = NULL;
4251 	afterTriggers.events_stack[my_level] = afterTriggers.events;
4252 	afterTriggers.depth_stack[my_level] = afterTriggers.query_depth;
4253 	afterTriggers.firing_stack[my_level] = afterTriggers.firing_counter;
4254 }
4255 
4256 /*
4257  * AfterTriggerEndSubXact()
4258  *
4259  *	The current subtransaction is ending.
4260  */
4261 void
4262 AfterTriggerEndSubXact(bool isCommit)
4263 {
4264 	int			my_level = GetCurrentTransactionNestLevel();
4265 	SetConstraintState state;
4266 	AfterTriggerEvent event;
4267 	AfterTriggerEventChunk *chunk;
4268 	CommandId	subxact_firing_id;
4269 
4270 	/*
4271 	 * Pop the prior state if needed.
4272 	 */
4273 	if (isCommit)
4274 	{
4275 		Assert(my_level < afterTriggers.maxtransdepth);
4276 		/* If we saved a prior state, we don't need it anymore */
4277 		state = afterTriggers.state_stack[my_level];
4278 		if (state != NULL)
4279 			pfree(state);
4280 		/* this avoids double pfree if error later: */
4281 		afterTriggers.state_stack[my_level] = NULL;
4282 		Assert(afterTriggers.query_depth ==
4283 			   afterTriggers.depth_stack[my_level]);
4284 	}
4285 	else
4286 	{
4287 		/*
4288 		 * Aborting.  It is possible subxact start failed before calling
4289 		 * AfterTriggerBeginSubXact, in which case we mustn't risk touching
4290 		 * stack levels that aren't there.
4291 		 */
4292 		if (my_level >= afterTriggers.maxtransdepth)
4293 			return;
4294 
4295 		/*
4296 		 * Release any event lists from queries being aborted, and restore
4297 		 * query_depth to its pre-subxact value.  This assumes that a
4298 		 * subtransaction will not add events to query levels started in an
4299 		 * earlier transaction state.
4300 		 */
4301 		while (afterTriggers.query_depth > afterTriggers.depth_stack[my_level])
4302 		{
4303 			if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
4304 			{
4305 				Tuplestorestate *ts;
4306 
4307 				ts = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4308 				if (ts)
4309 				{
4310 					tuplestore_end(ts);
4311 					afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4312 				}
4313 
4314 				afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4315 			}
4316 
4317 			afterTriggers.query_depth--;
4318 		}
4319 		Assert(afterTriggers.query_depth ==
4320 			   afterTriggers.depth_stack[my_level]);
4321 
4322 		/*
4323 		 * Restore the global deferred-event list to its former length,
4324 		 * discarding any events queued by the subxact.
4325 		 */
4326 		afterTriggerRestoreEventList(&afterTriggers.events,
4327 									 &afterTriggers.events_stack[my_level]);
4328 
4329 		/*
4330 		 * Restore the trigger state.  If the saved state is NULL, then this
4331 		 * subxact didn't save it, so it doesn't need restoring.
4332 		 */
4333 		state = afterTriggers.state_stack[my_level];
4334 		if (state != NULL)
4335 		{
4336 			pfree(afterTriggers.state);
4337 			afterTriggers.state = state;
4338 		}
4339 		/* this avoids double pfree if error later: */
4340 		afterTriggers.state_stack[my_level] = NULL;
4341 
4342 		/*
4343 		 * Scan for any remaining deferred events that were marked DONE or IN
4344 		 * PROGRESS by this subxact or a child, and un-mark them. We can
4345 		 * recognize such events because they have a firing ID greater than or
4346 		 * equal to the firing_counter value we saved at subtransaction start.
4347 		 * (This essentially assumes that the current subxact includes all
4348 		 * subxacts started after it.)
4349 		 */
4350 		subxact_firing_id = afterTriggers.firing_stack[my_level];
4351 		for_each_event_chunk(event, chunk, afterTriggers.events)
4352 		{
4353 			AfterTriggerShared evtshared = GetTriggerSharedData(event);
4354 
4355 			if (event->ate_flags &
4356 				(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
4357 			{
4358 				if (evtshared->ats_firing_id >= subxact_firing_id)
4359 					event->ate_flags &=
4360 						~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
4361 			}
4362 		}
4363 	}
4364 }
4365 
4366 /* ----------
4367  * AfterTriggerEnlargeQueryState()
4368  *
4369  *	Prepare the necessary state so that we can record AFTER trigger events
4370  *	queued by a query.  It is allowed to have nested queries within a
4371  *	(sub)transaction, so we need to have separate state for each query
4372  *	nesting level.
4373  * ----------
4374  */
4375 static void
4376 AfterTriggerEnlargeQueryState(void)
4377 {
4378 	int			init_depth = afterTriggers.maxquerydepth;
4379 
4380 	Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
4381 
4382 	if (afterTriggers.maxquerydepth == 0)
4383 	{
4384 		int			new_alloc = Max(afterTriggers.query_depth + 1, 8);
4385 
4386 		afterTriggers.query_stack = (AfterTriggerEventList *)
4387 			MemoryContextAlloc(TopTransactionContext,
4388 							   new_alloc * sizeof(AfterTriggerEventList));
4389 		afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4390 			MemoryContextAllocZero(TopTransactionContext,
4391 								   new_alloc * sizeof(Tuplestorestate *));
4392 		afterTriggers.maxquerydepth = new_alloc;
4393 	}
4394 	else
4395 	{
4396 		/* repalloc will keep the stack in the same context */
4397 		int			old_alloc = afterTriggers.maxquerydepth;
4398 		int			new_alloc = Max(afterTriggers.query_depth + 1,
4399 									old_alloc * 2);
4400 
4401 		afterTriggers.query_stack = (AfterTriggerEventList *)
4402 			repalloc(afterTriggers.query_stack,
4403 					 new_alloc * sizeof(AfterTriggerEventList));
4404 		afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4405 			repalloc(afterTriggers.fdw_tuplestores,
4406 					 new_alloc * sizeof(Tuplestorestate *));
4407 		/* Clear newly-allocated slots for subsequent lazy initialization. */
4408 		memset(afterTriggers.fdw_tuplestores + old_alloc,
4409 			   0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4410 		afterTriggers.maxquerydepth = new_alloc;
4411 	}
4412 
4413 	/* Initialize new query lists to empty */
4414 	while (init_depth < afterTriggers.maxquerydepth)
4415 	{
4416 		AfterTriggerEventList *events;
4417 
4418 		events = &afterTriggers.query_stack[init_depth];
4419 		events->head = NULL;
4420 		events->tail = NULL;
4421 		events->tailfree = NULL;
4422 
4423 		++init_depth;
4424 	}
4425 }
4426 
4427 /*
4428  * Create an empty SetConstraintState with room for numalloc trigstates
4429  */
4430 static SetConstraintState
4431 SetConstraintStateCreate(int numalloc)
4432 {
4433 	SetConstraintState state;
4434 
4435 	/* Behave sanely with numalloc == 0 */
4436 	if (numalloc <= 0)
4437 		numalloc = 1;
4438 
4439 	/*
4440 	 * We assume that zeroing will correctly initialize the state values.
4441 	 */
4442 	state = (SetConstraintState)
4443 		MemoryContextAllocZero(TopTransactionContext,
4444 							   offsetof(SetConstraintStateData, trigstates) +
4445 							   numalloc * sizeof(SetConstraintTriggerData));
4446 
4447 	state->numalloc = numalloc;
4448 
4449 	return state;
4450 }
4451 
4452 /*
4453  * Copy a SetConstraintState
4454  */
4455 static SetConstraintState
4456 SetConstraintStateCopy(SetConstraintState origstate)
4457 {
4458 	SetConstraintState state;
4459 
4460 	state = SetConstraintStateCreate(origstate->numstates);
4461 
4462 	state->all_isset = origstate->all_isset;
4463 	state->all_isdeferred = origstate->all_isdeferred;
4464 	state->numstates = origstate->numstates;
4465 	memcpy(state->trigstates, origstate->trigstates,
4466 		   origstate->numstates * sizeof(SetConstraintTriggerData));
4467 
4468 	return state;
4469 }
4470 
4471 /*
4472  * Add a per-trigger item to a SetConstraintState.  Returns possibly-changed
4473  * pointer to the state object (it will change if we have to repalloc).
4474  */
4475 static SetConstraintState
4476 SetConstraintStateAddItem(SetConstraintState state,
4477 						  Oid tgoid, bool tgisdeferred)
4478 {
4479 	if (state->numstates >= state->numalloc)
4480 	{
4481 		int			newalloc = state->numalloc * 2;
4482 
4483 		newalloc = Max(newalloc, 8);	/* in case original has size 0 */
4484 		state = (SetConstraintState)
4485 			repalloc(state,
4486 					 offsetof(SetConstraintStateData, trigstates) +
4487 					 newalloc * sizeof(SetConstraintTriggerData));
4488 		state->numalloc = newalloc;
4489 		Assert(state->numstates < state->numalloc);
4490 	}
4491 
4492 	state->trigstates[state->numstates].sct_tgoid = tgoid;
4493 	state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
4494 	state->numstates++;
4495 
4496 	return state;
4497 }
4498 
4499 /* ----------
4500  * AfterTriggerSetState()
4501  *
4502  *	Execute the SET CONSTRAINTS ... utility command.
4503  * ----------
4504  */
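/*
 * Hedged examples of the statements handled here (constraint names are
 * hypothetical):
 *
 *		SET CONSTRAINTS ALL DEFERRED;
 *		SET CONSTRAINTS my_fk, myschema.my_uniq IMMEDIATE;
 *
 * The first form sets the per-transaction ALL state; the second resolves the
 * named constraints (via the search path, or the given schema) and updates
 * the states of their individual triggers.
 */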
4505 void
4506 AfterTriggerSetState(ConstraintsSetStmt *stmt)
4507 {
4508 	int			my_level = GetCurrentTransactionNestLevel();
4509 
4510 	/* If we haven't already done so, initialize our state. */
4511 	if (afterTriggers.state == NULL)
4512 		afterTriggers.state = SetConstraintStateCreate(8);
4513 
4514 	/*
4515 	 * If in a subtransaction, and we didn't save the current state already,
4516 	 * save it so it can be restored if the subtransaction aborts.
4517 	 */
4518 	if (my_level > 1 &&
4519 		afterTriggers.state_stack[my_level] == NULL)
4520 	{
4521 		afterTriggers.state_stack[my_level] =
4522 			SetConstraintStateCopy(afterTriggers.state);
4523 	}
4524 
4525 	/*
4526 	 * Handle SET CONSTRAINTS ALL ...
4527 	 */
4528 	if (stmt->constraints == NIL)
4529 	{
4530 		/*
4531 		 * Forget any previous SET CONSTRAINTS commands in this transaction.
4532 		 */
4533 		afterTriggers.state->numstates = 0;
4534 
4535 		/*
4536 		 * Set the per-transaction ALL state to known.
4537 		 */
4538 		afterTriggers.state->all_isset = true;
4539 		afterTriggers.state->all_isdeferred = stmt->deferred;
4540 	}
4541 	else
4542 	{
4543 		Relation	conrel;
4544 		Relation	tgrel;
4545 		List	   *conoidlist = NIL;
4546 		List	   *tgoidlist = NIL;
4547 		ListCell   *lc;
4548 
4549 		/*
4550 		 * Handle SET CONSTRAINTS constraint-name [, ...]
4551 		 *
4552 		 * First, identify all the named constraints and make a list of their
4553 		 * OIDs.  Since, unlike the SQL spec, we allow multiple constraints of
4554 		 * the same name within a schema, the specifications are not
4555 		 * necessarily unique.  Our strategy is to target all matching
4556 		 * constraints within the first search-path schema that has any
4557 		 * matches, but disregard matches in schemas beyond the first match.
4558 		 * (This is a bit odd but it's the historical behavior.)
4559 		 */
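		/*
		 * Hypothetical illustration: with search_path = 'a, b' and a
		 * deferrable constraint named "my_fk" in both schemas, SET
		 * CONSTRAINTS my_fk DEFERRED affects every "my_fk" in schema a and
		 * ignores the one in b, while SET CONSTRAINTS b.my_fk DEFERRED
		 * targets schema b explicitly.
		 */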
4560 		conrel = heap_open(ConstraintRelationId, AccessShareLock);
4561 
4562 		foreach(lc, stmt->constraints)
4563 		{
4564 			RangeVar   *constraint = lfirst(lc);
4565 			bool		found;
4566 			List	   *namespacelist;
4567 			ListCell   *nslc;
4568 
4569 			if (constraint->catalogname)
4570 			{
4571 				if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
4572 					ereport(ERROR,
4573 							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4574 							 errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
4575 							 constraint->catalogname, constraint->schemaname,
4576 									constraint->relname)));
4577 			}
4578 
4579 			/*
4580 			 * If we're given the schema name with the constraint, look only
4581 			 * in that schema.  If given a bare constraint name, use the
4582 			 * search path to find the first matching constraint.
4583 			 */
4584 			if (constraint->schemaname)
4585 			{
4586 				Oid			namespaceId = LookupExplicitNamespace(constraint->schemaname,
4587 																  false);
4588 
4589 				namespacelist = list_make1_oid(namespaceId);
4590 			}
4591 			else
4592 			{
4593 				namespacelist = fetch_search_path(true);
4594 			}
4595 
4596 			found = false;
4597 			foreach(nslc, namespacelist)
4598 			{
4599 				Oid			namespaceId = lfirst_oid(nslc);
4600 				SysScanDesc conscan;
4601 				ScanKeyData skey[2];
4602 				HeapTuple	tup;
4603 
4604 				ScanKeyInit(&skey[0],
4605 							Anum_pg_constraint_conname,
4606 							BTEqualStrategyNumber, F_NAMEEQ,
4607 							CStringGetDatum(constraint->relname));
4608 				ScanKeyInit(&skey[1],
4609 							Anum_pg_constraint_connamespace,
4610 							BTEqualStrategyNumber, F_OIDEQ,
4611 							ObjectIdGetDatum(namespaceId));
4612 
4613 				conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
4614 											 true, NULL, 2, skey);
4615 
4616 				while (HeapTupleIsValid(tup = systable_getnext(conscan)))
4617 				{
4618 					Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
4619 
4620 					if (con->condeferrable)
4621 						conoidlist = lappend_oid(conoidlist,
4622 												 HeapTupleGetOid(tup));
4623 					else if (stmt->deferred)
4624 						ereport(ERROR,
4625 								(errcode(ERRCODE_WRONG_OBJECT_TYPE),
4626 								 errmsg("constraint \"%s\" is not deferrable",
4627 										constraint->relname)));
4628 					found = true;
4629 				}
4630 
4631 				systable_endscan(conscan);
4632 
4633 				/*
4634 				 * Once we've found a matching constraint we do not search
4635 				 * later parts of the search path.
4636 				 */
4637 				if (found)
4638 					break;
4639 			}
4640 
4641 			list_free(namespacelist);
4642 
4643 			/*
4644 			 * Not found?
4645 			 */
4646 			if (!found)
4647 				ereport(ERROR,
4648 						(errcode(ERRCODE_UNDEFINED_OBJECT),
4649 						 errmsg("constraint \"%s\" does not exist",
4650 								constraint->relname)));
4651 		}
4652 
4653 		heap_close(conrel, AccessShareLock);
4654 
4655 		/*
4656 		 * Now, locate the trigger(s) implementing each of these constraints,
4657 		 * and make a list of their OIDs.
4658 		 */
4659 		tgrel = heap_open(TriggerRelationId, AccessShareLock);
4660 
4661 		foreach(lc, conoidlist)
4662 		{
4663 			Oid			conoid = lfirst_oid(lc);
4664 			bool		found;
4665 			ScanKeyData skey;
4666 			SysScanDesc tgscan;
4667 			HeapTuple	htup;
4668 
4669 			found = false;
4670 
4671 			ScanKeyInit(&skey,
4672 						Anum_pg_trigger_tgconstraint,
4673 						BTEqualStrategyNumber, F_OIDEQ,
4674 						ObjectIdGetDatum(conoid));
4675 
4676 			tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
4677 										NULL, 1, &skey);
4678 
4679 			while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
4680 			{
4681 				Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
4682 
4683 				/*
4684 				 * Silently skip triggers that are marked as non-deferrable in
4685 				 * pg_trigger.  This is not an error condition, since a
4686 				 * deferrable RI constraint may have some non-deferrable
4687 				 * actions.
4688 				 */
4689 				if (pg_trigger->tgdeferrable)
4690 					tgoidlist = lappend_oid(tgoidlist,
4691 											HeapTupleGetOid(htup));
4692 
4693 				found = true;
4694 			}
4695 
4696 			systable_endscan(tgscan);
4697 
4698 			/* Safety check: a deferrable constraint should have triggers */
4699 			if (!found)
4700 				elog(ERROR, "no triggers found for constraint with OID %u",
4701 					 conoid);
4702 		}
4703 
4704 		heap_close(tgrel, AccessShareLock);
4705 
4706 		/*
4707 		 * Now we can set the trigger states of individual triggers for this
4708 		 * xact.
4709 		 */
4710 		foreach(lc, tgoidlist)
4711 		{
4712 			Oid			tgoid = lfirst_oid(lc);
4713 			SetConstraintState state = afterTriggers.state;
4714 			bool		found = false;
4715 			int			i;
4716 
4717 			for (i = 0; i < state->numstates; i++)
4718 			{
4719 				if (state->trigstates[i].sct_tgoid == tgoid)
4720 				{
4721 					state->trigstates[i].sct_tgisdeferred = stmt->deferred;
4722 					found = true;
4723 					break;
4724 				}
4725 			}
4726 			if (!found)
4727 			{
4728 				afterTriggers.state =
4729 					SetConstraintStateAddItem(state, tgoid, stmt->deferred);
4730 			}
4731 		}
4732 	}
4733 
4734 	/*
4735 	 * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
4736 	 * checks against that constraint must be made when the SET CONSTRAINTS
4737 	 * command is executed -- i.e. the effects of the SET CONSTRAINTS command
4738 	 * apply retroactively.  We've updated the constraints state, so scan the
4739 	 * list of previously deferred events to fire any that have now become
4740 	 * immediate.
4741 	 *
4742 	 * Obviously, if this was SET ... DEFERRED then it can't have converted
4743 	 * any unfired events to immediate, so we need do nothing in that case.
4744 	 */
4745 	if (!stmt->deferred)
4746 	{
4747 		AfterTriggerEventList *events = &afterTriggers.events;
4748 		bool		snapshot_set = false;
4749 
4750 		while (afterTriggerMarkEvents(events, NULL, true))
4751 		{
4752 			CommandId	firing_id = afterTriggers.firing_counter++;
4753 
4754 			/*
4755 			 * Make sure a snapshot has been established in case trigger
4756 			 * functions need one.  Note that we avoid setting a snapshot if
4757 			 * we don't find at least one trigger that has to be fired now.
4758 			 * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
4759 			 * ISOLATION LEVEL SERIALIZABLE; ... works properly.  (If we are
4760 			 * at the start of a transaction it's not possible for any trigger
4761 			 * events to be queued yet.)
4762 			 */
4763 			if (!snapshot_set)
4764 			{
4765 				PushActiveSnapshot(GetTransactionSnapshot());
4766 				snapshot_set = true;
4767 			}
4768 
4769 			/*
4770 			 * We can delete fired events if we are at top transaction level,
4771 			 * but we'd better not if inside a subtransaction, since the
4772 			 * subtransaction could later get rolled back.
4773 			 */
4774 			if (afterTriggerInvokeEvents(events, firing_id, NULL,
4775 										 !IsSubTransaction()))
4776 				break;			/* all fired */
4777 		}
4778 
4779 		if (snapshot_set)
4780 			PopActiveSnapshot();
4781 	}
4782 }
4783 
4784 /* ----------
4785  * AfterTriggerPendingOnRel()
4786  *		Test to see if there are any pending after-trigger events for rel.
4787  *
4788  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
4789  * it is unsafe to perform major surgery on a relation.  Note that only
4790  * local pending events are examined.  We assume that having exclusive lock
4791  * on a rel guarantees there are no unserviced events in other backends ---
4792  * but having a lock does not prevent there being such events in our own.
4793  *
4794  * In some scenarios it'd be reasonable to remove pending events (more
4795  * specifically, mark them DONE by the current subxact) but without a lot
4796  * of knowledge of the trigger semantics we can't do this in general.
4797  * ----------
4798  */
4799 bool
4800 AfterTriggerPendingOnRel(Oid relid)
4801 {
4802 	AfterTriggerEvent event;
4803 	AfterTriggerEventChunk *chunk;
4804 	int			depth;
4805 
4806 	/* Scan queued events */
4807 	for_each_event_chunk(event, chunk, afterTriggers.events)
4808 	{
4809 		AfterTriggerShared evtshared = GetTriggerSharedData(event);
4810 
4811 		/*
4812 		 * We can ignore completed events.  (Even if a DONE flag is rolled
4813 		 * back by subxact abort, it's OK because the effects of the TRUNCATE
4814 		 * or whatever must get rolled back too.)
4815 		 */
4816 		if (event->ate_flags & AFTER_TRIGGER_DONE)
4817 			continue;
4818 
4819 		if (evtshared->ats_relid == relid)
4820 			return true;
4821 	}
4822 
4823 	/*
4824 	 * Also scan events queued by incomplete queries.  This could only matter
4825 	 * if TRUNCATE/etc is executed by a function or trigger within an updating
4826 	 * query on the same relation, which is pretty perverse, but let's check.
4827 	 */
4828 	for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
4829 	{
4830 		for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth])
4831 		{
4832 			AfterTriggerShared evtshared = GetTriggerSharedData(event);
4833 
4834 			if (event->ate_flags & AFTER_TRIGGER_DONE)
4835 				continue;
4836 
4837 			if (evtshared->ats_relid == relid)
4838 				return true;
4839 		}
4840 	}
4841 
4842 	return false;
4843 }
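/*
 * A minimal usage sketch (not compiled): callers such as TRUNCATE or CLUSTER
 * consult AfterTriggerPendingOnRel() before performing major surgery on a
 * table and bail out if local events are still queued.  The helper name
 * below is hypothetical; the real checks live in the respective command
 * code.
 */
#ifdef NOT_USED
static void
example_check_no_pending_trigger_events(Relation rel)
{
	if (AfterTriggerPendingOnRel(RelationGetRelid(rel)))
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_IN_USE),
				 errmsg("cannot rewrite \"%s\" because it has pending trigger events",
						RelationGetRelationName(rel))));
}
#endif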
4844 
4845 
4846 /* ----------
4847  * AfterTriggerSaveEvent()
4848  *
4849  *	Called by ExecA[RS]...Triggers() to queue up the triggers that should
4850  *	be fired for an event.
4851  *
4852  *	NOTE: this is called whenever there are any triggers associated with
4853  *	the event (even if they are disabled).  This function decides which
4854  *	triggers actually need to be queued.
4855  * ----------
4856  */
4857 static void
4858 AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
4859 					  int event, bool row_trigger,
4860 					  HeapTuple oldtup, HeapTuple newtup,
4861 					  List *recheckIndexes, Bitmapset *modifiedCols)
4862 {
4863 	Relation	rel = relinfo->ri_RelationDesc;
4864 	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
4865 	AfterTriggerEventData new_event;
4866 	AfterTriggerSharedData new_shared;
4867 	char		relkind = relinfo->ri_RelationDesc->rd_rel->relkind;
4868 	int			tgtype_event;
4869 	int			tgtype_level;
4870 	int			i;
4871 	Tuplestorestate *fdw_tuplestore = NULL;
4872 
4873 	/*
4874 	 * Check state.  We use a normal test not Assert because it is possible to
4875 	 * reach here in the wrong state given misconfigured RI triggers, in
4876 	 * particular deferring a cascade action trigger.
4877 	 */
4878 	if (afterTriggers.query_depth < 0)
4879 		elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
4880 
4881 	/* Be sure we have enough space to record events at this query depth. */
4882 	if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4883 		AfterTriggerEnlargeQueryState();
4884 
4885 	/*
4886 	 * Validate the event code and collect the associated tuple CTIDs.
4887 	 *
4888 	 * The event code will be used both as a bitmask and an array offset, so
4889 	 * validation is important to make sure we don't walk off the edge of our
4890 	 * arrays.
4891 	 */
4892 	switch (event)
4893 	{
4894 		case TRIGGER_EVENT_INSERT:
4895 			tgtype_event = TRIGGER_TYPE_INSERT;
4896 			if (row_trigger)
4897 			{
4898 				Assert(oldtup == NULL);
4899 				Assert(newtup != NULL);
4900 				ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid1));
4901 				ItemPointerSetInvalid(&(new_event.ate_ctid2));
4902 			}
4903 			else
4904 			{
4905 				Assert(oldtup == NULL);
4906 				Assert(newtup == NULL);
4907 				ItemPointerSetInvalid(&(new_event.ate_ctid1));
4908 				ItemPointerSetInvalid(&(new_event.ate_ctid2));
4909 			}
4910 			break;
4911 		case TRIGGER_EVENT_DELETE:
4912 			tgtype_event = TRIGGER_TYPE_DELETE;
4913 			if (row_trigger)
4914 			{
4915 				Assert(oldtup != NULL);
4916 				Assert(newtup == NULL);
4917 				ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
4918 				ItemPointerSetInvalid(&(new_event.ate_ctid2));
4919 			}
4920 			else
4921 			{
4922 				Assert(oldtup == NULL);
4923 				Assert(newtup == NULL);
4924 				ItemPointerSetInvalid(&(new_event.ate_ctid1));
4925 				ItemPointerSetInvalid(&(new_event.ate_ctid2));
4926 			}
4927 			break;
4928 		case TRIGGER_EVENT_UPDATE:
4929 			tgtype_event = TRIGGER_TYPE_UPDATE;
4930 			if (row_trigger)
4931 			{
4932 				Assert(oldtup != NULL);
4933 				Assert(newtup != NULL);
4934 				ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
4935 				ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid2));
4936 			}
4937 			else
4938 			{
4939 				Assert(oldtup == NULL);
4940 				Assert(newtup == NULL);
4941 				ItemPointerSetInvalid(&(new_event.ate_ctid1));
4942 				ItemPointerSetInvalid(&(new_event.ate_ctid2));
4943 			}
4944 			break;
4945 		case TRIGGER_EVENT_TRUNCATE:
4946 			tgtype_event = TRIGGER_TYPE_TRUNCATE;
4947 			Assert(oldtup == NULL);
4948 			Assert(newtup == NULL);
4949 			ItemPointerSetInvalid(&(new_event.ate_ctid1));
4950 			ItemPointerSetInvalid(&(new_event.ate_ctid2));
4951 			break;
4952 		default:
4953 			elog(ERROR, "invalid after-trigger event code: %d", event);
4954 			tgtype_event = 0;	/* keep compiler quiet */
4955 			break;
4956 	}
4957 
4958 	if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
4959 		new_event.ate_flags = (row_trigger && event == TRIGGER_EVENT_UPDATE) ?
4960 			AFTER_TRIGGER_2CTID : AFTER_TRIGGER_1CTID;
4961 	/* else, we'll initialize ate_flags for each trigger */
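	/*
	 * (Foreign tuples have no usable heap ctid, so for row triggers on
	 * foreign tables we spool the tuples into a tuplestore at the bottom of
	 * this function and mark each event with an FDW fetch/reuse flag instead
	 * of a ctid count.)
	 */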
4962 
4963 	tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
4964 
4965 	for (i = 0; i < trigdesc->numtriggers; i++)
4966 	{
4967 		Trigger    *trigger = &trigdesc->triggers[i];
4968 
4969 		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
4970 								  tgtype_level,
4971 								  TRIGGER_TYPE_AFTER,
4972 								  tgtype_event))
4973 			continue;
4974 		if (!TriggerEnabled(estate, relinfo, trigger, event,
4975 							modifiedCols, oldtup, newtup))
4976 			continue;
4977 
4978 		if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
4979 		{
4980 			if (fdw_tuplestore == NULL)
4981 			{
4982 				fdw_tuplestore = GetCurrentFDWTuplestore();
4983 				new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
4984 			}
4985 			else
4986 				/* subsequent event for the same tuple */
4987 				new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
4988 		}
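		/*
		 * (At firing time, an AFTER_TRIGGER_FDW_FETCH event reads the
		 * spooled foreign tuple(s) from the tuplestore, while subsequent
		 * AFTER_TRIGGER_FDW_REUSE events for the same row simply reuse the
		 * tuple(s) already fetched.)
		 */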
4989 
4990 		/*
4991 		 * If the trigger is a foreign key enforcement trigger, there are
4992 		 * certain cases where we can skip queueing the event because we can
4993 		 * tell by inspection that the FK constraint will still pass.
4994 		 */
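		/*
		 * For instance, an UPDATE on the referenced (PK) table that leaves
		 * the key columns untouched cannot break the constraint, and the
		 * same holds for an UPDATE on the referencing (FK) table that does
		 * not change its foreign-key columns; both cases are detected by the
		 * checks below.
		 */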
4995 		if (TRIGGER_FIRED_BY_UPDATE(event))
4996 		{
4997 			switch (RI_FKey_trigger_type(trigger->tgfoid))
4998 			{
4999 				case RI_TRIGGER_PK:
5000 					/* Update on trigger's PK table */
5001 					if (!RI_FKey_pk_upd_check_required(trigger, rel,
5002 													   oldtup, newtup))
5003 					{
5004 						/* skip queuing this event */
5005 						continue;
5006 					}
5007 					break;
5008 
5009 				case RI_TRIGGER_FK:
5010 					/* Update on trigger's FK table */
5011 					if (!RI_FKey_fk_upd_check_required(trigger, rel,
5012 													   oldtup, newtup))
5013 					{
5014 						/* skip queuing this event */
5015 						continue;
5016 					}
5017 					break;
5018 
5019 				case RI_TRIGGER_NONE:
5020 					/* Not an FK trigger */
5021 					break;
5022 			}
5023 		}
5024 
5025 		/*
5026 		 * If the trigger is a deferred unique constraint check trigger, only
5027 		 * queue it if the unique constraint was potentially violated, which
5028 		 * we know from index insertion time.
5029 		 */
5030 		if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
5031 		{
5032 			if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
5033 				continue;		/* Uniqueness definitely not violated */
5034 		}
5035 
5036 		/*
5037 		 * Fill in event structure and add it to the current query's queue.
5038 		 */
5039 		new_shared.ats_event =
5040 			(event & TRIGGER_EVENT_OPMASK) |
5041 			(row_trigger ? TRIGGER_EVENT_ROW : 0) |
5042 			(trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
5043 			(trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
5044 		new_shared.ats_tgoid = trigger->tgoid;
5045 		new_shared.ats_relid = RelationGetRelid(rel);
5046 		new_shared.ats_firing_id = 0;
5047 
5048 		afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth],
5049 							 &new_event, &new_shared);
5050 	}
5051 
5052 	/*
5053 	 * Finally, spool any foreign tuple(s).  The tuplestore squashes them to
5054 	 * minimal tuples, so this loses any system columns.  The executor lost
5055 	 * those columns before us, for an unrelated reason, so this is fine.
5056 	 */
5057 	if (fdw_tuplestore)
5058 	{
5059 		if (oldtup != NULL)
5060 			tuplestore_puttuple(fdw_tuplestore, oldtup);
5061 		if (newtup != NULL)
5062 			tuplestore_puttuple(fdw_tuplestore, newtup);
5063 	}
5064 }
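/*
 * Hedged illustration (not compiled) of how the executor hands a row event
 * to AfterTriggerSaveEvent(); this mirrors what ExecARInsertTriggers() does,
 * but the function below is only a sketch, not part of the real API.
 */
#ifdef NOT_USED
static void
example_queue_after_insert(EState *estate, ResultRelInfo *relinfo,
						   HeapTuple trigtuple, List *recheckIndexes)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

	/* Queue only if at least one AFTER ROW INSERT trigger exists */
	if (trigdesc && trigdesc->trig_insert_after_row)
		AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
							  true, NULL, trigtuple,
							  recheckIndexes, NULL);
}
#endif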
5065 
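/*
 * pg_trigger_depth() exposes MyTriggerDepth at the SQL level.  As an
 * illustrative (hypothetical) use, a PL/pgSQL trigger function can guard
 * against re-entrant firing with something like
 *
 *		IF pg_trigger_depth() > 1 THEN
 *			RETURN NEW;
 *		END IF;
 *
 * so that its real work runs only when the trigger is fired directly by a
 * client statement rather than by another trigger.
 */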
5066 Datum
5067 pg_trigger_depth(PG_FUNCTION_ARGS)
5068 {
5069 	PG_RETURN_INT32(MyTriggerDepth);
5070 }
5071