1 /*-------------------------------------------------------------------------
2 *
3 * trigger.c
4 * PostgreSQL TRIGGERs support code.
5 *
6 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 * IDENTIFICATION
10 * src/backend/commands/trigger.c
11 *
12 *-------------------------------------------------------------------------
13 */
14 #include "postgres.h"
15
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/index.h"
26 #include "catalog/indexing.h"
27 #include "catalog/objectaccess.h"
28 #include "catalog/partition.h"
29 #include "catalog/pg_constraint.h"
30 #include "catalog/pg_inherits.h"
31 #include "catalog/pg_proc.h"
32 #include "catalog/pg_trigger.h"
33 #include "catalog/pg_type.h"
34 #include "commands/dbcommands.h"
35 #include "commands/defrem.h"
36 #include "commands/trigger.h"
37 #include "executor/executor.h"
38 #include "executor/execPartition.h"
39 #include "miscadmin.h"
40 #include "nodes/bitmapset.h"
41 #include "nodes/makefuncs.h"
42 #include "optimizer/optimizer.h"
43 #include "parser/parse_clause.h"
44 #include "parser/parse_collate.h"
45 #include "parser/parse_func.h"
46 #include "parser/parse_relation.h"
47 #include "parser/parsetree.h"
48 #include "partitioning/partdesc.h"
49 #include "pgstat.h"
50 #include "rewrite/rewriteManip.h"
51 #include "storage/bufmgr.h"
52 #include "storage/lmgr.h"
53 #include "tcop/utility.h"
54 #include "utils/acl.h"
55 #include "utils/builtins.h"
56 #include "utils/bytea.h"
57 #include "utils/fmgroids.h"
58 #include "utils/inval.h"
59 #include "utils/lsyscache.h"
60 #include "utils/memutils.h"
61 #include "utils/rel.h"
62 #include "utils/snapmgr.h"
63 #include "utils/syscache.h"
64 #include "utils/tuplestore.h"
65
66
67 /* GUC variables */
68 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
69
70 /* How many levels deep into trigger execution are we? */
71 static int MyTriggerDepth = 0;
72
73 /* Local function prototypes */
74 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
75 static bool GetTupleForTrigger(EState *estate,
76 EPQState *epqstate,
77 ResultRelInfo *relinfo,
78 ItemPointer tid,
79 LockTupleMode lockmode,
80 TupleTableSlot *oldslot,
81 TupleTableSlot **newSlot);
82 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
83 Trigger *trigger, TriggerEvent event,
84 Bitmapset *modifiedCols,
85 TupleTableSlot *oldslot, TupleTableSlot *newslot);
86 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
87 int tgindx,
88 FmgrInfo *finfo,
89 Instrumentation *instr,
90 MemoryContext per_tuple_context);
91 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
92 int event, bool row_trigger,
93 TupleTableSlot *oldtup, TupleTableSlot *newtup,
94 List *recheckIndexes, Bitmapset *modifiedCols,
95 TransitionCaptureState *transition_capture);
96 static void AfterTriggerEnlargeQueryState(void);
97 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
98
99
100 /*
101 * Create a trigger. Returns the address of the created trigger.
102 *
103 * queryString is the source text of the CREATE TRIGGER command.
104 * This must be supplied if a whenClause is specified, else it can be NULL.
105 *
106 * relOid, if nonzero, is the relation on which the trigger should be
107 * created. If zero, the name provided in the statement will be looked up.
108 *
109 * refRelOid, if nonzero, is the relation to which the constraint trigger
110 * refers. If zero, the constraint relation name provided in the statement
111 * will be looked up as needed.
112 *
113 * constraintOid, if nonzero, says that this trigger is being created
114 * internally to implement that constraint. A suitable pg_depend entry will
115 * be made to link the trigger to that constraint. constraintOid is zero when
116 * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
117 * TRIGGER, we build a pg_constraint entry internally.)
118 *
119 * indexOid, if nonzero, is the OID of an index associated with the constraint.
120 * We do nothing with this except store it into pg_trigger.tgconstrindid;
121 * but when creating a trigger for a deferrable unique constraint on a
122 * partitioned table, its children are looked up. Note we don't cope with
123 * invalid indexes in that case.
124 *
125 * funcoid, if nonzero, is the OID of the function to invoke. When this is
126 * given, stmt->funcname is ignored.
127 *
128 * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
129 * if that trigger is dropped, this one should be too. (This is passed as
130 * Invalid by most callers; it's set here when recursing on a partition.)
131 *
132 * If whenClause is passed, it is an already-transformed expression for
133 * WHEN. In this case, we ignore any that may come in stmt->whenClause.
134 *
135 * If isInternal is true then this is an internally-generated trigger.
136 * This argument sets the tgisinternal field of the pg_trigger entry, and
137 * if true causes us to modify the given trigger name to ensure uniqueness.
138 *
139 * When isInternal is not true we require ACL_TRIGGER permissions on the
140 * relation, as well as ACL_EXECUTE on the trigger function. For internal
141 * triggers the caller must apply any required permission checks.
142 *
143 * When called on partitioned tables, this function recurses to create the
144 * trigger on all the partitions, except if isInternal is true, in which
145 * case caller is expected to execute recursion on its own. in_partition
146 * indicates such a recursive call; outside callers should pass "false"
147 * (but see CloneRowTriggersToPartition).
148 */
149 ObjectAddress
CreateTrigger(CreateTrigStmt * stmt,const char * queryString,Oid relOid,Oid refRelOid,Oid constraintOid,Oid indexOid,Oid funcoid,Oid parentTriggerOid,Node * whenClause,bool isInternal,bool in_partition)150 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
151 Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
152 Oid funcoid, Oid parentTriggerOid, Node *whenClause,
153 bool isInternal, bool in_partition)
154 {
155 return
156 CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
157 constraintOid, indexOid, funcoid,
158 parentTriggerOid, whenClause, isInternal,
159 in_partition, TRIGGER_FIRES_ON_ORIGIN);
160 }
161
162 /*
163 * Like the above; additionally the firing condition
164 * (always/origin/replica/disabled) can be specified.
165 */
166 ObjectAddress
CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
168 Oid relOid, Oid refRelOid, Oid constraintOid,
169 Oid indexOid, Oid funcoid, Oid parentTriggerOid,
170 Node *whenClause, bool isInternal, bool in_partition,
171 char trigger_fires_when)
172 {
173 int16 tgtype;
174 int ncolumns;
175 int16 *columns;
176 int2vector *tgattr;
177 List *whenRtable;
178 char *qual;
179 Datum values[Natts_pg_trigger];
180 bool nulls[Natts_pg_trigger];
181 Relation rel;
182 AclResult aclresult;
183 Relation tgrel;
184 Relation pgrel;
185 HeapTuple tuple = NULL;
186 Oid funcrettype;
187 Oid trigoid = InvalidOid;
188 char internaltrigname[NAMEDATALEN];
189 char *trigname;
190 Oid constrrelid = InvalidOid;
191 ObjectAddress myself,
192 referenced;
193 char *oldtablename = NULL;
194 char *newtablename = NULL;
195 bool partition_recurse;
196 bool trigger_exists = false;
197 Oid existing_constraint_oid = InvalidOid;
198 bool existing_isInternal = false;
199
200 if (OidIsValid(relOid))
201 rel = table_open(relOid, ShareRowExclusiveLock);
202 else
203 rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
204
205 /*
206 * Triggers must be on tables or views, and there are additional
207 * relation-type-specific restrictions.
208 */
209 if (rel->rd_rel->relkind == RELKIND_RELATION)
210 {
211 /* Tables can't have INSTEAD OF triggers */
212 if (stmt->timing != TRIGGER_TYPE_BEFORE &&
213 stmt->timing != TRIGGER_TYPE_AFTER)
214 ereport(ERROR,
215 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
216 errmsg("\"%s\" is a table",
217 RelationGetRelationName(rel)),
218 errdetail("Tables cannot have INSTEAD OF triggers.")));
219 }
220 else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
221 {
222 /* Partitioned tables can't have INSTEAD OF triggers */
223 if (stmt->timing != TRIGGER_TYPE_BEFORE &&
224 stmt->timing != TRIGGER_TYPE_AFTER)
225 ereport(ERROR,
226 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
227 errmsg("\"%s\" is a table",
228 RelationGetRelationName(rel)),
229 errdetail("Tables cannot have INSTEAD OF triggers.")));
230
231 /*
232 * FOR EACH ROW triggers have further restrictions
233 */
234 if (stmt->row)
235 {
236 /*
237 * Disallow use of transition tables.
238 *
239 * Note that we have another restriction about transition tables
240 * in partitions; search for 'has_superclass' below for an
241 * explanation. The check here is just to protect from the fact
242 * that if we allowed it here, the creation would succeed for a
243 * partitioned table with no partitions, but would be blocked by
244 * the other restriction when the first partition was created,
245 * which is very unfriendly behavior.
246 */
247 if (stmt->transitionRels != NIL)
248 ereport(ERROR,
249 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
250 errmsg("\"%s\" is a partitioned table",
251 RelationGetRelationName(rel)),
252 errdetail("Triggers on partitioned tables cannot have transition tables.")));
253 }
254 }
255 else if (rel->rd_rel->relkind == RELKIND_VIEW)
256 {
257 /*
258 * Views can have INSTEAD OF triggers (which we check below are
259 * row-level), or statement-level BEFORE/AFTER triggers.
260 */
261 if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
262 ereport(ERROR,
263 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
264 errmsg("\"%s\" is a view",
265 RelationGetRelationName(rel)),
266 errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
267 /* Disallow TRUNCATE triggers on VIEWs */
268 if (TRIGGER_FOR_TRUNCATE(stmt->events))
269 ereport(ERROR,
270 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
271 errmsg("\"%s\" is a view",
272 RelationGetRelationName(rel)),
273 errdetail("Views cannot have TRUNCATE triggers.")));
274 }
275 else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
276 {
277 if (stmt->timing != TRIGGER_TYPE_BEFORE &&
278 stmt->timing != TRIGGER_TYPE_AFTER)
279 ereport(ERROR,
280 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
281 errmsg("\"%s\" is a foreign table",
282 RelationGetRelationName(rel)),
283 errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
284
285 if (TRIGGER_FOR_TRUNCATE(stmt->events))
286 ereport(ERROR,
287 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
288 errmsg("\"%s\" is a foreign table",
289 RelationGetRelationName(rel)),
290 errdetail("Foreign tables cannot have TRUNCATE triggers.")));
291
292 /*
293 * We disallow constraint triggers to protect the assumption that
294 * triggers on FKs can't be deferred. See notes with AfterTriggers
295 * data structures, below.
296 */
297 if (stmt->isconstraint)
298 ereport(ERROR,
299 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
300 errmsg("\"%s\" is a foreign table",
301 RelationGetRelationName(rel)),
302 errdetail("Foreign tables cannot have constraint triggers.")));
303 }
304 else
305 ereport(ERROR,
306 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
307 errmsg("\"%s\" is not a table or view",
308 RelationGetRelationName(rel))));
309
310 if (!allowSystemTableMods && IsSystemRelation(rel))
311 ereport(ERROR,
312 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
313 errmsg("permission denied: \"%s\" is a system catalog",
314 RelationGetRelationName(rel))));
315
316 if (stmt->isconstraint)
317 {
318 /*
319 * We must take a lock on the target relation to protect against
320 * concurrent drop. It's not clear that AccessShareLock is strong
321 * enough, but we certainly need at least that much... otherwise, we
322 * might end up creating a pg_constraint entry referencing a
323 * nonexistent table.
324 */
325 if (OidIsValid(refRelOid))
326 {
327 LockRelationOid(refRelOid, AccessShareLock);
328 constrrelid = refRelOid;
329 }
330 else if (stmt->constrrel != NULL)
331 constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
332 false);
333 }
334
335 /* permission checks */
336 if (!isInternal)
337 {
338 aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
339 ACL_TRIGGER);
340 if (aclresult != ACLCHECK_OK)
341 aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
342 RelationGetRelationName(rel));
343
344 if (OidIsValid(constrrelid))
345 {
346 aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
347 ACL_TRIGGER);
348 if (aclresult != ACLCHECK_OK)
349 aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
350 get_rel_name(constrrelid));
351 }
352 }
353
354 /*
355 * When called on a partitioned table to create a FOR EACH ROW trigger
356 * that's not internal, we create one trigger for each partition, too.
357 *
358 * For that, we'd better hold lock on all of them ahead of time.
359 */
360 partition_recurse = !isInternal && stmt->row &&
361 rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
362 if (partition_recurse)
363 list_free(find_all_inheritors(RelationGetRelid(rel),
364 ShareRowExclusiveLock, NULL));
365
366 /* Compute tgtype */
367 TRIGGER_CLEAR_TYPE(tgtype);
368 if (stmt->row)
369 TRIGGER_SETT_ROW(tgtype);
370 tgtype |= stmt->timing;
371 tgtype |= stmt->events;
372
373 /* Disallow ROW-level TRUNCATE triggers */
374 if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
375 ereport(ERROR,
376 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
377 errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
378
379 /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
380 if (TRIGGER_FOR_INSTEAD(tgtype))
381 {
382 if (!TRIGGER_FOR_ROW(tgtype))
383 ereport(ERROR,
384 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
385 errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
386 if (stmt->whenClause)
387 ereport(ERROR,
388 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
389 errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
390 if (stmt->columns != NIL)
391 ereport(ERROR,
392 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
393 errmsg("INSTEAD OF triggers cannot have column lists")));
394 }
395
396 /*
397 * We don't yet support naming ROW transition variables, but the parser
398 * recognizes the syntax so we can give a nicer message here.
399 *
400 * Per standard, REFERENCING TABLE names are only allowed on AFTER
401 * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
402 * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
403 * only allowed once. Per standard, OLD may not be specified when
404 * creating a trigger only for INSERT, and NEW may not be specified when
405 * creating a trigger only for DELETE.
406 *
407 * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
408 * reference both ROW and TABLE transition data.
409 */
410 if (stmt->transitionRels != NIL)
411 {
412 List *varList = stmt->transitionRels;
413 ListCell *lc;
414
415 foreach(lc, varList)
416 {
417 TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
418
419 if (!(tt->isTable))
420 ereport(ERROR,
421 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
422 errmsg("ROW variable naming in the REFERENCING clause is not supported"),
423 errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
424
425 /*
426 * Because of the above test, we omit further ROW-related testing
427 * below. If we later allow naming OLD and NEW ROW variables,
428 * adjustments will be needed below.
429 */
430
431 if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
432 ereport(ERROR,
433 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
434 errmsg("\"%s\" is a foreign table",
435 RelationGetRelationName(rel)),
436 errdetail("Triggers on foreign tables cannot have transition tables.")));
437
438 if (rel->rd_rel->relkind == RELKIND_VIEW)
439 ereport(ERROR,
440 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
441 errmsg("\"%s\" is a view",
442 RelationGetRelationName(rel)),
443 errdetail("Triggers on views cannot have transition tables.")));
444
445 /*
446 * We currently don't allow row-level triggers with transition
447 * tables on partition or inheritance children. Such triggers
448 * would somehow need to see tuples converted to the format of the
449 * table they're attached to, and it's not clear which subset of
450 * tuples each child should see. See also the prohibitions in
451 * ATExecAttachPartition() and ATExecAddInherit().
452 */
453 if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
454 {
455 /* Use appropriate error message. */
456 if (rel->rd_rel->relispartition)
457 ereport(ERROR,
458 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
459 errmsg("ROW triggers with transition tables are not supported on partitions")));
460 else
461 ereport(ERROR,
462 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
463 errmsg("ROW triggers with transition tables are not supported on inheritance children")));
464 }
465
466 if (stmt->timing != TRIGGER_TYPE_AFTER)
467 ereport(ERROR,
468 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
469 errmsg("transition table name can only be specified for an AFTER trigger")));
470
471 if (TRIGGER_FOR_TRUNCATE(tgtype))
472 ereport(ERROR,
473 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
474 errmsg("TRUNCATE triggers with transition tables are not supported")));
475
476 /*
477 * We currently don't allow multi-event triggers ("INSERT OR
478 * UPDATE") with transition tables, because it's not clear how to
479 * handle INSERT ... ON CONFLICT statements which can fire both
480 * INSERT and UPDATE triggers. We show the inserted tuples to
481 * INSERT triggers and the updated tuples to UPDATE triggers, but
482 * it's not yet clear what INSERT OR UPDATE trigger should see.
483 * This restriction could be lifted if we can decide on the right
484 * semantics in a later release.
485 */
486 if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
487 (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
488 (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
489 ereport(ERROR,
490 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
491 errmsg("transition tables cannot be specified for triggers with more than one event")));
492
493 /*
494 * We currently don't allow column-specific triggers with
495 * transition tables. Per spec, that seems to require
496 * accumulating separate transition tables for each combination of
497 * columns, which is a lot of work for a rather marginal feature.
498 */
499 if (stmt->columns != NIL)
500 ereport(ERROR,
501 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
502 errmsg("transition tables cannot be specified for triggers with column lists")));
503
504 /*
505 * We disallow constraint triggers with transition tables, to
506 * protect the assumption that such triggers can't be deferred.
507 * See notes with AfterTriggers data structures, below.
508 *
509 * Currently this is enforced by the grammar, so just Assert here.
510 */
511 Assert(!stmt->isconstraint);
512
513 if (tt->isNew)
514 {
515 if (!(TRIGGER_FOR_INSERT(tgtype) ||
516 TRIGGER_FOR_UPDATE(tgtype)))
517 ereport(ERROR,
518 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
519 errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
520
521 if (newtablename != NULL)
522 ereport(ERROR,
523 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
524 errmsg("NEW TABLE cannot be specified multiple times")));
525
526 newtablename = tt->name;
527 }
528 else
529 {
530 if (!(TRIGGER_FOR_DELETE(tgtype) ||
531 TRIGGER_FOR_UPDATE(tgtype)))
532 ereport(ERROR,
533 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
534 errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
535
536 if (oldtablename != NULL)
537 ereport(ERROR,
538 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
539 errmsg("OLD TABLE cannot be specified multiple times")));
540
541 oldtablename = tt->name;
542 }
543 }
544
545 if (newtablename != NULL && oldtablename != NULL &&
546 strcmp(newtablename, oldtablename) == 0)
547 ereport(ERROR,
548 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
549 errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
550 }
551
552 /*
553 * Parse the WHEN clause, if any and we weren't passed an already
554 * transformed one.
555 *
556 * Note that as a side effect, we fill whenRtable when parsing. If we got
557 * an already parsed clause, this does not occur, which is what we want --
558 * no point in adding redundant dependencies below.
559 */
560 if (!whenClause && stmt->whenClause)
561 {
562 ParseState *pstate;
563 ParseNamespaceItem *nsitem;
564 List *varList;
565 ListCell *lc;
566
567 /* Set up a pstate to parse with */
568 pstate = make_parsestate(NULL);
569 pstate->p_sourcetext = queryString;
570
571 /*
572 * Set up nsitems for OLD and NEW references.
573 *
574 * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
575 */
576 nsitem = addRangeTableEntryForRelation(pstate, rel,
577 AccessShareLock,
578 makeAlias("old", NIL),
579 false, false);
580 addNSItemToQuery(pstate, nsitem, false, true, true);
581 nsitem = addRangeTableEntryForRelation(pstate, rel,
582 AccessShareLock,
583 makeAlias("new", NIL),
584 false, false);
585 addNSItemToQuery(pstate, nsitem, false, true, true);
586
587 /* Transform expression. Copy to be sure we don't modify original */
588 whenClause = transformWhereClause(pstate,
589 copyObject(stmt->whenClause),
590 EXPR_KIND_TRIGGER_WHEN,
591 "WHEN");
592 /* we have to fix its collations too */
593 assign_expr_collations(pstate, whenClause);
594
595 /*
596 * Check for disallowed references to OLD/NEW.
597 *
598 * NB: pull_var_clause is okay here only because we don't allow
599 * subselects in WHEN clauses; it would fail to examine the contents
600 * of subselects.
601 */
602 varList = pull_var_clause(whenClause, 0);
603 foreach(lc, varList)
604 {
605 Var *var = (Var *) lfirst(lc);
606
607 switch (var->varno)
608 {
609 case PRS2_OLD_VARNO:
610 if (!TRIGGER_FOR_ROW(tgtype))
611 ereport(ERROR,
612 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
613 errmsg("statement trigger's WHEN condition cannot reference column values"),
614 parser_errposition(pstate, var->location)));
615 if (TRIGGER_FOR_INSERT(tgtype))
616 ereport(ERROR,
617 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
618 errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
619 parser_errposition(pstate, var->location)));
620 /* system columns are okay here */
621 break;
622 case PRS2_NEW_VARNO:
623 if (!TRIGGER_FOR_ROW(tgtype))
624 ereport(ERROR,
625 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
626 errmsg("statement trigger's WHEN condition cannot reference column values"),
627 parser_errposition(pstate, var->location)));
628 if (TRIGGER_FOR_DELETE(tgtype))
629 ereport(ERROR,
630 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
631 errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
632 parser_errposition(pstate, var->location)));
633 if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
634 ereport(ERROR,
635 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
636 errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
637 parser_errposition(pstate, var->location)));
638 if (TRIGGER_FOR_BEFORE(tgtype) &&
639 var->varattno == 0 &&
640 RelationGetDescr(rel)->constr &&
641 RelationGetDescr(rel)->constr->has_generated_stored)
642 ereport(ERROR,
643 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
644 errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
645 errdetail("A whole-row reference is used and the table contains generated columns."),
646 parser_errposition(pstate, var->location)));
647 if (TRIGGER_FOR_BEFORE(tgtype) &&
648 var->varattno > 0 &&
649 TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
650 ereport(ERROR,
651 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
652 errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
653 errdetail("Column \"%s\" is a generated column.",
654 NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
655 parser_errposition(pstate, var->location)));
656 break;
657 default:
658 /* can't happen without add_missing_from, so just elog */
659 elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
660 break;
661 }
662 }
663
664 /* we'll need the rtable for recordDependencyOnExpr */
665 whenRtable = pstate->p_rtable;
666
667 qual = nodeToString(whenClause);
668
669 free_parsestate(pstate);
670 }
671 else if (!whenClause)
672 {
673 whenClause = NULL;
674 whenRtable = NIL;
675 qual = NULL;
676 }
677 else
678 {
679 qual = nodeToString(whenClause);
680 whenRtable = NIL;
681 }
682
683 /*
684 * Find and validate the trigger function.
685 */
686 if (!OidIsValid(funcoid))
687 funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
688 if (!isInternal)
689 {
690 aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
691 if (aclresult != ACLCHECK_OK)
692 aclcheck_error(aclresult, OBJECT_FUNCTION,
693 NameListToString(stmt->funcname));
694 }
695 funcrettype = get_func_rettype(funcoid);
696 if (funcrettype != TRIGGEROID)
697 ereport(ERROR,
698 (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
699 errmsg("function %s must return type %s",
700 NameListToString(stmt->funcname), "trigger")));
701
702 /*
703 * Scan pg_trigger to see if there is already a trigger of the same name.
704 * Skip this for internally generated triggers, since we'll modify the
705 * name to be unique below.
706 *
707 * NOTE that this is cool only because we have ShareRowExclusiveLock on
708 * the relation, so the trigger set won't be changing underneath us.
709 */
710 tgrel = table_open(TriggerRelationId, RowExclusiveLock);
711 if (!isInternal)
712 {
713 ScanKeyData skeys[2];
714 SysScanDesc tgscan;
715
716 ScanKeyInit(&skeys[0],
717 Anum_pg_trigger_tgrelid,
718 BTEqualStrategyNumber, F_OIDEQ,
719 ObjectIdGetDatum(RelationGetRelid(rel)));
720
721 ScanKeyInit(&skeys[1],
722 Anum_pg_trigger_tgname,
723 BTEqualStrategyNumber, F_NAMEEQ,
724 CStringGetDatum(stmt->trigname));
725
726 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
727 NULL, 2, skeys);
728
729 /* There should be at most one matching tuple */
730 if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
731 {
732 Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
733
734 trigoid = oldtrigger->oid;
735 existing_constraint_oid = oldtrigger->tgconstraint;
736 existing_isInternal = oldtrigger->tgisinternal;
737 trigger_exists = true;
738 /* copy the tuple to use in CatalogTupleUpdate() */
739 tuple = heap_copytuple(tuple);
740 }
741 systable_endscan(tgscan);
742 }
743
744 if (!trigger_exists)
745 {
746 /* Generate the OID for the new trigger. */
747 trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
748 Anum_pg_trigger_oid);
749 }
750 else
751 {
752 /*
753 * If OR REPLACE was specified, we'll replace the old trigger;
754 * otherwise complain about the duplicate name.
755 */
756 if (!stmt->replace)
757 ereport(ERROR,
758 (errcode(ERRCODE_DUPLICATE_OBJECT),
759 errmsg("trigger \"%s\" for relation \"%s\" already exists",
760 stmt->trigname, RelationGetRelationName(rel))));
761
762 /*
763 * An internal trigger cannot be replaced by a user-defined trigger.
764 * However, skip this test when in_partition, because then we're
765 * recursing from a partitioned table and the check was made at the
766 * parent level. Child triggers will always be marked "internal" (so
767 * this test does protect us from the user trying to replace a child
768 * trigger directly).
769 */
770 if (existing_isInternal && !isInternal && !in_partition)
771 ereport(ERROR,
772 (errcode(ERRCODE_DUPLICATE_OBJECT),
773 errmsg("trigger \"%s\" for relation \"%s\" is an internal trigger",
774 stmt->trigname, RelationGetRelationName(rel))));
775
776 /*
777 * It is not allowed to replace with a constraint trigger; gram.y
778 * should have enforced this already.
779 */
780 Assert(!stmt->isconstraint);
781
782 /*
783 * It is not allowed to replace an existing constraint trigger,
784 * either. (The reason for these restrictions is partly that it seems
785 * difficult to deal with pending trigger events in such cases, and
786 * partly that the command might imply changing the constraint's
787 * properties as well, which doesn't seem nice.)
788 */
789 if (OidIsValid(existing_constraint_oid))
790 ereport(ERROR,
791 (errcode(ERRCODE_DUPLICATE_OBJECT),
792 errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
793 stmt->trigname, RelationGetRelationName(rel))));
794 }
795
796 /*
797 * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
798 * corresponding pg_constraint entry.
799 */
800 if (stmt->isconstraint && !OidIsValid(constraintOid))
801 {
802 /* Internal callers should have made their own constraints */
803 Assert(!isInternal);
804 constraintOid = CreateConstraintEntry(stmt->trigname,
805 RelationGetNamespace(rel),
806 CONSTRAINT_TRIGGER,
807 stmt->deferrable,
808 stmt->initdeferred,
809 true,
810 InvalidOid, /* no parent */
811 RelationGetRelid(rel),
812 NULL, /* no conkey */
813 0,
814 0,
815 InvalidOid, /* no domain */
816 InvalidOid, /* no index */
817 InvalidOid, /* no foreign key */
818 NULL,
819 NULL,
820 NULL,
821 NULL,
822 0,
823 ' ',
824 ' ',
825 ' ',
826 NULL, /* no exclusion */
827 NULL, /* no check constraint */
828 NULL,
829 true, /* islocal */
830 0, /* inhcount */
831 true, /* noinherit */
832 isInternal); /* is_internal */
833 }
834
835 /*
836 * If trigger is internally generated, modify the provided trigger name to
837 * ensure uniqueness by appending the trigger OID. (Callers will usually
838 * supply a simple constant trigger name in these cases.)
839 */
840 if (isInternal)
841 {
842 snprintf(internaltrigname, sizeof(internaltrigname),
843 "%s_%u", stmt->trigname, trigoid);
844 trigname = internaltrigname;
845 }
846 else
847 {
848 /* user-defined trigger; use the specified trigger name as-is */
849 trigname = stmt->trigname;
850 }
851
852 /*
853 * Build the new pg_trigger tuple.
854 *
855 * When we're creating a trigger in a partition, we mark it as internal,
856 * even though we don't do the isInternal magic in this function. This
857 * makes the triggers in partitions identical to the ones in the
858 * partitioned tables, except that they are marked internal.
859 */
860 memset(nulls, false, sizeof(nulls));
861
862 values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
863 values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
864 values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
865 values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
866 CStringGetDatum(trigname));
867 values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
868 values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
869 values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
870 values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal || in_partition);
871 values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
872 values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
873 values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
874 values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
875 values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
876
877 if (stmt->args)
878 {
879 ListCell *le;
880 char *args;
881 int16 nargs = list_length(stmt->args);
882 int len = 0;
883
884 foreach(le, stmt->args)
885 {
886 char *ar = strVal(lfirst(le));
887
888 len += strlen(ar) + 4;
889 for (; *ar; ar++)
890 {
891 if (*ar == '\\')
892 len++;
893 }
894 }
895 args = (char *) palloc(len + 1);
896 args[0] = '\0';
897 foreach(le, stmt->args)
898 {
899 char *s = strVal(lfirst(le));
900 char *d = args + strlen(args);
901
902 while (*s)
903 {
904 if (*s == '\\')
905 *d++ = '\\';
906 *d++ = *s++;
907 }
908 strcpy(d, "\\000");
909 }
910 values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
911 values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
912 CStringGetDatum(args));
913 }
914 else
915 {
916 values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
917 values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
918 CStringGetDatum(""));
919 }
920
921 /* build column number array if it's a column-specific trigger */
922 ncolumns = list_length(stmt->columns);
923 if (ncolumns == 0)
924 columns = NULL;
925 else
926 {
927 ListCell *cell;
928 int i = 0;
929
930 columns = (int16 *) palloc(ncolumns * sizeof(int16));
931 foreach(cell, stmt->columns)
932 {
933 char *name = strVal(lfirst(cell));
934 int16 attnum;
935 int j;
936
937 /* Lookup column name. System columns are not allowed */
938 attnum = attnameAttNum(rel, name, false);
939 if (attnum == InvalidAttrNumber)
940 ereport(ERROR,
941 (errcode(ERRCODE_UNDEFINED_COLUMN),
942 errmsg("column \"%s\" of relation \"%s\" does not exist",
943 name, RelationGetRelationName(rel))));
944
945 /* Check for duplicates */
946 for (j = i - 1; j >= 0; j--)
947 {
948 if (columns[j] == attnum)
949 ereport(ERROR,
950 (errcode(ERRCODE_DUPLICATE_COLUMN),
951 errmsg("column \"%s\" specified more than once",
952 name)));
953 }
954
955 columns[i++] = attnum;
956 }
957 }
958 tgattr = buildint2vector(columns, ncolumns);
959 values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
960
961 /* set tgqual if trigger has WHEN clause */
962 if (qual)
963 values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
964 else
965 nulls[Anum_pg_trigger_tgqual - 1] = true;
966
967 if (oldtablename)
968 values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
969 CStringGetDatum(oldtablename));
970 else
971 nulls[Anum_pg_trigger_tgoldtable - 1] = true;
972 if (newtablename)
973 values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
974 CStringGetDatum(newtablename));
975 else
976 nulls[Anum_pg_trigger_tgnewtable - 1] = true;
977
978 /*
979 * Insert or replace tuple in pg_trigger.
980 */
981 if (!trigger_exists)
982 {
983 tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
984 CatalogTupleInsert(tgrel, tuple);
985 }
986 else
987 {
988 HeapTuple newtup;
989
990 newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
991 CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
992 heap_freetuple(newtup);
993 }
994
995 heap_freetuple(tuple); /* free either original or new tuple */
996 table_close(tgrel, RowExclusiveLock);
997
998 pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
999 pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
1000 pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
1001 if (oldtablename)
1002 pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1003 if (newtablename)
1004 pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1005
1006 /*
1007 * Update relation's pg_class entry; if necessary; and if not, send an SI
1008 * message to make other backends (and this one) rebuild relcache entries.
1009 */
1010 pgrel = table_open(RelationRelationId, RowExclusiveLock);
1011 tuple = SearchSysCacheCopy1(RELOID,
1012 ObjectIdGetDatum(RelationGetRelid(rel)));
1013 if (!HeapTupleIsValid(tuple))
1014 elog(ERROR, "cache lookup failed for relation %u",
1015 RelationGetRelid(rel));
1016 if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1017 {
1018 ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1019
1020 CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1021
1022 CommandCounterIncrement();
1023 }
1024 else
1025 CacheInvalidateRelcacheByTuple(tuple);
1026
1027 heap_freetuple(tuple);
1028 table_close(pgrel, RowExclusiveLock);
1029
1030 /*
1031 * If we're replacing a trigger, flush all the old dependencies before
1032 * recording new ones.
1033 */
1034 if (trigger_exists)
1035 deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1036
1037 /*
1038 * Record dependencies for trigger. Always place a normal dependency on
1039 * the function.
1040 */
1041 myself.classId = TriggerRelationId;
1042 myself.objectId = trigoid;
1043 myself.objectSubId = 0;
1044
1045 referenced.classId = ProcedureRelationId;
1046 referenced.objectId = funcoid;
1047 referenced.objectSubId = 0;
1048 recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1049
1050 if (isInternal && OidIsValid(constraintOid))
1051 {
1052 /*
1053 * Internally-generated trigger for a constraint, so make it an
1054 * internal dependency of the constraint. We can skip depending on
1055 * the relation(s), as there'll be an indirect dependency via the
1056 * constraint.
1057 */
1058 referenced.classId = ConstraintRelationId;
1059 referenced.objectId = constraintOid;
1060 referenced.objectSubId = 0;
1061 recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1062 }
1063 else
1064 {
1065 /*
1066 * User CREATE TRIGGER, so place dependencies. We make trigger be
1067 * auto-dropped if its relation is dropped or if the FK relation is
1068 * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1069 */
1070 referenced.classId = RelationRelationId;
1071 referenced.objectId = RelationGetRelid(rel);
1072 referenced.objectSubId = 0;
1073 recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1074
1075 if (OidIsValid(constrrelid))
1076 {
1077 referenced.classId = RelationRelationId;
1078 referenced.objectId = constrrelid;
1079 referenced.objectSubId = 0;
1080 recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1081 }
1082 /* Not possible to have an index dependency in this case */
1083 Assert(!OidIsValid(indexOid));
1084
1085 /*
1086 * If it's a user-specified constraint trigger, make the constraint
1087 * internally dependent on the trigger instead of vice versa.
1088 */
1089 if (OidIsValid(constraintOid))
1090 {
1091 referenced.classId = ConstraintRelationId;
1092 referenced.objectId = constraintOid;
1093 referenced.objectSubId = 0;
1094 recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1095 }
1096
1097 /*
1098 * If it's a partition trigger, create the partition dependencies.
1099 */
1100 if (OidIsValid(parentTriggerOid))
1101 {
1102 ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1103 recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1104 ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1105 recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1106 }
1107 }
1108
1109 /* If column-specific trigger, add normal dependencies on columns */
1110 if (columns != NULL)
1111 {
1112 int i;
1113
1114 referenced.classId = RelationRelationId;
1115 referenced.objectId = RelationGetRelid(rel);
1116 for (i = 0; i < ncolumns; i++)
1117 {
1118 referenced.objectSubId = columns[i];
1119 recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1120 }
1121 }
1122
1123 /*
1124 * If it has a WHEN clause, add dependencies on objects mentioned in the
1125 * expression (eg, functions, as well as any columns used).
1126 */
1127 if (whenRtable != NIL)
1128 recordDependencyOnExpr(&myself, whenClause, whenRtable,
1129 DEPENDENCY_NORMAL);
1130
1131 /* Post creation hook for new trigger */
1132 InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1133 isInternal);
1134
1135 /*
1136 * Lastly, create the trigger on child relations, if needed.
1137 */
1138 if (partition_recurse)
1139 {
1140 PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1141 List *idxs = NIL;
1142 List *childTbls = NIL;
1143 ListCell *l;
1144 int i;
1145 MemoryContext oldcxt,
1146 perChildCxt;
1147
1148 perChildCxt = AllocSetContextCreate(CurrentMemoryContext,
1149 "part trig clone",
1150 ALLOCSET_SMALL_SIZES);
1151
1152 /*
1153 * When a trigger is being created associated with an index, we'll
1154 * need to associate the trigger in each child partition with the
1155 * corresponding index on it.
1156 */
1157 if (OidIsValid(indexOid))
1158 {
1159 ListCell *l;
1160 List *idxs = NIL;
1161
1162 idxs = find_inheritance_children(indexOid, ShareRowExclusiveLock);
1163 foreach(l, idxs)
1164 childTbls = lappend_oid(childTbls,
1165 IndexGetRelation(lfirst_oid(l),
1166 false));
1167 }
1168
1169 oldcxt = MemoryContextSwitchTo(perChildCxt);
1170
1171 /* Iterate to create the trigger on each existing partition */
1172 for (i = 0; i < partdesc->nparts; i++)
1173 {
1174 Oid indexOnChild = InvalidOid;
1175 ListCell *l2;
1176 CreateTrigStmt *childStmt;
1177 Relation childTbl;
1178 Node *qual;
1179
1180 childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1181
1182 /* Find which of the child indexes is the one on this partition */
1183 if (OidIsValid(indexOid))
1184 {
1185 forboth(l, idxs, l2, childTbls)
1186 {
1187 if (lfirst_oid(l2) == partdesc->oids[i])
1188 {
1189 indexOnChild = lfirst_oid(l);
1190 break;
1191 }
1192 }
1193 if (!OidIsValid(indexOnChild))
1194 elog(ERROR, "failed to find index matching index \"%s\" in partition \"%s\"",
1195 get_rel_name(indexOid),
1196 get_rel_name(partdesc->oids[i]));
1197 }
1198
1199 /*
1200 * Initialize our fabricated parse node by copying the original
1201 * one, then resetting fields that we pass separately.
1202 */
1203 childStmt = (CreateTrigStmt *) copyObject(stmt);
1204 childStmt->funcname = NIL;
1205 childStmt->whenClause = NULL;
1206
1207 /* If there is a WHEN clause, create a modified copy of it */
1208 qual = copyObject(whenClause);
1209 qual = (Node *)
1210 map_partition_varattnos((List *) qual, PRS2_OLD_VARNO,
1211 childTbl, rel);
1212 qual = (Node *)
1213 map_partition_varattnos((List *) qual, PRS2_NEW_VARNO,
1214 childTbl, rel);
1215
1216 CreateTriggerFiringOn(childStmt, queryString,
1217 partdesc->oids[i], refRelOid,
1218 InvalidOid, indexOnChild,
1219 funcoid, trigoid, qual,
1220 isInternal, true, trigger_fires_when);
1221
1222 table_close(childTbl, NoLock);
1223
1224 MemoryContextReset(perChildCxt);
1225 }
1226
1227 MemoryContextSwitchTo(oldcxt);
1228 MemoryContextDelete(perChildCxt);
1229 list_free(idxs);
1230 list_free(childTbls);
1231 }
1232
1233 /* Keep lock on target rel until end of xact */
1234 table_close(rel, NoLock);
1235
1236 return myself;
1237 }
1238
1239
1240 /*
1241 * Guts of trigger deletion.
1242 */
1243 void
RemoveTriggerById(Oid trigOid)1244 RemoveTriggerById(Oid trigOid)
1245 {
1246 Relation tgrel;
1247 SysScanDesc tgscan;
1248 ScanKeyData skey[1];
1249 HeapTuple tup;
1250 Oid relid;
1251 Relation rel;
1252
1253 tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1254
1255 /*
1256 * Find the trigger to delete.
1257 */
1258 ScanKeyInit(&skey[0],
1259 Anum_pg_trigger_oid,
1260 BTEqualStrategyNumber, F_OIDEQ,
1261 ObjectIdGetDatum(trigOid));
1262
1263 tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1264 NULL, 1, skey);
1265
1266 tup = systable_getnext(tgscan);
1267 if (!HeapTupleIsValid(tup))
1268 elog(ERROR, "could not find tuple for trigger %u", trigOid);
1269
1270 /*
1271 * Open and exclusive-lock the relation the trigger belongs to.
1272 */
1273 relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1274
1275 rel = table_open(relid, AccessExclusiveLock);
1276
1277 if (rel->rd_rel->relkind != RELKIND_RELATION &&
1278 rel->rd_rel->relkind != RELKIND_VIEW &&
1279 rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1280 rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1281 ereport(ERROR,
1282 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1283 errmsg("\"%s\" is not a table, view, or foreign table",
1284 RelationGetRelationName(rel))));
1285
1286 if (!allowSystemTableMods && IsSystemRelation(rel))
1287 ereport(ERROR,
1288 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1289 errmsg("permission denied: \"%s\" is a system catalog",
1290 RelationGetRelationName(rel))));
1291
1292 /*
1293 * Delete the pg_trigger tuple.
1294 */
1295 CatalogTupleDelete(tgrel, &tup->t_self);
1296
1297 systable_endscan(tgscan);
1298 table_close(tgrel, RowExclusiveLock);
1299
1300 /*
1301 * We do not bother to try to determine whether any other triggers remain,
1302 * which would be needed in order to decide whether it's safe to clear the
1303 * relation's relhastriggers. (In any case, there might be a concurrent
1304 * process adding new triggers.) Instead, just force a relcache inval to
1305 * make other backends (and this one too!) rebuild their relcache entries.
1306 * There's no great harm in leaving relhastriggers true even if there are
1307 * no triggers left.
1308 */
1309 CacheInvalidateRelcache(rel);
1310
1311 /* Keep lock on trigger's rel until end of xact */
1312 table_close(rel, NoLock);
1313 }
1314
1315 /*
1316 * get_trigger_oid - Look up a trigger by name to find its OID.
1317 *
1318 * If missing_ok is false, throw an error if trigger not found. If
1319 * true, just return InvalidOid.
1320 */
1321 Oid
get_trigger_oid(Oid relid,const char * trigname,bool missing_ok)1322 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1323 {
1324 Relation tgrel;
1325 ScanKeyData skey[2];
1326 SysScanDesc tgscan;
1327 HeapTuple tup;
1328 Oid oid;
1329
1330 /*
1331 * Find the trigger, verify permissions, set up object address
1332 */
1333 tgrel = table_open(TriggerRelationId, AccessShareLock);
1334
1335 ScanKeyInit(&skey[0],
1336 Anum_pg_trigger_tgrelid,
1337 BTEqualStrategyNumber, F_OIDEQ,
1338 ObjectIdGetDatum(relid));
1339 ScanKeyInit(&skey[1],
1340 Anum_pg_trigger_tgname,
1341 BTEqualStrategyNumber, F_NAMEEQ,
1342 CStringGetDatum(trigname));
1343
1344 tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1345 NULL, 2, skey);
1346
1347 tup = systable_getnext(tgscan);
1348
1349 if (!HeapTupleIsValid(tup))
1350 {
1351 if (!missing_ok)
1352 ereport(ERROR,
1353 (errcode(ERRCODE_UNDEFINED_OBJECT),
1354 errmsg("trigger \"%s\" for table \"%s\" does not exist",
1355 trigname, get_rel_name(relid))));
1356 oid = InvalidOid;
1357 }
1358 else
1359 {
1360 oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1361 }
1362
1363 systable_endscan(tgscan);
1364 table_close(tgrel, AccessShareLock);
1365 return oid;
1366 }
1367
1368 /*
1369 * Perform permissions and integrity checks before acquiring a relation lock.
1370 */
1371 static void
RangeVarCallbackForRenameTrigger(const RangeVar * rv,Oid relid,Oid oldrelid,void * arg)1372 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1373 void *arg)
1374 {
1375 HeapTuple tuple;
1376 Form_pg_class form;
1377
1378 tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1379 if (!HeapTupleIsValid(tuple))
1380 return; /* concurrently dropped */
1381 form = (Form_pg_class) GETSTRUCT(tuple);
1382
1383 /* only tables and views can have triggers */
1384 if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1385 form->relkind != RELKIND_FOREIGN_TABLE &&
1386 form->relkind != RELKIND_PARTITIONED_TABLE)
1387 ereport(ERROR,
1388 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1389 errmsg("\"%s\" is not a table, view, or foreign table",
1390 rv->relname)));
1391
1392 /* you must own the table to rename one of its triggers */
1393 if (!pg_class_ownercheck(relid, GetUserId()))
1394 aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname);
1395 if (!allowSystemTableMods && IsSystemClass(relid, form))
1396 ereport(ERROR,
1397 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1398 errmsg("permission denied: \"%s\" is a system catalog",
1399 rv->relname)));
1400
1401 ReleaseSysCache(tuple);
1402 }
1403
/*
 * renametrig - changes the name of a trigger on a relation
 *
 * trigger name is changed in trigger catalog.
 * No record of the previous name is kept.
 *
 * get proper relrelation from relation catalog (if not arg)
 * scan trigger catalog
 *		for name conflict (within rel)
 *		for original trigger (if not arg)
 * modify tgname in trigger tuple
 * update row in catalog
 *
 * Returns the ObjectAddress of the renamed trigger.
 */
ObjectAddress
renametrig(RenameStmt *stmt)
{
	Oid			tgoid;
	Relation	targetrel;
	Relation	tgrel;
	HeapTuple	tuple;
	SysScanDesc tgscan;
	ScanKeyData key[2];
	Oid			relid;
	ObjectAddress address;

	/*
	 * Look up name, check permissions, and acquire lock (which we will NOT
	 * release until end of transaction).
	 */
	relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
									 0,
									 RangeVarCallbackForRenameTrigger,
									 NULL);

	/* Have lock already, so just need to build relcache entry. */
	targetrel = relation_open(relid, NoLock);

	/*
	 * Scan pg_trigger twice for existing triggers on relation.  We do this in
	 * order to ensure a trigger does not exist with newname (The unique index
	 * on tgrelid/tgname would complain anyway) and to ensure a trigger does
	 * exist with oldname.
	 *
	 * NOTE that this is cool only because we have AccessExclusiveLock on the
	 * relation, so the trigger set won't be changing underneath us.
	 */
	tgrel = table_open(TriggerRelationId, RowExclusiveLock);

	/*
	 * First pass -- look for name conflict with stmt->newname on this rel
	 */
	ScanKeyInit(&key[0],
				Anum_pg_trigger_tgrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(relid));
	ScanKeyInit(&key[1],
				Anum_pg_trigger_tgname,
				BTEqualStrategyNumber, F_NAMEEQ,
				PointerGetDatum(stmt->newname));
	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
								NULL, 2, key);
	if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
		ereport(ERROR,
				(errcode(ERRCODE_DUPLICATE_OBJECT),
				 errmsg("trigger \"%s\" for relation \"%s\" already exists",
						stmt->newname, RelationGetRelationName(targetrel))));
	systable_endscan(tgscan);

	/*
	 * Second pass -- look for trigger existing with oldname (stmt->subname)
	 * and update it
	 */
	ScanKeyInit(&key[0],
				Anum_pg_trigger_tgrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(relid));
	ScanKeyInit(&key[1],
				Anum_pg_trigger_tgname,
				BTEqualStrategyNumber, F_NAMEEQ,
				PointerGetDatum(stmt->subname));
	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
								NULL, 2, key);
	if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
	{
		Form_pg_trigger trigform;

		/*
		 * Update pg_trigger tuple with new tgname.
		 */
		tuple = heap_copytuple(tuple);	/* need a modifiable copy */
		trigform = (Form_pg_trigger) GETSTRUCT(tuple);
		tgoid = trigform->oid;

		namestrcpy(&trigform->tgname,
				   stmt->newname);

		CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);

		/* Tell any interested extensions about the ALTER */
		InvokeObjectPostAlterHook(TriggerRelationId,
								  tgoid, 0);

		/*
		 * Invalidate relation's relcache entry so that other backends (and
		 * this one too!) are sent SI message to make them rebuild relcache
		 * entries.  (Ideally this should happen automatically...)
		 */
		CacheInvalidateRelcache(targetrel);
	}
	else
	{
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("trigger \"%s\" for table \"%s\" does not exist",
						stmt->subname, RelationGetRelationName(targetrel))));
	}

	/* tgoid is always set here: the not-found branch above does not return */
	ObjectAddressSet(address, TriggerRelationId, tgoid);

	systable_endscan(tgscan);

	table_close(tgrel, RowExclusiveLock);

	/*
	 * Close rel, but keep exclusive lock!
	 */
	relation_close(targetrel, NoLock);

	return address;
}
1532
1533
/*
 * EnableDisableTrigger()
 *
 * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
 * to change 'tgenabled' field for the specified trigger(s)
 *
 * rel: relation to process (caller must hold suitable lock on it)
 * tgname: trigger to process, or NULL to scan all triggers
 * fires_when: new value for tgenabled field. In addition to generic
 *			   enablement/disablement, this also defines when the trigger
 *			   should be fired in session replication roles.
 * skip_system: if true, skip "system" triggers (constraint triggers)
 * lockmode: lock mode the caller acquired on rel (not referenced in this
 *			 function's body)
 *
 * Caller should have checked permissions for the table; here we also
 * enforce that superuser privilege is required to alter the state of
 * system triggers
 */
void
EnableDisableTrigger(Relation rel, const char *tgname,
					 char fires_when, bool skip_system, LOCKMODE lockmode)
{
	Relation	tgrel;
	int			nkeys;
	ScanKeyData keys[2];
	SysScanDesc tgscan;
	HeapTuple	tuple;
	bool		found;
	bool		changed;

	/* Scan the relevant entries in pg_triggers */
	tgrel = table_open(TriggerRelationId, RowExclusiveLock);

	ScanKeyInit(&keys[0],
				Anum_pg_trigger_tgrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(RelationGetRelid(rel)));
	if (tgname)
	{
		/* restrict the scan to the single named trigger */
		ScanKeyInit(&keys[1],
					Anum_pg_trigger_tgname,
					BTEqualStrategyNumber, F_NAMEEQ,
					CStringGetDatum(tgname));
		nkeys = 2;
	}
	else
		nkeys = 1;

	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
								NULL, nkeys, keys);

	found = changed = false;

	while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
	{
		Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);

		if (oldtrig->tgisinternal)
		{
			/* system trigger ... ok to process? */
			if (skip_system)
				continue;
			if (!superuser())
				ereport(ERROR,
						(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
						 errmsg("permission denied: \"%s\" is a system trigger",
								NameStr(oldtrig->tgname))));
		}

		found = true;

		if (oldtrig->tgenabled != fires_when)
		{
			/* need to change this one ... make a copy to scribble on */
			HeapTuple	newtup = heap_copytuple(tuple);
			Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);

			newtrig->tgenabled = fires_when;

			CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);

			heap_freetuple(newtup);

			changed = true;
		}

		/* hook fires for each processed trigger, even if tgenabled was same */
		InvokeObjectPostAlterHook(TriggerRelationId,
								  oldtrig->oid, 0);
	}

	systable_endscan(tgscan);

	table_close(tgrel, RowExclusiveLock);

	/* a named trigger that the scan never matched is a user error */
	if (tgname && !found)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("trigger \"%s\" for table \"%s\" does not exist",
						tgname, RelationGetRelationName(rel))));

	/*
	 * If we changed anything, broadcast a SI inval message to force each
	 * backend (including our own!) to rebuild relation's relcache entry.
	 * Otherwise they will fail to apply the change promptly.
	 */
	if (changed)
		CacheInvalidateRelcache(rel);
}
1641
1642
/*
 * Build trigger data to attach to the given relcache entry.
 *
 * Note that trigger data attached to a relcache entry must be stored in
 * CacheMemoryContext to ensure it survives as long as the relcache entry.
 * But we should be running in a less long-lived working context.  To avoid
 * leaking cache memory if this routine fails partway through, we build a
 * temporary TriggerDesc in working memory and then copy the completed
 * structure into cache memory.
 */
void
RelationBuildTriggers(Relation relation)
{
	TriggerDesc *trigdesc;
	int			numtrigs;
	int			maxtrigs;
	Trigger    *triggers;
	Relation	tgrel;
	ScanKeyData skey;
	SysScanDesc tgscan;
	HeapTuple	htup;
	MemoryContext oldContext;
	int			i;

	/*
	 * Allocate a working array to hold the triggers (the array is extended if
	 * necessary)
	 */
	maxtrigs = 16;
	triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
	numtrigs = 0;

	/*
	 * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
	 * be reading the triggers in name order, except possibly during
	 * emergency-recovery operations (ie, IgnoreSystemIndexes).  This in turn
	 * ensures that triggers will be fired in name order.
	 */
	ScanKeyInit(&skey,
				Anum_pg_trigger_tgrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(RelationGetRelid(relation)));

	tgrel = table_open(TriggerRelationId, AccessShareLock);
	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
								NULL, 1, &skey);

	while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
	{
		Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
		Trigger    *build;
		Datum		datum;
		bool		isnull;

		/* double the working array whenever it fills up */
		if (numtrigs >= maxtrigs)
		{
			maxtrigs *= 2;
			triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
		}
		build = &(triggers[numtrigs]);

		/* copy the fixed-width catalog fields into the Trigger struct */
		build->tgoid = pg_trigger->oid;
		build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
															NameGetDatum(&pg_trigger->tgname)));
		build->tgfoid = pg_trigger->tgfoid;
		build->tgtype = pg_trigger->tgtype;
		build->tgenabled = pg_trigger->tgenabled;
		build->tgisinternal = pg_trigger->tgisinternal;
		/* a trigger with a valid parent trigger OID is a partition clone */
		build->tgisclone = OidIsValid(pg_trigger->tgparentid);
		build->tgconstrrelid = pg_trigger->tgconstrrelid;
		build->tgconstrindid = pg_trigger->tgconstrindid;
		build->tgconstraint = pg_trigger->tgconstraint;
		build->tgdeferrable = pg_trigger->tgdeferrable;
		build->tginitdeferred = pg_trigger->tginitdeferred;
		build->tgnargs = pg_trigger->tgnargs;
		/* tgattr is first var-width field, so OK to access directly */
		build->tgnattr = pg_trigger->tgattr.dim1;
		if (build->tgnattr > 0)
		{
			build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
			memcpy(build->tgattr, &(pg_trigger->tgattr.values),
				   build->tgnattr * sizeof(int16));
		}
		else
			build->tgattr = NULL;
		if (build->tgnargs > 0)
		{
			bytea	   *val;
			char	   *p;

			val = DatumGetByteaPP(fastgetattr(htup,
											  Anum_pg_trigger_tgargs,
											  tgrel->rd_att, &isnull));
			if (isnull)
				elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
					 RelationGetRelationName(relation));
			/* tgargs stores the arguments as consecutive NUL-terminated strings */
			p = (char *) VARDATA_ANY(val);
			build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
			for (i = 0; i < build->tgnargs; i++)
			{
				build->tgargs[i] = pstrdup(p);
				p += strlen(p) + 1;
			}
		}
		else
			build->tgargs = NULL;

		/* transition-table names; both columns are nullable */
		datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
							tgrel->rd_att, &isnull);
		if (!isnull)
			build->tgoldtable =
				DatumGetCString(DirectFunctionCall1(nameout, datum));
		else
			build->tgoldtable = NULL;

		datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
							tgrel->rd_att, &isnull);
		if (!isnull)
			build->tgnewtable =
				DatumGetCString(DirectFunctionCall1(nameout, datum));
		else
			build->tgnewtable = NULL;

		/* WHEN-clause qual is stored as text, null if absent */
		datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
							tgrel->rd_att, &isnull);
		if (!isnull)
			build->tgqual = TextDatumGetCString(datum);
		else
			build->tgqual = NULL;

		numtrigs++;
	}

	systable_endscan(tgscan);
	table_close(tgrel, AccessShareLock);

	/* There might not be any triggers */
	if (numtrigs == 0)
	{
		pfree(triggers);
		return;
	}

	/* Build trigdesc */
	trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
	trigdesc->triggers = triggers;
	trigdesc->numtriggers = numtrigs;
	for (i = 0; i < numtrigs; i++)
		SetTriggerFlags(trigdesc, &(triggers[i]));

	/* Copy completed trigdesc into cache storage */
	oldContext = MemoryContextSwitchTo(CacheMemoryContext);
	relation->trigdesc = CopyTriggerDesc(trigdesc);
	MemoryContextSwitchTo(oldContext);

	/* Release working memory */
	FreeTriggerDesc(trigdesc);
}
1801
1802 /*
1803 * Update the TriggerDesc's hint flags to include the specified trigger
1804 */
1805 static void
SetTriggerFlags(TriggerDesc * trigdesc,Trigger * trigger)1806 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
1807 {
1808 int16 tgtype = trigger->tgtype;
1809
1810 trigdesc->trig_insert_before_row |=
1811 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1812 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1813 trigdesc->trig_insert_after_row |=
1814 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1815 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1816 trigdesc->trig_insert_instead_row |=
1817 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1818 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
1819 trigdesc->trig_insert_before_statement |=
1820 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1821 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1822 trigdesc->trig_insert_after_statement |=
1823 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1824 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1825 trigdesc->trig_update_before_row |=
1826 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1827 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1828 trigdesc->trig_update_after_row |=
1829 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1830 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1831 trigdesc->trig_update_instead_row |=
1832 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1833 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
1834 trigdesc->trig_update_before_statement |=
1835 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1836 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1837 trigdesc->trig_update_after_statement |=
1838 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1839 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1840 trigdesc->trig_delete_before_row |=
1841 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1842 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1843 trigdesc->trig_delete_after_row |=
1844 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1845 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1846 trigdesc->trig_delete_instead_row |=
1847 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1848 TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
1849 trigdesc->trig_delete_before_statement |=
1850 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1851 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1852 trigdesc->trig_delete_after_statement |=
1853 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1854 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1855 /* there are no row-level truncate triggers */
1856 trigdesc->trig_truncate_before_statement |=
1857 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1858 TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
1859 trigdesc->trig_truncate_after_statement |=
1860 TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1861 TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
1862
1863 trigdesc->trig_insert_new_table |=
1864 (TRIGGER_FOR_INSERT(tgtype) &&
1865 TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1866 trigdesc->trig_update_old_table |=
1867 (TRIGGER_FOR_UPDATE(tgtype) &&
1868 TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1869 trigdesc->trig_update_new_table |=
1870 (TRIGGER_FOR_UPDATE(tgtype) &&
1871 TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1872 trigdesc->trig_delete_old_table |=
1873 (TRIGGER_FOR_DELETE(tgtype) &&
1874 TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1875 }
1876
1877 /*
1878 * Copy a TriggerDesc data structure.
1879 *
1880 * The copy is allocated in the current memory context.
1881 */
1882 TriggerDesc *
CopyTriggerDesc(TriggerDesc * trigdesc)1883 CopyTriggerDesc(TriggerDesc *trigdesc)
1884 {
1885 TriggerDesc *newdesc;
1886 Trigger *trigger;
1887 int i;
1888
1889 if (trigdesc == NULL || trigdesc->numtriggers <= 0)
1890 return NULL;
1891
1892 newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
1893 memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
1894
1895 trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
1896 memcpy(trigger, trigdesc->triggers,
1897 trigdesc->numtriggers * sizeof(Trigger));
1898 newdesc->triggers = trigger;
1899
1900 for (i = 0; i < trigdesc->numtriggers; i++)
1901 {
1902 trigger->tgname = pstrdup(trigger->tgname);
1903 if (trigger->tgnattr > 0)
1904 {
1905 int16 *newattr;
1906
1907 newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
1908 memcpy(newattr, trigger->tgattr,
1909 trigger->tgnattr * sizeof(int16));
1910 trigger->tgattr = newattr;
1911 }
1912 if (trigger->tgnargs > 0)
1913 {
1914 char **newargs;
1915 int16 j;
1916
1917 newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
1918 for (j = 0; j < trigger->tgnargs; j++)
1919 newargs[j] = pstrdup(trigger->tgargs[j]);
1920 trigger->tgargs = newargs;
1921 }
1922 if (trigger->tgqual)
1923 trigger->tgqual = pstrdup(trigger->tgqual);
1924 if (trigger->tgoldtable)
1925 trigger->tgoldtable = pstrdup(trigger->tgoldtable);
1926 if (trigger->tgnewtable)
1927 trigger->tgnewtable = pstrdup(trigger->tgnewtable);
1928 trigger++;
1929 }
1930
1931 return newdesc;
1932 }
1933
1934 /*
1935 * Free a TriggerDesc data structure.
1936 */
1937 void
FreeTriggerDesc(TriggerDesc * trigdesc)1938 FreeTriggerDesc(TriggerDesc *trigdesc)
1939 {
1940 Trigger *trigger;
1941 int i;
1942
1943 if (trigdesc == NULL)
1944 return;
1945
1946 trigger = trigdesc->triggers;
1947 for (i = 0; i < trigdesc->numtriggers; i++)
1948 {
1949 pfree(trigger->tgname);
1950 if (trigger->tgnattr > 0)
1951 pfree(trigger->tgattr);
1952 if (trigger->tgnargs > 0)
1953 {
1954 while (--(trigger->tgnargs) >= 0)
1955 pfree(trigger->tgargs[trigger->tgnargs]);
1956 pfree(trigger->tgargs);
1957 }
1958 if (trigger->tgqual)
1959 pfree(trigger->tgqual);
1960 if (trigger->tgoldtable)
1961 pfree(trigger->tgoldtable);
1962 if (trigger->tgnewtable)
1963 pfree(trigger->tgnewtable);
1964 trigger++;
1965 }
1966 pfree(trigdesc->triggers);
1967 pfree(trigdesc);
1968 }
1969
1970 /*
1971 * Compare two TriggerDesc structures for logical equality.
1972 */
1973 #ifdef NOT_USED
1974 bool
equalTriggerDescs(TriggerDesc * trigdesc1,TriggerDesc * trigdesc2)1975 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
1976 {
1977 int i,
1978 j;
1979
1980 /*
1981 * We need not examine the hint flags, just the trigger array itself; if
1982 * we have the same triggers with the same types, the flags should match.
1983 *
1984 * As of 7.3 we assume trigger set ordering is significant in the
1985 * comparison; so we just compare corresponding slots of the two sets.
1986 *
1987 * Note: comparing the stringToNode forms of the WHEN clauses means that
1988 * parse column locations will affect the result. This is okay as long as
1989 * this function is only used for detecting exact equality, as for example
1990 * in checking for staleness of a cache entry.
1991 */
1992 if (trigdesc1 != NULL)
1993 {
1994 if (trigdesc2 == NULL)
1995 return false;
1996 if (trigdesc1->numtriggers != trigdesc2->numtriggers)
1997 return false;
1998 for (i = 0; i < trigdesc1->numtriggers; i++)
1999 {
2000 Trigger *trig1 = trigdesc1->triggers + i;
2001 Trigger *trig2 = trigdesc2->triggers + i;
2002
2003 if (trig1->tgoid != trig2->tgoid)
2004 return false;
2005 if (strcmp(trig1->tgname, trig2->tgname) != 0)
2006 return false;
2007 if (trig1->tgfoid != trig2->tgfoid)
2008 return false;
2009 if (trig1->tgtype != trig2->tgtype)
2010 return false;
2011 if (trig1->tgenabled != trig2->tgenabled)
2012 return false;
2013 if (trig1->tgisinternal != trig2->tgisinternal)
2014 return false;
2015 if (trig1->tgisclone != trig2->tgisclone)
2016 return false;
2017 if (trig1->tgconstrrelid != trig2->tgconstrrelid)
2018 return false;
2019 if (trig1->tgconstrindid != trig2->tgconstrindid)
2020 return false;
2021 if (trig1->tgconstraint != trig2->tgconstraint)
2022 return false;
2023 if (trig1->tgdeferrable != trig2->tgdeferrable)
2024 return false;
2025 if (trig1->tginitdeferred != trig2->tginitdeferred)
2026 return false;
2027 if (trig1->tgnargs != trig2->tgnargs)
2028 return false;
2029 if (trig1->tgnattr != trig2->tgnattr)
2030 return false;
2031 if (trig1->tgnattr > 0 &&
2032 memcmp(trig1->tgattr, trig2->tgattr,
2033 trig1->tgnattr * sizeof(int16)) != 0)
2034 return false;
2035 for (j = 0; j < trig1->tgnargs; j++)
2036 if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
2037 return false;
2038 if (trig1->tgqual == NULL && trig2->tgqual == NULL)
2039 /* ok */ ;
2040 else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
2041 return false;
2042 else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
2043 return false;
2044 if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
2045 /* ok */ ;
2046 else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
2047 return false;
2048 else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
2049 return false;
2050 if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
2051 /* ok */ ;
2052 else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
2053 return false;
2054 else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
2055 return false;
2056 }
2057 }
2058 else if (trigdesc2 != NULL)
2059 return false;
2060 return true;
2061 }
2062 #endif /* NOT_USED */
2063
2064 /*
2065 * Check if there is a row-level trigger with transition tables that prevents
2066 * a table from becoming an inheritance child or partition. Return the name
2067 * of the first such incompatible trigger, or NULL if there is none.
2068 */
2069 const char *
FindTriggerIncompatibleWithInheritance(TriggerDesc * trigdesc)2070 FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
2071 {
2072 if (trigdesc != NULL)
2073 {
2074 int i;
2075
2076 for (i = 0; i < trigdesc->numtriggers; ++i)
2077 {
2078 Trigger *trigger = &trigdesc->triggers[i];
2079
2080 if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2081 return trigger->tgname;
2082 }
2083 }
2084
2085 return NULL;
2086 }
2087
2088 /*
2089 * Call a trigger function.
2090 *
2091 * trigdata: trigger descriptor.
2092 * tgindx: trigger's index in finfo and instr arrays.
2093 * finfo: array of cached trigger function call information.
2094 * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2095 * per_tuple_context: memory context to execute the function in.
2096 *
2097 * Returns the tuple (or NULL) as returned by the function.
2098 */
2099 static HeapTuple
ExecCallTriggerFunc(TriggerData * trigdata,int tgindx,FmgrInfo * finfo,Instrumentation * instr,MemoryContext per_tuple_context)2100 ExecCallTriggerFunc(TriggerData *trigdata,
2101 int tgindx,
2102 FmgrInfo *finfo,
2103 Instrumentation *instr,
2104 MemoryContext per_tuple_context)
2105 {
2106 LOCAL_FCINFO(fcinfo, 0);
2107 PgStat_FunctionCallUsage fcusage;
2108 Datum result;
2109 MemoryContext oldContext;
2110
2111 /*
2112 * Protect against code paths that may fail to initialize transition table
2113 * info.
2114 */
2115 Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2116 TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2117 TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2118 TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2119 !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2120 !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2121 (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2122
2123 finfo += tgindx;
2124
2125 /*
2126 * We cache fmgr lookup info, to avoid making the lookup again on each
2127 * call.
2128 */
2129 if (finfo->fn_oid == InvalidOid)
2130 fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2131
2132 Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2133
2134 /*
2135 * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2136 */
2137 if (instr)
2138 InstrStartNode(instr + tgindx);
2139
2140 /*
2141 * Do the function evaluation in the per-tuple memory context, so that
2142 * leaked memory will be reclaimed once per tuple. Note in particular that
2143 * any new tuple created by the trigger function will live till the end of
2144 * the tuple cycle.
2145 */
2146 oldContext = MemoryContextSwitchTo(per_tuple_context);
2147
2148 /*
2149 * Call the function, passing no arguments but setting a context.
2150 */
2151 InitFunctionCallInfoData(*fcinfo, finfo, 0,
2152 InvalidOid, (Node *) trigdata, NULL);
2153
2154 pgstat_init_function_usage(fcinfo, &fcusage);
2155
2156 MyTriggerDepth++;
2157 PG_TRY();
2158 {
2159 result = FunctionCallInvoke(fcinfo);
2160 }
2161 PG_FINALLY();
2162 {
2163 MyTriggerDepth--;
2164 }
2165 PG_END_TRY();
2166
2167 pgstat_end_function_usage(&fcusage, true);
2168
2169 MemoryContextSwitchTo(oldContext);
2170
2171 /*
2172 * Trigger protocol allows function to return a null pointer, but NOT to
2173 * set the isnull result flag.
2174 */
2175 if (fcinfo->isnull)
2176 ereport(ERROR,
2177 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2178 errmsg("trigger function %u returned null value",
2179 fcinfo->flinfo->fn_oid)));
2180
2181 /*
2182 * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2183 * one "tuple returned" (really the number of firings).
2184 */
2185 if (instr)
2186 InstrStopNode(instr + tgindx, 1);
2187
2188 return (HeapTuple) DatumGetPointer(result);
2189 }
2190
2191 void
ExecBSInsertTriggers(EState * estate,ResultRelInfo * relinfo)2192 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2193 {
2194 TriggerDesc *trigdesc;
2195 int i;
2196 TriggerData LocTriggerData = {0};
2197
2198 trigdesc = relinfo->ri_TrigDesc;
2199
2200 if (trigdesc == NULL)
2201 return;
2202 if (!trigdesc->trig_insert_before_statement)
2203 return;
2204
2205 /* no-op if we already fired BS triggers in this context */
2206 if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2207 CMD_INSERT))
2208 return;
2209
2210 LocTriggerData.type = T_TriggerData;
2211 LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2212 TRIGGER_EVENT_BEFORE;
2213 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2214 for (i = 0; i < trigdesc->numtriggers; i++)
2215 {
2216 Trigger *trigger = &trigdesc->triggers[i];
2217 HeapTuple newtuple;
2218
2219 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2220 TRIGGER_TYPE_STATEMENT,
2221 TRIGGER_TYPE_BEFORE,
2222 TRIGGER_TYPE_INSERT))
2223 continue;
2224 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2225 NULL, NULL, NULL))
2226 continue;
2227
2228 LocTriggerData.tg_trigger = trigger;
2229 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2230 i,
2231 relinfo->ri_TrigFunctions,
2232 relinfo->ri_TrigInstrument,
2233 GetPerTupleMemoryContext(estate));
2234
2235 if (newtuple)
2236 ereport(ERROR,
2237 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2238 errmsg("BEFORE STATEMENT trigger cannot return a value")));
2239 }
2240 }
2241
2242 void
ExecASInsertTriggers(EState * estate,ResultRelInfo * relinfo,TransitionCaptureState * transition_capture)2243 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2244 TransitionCaptureState *transition_capture)
2245 {
2246 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2247
2248 if (trigdesc && trigdesc->trig_insert_after_statement)
2249 AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2250 false, NULL, NULL, NIL, NULL, transition_capture);
2251 }
2252
/*
 * Execute BEFORE ROW INSERT triggers.
 *
 * The tuple to be inserted is in "slot"; triggers may modify or replace it.
 * Returns false if a trigger returned NULL, meaning the caller should skip
 * the insert ("do nothing"); otherwise returns true with slot holding the
 * (possibly modified) new tuple.
 */
bool
ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
					 TupleTableSlot *slot)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	HeapTuple	newtuple = NULL;
	bool		should_free;	/* valid only after newtuple is fetched */
	TriggerData LocTriggerData = {0};
	int			i;

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];
		HeapTuple	oldtuple;

		/* Consider only enabled BEFORE ROW INSERT triggers */
		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  TRIGGER_TYPE_ROW,
								  TRIGGER_TYPE_BEFORE,
								  TRIGGER_TYPE_INSERT))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
							NULL, NULL, slot))
			continue;

		/* Materialize the slot's tuple lazily, once per stored version */
		if (!newtuple)
			newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);

		LocTriggerData.tg_trigslot = slot;
		LocTriggerData.tg_trigtuple = oldtuple = newtuple;
		LocTriggerData.tg_trigger = trigger;
		newtuple = ExecCallTriggerFunc(&LocTriggerData,
									   i,
									   relinfo->ri_TrigFunctions,
									   relinfo->ri_TrigInstrument,
									   GetPerTupleMemoryContext(estate));
		if (newtuple == NULL)
		{
			if (should_free)
				heap_freetuple(oldtuple);
			return false;		/* "do nothing" */
		}
		else if (newtuple != oldtuple)
		{
			/* Trigger replaced the tuple; store the new version in the slot */
			ExecForceStoreHeapTuple(newtuple, slot, false);

			/*
			 * After a tuple in a partition goes through a trigger, the user
			 * could have changed the partition key enough that the tuple no
			 * longer fits the partition. Verify that.
			 */
			if (trigger->tgisclone &&
				!ExecPartitionCheck(relinfo, slot, estate, false))
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
						 errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
								   trigger->tgname,
								   get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
								   RelationGetRelationName(relinfo->ri_RelationDesc))));

			if (should_free)
				heap_freetuple(oldtuple);

			/* signal tuple should be re-fetched if used */
			newtuple = NULL;
		}
	}

	return true;
}
2328
2329 void
ExecARInsertTriggers(EState * estate,ResultRelInfo * relinfo,TupleTableSlot * slot,List * recheckIndexes,TransitionCaptureState * transition_capture)2330 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2331 TupleTableSlot *slot, List *recheckIndexes,
2332 TransitionCaptureState *transition_capture)
2333 {
2334 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2335
2336 if ((trigdesc && trigdesc->trig_insert_after_row) ||
2337 (transition_capture && transition_capture->tcs_insert_new_table))
2338 AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2339 true, NULL, slot,
2340 recheckIndexes, NULL,
2341 transition_capture);
2342 }
2343
/*
 * Execute INSTEAD OF ROW INSERT triggers.
 *
 * Returns false if a trigger returned NULL, meaning the insert should be
 * suppressed ("do nothing"); otherwise true, with slot possibly replaced
 * by a trigger's result tuple.
 */
bool
ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
					 TupleTableSlot *slot)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	HeapTuple	newtuple = NULL;
	bool		should_free;	/* valid only after newtuple is fetched */
	TriggerData LocTriggerData = {0};
	int			i;

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_INSTEAD;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];
		HeapTuple	oldtuple;

		/* Consider only enabled INSTEAD OF ROW INSERT triggers */
		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  TRIGGER_TYPE_ROW,
								  TRIGGER_TYPE_INSTEAD,
								  TRIGGER_TYPE_INSERT))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
							NULL, NULL, slot))
			continue;

		/* Materialize the slot's tuple lazily, once per stored version */
		if (!newtuple)
			newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);

		LocTriggerData.tg_trigslot = slot;
		LocTriggerData.tg_trigtuple = oldtuple = newtuple;
		LocTriggerData.tg_trigger = trigger;
		newtuple = ExecCallTriggerFunc(&LocTriggerData,
									   i,
									   relinfo->ri_TrigFunctions,
									   relinfo->ri_TrigInstrument,
									   GetPerTupleMemoryContext(estate));
		if (newtuple == NULL)
		{
			if (should_free)
				heap_freetuple(oldtuple);
			return false;		/* "do nothing" */
		}
		else if (newtuple != oldtuple)
		{
			/* Trigger replaced the tuple; store the new version in the slot */
			ExecForceStoreHeapTuple(newtuple, slot, false);

			if (should_free)
				heap_freetuple(oldtuple);

			/* signal tuple should be re-fetched if used */
			newtuple = NULL;
		}
	}

	return true;
}
2404
2405 void
ExecBSDeleteTriggers(EState * estate,ResultRelInfo * relinfo)2406 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2407 {
2408 TriggerDesc *trigdesc;
2409 int i;
2410 TriggerData LocTriggerData = {0};
2411
2412 trigdesc = relinfo->ri_TrigDesc;
2413
2414 if (trigdesc == NULL)
2415 return;
2416 if (!trigdesc->trig_delete_before_statement)
2417 return;
2418
2419 /* no-op if we already fired BS triggers in this context */
2420 if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2421 CMD_DELETE))
2422 return;
2423
2424 LocTriggerData.type = T_TriggerData;
2425 LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2426 TRIGGER_EVENT_BEFORE;
2427 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2428 for (i = 0; i < trigdesc->numtriggers; i++)
2429 {
2430 Trigger *trigger = &trigdesc->triggers[i];
2431 HeapTuple newtuple;
2432
2433 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2434 TRIGGER_TYPE_STATEMENT,
2435 TRIGGER_TYPE_BEFORE,
2436 TRIGGER_TYPE_DELETE))
2437 continue;
2438 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2439 NULL, NULL, NULL))
2440 continue;
2441
2442 LocTriggerData.tg_trigger = trigger;
2443 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2444 i,
2445 relinfo->ri_TrigFunctions,
2446 relinfo->ri_TrigInstrument,
2447 GetPerTupleMemoryContext(estate));
2448
2449 if (newtuple)
2450 ereport(ERROR,
2451 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2452 errmsg("BEFORE STATEMENT trigger cannot return a value")));
2453 }
2454 }
2455
2456 void
ExecASDeleteTriggers(EState * estate,ResultRelInfo * relinfo,TransitionCaptureState * transition_capture)2457 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2458 TransitionCaptureState *transition_capture)
2459 {
2460 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2461
2462 if (trigdesc && trigdesc->trig_delete_after_statement)
2463 AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2464 false, NULL, NULL, NIL, NULL, transition_capture);
2465 }
2466
2467 /*
2468 * Execute BEFORE ROW DELETE triggers.
2469 *
2470 * True indicates caller can proceed with the delete. False indicates caller
2471 * need to suppress the delete and additionally if requested, we need to pass
2472 * back the concurrently updated tuple if any.
2473 */
2474 bool
ExecBRDeleteTriggers(EState * estate,EPQState * epqstate,ResultRelInfo * relinfo,ItemPointer tupleid,HeapTuple fdw_trigtuple,TupleTableSlot ** epqslot)2475 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2476 ResultRelInfo *relinfo,
2477 ItemPointer tupleid,
2478 HeapTuple fdw_trigtuple,
2479 TupleTableSlot **epqslot)
2480 {
2481 TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2482 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2483 bool result = true;
2484 TriggerData LocTriggerData = {0};
2485 HeapTuple trigtuple;
2486 bool should_free = false;
2487 int i;
2488
2489 Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2490 if (fdw_trigtuple == NULL)
2491 {
2492 TupleTableSlot *epqslot_candidate = NULL;
2493
2494 if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2495 LockTupleExclusive, slot, &epqslot_candidate))
2496 return false;
2497
2498 /*
2499 * If the tuple was concurrently updated and the caller of this
2500 * function requested for the updated tuple, skip the trigger
2501 * execution.
2502 */
2503 if (epqslot_candidate != NULL && epqslot != NULL)
2504 {
2505 *epqslot = epqslot_candidate;
2506 return false;
2507 }
2508
2509 trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2510
2511 }
2512 else
2513 {
2514 trigtuple = fdw_trigtuple;
2515 ExecForceStoreHeapTuple(trigtuple, slot, false);
2516 }
2517
2518 LocTriggerData.type = T_TriggerData;
2519 LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2520 TRIGGER_EVENT_ROW |
2521 TRIGGER_EVENT_BEFORE;
2522 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2523 for (i = 0; i < trigdesc->numtriggers; i++)
2524 {
2525 HeapTuple newtuple;
2526 Trigger *trigger = &trigdesc->triggers[i];
2527
2528 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2529 TRIGGER_TYPE_ROW,
2530 TRIGGER_TYPE_BEFORE,
2531 TRIGGER_TYPE_DELETE))
2532 continue;
2533 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2534 NULL, slot, NULL))
2535 continue;
2536
2537 LocTriggerData.tg_trigslot = slot;
2538 LocTriggerData.tg_trigtuple = trigtuple;
2539 LocTriggerData.tg_trigger = trigger;
2540 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2541 i,
2542 relinfo->ri_TrigFunctions,
2543 relinfo->ri_TrigInstrument,
2544 GetPerTupleMemoryContext(estate));
2545 if (newtuple == NULL)
2546 {
2547 result = false; /* tell caller to suppress delete */
2548 break;
2549 }
2550 if (newtuple != trigtuple)
2551 heap_freetuple(newtuple);
2552 }
2553 if (should_free)
2554 heap_freetuple(trigtuple);
2555
2556 return result;
2557 }
2558
2559 void
ExecARDeleteTriggers(EState * estate,ResultRelInfo * relinfo,ItemPointer tupleid,HeapTuple fdw_trigtuple,TransitionCaptureState * transition_capture)2560 ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2561 ItemPointer tupleid,
2562 HeapTuple fdw_trigtuple,
2563 TransitionCaptureState *transition_capture)
2564 {
2565 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2566
2567 if ((trigdesc && trigdesc->trig_delete_after_row) ||
2568 (transition_capture && transition_capture->tcs_delete_old_table))
2569 {
2570 TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2571
2572 Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2573 if (fdw_trigtuple == NULL)
2574 GetTupleForTrigger(estate,
2575 NULL,
2576 relinfo,
2577 tupleid,
2578 LockTupleExclusive,
2579 slot,
2580 NULL);
2581 else
2582 ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2583
2584 AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2585 true, slot, NULL, NIL, NULL,
2586 transition_capture);
2587 }
2588 }
2589
2590 bool
ExecIRDeleteTriggers(EState * estate,ResultRelInfo * relinfo,HeapTuple trigtuple)2591 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2592 HeapTuple trigtuple)
2593 {
2594 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2595 TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2596 TriggerData LocTriggerData = {0};
2597 int i;
2598
2599 LocTriggerData.type = T_TriggerData;
2600 LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2601 TRIGGER_EVENT_ROW |
2602 TRIGGER_EVENT_INSTEAD;
2603 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2604
2605 ExecForceStoreHeapTuple(trigtuple, slot, false);
2606
2607 for (i = 0; i < trigdesc->numtriggers; i++)
2608 {
2609 HeapTuple rettuple;
2610 Trigger *trigger = &trigdesc->triggers[i];
2611
2612 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2613 TRIGGER_TYPE_ROW,
2614 TRIGGER_TYPE_INSTEAD,
2615 TRIGGER_TYPE_DELETE))
2616 continue;
2617 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2618 NULL, slot, NULL))
2619 continue;
2620
2621 LocTriggerData.tg_trigslot = slot;
2622 LocTriggerData.tg_trigtuple = trigtuple;
2623 LocTriggerData.tg_trigger = trigger;
2624 rettuple = ExecCallTriggerFunc(&LocTriggerData,
2625 i,
2626 relinfo->ri_TrigFunctions,
2627 relinfo->ri_TrigInstrument,
2628 GetPerTupleMemoryContext(estate));
2629 if (rettuple == NULL)
2630 return false; /* Delete was suppressed */
2631 if (rettuple != trigtuple)
2632 heap_freetuple(rettuple);
2633 }
2634 return true;
2635 }
2636
2637 void
ExecBSUpdateTriggers(EState * estate,ResultRelInfo * relinfo)2638 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2639 {
2640 TriggerDesc *trigdesc;
2641 int i;
2642 TriggerData LocTriggerData = {0};
2643 Bitmapset *updatedCols;
2644
2645 trigdesc = relinfo->ri_TrigDesc;
2646
2647 if (trigdesc == NULL)
2648 return;
2649 if (!trigdesc->trig_update_before_statement)
2650 return;
2651
2652 /* no-op if we already fired BS triggers in this context */
2653 if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2654 CMD_UPDATE))
2655 return;
2656
2657 /* statement-level triggers operate on the parent table */
2658 Assert(relinfo->ri_RootResultRelInfo == NULL);
2659
2660 updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2661
2662 LocTriggerData.type = T_TriggerData;
2663 LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2664 TRIGGER_EVENT_BEFORE;
2665 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2666 LocTriggerData.tg_updatedcols = updatedCols;
2667 for (i = 0; i < trigdesc->numtriggers; i++)
2668 {
2669 Trigger *trigger = &trigdesc->triggers[i];
2670 HeapTuple newtuple;
2671
2672 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2673 TRIGGER_TYPE_STATEMENT,
2674 TRIGGER_TYPE_BEFORE,
2675 TRIGGER_TYPE_UPDATE))
2676 continue;
2677 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2678 updatedCols, NULL, NULL))
2679 continue;
2680
2681 LocTriggerData.tg_trigger = trigger;
2682 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2683 i,
2684 relinfo->ri_TrigFunctions,
2685 relinfo->ri_TrigInstrument,
2686 GetPerTupleMemoryContext(estate));
2687
2688 if (newtuple)
2689 ereport(ERROR,
2690 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2691 errmsg("BEFORE STATEMENT trigger cannot return a value")));
2692 }
2693 }
2694
2695 void
ExecASUpdateTriggers(EState * estate,ResultRelInfo * relinfo,TransitionCaptureState * transition_capture)2696 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2697 TransitionCaptureState *transition_capture)
2698 {
2699 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2700
2701 /* statement-level triggers operate on the parent table */
2702 Assert(relinfo->ri_RootResultRelInfo == NULL);
2703
2704 if (trigdesc && trigdesc->trig_update_after_statement)
2705 AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2706 false, NULL, NULL, NIL,
2707 ExecGetAllUpdatedCols(relinfo, estate),
2708 transition_capture);
2709 }
2710
/*
 * Execute BEFORE ROW UPDATE triggers.
 *
 * Returns true if the caller may proceed with the update, with newslot
 * holding the (possibly trigger-modified) new row version.  Returns false
 * if the target row could not be locked/fetched, or if a trigger returned
 * NULL ("do nothing").
 */
bool
ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
					 ResultRelInfo *relinfo,
					 ItemPointer tupleid,
					 HeapTuple fdw_trigtuple,
					 TupleTableSlot *newslot)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
	HeapTuple	newtuple = NULL;
	HeapTuple	trigtuple;
	bool		should_free_trig = false;
	bool		should_free_new = false;
	TriggerData LocTriggerData = {0};
	int			i;
	Bitmapset  *updatedCols;
	LockTupleMode lockmode;

	/* Determine lock mode to use */
	lockmode = ExecUpdateLockMode(estate, relinfo);

	/* Exactly one of tupleid and fdw_trigtuple identifies the target row */
	Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
	if (fdw_trigtuple == NULL)
	{
		TupleTableSlot *epqslot_candidate = NULL;

		/* get a copy of the on-disk tuple we are planning to update */
		if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
								lockmode, oldslot, &epqslot_candidate))
			return false;		/* cancel the update action */

		/*
		 * In READ COMMITTED isolation level it's possible that target tuple
		 * was changed due to concurrent update. In that case we have a raw
		 * subplan output tuple in epqslot_candidate, and need to form a new
		 * insertable tuple using ExecGetUpdateNewTuple to replace the one we
		 * received in newslot. Neither we nor our callers have any further
		 * interest in the passed-in tuple, so it's okay to overwrite newslot
		 * with the newer data.
		 *
		 * (Typically, newslot was also generated by ExecGetUpdateNewTuple, so
		 * that epqslot_clean will be that same slot and the copy step below
		 * is not needed.)
		 */
		if (epqslot_candidate != NULL)
		{
			TupleTableSlot *epqslot_clean;

			epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
												  oldslot);

			if (newslot != epqslot_clean)
				ExecCopySlot(newslot, epqslot_clean);
		}

		trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
	}
	else
	{
		/* Foreign-table case: the caller already supplied the old row */
		ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
		trigtuple = fdw_trigtuple;
	}

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
	LocTriggerData.tg_updatedcols = updatedCols;
	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];
		HeapTuple	oldtuple;

		/* Consider only enabled BEFORE ROW UPDATE triggers */
		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  TRIGGER_TYPE_ROW,
								  TRIGGER_TYPE_BEFORE,
								  TRIGGER_TYPE_UPDATE))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
							updatedCols, oldslot, newslot))
			continue;

		/* Materialize newslot's tuple lazily, once per stored version */
		if (!newtuple)
			newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);

		LocTriggerData.tg_trigslot = oldslot;
		LocTriggerData.tg_trigtuple = trigtuple;
		LocTriggerData.tg_newtuple = oldtuple = newtuple;
		LocTriggerData.tg_newslot = newslot;
		LocTriggerData.tg_trigger = trigger;
		newtuple = ExecCallTriggerFunc(&LocTriggerData,
									   i,
									   relinfo->ri_TrigFunctions,
									   relinfo->ri_TrigInstrument,
									   GetPerTupleMemoryContext(estate));

		if (newtuple == NULL)
		{
			if (should_free_trig)
				heap_freetuple(trigtuple);
			if (should_free_new)
				heap_freetuple(oldtuple);
			return false;		/* "do nothing" */
		}
		else if (newtuple != oldtuple)
		{
			/* Trigger replaced the new tuple; store it back into newslot */
			ExecForceStoreHeapTuple(newtuple, newslot, false);

			/*
			 * If the tuple returned by the trigger / being stored, is the old
			 * row version, and the heap tuple passed to the trigger was
			 * allocated locally, materialize the slot. Otherwise we might
			 * free it while still referenced by the slot.
			 */
			if (should_free_trig && newtuple == trigtuple)
				ExecMaterializeSlot(newslot);

			if (should_free_new)
				heap_freetuple(oldtuple);

			/* signal tuple should be re-fetched if used */
			newtuple = NULL;
		}
	}
	if (should_free_trig)
		heap_freetuple(trigtuple);

	return true;
}
2842
2843 void
ExecARUpdateTriggers(EState * estate,ResultRelInfo * relinfo,ItemPointer tupleid,HeapTuple fdw_trigtuple,TupleTableSlot * newslot,List * recheckIndexes,TransitionCaptureState * transition_capture)2844 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2845 ItemPointer tupleid,
2846 HeapTuple fdw_trigtuple,
2847 TupleTableSlot *newslot,
2848 List *recheckIndexes,
2849 TransitionCaptureState *transition_capture)
2850 {
2851 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2852
2853 if ((trigdesc && trigdesc->trig_update_after_row) ||
2854 (transition_capture &&
2855 (transition_capture->tcs_update_old_table ||
2856 transition_capture->tcs_update_new_table)))
2857 {
2858 /*
2859 * Note: if the UPDATE is converted into a DELETE+INSERT as part of
2860 * update-partition-key operation, then this function is also called
2861 * separately for DELETE and INSERT to capture transition table rows.
2862 * In such case, either old tuple or new tuple can be NULL.
2863 */
2864 TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2865
2866 if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
2867 GetTupleForTrigger(estate,
2868 NULL,
2869 relinfo,
2870 tupleid,
2871 LockTupleExclusive,
2872 oldslot,
2873 NULL);
2874 else if (fdw_trigtuple != NULL)
2875 ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
2876 else
2877 ExecClearTuple(oldslot);
2878
2879 AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2880 true, oldslot, newslot, recheckIndexes,
2881 ExecGetAllUpdatedCols(relinfo, estate),
2882 transition_capture);
2883 }
2884 }
2885
/*
 * ExecIRUpdateTriggers
 *		Fire INSTEAD OF ROW UPDATE triggers.
 *
 * "trigtuple" is the old row; "newslot" holds the proposed new row, which
 * a trigger may replace.  Returns false if some trigger returned NULL
 * ("do nothing"), meaning the update should be suppressed; otherwise
 * returns true, possibly with newslot's contents replaced by a trigger.
 */
bool
ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
					 HeapTuple trigtuple, TupleTableSlot *newslot)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
	HeapTuple	newtuple = NULL;	/* lazily fetched from newslot below */
	bool		should_free;	/* must newtuple be explicitly freed?  (set
								 * whenever newtuple is fetched) */
	TriggerData LocTriggerData = {0};
	int			i;

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_INSTEAD;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;

	/* Make the old row available to WHEN clauses and trigger functions */
	ExecForceStoreHeapTuple(trigtuple, oldslot, false);

	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];
		HeapTuple	oldtuple;

		/* Only INSTEAD OF ROW UPDATE triggers are relevant here */
		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  TRIGGER_TYPE_ROW,
								  TRIGGER_TYPE_INSTEAD,
								  TRIGGER_TYPE_UPDATE))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
							NULL, oldslot, newslot))
			continue;

		/* (Re-)fetch a heap tuple form of newslot if we don't have one */
		if (!newtuple)
			newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);

		LocTriggerData.tg_trigslot = oldslot;
		LocTriggerData.tg_trigtuple = trigtuple;
		LocTriggerData.tg_newslot = newslot;
		/* remember the tuple we pass in, to detect replacement below */
		LocTriggerData.tg_newtuple = oldtuple = newtuple;

		LocTriggerData.tg_trigger = trigger;
		newtuple = ExecCallTriggerFunc(&LocTriggerData,
									   i,
									   relinfo->ri_TrigFunctions,
									   relinfo->ri_TrigInstrument,
									   GetPerTupleMemoryContext(estate));
		if (newtuple == NULL)
		{
			return false;		/* "do nothing" */
		}
		else if (newtuple != oldtuple)
		{
			/* Trigger returned a replacement row: make it the new row */
			ExecForceStoreHeapTuple(newtuple, newslot, false);

			if (should_free)
				heap_freetuple(oldtuple);

			/* signal tuple should be re-fetched if used */
			newtuple = NULL;
		}
	}

	return true;
}
2951
2952 void
ExecBSTruncateTriggers(EState * estate,ResultRelInfo * relinfo)2953 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2954 {
2955 TriggerDesc *trigdesc;
2956 int i;
2957 TriggerData LocTriggerData = {0};
2958
2959 trigdesc = relinfo->ri_TrigDesc;
2960
2961 if (trigdesc == NULL)
2962 return;
2963 if (!trigdesc->trig_truncate_before_statement)
2964 return;
2965
2966 LocTriggerData.type = T_TriggerData;
2967 LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
2968 TRIGGER_EVENT_BEFORE;
2969 LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2970
2971 for (i = 0; i < trigdesc->numtriggers; i++)
2972 {
2973 Trigger *trigger = &trigdesc->triggers[i];
2974 HeapTuple newtuple;
2975
2976 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2977 TRIGGER_TYPE_STATEMENT,
2978 TRIGGER_TYPE_BEFORE,
2979 TRIGGER_TYPE_TRUNCATE))
2980 continue;
2981 if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2982 NULL, NULL, NULL))
2983 continue;
2984
2985 LocTriggerData.tg_trigger = trigger;
2986 newtuple = ExecCallTriggerFunc(&LocTriggerData,
2987 i,
2988 relinfo->ri_TrigFunctions,
2989 relinfo->ri_TrigInstrument,
2990 GetPerTupleMemoryContext(estate));
2991
2992 if (newtuple)
2993 ereport(ERROR,
2994 (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2995 errmsg("BEFORE STATEMENT trigger cannot return a value")));
2996 }
2997 }
2998
2999 void
ExecASTruncateTriggers(EState * estate,ResultRelInfo * relinfo)3000 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3001 {
3002 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3003
3004 if (trigdesc && trigdesc->trig_truncate_after_statement)
3005 AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_TRUNCATE,
3006 false, NULL, NULL, NIL, NULL, NULL);
3007 }
3008
3009
/*
 * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
 *
 * If epqslot is non-NULL, we lock the tuple (possibly chasing to its latest
 * version) and, when a concurrent update was chased, run EvalPlanQual and
 * return the requalified tuple in *epqslot.  If epqslot is NULL, we just
 * fetch the tuple version identified by "tid" with no locking.
 *
 * Returns true if a tuple was placed in oldslot and should be processed;
 * false if the tuple is gone (deleted, or EPQ failed) and the caller should
 * skip it.
 */
static bool
GetTupleForTrigger(EState *estate,
				   EPQState *epqstate,
				   ResultRelInfo *relinfo,
				   ItemPointer tid,
				   LockTupleMode lockmode,
				   TupleTableSlot *oldslot,
				   TupleTableSlot **epqslot)
{
	Relation	relation = relinfo->ri_RelationDesc;

	if (epqslot != NULL)
	{
		TM_Result	test;
		TM_FailureData tmfd;
		int			lockflags = 0;

		*epqslot = NULL;

		/* caller must pass an epqstate if EvalPlanQual is possible */
		Assert(epqstate != NULL);

		/*
		 * lock tuple for update
		 *
		 * In READ COMMITTED mode we may follow the update chain to the
		 * latest version; in serializable-snapshot modes a concurrent
		 * update must instead raise a serialization failure (see below).
		 */
		if (!IsolationUsesXactSnapshot())
			lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
		test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
								estate->es_output_cid,
								lockmode, LockWaitBlock,
								lockflags,
								&tmfd);

		switch (test)
		{
			case TM_SelfModified:

				/*
				 * The target tuple was already updated or deleted by the
				 * current command, or by a later command in the current
				 * transaction.  We ignore the tuple in the former case, and
				 * throw error in the latter case, for the same reasons
				 * enumerated in ExecUpdate and ExecDelete in
				 * nodeModifyTable.c.
				 */
				if (tmfd.cmax != estate->es_output_cid)
					ereport(ERROR,
							(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
							 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
							 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));

				/* treat it as deleted; do not process */
				return false;

			case TM_Ok:
				/* tmfd.traversed means we locked a newer tuple version */
				if (tmfd.traversed)
				{
					*epqslot = EvalPlanQual(epqstate,
											relation,
											relinfo->ri_RangeTableIndex,
											oldslot);

					/*
					 * If PlanQual failed for updated tuple - we must not
					 * process this tuple!
					 */
					if (TupIsNull(*epqslot))
					{
						*epqslot = NULL;
						return false;
					}
				}
				break;

			case TM_Updated:
				/* only reachable in snapshot-isolation modes, see lockflags */
				if (IsolationUsesXactSnapshot())
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
				elog(ERROR, "unexpected table_tuple_lock status: %u", test);
				break;

			case TM_Deleted:
				if (IsolationUsesXactSnapshot())
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent delete")));
				/* tuple was deleted */
				return false;

			case TM_Invisible:
				elog(ERROR, "attempted to lock invisible tuple");
				break;

			default:
				elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
				return false;	/* keep compiler quiet */
		}
	}
	else
	{
		/*
		 * We expect the tuple to be present, thus very simple error handling
		 * suffices.
		 */
		if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
										   oldslot))
			elog(ERROR, "failed to fetch tuple for trigger");
	}

	return true;
}
3125
3126 /*
3127 * Is trigger enabled to fire?
3128 */
3129 static bool
TriggerEnabled(EState * estate,ResultRelInfo * relinfo,Trigger * trigger,TriggerEvent event,Bitmapset * modifiedCols,TupleTableSlot * oldslot,TupleTableSlot * newslot)3130 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3131 Trigger *trigger, TriggerEvent event,
3132 Bitmapset *modifiedCols,
3133 TupleTableSlot *oldslot, TupleTableSlot *newslot)
3134 {
3135 /* Check replication-role-dependent enable state */
3136 if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3137 {
3138 if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3139 trigger->tgenabled == TRIGGER_DISABLED)
3140 return false;
3141 }
3142 else /* ORIGIN or LOCAL role */
3143 {
3144 if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3145 trigger->tgenabled == TRIGGER_DISABLED)
3146 return false;
3147 }
3148
3149 /*
3150 * Check for column-specific trigger (only possible for UPDATE, and in
3151 * fact we *must* ignore tgattr for other event types)
3152 */
3153 if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3154 {
3155 int i;
3156 bool modified;
3157
3158 modified = false;
3159 for (i = 0; i < trigger->tgnattr; i++)
3160 {
3161 if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3162 modifiedCols))
3163 {
3164 modified = true;
3165 break;
3166 }
3167 }
3168 if (!modified)
3169 return false;
3170 }
3171
3172 /* Check for WHEN clause */
3173 if (trigger->tgqual)
3174 {
3175 ExprState **predicate;
3176 ExprContext *econtext;
3177 MemoryContext oldContext;
3178 int i;
3179
3180 Assert(estate != NULL);
3181
3182 /*
3183 * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3184 * matching element of relinfo->ri_TrigWhenExprs[]
3185 */
3186 i = trigger - relinfo->ri_TrigDesc->triggers;
3187 predicate = &relinfo->ri_TrigWhenExprs[i];
3188
3189 /*
3190 * If first time through for this WHEN expression, build expression
3191 * nodetrees for it. Keep them in the per-query memory context so
3192 * they'll survive throughout the query.
3193 */
3194 if (*predicate == NULL)
3195 {
3196 Node *tgqual;
3197
3198 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3199 tgqual = stringToNode(trigger->tgqual);
3200 /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3201 ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3202 ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3203 /* ExecPrepareQual wants implicit-AND form */
3204 tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3205 *predicate = ExecPrepareQual((List *) tgqual, estate);
3206 MemoryContextSwitchTo(oldContext);
3207 }
3208
3209 /*
3210 * We will use the EState's per-tuple context for evaluating WHEN
3211 * expressions (creating it if it's not already there).
3212 */
3213 econtext = GetPerTupleExprContext(estate);
3214
3215 /*
3216 * Finally evaluate the expression, making the old and/or new tuples
3217 * available as INNER_VAR/OUTER_VAR respectively.
3218 */
3219 econtext->ecxt_innertuple = oldslot;
3220 econtext->ecxt_outertuple = newslot;
3221 if (!ExecQual(*predicate, econtext))
3222 return false;
3223 }
3224
3225 return true;
3226 }
3227
3228
3229 /* ----------
3230 * After-trigger stuff
3231 *
3232 * The AfterTriggersData struct holds data about pending AFTER trigger events
3233 * during the current transaction tree. (BEFORE triggers are fired
3234 * immediately so we don't need any persistent state about them.) The struct
3235 * and most of its subsidiary data are kept in TopTransactionContext; however
3236 * some data that can be discarded sooner appears in the CurTransactionContext
3237 * of the relevant subtransaction. Also, the individual event records are
3238 * kept in a separate sub-context of TopTransactionContext. This is done
3239 * mainly so that it's easy to tell from a memory context dump how much space
3240 * is being eaten by trigger events.
3241 *
3242 * Because the list of pending events can grow large, we go to some
3243 * considerable effort to minimize per-event memory consumption. The event
3244 * records are grouped into chunks and common data for similar events in the
3245 * same chunk is only stored once.
3246 *
3247 * XXX We need to be able to save the per-event data in a file if it grows too
3248 * large.
3249 * ----------
3250 */
3251
/* Per-trigger SET CONSTRAINT status */
typedef struct SetConstraintTriggerData
{
	Oid			sct_tgoid;		/* OID of the constraint trigger */
	bool		sct_tgisdeferred;	/* is it currently set DEFERRED? */
} SetConstraintTriggerData;

typedef struct SetConstraintTriggerData *SetConstraintTrigger;

/*
 * SET CONSTRAINT intra-transaction status.
 *
 * We make this a single palloc'd object so it can be copied and freed easily.
 *
 * all_isset and all_isdeferred are used to keep track
 * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
 *
 * trigstates[] stores per-trigger tgisdeferred settings.
 */
typedef struct SetConstraintStateData
{
	bool		all_isset;		/* has SET CONSTRAINTS ALL been done? */
	bool		all_isdeferred; /* if so, was it DEFERRED? */
	int			numstates;		/* number of trigstates[] entries in use */
	int			numalloc;		/* allocated size of trigstates[] */
	SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
} SetConstraintStateData;

typedef SetConstraintStateData *SetConstraintState;
3281
3282
3283 /*
3284 * Per-trigger-event data
3285 *
3286 * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3287 * status bits and up to two tuple CTIDs. Each event record also has an
3288 * associated AfterTriggerSharedData that is shared across all instances of
3289 * similar events within a "chunk".
3290 *
3291 * For row-level triggers, we arrange not to waste storage on unneeded ctid
3292 * fields. Updates of regular tables use two; inserts and deletes of regular
3293 * tables use one; foreign tables always use zero and save the tuple(s) to a
3294 * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3295 * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3296 * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3297 * tuple(s). This permits storing tuples once regardless of the number of
3298 * row-level triggers on a foreign table.
3299 *
3300 * Note that we need triggers on foreign tables to be fired in exactly the
3301 * order they were queued, so that the tuples come out of the tuplestore in
3302 * the right order. To ensure that, we forbid deferrable (constraint)
3303 * triggers on foreign tables. This also ensures that such triggers do not
3304 * get deferred into outer trigger query levels, meaning that it's okay to
3305 * destroy the tuplestore at the end of the query level.
3306 *
3307 * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3308 * require no ctid field. We lack the flag bit space to neatly represent that
3309 * distinct case, and it seems unlikely to be worth much trouble.
3310 *
3311 * Note: ats_firing_id is initially zero and is set to something else when
3312 * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3313 * cycle the trigger will be fired in (or was fired in, if DONE is set).
3314 * Although this is mutable state, we can keep it in AfterTriggerSharedData
3315 * because all instances of the same type of event in a given event list will
3316 * be fired at the same time, if they were queued between the same firing
3317 * cycles. So we need only ensure that ats_firing_id is zero when attaching
3318 * a new event to an existing AfterTriggerSharedData record.
3319 */
typedef uint32 TriggerFlags;

/* status bits + shared-data offset packed into each event's ate_flags */
#define AFTER_TRIGGER_OFFSET			0x0FFFFFFF	/* must be low-order bits */
#define AFTER_TRIGGER_DONE				0x10000000	/* event already fired */
#define AFTER_TRIGGER_IN_PROGRESS		0x20000000	/* scheduled for firing */
/* bits describing the size and tuple sources of this event */
#define AFTER_TRIGGER_FDW_REUSE			0x00000000
#define AFTER_TRIGGER_FDW_FETCH			0x80000000
#define AFTER_TRIGGER_1CTID				0x40000000
#define AFTER_TRIGGER_2CTID				0xC0000000
#define AFTER_TRIGGER_TUP_BITS			0xC0000000	/* mask for the two bits above */

typedef struct AfterTriggerSharedData *AfterTriggerShared;

/* Data shared by all similar events within one chunk (see header comment) */
typedef struct AfterTriggerSharedData
{
	TriggerEvent ats_event;		/* event type indicator, see trigger.h */
	Oid			ats_tgoid;		/* the trigger's ID */
	Oid			ats_relid;		/* the relation it's on */
	CommandId	ats_firing_id;	/* ID for firing cycle */
	struct AfterTriggersTableData *ats_table;	/* transition table access */
	Bitmapset  *ats_modifiedcols;	/* modified columns */
} AfterTriggerSharedData;

typedef struct AfterTriggerEventData *AfterTriggerEvent;

/* Full-size per-event record: status/offset word plus two CTIDs */
typedef struct AfterTriggerEventData
{
	TriggerFlags ate_flags;		/* status bits and offset to shared data */
	ItemPointerData ate_ctid1;	/* inserted, deleted, or old updated tuple */
	ItemPointerData ate_ctid2;	/* new updated tuple */
} AfterTriggerEventData;

/* AfterTriggerEventData, minus ate_ctid2 */
typedef struct AfterTriggerEventDataOneCtid
{
	TriggerFlags ate_flags;		/* status bits and offset to shared data */
	ItemPointerData ate_ctid1;	/* inserted, deleted, or old updated tuple */
} AfterTriggerEventDataOneCtid;

/* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
typedef struct AfterTriggerEventDataZeroCtids
{
	TriggerFlags ate_flags;		/* status bits and offset to shared data */
} AfterTriggerEventDataZeroCtids;

/* Actual storage size of an event record, per its TUP_BITS flags */
#define SizeofTriggerEvent(evt) \
	(((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
	 sizeof(AfterTriggerEventData) : \
	 ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
	 sizeof(AfterTriggerEventDataOneCtid) : \
	 sizeof(AfterTriggerEventDataZeroCtids))

/* Locate an event's shared record via the offset stored in ate_flags */
#define GetTriggerSharedData(evt) \
	((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3375
/*
 * To avoid palloc overhead, we keep trigger events in arrays in successively-
 * larger chunks (a slightly more sophisticated version of an expansible
 * array).  The space between CHUNK_DATA_START and freeptr is occupied by
 * AfterTriggerEventData records; the space between endfree and endptr is
 * occupied by AfterTriggerSharedData records.
 */
typedef struct AfterTriggerEventChunk
{
	struct AfterTriggerEventChunk *next;	/* list link */
	char	   *freeptr;		/* start of free space in chunk */
	char	   *endfree;		/* end of free space in chunk */
	char	   *endptr;			/* end of chunk */
	/* event data follows here */
} AfterTriggerEventChunk;

/* Address of a chunk's first event record (just past the aligned header) */
#define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))

/* A list of events */
typedef struct AfterTriggerEventList
{
	AfterTriggerEventChunk *head;
	AfterTriggerEventChunk *tail;
	char	   *tailfree;		/* freeptr of tail chunk */
} AfterTriggerEventList;

/* Macros to help in iterating over a list of events */
#define for_each_chunk(cptr, evtlist) \
	for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
#define for_each_event(eptr, cptr) \
	for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
		 (char *) eptr < (cptr)->freeptr; \
		 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
/* Use this if no special per-chunk processing is needed */
#define for_each_event_chunk(eptr, cptr, evtlist) \
	for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)

/* Macros for iterating from a start point that might not be list start */
#define for_each_chunk_from(cptr) \
	for (; cptr != NULL; cptr = cptr->next)
#define for_each_event_from(eptr, cptr) \
	for (; \
		 (char *) eptr < (cptr)->freeptr; \
		 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3420
3421
3422 /*
3423 * All per-transaction data for the AFTER TRIGGERS module.
3424 *
3425 * AfterTriggersData has the following fields:
3426 *
3427 * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3428 * We mark firable events with the current firing cycle's ID so that we can
3429 * tell which ones to work on. This ensures sane behavior if a trigger
3430 * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3431 * only fire those events that weren't already scheduled for firing.
3432 *
3433 * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3434 * This is saved and restored across failed subtransactions.
3435 *
3436 * events is the current list of deferred events. This is global across
3437 * all subtransactions of the current transaction. In a subtransaction
3438 * abort, we know that the events added by the subtransaction are at the
3439 * end of the list, so it is relatively easy to discard them. The event
3440 * list chunks themselves are stored in event_cxt.
3441 *
3442 * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3443 * (-1 when the stack is empty).
3444 *
3445 * query_stack[query_depth] is the per-query-level data, including these fields:
3446 *
3447 * events is a list of AFTER trigger events queued by the current query.
3448 * None of these are valid until the matching AfterTriggerEndQuery call
3449 * occurs. At that point we fire immediate-mode triggers, and append any
3450 * deferred events to the main events list.
3451 *
3452 * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3453 * needed by events queued by the current query. (Note: we use just one
3454 * tuplestore even though more than one foreign table might be involved.
3455 * This is okay because tuplestores don't really care what's in the tuples
3456 * they store; but it's possible that someday it'd break.)
3457 *
3458 * tables is a List of AfterTriggersTableData structs for target tables
3459 * of the current query (see below).
3460 *
3461 * maxquerydepth is just the allocated length of query_stack.
3462 *
3463 * trans_stack holds per-subtransaction data, including these fields:
3464 *
3465 * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3466 * state data. Each subtransaction level that modifies that state first
3467 * saves a copy, which we use to restore the state if we abort.
3468 *
3469 * events is a copy of the events head/tail pointers,
3470 * which we use to restore those values during subtransaction abort.
3471 *
3472 * query_depth is the subtransaction-start-time value of query_depth,
3473 * which we similarly use to clean up at subtransaction abort.
3474 *
3475 * firing_counter is the subtransaction-start-time value of firing_counter.
3476 * We use this to recognize which deferred triggers were fired (or marked
3477 * for firing) within an aborted subtransaction.
3478 *
3479 * We use GetCurrentTransactionNestLevel() to determine the correct array
3480 * index in trans_stack. maxtransdepth is the number of allocated entries in
3481 * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3482 * in cases where errors during subxact abort cause multiple invocations
3483 * of AfterTriggerEndSubXact() at the same nesting depth.)
3484 *
3485 * We create an AfterTriggersTableData struct for each target table of the
3486 * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3487 * either transition tables or statement-level triggers. This is used to
3488 * hold the relevant transition tables, as well as info tracking whether
3489 * we already queued the statement triggers. (We use that info to prevent
3490 * firing the same statement triggers more than once per statement, or really
3491 * once per transition table set.) These structs, along with the transition
3492 * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3493 * That's sufficient lifespan because we don't allow transition tables to be
3494 * used by deferrable triggers, so they only need to survive until
3495 * AfterTriggerEndQuery.
3496 */
/* forward declarations; see the module header comment above for semantics */
typedef struct AfterTriggersQueryData AfterTriggersQueryData;
typedef struct AfterTriggersTransData AfterTriggersTransData;
typedef struct AfterTriggersTableData AfterTriggersTableData;

/* Top-level per-transaction state for the AFTER TRIGGERS module */
typedef struct AfterTriggersData
{
	CommandId	firing_counter; /* next firing ID to assign */
	SetConstraintState state;	/* the active S C state */
	AfterTriggerEventList events;	/* deferred-event list */
	MemoryContext event_cxt;	/* memory context for events, if any */

	/* per-query-level data: */
	AfterTriggersQueryData *query_stack;	/* array of structs shown below */
	int			query_depth;	/* current index in above array */
	int			maxquerydepth;	/* allocated len of above array */

	/* per-subtransaction-level data: */
	AfterTriggersTransData *trans_stack;	/* array of structs shown below */
	int			maxtransdepth;	/* allocated len of above array */
} AfterTriggersData;

/* Per-query-level data (element of query_stack) */
struct AfterTriggersQueryData
{
	AfterTriggerEventList events;	/* events pending from this query */
	Tuplestorestate *fdw_tuplestore;	/* foreign tuples for said events */
	List	   *tables;			/* list of AfterTriggersTableData, see below */
};

/* Per-subtransaction-level data (element of trans_stack) */
struct AfterTriggersTransData
{
	/* these fields are just for resetting at subtrans abort: */
	SetConstraintState state;	/* saved S C state, or NULL if not yet saved */
	AfterTriggerEventList events;	/* saved list pointer */
	int			query_depth;	/* saved query_depth */
	CommandId	firing_counter; /* saved firing_counter */
};

/* Per-target-table, per-operation data for the current query */
struct AfterTriggersTableData
{
	/* relid + cmdType form the lookup key for these structs: */
	Oid			relid;			/* target table's OID */
	CmdType		cmdType;		/* event type, CMD_INSERT/UPDATE/DELETE */
	bool		closed;			/* true when no longer OK to add tuples */
	bool		before_trig_done;	/* did we already queue BS triggers? */
	bool		after_trig_done;	/* did we already queue AS triggers? */
	AfterTriggerEventList after_trig_events;	/* if so, saved list pointer */
	Tuplestorestate *old_tuplestore;	/* "old" transition table, if any */
	Tuplestorestate *new_tuplestore;	/* "new" transition table, if any */
	TupleTableSlot *storeslot;	/* for converting to tuplestore's format */
};

/* The single instance of module-global AFTER-trigger state */
static AfterTriggersData afterTriggers;
3549
/* local function forward declarations */
static void AfterTriggerExecute(EState *estate,
								AfterTriggerEvent event,
								ResultRelInfo *relInfo,
								TriggerDesc *trigdesc,
								FmgrInfo *finfo,
								Instrumentation *instr,
								MemoryContext per_tuple_context,
								TupleTableSlot *trig_tuple_slot1,
								TupleTableSlot *trig_tuple_slot2);
static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid,
														 CmdType cmdType);
static TupleTableSlot *GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
												 TupleDesc tupdesc);
static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs);
static SetConstraintState SetConstraintStateCreate(int numalloc);
static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
													Oid tgoid, bool tgisdeferred);
static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3569
3570
3571 /*
3572 * Get the FDW tuplestore for the current trigger query level, creating it
3573 * if necessary.
3574 */
3575 static Tuplestorestate *
GetCurrentFDWTuplestore(void)3576 GetCurrentFDWTuplestore(void)
3577 {
3578 Tuplestorestate *ret;
3579
3580 ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
3581 if (ret == NULL)
3582 {
3583 MemoryContext oldcxt;
3584 ResourceOwner saveResourceOwner;
3585
3586 /*
3587 * Make the tuplestore valid until end of subtransaction. We really
3588 * only need it until AfterTriggerEndQuery().
3589 */
3590 oldcxt = MemoryContextSwitchTo(CurTransactionContext);
3591 saveResourceOwner = CurrentResourceOwner;
3592 CurrentResourceOwner = CurTransactionResourceOwner;
3593
3594 ret = tuplestore_begin_heap(false, false, work_mem);
3595
3596 CurrentResourceOwner = saveResourceOwner;
3597 MemoryContextSwitchTo(oldcxt);
3598
3599 afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
3600 }
3601
3602 return ret;
3603 }
3604
3605 /* ----------
3606 * afterTriggerCheckState()
3607 *
3608 * Returns true if the trigger event is actually in state DEFERRED.
3609 * ----------
3610 */
3611 static bool
afterTriggerCheckState(AfterTriggerShared evtshared)3612 afterTriggerCheckState(AfterTriggerShared evtshared)
3613 {
3614 Oid tgoid = evtshared->ats_tgoid;
3615 SetConstraintState state = afterTriggers.state;
3616 int i;
3617
3618 /*
3619 * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3620 * constraints declared NOT DEFERRABLE), the state is always false.
3621 */
3622 if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3623 return false;
3624
3625 /*
3626 * If constraint state exists, SET CONSTRAINTS might have been executed
3627 * either for this trigger or for all triggers.
3628 */
3629 if (state != NULL)
3630 {
3631 /* Check for SET CONSTRAINTS for this specific trigger. */
3632 for (i = 0; i < state->numstates; i++)
3633 {
3634 if (state->trigstates[i].sct_tgoid == tgoid)
3635 return state->trigstates[i].sct_tgisdeferred;
3636 }
3637
3638 /* Check for SET CONSTRAINTS ALL. */
3639 if (state->all_isset)
3640 return state->all_isdeferred;
3641 }
3642
3643 /*
3644 * Otherwise return the default state for the trigger.
3645 */
3646 return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3647 }
3648
3649
/* ----------
 * afterTriggerAddEvent()
 *
 *	Add a new trigger event to the specified queue.
 *	The passed-in event data is copied; "evtshared" is matched against the
 *	shared records already in the tail chunk, or copied in if no match.
 * ----------
 */
static void
afterTriggerAddEvent(AfterTriggerEventList *events,
					 AfterTriggerEvent event, AfterTriggerShared evtshared)
{
	Size		eventsize = SizeofTriggerEvent(event);
	Size		needed = eventsize + sizeof(AfterTriggerSharedData);
	AfterTriggerEventChunk *chunk;
	AfterTriggerShared newshared;
	AfterTriggerEvent newevent;

	/*
	 * If empty list or not enough room in the tail chunk, make a new chunk.
	 * We assume here that a new shared record will always be needed.
	 */
	chunk = events->tail;
	if (chunk == NULL ||
		chunk->endfree - chunk->freeptr < needed)
	{
		Size		chunksize;

		/* Create event context if we didn't already */
		if (afterTriggers.event_cxt == NULL)
			afterTriggers.event_cxt =
				AllocSetContextCreate(TopTransactionContext,
									  "AfterTriggerEvents",
									  ALLOCSET_DEFAULT_SIZES);

		/*
		 * Chunk size starts at 1KB and is allowed to increase up to 1MB.
		 * These numbers are fairly arbitrary, though there is a hard limit at
		 * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
		 * shared records using the available space in ate_flags.  Another
		 * constraint is that if the chunk size gets too huge, the search loop
		 * below would get slow given a (not too common) usage pattern with
		 * many distinct event types in a chunk.  Therefore, we double the
		 * preceding chunk size only if there weren't too many shared records
		 * in the preceding chunk; otherwise we halve it.  This gives us some
		 * ability to adapt to the actual usage pattern of the current query
		 * while still having large chunk sizes in typical usage.  All chunk
		 * sizes used should be MAXALIGN multiples, to ensure that the shared
		 * records will be aligned safely.
		 */
#define MIN_CHUNK_SIZE 1024
#define MAX_CHUNK_SIZE (1024*1024)

#if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
#error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
#endif

		if (chunk == NULL)
			chunksize = MIN_CHUNK_SIZE;
		else
		{
			/* preceding chunk size... */
			chunksize = chunk->endptr - (char *) chunk;
			/* check number of shared records in preceding chunk */
			if ((chunk->endptr - chunk->endfree) <=
				(100 * sizeof(AfterTriggerSharedData)))
				chunksize *= 2; /* okay, double it */
			else
				chunksize /= 2; /* too many shared records */
			chunksize = Min(chunksize, MAX_CHUNK_SIZE);
		}
		chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
		chunk->next = NULL;
		chunk->freeptr = CHUNK_DATA_START(chunk);
		chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
		Assert(chunk->endfree - chunk->freeptr >= needed);

		/* Link the new chunk at the tail of the list */
		if (events->head == NULL)
			events->head = chunk;
		else
			events->tail->next = chunk;
		events->tail = chunk;
		/* events->tailfree is now out of sync, but we'll fix it below */
	}

	/*
	 * Try to locate a matching shared-data record already in the chunk. If
	 * none, make a new one.  (Shared records grow downward from endptr;
	 * only records with ats_firing_id still zero may be reused.)
	 */
	for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
		 (char *) newshared >= chunk->endfree;
		 newshared--)
	{
		if (newshared->ats_tgoid == evtshared->ats_tgoid &&
			newshared->ats_relid == evtshared->ats_relid &&
			newshared->ats_event == evtshared->ats_event &&
			newshared->ats_table == evtshared->ats_table &&
			newshared->ats_firing_id == 0)
			break;
	}
	if ((char *) newshared < chunk->endfree)
	{
		/* no match found: append a new shared record below the others */
		*newshared = *evtshared;
		newshared->ats_firing_id = 0;	/* just to be sure */
		chunk->endfree = (char *) newshared;
	}

	/* Insert the data */
	newevent = (AfterTriggerEvent) chunk->freeptr;
	memcpy(newevent, event, eventsize);
	/* ... and link the new event to its shared record */
	newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
	newevent->ate_flags |= (char *) newshared - (char *) newevent;

	chunk->freeptr += eventsize;
	events->tailfree = chunk->freeptr;
}
3766
3767 /* ----------
3768 * afterTriggerFreeEventList()
3769 *
3770 * Free all the event storage in the given list.
3771 * ----------
3772 */
3773 static void
afterTriggerFreeEventList(AfterTriggerEventList * events)3774 afterTriggerFreeEventList(AfterTriggerEventList *events)
3775 {
3776 AfterTriggerEventChunk *chunk;
3777
3778 while ((chunk = events->head) != NULL)
3779 {
3780 events->head = chunk->next;
3781 pfree(chunk);
3782 }
3783 events->tail = NULL;
3784 events->tailfree = NULL;
3785 }
3786
3787 /* ----------
3788 * afterTriggerRestoreEventList()
3789 *
3790 * Restore an event list to its prior length, removing all the events
3791 * added since it had the value old_events.
3792 * ----------
3793 */
3794 static void
afterTriggerRestoreEventList(AfterTriggerEventList * events,const AfterTriggerEventList * old_events)3795 afterTriggerRestoreEventList(AfterTriggerEventList *events,
3796 const AfterTriggerEventList *old_events)
3797 {
3798 AfterTriggerEventChunk *chunk;
3799 AfterTriggerEventChunk *next_chunk;
3800
3801 if (old_events->tail == NULL)
3802 {
3803 /* restoring to a completely empty state, so free everything */
3804 afterTriggerFreeEventList(events);
3805 }
3806 else
3807 {
3808 *events = *old_events;
3809 /* free any chunks after the last one we want to keep */
3810 for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3811 {
3812 next_chunk = chunk->next;
3813 pfree(chunk);
3814 }
3815 /* and clean up the tail chunk to be the right length */
3816 events->tail->next = NULL;
3817 events->tail->freeptr = events->tailfree;
3818
3819 /*
3820 * We don't make any effort to remove now-unused shared data records.
3821 * They might still be useful, anyway.
3822 */
3823 }
3824 }
3825
3826 /* ----------
3827 * afterTriggerDeleteHeadEventChunk()
3828 *
3829 * Remove the first chunk of events from the query level's event list.
3830 * Keep any event list pointers elsewhere in the query level's data
3831 * structures in sync.
3832 * ----------
3833 */
3834 static void
afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData * qs)3835 afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
3836 {
3837 AfterTriggerEventChunk *target = qs->events.head;
3838 ListCell *lc;
3839
3840 Assert(target && target->next);
3841
3842 /*
3843 * First, update any pointers in the per-table data, so that they won't be
3844 * dangling. Resetting obsoleted pointers to NULL will make
3845 * cancel_prior_stmt_triggers start from the list head, which is fine.
3846 */
3847 foreach(lc, qs->tables)
3848 {
3849 AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
3850
3851 if (table->after_trig_done &&
3852 table->after_trig_events.tail == target)
3853 {
3854 table->after_trig_events.head = NULL;
3855 table->after_trig_events.tail = NULL;
3856 table->after_trig_events.tailfree = NULL;
3857 }
3858 }
3859
3860 /* Now we can flush the head chunk */
3861 qs->events.head = target->next;
3862 pfree(target);
3863 }
3864
3865
3866 /* ----------
3867 * AfterTriggerExecute()
3868 *
3869 * Fetch the required tuples back from the heap and fire one
3870 * single trigger function.
3871 *
3872 * Frequently, this will be fired many times in a row for triggers of
3873 * a single relation. Therefore, we cache the open relation and provide
3874 * fmgr lookup cache space at the caller level. (For triggers fired at
3875 * the end of a query, we can even piggyback on the executor's state.)
3876 *
3877 * event: event currently being fired.
3878 * rel: open relation for event.
3879 * trigdesc: working copy of rel's trigger info.
3880 * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
3881 * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
3882 * or NULL if no instrumentation is wanted.
3883 * per_tuple_context: memory context to call trigger function in.
3884 * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
3885 * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
3886 * ----------
3887 */
static void
AfterTriggerExecute(EState *estate,
					AfterTriggerEvent event,
					ResultRelInfo *relInfo,
					TriggerDesc *trigdesc,
					FmgrInfo *finfo, Instrumentation *instr,
					MemoryContext per_tuple_context,
					TupleTableSlot *trig_tuple_slot1,
					TupleTableSlot *trig_tuple_slot2)
{
	Relation	rel = relInfo->ri_RelationDesc;
	AfterTriggerShared evtshared = GetTriggerSharedData(event);
	Oid			tgoid = evtshared->ats_tgoid;
	TriggerData LocTriggerData = {0};
	HeapTuple	rettuple;
	int			tgindx;
	bool		should_free_trig = false;
	bool		should_free_new = false;

	/*
	 * Locate trigger in trigdesc.  It is an error (caller bug or catalog
	 * inconsistency) for the event's trigger OID not to be present.
	 */
	for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
	{
		if (trigdesc->triggers[tgindx].tgoid == tgoid)
		{
			LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
			break;
		}
	}
	if (LocTriggerData.tg_trigger == NULL)
		elog(ERROR, "could not find trigger %u", tgoid);

	/*
	 * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
	 * to include time spent re-fetching tuples in the trigger cost.
	 */
	if (instr)
		InstrStartNode(instr + tgindx);

	/*
	 * Fetch the required tuple(s).  How they are obtained depends on the
	 * event type recorded in ate_flags.
	 */
	switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
	{
		case AFTER_TRIGGER_FDW_FETCH:
			/*
			 * Foreign-table case: pull the tuple(s) out of the per-query
			 * FDW tuplestore into the caller-provided scratch slots, then
			 * fall through to the common FDW handling.
			 */
			{
				Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();

				if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
											 trig_tuple_slot1))
					elog(ERROR, "failed to fetch tuple1 for AFTER trigger");

				if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
					TRIGGER_EVENT_UPDATE &&
					!tuplestore_gettupleslot(fdw_tuplestore, true, false,
											 trig_tuple_slot2))
					elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
			}
			/* fall through */
		case AFTER_TRIGGER_FDW_REUSE:

			/*
			 * Store tuple in the slot so that tg_trigtuple does not reference
			 * tuplestore memory.  (It is formally possible for the trigger
			 * function to queue trigger events that add to the same
			 * tuplestore, which can push other tuples out of memory.)  The
			 * distinction is academic, because we start with a minimal tuple
			 * that is stored as a heap tuple, constructed in different memory
			 * context, in the slot anyway.
			 */
			LocTriggerData.tg_trigslot = trig_tuple_slot1;
			LocTriggerData.tg_trigtuple =
				ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);

			if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
				TRIGGER_EVENT_UPDATE)
			{
				LocTriggerData.tg_newslot = trig_tuple_slot2;
				LocTriggerData.tg_newtuple =
					ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
			}
			else
			{
				LocTriggerData.tg_newtuple = NULL;
			}
			break;

		default:
			/* Regular table: re-fetch row version(s) by TID. */
			if (ItemPointerIsValid(&(event->ate_ctid1)))
			{
				LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);

				/* SnapshotAny: the row was queued by us, so it must exist */
				if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid1),
												   SnapshotAny,
												   LocTriggerData.tg_trigslot))
					elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
				LocTriggerData.tg_trigtuple =
					ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
			}
			else
			{
				LocTriggerData.tg_trigtuple = NULL;
			}

			/* don't touch ctid2 if not there */
			if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
				AFTER_TRIGGER_2CTID &&
				ItemPointerIsValid(&(event->ate_ctid2)))
			{
				LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);

				if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid2),
												   SnapshotAny,
												   LocTriggerData.tg_newslot))
					elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
				LocTriggerData.tg_newtuple =
					ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
			}
			else
			{
				LocTriggerData.tg_newtuple = NULL;
			}
	}

	/*
	 * Set up the tuplestore information to let the trigger have access to
	 * transition tables.  When we first make a transition table available to
	 * a trigger, mark it "closed" so that it cannot change anymore.  If any
	 * additional events of the same type get queued in the current trigger
	 * query level, they'll go into new transition tables.
	 */
	LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
	if (evtshared->ats_table)
	{
		if (LocTriggerData.tg_trigger->tgoldtable)
		{
			LocTriggerData.tg_oldtable = evtshared->ats_table->old_tuplestore;
			evtshared->ats_table->closed = true;
		}

		if (LocTriggerData.tg_trigger->tgnewtable)
		{
			LocTriggerData.tg_newtable = evtshared->ats_table->new_tuplestore;
			evtshared->ats_table->closed = true;
		}
	}

	/*
	 * Setup the remaining trigger information
	 */
	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event =
		evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
	LocTriggerData.tg_relation = rel;
	if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
		LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;

	MemoryContextReset(per_tuple_context);

	/*
	 * Call the trigger and throw away any possibly returned updated tuple.
	 * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
	 */
	rettuple = ExecCallTriggerFunc(&LocTriggerData,
								   tgindx,
								   finfo,
								   NULL,
								   per_tuple_context);
	if (rettuple != NULL &&
		rettuple != LocTriggerData.tg_trigtuple &&
		rettuple != LocTriggerData.tg_newtuple)
		heap_freetuple(rettuple);

	/*
	 * Release resources
	 */
	if (should_free_trig)
		heap_freetuple(LocTriggerData.tg_trigtuple);
	if (should_free_new)
		heap_freetuple(LocTriggerData.tg_newtuple);

	/* don't clear slots' contents if foreign table */
	if (trig_tuple_slot1 == NULL)
	{
		if (LocTriggerData.tg_trigslot)
			ExecClearTuple(LocTriggerData.tg_trigslot);
		if (LocTriggerData.tg_newslot)
			ExecClearTuple(LocTriggerData.tg_newslot);
	}

	/*
	 * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
	 * one "tuple returned" (really the number of firings).
	 */
	if (instr)
		InstrStopNode(instr + tgindx, 1);
}
4086
4087
4088 /*
4089 * afterTriggerMarkEvents()
4090 *
4091 * Scan the given event list for not yet invoked events. Mark the ones
4092 * that can be invoked now with the current firing ID.
4093 *
4094 * If move_list isn't NULL, events that are not to be invoked now are
4095 * transferred to move_list.
4096 *
4097 * When immediate_only is true, do not invoke currently-deferred triggers.
4098 * (This will be false only at main transaction exit.)
4099 *
4100 * Returns true if any invokable events were found.
4101 */
static bool
afterTriggerMarkEvents(AfterTriggerEventList *events,
					   AfterTriggerEventList *move_list,
					   bool immediate_only)
{
	bool		found = false;
	bool		deferred_found = false;
	AfterTriggerEvent event;
	AfterTriggerEventChunk *chunk;

	/* Walk every event in every chunk of the list. */
	for_each_event_chunk(event, chunk, *events)
	{
		AfterTriggerShared evtshared = GetTriggerSharedData(event);
		bool		defer_it = false;

		if (!(event->ate_flags &
			  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
		{
			/*
			 * This trigger hasn't been called or scheduled yet. Check if we
			 * should call it now.
			 */
			if (immediate_only && afterTriggerCheckState(evtshared))
			{
				/* currently deferred; don't fire in this pass */
				defer_it = true;
			}
			else
			{
				/*
				 * Mark it as to be fired in this firing cycle, by stamping
				 * it with the current firing counter.
				 */
				evtshared->ats_firing_id = afterTriggers.firing_counter;
				event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
				found = true;
			}
		}

		/*
		 * If it's deferred, move it to move_list, if requested.
		 */
		if (defer_it && move_list != NULL)
		{
			deferred_found = true;
			/* add it to move_list */
			afterTriggerAddEvent(move_list, event, evtshared);
			/* mark original copy "done" so we don't do it again */
			event->ate_flags |= AFTER_TRIGGER_DONE;
		}
	}

	/*
	 * We could allow deferred triggers if, before the end of the
	 * security-restricted operation, we were to verify that a SET CONSTRAINTS
	 * ... IMMEDIATE has fired all such triggers.  For now, don't bother.
	 */
	if (deferred_found && InSecurityRestrictedOperation())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("cannot fire deferred trigger within security-restricted operation")));

	return found;
}
4164
4165 /*
4166 * afterTriggerInvokeEvents()
4167 *
4168 * Scan the given event list for events that are marked as to be fired
4169 * in the current firing cycle, and fire them.
4170 *
4171 * If estate isn't NULL, we use its result relation info to avoid repeated
4172 * openings and closing of trigger target relations. If it is NULL, we
4173 * make one locally to cache the info in case there are multiple trigger
4174 * events per rel.
4175 *
4176 * When delete_ok is true, it's safe to delete fully-processed events.
4177 * (We are not very tense about that: we simply reset a chunk to be empty
4178 * if all its events got fired. The objective here is just to avoid useless
4179 * rescanning of events when a trigger queues new events during transaction
4180 * end, so it's not necessary to worry much about the case where only
4181 * some events are fired.)
4182 *
4183 * Returns true if no unfired events remain in the list (this allows us
4184 * to avoid repeating afterTriggerMarkEvents).
4185 */
static bool
afterTriggerInvokeEvents(AfterTriggerEventList *events,
						 CommandId firing_id,
						 EState *estate,
						 bool delete_ok)
{
	bool		all_fired = true;
	AfterTriggerEventChunk *chunk;
	MemoryContext per_tuple_context;
	bool		local_estate = false;
	ResultRelInfo *rInfo = NULL;
	Relation	rel = NULL;			/* currently-open target relation, cached */
	TriggerDesc *trigdesc = NULL;
	FmgrInfo   *finfo = NULL;
	Instrumentation *instr = NULL;
	TupleTableSlot *slot1 = NULL,	/* scratch slots, used for foreign tables */
			   *slot2 = NULL;

	/* Make a local EState if need be */
	if (estate == NULL)
	{
		estate = CreateExecutorState();
		local_estate = true;
	}

	/* Make a per-tuple memory context for trigger function calls */
	per_tuple_context =
		AllocSetContextCreate(CurrentMemoryContext,
							  "AfterTriggerTupleContext",
							  ALLOCSET_DEFAULT_SIZES);

	for_each_chunk(chunk, *events)
	{
		AfterTriggerEvent event;
		bool		all_fired_in_chunk = true;

		for_each_event(event, chunk)
		{
			AfterTriggerShared evtshared = GetTriggerSharedData(event);

			/*
			 * Is it one for me to fire?  Only events marked IN_PROGRESS with
			 * our firing ID belong to this firing cycle.
			 */
			if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
				evtshared->ats_firing_id == firing_id)
			{
				/*
				 * So let's fire it... but first, find the correct relation if
				 * this is not the same relation as before.
				 */
				if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
				{
					rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
					rel = rInfo->ri_RelationDesc;
					/* Catch calls with insufficient relcache refcounting */
					Assert(!RelationHasReferenceCountZero(rel));
					trigdesc = rInfo->ri_TrigDesc;
					finfo = rInfo->ri_TrigFunctions;
					instr = rInfo->ri_TrigInstrument;
					/* drop slots built for the previous (foreign) relation */
					if (slot1 != NULL)
					{
						ExecDropSingleTupleTableSlot(slot1);
						ExecDropSingleTupleTableSlot(slot2);
						slot1 = slot2 = NULL;
					}
					if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
					{
						slot1 = MakeSingleTupleTableSlot(rel->rd_att,
														&TTSOpsMinimalTuple);
						slot2 = MakeSingleTupleTableSlot(rel->rd_att,
														&TTSOpsMinimalTuple);
					}
					if (trigdesc == NULL)	/* should not happen */
						elog(ERROR, "relation %u has no triggers",
							 evtshared->ats_relid);
				}

				/*
				 * Fire it.  Note that the AFTER_TRIGGER_IN_PROGRESS flag is
				 * still set, so recursive examinations of the event list
				 * won't try to re-fire it.
				 */
				AfterTriggerExecute(estate, event, rInfo, trigdesc, finfo, instr,
									per_tuple_context, slot1, slot2);

				/*
				 * Mark the event as done.
				 */
				event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
				event->ate_flags |= AFTER_TRIGGER_DONE;
			}
			else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
			{
				/* something remains to be done */
				all_fired = all_fired_in_chunk = false;
			}
		}

		/* Clear the chunk if delete_ok and nothing left of interest */
		if (delete_ok && all_fired_in_chunk)
		{
			chunk->freeptr = CHUNK_DATA_START(chunk);
			chunk->endfree = chunk->endptr;

			/*
			 * If it's last chunk, must sync event list's tailfree too.  Note
			 * that delete_ok must NOT be passed as true if there could be
			 * additional AfterTriggerEventList values pointing at this event
			 * list, since we'd fail to fix their copies of tailfree.
			 */
			if (chunk == events->tail)
				events->tailfree = chunk->freeptr;
		}
	}
	/* Drop any foreign-table scratch slots still alive */
	if (slot1 != NULL)
	{
		ExecDropSingleTupleTableSlot(slot1);
		ExecDropSingleTupleTableSlot(slot2);
	}

	/* Release working resources */
	MemoryContextDelete(per_tuple_context);

	if (local_estate)
	{
		ExecCloseResultRelations(estate);
		ExecResetTupleTable(estate->es_tupleTable, false);
		FreeExecutorState(estate);
	}

	return all_fired;
}
4318
4319
4320 /*
4321 * GetAfterTriggersTableData
4322 *
4323 * Find or create an AfterTriggersTableData struct for the specified
4324 * trigger event (relation + operation type). Ignore existing structs
4325 * marked "closed"; we don't want to put any additional tuples into them,
4326 * nor change their stmt-triggers-fired state.
4327 *
4328 * Note: the AfterTriggersTableData list is allocated in the current
4329 * (sub)transaction's CurTransactionContext. This is OK because
4330 * we don't need it to live past AfterTriggerEndQuery.
4331 */
4332 static AfterTriggersTableData *
GetAfterTriggersTableData(Oid relid,CmdType cmdType)4333 GetAfterTriggersTableData(Oid relid, CmdType cmdType)
4334 {
4335 AfterTriggersTableData *table;
4336 AfterTriggersQueryData *qs;
4337 MemoryContext oldcxt;
4338 ListCell *lc;
4339
4340 /* Caller should have ensured query_depth is OK. */
4341 Assert(afterTriggers.query_depth >= 0 &&
4342 afterTriggers.query_depth < afterTriggers.maxquerydepth);
4343 qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4344
4345 foreach(lc, qs->tables)
4346 {
4347 table = (AfterTriggersTableData *) lfirst(lc);
4348 if (table->relid == relid && table->cmdType == cmdType &&
4349 !table->closed)
4350 return table;
4351 }
4352
4353 oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4354
4355 table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
4356 table->relid = relid;
4357 table->cmdType = cmdType;
4358 qs->tables = lappend(qs->tables, table);
4359
4360 MemoryContextSwitchTo(oldcxt);
4361
4362 return table;
4363 }
4364
4365 /*
4366 * Returns a TupleTableSlot suitable for holding the tuples to be put
4367 * into AfterTriggersTableData's transition table tuplestores.
4368 */
4369 static TupleTableSlot *
GetAfterTriggersStoreSlot(AfterTriggersTableData * table,TupleDesc tupdesc)4370 GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
4371 TupleDesc tupdesc)
4372 {
4373 /* Create it if not already done. */
4374 if (!table->storeslot)
4375 {
4376 MemoryContext oldcxt;
4377
4378 /*
4379 * We only need this slot only until AfterTriggerEndQuery, but making
4380 * it last till end-of-subxact is good enough. It'll be freed by
4381 * AfterTriggerFreeQuery().
4382 */
4383 oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4384 table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
4385 MemoryContextSwitchTo(oldcxt);
4386 }
4387
4388 return table->storeslot;
4389 }
4390
4391 /*
4392 * MakeTransitionCaptureState
4393 *
4394 * Make a TransitionCaptureState object for the given TriggerDesc, target
4395 * relation, and operation type. The TCS object holds all the state needed
4396 * to decide whether to capture tuples in transition tables.
4397 *
4398 * If there are no triggers in 'trigdesc' that request relevant transition
4399 * tables, then return NULL.
4400 *
4401 * The resulting object can be passed to the ExecAR* functions. When
4402 * dealing with child tables, the caller can set tcs_original_insert_tuple
4403 * to avoid having to reconstruct the original tuple in the root table's
4404 * format.
4405 *
4406 * Note that we copy the flags from a parent table into this struct (rather
4407 * than subsequently using the relation's TriggerDesc directly) so that we can
4408 * use it to control collection of transition tuples from child tables.
4409 *
4410 * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4411 * on the same table during one query should share one transition table.
4412 * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4413 * looked up using the table OID + CmdType, and are merely referenced by
4414 * the TransitionCaptureState objects we hand out to callers.
4415 */
TransitionCaptureState *
MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
{
	TransitionCaptureState *state;
	bool		need_old,
				need_new;
	AfterTriggersTableData *table;
	MemoryContext oldcxt;
	ResourceOwner saveResourceOwner;

	if (trigdesc == NULL)
		return NULL;

	/* Detect which table(s) we need. */
	switch (cmdType)
	{
		case CMD_INSERT:
			need_old = false;
			need_new = trigdesc->trig_insert_new_table;
			break;
		case CMD_UPDATE:
			need_old = trigdesc->trig_update_old_table;
			need_new = trigdesc->trig_update_new_table;
			break;
		case CMD_DELETE:
			need_old = trigdesc->trig_delete_old_table;
			need_new = false;
			break;
		default:
			elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
			need_old = need_new = false;	/* keep compiler quiet */
			break;
	}
	/* No relevant transition-table triggers: caller needs no TCS at all */
	if (!need_old && !need_new)
		return NULL;

	/* Check state, like AfterTriggerSaveEvent. */
	if (afterTriggers.query_depth < 0)
		elog(ERROR, "MakeTransitionCaptureState() called outside of query");

	/* Be sure we have enough space to record events at this query depth. */
	if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
		AfterTriggerEnlargeQueryState();

	/*
	 * Find or create an AfterTriggersTableData struct to hold the
	 * tuplestore(s).  If there's a matching struct but it's marked closed,
	 * ignore it; we need a newer one.
	 *
	 * Note: the AfterTriggersTableData list, as well as the tuplestores, are
	 * allocated in the current (sub)transaction's CurTransactionContext, and
	 * the tuplestores are managed by the (sub)transaction's resource owner.
	 * This is sufficient lifespan because we do not allow triggers using
	 * transition tables to be deferrable; they will be fired during
	 * AfterTriggerEndQuery, after which it's okay to delete the data.
	 */
	table = GetAfterTriggersTableData(relid, cmdType);

	/*
	 * Now create required tuplestore(s), if we don't have them already.
	 * Temporarily switch both memory context and resource owner so the
	 * tuplestores have (sub)transaction lifespan, per the note above.
	 */
	oldcxt = MemoryContextSwitchTo(CurTransactionContext);
	saveResourceOwner = CurrentResourceOwner;
	CurrentResourceOwner = CurTransactionResourceOwner;

	if (need_old && table->old_tuplestore == NULL)
		table->old_tuplestore = tuplestore_begin_heap(false, false, work_mem);
	if (need_new && table->new_tuplestore == NULL)
		table->new_tuplestore = tuplestore_begin_heap(false, false, work_mem);

	CurrentResourceOwner = saveResourceOwner;
	MemoryContextSwitchTo(oldcxt);

	/* Now build the TransitionCaptureState struct, in caller's context */
	state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState));
	state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
	state->tcs_update_old_table = trigdesc->trig_update_old_table;
	state->tcs_update_new_table = trigdesc->trig_update_new_table;
	state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
	state->tcs_private = table;

	return state;
}
4497
4498
4499 /* ----------
4500 * AfterTriggerBeginXact()
4501 *
4502 * Called at transaction start (either BEGIN or implicit for single
4503 * statement outside of transaction block).
4504 * ----------
4505 */
4506 void
AfterTriggerBeginXact(void)4507 AfterTriggerBeginXact(void)
4508 {
4509 /*
4510 * Initialize after-trigger state structure to empty
4511 */
4512 afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4513 afterTriggers.query_depth = -1;
4514
4515 /*
4516 * Verify that there is no leftover state remaining. If these assertions
4517 * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4518 * up properly.
4519 */
4520 Assert(afterTriggers.state == NULL);
4521 Assert(afterTriggers.query_stack == NULL);
4522 Assert(afterTriggers.maxquerydepth == 0);
4523 Assert(afterTriggers.event_cxt == NULL);
4524 Assert(afterTriggers.events.head == NULL);
4525 Assert(afterTriggers.trans_stack == NULL);
4526 Assert(afterTriggers.maxtransdepth == 0);
4527 }
4528
4529
4530 /* ----------
4531 * AfterTriggerBeginQuery()
4532 *
4533 * Called just before we start processing a single query within a
4534 * transaction (or subtransaction). Most of the real work gets deferred
4535 * until somebody actually tries to queue a trigger event.
4536 * ----------
4537 */
4538 void
AfterTriggerBeginQuery(void)4539 AfterTriggerBeginQuery(void)
4540 {
4541 /* Increase the query stack depth */
4542 afterTriggers.query_depth++;
4543 }
4544
4545
4546 /* ----------
4547 * AfterTriggerEndQuery()
4548 *
4549 * Called after one query has been completely processed. At this time
4550 * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4551 * transfer deferred trigger events to the global deferred-trigger list.
4552 *
4553 * Note that this must be called BEFORE closing down the executor
4554 * with ExecutorEnd, because we make use of the EState's info about
4555 * target relations. Normally it is called from ExecutorFinish.
4556 * ----------
4557 */
void
AfterTriggerEndQuery(EState *estate)
{
	AfterTriggersQueryData *qs;

	/* Must be inside a query, too */
	Assert(afterTriggers.query_depth >= 0);

	/*
	 * If we never even got as far as initializing the event stack, there
	 * certainly won't be any events, so exit quickly.
	 */
	if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
	{
		afterTriggers.query_depth--;
		return;
	}

	/*
	 * Process all immediate-mode triggers queued by the query, and move the
	 * deferred ones to the main list of deferred events.
	 *
	 * Notice that we decide which ones will be fired, and put the deferred
	 * ones on the main list, before anything is actually fired.  This ensures
	 * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
	 * IMMEDIATE: all events we have decided to defer will be available for it
	 * to fire.
	 *
	 * We loop in case a trigger queues more events at the same query level.
	 * Ordinary trigger functions, including all PL/pgSQL trigger functions,
	 * will instead fire any triggers in a dedicated query level.  Foreign key
	 * enforcement triggers do add to the current query level, thanks to their
	 * passing fire_triggers = false to SPI_execute_snapshot().  Other
	 * C-language triggers might do likewise.
	 *
	 * If we find no firable events, we don't have to increment
	 * firing_counter.
	 */
	qs = &afterTriggers.query_stack[afterTriggers.query_depth];

	for (;;)
	{
		if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
		{
			/* Each invocation pass gets its own firing ID */
			CommandId	firing_id = afterTriggers.firing_counter++;
			AfterTriggerEventChunk *oldtail = qs->events.tail;

			if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
				break;			/* all fired */

			/*
			 * Firing a trigger could result in query_stack being repalloc'd,
			 * so we must recalculate qs after each afterTriggerInvokeEvents
			 * call.  Furthermore, it's unsafe to pass delete_ok = true here,
			 * because that could cause afterTriggerInvokeEvents to try to
			 * access qs->events after the stack has been repalloc'd.
			 */
			qs = &afterTriggers.query_stack[afterTriggers.query_depth];

			/*
			 * We'll need to scan the events list again.  To reduce the cost
			 * of doing so, get rid of completely-fired chunks.  We know that
			 * all events were marked IN_PROGRESS or DONE at the conclusion of
			 * afterTriggerMarkEvents, so any still-interesting events must
			 * have been added after that, and so must be in the chunk that
			 * was then the tail chunk, or in later chunks.  So, zap all
			 * chunks before oldtail.  This is approximately the same set of
			 * events we would have gotten rid of by passing delete_ok = true.
			 */
			Assert(oldtail != NULL);
			while (qs->events.head != oldtail)
				afterTriggerDeleteHeadEventChunk(qs);
		}
		else
			break;				/* nothing firable was found */
	}

	/* Release query-level-local storage, including tuplestores if any */
	AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);

	afterTriggers.query_depth--;
}
4640
4641
4642 /*
4643 * AfterTriggerFreeQuery
4644 * Release subsidiary storage for a trigger query level.
4645 * This includes closing down tuplestores.
4646 * Note: it's important for this to be safe if interrupted by an error
4647 * and then called again for the same query level.
4648 */
4649 static void
AfterTriggerFreeQuery(AfterTriggersQueryData * qs)4650 AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
4651 {
4652 Tuplestorestate *ts;
4653 List *tables;
4654 ListCell *lc;
4655
4656 /* Drop the trigger events */
4657 afterTriggerFreeEventList(&qs->events);
4658
4659 /* Drop FDW tuplestore if any */
4660 ts = qs->fdw_tuplestore;
4661 qs->fdw_tuplestore = NULL;
4662 if (ts)
4663 tuplestore_end(ts);
4664
4665 /* Release per-table subsidiary storage */
4666 tables = qs->tables;
4667 foreach(lc, tables)
4668 {
4669 AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4670
4671 ts = table->old_tuplestore;
4672 table->old_tuplestore = NULL;
4673 if (ts)
4674 tuplestore_end(ts);
4675 ts = table->new_tuplestore;
4676 table->new_tuplestore = NULL;
4677 if (ts)
4678 tuplestore_end(ts);
4679 if (table->storeslot)
4680 ExecDropSingleTupleTableSlot(table->storeslot);
4681 }
4682
4683 /*
4684 * Now free the AfterTriggersTableData structs and list cells. Reset list
4685 * pointer first; if list_free_deep somehow gets an error, better to leak
4686 * that storage than have an infinite loop.
4687 */
4688 qs->tables = NIL;
4689 list_free_deep(tables);
4690 }
4691
4692
4693 /* ----------
4694 * AfterTriggerFireDeferred()
4695 *
4696 * Called just before the current transaction is committed. At this
4697 * time we invoke all pending DEFERRED triggers.
4698 *
4699 * It is possible for other modules to queue additional deferred triggers
4700 * during pre-commit processing; therefore xact.c may have to call this
4701 * multiple times.
4702 * ----------
4703 */
4704 void
AfterTriggerFireDeferred(void)4705 AfterTriggerFireDeferred(void)
4706 {
4707 AfterTriggerEventList *events;
4708 bool snap_pushed = false;
4709
4710 /* Must not be inside a query */
4711 Assert(afterTriggers.query_depth == -1);
4712
4713 /*
4714 * If there are any triggers to fire, make sure we have set a snapshot for
4715 * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
4716 * can't assume ActiveSnapshot is valid on entry.)
4717 */
4718 events = &afterTriggers.events;
4719 if (events->head != NULL)
4720 {
4721 PushActiveSnapshot(GetTransactionSnapshot());
4722 snap_pushed = true;
4723 }
4724
4725 /*
4726 * Run all the remaining triggers. Loop until they are all gone, in case
4727 * some trigger queues more for us to do.
4728 */
4729 while (afterTriggerMarkEvents(events, NULL, false))
4730 {
4731 CommandId firing_id = afterTriggers.firing_counter++;
4732
4733 if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4734 break; /* all fired */
4735 }
4736
4737 /*
4738 * We don't bother freeing the event list, since it will go away anyway
4739 * (and more efficiently than via pfree) in AfterTriggerEndXact.
4740 */
4741
4742 if (snap_pushed)
4743 PopActiveSnapshot();
4744 }
4745
4746
4747 /* ----------
4748 * AfterTriggerEndXact()
4749 *
4750 * The current transaction is finishing.
4751 *
4752 * Any unfired triggers are canceled so we simply throw
4753 * away anything we know.
4754 *
4755 * Note: it is possible for this to be called repeatedly in case of
4756 * error during transaction abort; therefore, do not complain if
4757 * already closed down.
4758 * ----------
4759 */
4760 void
AfterTriggerEndXact(bool isCommit)4761 AfterTriggerEndXact(bool isCommit)
4762 {
4763 /*
4764 * Forget the pending-events list.
4765 *
4766 * Since all the info is in TopTransactionContext or children thereof, we
4767 * don't really need to do anything to reclaim memory. However, the
4768 * pending-events list could be large, and so it's useful to discard it as
4769 * soon as possible --- especially if we are aborting because we ran out
4770 * of memory for the list!
4771 */
4772 if (afterTriggers.event_cxt)
4773 {
4774 MemoryContextDelete(afterTriggers.event_cxt);
4775 afterTriggers.event_cxt = NULL;
4776 afterTriggers.events.head = NULL;
4777 afterTriggers.events.tail = NULL;
4778 afterTriggers.events.tailfree = NULL;
4779 }
4780
4781 /*
4782 * Forget any subtransaction state as well. Since this can't be very
4783 * large, we let the eventual reset of TopTransactionContext free the
4784 * memory instead of doing it here.
4785 */
4786 afterTriggers.trans_stack = NULL;
4787 afterTriggers.maxtransdepth = 0;
4788
4789
4790 /*
4791 * Forget the query stack and constraint-related state information. As
4792 * with the subtransaction state information, we don't bother freeing the
4793 * memory here.
4794 */
4795 afterTriggers.query_stack = NULL;
4796 afterTriggers.maxquerydepth = 0;
4797 afterTriggers.state = NULL;
4798
4799 /* No more afterTriggers manipulation until next transaction starts. */
4800 afterTriggers.query_depth = -1;
4801 }
4802
4803 /*
4804 * AfterTriggerBeginSubXact()
4805 *
4806 * Start a subtransaction.
4807 */
4808 void
AfterTriggerBeginSubXact(void)4809 AfterTriggerBeginSubXact(void)
4810 {
4811 int my_level = GetCurrentTransactionNestLevel();
4812
4813 /*
4814 * Allocate more space in the trans_stack if needed. (Note: because the
4815 * minimum nest level of a subtransaction is 2, we waste the first couple
4816 * entries of the array; not worth the notational effort to avoid it.)
4817 */
4818 while (my_level >= afterTriggers.maxtransdepth)
4819 {
4820 if (afterTriggers.maxtransdepth == 0)
4821 {
4822 /* Arbitrarily initialize for max of 8 subtransaction levels */
4823 afterTriggers.trans_stack = (AfterTriggersTransData *)
4824 MemoryContextAlloc(TopTransactionContext,
4825 8 * sizeof(AfterTriggersTransData));
4826 afterTriggers.maxtransdepth = 8;
4827 }
4828 else
4829 {
4830 /* repalloc will keep the stack in the same context */
4831 int new_alloc = afterTriggers.maxtransdepth * 2;
4832
4833 afterTriggers.trans_stack = (AfterTriggersTransData *)
4834 repalloc(afterTriggers.trans_stack,
4835 new_alloc * sizeof(AfterTriggersTransData));
4836 afterTriggers.maxtransdepth = new_alloc;
4837 }
4838 }
4839
4840 /*
4841 * Push the current information into the stack. The SET CONSTRAINTS state
4842 * is not saved until/unless changed. Likewise, we don't make a
4843 * per-subtransaction event context until needed.
4844 */
4845 afterTriggers.trans_stack[my_level].state = NULL;
4846 afterTriggers.trans_stack[my_level].events = afterTriggers.events;
4847 afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
4848 afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
4849 }
4850
/*
 * AfterTriggerEndSubXact()
 *
 *	The current subtransaction is ending.
 *
 * On commit, all we need do is discard the state we saved at subxact start
 * (the outer level inherits our queued events).  On abort, we must carefully
 * undo everything this subxact did to the trigger state, in the order below.
 */
void
AfterTriggerEndSubXact(bool isCommit)
{
	int			my_level = GetCurrentTransactionNestLevel();
	SetConstraintState state;
	AfterTriggerEvent event;
	AfterTriggerEventChunk *chunk;
	CommandId	subxact_firing_id;

	/*
	 * Pop the prior state if needed.
	 */
	if (isCommit)
	{
		Assert(my_level < afterTriggers.maxtransdepth);
		/* If we saved a prior state, we don't need it anymore */
		state = afterTriggers.trans_stack[my_level].state;
		if (state != NULL)
			pfree(state);
		/* this avoids double pfree if error later: */
		afterTriggers.trans_stack[my_level].state = NULL;
		Assert(afterTriggers.query_depth ==
			   afterTriggers.trans_stack[my_level].query_depth);
	}
	else
	{
		/*
		 * Aborting.  It is possible subxact start failed before calling
		 * AfterTriggerBeginSubXact, in which case we mustn't risk touching
		 * trans_stack levels that aren't there.
		 */
		if (my_level >= afterTriggers.maxtransdepth)
			return;

		/*
		 * Release query-level storage for queries being aborted, and restore
		 * query_depth to its pre-subxact value.  This assumes that a
		 * subtransaction will not add events to query levels started in a
		 * earlier transaction state.
		 */
		while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
		{
			/*
			 * Only levels that were actually allocated have storage to free;
			 * query_depth can exceed maxquerydepth if enlargement failed.
			 */
			if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
				AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
			afterTriggers.query_depth--;
		}
		Assert(afterTriggers.query_depth ==
			   afterTriggers.trans_stack[my_level].query_depth);

		/*
		 * Restore the global deferred-event list to its former length,
		 * discarding any events queued by the subxact.
		 */
		afterTriggerRestoreEventList(&afterTriggers.events,
									 &afterTriggers.trans_stack[my_level].events);

		/*
		 * Restore the trigger state.  If the saved state is NULL, then this
		 * subxact didn't save it, so it doesn't need restoring.
		 */
		state = afterTriggers.trans_stack[my_level].state;
		if (state != NULL)
		{
			pfree(afterTriggers.state);
			afterTriggers.state = state;
		}
		/* this avoids double pfree if error later: */
		afterTriggers.trans_stack[my_level].state = NULL;

		/*
		 * Scan for any remaining deferred events that were marked DONE or IN
		 * PROGRESS by this subxact or a child, and un-mark them.  We can
		 * recognize such events because they have a firing ID greater than or
		 * equal to the firing_counter value we saved at subtransaction start.
		 * (This essentially assumes that the current subxact includes all
		 * subxacts started after it.)
		 */
		subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
		for_each_event_chunk(event, chunk, afterTriggers.events)
		{
			AfterTriggerShared evtshared = GetTriggerSharedData(event);

			if (event->ate_flags &
				(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
			{
				if (evtshared->ats_firing_id >= subxact_firing_id)
					event->ate_flags &=
						~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
			}
		}
	}
}
4948
4949 /* ----------
4950 * AfterTriggerEnlargeQueryState()
4951 *
4952 * Prepare the necessary state so that we can record AFTER trigger events
4953 * queued by a query. It is allowed to have nested queries within a
4954 * (sub)transaction, so we need to have separate state for each query
4955 * nesting level.
4956 * ----------
4957 */
4958 static void
AfterTriggerEnlargeQueryState(void)4959 AfterTriggerEnlargeQueryState(void)
4960 {
4961 int init_depth = afterTriggers.maxquerydepth;
4962
4963 Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
4964
4965 if (afterTriggers.maxquerydepth == 0)
4966 {
4967 int new_alloc = Max(afterTriggers.query_depth + 1, 8);
4968
4969 afterTriggers.query_stack = (AfterTriggersQueryData *)
4970 MemoryContextAlloc(TopTransactionContext,
4971 new_alloc * sizeof(AfterTriggersQueryData));
4972 afterTriggers.maxquerydepth = new_alloc;
4973 }
4974 else
4975 {
4976 /* repalloc will keep the stack in the same context */
4977 int old_alloc = afterTriggers.maxquerydepth;
4978 int new_alloc = Max(afterTriggers.query_depth + 1,
4979 old_alloc * 2);
4980
4981 afterTriggers.query_stack = (AfterTriggersQueryData *)
4982 repalloc(afterTriggers.query_stack,
4983 new_alloc * sizeof(AfterTriggersQueryData));
4984 afterTriggers.maxquerydepth = new_alloc;
4985 }
4986
4987 /* Initialize new array entries to empty */
4988 while (init_depth < afterTriggers.maxquerydepth)
4989 {
4990 AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth];
4991
4992 qs->events.head = NULL;
4993 qs->events.tail = NULL;
4994 qs->events.tailfree = NULL;
4995 qs->fdw_tuplestore = NULL;
4996 qs->tables = NIL;
4997
4998 ++init_depth;
4999 }
5000 }
5001
5002 /*
5003 * Create an empty SetConstraintState with room for numalloc trigstates
5004 */
5005 static SetConstraintState
SetConstraintStateCreate(int numalloc)5006 SetConstraintStateCreate(int numalloc)
5007 {
5008 SetConstraintState state;
5009
5010 /* Behave sanely with numalloc == 0 */
5011 if (numalloc <= 0)
5012 numalloc = 1;
5013
5014 /*
5015 * We assume that zeroing will correctly initialize the state values.
5016 */
5017 state = (SetConstraintState)
5018 MemoryContextAllocZero(TopTransactionContext,
5019 offsetof(SetConstraintStateData, trigstates) +
5020 numalloc * sizeof(SetConstraintTriggerData));
5021
5022 state->numalloc = numalloc;
5023
5024 return state;
5025 }
5026
5027 /*
5028 * Copy a SetConstraintState
5029 */
5030 static SetConstraintState
SetConstraintStateCopy(SetConstraintState origstate)5031 SetConstraintStateCopy(SetConstraintState origstate)
5032 {
5033 SetConstraintState state;
5034
5035 state = SetConstraintStateCreate(origstate->numstates);
5036
5037 state->all_isset = origstate->all_isset;
5038 state->all_isdeferred = origstate->all_isdeferred;
5039 state->numstates = origstate->numstates;
5040 memcpy(state->trigstates, origstate->trigstates,
5041 origstate->numstates * sizeof(SetConstraintTriggerData));
5042
5043 return state;
5044 }
5045
5046 /*
5047 * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
5048 * pointer to the state object (it will change if we have to repalloc).
5049 */
5050 static SetConstraintState
SetConstraintStateAddItem(SetConstraintState state,Oid tgoid,bool tgisdeferred)5051 SetConstraintStateAddItem(SetConstraintState state,
5052 Oid tgoid, bool tgisdeferred)
5053 {
5054 if (state->numstates >= state->numalloc)
5055 {
5056 int newalloc = state->numalloc * 2;
5057
5058 newalloc = Max(newalloc, 8); /* in case original has size 0 */
5059 state = (SetConstraintState)
5060 repalloc(state,
5061 offsetof(SetConstraintStateData, trigstates) +
5062 newalloc * sizeof(SetConstraintTriggerData));
5063 state->numalloc = newalloc;
5064 Assert(state->numstates < state->numalloc);
5065 }
5066
5067 state->trigstates[state->numstates].sct_tgoid = tgoid;
5068 state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
5069 state->numstates++;
5070
5071 return state;
5072 }
5073
/* ----------
 * AfterTriggerSetState()
 *
 *	Execute the SET CONSTRAINTS ... utility command.
 * ----------
 */
void
AfterTriggerSetState(ConstraintsSetStmt *stmt)
{
	int			my_level = GetCurrentTransactionNestLevel();

	/* If we haven't already done so, initialize our state. */
	if (afterTriggers.state == NULL)
		afterTriggers.state = SetConstraintStateCreate(8);

	/*
	 * If in a subtransaction, and we didn't save the current state already,
	 * save it so it can be restored if the subtransaction aborts.
	 */
	if (my_level > 1 &&
		afterTriggers.trans_stack[my_level].state == NULL)
	{
		afterTriggers.trans_stack[my_level].state =
			SetConstraintStateCopy(afterTriggers.state);
	}

	/*
	 * Handle SET CONSTRAINTS ALL ...
	 */
	if (stmt->constraints == NIL)
	{
		/*
		 * Forget any previous SET CONSTRAINTS commands in this transaction.
		 */
		afterTriggers.state->numstates = 0;

		/*
		 * Set the per-transaction ALL state to known.
		 */
		afterTriggers.state->all_isset = true;
		afterTriggers.state->all_isdeferred = stmt->deferred;
	}
	else
	{
		Relation	conrel;
		Relation	tgrel;
		List	   *conoidlist = NIL;
		List	   *tgoidlist = NIL;
		ListCell   *lc;

		/*
		 * Handle SET CONSTRAINTS constraint-name [, ...]
		 *
		 * First, identify all the named constraints and make a list of their
		 * OIDs.  Since, unlike the SQL spec, we allow multiple constraints of
		 * the same name within a schema, the specifications are not
		 * necessarily unique.  Our strategy is to target all matching
		 * constraints within the first search-path schema that has any
		 * matches, but disregard matches in schemas beyond the first match.
		 * (This is a bit odd but it's the historical behavior.)
		 *
		 * A constraint in a partitioned table may have corresponding
		 * constraints in the partitions.  Grab those too.
		 */
		conrel = table_open(ConstraintRelationId, AccessShareLock);

		foreach(lc, stmt->constraints)
		{
			RangeVar   *constraint = lfirst(lc);
			bool		found;
			List	   *namespacelist;
			ListCell   *nslc;

			/* A catalog qualifier is only legal if it names our database */
			if (constraint->catalogname)
			{
				if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
					ereport(ERROR,
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							 errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
									constraint->catalogname, constraint->schemaname,
									constraint->relname)));
			}

			/*
			 * If we're given the schema name with the constraint, look only
			 * in that schema.  If given a bare constraint name, use the
			 * search path to find the first matching constraint.
			 */
			if (constraint->schemaname)
			{
				Oid			namespaceId = LookupExplicitNamespace(constraint->schemaname,
																  false);

				namespacelist = list_make1_oid(namespaceId);
			}
			else
			{
				namespacelist = fetch_search_path(true);
			}

			found = false;
			foreach(nslc, namespacelist)
			{
				Oid			namespaceId = lfirst_oid(nslc);
				SysScanDesc conscan;
				ScanKeyData skey[2];
				HeapTuple	tup;

				/* Scan pg_constraint by (conname, connamespace) */
				ScanKeyInit(&skey[0],
							Anum_pg_constraint_conname,
							BTEqualStrategyNumber, F_NAMEEQ,
							CStringGetDatum(constraint->relname));
				ScanKeyInit(&skey[1],
							Anum_pg_constraint_connamespace,
							BTEqualStrategyNumber, F_OIDEQ,
							ObjectIdGetDatum(namespaceId));

				conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
											 true, NULL, 2, skey);

				while (HeapTupleIsValid(tup = systable_getnext(conscan)))
				{
					Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);

					if (con->condeferrable)
						conoidlist = lappend_oid(conoidlist, con->oid);
					else if (stmt->deferred)
						ereport(ERROR,
								(errcode(ERRCODE_WRONG_OBJECT_TYPE),
								 errmsg("constraint \"%s\" is not deferrable",
										constraint->relname)));
					found = true;
				}

				systable_endscan(conscan);

				/*
				 * Once we've found a matching constraint we do not search
				 * later parts of the search path.
				 */
				if (found)
					break;
			}

			list_free(namespacelist);

			/*
			 * Not found ?
			 */
			if (!found)
				ereport(ERROR,
						(errcode(ERRCODE_UNDEFINED_OBJECT),
						 errmsg("constraint \"%s\" does not exist",
								constraint->relname)));
		}

		/*
		 * Scan for any possible descendants of the constraints.  We append
		 * whatever we find to the same list that we're scanning; this has the
		 * effect that we create new scans for those, too, so if there are
		 * further descendents, we'll also catch them.
		 * (Note: appending to conoidlist while iterating it is deliberate
		 * and safe here, since lappend_oid only adds to the list's tail.)
		 */
		foreach(lc, conoidlist)
		{
			Oid			parent = lfirst_oid(lc);
			ScanKeyData key;
			SysScanDesc scan;
			HeapTuple	tuple;

			ScanKeyInit(&key,
						Anum_pg_constraint_conparentid,
						BTEqualStrategyNumber, F_OIDEQ,
						ObjectIdGetDatum(parent));

			scan = systable_beginscan(conrel, ConstraintParentIndexId, true, NULL, 1, &key);

			while (HeapTupleIsValid(tuple = systable_getnext(scan)))
			{
				Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple);

				conoidlist = lappend_oid(conoidlist, con->oid);
			}

			systable_endscan(scan);
		}

		table_close(conrel, AccessShareLock);

		/*
		 * Now, locate the trigger(s) implementing each of these constraints,
		 * and make a list of their OIDs.
		 */
		tgrel = table_open(TriggerRelationId, AccessShareLock);

		foreach(lc, conoidlist)
		{
			Oid			conoid = lfirst_oid(lc);
			ScanKeyData skey;
			SysScanDesc tgscan;
			HeapTuple	htup;

			ScanKeyInit(&skey,
						Anum_pg_trigger_tgconstraint,
						BTEqualStrategyNumber, F_OIDEQ,
						ObjectIdGetDatum(conoid));

			tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
										NULL, 1, &skey);

			while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
			{
				Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);

				/*
				 * Silently skip triggers that are marked as non-deferrable in
				 * pg_trigger.  This is not an error condition, since a
				 * deferrable RI constraint may have some non-deferrable
				 * actions.
				 */
				if (pg_trigger->tgdeferrable)
					tgoidlist = lappend_oid(tgoidlist, pg_trigger->oid);
			}

			systable_endscan(tgscan);
		}

		table_close(tgrel, AccessShareLock);

		/*
		 * Now we can set the trigger states of individual triggers for this
		 * xact.  Update an existing entry if there is one, else append.
		 */
		foreach(lc, tgoidlist)
		{
			Oid			tgoid = lfirst_oid(lc);
			SetConstraintState state = afterTriggers.state;
			bool		found = false;
			int			i;

			for (i = 0; i < state->numstates; i++)
			{
				if (state->trigstates[i].sct_tgoid == tgoid)
				{
					state->trigstates[i].sct_tgisdeferred = stmt->deferred;
					found = true;
					break;
				}
			}
			if (!found)
			{
				afterTriggers.state =
					SetConstraintStateAddItem(state, tgoid, stmt->deferred);
			}
		}
	}

	/*
	 * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
	 * checks against that constraint must be made when the SET CONSTRAINTS
	 * command is executed -- i.e. the effects of the SET CONSTRAINTS command
	 * apply retroactively.  We've updated the constraints state, so scan the
	 * list of previously deferred events to fire any that have now become
	 * immediate.
	 *
	 * Obviously, if this was SET ... DEFERRED then it can't have converted
	 * any unfired events to immediate, so we need do nothing in that case.
	 */
	if (!stmt->deferred)
	{
		AfterTriggerEventList *events = &afterTriggers.events;
		bool		snapshot_set = false;

		while (afterTriggerMarkEvents(events, NULL, true))
		{
			CommandId	firing_id = afterTriggers.firing_counter++;

			/*
			 * Make sure a snapshot has been established in case trigger
			 * functions need one.  Note that we avoid setting a snapshot if
			 * we don't find at least one trigger that has to be fired now.
			 * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
			 * ISOLATION LEVEL SERIALIZABLE; ... works properly.  (If we are
			 * at the start of a transaction it's not possible for any trigger
			 * events to be queued yet.)
			 */
			if (!snapshot_set)
			{
				PushActiveSnapshot(GetTransactionSnapshot());
				snapshot_set = true;
			}

			/*
			 * We can delete fired events if we are at top transaction level,
			 * but we'd better not if inside a subtransaction, since the
			 * subtransaction could later get rolled back.
			 */
			if (afterTriggerInvokeEvents(events, firing_id, NULL,
										 !IsSubTransaction()))
				break;			/* all fired */
		}

		if (snapshot_set)
			PopActiveSnapshot();
	}
}
5379
/* ----------
 * AfterTriggerPendingOnRel()
 *	Test to see if there are any pending after-trigger events for rel.
 *
 * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
 * it is unsafe to perform major surgery on a relation.  Note that only
 * local pending events are examined.  We assume that having exclusive lock
 * on a rel guarantees there are no unserviced events in other backends ---
 * but having a lock does not prevent there being such events in our own.
 *
 * In some scenarios it'd be reasonable to remove pending events (more
 * specifically, mark them DONE by the current subxact) but without a lot
 * of knowledge of the trigger semantics we can't do this in general.
 *
 * Returns true as soon as any not-yet-fired event for relid is found.
 * ----------
 */
bool
AfterTriggerPendingOnRel(Oid relid)
{
	AfterTriggerEvent event;
	AfterTriggerEventChunk *chunk;
	int			depth;

	/* Scan queued events (the transaction-lifespan deferred-event list) */
	for_each_event_chunk(event, chunk, afterTriggers.events)
	{
		AfterTriggerShared evtshared = GetTriggerSharedData(event);

		/*
		 * We can ignore completed events.  (Even if a DONE flag is rolled
		 * back by subxact abort, it's OK because the effects of the TRUNCATE
		 * or whatever must get rolled back too.)
		 */
		if (event->ate_flags & AFTER_TRIGGER_DONE)
			continue;

		if (evtshared->ats_relid == relid)
			return true;
	}

	/*
	 * Also scan events queued by incomplete queries.  This could only matter
	 * if TRUNCATE/etc is executed by a function or trigger within an updating
	 * query on the same relation, which is pretty perverse, but let's check.
	 * (The maxquerydepth bound skips levels whose storage was never
	 * allocated.)
	 */
	for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
	{
		for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events)
		{
			AfterTriggerShared evtshared = GetTriggerSharedData(event);

			if (event->ate_flags & AFTER_TRIGGER_DONE)
				continue;

			if (evtshared->ats_relid == relid)
				return true;
		}
	}

	return false;
}
5440
5441
5442 /* ----------
5443 * AfterTriggerSaveEvent()
5444 *
5445 * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5446 * be fired for an event.
5447 *
5448 * NOTE: this is called whenever there are any triggers associated with
5449 * the event (even if they are disabled). This function decides which
5450 * triggers actually need to be queued. It is also called after each row,
5451 * even if there are no triggers for that event, if there are any AFTER
5452 * STATEMENT triggers for the statement which use transition tables, so that
5453 * the transition tuplestores can be built. Furthermore, if the transition
5454 * capture is happening for UPDATEd rows being moved to another partition due
5455 * to the partition-key being changed, then this function is called once when
5456 * the row is deleted (to capture OLD row), and once when the row is inserted
5457 * into another partition (to capture NEW row). This is done separately because
5458 * DELETE and INSERT happen on different tables.
5459 *
5460 * Transition tuplestores are built now, rather than when events are pulled
5461 * off of the queue because AFTER ROW triggers are allowed to select from the
5462 * transition tables for the statement.
5463 * ----------
5464 */
5465 static void
AfterTriggerSaveEvent(EState * estate,ResultRelInfo * relinfo,int event,bool row_trigger,TupleTableSlot * oldslot,TupleTableSlot * newslot,List * recheckIndexes,Bitmapset * modifiedCols,TransitionCaptureState * transition_capture)5466 AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
5467 int event, bool row_trigger,
5468 TupleTableSlot *oldslot, TupleTableSlot *newslot,
5469 List *recheckIndexes, Bitmapset *modifiedCols,
5470 TransitionCaptureState *transition_capture)
5471 {
5472 Relation rel = relinfo->ri_RelationDesc;
5473 TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5474 AfterTriggerEventData new_event;
5475 AfterTriggerSharedData new_shared;
5476 char relkind = rel->rd_rel->relkind;
5477 int tgtype_event;
5478 int tgtype_level;
5479 int i;
5480 Tuplestorestate *fdw_tuplestore = NULL;
5481
5482 /*
5483 * Check state. We use a normal test not Assert because it is possible to
5484 * reach here in the wrong state given misconfigured RI triggers, in
5485 * particular deferring a cascade action trigger.
5486 */
5487 if (afterTriggers.query_depth < 0)
5488 elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
5489
5490 /* Be sure we have enough space to record events at this query depth. */
5491 if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5492 AfterTriggerEnlargeQueryState();
5493
5494 /*
5495 * If the directly named relation has any triggers with transition tables,
5496 * then we need to capture transition tuples.
5497 */
5498 if (row_trigger && transition_capture != NULL)
5499 {
5500 TupleTableSlot *original_insert_tuple = transition_capture->tcs_original_insert_tuple;
5501 TupleConversionMap *map = ExecGetChildToRootMap(relinfo);
5502 bool delete_old_table = transition_capture->tcs_delete_old_table;
5503 bool update_old_table = transition_capture->tcs_update_old_table;
5504 bool update_new_table = transition_capture->tcs_update_new_table;
5505 bool insert_new_table = transition_capture->tcs_insert_new_table;
5506
5507 /*
5508 * For INSERT events NEW should be non-NULL, for DELETE events OLD
5509 * should be non-NULL, whereas for UPDATE events normally both OLD and
5510 * NEW are non-NULL. But for UPDATE events fired for capturing
5511 * transition tuples during UPDATE partition-key row movement, OLD is
5512 * NULL when the event is for a row being inserted, whereas NEW is
5513 * NULL when the event is for a row being deleted.
5514 */
5515 Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5516 TupIsNull(oldslot)));
5517 Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5518 TupIsNull(newslot)));
5519
5520 if (!TupIsNull(oldslot) &&
5521 ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
5522 (event == TRIGGER_EVENT_UPDATE && update_old_table)))
5523 {
5524 Tuplestorestate *old_tuplestore;
5525
5526 old_tuplestore = transition_capture->tcs_private->old_tuplestore;
5527
5528 if (map != NULL)
5529 {
5530 AfterTriggersTableData *table = transition_capture->tcs_private;
5531 TupleTableSlot *storeslot;
5532
5533 storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5534 execute_attr_map_slot(map->attrMap, oldslot, storeslot);
5535 tuplestore_puttupleslot(old_tuplestore, storeslot);
5536 }
5537 else
5538 tuplestore_puttupleslot(old_tuplestore, oldslot);
5539 }
5540 if (!TupIsNull(newslot) &&
5541 ((event == TRIGGER_EVENT_INSERT && insert_new_table) ||
5542 (event == TRIGGER_EVENT_UPDATE && update_new_table)))
5543 {
5544 Tuplestorestate *new_tuplestore;
5545
5546 new_tuplestore = transition_capture->tcs_private->new_tuplestore;
5547
5548 if (original_insert_tuple != NULL)
5549 tuplestore_puttupleslot(new_tuplestore,
5550 original_insert_tuple);
5551 else if (map != NULL)
5552 {
5553 AfterTriggersTableData *table = transition_capture->tcs_private;
5554 TupleTableSlot *storeslot;
5555
5556 storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5557 execute_attr_map_slot(map->attrMap, newslot, storeslot);
5558 tuplestore_puttupleslot(new_tuplestore, storeslot);
5559 }
5560 else
5561 tuplestore_puttupleslot(new_tuplestore, newslot);
5562 }
5563
5564 /*
5565 * If transition tables are the only reason we're here, return. As
5566 * mentioned above, we can also be here during update tuple routing in
5567 * presence of transition tables, in which case this function is
5568 * called separately for oldtup and newtup, so we expect exactly one
5569 * of them to be NULL.
5570 */
5571 if (trigdesc == NULL ||
5572 (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
5573 (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
5574 (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) ||
5575 (event == TRIGGER_EVENT_UPDATE && (TupIsNull(oldslot) ^ TupIsNull(newslot))))
5576 return;
5577 }
5578
5579 /*
5580 * Validate the event code and collect the associated tuple CTIDs.
5581 *
5582 * The event code will be used both as a bitmask and an array offset, so
5583 * validation is important to make sure we don't walk off the edge of our
5584 * arrays.
5585 *
5586 * Also, if we're considering statement-level triggers, check whether we
5587 * already queued a set of them for this event, and cancel the prior set
5588 * if so. This preserves the behavior that statement-level triggers fire
5589 * just once per statement and fire after row-level triggers.
5590 */
5591 switch (event)
5592 {
5593 case TRIGGER_EVENT_INSERT:
5594 tgtype_event = TRIGGER_TYPE_INSERT;
5595 if (row_trigger)
5596 {
5597 Assert(oldslot == NULL);
5598 Assert(newslot != NULL);
5599 ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid1));
5600 ItemPointerSetInvalid(&(new_event.ate_ctid2));
5601 }
5602 else
5603 {
5604 Assert(oldslot == NULL);
5605 Assert(newslot == NULL);
5606 ItemPointerSetInvalid(&(new_event.ate_ctid1));
5607 ItemPointerSetInvalid(&(new_event.ate_ctid2));
5608 cancel_prior_stmt_triggers(RelationGetRelid(rel),
5609 CMD_INSERT, event);
5610 }
5611 break;
5612 case TRIGGER_EVENT_DELETE:
5613 tgtype_event = TRIGGER_TYPE_DELETE;
5614 if (row_trigger)
5615 {
5616 Assert(oldslot != NULL);
5617 Assert(newslot == NULL);
5618 ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
5619 ItemPointerSetInvalid(&(new_event.ate_ctid2));
5620 }
5621 else
5622 {
5623 Assert(oldslot == NULL);
5624 Assert(newslot == NULL);
5625 ItemPointerSetInvalid(&(new_event.ate_ctid1));
5626 ItemPointerSetInvalid(&(new_event.ate_ctid2));
5627 cancel_prior_stmt_triggers(RelationGetRelid(rel),
5628 CMD_DELETE, event);
5629 }
5630 break;
5631 case TRIGGER_EVENT_UPDATE:
5632 tgtype_event = TRIGGER_TYPE_UPDATE;
5633 if (row_trigger)
5634 {
5635 Assert(oldslot != NULL);
5636 Assert(newslot != NULL);
5637 ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
5638 ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid2));
5639 }
5640 else
5641 {
5642 Assert(oldslot == NULL);
5643 Assert(newslot == NULL);
5644 ItemPointerSetInvalid(&(new_event.ate_ctid1));
5645 ItemPointerSetInvalid(&(new_event.ate_ctid2));
5646 cancel_prior_stmt_triggers(RelationGetRelid(rel),
5647 CMD_UPDATE, event);
5648 }
5649 break;
5650 case TRIGGER_EVENT_TRUNCATE:
5651 tgtype_event = TRIGGER_TYPE_TRUNCATE;
5652 Assert(oldslot == NULL);
5653 Assert(newslot == NULL);
5654 ItemPointerSetInvalid(&(new_event.ate_ctid1));
5655 ItemPointerSetInvalid(&(new_event.ate_ctid2));
5656 break;
5657 default:
5658 elog(ERROR, "invalid after-trigger event code: %d", event);
5659 tgtype_event = 0; /* keep compiler quiet */
5660 break;
5661 }
5662
5663 if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
5664 new_event.ate_flags = (row_trigger && event == TRIGGER_EVENT_UPDATE) ?
5665 AFTER_TRIGGER_2CTID : AFTER_TRIGGER_1CTID;
5666 /* else, we'll initialize ate_flags for each trigger */
5667
5668 tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
5669
5670 for (i = 0; i < trigdesc->numtriggers; i++)
5671 {
5672 Trigger *trigger = &trigdesc->triggers[i];
5673
5674 if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
5675 tgtype_level,
5676 TRIGGER_TYPE_AFTER,
5677 tgtype_event))
5678 continue;
5679 if (!TriggerEnabled(estate, relinfo, trigger, event,
5680 modifiedCols, oldslot, newslot))
5681 continue;
5682
5683 if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
5684 {
5685 if (fdw_tuplestore == NULL)
5686 {
5687 fdw_tuplestore = GetCurrentFDWTuplestore();
5688 new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
5689 }
5690 else
5691 /* subsequent event for the same tuple */
5692 new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
5693 }
5694
5695 /*
5696 * If the trigger is a foreign key enforcement trigger, there are
5697 * certain cases where we can skip queueing the event because we can
5698 * tell by inspection that the FK constraint will still pass.
5699 */
5700 if (TRIGGER_FIRED_BY_UPDATE(event) || TRIGGER_FIRED_BY_DELETE(event))
5701 {
5702 switch (RI_FKey_trigger_type(trigger->tgfoid))
5703 {
5704 case RI_TRIGGER_PK:
5705 /* Update or delete on trigger's PK table */
5706 if (!RI_FKey_pk_upd_check_required(trigger, rel,
5707 oldslot, newslot))
5708 {
5709 /* skip queuing this event */
5710 continue;
5711 }
5712 break;
5713
5714 case RI_TRIGGER_FK:
5715 /* Update on trigger's FK table */
5716 if (!RI_FKey_fk_upd_check_required(trigger, rel,
5717 oldslot, newslot))
5718 {
5719 /* skip queuing this event */
5720 continue;
5721 }
5722 break;
5723
5724 case RI_TRIGGER_NONE:
5725 /* Not an FK trigger */
5726 break;
5727 }
5728 }
5729
5730 /*
5731 * If the trigger is a deferred unique constraint check trigger, only
5732 * queue it if the unique constraint was potentially violated, which
5733 * we know from index insertion time.
5734 */
5735 if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
5736 {
5737 if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
5738 continue; /* Uniqueness definitely not violated */
5739 }
5740
5741 /*
5742 * Fill in event structure and add it to the current query's queue.
5743 * Note we set ats_table to NULL whenever this trigger doesn't use
5744 * transition tables, to improve sharability of the shared event data.
5745 */
5746 new_shared.ats_event =
5747 (event & TRIGGER_EVENT_OPMASK) |
5748 (row_trigger ? TRIGGER_EVENT_ROW : 0) |
5749 (trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
5750 (trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
5751 new_shared.ats_tgoid = trigger->tgoid;
5752 new_shared.ats_relid = RelationGetRelid(rel);
5753 new_shared.ats_firing_id = 0;
5754 if ((trigger->tgoldtable || trigger->tgnewtable) &&
5755 transition_capture != NULL)
5756 new_shared.ats_table = transition_capture->tcs_private;
5757 else
5758 new_shared.ats_table = NULL;
5759 new_shared.ats_modifiedcols = modifiedCols;
5760
5761 afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth].events,
5762 &new_event, &new_shared);
5763 }
5764
5765 /*
5766 * Finally, spool any foreign tuple(s). The tuplestore squashes them to
5767 * minimal tuples, so this loses any system columns. The executor lost
5768 * those columns before us, for an unrelated reason, so this is fine.
5769 */
5770 if (fdw_tuplestore)
5771 {
5772 if (oldslot != NULL)
5773 tuplestore_puttupleslot(fdw_tuplestore, oldslot);
5774 if (newslot != NULL)
5775 tuplestore_puttupleslot(fdw_tuplestore, newslot);
5776 }
5777 }
5778
5779 /*
5780 * Detect whether we already queued BEFORE STATEMENT triggers for the given
5781 * relation + operation, and set the flag so the next call will report "true".
5782 */
5783 static bool
before_stmt_triggers_fired(Oid relid,CmdType cmdType)5784 before_stmt_triggers_fired(Oid relid, CmdType cmdType)
5785 {
5786 bool result;
5787 AfterTriggersTableData *table;
5788
5789 /* Check state, like AfterTriggerSaveEvent. */
5790 if (afterTriggers.query_depth < 0)
5791 elog(ERROR, "before_stmt_triggers_fired() called outside of query");
5792
5793 /* Be sure we have enough space to record events at this query depth. */
5794 if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5795 AfterTriggerEnlargeQueryState();
5796
5797 /*
5798 * We keep this state in the AfterTriggersTableData that also holds
5799 * transition tables for the relation + operation. In this way, if we are
5800 * forced to make a new set of transition tables because more tuples get
5801 * entered after we've already fired triggers, we will allow a new set of
5802 * statement triggers to get queued.
5803 */
5804 table = GetAfterTriggersTableData(relid, cmdType);
5805 result = table->before_trig_done;
5806 table->before_trig_done = true;
5807 return result;
5808 }
5809
/*
 * If we previously queued a set of AFTER STATEMENT triggers for the given
 * relation + operation, and they've not been fired yet, cancel them.  The
 * caller will queue a fresh set that's after any row-level triggers that may
 * have been queued by the current sub-statement, preserving (as much as
 * possible) the property that AFTER ROW triggers fire before AFTER STATEMENT
 * triggers, and that the latter only fire once.  This deals with the
 * situation where several FK enforcement triggers sequentially queue triggers
 * for the same table into the same trigger query level.  We can't fully
 * prevent odd behavior though: if there are AFTER ROW triggers taking
 * transition tables, we don't want to change the transition tables once the
 * first such trigger has seen them.  In such a case, any additional events
 * will result in creating new transition tables and allowing new firings of
 * statement triggers.
 *
 * This also saves the current event list location so that a later invocation
 * of this function can cheaply find the triggers we're about to queue and
 * cancel them.
 *
 * tgevent is a TRIGGER_EVENT_* operation code; only queued events whose
 * opmask matches it (and that are AFTER STATEMENT events for relid) are
 * canceled.
 */
static void
cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent)
{
	AfterTriggersTableData *table;
	AfterTriggersQueryData *qs = &afterTriggers.query_stack[afterTriggers.query_depth];

	/*
	 * We keep this state in the AfterTriggersTableData that also holds
	 * transition tables for the relation + operation.  In this way, if we are
	 * forced to make a new set of transition tables because more tuples get
	 * entered after we've already fired triggers, we will allow a new set of
	 * statement triggers to get queued without canceling the old ones.
	 */
	table = GetAfterTriggersTableData(relid, cmdType);

	if (table->after_trig_done)
	{
		/*
		 * We want to start scanning from the tail location that existed just
		 * before we inserted any statement triggers.  But the events list
		 * might've been entirely empty then, in which case scan from the
		 * current head.
		 */
		AfterTriggerEvent event;
		AfterTriggerEventChunk *chunk;

		if (table->after_trig_events.tail)
		{
			/* Resume from the saved insertion point within its chunk. */
			chunk = table->after_trig_events.tail;
			event = (AfterTriggerEvent) table->after_trig_events.tailfree;
		}
		else
		{
			/* List was empty at save time: scan the whole current list. */
			chunk = qs->events.head;
			event = NULL;
		}

		for_each_chunk_from(chunk)
		{
			/* event == NULL means: start at the beginning of this chunk */
			if (event == NULL)
				event = (AfterTriggerEvent) CHUNK_DATA_START(chunk);
			for_each_event_from(event, chunk)
			{
				AfterTriggerShared evtshared = GetTriggerSharedData(event);

				/*
				 * Exit loop when we reach events that aren't AS triggers for
				 * the target relation.
				 */
				if (evtshared->ats_relid != relid)
					goto done;
				if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) != tgevent)
					goto done;
				if (!TRIGGER_FIRED_FOR_STATEMENT(evtshared->ats_event))
					goto done;
				if (!TRIGGER_FIRED_AFTER(evtshared->ats_event))
					goto done;
				/* OK, mark it DONE so it will never be fired */
				event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
				event->ate_flags |= AFTER_TRIGGER_DONE;
			}
			/* signal we must reinitialize event ptr for next chunk */
			event = NULL;
		}
	}
done:

	/* In any case, save current insertion point for next time */
	table->after_trig_done = true;
	table->after_trig_events = qs->events;
}
5900
5901 /*
5902 * SQL function pg_trigger_depth()
5903 */
5904 Datum
pg_trigger_depth(PG_FUNCTION_ARGS)5905 pg_trigger_depth(PG_FUNCTION_ARGS)
5906 {
5907 PG_RETURN_INT32(MyTriggerDepth);
5908 }
5909