trigger.c
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/index.h"
26 #include "catalog/indexing.h"
27 #include "catalog/objectaccess.h"
28 #include "catalog/partition.h"
29 #include "catalog/pg_constraint.h"
30 #include "catalog/pg_inherits.h"
31 #include "catalog/pg_proc.h"
32 #include "catalog/pg_trigger.h"
33 #include "catalog/pg_type.h"
34 #include "commands/dbcommands.h"
35 #include "commands/defrem.h"
36 #include "commands/trigger.h"
37 #include "executor/executor.h"
38 #include "miscadmin.h"
39 #include "nodes/bitmapset.h"
40 #include "nodes/makefuncs.h"
41 #include "optimizer/optimizer.h"
42 #include "parser/parse_clause.h"
43 #include "parser/parse_collate.h"
44 #include "parser/parse_func.h"
45 #include "parser/parse_relation.h"
46 #include "parser/parsetree.h"
47 #include "partitioning/partdesc.h"
48 #include "pgstat.h"
49 #include "rewrite/rewriteManip.h"
50 #include "storage/bufmgr.h"
51 #include "storage/lmgr.h"
52 #include "tcop/utility.h"
53 #include "utils/acl.h"
54 #include "utils/builtins.h"
55 #include "utils/bytea.h"
56 #include "utils/fmgroids.h"
57 #include "utils/inval.h"
58 #include "utils/lsyscache.h"
59 #include "utils/memutils.h"
60 #include "utils/rel.h"
61 #include "utils/snapmgr.h"
62 #include "utils/syscache.h"
63 #include "utils/tuplestore.h"
64 
65 
66 /* GUC variables */
67 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
68 
69 /* How many levels deep into trigger execution are we? */
70 static int MyTriggerDepth = 0;
71 
72 /*
73  * Note that similar macros also exist in executor/execMain.c. There does not
74  * appear to be any good header to put them into, given the structures that
75  * they use, so we let them be duplicated. Be sure to update all if one needs
76  * to be changed, however.
77  */
78 #define GetAllUpdatedColumns(relinfo, estate) \
79  (bms_union(exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->updatedCols, \
80  exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->extraUpdatedCols))
81 
82 /* Local function prototypes */
83 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
84 static bool GetTupleForTrigger(EState *estate,
85  EPQState *epqstate,
86  ResultRelInfo *relinfo,
87  ItemPointer tid,
88  LockTupleMode lockmode,
89  TupleTableSlot *oldslot,
90  TupleTableSlot **newSlot);
91 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
92  Trigger *trigger, TriggerEvent event,
93  Bitmapset *modifiedCols,
94  TupleTableSlot *oldslot, TupleTableSlot *newslot);
95 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
96  int tgindx,
97  FmgrInfo *finfo,
98  Instrumentation *instr,
99  MemoryContext per_tuple_context);
100 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
101  int event, bool row_trigger,
102  TupleTableSlot *oldtup, TupleTableSlot *newtup,
103  List *recheckIndexes, Bitmapset *modifiedCols,
104  TransitionCaptureState *transition_capture);
105 static void AfterTriggerEnlargeQueryState(void);
106 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
107 
108 
109 /*
110  * Create a trigger. Returns the address of the created trigger.
111  *
112  * queryString is the source text of the CREATE TRIGGER command.
113  * This must be supplied if a whenClause is specified, else it can be NULL.
114  *
115  * relOid, if nonzero, is the relation on which the trigger should be
116  * created. If zero, the name provided in the statement will be looked up.
117  *
118  * refRelOid, if nonzero, is the relation to which the constraint trigger
119  * refers. If zero, the constraint relation name provided in the statement
120  * will be looked up as needed.
121  *
122  * constraintOid, if nonzero, says that this trigger is being created
123  * internally to implement that constraint. A suitable pg_depend entry will
124  * be made to link the trigger to that constraint. constraintOid is zero when
125  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
126  * TRIGGER, we build a pg_constraint entry internally.)
127  *
128  * indexOid, if nonzero, is the OID of an index associated with the constraint.
129  * We do nothing with this except store it into pg_trigger.tgconstrindid;
130  * but when creating a trigger for a deferrable unique constraint on a
131  * partitioned table, its children are looked up. Note we don't cope with
132  * invalid indexes in that case.
133  *
134  * funcoid, if nonzero, is the OID of the function to invoke. When this is
135  * given, stmt->funcname is ignored.
136  *
137  * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
138  * if that trigger is dropped, this one should be too. (This is passed as
139  * Invalid by most callers; it's set here when recursing on a partition.)
140  *
141  * If whenClause is passed, it is an already-transformed expression for
142  * WHEN. In this case, we ignore any that may come in stmt->whenClause.
143  *
144  * If isInternal is true then this is an internally-generated trigger.
145  * This argument sets the tgisinternal field of the pg_trigger entry, and
146  * if true causes us to modify the given trigger name to ensure uniqueness.
147  *
148  * When isInternal is not true we require ACL_TRIGGER permissions on the
149  * relation, as well as ACL_EXECUTE on the trigger function. For internal
150  * triggers the caller must apply any required permission checks.
151  *
152  * When called on partitioned tables, this function recurses to create the
153  * trigger on all the partitions, except if isInternal is true, in which
154  * case caller is expected to execute recursion on its own.
155  */
156 ObjectAddress
157 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
158  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
159  Oid funcoid, Oid parentTriggerOid, Node *whenClause,
160  bool isInternal, bool in_partition)
161 {
162  int16 tgtype;
163  int ncolumns;
164  int16 *columns;
165  int2vector *tgattr;
166  List *whenRtable;
167  char *qual;
168  Datum values[Natts_pg_trigger];
169  bool nulls[Natts_pg_trigger];
170  Relation rel;
171  AclResult aclresult;
172  Relation tgrel;
173  SysScanDesc tgscan;
174  ScanKeyData key;
175  Relation pgrel;
176  HeapTuple tuple;
177  Oid funcrettype;
178  Oid trigoid;
179  char internaltrigname[NAMEDATALEN];
180  char *trigname;
181  Oid constrrelid = InvalidOid;
182  ObjectAddress myself,
183  referenced;
184  char *oldtablename = NULL;
185  char *newtablename = NULL;
186  bool partition_recurse;
187 
188  if (OidIsValid(relOid))
189  rel = table_open(relOid, ShareRowExclusiveLock);
190  else
191  rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
192 
193  /*
194  * Triggers must be on tables or views, and there are additional
195  * relation-type-specific restrictions.
196  */
197  if (rel->rd_rel->relkind == RELKIND_RELATION)
198  {
199  /* Tables can't have INSTEAD OF triggers */
200  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
201  stmt->timing != TRIGGER_TYPE_AFTER)
202  ereport(ERROR,
203  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
204  errmsg("\"%s\" is a table",
205  RelationGetRelationName(rel)),
206  errdetail("Tables cannot have INSTEAD OF triggers.")));
207  }
208  else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
209  {
210  /* Partitioned tables can't have INSTEAD OF triggers */
211  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
212  stmt->timing != TRIGGER_TYPE_AFTER)
213  ereport(ERROR,
214  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
215  errmsg("\"%s\" is a table",
216  RelationGetRelationName(rel)),
217  errdetail("Tables cannot have INSTEAD OF triggers.")));
218 
219  /*
220  * FOR EACH ROW triggers have further restrictions
221  */
222  if (stmt->row)
223  {
224  /*
225  * Disallow use of transition tables.
226  *
227  * Note that we have another restriction about transition tables
228  * in partitions; search for 'has_superclass' below for an
229  * explanation. The check here is just to protect from the fact
230  * that if we allowed it here, the creation would succeed for a
231  * partitioned table with no partitions, but would be blocked by
232  * the other restriction when the first partition was created,
233  * which is very unfriendly behavior.
234  */
235  if (stmt->transitionRels != NIL)
236  ereport(ERROR,
237  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
238  errmsg("\"%s\" is a partitioned table",
239  RelationGetRelationName(rel)),
240  errdetail("Triggers on partitioned tables cannot have transition tables.")));
241  }
242  }
243  else if (rel->rd_rel->relkind == RELKIND_VIEW)
244  {
245  /*
246  * Views can have INSTEAD OF triggers (which we check below are
247  * row-level), or statement-level BEFORE/AFTER triggers.
248  */
249  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
250  ereport(ERROR,
251  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
252  errmsg("\"%s\" is a view",
253  RelationGetRelationName(rel)),
254  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
255  /* Disallow TRUNCATE triggers on VIEWs */
256  if (TRIGGER_FOR_TRUNCATE(stmt->events))
257  ereport(ERROR,
258  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
259  errmsg("\"%s\" is a view",
260  RelationGetRelationName(rel)),
261  errdetail("Views cannot have TRUNCATE triggers.")));
262  }
263  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
264  {
265  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
266  stmt->timing != TRIGGER_TYPE_AFTER)
267  ereport(ERROR,
268  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
269  errmsg("\"%s\" is a foreign table",
270  RelationGetRelationName(rel)),
271  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
272 
273  if (TRIGGER_FOR_TRUNCATE(stmt->events))
274  ereport(ERROR,
275  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
276  errmsg("\"%s\" is a foreign table",
277  RelationGetRelationName(rel)),
278  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
279 
280  /*
281  * We disallow constraint triggers to protect the assumption that
282  * triggers on FKs can't be deferred. See notes with AfterTriggers
283  * data structures, below.
284  */
285  if (stmt->isconstraint)
286  ereport(ERROR,
287  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
288  errmsg("\"%s\" is a foreign table",
289  RelationGetRelationName(rel)),
290  errdetail("Foreign tables cannot have constraint triggers.")));
291  }
292  else
293  ereport(ERROR,
294  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
295  errmsg("\"%s\" is not a table or view",
296  RelationGetRelationName(rel))));
297 
298  if (!allowSystemTableMods && IsSystemRelation(rel))
299  ereport(ERROR,
300  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
301  errmsg("permission denied: \"%s\" is a system catalog",
302  RelationGetRelationName(rel))));
303 
304  if (stmt->isconstraint)
305  {
306  /*
307  * We must take a lock on the target relation to protect against
308  * concurrent drop. It's not clear that AccessShareLock is strong
309  * enough, but we certainly need at least that much... otherwise, we
310  * might end up creating a pg_constraint entry referencing a
311  * nonexistent table.
312  */
313  if (OidIsValid(refRelOid))
314  {
315  LockRelationOid(refRelOid, AccessShareLock);
316  constrrelid = refRelOid;
317  }
318  else if (stmt->constrrel != NULL)
319  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
320  false);
321  }
322 
323  /* permission checks */
324  if (!isInternal)
325  {
326  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
327  ACL_TRIGGER);
328  if (aclresult != ACLCHECK_OK)
329  aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
330  RelationGetRelationName(rel));
331 
332  if (OidIsValid(constrrelid))
333  {
334  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
335  ACL_TRIGGER);
336  if (aclresult != ACLCHECK_OK)
337  aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
338  get_rel_name(constrrelid));
339  }
340  }
341 
342  /*
343  * When called on a partitioned table to create a FOR EACH ROW trigger
344  * that's not internal, we create one trigger for each partition, too.
345  *
346  * For that, we'd better hold lock on all of them ahead of time.
347  */
348  partition_recurse = !isInternal && stmt->row &&
349  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
350  if (partition_recurse)
351  list_free(find_all_inheritors(RelationGetRelid(rel),
352  ShareRowExclusiveLock, NULL));
353 
354  /* Compute tgtype */
355  TRIGGER_CLEAR_TYPE(tgtype);
356  if (stmt->row)
357  TRIGGER_SETT_ROW(tgtype);
358  tgtype |= stmt->timing;
359  tgtype |= stmt->events;
360 
361  /* Disallow ROW-level TRUNCATE triggers */
362  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
363  ereport(ERROR,
364  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
365  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
366 
367  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
368  if (TRIGGER_FOR_INSTEAD(tgtype))
369  {
370  if (!TRIGGER_FOR_ROW(tgtype))
371  ereport(ERROR,
372  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
373  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
374  if (stmt->whenClause)
375  ereport(ERROR,
376  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
377  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
378  if (stmt->columns != NIL)
379  ereport(ERROR,
380  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
381  errmsg("INSTEAD OF triggers cannot have column lists")));
382  }
383 
384  /*
385  * We don't yet support naming ROW transition variables, but the parser
386  * recognizes the syntax so we can give a nicer message here.
387  *
388  * Per standard, REFERENCING TABLE names are only allowed on AFTER
389  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
390  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
391  * only allowed once. Per standard, OLD may not be specified when
392  * creating a trigger only for INSERT, and NEW may not be specified when
393  * creating a trigger only for DELETE.
394  *
395  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
396  * reference both ROW and TABLE transition data.
397  */
398  if (stmt->transitionRels != NIL)
399  {
400  List *varList = stmt->transitionRels;
401  ListCell *lc;
402 
403  foreach(lc, varList)
404  {
405  TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
406 
407  if (!(tt->isTable))
408  ereport(ERROR,
409  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
410  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
411  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
412 
413  /*
414  * Because of the above test, we omit further ROW-related testing
415  * below. If we later allow naming OLD and NEW ROW variables,
416  * adjustments will be needed below.
417  */
418 
419  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
420  ereport(ERROR,
421  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
422  errmsg("\"%s\" is a foreign table",
423  RelationGetRelationName(rel)),
424  errdetail("Triggers on foreign tables cannot have transition tables.")));
425 
426  if (rel->rd_rel->relkind == RELKIND_VIEW)
427  ereport(ERROR,
428  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
429  errmsg("\"%s\" is a view",
430  RelationGetRelationName(rel)),
431  errdetail("Triggers on views cannot have transition tables.")));
432 
433  /*
434  * We currently don't allow row-level triggers with transition
435  * tables on partition or inheritance children. Such triggers
436  * would somehow need to see tuples converted to the format of the
437  * table they're attached to, and it's not clear which subset of
438  * tuples each child should see. See also the prohibitions in
439  * ATExecAttachPartition() and ATExecAddInherit().
440  */
441  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
442  {
443  /* Use appropriate error message. */
444  if (rel->rd_rel->relispartition)
445  ereport(ERROR,
446  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
447  errmsg("ROW triggers with transition tables are not supported on partitions")));
448  else
449  ereport(ERROR,
450  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
451  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
452  }
453 
454  if (stmt->timing != TRIGGER_TYPE_AFTER)
455  ereport(ERROR,
456  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
457  errmsg("transition table name can only be specified for an AFTER trigger")));
458 
459  if (TRIGGER_FOR_TRUNCATE(tgtype))
460  ereport(ERROR,
461  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
462  errmsg("TRUNCATE triggers with transition tables are not supported")));
463 
464  /*
465  * We currently don't allow multi-event triggers ("INSERT OR
466  * UPDATE") with transition tables, because it's not clear how to
467  * handle INSERT ... ON CONFLICT statements which can fire both
468  * INSERT and UPDATE triggers. We show the inserted tuples to
469  * INSERT triggers and the updated tuples to UPDATE triggers, but
470  * it's not yet clear what INSERT OR UPDATE trigger should see.
471  * This restriction could be lifted if we can decide on the right
472  * semantics in a later release.
473  */
474  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
475  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
476  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
477  ereport(ERROR,
478  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
479  errmsg("transition tables cannot be specified for triggers with more than one event")));
480 
481  /*
482  * We currently don't allow column-specific triggers with
483  * transition tables. Per spec, that seems to require
484  * accumulating separate transition tables for each combination of
485  * columns, which is a lot of work for a rather marginal feature.
486  */
487  if (stmt->columns != NIL)
488  ereport(ERROR,
489  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
490  errmsg("transition tables cannot be specified for triggers with column lists")));
491 
492  /*
493  * We disallow constraint triggers with transition tables, to
494  * protect the assumption that such triggers can't be deferred.
495  * See notes with AfterTriggers data structures, below.
496  *
497  * Currently this is enforced by the grammar, so just Assert here.
498  */
499  Assert(!stmt->isconstraint);
500 
501  if (tt->isNew)
502  {
503  if (!(TRIGGER_FOR_INSERT(tgtype) ||
504  TRIGGER_FOR_UPDATE(tgtype)))
505  ereport(ERROR,
506  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
507  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
508 
509  if (newtablename != NULL)
510  ereport(ERROR,
511  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
512  errmsg("NEW TABLE cannot be specified multiple times")));
513 
514  newtablename = tt->name;
515  }
516  else
517  {
518  if (!(TRIGGER_FOR_DELETE(tgtype) ||
519  TRIGGER_FOR_UPDATE(tgtype)))
520  ereport(ERROR,
521  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
522  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
523 
524  if (oldtablename != NULL)
525  ereport(ERROR,
526  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
527  errmsg("OLD TABLE cannot be specified multiple times")));
528 
529  oldtablename = tt->name;
530  }
531  }
532 
533  if (newtablename != NULL && oldtablename != NULL &&
534  strcmp(newtablename, oldtablename) == 0)
535  ereport(ERROR,
536  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
537  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
538  }
539 
540  /*
541  * Parse the WHEN clause, if any and we weren't passed an already
542  * transformed one.
543  *
544  * Note that as a side effect, we fill whenRtable when parsing. If we got
545  * an already parsed clause, this does not occur, which is what we want --
546  * no point in adding redundant dependencies below.
547  */
548  if (!whenClause && stmt->whenClause)
549  {
550  ParseState *pstate;
551  ParseNamespaceItem *nsitem;
552  List *varList;
553  ListCell *lc;
554 
555  /* Set up a pstate to parse with */
556  pstate = make_parsestate(NULL);
557  pstate->p_sourcetext = queryString;
558 
559  /*
560  * Set up nsitems for OLD and NEW references.
561  *
562  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
563  */
564  nsitem = addRangeTableEntryForRelation(pstate, rel,
565  AccessShareLock,
566  makeAlias("old", NIL),
567  false, false);
568  addNSItemToQuery(pstate, nsitem, false, true, true);
569  nsitem = addRangeTableEntryForRelation(pstate, rel,
570  AccessShareLock,
571  makeAlias("new", NIL),
572  false, false);
573  addNSItemToQuery(pstate, nsitem, false, true, true);
574 
575  /* Transform expression. Copy to be sure we don't modify original */
576  whenClause = transformWhereClause(pstate,
577  copyObject(stmt->whenClause),
578  EXPR_KIND_TRIGGER_WHEN,
579  "WHEN");
580  /* we have to fix its collations too */
581  assign_expr_collations(pstate, whenClause);
582 
583  /*
584  * Check for disallowed references to OLD/NEW.
585  *
586  * NB: pull_var_clause is okay here only because we don't allow
587  * subselects in WHEN clauses; it would fail to examine the contents
588  * of subselects.
589  */
590  varList = pull_var_clause(whenClause, 0);
591  foreach(lc, varList)
592  {
593  Var *var = (Var *) lfirst(lc);
594 
595  switch (var->varno)
596  {
597  case PRS2_OLD_VARNO:
598  if (!TRIGGER_FOR_ROW(tgtype))
599  ereport(ERROR,
600  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
601  errmsg("statement trigger's WHEN condition cannot reference column values"),
602  parser_errposition(pstate, var->location)));
603  if (TRIGGER_FOR_INSERT(tgtype))
604  ereport(ERROR,
605  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
606  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
607  parser_errposition(pstate, var->location)));
608  /* system columns are okay here */
609  break;
610  case PRS2_NEW_VARNO:
611  if (!TRIGGER_FOR_ROW(tgtype))
612  ereport(ERROR,
613  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
614  errmsg("statement trigger's WHEN condition cannot reference column values"),
615  parser_errposition(pstate, var->location)));
616  if (TRIGGER_FOR_DELETE(tgtype))
617  ereport(ERROR,
618  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
619  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
620  parser_errposition(pstate, var->location)));
621  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
622  ereport(ERROR,
623  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
624  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
625  parser_errposition(pstate, var->location)));
626  if (TRIGGER_FOR_BEFORE(tgtype) &&
627  var->varattno == 0 &&
628  RelationGetDescr(rel)->constr &&
629  RelationGetDescr(rel)->constr->has_generated_stored)
630  ereport(ERROR,
631  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
632  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
633  errdetail("A whole-row reference is used and the table contains generated columns."),
634  parser_errposition(pstate, var->location)));
635  if (TRIGGER_FOR_BEFORE(tgtype) &&
636  var->varattno > 0 &&
637  TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
638  ereport(ERROR,
639  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
640  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
641  errdetail("Column \"%s\" is a generated column.",
642  NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
643  parser_errposition(pstate, var->location)));
644  break;
645  default:
646  /* can't happen without add_missing_from, so just elog */
647  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
648  break;
649  }
650  }
651 
652  /* we'll need the rtable for recordDependencyOnExpr */
653  whenRtable = pstate->p_rtable;
654 
655  qual = nodeToString(whenClause);
656 
657  free_parsestate(pstate);
658  }
659  else if (!whenClause)
660  {
661  whenClause = NULL;
662  whenRtable = NIL;
663  qual = NULL;
664  }
665  else
666  {
667  qual = nodeToString(whenClause);
668  whenRtable = NIL;
669  }
670 
671  /*
672  * Find and validate the trigger function.
673  */
674  if (!OidIsValid(funcoid))
675  funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
676  if (!isInternal)
677  {
678  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
679  if (aclresult != ACLCHECK_OK)
680  aclcheck_error(aclresult, OBJECT_FUNCTION,
681  NameListToString(stmt->funcname));
682  }
683  funcrettype = get_func_rettype(funcoid);
684  if (funcrettype != TRIGGEROID)
685  ereport(ERROR,
686  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
687  errmsg("function %s must return type %s",
688  NameListToString(stmt->funcname), "trigger")));
689 
690  /*
691  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
692  * corresponding pg_constraint entry.
693  */
694  if (stmt->isconstraint && !OidIsValid(constraintOid))
695  {
696  /* Internal callers should have made their own constraints */
697  Assert(!isInternal);
698  constraintOid = CreateConstraintEntry(stmt->trigname,
699  RelationGetNamespace(rel),
700  CONSTRAINT_TRIGGER,
701  stmt->deferrable,
702  stmt->initdeferred,
703  true,
704  InvalidOid, /* no parent */
705  RelationGetRelid(rel),
706  NULL, /* no conkey */
707  0,
708  0,
709  InvalidOid, /* no domain */
710  InvalidOid, /* no index */
711  InvalidOid, /* no foreign key */
712  NULL,
713  NULL,
714  NULL,
715  NULL,
716  0,
717  ' ',
718  ' ',
719  ' ',
720  NULL, /* no exclusion */
721  NULL, /* no check constraint */
722  NULL,
723  true, /* islocal */
724  0, /* inhcount */
725  true, /* noinherit */
726  isInternal); /* is_internal */
727  }
728 
729  /*
730  * Generate the trigger's OID now, so that we can use it in the name if
731  * needed.
732  */
733  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
734 
735  trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
736  Anum_pg_trigger_oid);
737 
738  /*
739  * If trigger is internally generated, modify the provided trigger name to
740  * ensure uniqueness by appending the trigger OID. (Callers will usually
741  * supply a simple constant trigger name in these cases.)
742  */
743  if (isInternal)
744  {
745  snprintf(internaltrigname, sizeof(internaltrigname),
746  "%s_%u", stmt->trigname, trigoid);
747  trigname = internaltrigname;
748  }
749  else
750  {
751  /* user-defined trigger; use the specified trigger name as-is */
752  trigname = stmt->trigname;
753  }
754 
755  /*
756  * Scan pg_trigger for existing triggers on relation. We do this only to
757  * give a nice error message if there's already a trigger of the same
758  * name. (The unique index on tgrelid/tgname would complain anyway.) We
759  * can skip this for internally generated triggers, since the name
760  * modification above should be sufficient.
761  *
762  * NOTE that this is cool only because we have ShareRowExclusiveLock on
763  * the relation, so the trigger set won't be changing underneath us.
764  */
765  if (!isInternal)
766  {
767  ScanKeyInit(&key,
768  Anum_pg_trigger_tgrelid,
769  BTEqualStrategyNumber, F_OIDEQ,
770  ObjectIdGetDatum(RelationGetRelid(rel)));
771  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
772  NULL, 1, &key);
773  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
774  {
775  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
776 
777  if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
778  ereport(ERROR,
779  (errcode(ERRCODE_DUPLICATE_OBJECT),
780  errmsg("trigger \"%s\" for relation \"%s\" already exists",
781  trigname, RelationGetRelationName(rel))));
782  }
783  systable_endscan(tgscan);
784  }
785 
786  /*
787  * Build the new pg_trigger tuple.
788  *
789  * When we're creating a trigger in a partition, we mark it as internal,
790  * even though we don't do the isInternal magic in this function. This
791  * makes the triggers in partitions identical to the ones in the
792  * partitioned tables, except that they are marked internal.
793  */
794  memset(nulls, false, sizeof(nulls));
795 
796  values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
797  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
798  values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
799  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
800  CStringGetDatum(trigname));
801  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
802  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
803  values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(TRIGGER_FIRES_ON_ORIGIN);
804  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal || in_partition);
805  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
806  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
807  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
808  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
809  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
810 
811  if (stmt->args)
812  {
813  ListCell *le;
814  char *args;
815  int16 nargs = list_length(stmt->args);
816  int len = 0;
817 
818  foreach(le, stmt->args)
819  {
820  char *ar = strVal(lfirst(le));
821 
822  len += strlen(ar) + 4;
823  for (; *ar; ar++)
824  {
825  if (*ar == '\\')
826  len++;
827  }
828  }
829  args = (char *) palloc(len + 1);
830  args[0] = '\0';
831  foreach(le, stmt->args)
832  {
833  char *s = strVal(lfirst(le));
834  char *d = args + strlen(args);
835 
836  while (*s)
837  {
838  if (*s == '\\')
839  *d++ = '\\';
840  *d++ = *s++;
841  }
842  strcpy(d, "\\000");
843  }
844  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
845  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
846  CStringGetDatum(args));
847  }
848  else
849  {
850  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
851  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
852  CStringGetDatum(""));
853  }
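 	/*
 	 * For example, the argument list ('a\b', 'c') is written by the loop
 	 * above as the character sequence  a\\b\000c\000  (backslashes doubled,
 	 * a literal "\000" appended after each argument).  byteain's escape
 	 * syntax then decodes that into the bytes  'a' '\' 'b' 0 'c' 0,  i.e.
 	 * each argument stored verbatim and NUL-terminated, which is the layout
 	 * RelationBuildTriggers() later walks with strlen() + 1.
 	 */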
854 
855  /* build column number array if it's a column-specific trigger */
856  ncolumns = list_length(stmt->columns);
857  if (ncolumns == 0)
858  columns = NULL;
859  else
860  {
861  ListCell *cell;
862  int i = 0;
863 
864  columns = (int16 *) palloc(ncolumns * sizeof(int16));
865  foreach(cell, stmt->columns)
866  {
867  char *name = strVal(lfirst(cell));
868  int16 attnum;
869  int j;
870 
871  /* Lookup column name. System columns are not allowed */
872  attnum = attnameAttNum(rel, name, false);
873  if (attnum == InvalidAttrNumber)
874  ereport(ERROR,
875  (errcode(ERRCODE_UNDEFINED_COLUMN),
876  errmsg("column \"%s\" of relation \"%s\" does not exist",
877  name, RelationGetRelationName(rel))));
878 
879  /* Check for duplicates */
880  for (j = i - 1; j >= 0; j--)
881  {
882  if (columns[j] == attnum)
883  ereport(ERROR,
884  (errcode(ERRCODE_DUPLICATE_COLUMN),
885  errmsg("column \"%s\" specified more than once",
886  name)));
887  }
888 
889  columns[i++] = attnum;
890  }
891  }
892  tgattr = buildint2vector(columns, ncolumns);
893  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
894 
895  /* set tgqual if trigger has WHEN clause */
896  if (qual)
897  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
898  else
899  nulls[Anum_pg_trigger_tgqual - 1] = true;
900 
901  if (oldtablename)
902  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
903  CStringGetDatum(oldtablename));
904  else
905  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
906  if (newtablename)
907  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
908  CStringGetDatum(newtablename));
909  else
910  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
911 
912  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
913 
914  /*
915  * Insert tuple into pg_trigger.
916  */
917  CatalogTupleInsert(tgrel, tuple);
918 
919  heap_freetuple(tuple);
920  table_close(tgrel, RowExclusiveLock);
921 
922  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
923  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
924  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
925  if (oldtablename)
926  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
927  if (newtablename)
928  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
929 
930  /*
931  * Update relation's pg_class entry if necessary; and if not, send an SI
932  * message to make other backends (and this one) rebuild relcache entries.
933  */
934  pgrel = table_open(RelationRelationId, RowExclusiveLock);
935  tuple = SearchSysCacheCopy1(RELOID,
936  ObjectIdGetDatum(RelationGetRelid(rel)));
937  if (!HeapTupleIsValid(tuple))
938  elog(ERROR, "cache lookup failed for relation %u",
939  RelationGetRelid(rel));
940  if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
941  {
942  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
943 
944  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
945 
946  CommandCounterIncrement();
947  }
948  else
949  CacheInvalidateRelcacheByTuple(tuple);
950 
951  heap_freetuple(tuple);
952  table_close(pgrel, RowExclusiveLock);
953 
954  /*
955  * Record dependencies for trigger. Always place a normal dependency on
956  * the function.
957  */
958  myself.classId = TriggerRelationId;
959  myself.objectId = trigoid;
960  myself.objectSubId = 0;
961 
962  referenced.classId = ProcedureRelationId;
963  referenced.objectId = funcoid;
964  referenced.objectSubId = 0;
965  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
966 
967  if (isInternal && OidIsValid(constraintOid))
968  {
969  /*
970  * Internally-generated trigger for a constraint, so make it an
971  * internal dependency of the constraint. We can skip depending on
972  * the relation(s), as there'll be an indirect dependency via the
973  * constraint.
974  */
975  referenced.classId = ConstraintRelationId;
976  referenced.objectId = constraintOid;
977  referenced.objectSubId = 0;
978  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
979  }
980  else
981  {
982  /*
983  * User CREATE TRIGGER, so place dependencies. We make trigger be
984  * auto-dropped if its relation is dropped or if the FK relation is
985  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
986  */
987  referenced.classId = RelationRelationId;
988  referenced.objectId = RelationGetRelid(rel);
989  referenced.objectSubId = 0;
990  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
991 
992  if (OidIsValid(constrrelid))
993  {
994  referenced.classId = RelationRelationId;
995  referenced.objectId = constrrelid;
996  referenced.objectSubId = 0;
997  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
998  }
999  /* Not possible to have an index dependency in this case */
1000  Assert(!OidIsValid(indexOid));
1001 
1002  /*
1003  * If it's a user-specified constraint trigger, make the constraint
1004  * internally dependent on the trigger instead of vice versa.
1005  */
1006  if (OidIsValid(constraintOid))
1007  {
1008  referenced.classId = ConstraintRelationId;
1009  referenced.objectId = constraintOid;
1010  referenced.objectSubId = 0;
1011  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1012  }
1013 
1014  /*
1015  * If it's a partition trigger, create the partition dependencies.
1016  */
1017  if (OidIsValid(parentTriggerOid))
1018  {
1019  ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1020  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1021  ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1022  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1023  }
1024  }
1025 
1026  /* If column-specific trigger, add normal dependencies on columns */
1027  if (columns != NULL)
1028  {
1029  int i;
1030 
1031  referenced.classId = RelationRelationId;
1032  referenced.objectId = RelationGetRelid(rel);
1033  for (i = 0; i < ncolumns; i++)
1034  {
1035  referenced.objectSubId = columns[i];
1036  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1037  }
1038  }
1039 
1040  /*
1041  * If it has a WHEN clause, add dependencies on objects mentioned in the
1042  * expression (eg, functions, as well as any columns used).
1043  */
1044  if (whenRtable != NIL)
1045  recordDependencyOnExpr(&myself, whenClause, whenRtable,
1046  DEPENDENCY_NORMAL);
1047 
1048  /* Post creation hook for new trigger */
1049  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1050  isInternal);
1051 
1052  /*
1053  * Lastly, create the trigger on child relations, if needed.
1054  */
1055  if (partition_recurse)
1056  {
1057  PartitionDesc partdesc = RelationGetPartitionDesc(rel);
1058  List *idxs = NIL;
1059  List *childTbls = NIL;
1060  ListCell *l;
1061  int i;
1062  MemoryContext oldcxt,
1063  perChildCxt;
1064 
1065  perChildCxt = AllocSetContextCreate(CurrentMemoryContext,
1066  "part trig clone",
1067  ALLOCSET_SMALL_SIZES);
1068 
1069  /*
1070  * When a trigger is being created associated with an index, we'll
1071  * need to associate the trigger in each child partition with the
1072  * corresponding index on it.
1073  */
1074  if (OidIsValid(indexOid))
1075  {
1076  ListCell *l;
1077  List *idxs = NIL;
1078 
1079  idxs = find_inheritance_children(indexOid, ShareRowExclusiveLock);
1080  foreach(l, idxs)
1081  childTbls = lappend_oid(childTbls,
1082  IndexGetRelation(lfirst_oid(l),
1083  false));
1084  }
1085 
1086  oldcxt = MemoryContextSwitchTo(perChildCxt);
1087 
1088  /* Iterate to create the trigger on each existing partition */
1089  for (i = 0; i < partdesc->nparts; i++)
1090  {
1091  Oid indexOnChild = InvalidOid;
1092  ListCell *l2;
1093  CreateTrigStmt *childStmt;
1094  Relation childTbl;
1095  Node *qual;
1096 
1097  childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1098 
1099  /* Find which of the child indexes is the one on this partition */
1100  if (OidIsValid(indexOid))
1101  {
1102  forboth(l, idxs, l2, childTbls)
1103  {
1104  if (lfirst_oid(l2) == partdesc->oids[i])
1105  {
1106  indexOnChild = lfirst_oid(l);
1107  break;
1108  }
1109  }
1110  if (!OidIsValid(indexOnChild))
1111  elog(ERROR, "failed to find index matching index \"%s\" in partition \"%s\"",
1112  get_rel_name(indexOid),
1113  get_rel_name(partdesc->oids[i]));
1114  }
1115 
1116  /*
1117  * Initialize our fabricated parse node by copying the original
1118  * one, then resetting fields that we pass separately.
1119  */
1120  childStmt = (CreateTrigStmt *) copyObject(stmt);
1121  childStmt->funcname = NIL;
1122  childStmt->whenClause = NULL;
1123 
1124  /* If there is a WHEN clause, create a modified copy of it */
1125  qual = copyObject(whenClause);
1126  qual = (Node *)
1127  map_partition_varattnos((List *) qual, PRS2_OLD_VARNO,
1128  childTbl, rel);
1129  qual = (Node *)
1130  map_partition_varattnos((List *) qual, PRS2_NEW_VARNO,
1131  childTbl, rel);
1132 
1133  CreateTrigger(childStmt, queryString,
1134  partdesc->oids[i], refRelOid,
1135  InvalidOid, indexOnChild,
1136  funcoid, trigoid, qual,
1137  isInternal, true);
1138 
1139  table_close(childTbl, NoLock);
1140 
1141  MemoryContextReset(perChildCxt);
1142  }
1143 
1144  MemoryContextSwitchTo(oldcxt);
1145  MemoryContextDelete(perChildCxt);
1146  list_free(idxs);
1147  list_free(childTbls);
1148  }
1149 
1150  /* Keep lock on target rel until end of xact */
1151  table_close(rel, NoLock);
1152 
1153  return myself;
1154 }
1155 
1156 
1157 /*
1158  * Guts of trigger deletion.
1159  */
1160 void
1161 RemoveTriggerById(Oid trigOid)
1162 {
1163  Relation tgrel;
1164  SysScanDesc tgscan;
1165  ScanKeyData skey[1];
1166  HeapTuple tup;
1167  Oid relid;
1168  Relation rel;
1169 
1170  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1171 
1172  /*
1173  * Find the trigger to delete.
1174  */
1175  ScanKeyInit(&skey[0],
1176  Anum_pg_trigger_oid,
1177  BTEqualStrategyNumber, F_OIDEQ,
1178  ObjectIdGetDatum(trigOid));
1179 
1180  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1181  NULL, 1, skey);
1182 
1183  tup = systable_getnext(tgscan);
1184  if (!HeapTupleIsValid(tup))
1185  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1186 
1187  /*
1188  * Open and exclusive-lock the relation the trigger belongs to.
1189  */
1190  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1191 
1192  rel = table_open(relid, AccessExclusiveLock);
1193 
1194  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1195  rel->rd_rel->relkind != RELKIND_VIEW &&
1196  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1197  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1198  ereport(ERROR,
1199  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1200  errmsg("\"%s\" is not a table, view, or foreign table",
1201  RelationGetRelationName(rel))));
1202 
1203  if (!allowSystemTableMods && IsSystemRelation(rel))
1204  ereport(ERROR,
1205  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1206  errmsg("permission denied: \"%s\" is a system catalog",
1207  RelationGetRelationName(rel))));
1208 
1209  /*
1210  * Delete the pg_trigger tuple.
1211  */
1212  CatalogTupleDelete(tgrel, &tup->t_self);
1213 
1214  systable_endscan(tgscan);
1215  table_close(tgrel, RowExclusiveLock);
1216 
1217  /*
1218  * We do not bother to try to determine whether any other triggers remain,
1219  * which would be needed in order to decide whether it's safe to clear the
1220  * relation's relhastriggers. (In any case, there might be a concurrent
1221  * process adding new triggers.) Instead, just force a relcache inval to
1222  * make other backends (and this one too!) rebuild their relcache entries.
1223  * There's no great harm in leaving relhastriggers true even if there are
1224  * no triggers left.
1225  */
1226  CacheInvalidateRelcache(rel);
1227 
1228  /* Keep lock on trigger's rel until end of xact */
1229  table_close(rel, NoLock);
1230 }
1231 
1232 /*
1233  * get_trigger_oid - Look up a trigger by name to find its OID.
1234  *
1235  * If missing_ok is false, throw an error if trigger not found. If
1236  * true, just return InvalidOid.
1237  */
1238 Oid
1239 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1240 {
1241  Relation tgrel;
1242  ScanKeyData skey[2];
1243  SysScanDesc tgscan;
1244  HeapTuple tup;
1245  Oid oid;
1246 
1247  /*
1248  * Find the trigger, verify permissions, set up object address
1249  */
1250  tgrel = table_open(TriggerRelationId, AccessShareLock);
1251 
1252  ScanKeyInit(&skey[0],
1253  Anum_pg_trigger_tgrelid,
1254  BTEqualStrategyNumber, F_OIDEQ,
1255  ObjectIdGetDatum(relid));
1256  ScanKeyInit(&skey[1],
1257  Anum_pg_trigger_tgname,
1258  BTEqualStrategyNumber, F_NAMEEQ,
1259  CStringGetDatum(trigname));
1260 
1261  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1262  NULL, 2, skey);
1263 
1264  tup = systable_getnext(tgscan);
1265 
1266  if (!HeapTupleIsValid(tup))
1267  {
1268  if (!missing_ok)
1269  ereport(ERROR,
1270  (errcode(ERRCODE_UNDEFINED_OBJECT),
1271  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1272  trigname, get_rel_name(relid))));
1273  oid = InvalidOid;
1274  }
1275  else
1276  {
1277  oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1278  }
1279 
1280  systable_endscan(tgscan);
1281  table_close(tgrel, AccessShareLock);
1282  return oid;
1283 }
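As a usage illustration only: a hypothetical loadable C function (not part of trigger.c) could expose this lookup at the SQL level. The wrapper name trigger_oid and its SQL signature are invented for this sketch; get_trigger_oid() itself is the function defined above.

#include "postgres.h"

#include "commands/trigger.h"
#include "fmgr.h"
#include "utils/builtins.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(trigger_oid);

/*
 * trigger_oid(regclass, text) -> oid
 *
 * Returns the trigger's OID, or 0 (InvalidOid) when no such trigger exists,
 * because missing_ok = true suppresses the error path shown above.
 */
Datum
trigger_oid(PG_FUNCTION_ARGS)
{
	Oid			relid = PG_GETARG_OID(0);
	char	   *trigname = text_to_cstring(PG_GETARG_TEXT_PP(1));

	PG_RETURN_OID(get_trigger_oid(relid, trigname, true));
}

Paired with the usual CREATE FUNCTION ... LANGUAGE C STRICT declaration, this would let SELECT trigger_oid('my_table', 'my_trigger') report 0 instead of raising an error for a missing trigger.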
1284 
1285 /*
1286  * Perform permissions and integrity checks before acquiring a relation lock.
1287  */
1288 static void
1289 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1290  void *arg)
1291 {
1292  HeapTuple tuple;
1293  Form_pg_class form;
1294 
1295  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1296  if (!HeapTupleIsValid(tuple))
1297  return; /* concurrently dropped */
1298  form = (Form_pg_class) GETSTRUCT(tuple);
1299 
1300  /* only tables and views can have triggers */
1301  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1302  form->relkind != RELKIND_FOREIGN_TABLE &&
1303  form->relkind != RELKIND_PARTITIONED_TABLE)
1304  ereport(ERROR,
1305  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1306  errmsg("\"%s\" is not a table, view, or foreign table",
1307  rv->relname)));
1308 
1309  /* you must own the table to rename one of its triggers */
1310  if (!pg_class_ownercheck(relid, GetUserId()))
1311  aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname);
1312  if (!allowSystemTableMods && IsSystemClass(relid, form))
1313  ereport(ERROR,
1314  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1315  errmsg("permission denied: \"%s\" is a system catalog",
1316  rv->relname)));
1317 
1318  ReleaseSysCache(tuple);
1319 }
1320 
1321 /*
1322  * renametrig - changes the name of a trigger on a relation
1323  *
1324  * trigger name is changed in trigger catalog.
1325  * No record of the previous name is kept.
1326  *
1327  * get proper relrelation from relation catalog (if not arg)
1328  * scan trigger catalog
1329  * for name conflict (within rel)
1330  * for original trigger (if not arg)
1331  * modify tgname in trigger tuple
1332  * update row in catalog
1333  */
1334 ObjectAddress
1335 renametrig(RenameStmt *stmt)
1336 {
1337  Oid tgoid;
1338  Relation targetrel;
1339  Relation tgrel;
1340  HeapTuple tuple;
1341  SysScanDesc tgscan;
1342  ScanKeyData key[2];
1343  Oid relid;
1344  ObjectAddress address;
1345 
1346  /*
1347  * Look up name, check permissions, and acquire lock (which we will NOT
1348  * release until end of transaction).
1349  */
1350  relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1351  0,
1352  RangeVarCallbackForRenameTrigger,
1353  NULL);
1354 
1355  /* Have lock already, so just need to build relcache entry. */
1356  targetrel = relation_open(relid, NoLock);
1357 
1358  /*
1359  * Scan pg_trigger twice for existing triggers on relation. We do this in
1360  * order to ensure a trigger does not exist with newname (The unique index
1361  * on tgrelid/tgname would complain anyway) and to ensure a trigger does
1362  * exist with oldname.
1363  *
1364  * NOTE that this is cool only because we have AccessExclusiveLock on the
1365  * relation, so the trigger set won't be changing underneath us.
1366  */
1367  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1368 
1369  /*
1370  * First pass -- look for name conflict
1371  */
1372  ScanKeyInit(&key[0],
1373  Anum_pg_trigger_tgrelid,
1374  BTEqualStrategyNumber, F_OIDEQ,
1375  ObjectIdGetDatum(relid));
1376  ScanKeyInit(&key[1],
1377  Anum_pg_trigger_tgname,
1378  BTEqualStrategyNumber, F_NAMEEQ,
1379  PointerGetDatum(stmt->newname));
1380  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1381  NULL, 2, key);
1382  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1383  ereport(ERROR,
1384  (errcode(ERRCODE_DUPLICATE_OBJECT),
1385  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1386  stmt->newname, RelationGetRelationName(targetrel))));
1387  systable_endscan(tgscan);
1388 
1389  /*
1390  * Second pass -- look for trigger existing with oldname and update
1391  */
1392  ScanKeyInit(&key[0],
1393  Anum_pg_trigger_tgrelid,
1394  BTEqualStrategyNumber, F_OIDEQ,
1395  ObjectIdGetDatum(relid));
1396  ScanKeyInit(&key[1],
1397  Anum_pg_trigger_tgname,
1398  BTEqualStrategyNumber, F_NAMEEQ,
1399  PointerGetDatum(stmt->subname));
1400  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1401  NULL, 2, key);
1402  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1403  {
1404  Form_pg_trigger trigform;
1405 
1406  /*
1407  * Update pg_trigger tuple with new tgname.
1408  */
1409  tuple = heap_copytuple(tuple); /* need a modifiable copy */
1410  trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1411  tgoid = trigform->oid;
1412 
1413  namestrcpy(&trigform->tgname,
1414  stmt->newname);
1415 
1416  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1417 
1418  InvokeObjectPostAlterHook(TriggerRelationId,
1419  tgoid, 0);
1420 
1421  /*
1422  * Invalidate relation's relcache entry so that other backends (and
1423  * this one too!) are sent SI message to make them rebuild relcache
1424  * entries. (Ideally this should happen automatically...)
1425  */
1426  CacheInvalidateRelcache(targetrel);
1427  }
1428  else
1429  {
1430  ereport(ERROR,
1431  (errcode(ERRCODE_UNDEFINED_OBJECT),
1432  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1433  stmt->subname, RelationGetRelationName(targetrel))));
1434  }
1435 
1436  ObjectAddressSet(address, TriggerRelationId, tgoid);
1437 
1438  systable_endscan(tgscan);
1439 
1440  table_close(tgrel, RowExclusiveLock);
1441 
1442  /*
1443  * Close rel, but keep exclusive lock!
1444  */
1445  relation_close(targetrel, NoLock);
1446 
1447  return address;
1448 }
1449 
1450 
1451 /*
1452  * EnableDisableTrigger()
1453  *
1454  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1455  * to change 'tgenabled' field for the specified trigger(s)
1456  *
1457  * rel: relation to process (caller must hold suitable lock on it)
1458  * tgname: trigger to process, or NULL to scan all triggers
1459  * fires_when: new value for tgenabled field. In addition to generic
1460  * enablement/disablement, this also defines when the trigger
1461  * should be fired in session replication roles.
1462  * skip_system: if true, skip "system" triggers (constraint triggers)
1463  *
1464  * Caller should have checked permissions for the table; here we also
1465  * enforce that superuser privilege is required to alter the state of
1466  * system triggers
1467  */
1468 void
1469 EnableDisableTrigger(Relation rel, const char *tgname,
1470  char fires_when, bool skip_system, LOCKMODE lockmode)
1471 {
1472  Relation tgrel;
1473  int nkeys;
1474  ScanKeyData keys[2];
1475  SysScanDesc tgscan;
1476  HeapTuple tuple;
1477  bool found;
1478  bool changed;
1479 
1480  /* Scan the relevant entries in pg_trigger */
1481  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1482 
1483  ScanKeyInit(&keys[0],
1484  Anum_pg_trigger_tgrelid,
1485  BTEqualStrategyNumber, F_OIDEQ,
1486  ObjectIdGetDatum(RelationGetRelid(rel)));
1487  if (tgname)
1488  {
1489  ScanKeyInit(&keys[1],
1490  Anum_pg_trigger_tgname,
1491  BTEqualStrategyNumber, F_NAMEEQ,
1492  CStringGetDatum(tgname));
1493  nkeys = 2;
1494  }
1495  else
1496  nkeys = 1;
1497 
1498  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1499  NULL, nkeys, keys);
1500 
1501  found = changed = false;
1502 
1503  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1504  {
1505  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1506 
1507  if (oldtrig->tgisinternal)
1508  {
1509  /* system trigger ... ok to process? */
1510  if (skip_system)
1511  continue;
1512  if (!superuser())
1513  ereport(ERROR,
1514  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1515  errmsg("permission denied: \"%s\" is a system trigger",
1516  NameStr(oldtrig->tgname))));
1517  }
1518 
1519  found = true;
1520 
1521  if (oldtrig->tgenabled != fires_when)
1522  {
1523  /* need to change this one ... make a copy to scribble on */
1524  HeapTuple newtup = heap_copytuple(tuple);
1525  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1526 
1527  newtrig->tgenabled = fires_when;
1528 
1529  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1530 
1531  heap_freetuple(newtup);
1532 
1533  /*
1534  * When altering FOR EACH ROW triggers on a partitioned table, do
1535  * the same on the partitions as well.
1536  */
1537  if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
1538  (TRIGGER_FOR_ROW(oldtrig->tgtype)))
1539  {
1540  PartitionDesc partdesc = RelationGetPartitionDesc(rel);
1541  int i;
1542 
1543  for (i = 0; i < partdesc->nparts; i++)
1544  {
1545  Relation part;
1546 
1547  part = relation_open(partdesc->oids[i], lockmode);
1548  EnableDisableTrigger(part, NameStr(oldtrig->tgname),
1549  fires_when, skip_system, lockmode);
1550  table_close(part, NoLock); /* keep lock till commit */
1551  }
1552  }
1553 
1554  changed = true;
1555  }
1556 
1557  InvokeObjectPostAlterHook(TriggerRelationId,
1558  oldtrig->oid, 0);
1559  }
1560 
1561  systable_endscan(tgscan);
1562 
1563  table_close(tgrel, RowExclusiveLock);
1564 
1565  if (tgname && !found)
1566  ereport(ERROR,
1567  (errcode(ERRCODE_UNDEFINED_OBJECT),
1568  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1569  tgname, RelationGetRelationName(rel))));
1570 
1571  /*
1572  * If we changed anything, broadcast a SI inval message to force each
1573  * backend (including our own!) to rebuild relation's relcache entry.
1574  * Otherwise they will fail to apply the change promptly.
1575  */
1576  if (changed)
1577  CacheInvalidateRelcache(rel);
1578 }
1579 
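For context on the fires_when values handled above: whether a trigger with a given tgenabled setting actually fires also depends on the session_replication_role GUC, a check made later (in TriggerEnabled()) when the trigger is about to run. The standalone sketch below restates that decision; the character codes mirror catalog/pg_trigger.h, while the helper name and the role enum are invented for the illustration.

#include <stdbool.h>
#include <stdio.h>

/* tgenabled codes, mirroring catalog/pg_trigger.h */
#define TRIGGER_FIRES_ON_ORIGIN		'O'	/* the default */
#define TRIGGER_FIRES_ALWAYS		'A'
#define TRIGGER_FIRES_ON_REPLICA	'R'
#define TRIGGER_DISABLED			'D'

/* session_replication_role settings; numeric values are immaterial here */
typedef enum ReplRole
{
	REPL_ROLE_ORIGIN,
	REPL_ROLE_REPLICA,
	REPL_ROLE_LOCAL
} ReplRole;

/* Would a trigger with this tgenabled value fire under the given role? */
static bool
trigger_fires(char tgenabled, ReplRole role)
{
	if (role == REPL_ROLE_REPLICA)
		return (tgenabled == TRIGGER_FIRES_ON_REPLICA ||
				tgenabled == TRIGGER_FIRES_ALWAYS);
	/* origin and local sessions behave alike */
	return (tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
			tgenabled == TRIGGER_FIRES_ALWAYS);
}

int
main(void)
{
	/* ALTER TABLE t DISABLE TRIGGER trg        => 'D': never fires */
	printf("%d\n", trigger_fires(TRIGGER_DISABLED, REPL_ROLE_ORIGIN));			/* 0 */
	/* default 'O': fires on origin, suppressed in a replica session */
	printf("%d\n", trigger_fires(TRIGGER_FIRES_ON_ORIGIN, REPL_ROLE_REPLICA));	/* 0 */
	/* ALTER TABLE t ENABLE ALWAYS TRIGGER trg  => 'A': fires regardless */
	printf("%d\n", trigger_fires(TRIGGER_FIRES_ALWAYS, REPL_ROLE_REPLICA));		/* 1 */
	return 0;
}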
1580 
1581 /*
1582  * Build trigger data to attach to the given relcache entry.
1583  *
1584  * Note that trigger data attached to a relcache entry must be stored in
1585  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1586  * But we should be running in a less long-lived working context. To avoid
1587  * leaking cache memory if this routine fails partway through, we build a
1588  * temporary TriggerDesc in working memory and then copy the completed
1589  * structure into cache memory.
1590  */
1591 void
1592 RelationBuildTriggers(Relation relation)
1593 {
1594  TriggerDesc *trigdesc;
1595  int numtrigs;
1596  int maxtrigs;
1597  Trigger *triggers;
1598  Relation tgrel;
1599  ScanKeyData skey;
1600  SysScanDesc tgscan;
1601  HeapTuple htup;
1602  MemoryContext oldContext;
1603  int i;
1604 
1605  /*
1606  * Allocate a working array to hold the triggers (the array is extended if
1607  * necessary)
1608  */
1609  maxtrigs = 16;
1610  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1611  numtrigs = 0;
1612 
1613  /*
1614  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1615  * be reading the triggers in name order, except possibly during
1616  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1617  * ensures that triggers will be fired in name order.
1618  */
1619  ScanKeyInit(&skey,
1620  Anum_pg_trigger_tgrelid,
1621  BTEqualStrategyNumber, F_OIDEQ,
1622  ObjectIdGetDatum(RelationGetRelid(relation)));
1623 
1624  tgrel = table_open(TriggerRelationId, AccessShareLock);
1625  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1626  NULL, 1, &skey);
1627 
1628  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1629  {
1630  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1631  Trigger *build;
1632  Datum datum;
1633  bool isnull;
1634 
1635  if (numtrigs >= maxtrigs)
1636  {
1637  maxtrigs *= 2;
1638  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1639  }
1640  build = &(triggers[numtrigs]);
1641 
1642  build->tgoid = pg_trigger->oid;
1643  build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1644  NameGetDatum(&pg_trigger->tgname)));
1645  build->tgfoid = pg_trigger->tgfoid;
1646  build->tgtype = pg_trigger->tgtype;
1647  build->tgenabled = pg_trigger->tgenabled;
1648  build->tgisinternal = pg_trigger->tgisinternal;
1649  build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1650  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1651  build->tgconstrindid = pg_trigger->tgconstrindid;
1652  build->tgconstraint = pg_trigger->tgconstraint;
1653  build->tgdeferrable = pg_trigger->tgdeferrable;
1654  build->tginitdeferred = pg_trigger->tginitdeferred;
1655  build->tgnargs = pg_trigger->tgnargs;
1656  /* tgattr is first var-width field, so OK to access directly */
1657  build->tgnattr = pg_trigger->tgattr.dim1;
1658  if (build->tgnattr > 0)
1659  {
1660  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1661  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1662  build->tgnattr * sizeof(int16));
1663  }
1664  else
1665  build->tgattr = NULL;
1666  if (build->tgnargs > 0)
1667  {
1668  bytea *val;
1669  char *p;
1670 
1671  val = DatumGetByteaPP(fastgetattr(htup,
1672  Anum_pg_trigger_tgargs,
1673  tgrel->rd_att, &isnull));
1674  if (isnull)
1675  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1676  RelationGetRelationName(relation));
1677  p = (char *) VARDATA_ANY(val);
1678  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1679  for (i = 0; i < build->tgnargs; i++)
1680  {
1681  build->tgargs[i] = pstrdup(p);
1682  p += strlen(p) + 1;
1683  }
1684  }
1685  else
1686  build->tgargs = NULL;
1687 
1688  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1689  tgrel->rd_att, &isnull);
1690  if (!isnull)
1691  build->tgoldtable =
1692  DatumGetCString(DirectFunctionCall1(nameout, datum));
1693  else
1694  build->tgoldtable = NULL;
1695 
1696  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1697  tgrel->rd_att, &isnull);
1698  if (!isnull)
1699  build->tgnewtable =
1700  DatumGetCString(DirectFunctionCall1(nameout, datum));
1701  else
1702  build->tgnewtable = NULL;
1703 
1704  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1705  tgrel->rd_att, &isnull);
1706  if (!isnull)
1707  build->tgqual = TextDatumGetCString(datum);
1708  else
1709  build->tgqual = NULL;
1710 
1711  numtrigs++;
1712  }
1713 
1714  systable_endscan(tgscan);
1715  table_close(tgrel, AccessShareLock);
1716 
1717  /* There might not be any triggers */
1718  if (numtrigs == 0)
1719  {
1720  pfree(triggers);
1721  return;
1722  }
1723 
1724  /* Build trigdesc */
1725  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1726  trigdesc->triggers = triggers;
1727  trigdesc->numtriggers = numtrigs;
1728  for (i = 0; i < numtrigs; i++)
1729  SetTriggerFlags(trigdesc, &(triggers[i]));
1730 
1731  /* Copy completed trigdesc into cache storage */
1732  oldContext = MemoryContextSwitchTo(CacheMemoryContext);
1733  relation->trigdesc = CopyTriggerDesc(trigdesc);
1734  MemoryContextSwitchTo(oldContext);
1735 
1736  /* Release working memory */
1737  FreeTriggerDesc(trigdesc);
1738 }
1739 
1740 /*
1741  * Update the TriggerDesc's hint flags to include the specified trigger
1742  */
1743 static void
1744 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
1745 {
1746  int16 tgtype = trigger->tgtype;
1747 
1748  trigdesc->trig_insert_before_row |=
1749  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1750  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1751  trigdesc->trig_insert_after_row |=
1752  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1753  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1754  trigdesc->trig_insert_instead_row |=
1755  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1756  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
1757  trigdesc->trig_insert_before_statement |=
1758  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1759  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1760  trigdesc->trig_insert_after_statement |=
1761  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1762  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1763  trigdesc->trig_update_before_row |=
1764  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1765  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1766  trigdesc->trig_update_after_row |=
1767  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1768  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1769  trigdesc->trig_update_instead_row |=
1770  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1771  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
1772  trigdesc->trig_update_before_statement |=
1773  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1774  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1775  trigdesc->trig_update_after_statement |=
1776  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1777  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1778  trigdesc->trig_delete_before_row |=
1779  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1780  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1781  trigdesc->trig_delete_after_row |=
1782  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1783  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1784  trigdesc->trig_delete_instead_row |=
1785  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1786  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
1787  trigdesc->trig_delete_before_statement |=
1788  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1789  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1790  trigdesc->trig_delete_after_statement |=
1791  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1792  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1793  /* there are no row-level truncate triggers */
1794  trigdesc->trig_truncate_before_statement |=
1795  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1796  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
1797  trigdesc->trig_truncate_after_statement |=
1798  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1799  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
1800 
1801  trigdesc->trig_insert_new_table |=
1802  (TRIGGER_FOR_INSERT(tgtype) &&
1803  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1804  trigdesc->trig_update_old_table |=
1805  (TRIGGER_FOR_UPDATE(tgtype) &&
1806  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1807  trigdesc->trig_update_new_table |=
1808  (TRIGGER_FOR_UPDATE(tgtype) &&
1809  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1810  trigdesc->trig_delete_old_table |=
1811  (TRIGGER_FOR_DELETE(tgtype) &&
1812  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1813 }
1814 
1815 /*
1816  * Copy a TriggerDesc data structure.
1817  *
1818  * The copy is allocated in the current memory context.
1819  */
1820 TriggerDesc *
1821 CopyTriggerDesc(TriggerDesc *trigdesc)
1822 {
1823  TriggerDesc *newdesc;
1824  Trigger *trigger;
1825  int i;
1826 
1827  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
1828  return NULL;
1829 
1830  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
1831  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
1832 
1833  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
1834  memcpy(trigger, trigdesc->triggers,
1835  trigdesc->numtriggers * sizeof(Trigger));
1836  newdesc->triggers = trigger;
1837 
1838  for (i = 0; i < trigdesc->numtriggers; i++)
1839  {
1840  trigger->tgname = pstrdup(trigger->tgname);
1841  if (trigger->tgnattr > 0)
1842  {
1843  int16 *newattr;
1844 
1845  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
1846  memcpy(newattr, trigger->tgattr,
1847  trigger->tgnattr * sizeof(int16));
1848  trigger->tgattr = newattr;
1849  }
1850  if (trigger->tgnargs > 0)
1851  {
1852  char **newargs;
1853  int16 j;
1854 
1855  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
1856  for (j = 0; j < trigger->tgnargs; j++)
1857  newargs[j] = pstrdup(trigger->tgargs[j]);
1858  trigger->tgargs = newargs;
1859  }
1860  if (trigger->tgqual)
1861  trigger->tgqual = pstrdup(trigger->tgqual);
1862  if (trigger->tgoldtable)
1863  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
1864  if (trigger->tgnewtable)
1865  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
1866  trigger++;
1867  }
1868 
1869  return newdesc;
1870 }
1871 
1872 /*
1873  * Free a TriggerDesc data structure.
1874  */
1875 void
1876 FreeTriggerDesc(TriggerDesc *trigdesc)
1877 {
1878  Trigger *trigger;
1879  int i;
1880 
1881  if (trigdesc == NULL)
1882  return;
1883 
1884  trigger = trigdesc->triggers;
1885  for (i = 0; i < trigdesc->numtriggers; i++)
1886  {
1887  pfree(trigger->tgname);
1888  if (trigger->tgnattr > 0)
1889  pfree(trigger->tgattr);
1890  if (trigger->tgnargs > 0)
1891  {
1892  while (--(trigger->tgnargs) >= 0)
1893  pfree(trigger->tgargs[trigger->tgnargs]);
1894  pfree(trigger->tgargs);
1895  }
1896  if (trigger->tgqual)
1897  pfree(trigger->tgqual);
1898  if (trigger->tgoldtable)
1899  pfree(trigger->tgoldtable);
1900  if (trigger->tgnewtable)
1901  pfree(trigger->tgnewtable);
1902  trigger++;
1903  }
1904  pfree(trigdesc->triggers);
1905  pfree(trigdesc);
1906 }
1907 
1908 /*
1909  * Compare two TriggerDesc structures for logical equality.
1910  */
1911 #ifdef NOT_USED
1912 bool
1913 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
1914 {
1915  int i,
1916  j;
1917 
1918  /*
1919  * We need not examine the hint flags, just the trigger array itself; if
1920  * we have the same triggers with the same types, the flags should match.
1921  *
1922  * As of 7.3 we assume trigger set ordering is significant in the
1923  * comparison; so we just compare corresponding slots of the two sets.
1924  *
1925  * Note: comparing the stringToNode forms of the WHEN clauses means that
1926  * parse column locations will affect the result. This is okay as long as
1927  * this function is only used for detecting exact equality, as for example
1928  * in checking for staleness of a cache entry.
1929  */
1930  if (trigdesc1 != NULL)
1931  {
1932  if (trigdesc2 == NULL)
1933  return false;
1934  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
1935  return false;
1936  for (i = 0; i < trigdesc1->numtriggers; i++)
1937  {
1938  Trigger *trig1 = trigdesc1->triggers + i;
1939  Trigger *trig2 = trigdesc2->triggers + i;
1940 
1941  if (trig1->tgoid != trig2->tgoid)
1942  return false;
1943  if (strcmp(trig1->tgname, trig2->tgname) != 0)
1944  return false;
1945  if (trig1->tgfoid != trig2->tgfoid)
1946  return false;
1947  if (trig1->tgtype != trig2->tgtype)
1948  return false;
1949  if (trig1->tgenabled != trig2->tgenabled)
1950  return false;
1951  if (trig1->tgisinternal != trig2->tgisinternal)
1952  return false;
1953  if (trig1->tgisclone != trig2->tgisclone)
1954  return false;
1955  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
1956  return false;
1957  if (trig1->tgconstrindid != trig2->tgconstrindid)
1958  return false;
1959  if (trig1->tgconstraint != trig2->tgconstraint)
1960  return false;
1961  if (trig1->tgdeferrable != trig2->tgdeferrable)
1962  return false;
1963  if (trig1->tginitdeferred != trig2->tginitdeferred)
1964  return false;
1965  if (trig1->tgnargs != trig2->tgnargs)
1966  return false;
1967  if (trig1->tgnattr != trig2->tgnattr)
1968  return false;
1969  if (trig1->tgnattr > 0 &&
1970  memcmp(trig1->tgattr, trig2->tgattr,
1971  trig1->tgnattr * sizeof(int16)) != 0)
1972  return false;
1973  for (j = 0; j < trig1->tgnargs; j++)
1974  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
1975  return false;
1976  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
1977  /* ok */ ;
1978  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
1979  return false;
1980  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
1981  return false;
1982  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
1983  /* ok */ ;
1984  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
1985  return false;
1986  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
1987  return false;
1988  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
1989  /* ok */ ;
1990  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
1991  return false;
1992  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
1993  return false;
1994  }
1995  }
1996  else if (trigdesc2 != NULL)
1997  return false;
1998  return true;
1999 }
2000 #endif /* NOT_USED */
2001 
2002 /*
2003  * Check if there is a row-level trigger with transition tables that prevents
2004  * a table from becoming an inheritance child or partition. Return the name
2005  * of the first such incompatible trigger, or NULL if there is none.
2006  */
2007 const char *
2008 FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
2009 {
2010  if (trigdesc != NULL)
2011  {
2012  int i;
2013 
2014  for (i = 0; i < trigdesc->numtriggers; ++i)
2015  {
2016  Trigger *trigger = &trigdesc->triggers[i];
2017 
2018  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2019  return trigger->tgname;
2020  }
2021  }
2022 
2023  return NULL;
2024 }
2025 
2026 /*
2027  * Call a trigger function.
2028  *
2029  * trigdata: trigger descriptor.
2030  * tgindx: trigger's index in finfo and instr arrays.
2031  * finfo: array of cached trigger function call information.
2032  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2033  * per_tuple_context: memory context to execute the function in.
2034  *
2035  * Returns the tuple (or NULL) as returned by the function.
2036  */
2037 static HeapTuple
2038 ExecCallTriggerFunc(TriggerData *trigdata,
2039  int tgindx,
2040  FmgrInfo *finfo,
2041  Instrumentation *instr,
2042  MemoryContext per_tuple_context)
2043 {
2044  LOCAL_FCINFO(fcinfo, 0);
2045  PgStat_FunctionCallUsage fcusage;
2046  Datum result;
2047  MemoryContext oldContext;
2048 
2049  /*
2050  * Protect against code paths that may fail to initialize transition table
2051  * info.
2052  */
2053  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2054  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2055  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2056  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2057  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2058  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2059  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2060 
2061  finfo += tgindx;
2062 
2063  /*
2064  * We cache fmgr lookup info, to avoid making the lookup again on each
2065  * call.
2066  */
2067  if (finfo->fn_oid == InvalidOid)
2068  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2069 
2070  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2071 
2072  /*
2073  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2074  */
2075  if (instr)
2076  InstrStartNode(instr + tgindx);
2077 
2078  /*
2079  * Do the function evaluation in the per-tuple memory context, so that
2080  * leaked memory will be reclaimed once per tuple. Note in particular that
2081  * any new tuple created by the trigger function will live till the end of
2082  * the tuple cycle.
2083  */
2084  oldContext = MemoryContextSwitchTo(per_tuple_context);
2085 
2086  /*
2087  * Call the function, passing no arguments but setting a context.
2088  */
2089  InitFunctionCallInfoData(*fcinfo, finfo, 0,
2090  InvalidOid, (Node *) trigdata, NULL);
2091 
2092  pgstat_init_function_usage(fcinfo, &fcusage);
2093 
2094  MyTriggerDepth++;
2095  PG_TRY();
2096  {
2097  result = FunctionCallInvoke(fcinfo);
2098  }
2099  PG_FINALLY();
2100  {
2101  MyTriggerDepth--;
2102  }
2103  PG_END_TRY();
2104 
2105  pgstat_end_function_usage(&fcusage, true);
2106 
2107  MemoryContextSwitchTo(oldContext);
2108 
2109  /*
2110  * Trigger protocol allows function to return a null pointer, but NOT to
2111  * set the isnull result flag.
2112  */
2113  if (fcinfo->isnull)
2114  ereport(ERROR,
2115  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2116  errmsg("trigger function %u returned null value",
2117  fcinfo->flinfo->fn_oid)));
2118 
2119  /*
2120  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2121  * one "tuple returned" (really the number of firings).
2122  */
2123  if (instr)
2124  InstrStopNode(instr + tgindx, 1);
2125 
2126  return (HeapTuple) DatumGetPointer(result);
2127 }
2128 
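/*
 * Illustrative sketch, not part of trigger.c: a minimal user-level C trigger
 * function obeying the protocol that ExecCallTriggerFunc() enforces above.
 * It receives its TriggerData node via fcinfo->context and must return a heap
 * tuple pointer (possibly NULL), never a SQL null.  The function name is
 * hypothetical and the code would live in a separate loadable module.
 */
#include "postgres.h"
#include "commands/trigger.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(passthrough_row_trigger);

Datum
passthrough_row_trigger(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "passthrough_row_trigger: not called by trigger manager");

	/* For a row-level UPDATE, hand back the proposed new tuple unchanged */
	if (TRIGGER_FIRED_FOR_ROW(trigdata->tg_event) &&
		TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		return PointerGetDatum(trigdata->tg_newtuple);

	/* Otherwise return the tuple the trigger was fired for (may be NULL) */
	return PointerGetDatum(trigdata->tg_trigtuple);
}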
2129 void
2130 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2131 {
2132  TriggerDesc *trigdesc;
2133  int i;
2134  TriggerData LocTriggerData = {0};
2135 
2136  trigdesc = relinfo->ri_TrigDesc;
2137 
2138  if (trigdesc == NULL)
2139  return;
2140  if (!trigdesc->trig_insert_before_statement)
2141  return;
2142 
2143  /* no-op if we already fired BS triggers in this context */
2144  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2145  CMD_INSERT))
2146  return;
2147 
2148  LocTriggerData.type = T_TriggerData;
2149  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2150  TRIGGER_EVENT_BEFORE;
2151  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2152  for (i = 0; i < trigdesc->numtriggers; i++)
2153  {
2154  Trigger *trigger = &trigdesc->triggers[i];
2155  HeapTuple newtuple;
2156 
2157  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2158  TRIGGER_TYPE_STATEMENT,
2159  TRIGGER_TYPE_BEFORE,
2160  TRIGGER_TYPE_INSERT))
2161  continue;
2162  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2163  NULL, NULL, NULL))
2164  continue;
2165 
2166  LocTriggerData.tg_trigger = trigger;
2167  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2168  i,
2169  relinfo->ri_TrigFunctions,
2170  relinfo->ri_TrigInstrument,
2171  GetPerTupleMemoryContext(estate));
2172 
2173  if (newtuple)
2174  ereport(ERROR,
2175  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2176  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2177  }
2178 }
2179 
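/*
 * Sketch, not part of trigger.c (hypothetical name, same module and includes
 * as the sketch after ExecCallTriggerFunc above): a statement-level trigger
 * function compatible with the check above -- any non-NULL return from a
 * BEFORE STATEMENT trigger draws the "cannot return a value" error, so it
 * simply returns a null pointer.
 */
PG_FUNCTION_INFO_V1(stmt_audit_trigger);

Datum
stmt_audit_trigger(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "stmt_audit_trigger: not called by trigger manager");

	if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) &&
		TRIGGER_FIRED_BEFORE(trigdata->tg_event))
		elog(NOTICE, "statement-level INSERT trigger fired");

	return PointerGetDatum(NULL);	/* statement triggers return no tuple */
}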
2180 void
2181 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2182  TransitionCaptureState *transition_capture)
2183 {
2184  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2185 
2186  if (trigdesc && trigdesc->trig_insert_after_statement)
2187  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2188  false, NULL, NULL, NIL, NULL, transition_capture);
2189 }
2190 
2191 bool
2192 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2193  TupleTableSlot *slot)
2194 {
2195  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2196  HeapTuple newtuple = NULL;
2197  bool should_free;
2198  TriggerData LocTriggerData = {0};
2199  int i;
2200 
2201  LocTriggerData.type = T_TriggerData;
2202  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2203  TRIGGER_EVENT_ROW |
2204  TRIGGER_EVENT_BEFORE;
2205  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2206  for (i = 0; i < trigdesc->numtriggers; i++)
2207  {
2208  Trigger *trigger = &trigdesc->triggers[i];
2209  HeapTuple oldtuple;
2210 
2211  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2212  TRIGGER_TYPE_ROW,
2213  TRIGGER_TYPE_BEFORE,
2214  TRIGGER_TYPE_INSERT))
2215  continue;
2216  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2217  NULL, NULL, slot))
2218  continue;
2219 
2220  if (!newtuple)
2221  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2222 
2223  LocTriggerData.tg_trigslot = slot;
2224  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2225  LocTriggerData.tg_trigger = trigger;
2226  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2227  i,
2228  relinfo->ri_TrigFunctions,
2229  relinfo->ri_TrigInstrument,
2230  GetPerTupleMemoryContext(estate));
2231  if (newtuple == NULL)
2232  {
2233  if (should_free)
2234  heap_freetuple(oldtuple);
2235  return false; /* "do nothing" */
2236  }
2237  else if (newtuple != oldtuple)
2238  {
2239  ExecForceStoreHeapTuple(newtuple, slot, false);
2240 
2241  /*
2242  * After a tuple in a partition goes through a trigger, the user
2243  * could have changed the partition key enough that the tuple no
2244  * longer fits the partition. Verify that.
2245  */
2246  if (trigger->tgisclone &&
2247  !ExecPartitionCheck(relinfo, slot, estate, false))
2248  ereport(ERROR,
2249  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2250  errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2251  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2252  trigger->tgname,
2253  get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
2254  RelationGetRelationName(relinfo->ri_RelationDesc))));
2255 
2256  if (should_free)
2257  heap_freetuple(oldtuple);
2258 
2259  /* signal tuple should be re-fetched if used */
2260  newtuple = NULL;
2261  }
2262  }
2263 
2264  return true;
2265 }
2266 
2267 void
2268 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2269  TupleTableSlot *slot, List *recheckIndexes,
2270  TransitionCaptureState *transition_capture)
2271 {
2272  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2273 
2274  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2275  (transition_capture && transition_capture->tcs_insert_new_table))
2276  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2277  true, NULL, slot,
2278  recheckIndexes, NULL,
2279  transition_capture);
2280 }
2281 
2282 bool
2283 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2284  TupleTableSlot *slot)
2285 {
2286  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2287  HeapTuple newtuple = NULL;
2288  bool should_free;
2289  TriggerData LocTriggerData = {0};
2290  int i;
2291 
2292  LocTriggerData.type = T_TriggerData;
2293  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2294  TRIGGER_EVENT_ROW |
2295  TRIGGER_EVENT_INSTEAD;
2296  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2297  for (i = 0; i < trigdesc->numtriggers; i++)
2298  {
2299  Trigger *trigger = &trigdesc->triggers[i];
2300  HeapTuple oldtuple;
2301 
2302  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2303  TRIGGER_TYPE_ROW,
2304  TRIGGER_TYPE_INSTEAD,
2305  TRIGGER_TYPE_INSERT))
2306  continue;
2307  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2308  NULL, NULL, slot))
2309  continue;
2310 
2311  if (!newtuple)
2312  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2313 
2314  LocTriggerData.tg_trigslot = slot;
2315  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2316  LocTriggerData.tg_trigger = trigger;
2317  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2318  i,
2319  relinfo->ri_TrigFunctions,
2320  relinfo->ri_TrigInstrument,
2321  GetPerTupleMemoryContext(estate));
2322  if (newtuple == NULL)
2323  {
2324  if (should_free)
2325  heap_freetuple(oldtuple);
2326  return false; /* "do nothing" */
2327  }
2328  else if (newtuple != oldtuple)
2329  {
2330  ExecForceStoreHeapTuple(newtuple, slot, false);
2331 
2332  if (should_free)
2333  heap_freetuple(oldtuple);
2334 
2335  /* signal tuple should be re-fetched if used */
2336  newtuple = NULL;
2337  }
2338  }
2339 
2340  return true;
2341 }
2342 
2343 void
2344 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2345 {
2346  TriggerDesc *trigdesc;
2347  int i;
2348  TriggerData LocTriggerData = {0};
2349 
2350  trigdesc = relinfo->ri_TrigDesc;
2351 
2352  if (trigdesc == NULL)
2353  return;
2354  if (!trigdesc->trig_delete_before_statement)
2355  return;
2356 
2357  /* no-op if we already fired BS triggers in this context */
2358  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2359  CMD_DELETE))
2360  return;
2361 
2362  LocTriggerData.type = T_TriggerData;
2363  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2364  TRIGGER_EVENT_BEFORE;
2365  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2366  for (i = 0; i < trigdesc->numtriggers; i++)
2367  {
2368  Trigger *trigger = &trigdesc->triggers[i];
2369  HeapTuple newtuple;
2370 
2371  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2372  TRIGGER_TYPE_STATEMENT,
2373  TRIGGER_TYPE_BEFORE,
2374  TRIGGER_TYPE_DELETE))
2375  continue;
2376  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2377  NULL, NULL, NULL))
2378  continue;
2379 
2380  LocTriggerData.tg_trigger = trigger;
2381  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2382  i,
2383  relinfo->ri_TrigFunctions,
2384  relinfo->ri_TrigInstrument,
2385  GetPerTupleMemoryContext(estate));
2386 
2387  if (newtuple)
2388  ereport(ERROR,
2389  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2390  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2391  }
2392 }
2393 
2394 void
2395 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2396  TransitionCaptureState *transition_capture)
2397 {
2398  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2399 
2400  if (trigdesc && trigdesc->trig_delete_after_statement)
2401  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2402  false, NULL, NULL, NIL, NULL, transition_capture);
2403 }
2404 
2405 /*
2406  * Execute BEFORE ROW DELETE triggers.
2407  *
2408  * True indicates the caller can proceed with the delete. False indicates the
2409  * caller needs to suppress the delete and, additionally, if requested, we
2410  * pass back the concurrently updated tuple if any.
2411  */
2412 bool
2413 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2414  ResultRelInfo *relinfo,
2415  ItemPointer tupleid,
2416  HeapTuple fdw_trigtuple,
2417  TupleTableSlot **epqslot)
2418 {
2419  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2420  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2421  bool result = true;
2422  TriggerData LocTriggerData = {0};
2423  HeapTuple trigtuple;
2424  bool should_free = false;
2425  int i;
2426 
2427  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2428  if (fdw_trigtuple == NULL)
2429  {
2430  TupleTableSlot *epqslot_candidate = NULL;
2431 
2432  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2433  LockTupleExclusive, slot, &epqslot_candidate))
2434  return false;
2435 
2436  /*
2437  * If the tuple was concurrently updated and the caller of this
2438  * function requested the updated tuple, skip the trigger
2439  * execution.
2440  */
2441  if (epqslot_candidate != NULL && epqslot != NULL)
2442  {
2443  *epqslot = epqslot_candidate;
2444  return false;
2445  }
2446 
2447  trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2448 
2449  }
2450  else
2451  {
2452  trigtuple = fdw_trigtuple;
2453  ExecForceStoreHeapTuple(trigtuple, slot, false);
2454  }
2455 
2456  LocTriggerData.type = T_TriggerData;
2457  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2458  TRIGGER_EVENT_ROW |
2459  TRIGGER_EVENT_BEFORE;
2460  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2461  for (i = 0; i < trigdesc->numtriggers; i++)
2462  {
2463  HeapTuple newtuple;
2464  Trigger *trigger = &trigdesc->triggers[i];
2465 
2466  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2467  TRIGGER_TYPE_ROW,
2468  TRIGGER_TYPE_BEFORE,
2469  TRIGGER_TYPE_DELETE))
2470  continue;
2471  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2472  NULL, slot, NULL))
2473  continue;
2474 
2475  LocTriggerData.tg_trigslot = slot;
2476  LocTriggerData.tg_trigtuple = trigtuple;
2477  LocTriggerData.tg_trigger = trigger;
2478  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2479  i,
2480  relinfo->ri_TrigFunctions,
2481  relinfo->ri_TrigInstrument,
2482  GetPerTupleMemoryContext(estate));
2483  if (newtuple == NULL)
2484  {
2485  result = false; /* tell caller to suppress delete */
2486  break;
2487  }
2488  if (newtuple != trigtuple)
2489  heap_freetuple(newtuple);
2490  }
2491  if (should_free)
2492  heap_freetuple(trigtuple);
2493 
2494  return result;
2495 }
2496 
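/*
 * Sketch, not part of trigger.c (hypothetical name, same module as the
 * sketches above): a BEFORE ROW DELETE trigger function that makes
 * ExecBRDeleteTriggers() return false -- returning NULL from the trigger is
 * the "do nothing" signal that suppresses the delete.
 */
PG_FUNCTION_INFO_V1(block_delete_trigger);

Datum
block_delete_trigger(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "block_delete_trigger: not called by trigger manager");

	if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event) &&
		TRIGGER_FIRED_BEFORE(trigdata->tg_event) &&
		TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
		return PointerGetDatum(NULL);	/* suppress this DELETE */

	return PointerGetDatum(trigdata->tg_trigtuple);
}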
2497 void
2498 ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2499  ItemPointer tupleid,
2500  HeapTuple fdw_trigtuple,
2501  TransitionCaptureState *transition_capture)
2502 {
2503  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2504  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2505 
2506  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2507  (transition_capture && transition_capture->tcs_delete_old_table))
2508  {
2509  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2510  if (fdw_trigtuple == NULL)
2511  GetTupleForTrigger(estate,
2512  NULL,
2513  relinfo,
2514  tupleid,
2515  LockTupleExclusive,
2516  slot,
2517  NULL);
2518  else
2519  ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2520 
2522  true, slot, NULL, NIL, NULL,
2523  transition_capture);
2524  }
2525 }
2526 
2527 bool
2528 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2529  HeapTuple trigtuple)
2530 {
2531  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2532  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2533  TriggerData LocTriggerData = {0};
2534  int i;
2535 
2536  LocTriggerData.type = T_TriggerData;
2537  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2538  TRIGGER_EVENT_ROW |
2539  TRIGGER_EVENT_INSTEAD;
2540  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2541 
2542  ExecForceStoreHeapTuple(trigtuple, slot, false);
2543 
2544  for (i = 0; i < trigdesc->numtriggers; i++)
2545  {
2546  HeapTuple rettuple;
2547  Trigger *trigger = &trigdesc->triggers[i];
2548 
2549  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2550  TRIGGER_TYPE_ROW,
2551  TRIGGER_TYPE_INSTEAD,
2552  TRIGGER_TYPE_DELETE))
2553  continue;
2554  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2555  NULL, slot, NULL))
2556  continue;
2557 
2558  LocTriggerData.tg_trigslot = slot;
2559  LocTriggerData.tg_trigtuple = trigtuple;
2560  LocTriggerData.tg_trigger = trigger;
2561  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2562  i,
2563  relinfo->ri_TrigFunctions,
2564  relinfo->ri_TrigInstrument,
2565  GetPerTupleMemoryContext(estate));
2566  if (rettuple == NULL)
2567  return false; /* Delete was suppressed */
2568  if (rettuple != trigtuple)
2569  heap_freetuple(rettuple);
2570  }
2571  return true;
2572 }
2573 
2574 void
2575 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2576 {
2577  TriggerDesc *trigdesc;
2578  int i;
2579  TriggerData LocTriggerData = {0};
2580  Bitmapset *updatedCols;
2581 
2582  trigdesc = relinfo->ri_TrigDesc;
2583 
2584  if (trigdesc == NULL)
2585  return;
2586  if (!trigdesc->trig_update_before_statement)
2587  return;
2588 
2589  /* no-op if we already fired BS triggers in this context */
2590  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2591  CMD_UPDATE))
2592  return;
2593 
2594  updatedCols = GetAllUpdatedColumns(relinfo, estate);
2595 
2596  LocTriggerData.type = T_TriggerData;
2597  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2598  TRIGGER_EVENT_BEFORE;
2599  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2600  LocTriggerData.tg_updatedcols = updatedCols;
2601  for (i = 0; i < trigdesc->numtriggers; i++)
2602  {
2603  Trigger *trigger = &trigdesc->triggers[i];
2604  HeapTuple newtuple;
2605 
2606  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2607  TRIGGER_TYPE_STATEMENT,
2608  TRIGGER_TYPE_BEFORE,
2609  TRIGGER_TYPE_UPDATE))
2610  continue;
2611  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2612  updatedCols, NULL, NULL))
2613  continue;
2614 
2615  LocTriggerData.tg_trigger = trigger;
2616  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2617  i,
2618  relinfo->ri_TrigFunctions,
2619  relinfo->ri_TrigInstrument,
2620  GetPerTupleMemoryContext(estate));
2621 
2622  if (newtuple)
2623  ereport(ERROR,
2624  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2625  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2626  }
2627 }
2628 
2629 void
2630 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2631  TransitionCaptureState *transition_capture)
2632 {
2633  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2634 
2635  if (trigdesc && trigdesc->trig_update_after_statement)
2636  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2637  false, NULL, NULL, NIL,
2638  GetAllUpdatedColumns(relinfo, estate),
2639  transition_capture);
2640 }
2641 
2642 bool
2643 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2644  ResultRelInfo *relinfo,
2645  ItemPointer tupleid,
2646  HeapTuple fdw_trigtuple,
2647  TupleTableSlot *newslot)
2648 {
2649  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2650  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2651  HeapTuple newtuple = NULL;
2652  HeapTuple trigtuple;
2653  bool should_free_trig = false;
2654  bool should_free_new = false;
2655  TriggerData LocTriggerData = {0};
2656  int i;
2657  Bitmapset *updatedCols;
2658  LockTupleMode lockmode;
2659 
2660  /* Determine lock mode to use */
2661  lockmode = ExecUpdateLockMode(estate, relinfo);
2662 
2663  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2664  if (fdw_trigtuple == NULL)
2665  {
2666  TupleTableSlot *epqslot_candidate = NULL;
2667 
2668  /* get a copy of the on-disk tuple we are planning to update */
2669  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2670  lockmode, oldslot, &epqslot_candidate))
2671  return false; /* cancel the update action */
2672 
2673  /*
2674  * In READ COMMITTED isolation level it's possible that target tuple
2675  * was changed due to concurrent update. In that case we have a raw
2676  * subplan output tuple in epqslot_candidate, and need to run it
2677  * through the junk filter to produce an insertable tuple.
2678  *
2679  * Caution: more than likely, the passed-in slot is the same as the
2680  * junkfilter's output slot, so we are clobbering the original value
2681  * of slottuple by doing the filtering. This is OK since neither we
2682  * nor our caller have any more interest in the prior contents of that
2683  * slot.
2684  */
2685  if (epqslot_candidate != NULL)
2686  {
2687  TupleTableSlot *epqslot_clean;
2688 
2689  epqslot_clean = ExecFilterJunk(relinfo->ri_junkFilter, epqslot_candidate);
2690 
2691  if (newslot != epqslot_clean)
2692  ExecCopySlot(newslot, epqslot_clean);
2693  }
2694 
2695  trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
2696  }
2697  else
2698  {
2699  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
2700  trigtuple = fdw_trigtuple;
2701  }
2702 
2703  LocTriggerData.type = T_TriggerData;
2704  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2705  TRIGGER_EVENT_ROW |
2706  TRIGGER_EVENT_BEFORE;
2707  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2708  updatedCols = GetAllUpdatedColumns(relinfo, estate);
2709  LocTriggerData.tg_updatedcols = updatedCols;
2710  for (i = 0; i < trigdesc->numtriggers; i++)
2711  {
2712  Trigger *trigger = &trigdesc->triggers[i];
2713  HeapTuple oldtuple;
2714 
2715  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2716  TRIGGER_TYPE_ROW,
2717  TRIGGER_TYPE_BEFORE,
2718  TRIGGER_TYPE_UPDATE))
2719  continue;
2720  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2721  updatedCols, oldslot, newslot))
2722  continue;
2723 
2724  if (!newtuple)
2725  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
2726 
2727  LocTriggerData.tg_trigslot = oldslot;
2728  LocTriggerData.tg_trigtuple = trigtuple;
2729  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2730  LocTriggerData.tg_newslot = newslot;
2731  LocTriggerData.tg_trigger = trigger;
2732  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2733  i,
2734  relinfo->ri_TrigFunctions,
2735  relinfo->ri_TrigInstrument,
2736  GetPerTupleMemoryContext(estate));
2737 
2738  if (newtuple == NULL)
2739  {
2740  if (should_free_trig)
2741  heap_freetuple(trigtuple);
2742  if (should_free_new)
2743  heap_freetuple(oldtuple);
2744  return false; /* "do nothing" */
2745  }
2746  else if (newtuple != oldtuple)
2747  {
2748  ExecForceStoreHeapTuple(newtuple, newslot, false);
2749 
2750  if (trigger->tgisclone &&
2751  !ExecPartitionCheck(relinfo, newslot, estate, false))
2752  ereport(ERROR,
2753  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2754  errmsg("moving row to another partition during a BEFORE trigger is not supported"),
2755  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2756  trigger->tgname,
2757  get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
2758  RelationGetRelationName(relinfo->ri_RelationDesc))));
2759 
2760  /*
2761  * If the tuple being stored (i.e. the one returned by the trigger)
2762  * is the old row version, and the heap tuple passed to the trigger
2763  * was allocated locally, materialize the slot. Otherwise we might
2764  * free it while it is still referenced by the slot.
2765  */
2766  if (should_free_trig && newtuple == trigtuple)
2767  ExecMaterializeSlot(newslot);
2768 
2769  if (should_free_new)
2770  heap_freetuple(oldtuple);
2771 
2772  /* signal tuple should be re-fetched if used */
2773  newtuple = NULL;
2774  }
2775  }
2776  if (should_free_trig)
2777  heap_freetuple(trigtuple);
2778 
2779  return true;
2780 }
2781 
2782 void
2783 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2784  ItemPointer tupleid,
2785  HeapTuple fdw_trigtuple,
2786  TupleTableSlot *newslot,
2787  List *recheckIndexes,
2788  TransitionCaptureState *transition_capture)
2789 {
2790  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2791  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2792 
2793  ExecClearTuple(oldslot);
2794 
2795  if ((trigdesc && trigdesc->trig_update_after_row) ||
2796  (transition_capture &&
2797  (transition_capture->tcs_update_old_table ||
2798  transition_capture->tcs_update_new_table)))
2799  {
2800  /*
2801  * Note: if the UPDATE is converted into a DELETE+INSERT as part of
2802  * update-partition-key operation, then this function is also called
2803  * separately for DELETE and INSERT to capture transition table rows.
2804  * In such case, either old tuple or new tuple can be NULL.
2805  */
2806  if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
2807  GetTupleForTrigger(estate,
2808  NULL,
2809  relinfo,
2810  tupleid,
2811  LockTupleExclusive,
2812  oldslot,
2813  NULL);
2814  else if (fdw_trigtuple != NULL)
2815  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
2816 
2818  true, oldslot, newslot, recheckIndexes,
2819  GetAllUpdatedColumns(relinfo, estate),
2820  transition_capture);
2821  }
2822 }
2823 
2824 bool
2825 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2826  HeapTuple trigtuple, TupleTableSlot *newslot)
2827 {
2828  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2829  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2830  HeapTuple newtuple = NULL;
2831  bool should_free;
2832  TriggerData LocTriggerData = {0};
2833  int i;
2834 
2835  LocTriggerData.type = T_TriggerData;
2836  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2837  TRIGGER_EVENT_ROW |
2838  TRIGGER_EVENT_INSTEAD;
2839  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2840 
2841  ExecForceStoreHeapTuple(trigtuple, oldslot, false);
2842 
2843  for (i = 0; i < trigdesc->numtriggers; i++)
2844  {
2845  Trigger *trigger = &trigdesc->triggers[i];
2846  HeapTuple oldtuple;
2847 
2848  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2849  TRIGGER_TYPE_ROW,
2850  TRIGGER_TYPE_INSTEAD,
2851  TRIGGER_TYPE_UPDATE))
2852  continue;
2853  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2854  NULL, oldslot, newslot))
2855  continue;
2856 
2857  if (!newtuple)
2858  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
2859 
2860  LocTriggerData.tg_trigslot = oldslot;
2861  LocTriggerData.tg_trigtuple = trigtuple;
2862  LocTriggerData.tg_newslot = newslot;
2863  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2864 
2865  LocTriggerData.tg_trigger = trigger;
2866  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2867  i,
2868  relinfo->ri_TrigFunctions,
2869  relinfo->ri_TrigInstrument,
2870  GetPerTupleMemoryContext(estate));
2871  if (newtuple == NULL)
2872  {
2873  return false; /* "do nothing" */
2874  }
2875  else if (newtuple != oldtuple)
2876  {
2877  ExecForceStoreHeapTuple(newtuple, newslot, false);
2878 
2879  if (should_free)
2880  heap_freetuple(oldtuple);
2881 
2882  /* signal tuple should be re-fetched if used */
2883  newtuple = NULL;
2884  }
2885  }
2886 
2887  return true;
2888 }
2889 
2890 void
2891 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2892 {
2893  TriggerDesc *trigdesc;
2894  int i;
2895  TriggerData LocTriggerData = {0};
2896 
2897  trigdesc = relinfo->ri_TrigDesc;
2898 
2899  if (trigdesc == NULL)
2900  return;
2901  if (!trigdesc->trig_truncate_before_statement)
2902  return;
2903 
2904  LocTriggerData.type = T_TriggerData;
2905  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
2906  TRIGGER_EVENT_BEFORE;
2907  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2908 
2909  for (i = 0; i < trigdesc->numtriggers; i++)
2910  {
2911  Trigger *trigger = &trigdesc->triggers[i];
2912  HeapTuple newtuple;
2913 
2914  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2915  TRIGGER_TYPE_STATEMENT,
2916  TRIGGER_TYPE_BEFORE,
2917  TRIGGER_TYPE_TRUNCATE))
2918  continue;
2919  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2920  NULL, NULL, NULL))
2921  continue;
2922 
2923  LocTriggerData.tg_trigger = trigger;
2924  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2925  i,
2926  relinfo->ri_TrigFunctions,
2927  relinfo->ri_TrigInstrument,
2928  GetPerTupleMemoryContext(estate));
2929 
2930  if (newtuple)
2931  ereport(ERROR,
2932  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2933  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2934  }
2935 }
2936 
2937 void
2938 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2939 {
2940  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2941 
2942  if (trigdesc && trigdesc->trig_truncate_after_statement)
2943  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_TRUNCATE,
2944  false, NULL, NULL, NIL, NULL, NULL);
2945 }
2946 
2947 
2948 static bool
2949 GetTupleForTrigger(EState *estate,
2950  EPQState *epqstate,
2951  ResultRelInfo *relinfo,
2952  ItemPointer tid,
2953  LockTupleMode lockmode,
2954  TupleTableSlot *oldslot,
2955  TupleTableSlot **epqslot)
2956 {
2957  Relation relation = relinfo->ri_RelationDesc;
2958 
2959  if (epqslot != NULL)
2960  {
2961  TM_Result test;
2962  TM_FailureData tmfd;
2963  int lockflags = 0;
2964 
2965  *epqslot = NULL;
2966 
2967  /* caller must pass an epqstate if EvalPlanQual is possible */
2968  Assert(epqstate != NULL);
2969 
2970  /*
2971  * lock tuple for update
2972  */
2973  if (!IsolationUsesXactSnapshot())
2974  lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
2975  test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
2976  estate->es_output_cid,
2977  lockmode, LockWaitBlock,
2978  lockflags,
2979  &tmfd);
2980 
2981  switch (test)
2982  {
2983  case TM_SelfModified:
2984 
2985  /*
2986  * The target tuple was already updated or deleted by the
2987  * current command, or by a later command in the current
2988  * transaction. We ignore the tuple in the former case, and
2989  * throw error in the latter case, for the same reasons
2990  * enumerated in ExecUpdate and ExecDelete in
2991  * nodeModifyTable.c.
2992  */
2993  if (tmfd.cmax != estate->es_output_cid)
2994  ereport(ERROR,
2995  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2996  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2997  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2998 
2999  /* treat it as deleted; do not process */
3000  return false;
3001 
3002  case TM_Ok:
3003  if (tmfd.traversed)
3004  {
3005  *epqslot = EvalPlanQual(epqstate,
3006  relation,
3007  relinfo->ri_RangeTableIndex,
3008  oldslot);
3009 
3010  /*
3011  * If EvalPlanQual failed for the updated tuple, we must
3012  * not process this tuple!
3013  */
3014  if (TupIsNull(*epqslot))
3015  {
3016  *epqslot = NULL;
3017  return false;
3018  }
3019  }
3020  break;
3021 
3022  case TM_Updated:
3023  if (IsolationUsesXactSnapshot())
3024  ereport(ERROR,
3025  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3026  errmsg("could not serialize access due to concurrent update")));
3027  elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3028  break;
3029 
3030  case TM_Deleted:
3031  if (IsolationUsesXactSnapshot())
3032  ereport(ERROR,
3033  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3034  errmsg("could not serialize access due to concurrent delete")));
3035  /* tuple was deleted */
3036  return false;
3037 
3038  case TM_Invisible:
3039  elog(ERROR, "attempted to lock invisible tuple");
3040  break;
3041 
3042  default:
3043  elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3044  return false; /* keep compiler quiet */
3045  }
3046  }
3047  else
3048  {
3049  /*
3050  * We expect the tuple to be present, thus very simple error handling
3051  * suffices.
3052  */
3053  if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3054  oldslot))
3055  elog(ERROR, "failed to fetch tuple for trigger");
3056  }
3057 
3058  return true;
3059 }
3060 
3061 /*
3062  * Is trigger enabled to fire?
3063  */
3064 static bool
3065 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3066  Trigger *trigger, TriggerEvent event,
3067  Bitmapset *modifiedCols,
3068  TupleTableSlot *oldslot, TupleTableSlot *newslot)
3069 {
3070  /* Check replication-role-dependent enable state */
3071  if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3072  {
3073  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3074  trigger->tgenabled == TRIGGER_DISABLED)
3075  return false;
3076  }
3077  else /* ORIGIN or LOCAL role */
3078  {
3079  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3080  trigger->tgenabled == TRIGGER_DISABLED)
3081  return false;
3082  }
3083 
3084  /*
3085  * Check for column-specific trigger (only possible for UPDATE, and in
3086  * fact we *must* ignore tgattr for other event types)
3087  */
3088  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3089  {
3090  int i;
3091  bool modified;
3092 
3093  modified = false;
3094  for (i = 0; i < trigger->tgnattr; i++)
3095  {
3096  if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3097  modifiedCols))
3098  {
3099  modified = true;
3100  break;
3101  }
3102  }
3103  if (!modified)
3104  return false;
3105  }
3106 
3107  /* Check for WHEN clause */
3108  if (trigger->tgqual)
3109  {
3110  ExprState **predicate;
3111  ExprContext *econtext;
3112  MemoryContext oldContext;
3113  int i;
3114 
3115  Assert(estate != NULL);
3116 
3117  /*
3118  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3119  * matching element of relinfo->ri_TrigWhenExprs[]
3120  */
3121  i = trigger - relinfo->ri_TrigDesc->triggers;
3122  predicate = &relinfo->ri_TrigWhenExprs[i];
3123 
3124  /*
3125  * If first time through for this WHEN expression, build expression
3126  * nodetrees for it. Keep them in the per-query memory context so
3127  * they'll survive throughout the query.
3128  */
3129  if (*predicate == NULL)
3130  {
3131  Node *tgqual;
3132 
3133  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3134  tgqual = stringToNode(trigger->tgqual);
3135  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3136  ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3137  ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3138  /* ExecPrepareQual wants implicit-AND form */
3139  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3140  *predicate = ExecPrepareQual((List *) tgqual, estate);
3141  MemoryContextSwitchTo(oldContext);
3142  }
3143 
3144  /*
3145  * We will use the EState's per-tuple context for evaluating WHEN
3146  * expressions (creating it if it's not already there).
3147  */
3148  econtext = GetPerTupleExprContext(estate);
3149 
3150  /*
3151  * Finally evaluate the expression, making the old and/or new tuples
3152  * available as INNER_VAR/OUTER_VAR respectively.
3153  */
3154  econtext->ecxt_innertuple = oldslot;
3155  econtext->ecxt_outertuple = newslot;
3156  if (!ExecQual(*predicate, econtext))
3157  return false;
3158  }
3159 
3160  return true;
3161 }
3162 
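/*
 * Sketch (hypothetical helper, not part of trigger.c): updated-column
 * bitmapsets are keyed by attribute number offset by
 * FirstLowInvalidHeapAttributeNumber, which is why the tgattr check above
 * subtracts that constant before calling bms_is_member().  The headers this
 * needs (access/sysattr.h, nodes/bitmapset.h) are already included by this
 * file.
 */
static bool
column_was_updated(int16 attnum, Bitmapset *modifiedCols)
{
	return bms_is_member(attnum - FirstLowInvalidHeapAttributeNumber,
						 modifiedCols);
}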
3163 
3164 /* ----------
3165  * After-trigger stuff
3166  *
3167  * The AfterTriggersData struct holds data about pending AFTER trigger events
3168  * during the current transaction tree. (BEFORE triggers are fired
3169  * immediately so we don't need any persistent state about them.) The struct
3170  * and most of its subsidiary data are kept in TopTransactionContext; however
3171  * some data that can be discarded sooner appears in the CurTransactionContext
3172  * of the relevant subtransaction. Also, the individual event records are
3173  * kept in a separate sub-context of TopTransactionContext. This is done
3174  * mainly so that it's easy to tell from a memory context dump how much space
3175  * is being eaten by trigger events.
3176  *
3177  * Because the list of pending events can grow large, we go to some
3178  * considerable effort to minimize per-event memory consumption. The event
3179  * records are grouped into chunks and common data for similar events in the
3180  * same chunk is only stored once.
3181  *
3182  * XXX We need to be able to save the per-event data in a file if it grows too
3183  * large.
3184  * ----------
3185  */
3186 
3187 /* Per-trigger SET CONSTRAINT status */
3188 typedef struct SetConstraintTriggerData
3189 {
3190  Oid sct_tgoid;
3191  bool sct_tgisdeferred;
3192 } SetConstraintTriggerData;
3193 
3194 typedef struct SetConstraintTriggerData *SetConstraintTrigger;
3195 
3196 /*
3197  * SET CONSTRAINT intra-transaction status.
3198  *
3199  * We make this a single palloc'd object so it can be copied and freed easily.
3200  *
3201  * all_isset and all_isdeferred are used to keep track
3202  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3203  *
3204  * trigstates[] stores per-trigger tgisdeferred settings.
3205  */
3206 typedef struct SetConstraintStateData
3207 {
3208  bool all_isset;
3209  bool all_isdeferred;
3210  int numstates; /* number of trigstates[] entries in use */
3211  int numalloc; /* allocated size of trigstates[] */
3212  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3213 } SetConstraintStateData;
3214 
3215 typedef SetConstraintStateData *SetConstraintState;
3216 
3217 
3218 /*
3219  * Per-trigger-event data
3220  *
3221  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3222  * status bits and up to two tuple CTIDs. Each event record also has an
3223  * associated AfterTriggerSharedData that is shared across all instances of
3224  * similar events within a "chunk".
3225  *
3226  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3227  * fields. Updates of regular tables use two; inserts and deletes of regular
3228  * tables use one; foreign tables always use zero and save the tuple(s) to a
3229  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3230  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3231  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3232  * tuple(s). This permits storing tuples once regardless of the number of
3233  * row-level triggers on a foreign table.
3234  *
3235  * Note that we need triggers on foreign tables to be fired in exactly the
3236  * order they were queued, so that the tuples come out of the tuplestore in
3237  * the right order. To ensure that, we forbid deferrable (constraint)
3238  * triggers on foreign tables. This also ensures that such triggers do not
3239  * get deferred into outer trigger query levels, meaning that it's okay to
3240  * destroy the tuplestore at the end of the query level.
3241  *
3242  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3243  * require no ctid field. We lack the flag bit space to neatly represent that
3244  * distinct case, and it seems unlikely to be worth much trouble.
3245  *
3246  * Note: ats_firing_id is initially zero and is set to something else when
3247  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3248  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3249  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3250  * because all instances of the same type of event in a given event list will
3251  * be fired at the same time, if they were queued between the same firing
3252  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3253  * a new event to an existing AfterTriggerSharedData record.
3254  */
3255 typedef uint32 TriggerFlags;
3256 
3257 #define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order bits */
3258 #define AFTER_TRIGGER_DONE 0x10000000
3259 #define AFTER_TRIGGER_IN_PROGRESS 0x20000000
3260 /* bits describing the size and tuple sources of this event */
3261 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3262 #define AFTER_TRIGGER_FDW_FETCH 0x80000000
3263 #define AFTER_TRIGGER_1CTID 0x40000000
3264 #define AFTER_TRIGGER_2CTID 0xC0000000
3265 #define AFTER_TRIGGER_TUP_BITS 0xC0000000
3266 
3267 typedef struct AfterTriggerSharedData *AfterTriggerShared;
3268 
3269 typedef struct AfterTriggerSharedData
3270 {
3271  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3272  Oid ats_tgoid; /* the trigger's ID */
3273  Oid ats_relid; /* the relation it's on */
3274  CommandId ats_firing_id; /* ID for firing cycle */
3275  struct AfterTriggersTableData *ats_table; /* transition table access */
3276  Bitmapset *ats_modifiedcols; /* modified columns */
3277 } AfterTriggerSharedData;
3278 
3279 typedef struct AfterTriggerEventData *AfterTriggerEvent;
3280 
3281 typedef struct AfterTriggerEventData
3282 {
3283  TriggerFlags ate_flags; /* status bits and offset to shared data */
3284  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3285  ItemPointerData ate_ctid2; /* new updated tuple */
3286 } AfterTriggerEventData;
3287 
3288 /* AfterTriggerEventData, minus ate_ctid2 */
3289 typedef struct AfterTriggerEventDataOneCtid
3290 {
3291  TriggerFlags ate_flags; /* status bits and offset to shared data */
3292  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3293 } AfterTriggerEventDataOneCtid;
3294 
3295 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
3296 typedef struct AfterTriggerEventDataZeroCtids
3297 {
3298  TriggerFlags ate_flags; /* status bits and offset to shared data */
3299 } AfterTriggerEventDataZeroCtids;
3300 
3301 #define SizeofTriggerEvent(evt) \
3302  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3303  sizeof(AfterTriggerEventData) : \
3304  ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3305  sizeof(AfterTriggerEventDataOneCtid) : \
3306  sizeof(AfterTriggerEventDataZeroCtids))
3307 
3308 #define GetTriggerSharedData(evt) \
3309  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3310 
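/*
 * Sketch (hypothetical helper): the shared record for an event lives at the
 * byte offset packed into ate_flags, which is exactly what the
 * GetTriggerSharedData() macro above computes.
 */
static Oid
after_trigger_event_tgoid(AfterTriggerEvent event)
{
	AfterTriggerShared evtshared = GetTriggerSharedData(event);

	return evtshared->ats_tgoid;
}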
3311 /*
3312  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3313  * larger chunks (a slightly more sophisticated version of an expansible
3314  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3315  * AfterTriggerEventData records; the space between endfree and endptr is
3316  * occupied by AfterTriggerSharedData records.
3317  */
3318 typedef struct AfterTriggerEventChunk
3319 {
3320  struct AfterTriggerEventChunk *next; /* list link */
3321  char *freeptr; /* start of free space in chunk */
3322  char *endfree; /* end of free space in chunk */
3323  char *endptr; /* end of chunk */
3324  /* event data follows here */
3325 } AfterTriggerEventChunk;
3326 
3327 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3328 
3329 /* A list of events */
3330 typedef struct AfterTriggerEventList
3331 {
3332  AfterTriggerEventChunk *head;
3333  AfterTriggerEventChunk *tail;
3334  char *tailfree; /* freeptr of tail chunk */
3335 } AfterTriggerEventList;
3336 
3337 /* Macros to help in iterating over a list of events */
3338 #define for_each_chunk(cptr, evtlist) \
3339  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3340 #define for_each_event(eptr, cptr) \
3341  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3342  (char *) eptr < (cptr)->freeptr; \
3343  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3344 /* Use this if no special per-chunk processing is needed */
3345 #define for_each_event_chunk(eptr, cptr, evtlist) \
3346  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3347 
3348 /* Macros for iterating from a start point that might not be list start */
3349 #define for_each_chunk_from(cptr) \
3350  for (; cptr != NULL; cptr = cptr->next)
3351 #define for_each_event_from(eptr, cptr) \
3352  for (; \
3353  (char *) eptr < (cptr)->freeptr; \
3354  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3355 
3356 
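/*
 * Sketch (hypothetical helper): walking every queued event in a list with
 * the iteration macros above.
 */
static int
count_after_trigger_events(AfterTriggerEventList *events)
{
	AfterTriggerEventChunk *chunk;
	AfterTriggerEvent event;
	int			nevents = 0;

	for_each_event_chunk(event, chunk, *events)
		nevents++;

	return nevents;
}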
3357 /*
3358  * All per-transaction data for the AFTER TRIGGERS module.
3359  *
3360  * AfterTriggersData has the following fields:
3361  *
3362  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3363  * We mark firable events with the current firing cycle's ID so that we can
3364  * tell which ones to work on. This ensures sane behavior if a trigger
3365  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3366  * only fire those events that weren't already scheduled for firing.
3367  *
3368  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3369  * This is saved and restored across failed subtransactions.
3370  *
3371  * events is the current list of deferred events. This is global across
3372  * all subtransactions of the current transaction. In a subtransaction
3373  * abort, we know that the events added by the subtransaction are at the
3374  * end of the list, so it is relatively easy to discard them. The event
3375  * list chunks themselves are stored in event_cxt.
3376  *
3377  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3378  * (-1 when the stack is empty).
3379  *
3380  * query_stack[query_depth] is the per-query-level data, including these fields:
3381  *
3382  * events is a list of AFTER trigger events queued by the current query.
3383  * None of these are valid until the matching AfterTriggerEndQuery call
3384  * occurs. At that point we fire immediate-mode triggers, and append any
3385  * deferred events to the main events list.
3386  *
3387  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3388  * needed by events queued by the current query. (Note: we use just one
3389  * tuplestore even though more than one foreign table might be involved.
3390  * This is okay because tuplestores don't really care what's in the tuples
3391  * they store; but it's possible that someday it'd break.)
3392  *
3393  * tables is a List of AfterTriggersTableData structs for target tables
3394  * of the current query (see below).
3395  *
3396  * maxquerydepth is just the allocated length of query_stack.
3397  *
3398  * trans_stack holds per-subtransaction data, including these fields:
3399  *
3400  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3401  * state data. Each subtransaction level that modifies that state first
3402  * saves a copy, which we use to restore the state if we abort.
3403  *
3404  * events is a copy of the events head/tail pointers,
3405  * which we use to restore those values during subtransaction abort.
3406  *
3407  * query_depth is the subtransaction-start-time value of query_depth,
3408  * which we similarly use to clean up at subtransaction abort.
3409  *
3410  * firing_counter is the subtransaction-start-time value of firing_counter.
3411  * We use this to recognize which deferred triggers were fired (or marked
3412  * for firing) within an aborted subtransaction.
3413  *
3414  * We use GetCurrentTransactionNestLevel() to determine the correct array
3415  * index in trans_stack. maxtransdepth is the number of allocated entries in
3416  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3417  * in cases where errors during subxact abort cause multiple invocations
3418  * of AfterTriggerEndSubXact() at the same nesting depth.)
3419  *
3420  * We create an AfterTriggersTableData struct for each target table of the
3421  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3422  * either transition tables or statement-level triggers. This is used to
3423  * hold the relevant transition tables, as well as info tracking whether
3424  * we already queued the statement triggers. (We use that info to prevent
3425  * firing the same statement triggers more than once per statement, or really
3426  * once per transition table set.) These structs, along with the transition
3427  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3428  * That's sufficient lifespan because we don't allow transition tables to be
3429  * used by deferrable triggers, so they only need to survive until
3430  * AfterTriggerEndQuery.
3431  */
3432 typedef struct AfterTriggersQueryData AfterTriggersQueryData;
3433 typedef struct AfterTriggersTransData AfterTriggersTransData;
3434 typedef struct AfterTriggersTableData AfterTriggersTableData;
3435 
3436 typedef struct AfterTriggersData
3437 {
3438  CommandId firing_counter; /* next firing ID to assign */
3439  SetConstraintState state; /* the active S C state */
3440  AfterTriggerEventList events; /* deferred-event list */
3441  MemoryContext event_cxt; /* memory context for events, if any */
3442 
3443  /* per-query-level data: */
3444  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3445  int query_depth; /* current index in above array */
3446  int maxquerydepth; /* allocated len of above array */
3447 
3448  /* per-subtransaction-level data: */
3449  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3450  int maxtransdepth; /* allocated len of above array */
3451 } AfterTriggersData;
3452 
3453 struct AfterTriggersQueryData
3454 {
3455  AfterTriggerEventList events; /* events pending from this query */
3456  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3457  List *tables; /* list of AfterTriggersTableData, see below */
3458 };
3459 
3460 struct AfterTriggersTransData
3461 {
3462  /* these fields are just for resetting at subtrans abort: */
3463  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3464  AfterTriggerEventList events; /* saved list pointer */
3465  int query_depth; /* saved query_depth */
3466  CommandId firing_counter; /* saved firing_counter */
3467 };
3468 
3469 struct AfterTriggersTableData
3470 {
3471  /* relid + cmdType form the lookup key for these structs: */
3472  Oid relid; /* target table's OID */
3473  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3474  bool closed; /* true when no longer OK to add tuples */
3475  bool before_trig_done; /* did we already queue BS triggers? */
3476  bool after_trig_done; /* did we already queue AS triggers? */
3477  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3478  Tuplestorestate *old_tuplestore; /* "old" transition table, if any */
3479  Tuplestorestate *new_tuplestore; /* "new" transition table, if any */
3480  TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3481 };
3482 
3483 static AfterTriggersData afterTriggers;
3484 
3485 static void AfterTriggerExecute(EState *estate,
3486  AfterTriggerEvent event,
3487  ResultRelInfo *relInfo,
3488  TriggerDesc *trigdesc,
3489  FmgrInfo *finfo,
3490  Instrumentation *instr,
3491  MemoryContext per_tuple_context,
3492  TupleTableSlot *trig_tuple_slot1,
3493  TupleTableSlot *trig_tuple_slot2);
3495  CmdType cmdType);
3497 static SetConstraintState SetConstraintStateCreate(int numalloc);
3500  Oid tgoid, bool tgisdeferred);
3501 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3502 
3503 
3504 /*
3505  * Get the FDW tuplestore for the current trigger query level, creating it
3506  * if necessary.
3507  */
3508 static Tuplestorestate *
3510 {
3511  Tuplestorestate *ret;
3512 
3513  ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
3514  if (ret == NULL)
3515  {
3516  MemoryContext oldcxt;
3517  ResourceOwner saveResourceOwner;
3518 
3519  /*
3520  * Make the tuplestore valid until end of subtransaction. We really
3521  * only need it until AfterTriggerEndQuery().
3522  */
3523  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
3524  saveResourceOwner = CurrentResourceOwner;
3525  CurrentResourceOwner = CurTransactionResourceOwner;
3526 
3527  ret = tuplestore_begin_heap(false, false, work_mem);
3528 
3529  CurrentResourceOwner = saveResourceOwner;
3530  MemoryContextSwitchTo(oldcxt);
3531 
3532  afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
3533  }
3534 
3535  return ret;
3536 }
3537 
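/*
 * Sketch (hypothetical helper, modeled on the function above): the general
 * pattern for a tuplestore that must live until (sub)transaction end is to
 * build it in CurTransactionContext while CurTransactionResourceOwner owns
 * its resources.
 */
static Tuplestorestate *
make_subxact_tuplestore(void)
{
	MemoryContext oldcxt = MemoryContextSwitchTo(CurTransactionContext);
	ResourceOwner saveResourceOwner = CurrentResourceOwner;
	Tuplestorestate *ts;

	CurrentResourceOwner = CurTransactionResourceOwner;
	ts = tuplestore_begin_heap(false, false, work_mem);
	CurrentResourceOwner = saveResourceOwner;
	MemoryContextSwitchTo(oldcxt);

	return ts;
}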
3538 /* ----------
3539  * afterTriggerCheckState()
3540  *
3541  * Returns true if the trigger event is actually in state DEFERRED.
3542  * ----------
3543  */
3544 static bool
3545 afterTriggerCheckState(AfterTriggerShared evtshared)
3546 {
3547  Oid tgoid = evtshared->ats_tgoid;
3548  SetConstraintState state = afterTriggers.state;
3549  int i;
3550 
3551  /*
3552  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3553  * constraints declared NOT DEFERRABLE), the state is always false.
3554  */
3555  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3556  return false;
3557 
3558  /*
3559  * If constraint state exists, SET CONSTRAINTS might have been executed
3560  * either for this trigger or for all triggers.
3561  */
3562  if (state != NULL)
3563  {
3564  /* Check for SET CONSTRAINTS for this specific trigger. */
3565  for (i = 0; i < state->numstates; i++)
3566  {
3567  if (state->trigstates[i].sct_tgoid == tgoid)
3568  return state->trigstates[i].sct_tgisdeferred;
3569  }
3570 
3571  /* Check for SET CONSTRAINTS ALL. */
3572  if (state->all_isset)
3573  return state->all_isdeferred;
3574  }
3575 
3576  /*
3577  * Otherwise return the default state for the trigger.
3578  */
3579  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3580 }
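/*
 * [Editor's aside -- illustrative sketch, not part of trigger.c]
 * afterTriggerCheckState() above resolves deferral in three steps: an
 * explicit SET CONSTRAINTS entry for this trigger wins, then a blanket
 * SET CONSTRAINTS ALL, and finally the trigger's own INITIALLY DEFERRED
 * flag. The self-contained sketch below mirrors that precedence with
 * plain C types; all "demo_" names are hypothetical, not PostgreSQL APIs.
 */
#include <stdbool.h>

struct demo_trig_state
{
	unsigned	oid;			/* trigger OID touched by SET CONSTRAINTS */
	bool		deferred;		/* ... DEFERRED (true) or IMMEDIATE (false) */
};

static bool
demo_is_deferred(unsigned tgoid, bool deferrable, bool initdeferred,
				 const struct demo_trig_state *states, int nstates,
				 bool all_isset, bool all_isdeferred)
{
	if (!deferrable)
		return false;			/* NOT DEFERRABLE: never deferred */
	for (int i = 0; i < nstates; i++)
	{
		if (states[i].oid == tgoid)
			return states[i].deferred;	/* SET CONSTRAINTS <name> ... */
	}
	if (all_isset)
		return all_isdeferred;	/* SET CONSTRAINTS ALL ... */
	return initdeferred;		/* default from the trigger definition */
}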
3581 
3582 
3583 /* ----------
3584  * afterTriggerAddEvent()
3585  *
3586  * Add a new trigger event to the specified queue.
3587  * The passed-in event data is copied.
3588  * ----------
3589  */
3590 static void
3591 afterTriggerAddEvent(AfterTriggerEventList *events,
3592  AfterTriggerEvent event, AfterTriggerShared evtshared)
3593 {
3594  Size eventsize = SizeofTriggerEvent(event);
3595  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3596  AfterTriggerEventChunk *chunk;
3597  AfterTriggerShared newshared;
3598  AfterTriggerEvent newevent;
3599 
3600  /*
3601  * If empty list or not enough room in the tail chunk, make a new chunk.
3602  * We assume here that a new shared record will always be needed.
3603  */
3604  chunk = events->tail;
3605  if (chunk == NULL ||
3606  chunk->endfree - chunk->freeptr < needed)
3607  {
3608  Size chunksize;
3609 
3610  /* Create event context if we didn't already */
3611  if (afterTriggers.event_cxt == NULL)
3612  afterTriggers.event_cxt =
3613  AllocSetContextCreate(TopTransactionContext,
3614  "AfterTriggerEvents",
3615  ALLOCSET_DEFAULT_SIZES);
3616 
3617  /*
3618  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3619  * These numbers are fairly arbitrary, though there is a hard limit at
3620  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3621  * shared records using the available space in ate_flags. Another
3622  * constraint is that if the chunk size gets too huge, the search loop
3623  * below would get slow given a (not too common) usage pattern with
3624  * many distinct event types in a chunk. Therefore, we double the
3625  * preceding chunk size only if there weren't too many shared records
3626  * in the preceding chunk; otherwise we halve it. This gives us some
3627  * ability to adapt to the actual usage pattern of the current query
3628  * while still having large chunk sizes in typical usage. All chunk
3629  * sizes used should be MAXALIGN multiples, to ensure that the shared
3630  * records will be aligned safely.
3631  */
3632 #define MIN_CHUNK_SIZE 1024
3633 #define MAX_CHUNK_SIZE (1024*1024)
3634 
3635 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3636 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3637 #endif
3638 
3639  if (chunk == NULL)
3640  chunksize = MIN_CHUNK_SIZE;
3641  else
3642  {
3643  /* preceding chunk size... */
3644  chunksize = chunk->endptr - (char *) chunk;
3645  /* check number of shared records in preceding chunk */
3646  if ((chunk->endptr - chunk->endfree) <=
3647  (100 * sizeof(AfterTriggerSharedData)))
3648  chunksize *= 2; /* okay, double it */
3649  else
3650  chunksize /= 2; /* too many shared records */
3651  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
3652  }
3653  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
3654  chunk->next = NULL;
3655  chunk->freeptr = CHUNK_DATA_START(chunk);
3656  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
3657  Assert(chunk->endfree - chunk->freeptr >= needed);
3658 
3659  if (events->head == NULL)
3660  events->head = chunk;
3661  else
3662  events->tail->next = chunk;
3663  events->tail = chunk;
3664  /* events->tailfree is now out of sync, but we'll fix it below */
3665  }
3666 
3667  /*
3668  * Try to locate a matching shared-data record already in the chunk. If
3669  * none, make a new one.
3670  */
3671  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
3672  (char *) newshared >= chunk->endfree;
3673  newshared--)
3674  {
3675  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
3676  newshared->ats_relid == evtshared->ats_relid &&
3677  newshared->ats_event == evtshared->ats_event &&
3678  newshared->ats_table == evtshared->ats_table &&
3679  newshared->ats_firing_id == 0)
3680  break;
3681  }
3682  if ((char *) newshared < chunk->endfree)
3683  {
3684  *newshared = *evtshared;
3685  newshared->ats_firing_id = 0; /* just to be sure */
3686  chunk->endfree = (char *) newshared;
3687  }
3688 
3689  /* Insert the data */
3690  newevent = (AfterTriggerEvent) chunk->freeptr;
3691  memcpy(newevent, event, eventsize);
3692  /* ... and link the new event to its shared record */
3693  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
3694  newevent->ate_flags |= (char *) newshared - (char *) newevent;
3695 
3696  chunk->freeptr += eventsize;
3697  events->tailfree = chunk->freeptr;
3698 }
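/*
 * [Editor's aside -- illustrative sketch, not part of trigger.c]
 * In afterTriggerAddEvent() above, event records grow upward from the
 * start of a chunk while shared records grow downward from its end, and
 * each event stores the byte distance to its shared record in the low
 * bits of ate_flags. The simplified, self-contained sketch below shows
 * that linking idea with hypothetical "demo_" types and a stand-in
 * offset mask; it only imitates what GetTriggerSharedData() does for
 * real events.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct { uint32_t flags; } demo_event;	/* stand-in for an event record */
typedef struct { uint32_t tgoid; } demo_shared; /* stand-in for a shared record */

#define DEMO_OFFSET_MASK 0x0FFFFFFFu	/* plays the role of AFTER_TRIGGER_OFFSET */

static void
demo_link_event(demo_event *ev, demo_shared *sh)
{
	size_t		off = (char *) sh - (char *) ev;	/* shared record sits later in the chunk */

	assert(off <= DEMO_OFFSET_MASK);
	ev->flags = (ev->flags & ~DEMO_OFFSET_MASK) | (uint32_t) off;
}

static demo_shared *
demo_get_shared(demo_event *ev)
{
	/* recover the shared record from the stored offset */
	return (demo_shared *) ((char *) ev + (ev->flags & DEMO_OFFSET_MASK));
}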
3699 
3700 /* ----------
3701  * afterTriggerFreeEventList()
3702  *
3703  * Free all the event storage in the given list.
3704  * ----------
3705  */
3706 static void
3707 afterTriggerFreeEventList(AfterTriggerEventList *events)
3708 {
3709  AfterTriggerEventChunk *chunk;
3710 
3711  while ((chunk = events->head) != NULL)
3712  {
3713  events->head = chunk->next;
3714  pfree(chunk);
3715  }
3716  events->tail = NULL;
3717  events->tailfree = NULL;
3718 }
3719 
3720 /* ----------
3721  * afterTriggerRestoreEventList()
3722  *
3723  * Restore an event list to its prior length, removing all the events
3724  * added since it had the value old_events.
3725  * ----------
3726  */
3727 static void
3728 afterTriggerRestoreEventList(AfterTriggerEventList *events,
3729  const AfterTriggerEventList *old_events)
3730 {
3731  AfterTriggerEventChunk *chunk;
3732  AfterTriggerEventChunk *next_chunk;
3733 
3734  if (old_events->tail == NULL)
3735  {
3736  /* restoring to a completely empty state, so free everything */
3737  afterTriggerFreeEventList(events);
3738  }
3739  else
3740  {
3741  *events = *old_events;
3742  /* free any chunks after the last one we want to keep */
3743  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3744  {
3745  next_chunk = chunk->next;
3746  pfree(chunk);
3747  }
3748  /* and clean up the tail chunk to be the right length */
3749  events->tail->next = NULL;
3750  events->tail->freeptr = events->tailfree;
3751 
3752  /*
3753  * We don't make any effort to remove now-unused shared data records.
3754  * They might still be useful, anyway.
3755  */
3756  }
3757 }
3758 
3759 /* ----------
3760  * afterTriggerDeleteHeadEventChunk()
3761  *
3762  * Remove the first chunk of events from the query level's event list.
3763  * Keep any event list pointers elsewhere in the query level's data
3764  * structures in sync.
3765  * ----------
3766  */
3767 static void
3768 afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
3769 {
3770  AfterTriggerEventChunk *target = qs->events.head;
3771  ListCell *lc;
3772 
3773  Assert(target && target->next);
3774 
3775  /*
3776  * First, update any pointers in the per-table data, so that they won't be
3777  * dangling. Resetting obsoleted pointers to NULL will make
3778  * cancel_prior_stmt_triggers start from the list head, which is fine.
3779  */
3780  foreach(lc, qs->tables)
3781  {
3782  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
3783 
3784  if (table->after_trig_done &&
3785  table->after_trig_events.tail == target)
3786  {
3787  table->after_trig_events.head = NULL;
3788  table->after_trig_events.tail = NULL;
3789  table->after_trig_events.tailfree = NULL;
3790  }
3791  }
3792 
3793  /* Now we can flush the head chunk */
3794  qs->events.head = target->next;
3795  pfree(target);
3796 }
3797 
3798 
3799 /* ----------
3800  * AfterTriggerExecute()
3801  *
3802  * Fetch the required tuples back from the heap and fire one
3803  * single trigger function.
3804  *
3805  * Frequently, this will be fired many times in a row for triggers of
3806  * a single relation. Therefore, we cache the open relation and provide
3807  * fmgr lookup cache space at the caller level. (For triggers fired at
3808  * the end of a query, we can even piggyback on the executor's state.)
3809  *
3810  * event: event currently being fired.
3811  * relInfo: result relation info for the event's target relation.
3812  * trigdesc: working copy of rel's trigger info.
3813  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
3814  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
3815  * or NULL if no instrumentation is wanted.
3816  * per_tuple_context: memory context to call trigger function in.
3817  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
3818  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
3819  * ----------
3820  */
3821 static void
3822 AfterTriggerExecute(EState *estate,
3823  AfterTriggerEvent event,
3824  ResultRelInfo *relInfo,
3825  TriggerDesc *trigdesc,
3826  FmgrInfo *finfo, Instrumentation *instr,
3827  MemoryContext per_tuple_context,
3828  TupleTableSlot *trig_tuple_slot1,
3829  TupleTableSlot *trig_tuple_slot2)
3830 {
3831  Relation rel = relInfo->ri_RelationDesc;
3832  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3833  Oid tgoid = evtshared->ats_tgoid;
3834  TriggerData LocTriggerData = {0};
3835  HeapTuple rettuple;
3836  int tgindx;
3837  bool should_free_trig = false;
3838  bool should_free_new = false;
3839 
3840  /*
3841  * Locate trigger in trigdesc.
3842  */
3843  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
3844  {
3845  if (trigdesc->triggers[tgindx].tgoid == tgoid)
3846  {
3847  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
3848  break;
3849  }
3850  }
3851  if (LocTriggerData.tg_trigger == NULL)
3852  elog(ERROR, "could not find trigger %u", tgoid);
3853 
3854  /*
3855  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
3856  * to include time spent re-fetching tuples in the trigger cost.
3857  */
3858  if (instr)
3859  InstrStartNode(instr + tgindx);
3860 
3861  /*
3862  * Fetch the required tuple(s).
3863  */
3864  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
3865  {
3866  case AFTER_TRIGGER_FDW_FETCH:
3867  {
3868  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
3869 
3870  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
3871  trig_tuple_slot1))
3872  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3873 
3874  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3875  TRIGGER_EVENT_UPDATE &&
3876  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
3877  trig_tuple_slot2))
3878  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3879  }
3880  /* fall through */
3881  case AFTER_TRIGGER_FDW_REUSE:
3882 
3883  /*
3884  * Store tuple in the slot so that tg_trigtuple does not reference
3885  * tuplestore memory. (It is formally possible for the trigger
3886  * function to queue trigger events that add to the same
3887  * tuplestore, which can push other tuples out of memory.) The
3888  * distinction is academic, because we start with a minimal tuple
3889  * that is stored as a heap tuple, constructed in a different memory
3890  * context, in the slot anyway.
3891  */
3892  LocTriggerData.tg_trigslot = trig_tuple_slot1;
3893  LocTriggerData.tg_trigtuple =
3894  ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
3895 
3896  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3897  TRIGGER_EVENT_UPDATE)
3898  {
3899  LocTriggerData.tg_newslot = trig_tuple_slot2;
3900  LocTriggerData.tg_newtuple =
3901  ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
3902  }
3903  else
3904  {
3905  LocTriggerData.tg_newtuple = NULL;
3906  }
3907  break;
3908 
3909  default:
3910  if (ItemPointerIsValid(&(event->ate_ctid1)))
3911  {
3912  LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
3913 
3914  if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid1),
3915  SnapshotAny,
3916  LocTriggerData.tg_trigslot))
3917  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3918  LocTriggerData.tg_trigtuple =
3919  ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
3920  }
3921  else
3922  {
3923  LocTriggerData.tg_trigtuple = NULL;
3924  }
3925 
3926  /* don't touch ctid2 if not there */
3927  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
3928  AFTER_TRIGGER_2CTID &&
3929  ItemPointerIsValid(&(event->ate_ctid2)))
3930  {
3931  LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
3932 
3933  if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid2),
3934  SnapshotAny,
3935  LocTriggerData.tg_newslot))
3936  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3937  LocTriggerData.tg_newtuple =
3938  ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
3939  }
3940  else
3941  {
3942  LocTriggerData.tg_newtuple = NULL;
3943  }
3944  }
3945 
3946  /*
3947  * Set up the tuplestore information to let the trigger have access to
3948  * transition tables. When we first make a transition table available to
3949  * a trigger, mark it "closed" so that it cannot change anymore. If any
3950  * additional events of the same type get queued in the current trigger
3951  * query level, they'll go into new transition tables.
3952  */
3953  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
3954  if (evtshared->ats_table)
3955  {
3956  if (LocTriggerData.tg_trigger->tgoldtable)
3957  {
3958  LocTriggerData.tg_oldtable = evtshared->ats_table->old_tuplestore;
3959  evtshared->ats_table->closed = true;
3960  }
3961 
3962  if (LocTriggerData.tg_trigger->tgnewtable)
3963  {
3964  LocTriggerData.tg_newtable = evtshared->ats_table->new_tuplestore;
3965  evtshared->ats_table->closed = true;
3966  }
3967  }
3968 
3969  /*
3970  * Setup the remaining trigger information
3971  */
3972  LocTriggerData.type = T_TriggerData;
3973  LocTriggerData.tg_event =
3974  evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
3975  LocTriggerData.tg_relation = rel;
3976  if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
3977  LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
3978 
3979  MemoryContextReset(per_tuple_context);
3980 
3981  /*
3982  * Call the trigger and throw away any possibly returned updated tuple.
3983  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
3984  */
3985  rettuple = ExecCallTriggerFunc(&LocTriggerData,
3986  tgindx,
3987  finfo,
3988  NULL,
3989  per_tuple_context);
3990  if (rettuple != NULL &&
3991  rettuple != LocTriggerData.tg_trigtuple &&
3992  rettuple != LocTriggerData.tg_newtuple)
3993  heap_freetuple(rettuple);
3994 
3995  /*
3996  * Release resources
3997  */
3998  if (should_free_trig)
3999  heap_freetuple(LocTriggerData.tg_trigtuple);
4000  if (should_free_new)
4001  heap_freetuple(LocTriggerData.tg_newtuple);
4002 
4003  /* don't clear slots' contents if foreign table */
4004  if (trig_tuple_slot1 == NULL)
4005  {
4006  if (LocTriggerData.tg_trigslot)
4007  ExecClearTuple(LocTriggerData.tg_trigslot);
4008  if (LocTriggerData.tg_newslot)
4009  ExecClearTuple(LocTriggerData.tg_newslot);
4010  }
4011 
4012  /*
4013  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4014  * one "tuple returned" (really the number of firings).
4015  */
4016  if (instr)
4017  InstrStopNode(instr + tgindx, 1);
4018 }
4019 
4020 
4021 /*
4022  * afterTriggerMarkEvents()
4023  *
4024  * Scan the given event list for not yet invoked events. Mark the ones
4025  * that can be invoked now with the current firing ID.
4026  *
4027  * If move_list isn't NULL, events that are not to be invoked now are
4028  * transferred to move_list.
4029  *
4030  * When immediate_only is true, do not invoke currently-deferred triggers.
4031  * (This will be false only at main transaction exit.)
4032  *
4033  * Returns true if any invokable events were found.
4034  */
4035 static bool
4036 afterTriggerMarkEvents(AfterTriggerEventList *events,
4037  AfterTriggerEventList *move_list,
4038  bool immediate_only)
4039 {
4040  bool found = false;
4041  AfterTriggerEvent event;
4042  AfterTriggerEventChunk *chunk;
4043 
4044  for_each_event_chunk(event, chunk, *events)
4045  {
4046  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4047  bool defer_it = false;
4048 
4049  if (!(event->ate_flags &
4050  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
4051  {
4052  /*
4053  * This trigger hasn't been called or scheduled yet. Check if we
4054  * should call it now.
4055  */
4056  if (immediate_only && afterTriggerCheckState(evtshared))
4057  {
4058  defer_it = true;
4059  }
4060  else
4061  {
4062  /*
4063  * Mark it as to be fired in this firing cycle.
4064  */
4065  evtshared->ats_firing_id = afterTriggers.firing_counter;
4066  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4067  found = true;
4068  }
4069  }
4070 
4071  /*
4072  * If it's deferred, move it to move_list, if requested.
4073  */
4074  if (defer_it && move_list != NULL)
4075  {
4076  /* add it to move_list */
4077  afterTriggerAddEvent(move_list, event, evtshared);
4078  /* mark original copy "done" so we don't do it again */
4079  event->ate_flags |= AFTER_TRIGGER_DONE;
4080  }
4081  }
4082 
4083  return found;
4084 }
4085 
4086 /*
4087  * afterTriggerInvokeEvents()
4088  *
4089  * Scan the given event list for events that are marked as to be fired
4090  * in the current firing cycle, and fire them.
4091  *
4092  * If estate isn't NULL, we use its result relation info to avoid repeated
4093  * openings and closing of trigger target relations. If it is NULL, we
4094  * make one locally to cache the info in case there are multiple trigger
4095  * events per rel.
4096  *
4097  * When delete_ok is true, it's safe to delete fully-processed events.
4098  * (We are not very tense about that: we simply reset a chunk to be empty
4099  * if all its events got fired. The objective here is just to avoid useless
4100  * rescanning of events when a trigger queues new events during transaction
4101  * end, so it's not necessary to worry much about the case where only
4102  * some events are fired.)
4103  *
4104  * Returns true if no unfired events remain in the list (this allows us
4105  * to avoid repeating afterTriggerMarkEvents).
4106  */
4107 static bool
4108 afterTriggerInvokeEvents(AfterTriggerEventList *events,
4109  CommandId firing_id,
4110  EState *estate,
4111  bool delete_ok)
4112 {
4113  bool all_fired = true;
4114  AfterTriggerEventChunk *chunk;
4115  MemoryContext per_tuple_context;
4116  bool local_estate = false;
4117  ResultRelInfo *rInfo = NULL;
4118  Relation rel = NULL;
4119  TriggerDesc *trigdesc = NULL;
4120  FmgrInfo *finfo = NULL;
4121  Instrumentation *instr = NULL;
4122  TupleTableSlot *slot1 = NULL,
4123  *slot2 = NULL;
4124 
4125  /* Make a local EState if need be */
4126  if (estate == NULL)
4127  {
4128  estate = CreateExecutorState();
4129  local_estate = true;
4130  }
4131 
4132  /* Make a per-tuple memory context for trigger function calls */
4133  per_tuple_context =
4134  AllocSetContextCreate(CurrentMemoryContext,
4135  "AfterTriggerTupleContext",
4136  ALLOCSET_DEFAULT_SIZES);
4137 
4138  for_each_chunk(chunk, *events)
4139  {
4140  AfterTriggerEvent event;
4141  bool all_fired_in_chunk = true;
4142 
4143  for_each_event(event, chunk)
4144  {
4145  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4146 
4147  /*
4148  * Is it one for me to fire?
4149  */
4150  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4151  evtshared->ats_firing_id == firing_id)
4152  {
4153  /*
4154  * So let's fire it... but first, find the correct relation if
4155  * this is not the same relation as before.
4156  */
4157  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4158  {
4159  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
4160  rel = rInfo->ri_RelationDesc;
4161  trigdesc = rInfo->ri_TrigDesc;
4162  finfo = rInfo->ri_TrigFunctions;
4163  instr = rInfo->ri_TrigInstrument;
4164  if (slot1 != NULL)
4165  {
4166  ExecDropSingleTupleTableSlot(slot1);
4167  ExecDropSingleTupleTableSlot(slot2);
4168  slot1 = slot2 = NULL;
4169  }
4170  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4171  {
4172  slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4173  &TTSOpsMinimalTuple);
4174  slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4175  &TTSOpsMinimalTuple);
4176  }
4177  if (trigdesc == NULL) /* should not happen */
4178  elog(ERROR, "relation %u has no triggers",
4179  evtshared->ats_relid);
4180  }
4181 
4182  /*
4183  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4184  * still set, so recursive examinations of the event list
4185  * won't try to re-fire it.
4186  */
4187  AfterTriggerExecute(estate, event, rInfo, trigdesc, finfo, instr,
4188  per_tuple_context, slot1, slot2);
4189 
4190  /*
4191  * Mark the event as done.
4192  */
4193  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4194  event->ate_flags |= AFTER_TRIGGER_DONE;
4195  }
4196  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4197  {
4198  /* something remains to be done */
4199  all_fired = all_fired_in_chunk = false;
4200  }
4201  }
4202 
4203  /* Clear the chunk if delete_ok and nothing left of interest */
4204  if (delete_ok && all_fired_in_chunk)
4205  {
4206  chunk->freeptr = CHUNK_DATA_START(chunk);
4207  chunk->endfree = chunk->endptr;
4208 
4209  /*
4210  * If it's last chunk, must sync event list's tailfree too. Note
4211  * that delete_ok must NOT be passed as true if there could be
4212  * additional AfterTriggerEventList values pointing at this event
4213  * list, since we'd fail to fix their copies of tailfree.
4214  */
4215  if (chunk == events->tail)
4216  events->tailfree = chunk->freeptr;
4217  }
4218  }
4219  if (slot1 != NULL)
4220  {
4221  ExecDropSingleTupleTableSlot(slot1);
4222  ExecDropSingleTupleTableSlot(slot2);
4223  }
4224 
4225  /* Release working resources */
4226  MemoryContextDelete(per_tuple_context);
4227 
4228  if (local_estate)
4229  {
4230  ExecCleanUpTriggerState(estate);
4231  ExecResetTupleTable(estate->es_tupleTable, false);
4232  FreeExecutorState(estate);
4233  }
4234 
4235  return all_fired;
4236 }
4237 
4238 
4239 /*
4240  * GetAfterTriggersTableData
4241  *
4242  * Find or create an AfterTriggersTableData struct for the specified
4243  * trigger event (relation + operation type). Ignore existing structs
4244  * marked "closed"; we don't want to put any additional tuples into them,
4245  * nor change their stmt-triggers-fired state.
4246  *
4247  * Note: the AfterTriggersTableData list is allocated in the current
4248  * (sub)transaction's CurTransactionContext. This is OK because
4249  * we don't need it to live past AfterTriggerEndQuery.
4250  */
4251 static AfterTriggersTableData *
4252 GetAfterTriggersTableData(Oid relid, CmdType cmdType)
4253 {
4254  AfterTriggersTableData *table;
4255  AfterTriggersQueryData *qs;
4256  MemoryContext oldcxt;
4257  ListCell *lc;
4258 
4259  /* Caller should have ensured query_depth is OK. */
4260  Assert(afterTriggers.query_depth >= 0 &&
4261  afterTriggers.query_depth < afterTriggers.maxquerydepth);
4262  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4263 
4264  foreach(lc, qs->tables)
4265  {
4266  table = (AfterTriggersTableData *) lfirst(lc);
4267  if (table->relid == relid && table->cmdType == cmdType &&
4268  !table->closed)
4269  return table;
4270  }
4271 
4272  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4273 
4274  table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
4275  table->relid = relid;
4276  table->cmdType = cmdType;
4277  qs->tables = lappend(qs->tables, table);
4278 
4279  MemoryContextSwitchTo(oldcxt);
4280 
4281  return table;
4282 }
4283 
4284 
4285 /*
4286  * MakeTransitionCaptureState
4287  *
4288  * Make a TransitionCaptureState object for the given TriggerDesc, target
4289  * relation, and operation type. The TCS object holds all the state needed
4290  * to decide whether to capture tuples in transition tables.
4291  *
4292  * If there are no triggers in 'trigdesc' that request relevant transition
4293  * tables, then return NULL.
4294  *
4295  * The resulting object can be passed to the ExecAR* functions. The caller
4296  * should set tcs_map or tcs_original_insert_tuple as appropriate when dealing
4297  * with child tables.
4298  *
4299  * Note that we copy the flags from a parent table into this struct (rather
4300  * than subsequently using the relation's TriggerDesc directly) so that we can
4301  * use it to control collection of transition tuples from child tables.
4302  *
4303  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4304  * on the same table during one query should share one transition table.
4305  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4306  * looked up using the table OID + CmdType, and are merely referenced by
4307  * the TransitionCaptureState objects we hand out to callers.
4308  */
4309 TransitionCaptureState *
4310 MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
4311 {
4312  TransitionCaptureState *state;
4313  bool need_old,
4314  need_new;
4315  AfterTriggersTableData *table;
4316  MemoryContext oldcxt;
4317  ResourceOwner saveResourceOwner;
4318 
4319  if (trigdesc == NULL)
4320  return NULL;
4321 
4322  /* Detect which table(s) we need. */
4323  switch (cmdType)
4324  {
4325  case CMD_INSERT:
4326  need_old = false;
4327  need_new = trigdesc->trig_insert_new_table;
4328  break;
4329  case CMD_UPDATE:
4330  need_old = trigdesc->trig_update_old_table;
4331  need_new = trigdesc->trig_update_new_table;
4332  break;
4333  case CMD_DELETE:
4334  need_old = trigdesc->trig_delete_old_table;
4335  need_new = false;
4336  break;
4337  default:
4338  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4339  need_old = need_new = false; /* keep compiler quiet */
4340  break;
4341  }
4342  if (!need_old && !need_new)
4343  return NULL;
4344 
4345  /* Check state, like AfterTriggerSaveEvent. */
4346  if (afterTriggers.query_depth < 0)
4347  elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4348 
4349  /* Be sure we have enough space to record events at this query depth. */
4350  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4351  AfterTriggerEnlargeQueryState();
4352 
4353  /*
4354  * Find or create an AfterTriggersTableData struct to hold the
4355  * tuplestore(s). If there's a matching struct but it's marked closed,
4356  * ignore it; we need a newer one.
4357  *
4358  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4359  * allocated in the current (sub)transaction's CurTransactionContext, and
4360  * the tuplestores are managed by the (sub)transaction's resource owner.
4361  * This is sufficient lifespan because we do not allow triggers using
4362  * transition tables to be deferrable; they will be fired during
4363  * AfterTriggerEndQuery, after which it's okay to delete the data.
4364  */
4365  table = GetAfterTriggersTableData(relid, cmdType);
4366 
4367  /* Now create required tuplestore(s), if we don't have them already. */
4368  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4369  saveResourceOwner = CurrentResourceOwner;
4370  CurrentResourceOwner = CurTransactionResourceOwner;
4371 
4372  if (need_old && table->old_tuplestore == NULL)
4373  table->old_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4374  if (need_new && table->new_tuplestore == NULL)
4375  table->new_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4376 
4377  CurrentResourceOwner = saveResourceOwner;
4378  MemoryContextSwitchTo(oldcxt);
4379 
4380  /* Now build the TransitionCaptureState struct, in caller's context */
4381  state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState));
4382  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4383  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4384  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4385  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4386  state->tcs_private = table;
4387 
4388  return state;
4389 }
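/*
 * [Editor's aside -- illustrative only, not part of trigger.c]
 * Transition capture is requested at the SQL level through the
 * REFERENCING clause of CREATE TRIGGER; a statement-level trigger such as
 *
 *     CREATE TRIGGER audit_upd
 *         AFTER UPDATE ON accounts
 *         REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
 *         FOR EACH STATEMENT
 *         EXECUTE FUNCTION audit_accounts();
 *
 * sets the trig_update_old_table/trig_update_new_table flags in the
 * relation's TriggerDesc, which is what makes MakeTransitionCaptureState()
 * return a non-NULL state. (Table and function names here are
 * hypothetical examples.)
 */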
4390 
4391 
4392 /* ----------
4393  * AfterTriggerBeginXact()
4394  *
4395  * Called at transaction start (either BEGIN or implicit for single
4396  * statement outside of transaction block).
4397  * ----------
4398  */
4399 void
4400 AfterTriggerBeginXact(void)
4401 {
4402  /*
4403  * Initialize after-trigger state structure to empty
4404  */
4405  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4406  afterTriggers.query_depth = -1;
4407 
4408  /*
4409  * Verify that there is no leftover state remaining. If these assertions
4410  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4411  * up properly.
4412  */
4413  Assert(afterTriggers.state == NULL);
4414  Assert(afterTriggers.query_stack == NULL);
4415  Assert(afterTriggers.maxquerydepth == 0);
4416  Assert(afterTriggers.event_cxt == NULL);
4417  Assert(afterTriggers.events.head == NULL);
4418  Assert(afterTriggers.trans_stack == NULL);
4419  Assert(afterTriggers.maxtransdepth == 0);
4420 }
4421 
4422 
4423 /* ----------
4424  * AfterTriggerBeginQuery()
4425  *
4426  * Called just before we start processing a single query within a
4427  * transaction (or subtransaction). Most of the real work gets deferred
4428  * until somebody actually tries to queue a trigger event.
4429  * ----------
4430  */
4431 void
4432 AfterTriggerBeginQuery(void)
4433 {
4434  /* Increase the query stack depth */
4435  afterTriggers.query_depth++;
4436 }
4437 
4438 
4439 /* ----------
4440  * AfterTriggerEndQuery()
4441  *
4442  * Called after one query has been completely processed. At this time
4443  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4444  * transfer deferred trigger events to the global deferred-trigger list.
4445  *
4446  * Note that this must be called BEFORE closing down the executor
4447  * with ExecutorEnd, because we make use of the EState's info about
4448  * target relations. Normally it is called from ExecutorFinish.
4449  * ----------
4450  */
4451 void
4452 AfterTriggerEndQuery(EState *estate)
4453 {
4454  AfterTriggersQueryData *qs;
4455 
4456  /* Must be inside a query, too */
4457  Assert(afterTriggers.query_depth >= 0);
4458 
4459  /*
4460  * If we never even got as far as initializing the event stack, there
4461  * certainly won't be any events, so exit quickly.
4462  */
4463  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4464  {
4465  afterTriggers.query_depth--;
4466  return;
4467  }
4468 
4469  /*
4470  * Process all immediate-mode triggers queued by the query, and move the
4471  * deferred ones to the main list of deferred events.
4472  *
4473  * Notice that we decide which ones will be fired, and put the deferred
4474  * ones on the main list, before anything is actually fired. This ensures
4475  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4476  * IMMEDIATE: all events we have decided to defer will be available for it
4477  * to fire.
4478  *
4479  * We loop in case a trigger queues more events at the same query level.
4480  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4481  * will instead fire any triggers in a dedicated query level. Foreign key
4482  * enforcement triggers do add to the current query level, thanks to their
4483  * passing fire_triggers = false to SPI_execute_snapshot(). Other
4484  * C-language triggers might do likewise.
4485  *
4486  * If we find no firable events, we don't have to increment
4487  * firing_counter.
4488  */
4489  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4490 
4491  for (;;)
4492  {
4493  if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
4494  {
4495  CommandId firing_id = afterTriggers.firing_counter++;
4496  AfterTriggerEventChunk *oldtail = qs->events.tail;
4497 
4498  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
4499  break; /* all fired */
4500 
4501  /*
4502  * Firing a trigger could result in query_stack being repalloc'd,
4503  * so we must recalculate qs after each afterTriggerInvokeEvents
4504  * call. Furthermore, it's unsafe to pass delete_ok = true here,
4505  * because that could cause afterTriggerInvokeEvents to try to
4506  * access qs->events after the stack has been repalloc'd.
4507  */
4508  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4509 
4510  /*
4511  * We'll need to scan the events list again. To reduce the cost
4512  * of doing so, get rid of completely-fired chunks. We know that
4513  * all events were marked IN_PROGRESS or DONE at the conclusion of
4514  * afterTriggerMarkEvents, so any still-interesting events must
4515  * have been added after that, and so must be in the chunk that
4516  * was then the tail chunk, or in later chunks. So, zap all
4517  * chunks before oldtail. This is approximately the same set of
4518  * events we would have gotten rid of by passing delete_ok = true.
4519  */
4520  Assert(oldtail != NULL);
4521  while (qs->events.head != oldtail)
4522  afterTriggerDeleteHeadEventChunk(qs);
4523  }
4524  else
4525  break;
4526  }
4527 
4528  /* Release query-level-local storage, including tuplestores if any */
4529  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
4530 
4531  afterTriggers.query_depth--;
4532 }
4533 
4534 
4535 /*
4536  * AfterTriggerFreeQuery
4537  * Release subsidiary storage for a trigger query level.
4538  * This includes closing down tuplestores.
4539  * Note: it's important for this to be safe if interrupted by an error
4540  * and then called again for the same query level.
4541  */
4542 static void
4543 AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
4544 {
4545  Tuplestorestate *ts;
4546  List *tables;
4547  ListCell *lc;
4548 
4549  /* Drop the trigger events */
4550  afterTriggerFreeEventList(&qs->events);
4551 
4552  /* Drop FDW tuplestore if any */
4553  ts = qs->fdw_tuplestore;
4554  qs->fdw_tuplestore = NULL;
4555  if (ts)
4556  tuplestore_end(ts);
4557 
4558  /* Release per-table subsidiary storage */
4559  tables = qs->tables;
4560  foreach(lc, tables)
4561  {
4562  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4563 
4564  ts = table->old_tuplestore;
4565  table->old_tuplestore = NULL;
4566  if (ts)
4567  tuplestore_end(ts);
4568  ts = table->new_tuplestore;
4569  table->new_tuplestore = NULL;
4570  if (ts)
4571  tuplestore_end(ts);
4572  }
4573 
4574  /*
4575  * Now free the AfterTriggersTableData structs and list cells. Reset list
4576  * pointer first; if list_free_deep somehow gets an error, better to leak
4577  * that storage than have an infinite loop.
4578  */
4579  qs->tables = NIL;
4580  list_free_deep(tables);
4581 }
4582 
4583 
4584 /* ----------
4585  * AfterTriggerFireDeferred()
4586  *
4587  * Called just before the current transaction is committed. At this
4588  * time we invoke all pending DEFERRED triggers.
4589  *
4590  * It is possible for other modules to queue additional deferred triggers
4591  * during pre-commit processing; therefore xact.c may have to call this
4592  * multiple times.
4593  * ----------
4594  */
4595 void
4596 AfterTriggerFireDeferred(void)
4597 {
4598  AfterTriggerEventList *events;
4599  bool snap_pushed = false;
4600 
4601  /* Must not be inside a query */
4602  Assert(afterTriggers.query_depth == -1);
4603 
4604  /*
4605  * If there are any triggers to fire, make sure we have set a snapshot for
4606  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
4607  * can't assume ActiveSnapshot is valid on entry.)
4608  */
4609  events = &afterTriggers.events;
4610  if (events->head != NULL)
4611  {
4612  PushActiveSnapshot(GetTransactionSnapshot());
4613  snap_pushed = true;
4614  }
4615 
4616  /*
4617  * Run all the remaining triggers. Loop until they are all gone, in case
4618  * some trigger queues more for us to do.
4619  */
4620  while (afterTriggerMarkEvents(events, NULL, false))
4621  {
4622  CommandId firing_id = afterTriggers.firing_counter++;
4623 
4624  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4625  break; /* all fired */
4626  }
4627 
4628  /*
4629  * We don't bother freeing the event list, since it will go away anyway
4630  * (and more efficiently than via pfree) in AfterTriggerEndXact.
4631  */
4632 
4633  if (snap_pushed)
4634  PopActiveSnapshot();
4635 }
4636 
4637 
4638 /* ----------
4639  * AfterTriggerEndXact()
4640  *
4641  * The current transaction is finishing.
4642  *
4643  * Any unfired triggers are canceled so we simply throw
4644  * away anything we know.
4645  *
4646  * Note: it is possible for this to be called repeatedly in case of
4647  * error during transaction abort; therefore, do not complain if
4648  * already closed down.
4649  * ----------
4650  */
4651 void
4652 AfterTriggerEndXact(bool isCommit)
4653 {
4654  /*
4655  * Forget the pending-events list.
4656  *
4657  * Since all the info is in TopTransactionContext or children thereof, we
4658  * don't really need to do anything to reclaim memory. However, the
4659  * pending-events list could be large, and so it's useful to discard it as
4660  * soon as possible --- especially if we are aborting because we ran out
4661  * of memory for the list!
4662  */
4663  if (afterTriggers.event_cxt)
4664  {
4665  MemoryContextDelete(afterTriggers.event_cxt);
4666  afterTriggers.event_cxt = NULL;
4667  afterTriggers.events.head = NULL;
4668  afterTriggers.events.tail = NULL;
4669  afterTriggers.events.tailfree = NULL;
4670  }
4671 
4672  /*
4673  * Forget any subtransaction state as well. Since this can't be very
4674  * large, we let the eventual reset of TopTransactionContext free the
4675  * memory instead of doing it here.
4676  */
4677  afterTriggers.trans_stack = NULL;
4678  afterTriggers.maxtransdepth = 0;
4679 
4680 
4681  /*
4682  * Forget the query stack and constraint-related state information. As
4683  * with the subtransaction state information, we don't bother freeing the
4684  * memory here.
4685  */
4686  afterTriggers.query_stack = NULL;
4687  afterTriggers.maxquerydepth = 0;
4688  afterTriggers.state = NULL;
4689 
4690  /* No more afterTriggers manipulation until next transaction starts. */
4691  afterTriggers.query_depth = -1;
4692 }
4693 
4694 /*
4695  * AfterTriggerBeginSubXact()
4696  *
4697  * Start a subtransaction.
4698  */
4699 void
4700 AfterTriggerBeginSubXact(void)
4701 {
4702  int my_level = GetCurrentTransactionNestLevel();
4703 
4704  /*
4705  * Allocate more space in the trans_stack if needed. (Note: because the
4706  * minimum nest level of a subtransaction is 2, we waste the first couple
4707  * entries of the array; not worth the notational effort to avoid it.)
4708  */
4709  while (my_level >= afterTriggers.maxtransdepth)
4710  {
4711  if (afterTriggers.maxtransdepth == 0)
4712  {
4713  /* Arbitrarily initialize for max of 8 subtransaction levels */
4714  afterTriggers.trans_stack = (AfterTriggersTransData *)
4715  MemoryContextAlloc(TopTransactionContext,
4716  8 * sizeof(AfterTriggersTransData));
4717  afterTriggers.maxtransdepth = 8;
4718  }
4719  else
4720  {
4721  /* repalloc will keep the stack in the same context */
4722  int new_alloc = afterTriggers.maxtransdepth * 2;
4723 
4724  afterTriggers.trans_stack = (AfterTriggersTransData *)
4725  repalloc(afterTriggers.trans_stack,
4726  new_alloc * sizeof(AfterTriggersTransData));
4727  afterTriggers.maxtransdepth = new_alloc;
4728  }
4729  }
4730 
4731  /*
4732  * Push the current information into the stack. The SET CONSTRAINTS state
4733  * is not saved until/unless changed. Likewise, we don't make a
4734  * per-subtransaction event context until needed.
4735  */
4736  afterTriggers.trans_stack[my_level].state = NULL;
4737  afterTriggers.trans_stack[my_level].events = afterTriggers.events;
4738  afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
4739  afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
4740 }
4741 
4742 /*
4743  * AfterTriggerEndSubXact()
4744  *
4745  * The current subtransaction is ending.
4746  */
4747 void
4748 AfterTriggerEndSubXact(bool isCommit)
4749 {
4750  int my_level = GetCurrentTransactionNestLevel();
4751  SetConstraintState state;
4752  AfterTriggerEvent event;
4753  AfterTriggerEventChunk *chunk;
4754  CommandId subxact_firing_id;
4755 
4756  /*
4757  * Pop the prior state if needed.
4758  */
4759  if (isCommit)
4760  {
4761  Assert(my_level < afterTriggers.maxtransdepth);
4762  /* If we saved a prior state, we don't need it anymore */
4763  state = afterTriggers.trans_stack[my_level].state;
4764  if (state != NULL)
4765  pfree(state);
4766  /* this avoids double pfree if error later: */
4767  afterTriggers.trans_stack[my_level].state = NULL;
4768  Assert(afterTriggers.query_depth ==
4769  afterTriggers.trans_stack[my_level].query_depth);
4770  }
4771  else
4772  {
4773  /*
4774  * Aborting. It is possible subxact start failed before calling
4775  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
4776  * trans_stack levels that aren't there.
4777  */
4778  if (my_level >= afterTriggers.maxtransdepth)
4779  return;
4780 
4781  /*
4782  * Release query-level storage for queries being aborted, and restore
4783  * query_depth to its pre-subxact value. This assumes that a
4784  * subtransaction will not add events to query levels started in an
4785  * earlier transaction state.
4786  */
4787  while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
4788  {
4789  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
4790  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
4791  afterTriggers.query_depth--;
4792  }
4793  Assert(afterTriggers.query_depth ==
4794  afterTriggers.trans_stack[my_level].query_depth);
4795 
4796  /*
4797  * Restore the global deferred-event list to its former length,
4798  * discarding any events queued by the subxact.
4799  */
4800  afterTriggerRestoreEventList(&afterTriggers.events,
4801  &afterTriggers.trans_stack[my_level].events);
4802 
4803  /*
4804  * Restore the trigger state. If the saved state is NULL, then this
4805  * subxact didn't save it, so it doesn't need restoring.
4806  */
4807  state = afterTriggers.trans_stack[my_level].state;
4808  if (state != NULL)
4809  {
4810  pfree(afterTriggers.state);
4811  afterTriggers.state = state;
4812  }
4813  /* this avoids double pfree if error later: */
4814  afterTriggers.trans_stack[my_level].state = NULL;
4815 
4816  /*
4817  * Scan for any remaining deferred events that were marked DONE or IN
4818  * PROGRESS by this subxact or a child, and un-mark them. We can
4819  * recognize such events because they have a firing ID greater than or
4820  * equal to the firing_counter value we saved at subtransaction start.
4821  * (This essentially assumes that the current subxact includes all
4822  * subxacts started after it.)
4823  */
4824  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
4825  for_each_event_chunk(event, chunk, afterTriggers.events)
4826  {
4827  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4828 
4829  if (event->ate_flags &
4830  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
4831  {
4832  if (evtshared->ats_firing_id >= subxact_firing_id)
4833  event->ate_flags &=
4834  ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
4835  }
4836  }
4837  }
4838 }
4839 
4840 /* ----------
4841  * AfterTriggerEnlargeQueryState()
4842  *
4843  * Prepare the necessary state so that we can record AFTER trigger events
4844  * queued by a query. It is allowed to have nested queries within a
4845  * (sub)transaction, so we need to have separate state for each query
4846  * nesting level.
4847  * ----------
4848  */
4849 static void
4850 AfterTriggerEnlargeQueryState(void)
4851 {
4852  int init_depth = afterTriggers.maxquerydepth;
4853 
4854  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
4855 
4856  if (afterTriggers.maxquerydepth == 0)
4857  {
4858  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
4859 
4860  afterTriggers.query_stack = (AfterTriggersQueryData *)
4861  MemoryContextAlloc(TopTransactionContext,
4862  new_alloc * sizeof(AfterTriggersQueryData));
4863  afterTriggers.maxquerydepth = new_alloc;
4864  }
4865  else
4866  {
4867  /* repalloc will keep the stack in the same context */
4868  int old_alloc = afterTriggers.maxquerydepth;
4869  int new_alloc = Max(afterTriggers.query_depth + 1,
4870  old_alloc * 2);
4871 
4872  afterTriggers.query_stack = (AfterTriggersQueryData *)
4873  repalloc(afterTriggers.query_stack,
4874  new_alloc * sizeof(AfterTriggersQueryData));
4875  afterTriggers.maxquerydepth = new_alloc;
4876  }
4877 
4878  /* Initialize new array entries to empty */
4879  while (init_depth < afterTriggers.maxquerydepth)
4880  {
4881  AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth];
4882 
4883  qs->events.head = NULL;
4884  qs->events.tail = NULL;
4885  qs->events.tailfree = NULL;
4886  qs->fdw_tuplestore = NULL;
4887  qs->tables = NIL;
4888 
4889  ++init_depth;
4890  }
4891 }
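/*
 * [Editor's aside -- illustrative sketch, not part of trigger.c]
 * The growth policy in AfterTriggerEnlargeQueryState() above is: the
 * first allocation gets at least 8 entries, later allocations at least
 * double the old size, and in both cases at least query_depth + 1
 * entries. A minimal stand-alone restatement of that arithmetic (the
 * "demo_" name is hypothetical):
 */
static int
demo_new_query_stack_alloc(int query_depth, int old_alloc)
{
	int			need = query_depth + 1;
	int			grow = (old_alloc == 0) ? 8 : old_alloc * 2;

	return (need > grow) ? need : grow;
}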
4892 
4893 /*
4894  * Create an empty SetConstraintState with room for numalloc trigstates
4895  */
4896 static SetConstraintState
4897 SetConstraintStateCreate(int numalloc)
4898 {
4899  SetConstraintState state;
4900 
4901  /* Behave sanely with numalloc == 0 */
4902  if (numalloc <= 0)
4903  numalloc = 1;
4904 
4905  /*
4906  * We assume that zeroing will correctly initialize the state values.
4907  */
4908  state = (SetConstraintState)
4909  MemoryContextAllocZero(TopTransactionContext,
4910  offsetof(SetConstraintStateData, trigstates) +
4911  numalloc * sizeof(SetConstraintTriggerData));
4912 
4913  state->numalloc = numalloc;
4914 
4915  return state;
4916 }
4917 
4918 /*
4919  * Copy a SetConstraintState
4920  */
4921 static SetConstraintState
4922 SetConstraintStateCopy(SetConstraintState origstate)
4923 {
4924  SetConstraintState state;
4925 
4926  state = SetConstraintStateCreate(origstate->numstates);
4927 
4928  state->all_isset = origstate->all_isset;
4929  state->all_isdeferred = origstate->all_isdeferred;
4930  state->numstates = origstate->numstates;
4931  memcpy(state->trigstates, origstate->trigstates,
4932  origstate->numstates * sizeof(SetConstraintTriggerData));
4933 
4934  return state;
4935 }
4936 
4937 /*
4938  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
4939  * pointer to the state object (it will change if we have to repalloc).
4940  */
4941 static SetConstraintState
4942 SetConstraintStateAddItem(SetConstraintState state,
4943  Oid tgoid, bool tgisdeferred)
4944 {
4945  if (state->numstates >= state->numalloc)
4946  {
4947  int newalloc = state->numalloc * 2;
4948 
4949  newalloc = Max(newalloc, 8); /* in case original has size 0 */
4950  state = (SetConstraintState)
4951  repalloc(state,
4952  offsetof(SetConstraintStateData, trigstates) +
4953  newalloc * sizeof(SetConstraintTriggerData));
4954  state->numalloc = newalloc;
4955  Assert(state->numstates < state->numalloc);
4956  }
4957 
4958  state->trigstates[state->numstates].sct_tgoid = tgoid;
4959  state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
4960  state->numstates++;
4961 
4962  return state;
4963 }
4964 
4965 /* ----------
4966  * AfterTriggerSetState()
4967  *
4968  * Execute the SET CONSTRAINTS ... utility command.
4969  * ----------
4970  */
4971 void
4972 AfterTriggerSetState(ConstraintsSetStmt *stmt)
4973 {
4974  int my_level = GetCurrentTransactionNestLevel();
4975 
4976  /* If we haven't already done so, initialize our state. */
4977  if (afterTriggers.state == NULL)
4978  afterTriggers.state = SetConstraintStateCreate(8);
4979 
4980  /*
4981  * If in a subtransaction, and we didn't save the current state already,
4982  * save it so it can be restored if the subtransaction aborts.
4983  */
4984  if (my_level > 1 &&
4985  afterTriggers.trans_stack[my_level].state == NULL)
4986  {
4987  afterTriggers.trans_stack[my_level].state =
4988  SetConstraintStateCopy(afterTriggers.state);
4989  }
4990 
4991  /*
4992  * Handle SET CONSTRAINTS ALL ...
4993  */
4994  if (stmt->constraints == NIL)
4995  {
4996  /*
4997  * Forget any previous SET CONSTRAINTS commands in this transaction.
4998  */
4999  afterTriggers.state->numstates = 0;
5000 
5001  /*
5002  * Set the per-transaction ALL state to known.
5003  */
5004  afterTriggers.state->all_isset = true;
5005  afterTriggers.state->all_isdeferred = stmt->deferred;
5006  }
5007  else
5008  {
5009  Relation conrel;
5010  Relation tgrel;
5011  List *conoidlist = NIL;
5012  List *tgoidlist = NIL;
5013  ListCell *lc;
5014 
5015  /*
5016  * Handle SET CONSTRAINTS constraint-name [, ...]
5017  *
5018  * First, identify all the named constraints and make a list of their
5019  * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
5020  * the same name within a schema, the specifications are not
5021  * necessarily unique. Our strategy is to target all matching
5022  * constraints within the first search-path schema that has any
5023  * matches, but disregard matches in schemas beyond the first match.
5024  * (This is a bit odd but it's the historical behavior.)
5025  *
5026  * A constraint in a partitioned table may have corresponding
5027  * constraints in the partitions. Grab those too.
5028  */
5029  conrel = table_open(ConstraintRelationId, AccessShareLock);
5030 
5031  foreach(lc, stmt->constraints)
5032  {
5033  RangeVar *constraint = lfirst(lc);
5034  bool found;
5035  List *namespacelist;
5036  ListCell *nslc;
5037 
5038  if (constraint->catalogname)
5039  {
5040  if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
5041  ereport(ERROR,
5042  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5043  errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
5044  constraint->catalogname, constraint->schemaname,
5045  constraint->relname)));
5046  }
5047 
5048  /*
5049  * If we're given the schema name with the constraint, look only
5050  * in that schema. If given a bare constraint name, use the
5051  * search path to find the first matching constraint.
5052  */
5053  if (constraint->schemaname)
5054  {
5055  Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
5056  false);
5057 
5058  namespacelist = list_make1_oid(namespaceId);
5059  }
5060  else
5061  {
5062  namespacelist = fetch_search_path(true);
5063  }
5064 
5065  found = false;
5066  foreach(nslc, namespacelist)
5067  {
5068  Oid namespaceId = lfirst_oid(nslc);
5069  SysScanDesc conscan;
5070  ScanKeyData skey[2];
5071  HeapTuple tup;
5072 
5073  ScanKeyInit(&skey[0],
5074  Anum_pg_constraint_conname,
5075  BTEqualStrategyNumber, F_NAMEEQ,
5076  CStringGetDatum(constraint->relname));
5077  ScanKeyInit(&skey[1],
5078  Anum_pg_constraint_connamespace,
5079  BTEqualStrategyNumber, F_OIDEQ,
5080  ObjectIdGetDatum(namespaceId));
5081 
5082  conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
5083  true, NULL, 2, skey);
5084 
5085  while (HeapTupleIsValid(tup = systable_getnext(conscan)))
5086  {
5087  Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
5088 
5089  if (con->condeferrable)
5090  conoidlist = lappend_oid(conoidlist, con->oid);
5091  else if (stmt->deferred)
5092  ereport(ERROR,
5093  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
5094  errmsg("constraint \"%s\" is not deferrable",
5095  constraint->relname)));
5096  found = true;
5097  }
5098 
5099  systable_endscan(conscan);
5100 
5101  /*
5102  * Once we've found a matching constraint we do not search
5103  * later parts of the search path.
5104  */
5105  if (found)
5106  break;
5107  }
5108 
5109  list_free(namespacelist);
5110 
5111  /*
5112  * Not found?
5113  */
5114  if (!found)
5115  ereport(ERROR,
5116  (errcode(ERRCODE_UNDEFINED_OBJECT),
5117  errmsg("constraint \"%s\" does not exist",
5118  constraint->relname)));
5119  }
5120 
5121  /*
5122  * Scan for any possible descendants of the constraints. We append
5123  * whatever we find to the same list that we're scanning; this has the
5124  * effect that we create new scans for those, too, so if there are
5125  * further descendants, we'll also catch them.
5126  */
5127  foreach(lc, conoidlist)
5128  {
5129  Oid parent = lfirst_oid(lc);
5130  ScanKeyData key;
5131  SysScanDesc scan;
5132  HeapTuple tuple;
5133 
5134  ScanKeyInit(&key,
5135  Anum_pg_constraint_conparentid,
5136  BTEqualStrategyNumber, F_OIDEQ,
5137  ObjectIdGetDatum(parent));
5138 
5139  scan = systable_beginscan(conrel, ConstraintParentIndexId, true, NULL, 1, &key);
5140 
5141  while (HeapTupleIsValid(tuple = systable_getnext(scan)))
5142  {
5143  Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple);
5144 
5145  conoidlist = lappend_oid(conoidlist, con->oid);
5146  }
5147 
5148  systable_endscan(scan);
5149  }
5150 
5151  table_close(conrel, AccessShareLock);
5152 
5153  /*
5154  * Now, locate the trigger(s) implementing each of these constraints,
5155  * and make a list of their OIDs.
5156  */
5157  tgrel = table_open(TriggerRelationId, AccessShareLock);
5158 
5159  foreach(lc, conoidlist)
5160  {
5161  Oid conoid = lfirst_oid(lc);
5162  ScanKeyData skey;
5163  SysScanDesc tgscan;
5164  HeapTuple htup;
5165 
5166  ScanKeyInit(&skey,
5167  Anum_pg_trigger_tgconstraint,
5168  BTEqualStrategyNumber, F_OIDEQ,
5169  ObjectIdGetDatum(conoid));
5170 
5171  tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
5172  NULL, 1, &skey);
5173 
5174  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
5175  {
5176  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
5177 
5178  /*
5179  * Silently skip triggers that are marked as non-deferrable in
5180  * pg_trigger. This is not an error condition, since a
5181  * deferrable RI constraint may have some non-deferrable
5182  * actions.
5183  */
5184  if (pg_trigger->tgdeferrable)
5185  tgoidlist = lappend_oid(tgoidlist, pg_trigger->oid);
5186  }
5187 
5188  systable_endscan(tgscan);
5189  }
5190 
5191  table_close(tgrel, AccessShareLock);
5192 
5193  /*
5194  * Now we can set the trigger states of individual triggers for this
5195  * xact.
5196  */
5197  foreach(lc, tgoidlist)
5198  {
5199  Oid tgoid = lfirst_oid(lc);
5200  SetConstraintState state = afterTriggers.state;
5201  bool found = false;
5202  int i;
5203 
5204  for (i = 0; i < state->numstates; i++)
5205  {
5206  if (state->trigstates[i].sct_tgoid == tgoid)
5207  {
5208  state->trigstates[i].sct_tgisdeferred = stmt->deferred;
5209  found = true;
5210  break;
5211  }
5212  }
5213  if (!found)
5214  {
5215  afterTriggers.state =
5216  SetConstraintStateAddItem(state, tgoid, stmt->deferred);
5217  }
5218  }
5219  }
5220 
5221  /*
5222  * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
5223  * checks against that constraint must be made when the SET CONSTRAINTS
5224  * command is executed -- i.e. the effects of the SET CONSTRAINTS command
5225  * apply retroactively. We've updated the constraints state, so scan the
5226  * list of previously deferred events to fire any that have now become
5227  * immediate.
5228  *
5229  * Obviously, if this was SET ... DEFERRED then it can't have converted
5230  * any unfired events to immediate, so we need do nothing in that case.
5231  */
5232  if (!stmt->deferred)
5233  {
5234  AfterTriggerEventList *events = &afterTriggers.events;
5235  bool snapshot_set = false;
5236 
5237  while (afterTriggerMarkEvents(events, NULL, true))
5238  {
5239  CommandId firing_id = afterTriggers.firing_counter++;
5240 
5241  /*
5242  * Make sure a snapshot has been established in case trigger
5243  * functions need one. Note that we avoid setting a snapshot if
5244  * we don't find at least one trigger that has to be fired now.
5245  * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
5246  * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
5247  * at the start of a transaction it's not possible for any trigger
5248  * events to be queued yet.)
5249  */
5250  if (!snapshot_set)
5251  {
5252  PushActiveSnapshot(GetTransactionSnapshot());
5253  snapshot_set = true;
5254  }
5255 
5256  /*
5257  * We can delete fired events if we are at top transaction level,
5258  * but we'd better not if inside a subtransaction, since the
5259  * subtransaction could later get rolled back.
5260  */
5261  if (afterTriggerInvokeEvents(events, firing_id, NULL,
5262  !IsSubTransaction()))
5263  break; /* all fired */
5264  }
5265 
5266  if (snapshot_set)
5267  PopActiveSnapshot();
5268  }
5269 }
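/*
 * [Editor's aside -- illustrative only, not part of trigger.c]
 * The retroactive-firing rule implemented at the end of
 * AfterTriggerSetState() is visible at the SQL level: switching a
 * deferred constraint to IMMEDIATE fires its already-queued checks at
 * the SET CONSTRAINTS statement itself rather than at COMMIT, e.g.
 *
 *     BEGIN;
 *     INSERT INTO child VALUES (42);              -- queues a deferred FK check
 *     SET CONSTRAINTS child_parent_fk IMMEDIATE;  -- queued check fires here
 *     COMMIT;
 *
 * (Table and constraint names are hypothetical; the constraint must have
 * been declared DEFERRABLE INITIALLY DEFERRED for the INSERT to defer
 * its check in the first place.)
 */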
5270 
5271 /* ----------
5272  * AfterTriggerPendingOnRel()
5273  * Test to see if there are any pending after-trigger events for rel.
5274  *
5275  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5276  * it is unsafe to perform major surgery on a relation. Note that only
5277  * local pending events are examined. We assume that having exclusive lock
5278  * on a rel guarantees there are no unserviced events in other backends ---
5279  * but having a lock does not prevent there being such events in our own.
5280  *
5281  * In some scenarios it'd be reasonable to remove pending events (more
5282  * specifically, mark them DONE by the current subxact) but without a lot
5283  * of knowledge of the trigger semantics we can't do this in general.
5284  * ----------
5285  */
5286 bool
5287 AfterTriggerPendingOnRel(Oid relid)
5288 {
5289  AfterTriggerEvent event;
5290  AfterTriggerEventChunk *chunk;
5291  int depth;
5292 
5293  /* Scan queued events */
5294  for_each_event_chunk(event, chunk, afterTriggers.events)
5295  {
5296  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5297 
5298  /*
5299  * We can ignore completed events. (Even if a DONE flag is rolled
5300  * back by subxact abort, it's OK because the effects of the TRUNCATE
5301  * or whatever must get rolled back too.)
5302  */
5303  if (event->ate_flags & AFTER_TRIGGER_DONE)
5304  continue;
5305 
5306  if (evtshared->ats_relid == relid)
5307  return true;
5308  }
5309 
5310  /*
5311  * Also scan events queued by incomplete queries. This could only matter
5312  * if TRUNCATE/etc is executed by a function or trigger within an updating
5313  * query on the same relation, which is pretty perverse, but let's check.
5314  */
5315  for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
5316  {
5317  for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events)
5318  {
5319  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5320 
5321  if (event->ate_flags & AFTER_TRIGGER_DONE)
5322  continue;
5323 
5324  if (evtshared->ats_relid == relid)
5325  return true;
5326  }
5327  }
5328 
5329  return false;
5330 }
5331 
5332 
5333 /* ----------
5334  * AfterTriggerSaveEvent()
5335  *
5336  * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5337  * be fired for an event.
5338  *
5339  * NOTE: this is called whenever there are any triggers associated with
5340  * the event (even if they are disabled). This function decides which
5341  * triggers actually need to be queued. It is also called after each row,
5342  * even if there are no triggers for that event, if there are any AFTER
5343  * STATEMENT triggers for the statement which use transition tables, so that
5344  * the transition tuplestores can be built. Furthermore, if the transition
5345  * capture is happening for UPDATEd rows being moved to another partition due
5346  * to the partition-key being changed, then this function is called once when
5347  * the row is deleted (to capture OLD row), and once when the row is inserted
5348  * into another partition (to capture NEW row). This is done separately because
5349  * DELETE and INSERT happen on different tables.
5350  *
 5351  * Transition tuplestores are built now, rather than when events are pulled
 5352  * off the queue, because AFTER ROW triggers are allowed to select from the
5353  * transition tables for the statement.
5354  * ----------
5355  */
5356 static void
5357 AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
 5358  int event, bool row_trigger,
5359  TupleTableSlot *oldslot, TupleTableSlot *newslot,
5360  List *recheckIndexes, Bitmapset *modifiedCols,
5361  TransitionCaptureState *transition_capture)
5362 {
5363  Relation rel = relinfo->ri_RelationDesc;
5364  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5365  AfterTriggerEventData new_event;
5366  AfterTriggerSharedData new_shared;
5367  char relkind = rel->rd_rel->relkind;
5368  int tgtype_event;
5369  int tgtype_level;
5370  int i;
5371  Tuplestorestate *fdw_tuplestore = NULL;
5372 
5373  /*
5374  * Check state. We use a normal test not Assert because it is possible to
5375  * reach here in the wrong state given misconfigured RI triggers, in
5376  * particular deferring a cascade action trigger.
5377  */
5378  if (afterTriggers.query_depth < 0)
5379  elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
5380 
5381  /* Be sure we have enough space to record events at this query depth. */
5382  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
 5383  AfterTriggerEnlargeQueryState();
 5384 
5385  /*
5386  * If the directly named relation has any triggers with transition tables,
5387  * then we need to capture transition tuples.
5388  */
5389  if (row_trigger && transition_capture != NULL)
5390  {
5391  TupleTableSlot *original_insert_tuple = transition_capture->tcs_original_insert_tuple;
5392  TupleConversionMap *map = transition_capture->tcs_map;
5393  bool delete_old_table = transition_capture->tcs_delete_old_table;
5394  bool update_old_table = transition_capture->tcs_update_old_table;
5395  bool update_new_table = transition_capture->tcs_update_new_table;
5396  bool insert_new_table = transition_capture->tcs_insert_new_table;
5397 
5398  /*
5399  * For INSERT events NEW should be non-NULL, for DELETE events OLD
5400  * should be non-NULL, whereas for UPDATE events normally both OLD and
5401  * NEW are non-NULL. But for UPDATE events fired for capturing
5402  * transition tuples during UPDATE partition-key row movement, OLD is
5403  * NULL when the event is for a row being inserted, whereas NEW is
5404  * NULL when the event is for a row being deleted.
5405  */
5406  Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5407  TupIsNull(oldslot)));
5408  Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5409  TupIsNull(newslot)));
5410 
5411  if (!TupIsNull(oldslot) &&
5412  ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
5413  (event == TRIGGER_EVENT_UPDATE && update_old_table)))
5414  {
5415  Tuplestorestate *old_tuplestore;
5416 
5417  old_tuplestore = transition_capture->tcs_private->old_tuplestore;
5418 
5419  if (map != NULL)
5420  {
5421  TupleTableSlot *storeslot;
5422 
5423  storeslot = transition_capture->tcs_private->storeslot;
5424  if (!storeslot)
5425  {
5426  storeslot = ExecAllocTableSlot(&estate->es_tupleTable,
5427  map->outdesc,
5428  &TTSOpsVirtual);
5429  transition_capture->tcs_private->storeslot = storeslot;
5430  }
5431 
5432  execute_attr_map_slot(map->attrMap, oldslot, storeslot);
5433  tuplestore_puttupleslot(old_tuplestore, storeslot);
5434  }
5435  else
5436  tuplestore_puttupleslot(old_tuplestore, oldslot);
5437  }
5438  if (!TupIsNull(newslot) &&
5439  ((event == TRIGGER_EVENT_INSERT && insert_new_table) ||
5440  (event == TRIGGER_EVENT_UPDATE && update_new_table)))
5441  {
5442  Tuplestorestate *new_tuplestore;
5443 
5444  new_tuplestore = transition_capture->tcs_private->new_tuplestore;
5445 
5446  if (original_insert_tuple != NULL)
5447  tuplestore_puttupleslot(new_tuplestore,
5448  original_insert_tuple);
5449  else if (map != NULL)
5450  {
5451  TupleTableSlot *storeslot;
5452 
5453  storeslot = transition_capture->tcs_private->storeslot;
5454 
5455  if (!storeslot)
5456  {
5457  storeslot = ExecAllocTableSlot(&estate->es_tupleTable,
5458  map->outdesc,
5459  &TTSOpsVirtual);
5460  transition_capture->tcs_private->storeslot = storeslot;
5461  }
5462 
5463  execute_attr_map_slot(map->attrMap, newslot, storeslot);
5464  tuplestore_puttupleslot(new_tuplestore, storeslot);
5465  }
5466  else
5467  tuplestore_puttupleslot(new_tuplestore, newslot);
5468  }
5469 
5470  /*
5471  * If transition tables are the only reason we're here, return. As
5472  * mentioned above, we can also be here during update tuple routing in
5473  * presence of transition tables, in which case this function is
5474  * called separately for oldtup and newtup, so we expect exactly one
5475  * of them to be NULL.
5476  */
5477  if (trigdesc == NULL ||
5478  (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
5479  (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
5480  (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) ||
5481  (event == TRIGGER_EVENT_UPDATE && (TupIsNull(oldslot) ^ TupIsNull(newslot))))
5482  return;
5483  }
5484 
5485  /*
5486  * Validate the event code and collect the associated tuple CTIDs.
5487  *
5488  * The event code will be used both as a bitmask and an array offset, so
5489  * validation is important to make sure we don't walk off the edge of our
5490  * arrays.
5491  *
5492  * Also, if we're considering statement-level triggers, check whether we
5493  * already queued a set of them for this event, and cancel the prior set
5494  * if so. This preserves the behavior that statement-level triggers fire
5495  * just once per statement and fire after row-level triggers.
5496  */
5497  switch (event)
5498  {
5499  case TRIGGER_EVENT_INSERT:
5500  tgtype_event = TRIGGER_TYPE_INSERT;
5501  if (row_trigger)
5502  {
5503  Assert(oldslot == NULL);
5504  Assert(newslot != NULL);
5505  ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid1));
5506  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5507  }
5508  else
5509  {
5510  Assert(oldslot == NULL);
5511  Assert(newslot == NULL);
5512  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5513  ItemPointerSetInvalid(&(new_event.ate_ctid2));
 5514  cancel_prior_stmt_triggers(RelationGetRelid(rel),
 5515  CMD_INSERT, event);
5516  }
5517  break;
5518  case TRIGGER_EVENT_DELETE:
5519  tgtype_event = TRIGGER_TYPE_DELETE;
5520  if (row_trigger)
5521  {
5522  Assert(oldslot != NULL);
5523  Assert(newslot == NULL);
5524  ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
5525  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5526  }
5527  else
5528  {
5529  Assert(oldslot == NULL);
5530  Assert(newslot == NULL);
5531  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5532  ItemPointerSetInvalid(&(new_event.ate_ctid2));
 5533  cancel_prior_stmt_triggers(RelationGetRelid(rel),
 5534  CMD_DELETE, event);
5535  }
5536  break;
5537  case TRIGGER_EVENT_UPDATE:
5538  tgtype_event = TRIGGER_TYPE_UPDATE;
5539  if (row_trigger)
5540  {
5541  Assert(oldslot != NULL);
5542  Assert(newslot != NULL);
5543  ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
5544  ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid2));
5545  }
5546  else
5547  {
5548  Assert(oldslot == NULL);
5549  Assert(newslot == NULL);
5550  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5551  ItemPointerSetInvalid(&(new_event.ate_ctid2));
 5552  cancel_prior_stmt_triggers(RelationGetRelid(rel),
 5553  CMD_UPDATE, event);
5554  }
5555  break;
 5556  case TRIGGER_EVENT_TRUNCATE:
 5557  tgtype_event = TRIGGER_TYPE_TRUNCATE;
5558  Assert(oldslot == NULL);
5559  Assert(newslot == NULL);
5560  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5561  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5562  break;
5563  default:
5564  elog(ERROR, "invalid after-trigger event code: %d", event);
5565  tgtype_event = 0; /* keep compiler quiet */
5566  break;
5567  }
5568 
5569  if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
5570  new_event.ate_flags = (row_trigger && event == TRIGGER_EVENT_UPDATE) ?
 5571  AFTER_TRIGGER_2CTID : AFTER_TRIGGER_1CTID;
 5572  /* else, we'll initialize ate_flags for each trigger */
5573 
5574  tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
5575 
5576  for (i = 0; i < trigdesc->numtriggers; i++)
5577  {
5578  Trigger *trigger = &trigdesc->triggers[i];
5579 
5580  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
5581  tgtype_level,
5582  TRIGGER_TYPE_AFTER,
5583  tgtype_event))
5584  continue;
5585  if (!TriggerEnabled(estate, relinfo, trigger, event,
5586  modifiedCols, oldslot, newslot))
5587  continue;
5588 
5589  if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
5590  {
5591  if (fdw_tuplestore == NULL)
5592  {
5593  fdw_tuplestore = GetCurrentFDWTuplestore();
5594  new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
5595  }
5596  else
5597  /* subsequent event for the same tuple */
5598  new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
5599  }
5600 
5601  /*
5602  * If the trigger is a foreign key enforcement trigger, there are
5603  * certain cases where we can skip queueing the event because we can
5604  * tell by inspection that the FK constraint will still pass.
5605  */
5606  if (TRIGGER_FIRED_BY_UPDATE(event) || TRIGGER_FIRED_BY_DELETE(event))
5607  {
5608  switch (RI_FKey_trigger_type(trigger->tgfoid))
5609  {
5610  case RI_TRIGGER_PK:
5611  /* Update or delete on trigger's PK table */
5612  if (!RI_FKey_pk_upd_check_required(trigger, rel,
5613  oldslot, newslot))
5614  {
5615  /* skip queuing this event */
5616  continue;
5617  }
5618  break;
5619 
5620  case RI_TRIGGER_FK:
5621  /* Update on trigger's FK table */
5622  if (!RI_FKey_fk_upd_check_required(trigger, rel,
5623  oldslot, newslot))
5624  {
5625  /* skip queuing this event */
5626  continue;
5627  }
5628  break;
5629 
5630  case RI_TRIGGER_NONE:
5631  /* Not an FK trigger */
5632  break;
5633  }
5634  }
5635 
5636  /*
5637  * If the trigger is a deferred unique constraint check trigger, only
5638  * queue it if the unique constraint was potentially violated, which
5639  * we know from index insertion time.
5640  */
5641  if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
5642  {
5643  if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
5644  continue; /* Uniqueness definitely not violated */
5645  }
5646 
5647  /*
5648  * Fill in event structure and add it to the current query's queue.
5649  * Note we set ats_table to NULL whenever this trigger doesn't use
5650  * transition tables, to improve sharability of the shared event data.
5651  */
5652  new_shared.ats_event =
5653  (event & TRIGGER_EVENT_OPMASK) |
5654  (row_trigger ? TRIGGER_EVENT_ROW : 0) |
5655  (trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
5656  (trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
5657  new_shared.ats_tgoid = trigger->tgoid;
5658  new_shared.ats_relid = RelationGetRelid(rel);
5659  new_shared.ats_firing_id = 0;
5660  if ((trigger->tgoldtable || trigger->tgnewtable) &&
5661  transition_capture != NULL)
5662  new_shared.ats_table = transition_capture->tcs_private;
5663  else
5664  new_shared.ats_table = NULL;
5665  new_shared.ats_modifiedcols = modifiedCols;
5666 
5667  afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth].events,
5668  &new_event, &new_shared);
5669  }
5670 
5671  /*
5672  * Finally, spool any foreign tuple(s). The tuplestore squashes them to
5673  * minimal tuples, so this loses any system columns. The executor lost
5674  * those columns before us, for an unrelated reason, so this is fine.
5675  */
5676  if (fdw_tuplestore)
5677  {
5678  if (oldslot != NULL)
5679  tuplestore_puttupleslot(fdw_tuplestore, oldslot);
5680  if (newslot != NULL)
5681  tuplestore_puttupleslot(fdw_tuplestore, newslot);
5682  }
5683 }
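
/*
 * Illustrative sketch (not part of trigger.c): the operation is packed into
 * the low-order bits of ats_event above, so it can later be recovered with
 * TRIGGER_EVENT_OPMASK.  The helper name is hypothetical; the macros come
 * from commands/trigger.h.
 */
#include "postgres.h"
#include "commands/trigger.h"

static const char *
event_op_name_sketch(TriggerEvent ats_event)
{
	switch (ats_event & TRIGGER_EVENT_OPMASK)
	{
		case TRIGGER_EVENT_INSERT:
			return "INSERT";
		case TRIGGER_EVENT_UPDATE:
			return "UPDATE";
		case TRIGGER_EVENT_DELETE:
			return "DELETE";
		case TRIGGER_EVENT_TRUNCATE:
			return "TRUNCATE";
		default:
			return "unknown";
	}
}
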
5684 
5685 /*
5686  * Detect whether we already queued BEFORE STATEMENT triggers for the given
5687  * relation + operation, and set the flag so the next call will report "true".
5688  */
5689 static bool
5690 before_stmt_triggers_fired(Oid relid, CmdType cmdType)
5691 {
5692  bool result;
5693  AfterTriggersTableData *table;
5694 
5695  /* Check state, like AfterTriggerSaveEvent. */
5696  if (afterTriggers.query_depth < 0)
5697  elog(ERROR, "before_stmt_triggers_fired() called outside of query");
5698 
5699  /* Be sure we have enough space to record events at this query depth. */
5700  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
 5701  AfterTriggerEnlargeQueryState();
 5702 
5703  /*
5704  * We keep this state in the AfterTriggersTableData that also holds
5705  * transition tables for the relation + operation. In this way, if we are
5706  * forced to make a new set of transition tables because more tuples get
5707  * entered after we've already fired triggers, we will allow a new set of
5708  * statement triggers to get queued.
5709  */
5710  table = GetAfterTriggersTableData(relid, cmdType);
5711  result = table->before_trig_done;
5712  table->before_trig_done = true;
5713  return result;
5714 }
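
/*
 * Illustrative sketch (not part of trigger.c as such): the intended caller
 * pattern for before_stmt_triggers_fired().  Because the function both
 * reports and sets the "done" flag, the first caller for a given
 * relation + operation proceeds and later callers become no-ops, so BEFORE
 * STATEMENT triggers fire at most once.  The function name below is
 * hypothetical, and the sketch assumes placement in this file since the flag
 * helper is file-local.
 */
static void
fire_before_stmt_once_sketch(Relation rel, CmdType cmdType)
{
	/* no-op if we already fired BEFORE STATEMENT triggers in this context */
	if (before_stmt_triggers_fired(RelationGetRelid(rel), cmdType))
		return;

	elog(DEBUG1, "firing BEFORE STATEMENT triggers for \"%s\"",
		 RelationGetRelationName(rel));
	/* ... invoke the relevant statement-level triggers here ... */
}
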
5715 
5716 /*
5717  * If we previously queued a set of AFTER STATEMENT triggers for the given
5718  * relation + operation, and they've not been fired yet, cancel them. The
5719  * caller will queue a fresh set that's after any row-level triggers that may
5720  * have been queued by the current sub-statement, preserving (as much as
5721  * possible) the property that AFTER ROW triggers fire before AFTER STATEMENT
5722  * triggers, and that the latter only fire once. This deals with the
5723  * situation where several FK enforcement triggers sequentially queue triggers
5724  * for the same table into the same trigger query level. We can't fully
5725  * prevent odd behavior though: if there are AFTER ROW triggers taking
5726  * transition tables, we don't want to change the transition tables once the
5727  * first such trigger has seen them. In such a case, any additional events
5728  * will result in creating new transition tables and allowing new firings of
5729  * statement triggers.
5730  *
5731  * This also saves the current event list location so that a later invocation
5732  * of this function can cheaply find the triggers we're about to queue and
5733  * cancel them.
5734  */
5735 static void
5736 cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent)
5737 {
5738  AfterTrigg