trigger.c
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/index.h"
26 #include "catalog/indexing.h"
27 #include "catalog/objectaccess.h"
28 #include "catalog/partition.h"
29 #include "catalog/pg_constraint.h"
30 #include "catalog/pg_inherits.h"
31 #include "catalog/pg_proc.h"
32 #include "catalog/pg_trigger.h"
33 #include "catalog/pg_type.h"
34 #include "commands/dbcommands.h"
35 #include "commands/defrem.h"
36 #include "commands/trigger.h"
37 #include "executor/executor.h"
38 #include "executor/execPartition.h"
39 #include "miscadmin.h"
40 #include "nodes/bitmapset.h"
41 #include "nodes/makefuncs.h"
42 #include "optimizer/optimizer.h"
43 #include "parser/parse_clause.h"
44 #include "parser/parse_collate.h"
45 #include "parser/parse_func.h"
46 #include "parser/parse_relation.h"
47 #include "parser/parsetree.h"
48 #include "partitioning/partdesc.h"
49 #include "pgstat.h"
50 #include "rewrite/rewriteManip.h"
51 #include "storage/bufmgr.h"
52 #include "storage/lmgr.h"
53 #include "tcop/utility.h"
54 #include "utils/acl.h"
55 #include "utils/builtins.h"
56 #include "utils/bytea.h"
57 #include "utils/fmgroids.h"
58 #include "utils/inval.h"
59 #include "utils/lsyscache.h"
60 #include "utils/memutils.h"
61 #include "utils/rel.h"
62 #include "utils/snapmgr.h"
63 #include "utils/syscache.h"
64 #include "utils/tuplestore.h"
65 
66 
67 /* GUC variables */
68 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
69 
70 /* How many levels deep into trigger execution are we? */
71 static int MyTriggerDepth = 0;
72 
73 /*
74  * Note that similar macros also exist in executor/execMain.c. There does not
75  * appear to be any good header to put them into, given the structures that
76  * they use, so we let them be duplicated. Be sure to update all if one needs
77  * to be changed, however.
78  */
79 #define GetAllUpdatedColumns(relinfo, estate) \
80  (bms_union(exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->updatedCols, \
81  exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->extraUpdatedCols))
82 
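/*
 * Illustrative sketch (not part of the original file): GetAllUpdatedColumns
 * yields a single Bitmapset combining the explicitly updated columns with any
 * columns updated as a side effect. A hypothetical caller could test one
 * attribute as below; the function name is invented, while bms_is_member,
 * bms_free and FirstLowInvalidHeapAttributeNumber come from headers already
 * included above.
 */
#ifdef NOT_USED
static bool
column_was_updated_sketch(ResultRelInfo *relinfo, EState *estate,
						  AttrNumber attno)
{
	Bitmapset  *cols = GetAllUpdatedColumns(relinfo, estate);
	bool		result;

	/* updatedCols bitmapsets store attnums offset by this constant */
	result = bms_is_member(attno - FirstLowInvalidHeapAttributeNumber, cols);
	bms_free(cols);
	return result;
}
#endif
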
83 /* Local function prototypes */
84 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
85 static bool GetTupleForTrigger(EState *estate,
86  EPQState *epqstate,
87  ResultRelInfo *relinfo,
88  ItemPointer tid,
89  LockTupleMode lockmode,
90  TupleTableSlot *oldslot,
91  TupleTableSlot **newSlot);
92 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
93  Trigger *trigger, TriggerEvent event,
94  Bitmapset *modifiedCols,
95  TupleTableSlot *oldslot, TupleTableSlot *newslot);
96 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
97  int tgindx,
98  FmgrInfo *finfo,
99  Instrumentation *instr,
100  MemoryContext per_tuple_context);
101 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
102  int event, bool row_trigger,
103  TupleTableSlot *oldtup, TupleTableSlot *newtup,
104  List *recheckIndexes, Bitmapset *modifiedCols,
105  TransitionCaptureState *transition_capture);
106 static void AfterTriggerEnlargeQueryState(void);
107 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
108 
109 
110 /*
111  * Create a trigger. Returns the address of the created trigger.
112  *
113  * queryString is the source text of the CREATE TRIGGER command.
114  * This must be supplied if a whenClause is specified, else it can be NULL.
115  *
116  * relOid, if nonzero, is the relation on which the trigger should be
117  * created. If zero, the name provided in the statement will be looked up.
118  *
119  * refRelOid, if nonzero, is the relation to which the constraint trigger
120  * refers. If zero, the constraint relation name provided in the statement
121  * will be looked up as needed.
122  *
123  * constraintOid, if nonzero, says that this trigger is being created
124  * internally to implement that constraint. A suitable pg_depend entry will
125  * be made to link the trigger to that constraint. constraintOid is zero when
126  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
127  * TRIGGER, we build a pg_constraint entry internally.)
128  *
129  * indexOid, if nonzero, is the OID of an index associated with the constraint.
130  * We do nothing with this except store it into pg_trigger.tgconstrindid;
131  * but when creating a trigger for a deferrable unique constraint on a
132  * partitioned table, its children are looked up. Note we don't cope with
133  * invalid indexes in that case.
134  *
135  * funcoid, if nonzero, is the OID of the function to invoke. When this is
136  * given, stmt->funcname is ignored.
137  *
138  * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
139  * if that trigger is dropped, this one should be too. (This is passed as
140  * Invalid by most callers; it's set here when recursing on a partition.)
141  *
142  * If whenClause is passed, it is an already-transformed expression for
143  * WHEN. In this case, we ignore any that may come in stmt->whenClause.
144  *
145  * If isInternal is true then this is an internally-generated trigger.
146  * This argument sets the tgisinternal field of the pg_trigger entry, and
147  * if true causes us to modify the given trigger name to ensure uniqueness.
148  *
149  * When isInternal is not true we require ACL_TRIGGER permissions on the
150  * relation, as well as ACL_EXECUTE on the trigger function. For internal
151  * triggers the caller must apply any required permission checks.
152  *
153  * When called on partitioned tables, this function recurses to create the
154  * trigger on all the partitions, except if isInternal is true, in which
155  * case caller is expected to execute recursion on its own.
156  */
157 ObjectAddress
158 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
159  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
160  Oid funcoid, Oid parentTriggerOid, Node *whenClause,
161  bool isInternal, bool in_partition)
162 {
163  int16 tgtype;
164  int ncolumns;
165  int16 *columns;
166  int2vector *tgattr;
167  List *whenRtable;
168  char *qual;
169  Datum values[Natts_pg_trigger];
170  bool nulls[Natts_pg_trigger];
171  Relation rel;
172  AclResult aclresult;
173  Relation tgrel;
174  SysScanDesc tgscan;
175  ScanKeyData key;
176  Relation pgrel;
177  HeapTuple tuple;
178  Oid funcrettype;
179  Oid trigoid;
180  char internaltrigname[NAMEDATALEN];
181  char *trigname;
182  Oid constrrelid = InvalidOid;
183  ObjectAddress myself,
184  referenced;
185  char *oldtablename = NULL;
186  char *newtablename = NULL;
187  bool partition_recurse;
188 
189  if (OidIsValid(relOid))
190  rel = table_open(relOid, ShareRowExclusiveLock);
191  else
192  rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
193 
194  /*
195  * Triggers must be on tables or views, and there are additional
196  * relation-type-specific restrictions.
197  */
198  if (rel->rd_rel->relkind == RELKIND_RELATION)
199  {
200  /* Tables can't have INSTEAD OF triggers */
201  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
202  stmt->timing != TRIGGER_TYPE_AFTER)
203  ereport(ERROR,
204  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
205  errmsg("\"%s\" is a table",
206  RelationGetRelationName(rel)),
207  errdetail("Tables cannot have INSTEAD OF triggers.")));
208  }
209  else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
210  {
211  /* Partitioned tables can't have INSTEAD OF triggers */
212  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
213  stmt->timing != TRIGGER_TYPE_AFTER)
214  ereport(ERROR,
215  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
216  errmsg("\"%s\" is a table",
217  RelationGetRelationName(rel)),
218  errdetail("Tables cannot have INSTEAD OF triggers.")));
219 
220  /*
221  * FOR EACH ROW triggers have further restrictions
222  */
223  if (stmt->row)
224  {
225  /*
226  * Disallow use of transition tables.
227  *
228  * Note that we have another restriction about transition tables
229  * in partitions; search for 'has_superclass' below for an
230  * explanation. The check here is just to protect from the fact
231  * that if we allowed it here, the creation would succeed for a
232  * partitioned table with no partitions, but would be blocked by
233  * the other restriction when the first partition was created,
234  * which is very unfriendly behavior.
235  */
236  if (stmt->transitionRels != NIL)
237  ereport(ERROR,
238  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
239  errmsg("\"%s\" is a partitioned table",
240  RelationGetRelationName(rel)),
241  errdetail("Triggers on partitioned tables cannot have transition tables.")));
242  }
243  }
244  else if (rel->rd_rel->relkind == RELKIND_VIEW)
245  {
246  /*
247  * Views can have INSTEAD OF triggers (which we check below are
248  * row-level), or statement-level BEFORE/AFTER triggers.
249  */
250  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
251  ereport(ERROR,
252  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
253  errmsg("\"%s\" is a view",
254  RelationGetRelationName(rel)),
255  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
256  /* Disallow TRUNCATE triggers on VIEWs */
257  if (TRIGGER_FOR_TRUNCATE(stmt->events))
258  ereport(ERROR,
259  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
260  errmsg("\"%s\" is a view",
261  RelationGetRelationName(rel)),
262  errdetail("Views cannot have TRUNCATE triggers.")));
263  }
264  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
265  {
266  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
267  stmt->timing != TRIGGER_TYPE_AFTER)
268  ereport(ERROR,
269  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
270  errmsg("\"%s\" is a foreign table",
271  RelationGetRelationName(rel)),
272  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
273 
274  if (TRIGGER_FOR_TRUNCATE(stmt->events))
275  ereport(ERROR,
276  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
277  errmsg("\"%s\" is a foreign table",
278  RelationGetRelationName(rel)),
279  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
280 
281  /*
282  * We disallow constraint triggers to protect the assumption that
283  * triggers on FKs can't be deferred. See notes with AfterTriggers
284  * data structures, below.
285  */
286  if (stmt->isconstraint)
287  ereport(ERROR,
288  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
289  errmsg("\"%s\" is a foreign table",
290  RelationGetRelationName(rel)),
291  errdetail("Foreign tables cannot have constraint triggers.")));
292  }
293  else
294  ereport(ERROR,
295  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
296  errmsg("\"%s\" is not a table or view",
297  RelationGetRelationName(rel))));
298 
299  if (!allowSystemTableMods && IsSystemRelation(rel))
300  ereport(ERROR,
301  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
302  errmsg("permission denied: \"%s\" is a system catalog",
303  RelationGetRelationName(rel))));
304 
305  if (stmt->isconstraint)
306  {
307  /*
308  * We must take a lock on the target relation to protect against
309  * concurrent drop. It's not clear that AccessShareLock is strong
310  * enough, but we certainly need at least that much... otherwise, we
311  * might end up creating a pg_constraint entry referencing a
312  * nonexistent table.
313  */
314  if (OidIsValid(refRelOid))
315  {
316  LockRelationOid(refRelOid, AccessShareLock);
317  constrrelid = refRelOid;
318  }
319  else if (stmt->constrrel != NULL)
320  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
321  false);
322  }
323 
324  /* permission checks */
325  if (!isInternal)
326  {
327  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
328  ACL_TRIGGER);
329  if (aclresult != ACLCHECK_OK)
330  aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
331  RelationGetRelationName(rel));
332 
333  if (OidIsValid(constrrelid))
334  {
335  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
336  ACL_TRIGGER);
337  if (aclresult != ACLCHECK_OK)
338  aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
339  get_rel_name(constrrelid));
340  }
341  }
342 
343  /*
344  * When called on a partitioned table to create a FOR EACH ROW trigger
345  * that's not internal, we create one trigger for each partition, too.
346  *
347  * For that, we'd better hold lock on all of them ahead of time.
348  */
349  partition_recurse = !isInternal && stmt->row &&
350  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
351  if (partition_recurse)
352  list_free(find_all_inheritors(RelationGetRelid(rel),
353  ShareRowExclusiveLock, NULL));
354 
355  /* Compute tgtype */
356  TRIGGER_CLEAR_TYPE(tgtype);
357  if (stmt->row)
358  TRIGGER_SETT_ROW(tgtype);
359  tgtype |= stmt->timing;
360  tgtype |= stmt->events;
361 
362  /* Disallow ROW-level TRUNCATE triggers */
363  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
364  ereport(ERROR,
365  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
366  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
367 
368  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
369  if (TRIGGER_FOR_INSTEAD(tgtype))
370  {
371  if (!TRIGGER_FOR_ROW(tgtype))
372  ereport(ERROR,
373  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
374  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
375  if (stmt->whenClause)
376  ereport(ERROR,
377  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
378  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
379  if (stmt->columns != NIL)
380  ereport(ERROR,
381  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
382  errmsg("INSTEAD OF triggers cannot have column lists")));
383  }
384 
385  /*
386  * We don't yet support naming ROW transition variables, but the parser
387  * recognizes the syntax so we can give a nicer message here.
388  *
389  * Per standard, REFERENCING TABLE names are only allowed on AFTER
390  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
391  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
392  * only allowed once. Per standard, OLD may not be specified when
393  * creating a trigger only for INSERT, and NEW may not be specified when
394  * creating a trigger only for DELETE.
395  *
396  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
397  * reference both ROW and TABLE transition data.
398  */
399  if (stmt->transitionRels != NIL)
400  {
401  List *varList = stmt->transitionRels;
402  ListCell *lc;
403 
404  foreach(lc, varList)
405  {
406  TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
407 
408  if (!(tt->isTable))
409  ereport(ERROR,
410  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
411  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
412  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
413 
414  /*
415  * Because of the above test, we omit further ROW-related testing
416  * below. If we later allow naming OLD and NEW ROW variables,
417  * adjustments will be needed below.
418  */
419 
420  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
421  ereport(ERROR,
422  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
423  errmsg("\"%s\" is a foreign table",
424  RelationGetRelationName(rel)),
425  errdetail("Triggers on foreign tables cannot have transition tables.")));
426 
427  if (rel->rd_rel->relkind == RELKIND_VIEW)
428  ereport(ERROR,
429  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
430  errmsg("\"%s\" is a view",
431  RelationGetRelationName(rel)),
432  errdetail("Triggers on views cannot have transition tables.")));
433 
434  /*
435  * We currently don't allow row-level triggers with transition
436  * tables on partition or inheritance children. Such triggers
437  * would somehow need to see tuples converted to the format of the
438  * table they're attached to, and it's not clear which subset of
439  * tuples each child should see. See also the prohibitions in
440  * ATExecAttachPartition() and ATExecAddInherit().
441  */
442  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
443  {
444  /* Use appropriate error message. */
445  if (rel->rd_rel->relispartition)
446  ereport(ERROR,
447  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
448  errmsg("ROW triggers with transition tables are not supported on partitions")));
449  else
450  ereport(ERROR,
451  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
452  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
453  }
454 
455  if (stmt->timing != TRIGGER_TYPE_AFTER)
456  ereport(ERROR,
457  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
458  errmsg("transition table name can only be specified for an AFTER trigger")));
459 
460  if (TRIGGER_FOR_TRUNCATE(tgtype))
461  ereport(ERROR,
462  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
463  errmsg("TRUNCATE triggers with transition tables are not supported")));
464 
465  /*
466  * We currently don't allow multi-event triggers ("INSERT OR
467  * UPDATE") with transition tables, because it's not clear how to
468  * handle INSERT ... ON CONFLICT statements which can fire both
469  * INSERT and UPDATE triggers. We show the inserted tuples to
470  * INSERT triggers and the updated tuples to UPDATE triggers, but
471  * it's not yet clear what INSERT OR UPDATE trigger should see.
472  * This restriction could be lifted if we can decide on the right
473  * semantics in a later release.
474  */
475  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
476  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
477  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
478  ereport(ERROR,
479  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
480  errmsg("transition tables cannot be specified for triggers with more than one event")));
481 
482  /*
483  * We currently don't allow column-specific triggers with
484  * transition tables. Per spec, that seems to require
485  * accumulating separate transition tables for each combination of
486  * columns, which is a lot of work for a rather marginal feature.
487  */
488  if (stmt->columns != NIL)
489  ereport(ERROR,
490  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
491  errmsg("transition tables cannot be specified for triggers with column lists")));
492 
493  /*
494  * We disallow constraint triggers with transition tables, to
495  * protect the assumption that such triggers can't be deferred.
496  * See notes with AfterTriggers data structures, below.
497  *
498  * Currently this is enforced by the grammar, so just Assert here.
499  */
500  Assert(!stmt->isconstraint);
501 
502  if (tt->isNew)
503  {
504  if (!(TRIGGER_FOR_INSERT(tgtype) ||
505  TRIGGER_FOR_UPDATE(tgtype)))
506  ereport(ERROR,
507  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
508  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
509 
510  if (newtablename != NULL)
511  ereport(ERROR,
512  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
513  errmsg("NEW TABLE cannot be specified multiple times")));
514 
515  newtablename = tt->name;
516  }
517  else
518  {
519  if (!(TRIGGER_FOR_DELETE(tgtype) ||
520  TRIGGER_FOR_UPDATE(tgtype)))
521  ereport(ERROR,
522  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
523  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
524 
525  if (oldtablename != NULL)
526  ereport(ERROR,
527  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
528  errmsg("OLD TABLE cannot be specified multiple times")));
529 
530  oldtablename = tt->name;
531  }
532  }
533 
534  if (newtablename != NULL && oldtablename != NULL &&
535  strcmp(newtablename, oldtablename) == 0)
536  ereport(ERROR,
537  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
538  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
539  }
540 
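 /*
  * For illustration (not part of the original file; table and function names
  * are made up): the checks above accept, for example, a statement-level
  * AFTER trigger that names both transition tables,
  *
  *     CREATE TRIGGER audit_upd AFTER UPDATE ON some_table
  *         REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
  *         FOR EACH STATEMENT EXECUTE FUNCTION audit_fn();
  *
  * while rejecting, say, OLD TABLE on an INSERT-only trigger, any
  * REFERENCING clause on a BEFORE trigger, or transition tables combined
  * with a column list.
  */
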
541  /*
542  * Parse the WHEN clause, if any and we weren't passed an already
543  * transformed one.
544  *
545  * Note that as a side effect, we fill whenRtable when parsing. If we got
546  * an already parsed clause, this does not occur, which is what we want --
547  * no point in adding redundant dependencies below.
548  */
549  if (!whenClause && stmt->whenClause)
550  {
551  ParseState *pstate;
552  ParseNamespaceItem *nsitem;
553  List *varList;
554  ListCell *lc;
555 
556  /* Set up a pstate to parse with */
557  pstate = make_parsestate(NULL);
558  pstate->p_sourcetext = queryString;
559 
560  /*
561  * Set up nsitems for OLD and NEW references.
562  *
563  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
564  */
565  nsitem = addRangeTableEntryForRelation(pstate, rel,
566  AccessShareLock,
567  makeAlias("old", NIL),
568  false, false);
569  addNSItemToQuery(pstate, nsitem, false, true, true);
570  nsitem = addRangeTableEntryForRelation(pstate, rel,
571  AccessShareLock,
572  makeAlias("new", NIL),
573  false, false);
574  addNSItemToQuery(pstate, nsitem, false, true, true);
575 
576  /* Transform expression. Copy to be sure we don't modify original */
577  whenClause = transformWhereClause(pstate,
578  copyObject(stmt->whenClause),
579  EXPR_KIND_TRIGGER_WHEN,
580  "WHEN");
581  /* we have to fix its collations too */
582  assign_expr_collations(pstate, whenClause);
583 
584  /*
585  * Check for disallowed references to OLD/NEW.
586  *
587  * NB: pull_var_clause is okay here only because we don't allow
588  * subselects in WHEN clauses; it would fail to examine the contents
589  * of subselects.
590  */
591  varList = pull_var_clause(whenClause, 0);
592  foreach(lc, varList)
593  {
594  Var *var = (Var *) lfirst(lc);
595 
596  switch (var->varno)
597  {
598  case PRS2_OLD_VARNO:
599  if (!TRIGGER_FOR_ROW(tgtype))
600  ereport(ERROR,
601  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
602  errmsg("statement trigger's WHEN condition cannot reference column values"),
603  parser_errposition(pstate, var->location)));
604  if (TRIGGER_FOR_INSERT(tgtype))
605  ereport(ERROR,
606  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
607  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
608  parser_errposition(pstate, var->location)));
609  /* system columns are okay here */
610  break;
611  case PRS2_NEW_VARNO:
612  if (!TRIGGER_FOR_ROW(tgtype))
613  ereport(ERROR,
614  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
615  errmsg("statement trigger's WHEN condition cannot reference column values"),
616  parser_errposition(pstate, var->location)));
617  if (TRIGGER_FOR_DELETE(tgtype))
618  ereport(ERROR,
619  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
620  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
621  parser_errposition(pstate, var->location)));
622  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
623  ereport(ERROR,
624  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
625  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
626  parser_errposition(pstate, var->location)));
627  if (TRIGGER_FOR_BEFORE(tgtype) &&
628  var->varattno == 0 &&
629  RelationGetDescr(rel)->constr &&
630  RelationGetDescr(rel)->constr->has_generated_stored)
631  ereport(ERROR,
632  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
633  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
634  errdetail("A whole-row reference is used and the table contains generated columns."),
635  parser_errposition(pstate, var->location)));
636  if (TRIGGER_FOR_BEFORE(tgtype) &&
637  var->varattno > 0 &&
638  TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
639  ereport(ERROR,
640  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
641  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
642  errdetail("Column \"%s\" is a generated column.",
643  NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
644  parser_errposition(pstate, var->location)));
645  break;
646  default:
647  /* can't happen without add_missing_from, so just elog */
648  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
649  break;
650  }
651  }
652 
653  /* we'll need the rtable for recordDependencyOnExpr */
654  whenRtable = pstate->p_rtable;
655 
656  qual = nodeToString(whenClause);
657 
658  free_parsestate(pstate);
659  }
660  else if (!whenClause)
661  {
662  whenClause = NULL;
663  whenRtable = NIL;
664  qual = NULL;
665  }
666  else
667  {
668  qual = nodeToString(whenClause);
669  whenRtable = NIL;
670  }
671 
672  /*
673  * Find and validate the trigger function.
674  */
675  if (!OidIsValid(funcoid))
676  funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
677  if (!isInternal)
678  {
679  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
680  if (aclresult != ACLCHECK_OK)
681  aclcheck_error(aclresult, OBJECT_FUNCTION,
682  NameListToString(stmt->funcname));
683  }
684  funcrettype = get_func_rettype(funcoid);
685  if (funcrettype != TRIGGEROID)
686  ereport(ERROR,
687  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
688  errmsg("function %s must return type %s",
689  NameListToString(stmt->funcname), "trigger")));
690 
691  /*
692  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
693  * corresponding pg_constraint entry.
694  */
695  if (stmt->isconstraint && !OidIsValid(constraintOid))
696  {
697  /* Internal callers should have made their own constraints */
698  Assert(!isInternal);
699  constraintOid = CreateConstraintEntry(stmt->trigname,
700  RelationGetNamespace(rel),
701  CONSTRAINT_TRIGGER,
702  stmt->deferrable,
703  stmt->initdeferred,
704  true,
705  InvalidOid, /* no parent */
706  RelationGetRelid(rel),
707  NULL, /* no conkey */
708  0,
709  0,
710  InvalidOid, /* no domain */
711  InvalidOid, /* no index */
712  InvalidOid, /* no foreign key */
713  NULL,
714  NULL,
715  NULL,
716  NULL,
717  0,
718  ' ',
719  ' ',
720  ' ',
721  NULL, /* no exclusion */
722  NULL, /* no check constraint */
723  NULL,
724  true, /* islocal */
725  0, /* inhcount */
726  true, /* noinherit */
727  isInternal); /* is_internal */
728  }
729 
730  /*
731  * Generate the trigger's OID now, so that we can use it in the name if
732  * needed.
733  */
734  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
735 
736  trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
737  Anum_pg_trigger_oid);
738 
739  /*
740  * If trigger is internally generated, modify the provided trigger name to
741  * ensure uniqueness by appending the trigger OID. (Callers will usually
742  * supply a simple constant trigger name in these cases.)
743  */
744  if (isInternal)
745  {
746  snprintf(internaltrigname, sizeof(internaltrigname),
747  "%s_%u", stmt->trigname, trigoid);
748  trigname = internaltrigname;
749  }
750  else
751  {
752  /* user-defined trigger; use the specified trigger name as-is */
753  trigname = stmt->trigname;
754  }
755 
756  /*
757  * Scan pg_trigger for existing triggers on relation. We do this only to
758  * give a nice error message if there's already a trigger of the same
759  * name. (The unique index on tgrelid/tgname would complain anyway.) We
760  * can skip this for internally generated triggers, since the name
761  * modification above should be sufficient.
762  *
763  * NOTE that this is cool only because we have ShareRowExclusiveLock on
764  * the relation, so the trigger set won't be changing underneath us.
765  */
766  if (!isInternal)
767  {
768  ScanKeyInit(&key,
769  Anum_pg_trigger_tgrelid,
770  BTEqualStrategyNumber, F_OIDEQ,
771  ObjectIdGetDatum(RelationGetRelid(rel)));
772  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
773  NULL, 1, &key);
774  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
775  {
776  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
777 
778  if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
779  ereport(ERROR,
780  (errcode(ERRCODE_DUPLICATE_OBJECT),
781  errmsg("trigger \"%s\" for relation \"%s\" already exists",
782  trigname, RelationGetRelationName(rel))));
783  }
784  systable_endscan(tgscan);
785  }
786 
787  /*
788  * Build the new pg_trigger tuple.
789  *
790  * When we're creating a trigger in a partition, we mark it as internal,
791  * even though we don't do the isInternal magic in this function. This
792  * makes the triggers in partitions identical to the ones in the
793  * partitioned tables, except that they are marked internal.
794  */
795  memset(nulls, false, sizeof(nulls));
796 
797  values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
798  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
799  values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
800  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
801  CStringGetDatum(trigname));
802  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
803  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
804  values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(TRIGGER_FIRES_ON_ORIGIN);
805  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal || in_partition);
806  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
807  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
808  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
809  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
810  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
811 
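 /*
  * Encode the trigger arguments, if any, into the bytea tgargs column. Each
  * argument is backslash-escaped and terminated with the literal four
  * characters "\000"; byteain() turns each "\000" into a NUL byte, and
  * RelationBuildTriggers() below walks those NUL-separated strings to
  * rebuild the argument array.
  */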
812  if (stmt->args)
813  {
814  ListCell *le;
815  char *args;
816  int16 nargs = list_length(stmt->args);
817  int len = 0;
818 
819  foreach(le, stmt->args)
820  {
821  char *ar = strVal(lfirst(le));
822 
823  len += strlen(ar) + 4;
824  for (; *ar; ar++)
825  {
826  if (*ar == '\\')
827  len++;
828  }
829  }
830  args = (char *) palloc(len + 1);
831  args[0] = '\0';
832  foreach(le, stmt->args)
833  {
834  char *s = strVal(lfirst(le));
835  char *d = args + strlen(args);
836 
837  while (*s)
838  {
839  if (*s == '\\')
840  *d++ = '\\';
841  *d++ = *s++;
842  }
843  strcpy(d, "\\000");
844  }
845  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
846  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
847  CStringGetDatum(args));
848  }
849  else
850  {
851  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
852  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
853  CStringGetDatum(""));
854  }
855 
856  /* build column number array if it's a column-specific trigger */
857  ncolumns = list_length(stmt->columns);
858  if (ncolumns == 0)
859  columns = NULL;
860  else
861  {
862  ListCell *cell;
863  int i = 0;
864 
865  columns = (int16 *) palloc(ncolumns * sizeof(int16));
866  foreach(cell, stmt->columns)
867  {
868  char *name = strVal(lfirst(cell));
869  int16 attnum;
870  int j;
871 
872  /* Lookup column name. System columns are not allowed */
873  attnum = attnameAttNum(rel, name, false);
874  if (attnum == InvalidAttrNumber)
875  ereport(ERROR,
876  (errcode(ERRCODE_UNDEFINED_COLUMN),
877  errmsg("column \"%s\" of relation \"%s\" does not exist",
878  name, RelationGetRelationName(rel))));
879 
880  /* Check for duplicates */
881  for (j = i - 1; j >= 0; j--)
882  {
883  if (columns[j] == attnum)
884  ereport(ERROR,
885  (errcode(ERRCODE_DUPLICATE_COLUMN),
886  errmsg("column \"%s\" specified more than once",
887  name)));
888  }
889 
890  columns[i++] = attnum;
891  }
892  }
893  tgattr = buildint2vector(columns, ncolumns);
894  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
895 
896  /* set tgqual if trigger has WHEN clause */
897  if (qual)
898  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
899  else
900  nulls[Anum_pg_trigger_tgqual - 1] = true;
901 
902  if (oldtablename)
903  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
904  CStringGetDatum(oldtablename));
905  else
906  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
907  if (newtablename)
908  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
909  CStringGetDatum(newtablename));
910  else
911  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
912 
913  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
914 
915  /*
916  * Insert tuple into pg_trigger.
917  */
918  CatalogTupleInsert(tgrel, tuple);
919 
920  heap_freetuple(tuple);
921  table_close(tgrel, RowExclusiveLock);
922 
923  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
924  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
925  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
926  if (oldtablename)
927  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
928  if (newtablename)
929  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
930 
931  /*
932  * Update relation's pg_class entry if necessary; and if not, send an SI
933  * message to make other backends (and this one) rebuild relcache entries.
934  */
935  pgrel = table_open(RelationRelationId, RowExclusiveLock);
936  tuple = SearchSysCacheCopy1(RELOID,
937  ObjectIdGetDatum(RelationGetRelid(rel)));
938  if (!HeapTupleIsValid(tuple))
939  elog(ERROR, "cache lookup failed for relation %u",
940  RelationGetRelid(rel));
941  if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
942  {
943  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
944 
945  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
946 
947  CommandCounterIncrement();
948  }
949  else
950  CacheInvalidateRelcacheByTuple(tuple);
951 
952  heap_freetuple(tuple);
953  table_close(pgrel, RowExclusiveLock);
954 
955  /*
956  * Record dependencies for trigger. Always place a normal dependency on
957  * the function.
958  */
959  myself.classId = TriggerRelationId;
960  myself.objectId = trigoid;
961  myself.objectSubId = 0;
962 
963  referenced.classId = ProcedureRelationId;
964  referenced.objectId = funcoid;
965  referenced.objectSubId = 0;
966  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
967 
968  if (isInternal && OidIsValid(constraintOid))
969  {
970  /*
971  * Internally-generated trigger for a constraint, so make it an
972  * internal dependency of the constraint. We can skip depending on
973  * the relation(s), as there'll be an indirect dependency via the
974  * constraint.
975  */
976  referenced.classId = ConstraintRelationId;
977  referenced.objectId = constraintOid;
978  referenced.objectSubId = 0;
979  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
980  }
981  else
982  {
983  /*
984  * User CREATE TRIGGER, so place dependencies. We make trigger be
985  * auto-dropped if its relation is dropped or if the FK relation is
986  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
987  */
988  referenced.classId = RelationRelationId;
989  referenced.objectId = RelationGetRelid(rel);
990  referenced.objectSubId = 0;
991  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
992 
993  if (OidIsValid(constrrelid))
994  {
995  referenced.classId = RelationRelationId;
996  referenced.objectId = constrrelid;
997  referenced.objectSubId = 0;
998  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
999  }
1000  /* Not possible to have an index dependency in this case */
1001  Assert(!OidIsValid(indexOid));
1002 
1003  /*
1004  * If it's a user-specified constraint trigger, make the constraint
1005  * internally dependent on the trigger instead of vice versa.
1006  */
1007  if (OidIsValid(constraintOid))
1008  {
1009  referenced.classId = ConstraintRelationId;
1010  referenced.objectId = constraintOid;
1011  referenced.objectSubId = 0;
1012  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1013  }
1014 
1015  /*
1016  * If it's a partition trigger, create the partition dependencies.
1017  */
1018  if (OidIsValid(parentTriggerOid))
1019  {
1020  ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1021  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1022  ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1023  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1024  }
1025  }
1026 
1027  /* If column-specific trigger, add normal dependencies on columns */
1028  if (columns != NULL)
1029  {
1030  int i;
1031 
1032  referenced.classId = RelationRelationId;
1033  referenced.objectId = RelationGetRelid(rel);
1034  for (i = 0; i < ncolumns; i++)
1035  {
1036  referenced.objectSubId = columns[i];
1037  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1038  }
1039  }
1040 
1041  /*
1042  * If it has a WHEN clause, add dependencies on objects mentioned in the
1043  * expression (eg, functions, as well as any columns used).
1044  */
1045  if (whenRtable != NIL)
1046  recordDependencyOnExpr(&myself, whenClause, whenRtable,
1047  DEPENDENCY_NORMAL);
1048 
1049  /* Post creation hook for new trigger */
1050  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1051  isInternal);
1052 
1053  /*
1054  * Lastly, create the trigger on child relations, if needed.
1055  */
1056  if (partition_recurse)
1057  {
1058  PartitionDesc partdesc = RelationGetPartitionDesc(rel);
1059  List *idxs = NIL;
1060  List *childTbls = NIL;
1061  ListCell *l;
1062  int i;
1063  MemoryContext oldcxt,
1064  perChildCxt;
1065 
1066  perChildCxt = AllocSetContextCreate(CurTransactionContext,
1067  "part trig clone",
1068  ALLOCSET_SMALL_SIZES);
1069 
1070  /*
1071  * When a trigger is being created associated with an index, we'll
1072  * need to associate the trigger in each child partition with the
1073  * corresponding index on it.
1074  */
1075  if (OidIsValid(indexOid))
1076  {
1077  ListCell *l;
1078  List *idxs = NIL;
1079 
1080  idxs = find_inheritance_children(indexOid, ShareRowExclusiveLock);
1081  foreach(l, idxs)
1082  childTbls = lappend_oid(childTbls,
1083  IndexGetRelation(lfirst_oid(l),
1084  false));
1085  }
1086 
1087  oldcxt = MemoryContextSwitchTo(perChildCxt);
1088 
1089  /* Iterate to create the trigger on each existing partition */
1090  for (i = 0; i < partdesc->nparts; i++)
1091  {
1092  Oid indexOnChild = InvalidOid;
1093  ListCell *l2;
1094  CreateTrigStmt *childStmt;
1095  Relation childTbl;
1096  Node *qual;
1097 
1098  childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1099 
1100  /* Find which of the child indexes is the one on this partition */
1101  if (OidIsValid(indexOid))
1102  {
1103  forboth(l, idxs, l2, childTbls)
1104  {
1105  if (lfirst_oid(l2) == partdesc->oids[i])
1106  {
1107  indexOnChild = lfirst_oid(l);
1108  break;
1109  }
1110  }
1111  if (!OidIsValid(indexOnChild))
1112  elog(ERROR, "failed to find index matching index \"%s\" in partition \"%s\"",
1113  get_rel_name(indexOid),
1114  get_rel_name(partdesc->oids[i]));
1115  }
1116 
1117  /*
1118  * Initialize our fabricated parse node by copying the original
1119  * one, then resetting fields that we pass separately.
1120  */
1121  childStmt = (CreateTrigStmt *) copyObject(stmt);
1122  childStmt->funcname = NIL;
1123  childStmt->whenClause = NULL;
1124 
1125  /* If there is a WHEN clause, create a modified copy of it */
1126  qual = copyObject(whenClause);
1127  qual = (Node *)
1128  map_partition_varattnos((List *) qual, PRS2_OLD_VARNO,
1129  childTbl, rel);
1130  qual = (Node *)
1131  map_partition_varattnos((List *) qual, PRS2_NEW_VARNO,
1132  childTbl, rel);
1133 
1134  CreateTrigger(childStmt, queryString,
1135  partdesc->oids[i], refRelOid,
1136  InvalidOid, indexOnChild,
1137  funcoid, trigoid, qual,
1138  isInternal, true);
1139 
1140  table_close(childTbl, NoLock);
1141 
1142  MemoryContextReset(perChildCxt);
1143  }
1144 
1145  MemoryContextSwitchTo(oldcxt);
1146  MemoryContextDelete(perChildCxt);
1147  list_free(idxs);
1148  list_free(childTbls);
1149  }
1150 
1151  /* Keep lock on target rel until end of xact */
1152  table_close(rel, NoLock);
1153 
1154  return myself;
1155 }
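
/*
 * Hypothetical usage sketch (not part of the original file): a minimal
 * internal caller creating a plain AFTER INSERT ... FOR EACH ROW trigger.
 * The function and trigger names are invented; the CreateTrigStmt fields and
 * CreateTrigger() arguments follow the usage above. Because relOid and
 * funcOid are passed directly, stmt->relation and stmt->funcname can stay
 * empty, and with isInternal = true the caller is responsible for permission
 * checks and the trigger name gets the new OID appended.
 */
#ifdef NOT_USED
static ObjectAddress
create_simple_trigger_sketch(Oid relOid, Oid funcOid)
{
	CreateTrigStmt *stmt = makeNode(CreateTrigStmt);

	stmt->trigname = "sketch_after_insert";		/* hypothetical name */
	stmt->row = true;
	stmt->timing = TRIGGER_TYPE_AFTER;
	stmt->events = TRIGGER_TYPE_INSERT;

	return CreateTrigger(stmt, NULL,	/* no queryString: no WHEN clause */
						 relOid, InvalidOid, InvalidOid, InvalidOid,
						 funcOid, InvalidOid, NULL,
						 true /* isInternal */ , false /* in_partition */ );
}
#endif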
1156 
1157 
1158 /*
1159  * Guts of trigger deletion.
1160  */
1161 void
1162 RemoveTriggerById(Oid trigOid)
1163 {
1164  Relation tgrel;
1165  SysScanDesc tgscan;
1166  ScanKeyData skey[1];
1167  HeapTuple tup;
1168  Oid relid;
1169  Relation rel;
1170 
1171  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1172 
1173  /*
1174  * Find the trigger to delete.
1175  */
1176  ScanKeyInit(&skey[0],
1177  Anum_pg_trigger_oid,
1178  BTEqualStrategyNumber, F_OIDEQ,
1179  ObjectIdGetDatum(trigOid));
1180 
1181  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1182  NULL, 1, skey);
1183 
1184  tup = systable_getnext(tgscan);
1185  if (!HeapTupleIsValid(tup))
1186  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1187 
1188  /*
1189  * Open and exclusive-lock the relation the trigger belongs to.
1190  */
1191  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1192 
1193  rel = table_open(relid, AccessExclusiveLock);
1194 
1195  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1196  rel->rd_rel->relkind != RELKIND_VIEW &&
1197  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1198  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1199  ereport(ERROR,
1200  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1201  errmsg("\"%s\" is not a table, view, or foreign table",
1202  RelationGetRelationName(rel))));
1203 
1204  if (!allowSystemTableMods && IsSystemRelation(rel))
1205  ereport(ERROR,
1206  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1207  errmsg("permission denied: \"%s\" is a system catalog",
1208  RelationGetRelationName(rel))));
1209 
1210  /*
1211  * Delete the pg_trigger tuple.
1212  */
1213  CatalogTupleDelete(tgrel, &tup->t_self);
1214 
1215  systable_endscan(tgscan);
1216  table_close(tgrel, RowExclusiveLock);
1217 
1218  /*
1219  * We do not bother to try to determine whether any other triggers remain,
1220  * which would be needed in order to decide whether it's safe to clear the
1221  * relation's relhastriggers. (In any case, there might be a concurrent
1222  * process adding new triggers.) Instead, just force a relcache inval to
1223  * make other backends (and this one too!) rebuild their relcache entries.
1224  * There's no great harm in leaving relhastriggers true even if there are
1225  * no triggers left.
1226  */
1227  CacheInvalidateRelcache(rel);
1228 
1229  /* Keep lock on trigger's rel until end of xact */
1230  table_close(rel, NoLock);
1231 }
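
/*
 * Note (added for context, not in the original file): DROP TRIGGER does not
 * call RemoveTriggerById() directly; it goes through the dependency
 * machinery (performDeletion), which dispatches here for pg_trigger objects
 * so that dependent objects are cleaned up consistently.
 */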
1232 
1233 /*
1234  * get_trigger_oid - Look up a trigger by name to find its OID.
1235  *
1236  * If missing_ok is false, throw an error if trigger not found. If
1237  * true, just return InvalidOid.
1238  */
1239 Oid
1240 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1241 {
1242  Relation tgrel;
1243  ScanKeyData skey[2];
1244  SysScanDesc tgscan;
1245  HeapTuple tup;
1246  Oid oid;
1247 
1248  /*
1249  * Find the trigger, verify permissions, set up object address
1250  */
1251  tgrel = table_open(TriggerRelationId, AccessShareLock);
1252 
1253  ScanKeyInit(&skey[0],
1254  Anum_pg_trigger_tgrelid,
1255  BTEqualStrategyNumber, F_OIDEQ,
1256  ObjectIdGetDatum(relid));
1257  ScanKeyInit(&skey[1],
1258  Anum_pg_trigger_tgname,
1259  BTEqualStrategyNumber, F_NAMEEQ,
1260  CStringGetDatum(trigname));
1261 
1262  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1263  NULL, 2, skey);
1264 
1265  tup = systable_getnext(tgscan);
1266 
1267  if (!HeapTupleIsValid(tup))
1268  {
1269  if (!missing_ok)
1270  ereport(ERROR,
1271  (errcode(ERRCODE_UNDEFINED_OBJECT),
1272  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1273  trigname, get_rel_name(relid))));
1274  oid = InvalidOid;
1275  }
1276  else
1277  {
1278  oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1279  }
1280 
1281  systable_endscan(tgscan);
1282  table_close(tgrel, AccessShareLock);
1283  return oid;
1284 }
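
/*
 * Hypothetical usage sketch (not part of the original file): pairing
 * get_trigger_oid() with ObjectAddressSet() to build the ObjectAddress of a
 * named trigger, e.g. for the dependency machinery. The function name is
 * invented; the helpers are the same ones used elsewhere in this file.
 */
#ifdef NOT_USED
static ObjectAddress
get_trigger_address_sketch(Oid relid, const char *trigname)
{
	ObjectAddress addr;
	Oid			trigoid = get_trigger_oid(relid, trigname, false);

	ObjectAddressSet(addr, TriggerRelationId, trigoid);
	return addr;
}
#endif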
1285 
1286 /*
1287  * Perform permissions and integrity checks before acquiring a relation lock.
1288  */
1289 static void
1290 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1291  void *arg)
1292 {
1293  HeapTuple tuple;
1294  Form_pg_class form;
1295 
1296  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1297  if (!HeapTupleIsValid(tuple))
1298  return; /* concurrently dropped */
1299  form = (Form_pg_class) GETSTRUCT(tuple);
1300 
1301  /* only tables and views can have triggers */
1302  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1303  form->relkind != RELKIND_FOREIGN_TABLE &&
1304  form->relkind != RELKIND_PARTITIONED_TABLE)
1305  ereport(ERROR,
1306  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1307  errmsg("\"%s\" is not a table, view, or foreign table",
1308  rv->relname)));
1309 
1310  /* you must own the table to rename one of its triggers */
1311  if (!pg_class_ownercheck(relid, GetUserId()))
1312  aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname);
1313  if (!allowSystemTableMods && IsSystemClass(relid, form))
1314  ereport(ERROR,
1315  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1316  errmsg("permission denied: \"%s\" is a system catalog",
1317  rv->relname)));
1318 
1319  ReleaseSysCache(tuple);
1320 }
1321 
1322 /*
1323  * renametrig - changes the name of a trigger on a relation
1324  *
1325  * trigger name is changed in trigger catalog.
1326  * No record of the previous name is kept.
1327  *
1328  * get proper relrelation from relation catalog (if not arg)
1329  * scan trigger catalog
1330  * for name conflict (within rel)
1331  * for original trigger (if not arg)
1332  * modify tgname in trigger tuple
1333  * update row in catalog
1334  */
1335 ObjectAddress
1336 renametrig(RenameStmt *stmt)
1337 {
1338  Oid tgoid;
1339  Relation targetrel;
1340  Relation tgrel;
1341  HeapTuple tuple;
1342  SysScanDesc tgscan;
1343  ScanKeyData key[2];
1344  Oid relid;
1345  ObjectAddress address;
1346 
1347  /*
1348  * Look up name, check permissions, and acquire lock (which we will NOT
1349  * release until end of transaction).
1350  */
1351  relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1352  0,
1353  RangeVarCallbackForRenameTrigger,
1354  NULL);
1355 
1356  /* Have lock already, so just need to build relcache entry. */
1357  targetrel = relation_open(relid, NoLock);
1358 
1359  /*
1360  * Scan pg_trigger twice for existing triggers on relation. We do this in
1361  * order to ensure a trigger does not exist with newname (The unique index
1362  * on tgrelid/tgname would complain anyway) and to ensure a trigger does
1363  * exist with oldname.
1364  *
1365  * NOTE that this is cool only because we have AccessExclusiveLock on the
1366  * relation, so the trigger set won't be changing underneath us.
1367  */
1368  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1369 
1370  /*
1371  * First pass -- look for name conflict
1372  */
1373  ScanKeyInit(&key[0],
1374  Anum_pg_trigger_tgrelid,
1375  BTEqualStrategyNumber, F_OIDEQ,
1376  ObjectIdGetDatum(relid));
1377  ScanKeyInit(&key[1],
1378  Anum_pg_trigger_tgname,
1379  BTEqualStrategyNumber, F_NAMEEQ,
1380  PointerGetDatum(stmt->newname));
1381  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1382  NULL, 2, key);
1383  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1384  ereport(ERROR,
1385  (errcode(ERRCODE_DUPLICATE_OBJECT),
1386  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1387  stmt->newname, RelationGetRelationName(targetrel))));
1388  systable_endscan(tgscan);
1389 
1390  /*
1391  * Second pass -- look for trigger existing with oldname and update
1392  */
1393  ScanKeyInit(&key[0],
1394  Anum_pg_trigger_tgrelid,
1395  BTEqualStrategyNumber, F_OIDEQ,
1396  ObjectIdGetDatum(relid));
1397  ScanKeyInit(&key[1],
1398  Anum_pg_trigger_tgname,
1399  BTEqualStrategyNumber, F_NAMEEQ,
1400  PointerGetDatum(stmt->subname));
1401  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1402  NULL, 2, key);
1403  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1404  {
1405  Form_pg_trigger trigform;
1406 
1407  /*
1408  * Update pg_trigger tuple with new tgname.
1409  */
1410  tuple = heap_copytuple(tuple); /* need a modifiable copy */
1411  trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1412  tgoid = trigform->oid;
1413 
1414  namestrcpy(&trigform->tgname,
1415  stmt->newname);
1416 
1417  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1418 
1419  InvokeObjectPostAlterHook(TriggerRelationId,
1420  tgoid, 0);
1421 
1422  /*
1423  * Invalidate relation's relcache entry so that other backends (and
1424  * this one too!) are sent SI message to make them rebuild relcache
1425  * entries. (Ideally this should happen automatically...)
1426  */
1427  CacheInvalidateRelcache(targetrel);
1428  }
1429  else
1430  {
1431  ereport(ERROR,
1432  (errcode(ERRCODE_UNDEFINED_OBJECT),
1433  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1434  stmt->subname, RelationGetRelationName(targetrel))));
1435  }
1436 
1437  ObjectAddressSet(address, TriggerRelationId, tgoid);
1438 
1439  systable_endscan(tgscan);
1440 
1441  table_close(tgrel, RowExclusiveLock);
1442 
1443  /*
1444  * Close rel, but keep exclusive lock!
1445  */
1446  relation_close(targetrel, NoLock);
1447 
1448  return address;
1449 }
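
/*
 * Hypothetical usage sketch (not part of the original file): ALTER TRIGGER
 * ... RENAME TO boils down to a RenameStmt whose subname/newname carry the
 * old and new trigger names. Only fields consulted above are set, and the
 * function name is invented.
 */
#ifdef NOT_USED
static ObjectAddress
rename_trigger_sketch(RangeVar *rel, const char *oldname, const char *newname)
{
	RenameStmt *stmt = makeNode(RenameStmt);

	stmt->renameType = OBJECT_TRIGGER;
	stmt->relation = rel;
	stmt->subname = pstrdup(oldname);
	stmt->newname = pstrdup(newname);

	return renametrig(stmt);
}
#endif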
1450 
1451 
1452 /*
1453  * EnableDisableTrigger()
1454  *
1455  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1456  * to change 'tgenabled' field for the specified trigger(s)
1457  *
1458  * rel: relation to process (caller must hold suitable lock on it)
1459  * tgname: trigger to process, or NULL to scan all triggers
1460  * fires_when: new value for tgenabled field. In addition to generic
1461  * enablement/disablement, this also defines when the trigger
1462  * should be fired in session replication roles.
1463  * skip_system: if true, skip "system" triggers (constraint triggers)
1464  *
1465  * Caller should have checked permissions for the table; here we also
1466  * enforce that superuser privilege is required to alter the state of
1467  * system triggers
1468  */
1469 void
1470 EnableDisableTrigger(Relation rel, const char *tgname,
1471  char fires_when, bool skip_system, LOCKMODE lockmode)
1472 {
1473  Relation tgrel;
1474  int nkeys;
1475  ScanKeyData keys[2];
1476  SysScanDesc tgscan;
1477  HeapTuple tuple;
1478  bool found;
1479  bool changed;
1480 
1481  /* Scan the relevant entries in pg_trigger */
1482  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1483 
1484  ScanKeyInit(&keys[0],
1485  Anum_pg_trigger_tgrelid,
1486  BTEqualStrategyNumber, F_OIDEQ,
1487  ObjectIdGetDatum(RelationGetRelid(rel)));
1488  if (tgname)
1489  {
1490  ScanKeyInit(&keys[1],
1491  Anum_pg_trigger_tgname,
1492  BTEqualStrategyNumber, F_NAMEEQ,
1493  CStringGetDatum(tgname));
1494  nkeys = 2;
1495  }
1496  else
1497  nkeys = 1;
1498 
1499  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1500  NULL, nkeys, keys);
1501 
1502  found = changed = false;
1503 
1504  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1505  {
1506  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1507 
1508  if (oldtrig->tgisinternal)
1509  {
1510  /* system trigger ... ok to process? */
1511  if (skip_system)
1512  continue;
1513  if (!superuser())
1514  ereport(ERROR,
1515  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1516  errmsg("permission denied: \"%s\" is a system trigger",
1517  NameStr(oldtrig->tgname))));
1518  }
1519 
1520  found = true;
1521 
1522  if (oldtrig->tgenabled != fires_when)
1523  {
1524  /* need to change this one ... make a copy to scribble on */
1525  HeapTuple newtup = heap_copytuple(tuple);
1526  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1527 
1528  newtrig->tgenabled = fires_when;
1529 
1530  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1531 
1532  heap_freetuple(newtup);
1533 
1534  changed = true;
1535  }
1536 
1537  InvokeObjectPostAlterHook(TriggerRelationId,
1538  oldtrig->oid, 0);
1539  }
1540 
1541  systable_endscan(tgscan);
1542 
1543  table_close(tgrel, RowExclusiveLock);
1544 
1545  if (tgname && !found)
1546  ereport(ERROR,
1547  (errcode(ERRCODE_UNDEFINED_OBJECT),
1548  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1549  tgname, RelationGetRelationName(rel))));
1550 
1551  /*
1552  * If we changed anything, broadcast a SI inval message to force each
1553  * backend (including our own!) to rebuild relation's relcache entry.
1554  * Otherwise they will fail to apply the change promptly.
1555  */
1556  if (changed)
1557  CacheInvalidateRelcache(rel);
1558 }
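
/*
 * Hypothetical usage sketch (not part of the original file): disabling one
 * user trigger by name, roughly what ALTER TABLE ... DISABLE TRIGGER does
 * once the relation has been opened and locked. TRIGGER_DISABLED, like
 * TRIGGER_FIRES_ON_ORIGIN above, is one of the tgenabled states from
 * catalog/pg_trigger.h; the function name is invented.
 */
#ifdef NOT_USED
static void
disable_one_trigger_sketch(Relation rel, const char *trigname)
{
	EnableDisableTrigger(rel, trigname,
						 TRIGGER_DISABLED,	/* new tgenabled state */
						 false,		/* do not skip system triggers */
						 ShareRowExclusiveLock);
}
#endif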
1559 
1560 
1561 /*
1562  * Build trigger data to attach to the given relcache entry.
1563  *
1564  * Note that trigger data attached to a relcache entry must be stored in
1565  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1566  * But we should be running in a less long-lived working context. To avoid
1567  * leaking cache memory if this routine fails partway through, we build a
1568  * temporary TriggerDesc in working memory and then copy the completed
1569  * structure into cache memory.
1570  */
1571 void
1572 RelationBuildTriggers(Relation relation)
1573 {
1574  TriggerDesc *trigdesc;
1575  int numtrigs;
1576  int maxtrigs;
1577  Trigger *triggers;
1578  Relation tgrel;
1579  ScanKeyData skey;
1580  SysScanDesc tgscan;
1581  HeapTuple htup;
1582  MemoryContext oldContext;
1583  int i;
1584 
1585  /*
1586  * Allocate a working array to hold the triggers (the array is extended if
1587  * necessary)
1588  */
1589  maxtrigs = 16;
1590  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1591  numtrigs = 0;
1592 
1593  /*
1594  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1595  * be reading the triggers in name order, except possibly during
1596  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1597  * ensures that triggers will be fired in name order.
1598  */
1599  ScanKeyInit(&skey,
1600  Anum_pg_trigger_tgrelid,
1601  BTEqualStrategyNumber, F_OIDEQ,
1602  ObjectIdGetDatum(RelationGetRelid(relation)));
1603 
1604  tgrel = table_open(TriggerRelationId, AccessShareLock);
1605  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1606  NULL, 1, &skey);
1607 
1608  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1609  {
1610  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1611  Trigger *build;
1612  Datum datum;
1613  bool isnull;
1614 
1615  if (numtrigs >= maxtrigs)
1616  {
1617  maxtrigs *= 2;
1618  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1619  }
1620  build = &(triggers[numtrigs]);
1621 
1622  build->tgoid = pg_trigger->oid;
1623  build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1624  NameGetDatum(&pg_trigger->tgname)));
1625  build->tgfoid = pg_trigger->tgfoid;
1626  build->tgtype = pg_trigger->tgtype;
1627  build->tgenabled = pg_trigger->tgenabled;
1628  build->tgisinternal = pg_trigger->tgisinternal;
1629  build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1630  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1631  build->tgconstrindid = pg_trigger->tgconstrindid;
1632  build->tgconstraint = pg_trigger->tgconstraint;
1633  build->tgdeferrable = pg_trigger->tgdeferrable;
1634  build->tginitdeferred = pg_trigger->tginitdeferred;
1635  build->tgnargs = pg_trigger->tgnargs;
1636  /* tgattr is first var-width field, so OK to access directly */
1637  build->tgnattr = pg_trigger->tgattr.dim1;
1638  if (build->tgnattr > 0)
1639  {
1640  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1641  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1642  build->tgnattr * sizeof(int16));
1643  }
1644  else
1645  build->tgattr = NULL;
1646  if (build->tgnargs > 0)
1647  {
1648  bytea *val;
1649  char *p;
1650 
1651  val = DatumGetByteaPP(fastgetattr(htup,
1652  Anum_pg_trigger_tgargs,
1653  tgrel->rd_att, &isnull));
1654  if (isnull)
1655  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1656  RelationGetRelationName(relation));
1657  p = (char *) VARDATA_ANY(val);
1658  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1659  for (i = 0; i < build->tgnargs; i++)
1660  {
1661  build->tgargs[i] = pstrdup(p);
1662  p += strlen(p) + 1;
1663  }
1664  }
1665  else
1666  build->tgargs = NULL;
1667 
1668  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1669  tgrel->rd_att, &isnull);
1670  if (!isnull)
1671  build->tgoldtable =
1672  DatumGetCString(DirectFunctionCall1(nameout, datum));
1673  else
1674  build->tgoldtable = NULL;
1675 
1676  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1677  tgrel->rd_att, &isnull);
1678  if (!isnull)
1679  build->tgnewtable =
1680  DatumGetCString(DirectFunctionCall1(nameout, datum));
1681  else
1682  build->tgnewtable = NULL;
1683 
1684  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1685  tgrel->rd_att, &isnull);
1686  if (!isnull)
1687  build->tgqual = TextDatumGetCString(datum);
1688  else
1689  build->tgqual = NULL;
1690 
1691  numtrigs++;
1692  }
1693 
1694  systable_endscan(tgscan);
1695  table_close(tgrel, AccessShareLock);
1696 
1697  /* There might not be any triggers */
1698  if (numtrigs == 0)
1699  {
1700  pfree(triggers);
1701  return;
1702  }
1703 
1704  /* Build trigdesc */
1705  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1706  trigdesc->triggers = triggers;
1707  trigdesc->numtriggers = numtrigs;
1708  for (i = 0; i < numtrigs; i++)
1709  SetTriggerFlags(trigdesc, &(triggers[i]));
1710 
1711  /* Copy completed trigdesc into cache storage */
1712  oldContext = MemoryContextSwitchTo(CacheMemoryContext);
1713  relation->trigdesc = CopyTriggerDesc(trigdesc);
1714  MemoryContextSwitchTo(oldContext);
1715 
1716  /* Release working memory */
1717  FreeTriggerDesc(trigdesc);
1718 }
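
/*
 * Illustrative sketch (not part of the original file): once the relcache
 * entry carries a TriggerDesc, callers can consult the hint flags set by
 * SetTriggerFlags() below without rescanning pg_trigger; the function name
 * is invented.
 */
#ifdef NOT_USED
static bool
has_before_insert_row_triggers_sketch(Relation rel)
{
	TriggerDesc *trigdesc = rel->trigdesc;

	return trigdesc != NULL && trigdesc->trig_insert_before_row;
}
#endif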
1719 
1720 /*
1721  * Update the TriggerDesc's hint flags to include the specified trigger
1722  */
1723 static void
1724 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
1725 {
1726  int16 tgtype = trigger->tgtype;
1727 
1728  trigdesc->trig_insert_before_row |=
1729  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1730  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1731  trigdesc->trig_insert_after_row |=
1732  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1733  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1734  trigdesc->trig_insert_instead_row |=
1735  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1736  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
1737  trigdesc->trig_insert_before_statement |=
1738  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1739  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1740  trigdesc->trig_insert_after_statement |=
1741  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1742  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1743  trigdesc->trig_update_before_row |=
1744  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1745  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1746  trigdesc->trig_update_after_row |=
1747  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1748  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1749  trigdesc->trig_update_instead_row |=
1750  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1751  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
1752  trigdesc->trig_update_before_statement |=
1753  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1754  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1755  trigdesc->trig_update_after_statement |=
1756  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1757  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1758  trigdesc->trig_delete_before_row |=
1759  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1760  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1761  trigdesc->trig_delete_after_row |=
1762  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1763  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1764  trigdesc->trig_delete_instead_row |=
1765  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1766  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
1767  trigdesc->trig_delete_before_statement |=
1768  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1769  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1770  trigdesc->trig_delete_after_statement |=
1771  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1772  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1773  /* there are no row-level truncate triggers */
1774  trigdesc->trig_truncate_before_statement |=
1775  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1776  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
1777  trigdesc->trig_truncate_after_statement |=
1778  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1779  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
1780 
1781  trigdesc->trig_insert_new_table |=
1782  (TRIGGER_FOR_INSERT(tgtype) &&
1783  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1784  trigdesc->trig_update_old_table |=
1785  (TRIGGER_FOR_UPDATE(tgtype) &&
1786  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1787  trigdesc->trig_update_new_table |=
1788  (TRIGGER_FOR_UPDATE(tgtype) &&
1789  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1790  trigdesc->trig_delete_old_table |=
1791  (TRIGGER_FOR_DELETE(tgtype) &&
1792  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1793 }
1794 
1795 /*
1796  * Copy a TriggerDesc data structure.
1797  *
1798  * The copy is allocated in the current memory context.
1799  */
1800 TriggerDesc *
1801 CopyTriggerDesc(TriggerDesc *trigdesc)
1802 {
1803  TriggerDesc *newdesc;
1804  Trigger *trigger;
1805  int i;
1806 
1807  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
1808  return NULL;
1809 
1810  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
1811  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
1812 
1813  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
1814  memcpy(trigger, trigdesc->triggers,
1815  trigdesc->numtriggers * sizeof(Trigger));
1816  newdesc->triggers = trigger;
1817 
1818  for (i = 0; i < trigdesc->numtriggers; i++)
1819  {
1820  trigger->tgname = pstrdup(trigger->tgname);
1821  if (trigger->tgnattr > 0)
1822  {
1823  int16 *newattr;
1824 
1825  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
1826  memcpy(newattr, trigger->tgattr,
1827  trigger->tgnattr * sizeof(int16));
1828  trigger->tgattr = newattr;
1829  }
1830  if (trigger->tgnargs > 0)
1831  {
1832  char **newargs;
1833  int16 j;
1834 
1835  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
1836  for (j = 0; j < trigger->tgnargs; j++)
1837  newargs[j] = pstrdup(trigger->tgargs[j]);
1838  trigger->tgargs = newargs;
1839  }
1840  if (trigger->tgqual)
1841  trigger->tgqual = pstrdup(trigger->tgqual);
1842  if (trigger->tgoldtable)
1843  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
1844  if (trigger->tgnewtable)
1845  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
1846  trigger++;
1847  }
1848 
1849  return newdesc;
1850 }
1851 
1852 /*
1853  * Free a TriggerDesc data structure.
1854  */
1855 void
1856 FreeTriggerDesc(TriggerDesc *trigdesc)
1857 {
1858  Trigger *trigger;
1859  int i;
1860 
1861  if (trigdesc == NULL)
1862  return;
1863 
1864  trigger = trigdesc->triggers;
1865  for (i = 0; i < trigdesc->numtriggers; i++)
1866  {
1867  pfree(trigger->tgname);
1868  if (trigger->tgnattr > 0)
1869  pfree(trigger->tgattr);
1870  if (trigger->tgnargs > 0)
1871  {
1872  while (--(trigger->tgnargs) >= 0)
1873  pfree(trigger->tgargs[trigger->tgnargs]);
1874  pfree(trigger->tgargs);
1875  }
1876  if (trigger->tgqual)
1877  pfree(trigger->tgqual);
1878  if (trigger->tgoldtable)
1879  pfree(trigger->tgoldtable);
1880  if (trigger->tgnewtable)
1881  pfree(trigger->tgnewtable);
1882  trigger++;
1883  }
1884  pfree(trigdesc->triggers);
1885  pfree(trigdesc);
1886 }
1887 
1888 /*
1889  * Compare two TriggerDesc structures for logical equality.
1890  */
1891 #ifdef NOT_USED
1892 bool
1893 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
1894 {
1895  int i,
1896  j;
1897 
1898  /*
1899  * We need not examine the hint flags, just the trigger array itself; if
1900  * we have the same triggers with the same types, the flags should match.
1901  *
1902  * As of 7.3 we assume trigger set ordering is significant in the
1903  * comparison; so we just compare corresponding slots of the two sets.
1904  *
1905  * Note: comparing the stringToNode forms of the WHEN clauses means that
1906  * parse column locations will affect the result. This is okay as long as
1907  * this function is only used for detecting exact equality, as for example
1908  * in checking for staleness of a cache entry.
1909  */
1910  if (trigdesc1 != NULL)
1911  {
1912  if (trigdesc2 == NULL)
1913  return false;
1914  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
1915  return false;
1916  for (i = 0; i < trigdesc1->numtriggers; i++)
1917  {
1918  Trigger *trig1 = trigdesc1->triggers + i;
1919  Trigger *trig2 = trigdesc2->triggers + i;
1920 
1921  if (trig1->tgoid != trig2->tgoid)
1922  return false;
1923  if (strcmp(trig1->tgname, trig2->tgname) != 0)
1924  return false;
1925  if (trig1->tgfoid != trig2->tgfoid)
1926  return false;
1927  if (trig1->tgtype != trig2->tgtype)
1928  return false;
1929  if (trig1->tgenabled != trig2->tgenabled)
1930  return false;
1931  if (trig1->tgisinternal != trig2->tgisinternal)
1932  return false;
1933  if (trig1->tgisclone != trig2->tgisclone)
1934  return false;
1935  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
1936  return false;
1937  if (trig1->tgconstrindid != trig2->tgconstrindid)
1938  return false;
1939  if (trig1->tgconstraint != trig2->tgconstraint)
1940  return false;
1941  if (trig1->tgdeferrable != trig2->tgdeferrable)
1942  return false;
1943  if (trig1->tginitdeferred != trig2->tginitdeferred)
1944  return false;
1945  if (trig1->tgnargs != trig2->tgnargs)
1946  return false;
1947  if (trig1->tgnattr != trig2->tgnattr)
1948  return false;
1949  if (trig1->tgnattr > 0 &&
1950  memcmp(trig1->tgattr, trig2->tgattr,
1951  trig1->tgnattr * sizeof(int16)) != 0)
1952  return false;
1953  for (j = 0; j < trig1->tgnargs; j++)
1954  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
1955  return false;
1956  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
1957  /* ok */ ;
1958  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
1959  return false;
1960  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
1961  return false;
1962  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
1963  /* ok */ ;
1964  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
1965  return false;
1966  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
1967  return false;
1968  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
1969  /* ok */ ;
1970  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
1971  return false;
1972  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
1973  return false;
1974  }
1975  }
1976  else if (trigdesc2 != NULL)
1977  return false;
1978  return true;
1979 }
1980 #endif /* NOT_USED */
1981 
1982 /*
1983  * Check if there is a row-level trigger with transition tables that prevents
1984  * a table from becoming an inheritance child or partition. Return the name
1985  * of the first such incompatible trigger, or NULL if there is none.
1986  */
1987 const char *
1988 FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
1989 {
1990  if (trigdesc != NULL)
1991  {
1992  int i;
1993 
1994  for (i = 0; i < trigdesc->numtriggers; ++i)
1995  {
1996  Trigger *trigger = &trigdesc->triggers[i];
1997 
1998  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
1999  return trigger->tgname;
2000  }
2001  }
2002 
2003  return NULL;
2004 }
2005 
2006 /*
2007  * Call a trigger function.
2008  *
2009  * trigdata: trigger descriptor.
2010  * tgindx: trigger's index in finfo and instr arrays.
2011  * finfo: array of cached trigger function call information.
2012  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2013  * per_tuple_context: memory context to execute the function in.
2014  *
2015  * Returns the tuple (or NULL) as returned by the function.
2016  */
2017 static HeapTuple
2018 ExecCallTriggerFunc(TriggerData *trigdata,
2019  int tgindx,
2020  FmgrInfo *finfo,
2021  Instrumentation *instr,
2022  MemoryContext per_tuple_context)
2023 {
2024  LOCAL_FCINFO(fcinfo, 0);
2025  PgStat_FunctionCallUsage fcusage;
2026  Datum result;
2027  MemoryContext oldContext;
2028 
2029  /*
2030  * Protect against code paths that may fail to initialize transition table
2031  * info.
2032  */
2033  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2034  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2035  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2036  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2037  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2038  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2039  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2040 
2041  finfo += tgindx;
2042 
2043  /*
2044  * We cache fmgr lookup info, to avoid making the lookup again on each
2045  * call.
2046  */
2047  if (finfo->fn_oid == InvalidOid)
2048  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2049 
2050  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2051 
2052  /*
2053  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2054  */
2055  if (instr)
2056  InstrStartNode(instr + tgindx);
2057 
2058  /*
2059  * Do the function evaluation in the per-tuple memory context, so that
2060  * leaked memory will be reclaimed once per tuple. Note in particular that
2061  * any new tuple created by the trigger function will live till the end of
2062  * the tuple cycle.
2063  */
2064  oldContext = MemoryContextSwitchTo(per_tuple_context);
2065 
2066  /*
2067  * Call the function, passing no arguments but setting a context.
2068  */
2069  InitFunctionCallInfoData(*fcinfo, finfo, 0,
2070  InvalidOid, (Node *) trigdata, NULL);
2071 
2072  pgstat_init_function_usage(fcinfo, &fcusage);
2073 
2074  MyTriggerDepth++;
2075  PG_TRY();
2076  {
2077  result = FunctionCallInvoke(fcinfo);
2078  }
2079  PG_FINALLY();
2080  {
2081  MyTriggerDepth--;
2082  }
2083  PG_END_TRY();
2084 
2085  pgstat_end_function_usage(&fcusage, true);
2086 
2087  MemoryContextSwitchTo(oldContext);
2088 
2089  /*
2090  * Trigger protocol allows function to return a null pointer, but NOT to
2091  * set the isnull result flag.
2092  */
2093  if (fcinfo->isnull)
2094  ereport(ERROR,
2095  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2096  errmsg("trigger function %u returned null value",
2097  fcinfo->flinfo->fn_oid)));
2098 
2099  /*
2100  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2101  * one "tuple returned" (really the number of firings).
2102  */
2103  if (instr)
2104  InstrStopNode(instr + tgindx, 1);
2105 
2106  return (HeapTuple) DatumGetPointer(result);
2107 }
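
/*
 * Editor's illustration (not part of trigger.c): the call protocol implemented
 * by ExecCallTriggerFunc() above is what C-level trigger functions see.  A
 * minimal sketch of such a function, using only the documented fmgr and
 * TriggerData interfaces, might look like the following; the function name
 * example_noop_trigger is hypothetical.
 */
#include "postgres.h"
#include "fmgr.h"
#include "commands/trigger.h"

PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(example_noop_trigger);

Datum
example_noop_trigger(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	/* The trigger manager passes TriggerData via fcinfo->context */
	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "example_noop_trigger: not called by trigger manager");

	/*
	 * Return a tuple rather than setting the isnull flag; for a BEFORE ROW
	 * trigger, returning NULL would mean "skip this operation".
	 */
	if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) &&
		TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
		return PointerGetDatum(trigdata->tg_newtuple);

	return PointerGetDatum(trigdata->tg_trigtuple);
}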
2108 
2109 void
2110 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2111 {
2112  TriggerDesc *trigdesc;
2113  int i;
2114  TriggerData LocTriggerData = {0};
2115 
2116  trigdesc = relinfo->ri_TrigDesc;
2117 
2118  if (trigdesc == NULL)
2119  return;
2120  if (!trigdesc->trig_insert_before_statement)
2121  return;
2122 
2123  /* no-op if we already fired BS triggers in this context */
2124  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2125  CMD_INSERT))
2126  return;
2127 
2128  LocTriggerData.type = T_TriggerData;
2129  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2130  TRIGGER_EVENT_BEFORE;
2131  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2132  for (i = 0; i < trigdesc->numtriggers; i++)
2133  {
2134  Trigger *trigger = &trigdesc->triggers[i];
2135  HeapTuple newtuple;
2136 
2137  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2138  TRIGGER_TYPE_STATEMENT,
2139  TRIGGER_TYPE_BEFORE,
2140  TRIGGER_TYPE_INSERT))
2141  continue;
2142  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2143  NULL, NULL, NULL))
2144  continue;
2145 
2146  LocTriggerData.tg_trigger = trigger;
2147  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2148  i,
2149  relinfo->ri_TrigFunctions,
2150  relinfo->ri_TrigInstrument,
2151  GetPerTupleMemoryContext(estate));
2152 
2153  if (newtuple)
2154  ereport(ERROR,
2155  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2156  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2157  }
2158 }
2159 
2160 void
2161 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2162  TransitionCaptureState *transition_capture)
2163 {
2164  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2165 
2166  if (trigdesc && trigdesc->trig_insert_after_statement)
2167  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2168  false, NULL, NULL, NIL, NULL, transition_capture);
2169 }
2170 
2171 bool
2172 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2173  TupleTableSlot *slot)
2174 {
2175  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2176  HeapTuple newtuple = NULL;
2177  bool should_free;
2178  TriggerData LocTriggerData = {0};
2179  int i;
2180 
2181  LocTriggerData.type = T_TriggerData;
2182  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2183  TRIGGER_EVENT_ROW |
2184  TRIGGER_EVENT_BEFORE;
2185  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2186  for (i = 0; i < trigdesc->numtriggers; i++)
2187  {
2188  Trigger *trigger = &trigdesc->triggers[i];
2189  HeapTuple oldtuple;
2190 
2191  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2192  TRIGGER_TYPE_ROW,
2193  TRIGGER_TYPE_BEFORE,
2194  TRIGGER_TYPE_INSERT))
2195  continue;
2196  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2197  NULL, NULL, slot))
2198  continue;
2199 
2200  if (!newtuple)
2201  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2202 
2203  LocTriggerData.tg_trigslot = slot;
2204  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2205  LocTriggerData.tg_trigger = trigger;
2206  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2207  i,
2208  relinfo->ri_TrigFunctions,
2209  relinfo->ri_TrigInstrument,
2210  GetPerTupleMemoryContext(estate));
2211  if (newtuple == NULL)
2212  {
2213  if (should_free)
2214  heap_freetuple(oldtuple);
2215  return false; /* "do nothing" */
2216  }
2217  else if (newtuple != oldtuple)
2218  {
2219  ExecForceStoreHeapTuple(newtuple, slot, false);
2220 
2221  /*
2222  * After a tuple in a partition goes through a trigger, the user
2223  * could have changed the partition key enough that the tuple no
2224  * longer fits the partition. Verify that.
2225  */
2226  if (trigger->tgisclone &&
2227  !ExecPartitionCheck(relinfo, slot, estate, false))
2228  ereport(ERROR,
2229  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2230  errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2231  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2232  trigger->tgname,
2233  get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
2234  RelationGetRelationName(relinfo->ri_RelationDesc))));
2235 
2236  if (should_free)
2237  heap_freetuple(oldtuple);
2238 
2239  /* signal tuple should be re-fetched if used */
2240  newtuple = NULL;
2241  }
2242  }
2243 
2244  return true;
2245 }
2246 
2247 void
2248 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2249  TupleTableSlot *slot, List *recheckIndexes,
2250  TransitionCaptureState *transition_capture)
2251 {
2252  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2253 
2254  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2255  (transition_capture && transition_capture->tcs_insert_new_table))
2256  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2257  true, NULL, slot,
2258  recheckIndexes, NULL,
2259  transition_capture);
2260 }
2261 
2262 bool
2263 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2264  TupleTableSlot *slot)
2265 {
2266  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2267  HeapTuple newtuple = NULL;
2268  bool should_free;
2269  TriggerData LocTriggerData = {0};
2270  int i;
2271 
2272  LocTriggerData.type = T_TriggerData;
2273  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2274  TRIGGER_EVENT_ROW |
2275  TRIGGER_EVENT_INSTEAD;
2276  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2277  for (i = 0; i < trigdesc->numtriggers; i++)
2278  {
2279  Trigger *trigger = &trigdesc->triggers[i];
2280  HeapTuple oldtuple;
2281 
2282  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2283  TRIGGER_TYPE_ROW,
2284  TRIGGER_TYPE_INSTEAD,
2285  TRIGGER_TYPE_INSERT))
2286  continue;
2287  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2288  NULL, NULL, slot))
2289  continue;
2290 
2291  if (!newtuple)
2292  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2293 
2294  LocTriggerData.tg_trigslot = slot;
2295  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2296  LocTriggerData.tg_trigger = trigger;
2297  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2298  i,
2299  relinfo->ri_TrigFunctions,
2300  relinfo->ri_TrigInstrument,
2301  GetPerTupleMemoryContext(estate));
2302  if (newtuple == NULL)
2303  {
2304  if (should_free)
2305  heap_freetuple(oldtuple);
2306  return false; /* "do nothing" */
2307  }
2308  else if (newtuple != oldtuple)
2309  {
2310  ExecForceStoreHeapTuple(newtuple, slot, false);
2311 
2312  if (should_free)
2313  heap_freetuple(oldtuple);
2314 
2315  /* signal tuple should be re-fetched if used */
2316  newtuple = NULL;
2317  }
2318  }
2319 
2320  return true;
2321 }
2322 
2323 void
2324 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2325 {
2326  TriggerDesc *trigdesc;
2327  int i;
2328  TriggerData LocTriggerData = {0};
2329 
2330  trigdesc = relinfo->ri_TrigDesc;
2331 
2332  if (trigdesc == NULL)
2333  return;
2334  if (!trigdesc->trig_delete_before_statement)
2335  return;
2336 
2337  /* no-op if we already fired BS triggers in this context */
2338  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2339  CMD_DELETE))
2340  return;
2341 
2342  LocTriggerData.type = T_TriggerData;
2343  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2344  TRIGGER_EVENT_BEFORE;
2345  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2346  for (i = 0; i < trigdesc->numtriggers; i++)
2347  {
2348  Trigger *trigger = &trigdesc->triggers[i];
2349  HeapTuple newtuple;
2350 
2351  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2352  TRIGGER_TYPE_STATEMENT,
2353  TRIGGER_TYPE_BEFORE,
2354  TRIGGER_TYPE_DELETE))
2355  continue;
2356  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2357  NULL, NULL, NULL))
2358  continue;
2359 
2360  LocTriggerData.tg_trigger = trigger;
2361  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2362  i,
2363  relinfo->ri_TrigFunctions,
2364  relinfo->ri_TrigInstrument,
2365  GetPerTupleMemoryContext(estate));
2366 
2367  if (newtuple)
2368  ereport(ERROR,
2369  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2370  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2371  }
2372 }
2373 
2374 void
2375 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2376  TransitionCaptureState *transition_capture)
2377 {
2378  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2379 
2380  if (trigdesc && trigdesc->trig_delete_after_statement)
2381  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2382  false, NULL, NULL, NIL, NULL, transition_capture);
2383 }
2384 
2385 /*
2386  * Execute BEFORE ROW DELETE triggers.
2387  *
2388  * True indicates the caller can proceed with the delete. False indicates the
2389  * caller needs to suppress the delete; additionally, if requested, we pass
2390  * back the concurrently updated tuple, if any.
2391  */
2392 bool
2393 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2394  ResultRelInfo *relinfo,
2395  ItemPointer tupleid,
2396  HeapTuple fdw_trigtuple,
2397  TupleTableSlot **epqslot)
2398 {
2399  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2400  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2401  bool result = true;
2402  TriggerData LocTriggerData = {0};
2403  HeapTuple trigtuple;
2404  bool should_free = false;
2405  int i;
2406 
2407  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2408  if (fdw_trigtuple == NULL)
2409  {
2410  TupleTableSlot *epqslot_candidate = NULL;
2411 
2412  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2413  LockTupleExclusive, slot, &epqslot_candidate))
2414  return false;
2415 
2416  /*
2417  * If the tuple was concurrently updated and the caller of this
2418  * function requested the updated tuple, skip the trigger
2419  * execution.
2420  */
2421  if (epqslot_candidate != NULL && epqslot != NULL)
2422  {
2423  *epqslot = epqslot_candidate;
2424  return false;
2425  }
2426 
2427  trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2428 
2429  }
2430  else
2431  {
2432  trigtuple = fdw_trigtuple;
2433  ExecForceStoreHeapTuple(trigtuple, slot, false);
2434  }
2435 
2436  LocTriggerData.type = T_TriggerData;
2437  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2438  TRIGGER_EVENT_ROW |
2439  TRIGGER_EVENT_BEFORE;
2440  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2441  for (i = 0; i < trigdesc->numtriggers; i++)
2442  {
2443  HeapTuple newtuple;
2444  Trigger *trigger = &trigdesc->triggers[i];
2445 
2446  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2447  TRIGGER_TYPE_ROW,
2448  TRIGGER_TYPE_BEFORE,
2449  TRIGGER_TYPE_DELETE))
2450  continue;
2451  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2452  NULL, slot, NULL))
2453  continue;
2454 
2455  LocTriggerData.tg_trigslot = slot;
2456  LocTriggerData.tg_trigtuple = trigtuple;
2457  LocTriggerData.tg_trigger = trigger;
2458  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2459  i,
2460  relinfo->ri_TrigFunctions,
2461  relinfo->ri_TrigInstrument,
2462  GetPerTupleMemoryContext(estate));
2463  if (newtuple == NULL)
2464  {
2465  result = false; /* tell caller to suppress delete */
2466  break;
2467  }
2468  if (newtuple != trigtuple)
2469  heap_freetuple(newtuple);
2470  }
2471  if (should_free)
2472  heap_freetuple(trigtuple);
2473 
2474  return result;
2475 }
2476 
2477 void
2478 ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2479  ItemPointer tupleid,
2480  HeapTuple fdw_trigtuple,
2481  TransitionCaptureState *transition_capture)
2482 {
2483  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2484  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2485 
2486  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2487  (transition_capture && transition_capture->tcs_delete_old_table))
2488  {
2489  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2490  if (fdw_trigtuple == NULL)
2491  GetTupleForTrigger(estate,
2492  NULL,
2493  relinfo,
2494  tupleid,
2495  LockTupleExclusive,
2496  slot,
2497  NULL);
2498  else
2499  ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2500 
2501  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2502  true, slot, NULL, NIL, NULL,
2503  transition_capture);
2504  }
2505 }
2506 
2507 bool
2508 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2509  HeapTuple trigtuple)
2510 {
2511  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2512  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2513  TriggerData LocTriggerData = {0};
2514  int i;
2515 
2516  LocTriggerData.type = T_TriggerData;
2517  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2518  TRIGGER_EVENT_ROW |
2519  TRIGGER_EVENT_INSTEAD;
2520  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2521 
2522  ExecForceStoreHeapTuple(trigtuple, slot, false);
2523 
2524  for (i = 0; i < trigdesc->numtriggers; i++)
2525  {
2526  HeapTuple rettuple;
2527  Trigger *trigger = &trigdesc->triggers[i];
2528 
2529  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2530  TRIGGER_TYPE_ROW,
2531  TRIGGER_TYPE_INSTEAD,
2532  TRIGGER_TYPE_DELETE))
2533  continue;
2534  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2535  NULL, slot, NULL))
2536  continue;
2537 
2538  LocTriggerData.tg_trigslot = slot;
2539  LocTriggerData.tg_trigtuple = trigtuple;
2540  LocTriggerData.tg_trigger = trigger;
2541  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2542  i,
2543  relinfo->ri_TrigFunctions,
2544  relinfo->ri_TrigInstrument,
2545  GetPerTupleMemoryContext(estate));
2546  if (rettuple == NULL)
2547  return false; /* Delete was suppressed */
2548  if (rettuple != trigtuple)
2549  heap_freetuple(rettuple);
2550  }
2551  return true;
2552 }
2553 
2554 void
2555 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2556 {
2557  TriggerDesc *trigdesc;
2558  int i;
2559  TriggerData LocTriggerData = {0};
2560  Bitmapset *updatedCols;
2561 
2562  trigdesc = relinfo->ri_TrigDesc;
2563 
2564  if (trigdesc == NULL)
2565  return;
2566  if (!trigdesc->trig_update_before_statement)
2567  return;
2568 
2569  /* no-op if we already fired BS triggers in this context */
2570  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2571  CMD_UPDATE))
2572  return;
2573 
2574  updatedCols = GetAllUpdatedColumns(relinfo, estate);
2575 
2576  LocTriggerData.type = T_TriggerData;
2577  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2578  TRIGGER_EVENT_BEFORE;
2579  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2580  LocTriggerData.tg_updatedcols = updatedCols;
2581  for (i = 0; i < trigdesc->numtriggers; i++)
2582  {
2583  Trigger *trigger = &trigdesc->triggers[i];
2584  HeapTuple newtuple;
2585 
2586  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2587  TRIGGER_TYPE_STATEMENT,
2588  TRIGGER_TYPE_BEFORE,
2589  TRIGGER_TYPE_UPDATE))
2590  continue;
2591  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2592  updatedCols, NULL, NULL))
2593  continue;
2594 
2595  LocTriggerData.tg_trigger = trigger;
2596  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2597  i,
2598  relinfo->ri_TrigFunctions,
2599  relinfo->ri_TrigInstrument,
2600  GetPerTupleMemoryContext(estate));
2601 
2602  if (newtuple)
2603  ereport(ERROR,
2604  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2605  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2606  }
2607 }
2608 
2609 void
2610 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2611  TransitionCaptureState *transition_capture)
2612 {
2613  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2614 
2615  if (trigdesc && trigdesc->trig_update_after_statement)
2616  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2617  false, NULL, NULL, NIL,
2618  GetAllUpdatedColumns(relinfo, estate),
2619  transition_capture);
2620 }
2621 
2622 bool
2623 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2624  ResultRelInfo *relinfo,
2625  ItemPointer tupleid,
2626  HeapTuple fdw_trigtuple,
2627  TupleTableSlot *newslot)
2628 {
2629  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2630  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2631  HeapTuple newtuple = NULL;
2632  HeapTuple trigtuple;
2633  bool should_free_trig = false;
2634  bool should_free_new = false;
2635  TriggerData LocTriggerData = {0};
2636  int i;
2637  Bitmapset *updatedCols;
2638  LockTupleMode lockmode;
2639 
2640  /* Determine lock mode to use */
2641  lockmode = ExecUpdateLockMode(estate, relinfo);
2642 
2643  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2644  if (fdw_trigtuple == NULL)
2645  {
2646  TupleTableSlot *epqslot_candidate = NULL;
2647 
2648  /* get a copy of the on-disk tuple we are planning to update */
2649  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2650  lockmode, oldslot, &epqslot_candidate))
2651  return false; /* cancel the update action */
2652 
2653  /*
2654  * In READ COMMITTED isolation level it's possible that target tuple
2655  * was changed due to concurrent update. In that case we have a raw
2656  * subplan output tuple in epqslot_candidate, and need to run it
2657  * through the junk filter to produce an insertable tuple.
2658  *
2659  * Caution: more than likely, the passed-in slot is the same as the
2660  * junkfilter's output slot, so we are clobbering the original value
2661  * of slottuple by doing the filtering. This is OK since neither we
2662  * nor our caller have any more interest in the prior contents of that
2663  * slot.
2664  */
2665  if (epqslot_candidate != NULL)
2666  {
2667  TupleTableSlot *epqslot_clean;
2668 
2669  epqslot_clean = ExecFilterJunk(relinfo->ri_junkFilter, epqslot_candidate);
2670 
2671  if (newslot != epqslot_clean)
2672  ExecCopySlot(newslot, epqslot_clean);
2673  }
2674 
2675  trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
2676  }
2677  else
2678  {
2679  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
2680  trigtuple = fdw_trigtuple;
2681  }
2682 
2683  LocTriggerData.type = T_TriggerData;
2684  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2685  TRIGGER_EVENT_ROW |
2686  TRIGGER_EVENT_BEFORE;
2687  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2688  updatedCols = GetAllUpdatedColumns(relinfo, estate);
2689  LocTriggerData.tg_updatedcols = updatedCols;
2690  for (i = 0; i < trigdesc->numtriggers; i++)
2691  {
2692  Trigger *trigger = &trigdesc->triggers[i];
2693  HeapTuple oldtuple;
2694 
2695  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2696  TRIGGER_TYPE_ROW,
2697  TRIGGER_TYPE_BEFORE,
2698  TRIGGER_TYPE_UPDATE))
2699  continue;
2700  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2701  updatedCols, oldslot, newslot))
2702  continue;
2703 
2704  if (!newtuple)
2705  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
2706 
2707  LocTriggerData.tg_trigslot = oldslot;
2708  LocTriggerData.tg_trigtuple = trigtuple;
2709  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2710  LocTriggerData.tg_newslot = newslot;
2711  LocTriggerData.tg_trigger = trigger;
2712  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2713  i,
2714  relinfo->ri_TrigFunctions,
2715  relinfo->ri_TrigInstrument,
2716  GetPerTupleMemoryContext(estate));
2717 
2718  if (newtuple == NULL)
2719  {
2720  if (should_free_trig)
2721  heap_freetuple(trigtuple);
2722  if (should_free_new)
2723  heap_freetuple(oldtuple);
2724  return false; /* "do nothing" */
2725  }
2726  else if (newtuple != oldtuple)
2727  {
2728  ExecForceStoreHeapTuple(newtuple, newslot, false);
2729 
2730  if (trigger->tgisclone &&
2731  !ExecPartitionCheck(relinfo, newslot, estate, false))
2732  ereport(ERROR,
2733  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2734  errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2735  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2736  trigger->tgname,
2737  get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
2738  RelationGetRelationName(relinfo->ri_RelationDesc))));
2739 
2740  /*
2741  * If the tuple returned by the trigger (i.e. the one being stored) is
2742  * the old row version, and the heap tuple passed to the trigger was
2743  * allocated locally, materialize the slot. Otherwise we might
2744  * free it while still referenced by the slot.
2745  */
2746  if (should_free_trig && newtuple == trigtuple)
2747  ExecMaterializeSlot(newslot);
2748 
2749  if (should_free_new)
2750  heap_freetuple(oldtuple);
2751 
2752  /* signal tuple should be re-fetched if used */
2753  newtuple = NULL;
2754  }
2755  }
2756  if (should_free_trig)
2757  heap_freetuple(trigtuple);
2758 
2759  return true;
2760 }
2761 
2762 void
2763 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2764  ItemPointer tupleid,
2765  HeapTuple fdw_trigtuple,
2766  TupleTableSlot *newslot,
2767  List *recheckIndexes,
2768  TransitionCaptureState *transition_capture)
2769 {
2770  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2771  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2772 
2773  ExecClearTuple(oldslot);
2774 
2775  if ((trigdesc && trigdesc->trig_update_after_row) ||
2776  (transition_capture &&
2777  (transition_capture->tcs_update_old_table ||
2778  transition_capture->tcs_update_new_table)))
2779  {
2780  /*
2781  * Note: if the UPDATE is converted into a DELETE+INSERT as part of
2782  * update-partition-key operation, then this function is also called
2783  * separately for DELETE and INSERT to capture transition table rows.
2784  * In such case, either old tuple or new tuple can be NULL.
2785  */
2786  if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
2787  GetTupleForTrigger(estate,
2788  NULL,
2789  relinfo,
2790  tupleid,
2791  LockTupleExclusive,
2792  oldslot,
2793  NULL);
2794  else if (fdw_trigtuple != NULL)
2795  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
2796 
2797  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2798  true, oldslot, newslot, recheckIndexes,
2799  GetAllUpdatedColumns(relinfo, estate),
2800  transition_capture);
2801  }
2802 }
2803 
2804 bool
2805 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2806  HeapTuple trigtuple, TupleTableSlot *newslot)
2807 {
2808  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2809  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2810  HeapTuple newtuple = NULL;
2811  bool should_free;
2812  TriggerData LocTriggerData = {0};
2813  int i;
2814 
2815  LocTriggerData.type = T_TriggerData;
2816  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2817  TRIGGER_EVENT_ROW |
2818  TRIGGER_EVENT_INSTEAD;
2819  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2820 
2821  ExecForceStoreHeapTuple(trigtuple, oldslot, false);
2822 
2823  for (i = 0; i < trigdesc->numtriggers; i++)
2824  {
2825  Trigger *trigger = &trigdesc->triggers[i];
2826  HeapTuple oldtuple;
2827 
2828  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2829  TRIGGER_TYPE_ROW,
2830  TRIGGER_TYPE_INSTEAD,
2831  TRIGGER_TYPE_UPDATE))
2832  continue;
2833  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2834  NULL, oldslot, newslot))
2835  continue;
2836 
2837  if (!newtuple)
2838  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
2839 
2840  LocTriggerData.tg_trigslot = oldslot;
2841  LocTriggerData.tg_trigtuple = trigtuple;
2842  LocTriggerData.tg_newslot = newslot;
2843  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2844 
2845  LocTriggerData.tg_trigger = trigger;
2846  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2847  i,
2848  relinfo->ri_TrigFunctions,
2849  relinfo->ri_TrigInstrument,
2850  GetPerTupleMemoryContext(estate));
2851  if (newtuple == NULL)
2852  {
2853  return false; /* "do nothing" */
2854  }
2855  else if (newtuple != oldtuple)
2856  {
2857  ExecForceStoreHeapTuple(newtuple, newslot, false);
2858 
2859  if (should_free)
2860  heap_freetuple(oldtuple);
2861 
2862  /* signal tuple should be re-fetched if used */
2863  newtuple = NULL;
2864  }
2865  }
2866 
2867  return true;
2868 }
2869 
2870 void
2871 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2872 {
2873  TriggerDesc *trigdesc;
2874  int i;
2875  TriggerData LocTriggerData = {0};
2876 
2877  trigdesc = relinfo->ri_TrigDesc;
2878 
2879  if (trigdesc == NULL)
2880  return;
2881  if (!trigdesc->trig_truncate_before_statement)
2882  return;
2883 
2884  LocTriggerData.type = T_TriggerData;
2885  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
2886  TRIGGER_EVENT_BEFORE;
2887  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2888 
2889  for (i = 0; i < trigdesc->numtriggers; i++)
2890  {
2891  Trigger *trigger = &trigdesc->triggers[i];
2892  HeapTuple newtuple;
2893 
2894  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2895  TRIGGER_TYPE_STATEMENT,
2896  TRIGGER_TYPE_BEFORE,
2897  TRIGGER_TYPE_TRUNCATE))
2898  continue;
2899  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2900  NULL, NULL, NULL))
2901  continue;
2902 
2903  LocTriggerData.tg_trigger = trigger;
2904  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2905  i,
2906  relinfo->ri_TrigFunctions,
2907  relinfo->ri_TrigInstrument,
2908  GetPerTupleMemoryContext(estate));
2909 
2910  if (newtuple)
2911  ereport(ERROR,
2912  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2913  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2914  }
2915 }
2916 
2917 void
2918 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2919 {
2920  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2921 
2922  if (trigdesc && trigdesc->trig_truncate_after_statement)
2923  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_TRUNCATE,
2924  false, NULL, NULL, NIL, NULL, NULL);
2925 }
2926 
2927 
2928 static bool
2929 GetTupleForTrigger(EState *estate,
2930  EPQState *epqstate,
2931  ResultRelInfo *relinfo,
2932  ItemPointer tid,
2933  LockTupleMode lockmode,
2934  TupleTableSlot *oldslot,
2935  TupleTableSlot **epqslot)
2936 {
2937  Relation relation = relinfo->ri_RelationDesc;
2938 
2939  if (epqslot != NULL)
2940  {
2941  TM_Result test;
2942  TM_FailureData tmfd;
2943  int lockflags = 0;
2944 
2945  *epqslot = NULL;
2946 
2947  /* caller must pass an epqstate if EvalPlanQual is possible */
2948  Assert(epqstate != NULL);
2949 
2950  /*
2951  * lock tuple for update
2952  */
2953  if (!IsolationUsesXactSnapshot())
2954  lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
2955  test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
2956  estate->es_output_cid,
2957  lockmode, LockWaitBlock,
2958  lockflags,
2959  &tmfd);
2960 
2961  switch (test)
2962  {
2963  case TM_SelfModified:
2964 
2965  /*
2966  * The target tuple was already updated or deleted by the
2967  * current command, or by a later command in the current
2968  * transaction. We ignore the tuple in the former case, and
2969  * throw error in the latter case, for the same reasons
2970  * enumerated in ExecUpdate and ExecDelete in
2971  * nodeModifyTable.c.
2972  */
2973  if (tmfd.cmax != estate->es_output_cid)
2974  ereport(ERROR,
2975  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2976  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2977  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2978 
2979  /* treat it as deleted; do not process */
2980  return false;
2981 
2982  case TM_Ok:
2983  if (tmfd.traversed)
2984  {
2985  *epqslot = EvalPlanQual(epqstate,
2986  relation,
2987  relinfo->ri_RangeTableIndex,
2988  oldslot);
2989 
2990  /*
2991  * If PlanQual failed for updated tuple - we must not
2992  * process this tuple!
2993  */
2994  if (TupIsNull(*epqslot))
2995  {
2996  *epqslot = NULL;
2997  return false;
2998  }
2999  }
3000  break;
3001 
3002  case TM_Updated:
3003  if (IsolationUsesXactSnapshot())
3004  ereport(ERROR,
3005  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3006  errmsg("could not serialize access due to concurrent update")));
3007  elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3008  break;
3009 
3010  case TM_Deleted:
3011  if (IsolationUsesXactSnapshot())
3012  ereport(ERROR,
3013  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3014  errmsg("could not serialize access due to concurrent delete")));
3015  /* tuple was deleted */
3016  return false;
3017 
3018  case TM_Invisible:
3019  elog(ERROR, "attempted to lock invisible tuple");
3020  break;
3021 
3022  default:
3023  elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3024  return false; /* keep compiler quiet */
3025  }
3026  }
3027  else
3028  {
3029  /*
3030  * We expect the tuple to be present, thus very simple error handling
3031  * suffices.
3032  */
3033  if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3034  oldslot))
3035  elog(ERROR, "failed to fetch tuple for trigger");
3036  }
3037 
3038  return true;
3039 }
3040 
3041 /*
3042  * Is trigger enabled to fire?
3043  */
3044 static bool
3045 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3046  Trigger *trigger, TriggerEvent event,
3047  Bitmapset *modifiedCols,
3048  TupleTableSlot *oldslot, TupleTableSlot *newslot)
3049 {
3050  /* Check replication-role-dependent enable state */
3051  if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3052  {
3053  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3054  trigger->tgenabled == TRIGGER_DISABLED)
3055  return false;
3056  }
3057  else /* ORIGIN or LOCAL role */
3058  {
3059  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3060  trigger->tgenabled == TRIGGER_DISABLED)
3061  return false;
3062  }
3063 
3064  /*
3065  * Check for column-specific trigger (only possible for UPDATE, and in
3066  * fact we *must* ignore tgattr for other event types)
3067  */
3068  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3069  {
3070  int i;
3071  bool modified;
3072 
3073  modified = false;
3074  for (i = 0; i < trigger->tgnattr; i++)
3075  {
3076  if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3077  modifiedCols))
3078  {
3079  modified = true;
3080  break;
3081  }
3082  }
3083  if (!modified)
3084  return false;
3085  }
3086 
3087  /* Check for WHEN clause */
3088  if (trigger->tgqual)
3089  {
3090  ExprState **predicate;
3091  ExprContext *econtext;
3092  MemoryContext oldContext;
3093  int i;
3094 
3095  Assert(estate != NULL);
3096 
3097  /*
3098  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3099  * matching element of relinfo->ri_TrigWhenExprs[]
3100  */
3101  i = trigger - relinfo->ri_TrigDesc->triggers;
3102  predicate = &relinfo->ri_TrigWhenExprs[i];
3103 
3104  /*
3105  * If first time through for this WHEN expression, build expression
3106  * nodetrees for it. Keep them in the per-query memory context so
3107  * they'll survive throughout the query.
3108  */
3109  if (*predicate == NULL)
3110  {
3111  Node *tgqual;
3112 
3113  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3114  tgqual = stringToNode(trigger->tgqual);
3115  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3116  ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3117  ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3118  /* ExecPrepareQual wants implicit-AND form */
3119  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3120  *predicate = ExecPrepareQual((List *) tgqual, estate);
3121  MemoryContextSwitchTo(oldContext);
3122  }
3123 
3124  /*
3125  * We will use the EState's per-tuple context for evaluating WHEN
3126  * expressions (creating it if it's not already there).
3127  */
3128  econtext = GetPerTupleExprContext(estate);
3129 
3130  /*
3131  * Finally evaluate the expression, making the old and/or new tuples
3132  * available as INNER_VAR/OUTER_VAR respectively.
3133  */
3134  econtext->ecxt_innertuple = oldslot;
3135  econtext->ecxt_outertuple = newslot;
3136  if (!ExecQual(*predicate, econtext))
3137  return false;
3138  }
3139 
3140  return true;
3141 }
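
/*
 * Editor's illustration (not part of the original file): for a trigger
 * defined with an UPDATE OF column list and a WHEN clause, e.g.
 *   AFTER UPDATE OF status ON orders FOR EACH ROW
 *   WHEN (OLD.status IS DISTINCT FROM NEW.status)
 * (table and column names hypothetical), TriggerEnabled() first requires the
 * listed column to appear in modifiedCols via the tgattr check, and then
 * evaluates the WHEN expression with OLD bound to the inner tuple slot and
 * NEW to the outer tuple slot, as set up just above.
 */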
3142 
3143 
3144 /* ----------
3145  * After-trigger stuff
3146  *
3147  * The AfterTriggersData struct holds data about pending AFTER trigger events
3148  * during the current transaction tree. (BEFORE triggers are fired
3149  * immediately so we don't need any persistent state about them.) The struct
3150  * and most of its subsidiary data are kept in TopTransactionContext; however
3151  * some data that can be discarded sooner appears in the CurTransactionContext
3152  * of the relevant subtransaction. Also, the individual event records are
3153  * kept in a separate sub-context of TopTransactionContext. This is done
3154  * mainly so that it's easy to tell from a memory context dump how much space
3155  * is being eaten by trigger events.
3156  *
3157  * Because the list of pending events can grow large, we go to some
3158  * considerable effort to minimize per-event memory consumption. The event
3159  * records are grouped into chunks and common data for similar events in the
3160  * same chunk is only stored once.
3161  *
3162  * XXX We need to be able to save the per-event data in a file if it grows too
3163  * large.
3164  * ----------
3165  */
3166 
3167 /* Per-trigger SET CONSTRAINT status */
3168 typedef struct SetConstraintTriggerData
3169 {
3170  Oid sct_tgoid;
3171  bool sct_tgisdeferred;
3172 } SetConstraintTriggerData;
3173 
3174 typedef struct SetConstraintTriggerData *SetConstraintTrigger;
3175 
3176 /*
3177  * SET CONSTRAINT intra-transaction status.
3178  *
3179  * We make this a single palloc'd object so it can be copied and freed easily.
3180  *
3181  * all_isset and all_isdeferred are used to keep track
3182  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3183  *
3184  * trigstates[] stores per-trigger tgisdeferred settings.
3185  */
3186 typedef struct SetConstraintStateData
3187 {
3188  bool all_isset;
3189  bool all_isdeferred;
3190  int numstates; /* number of trigstates[] entries in use */
3191  int numalloc; /* allocated size of trigstates[] */
3192  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3193 } SetConstraintStateData;
3194 
3195 typedef SetConstraintStateData *SetConstraintState;
3196 
3197 
3198 /*
3199  * Per-trigger-event data
3200  *
3201  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3202  * status bits and up to two tuple CTIDs. Each event record also has an
3203  * associated AfterTriggerSharedData that is shared across all instances of
3204  * similar events within a "chunk".
3205  *
3206  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3207  * fields. Updates of regular tables use two; inserts and deletes of regular
3208  * tables use one; foreign tables always use zero and save the tuple(s) to a
3209  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3210  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3211  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3212  * tuple(s). This permits storing tuples once regardless of the number of
3213  * row-level triggers on a foreign table.
3214  *
3215  * Note that we need triggers on foreign tables to be fired in exactly the
3216  * order they were queued, so that the tuples come out of the tuplestore in
3217  * the right order. To ensure that, we forbid deferrable (constraint)
3218  * triggers on foreign tables. This also ensures that such triggers do not
3219  * get deferred into outer trigger query levels, meaning that it's okay to
3220  * destroy the tuplestore at the end of the query level.
3221  *
3222  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3223  * require no ctid field. We lack the flag bit space to neatly represent that
3224  * distinct case, and it seems unlikely to be worth much trouble.
3225  *
3226  * Note: ats_firing_id is initially zero and is set to something else when
3227  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3228  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3229  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3230  * because all instances of the same type of event in a given event list will
3231  * be fired at the same time, if they were queued between the same firing
3232  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3233  * a new event to an existing AfterTriggerSharedData record.
3234  */
3235 typedef uint32 TriggerFlags;
3236 
3237 #define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order bits */
3238 #define AFTER_TRIGGER_DONE 0x10000000
3239 #define AFTER_TRIGGER_IN_PROGRESS 0x20000000
3240 /* bits describing the size and tuple sources of this event */
3241 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3242 #define AFTER_TRIGGER_FDW_FETCH 0x80000000
3243 #define AFTER_TRIGGER_1CTID 0x40000000
3244 #define AFTER_TRIGGER_2CTID 0xC0000000
3245 #define AFTER_TRIGGER_TUP_BITS 0xC0000000
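
/*
 * Editor's worked example (not in the original file): with the bit layout
 * above, an ate_flags value of 0x40000040 denotes a one-ctid event
 * (AFTER_TRIGGER_1CTID) whose AfterTriggerSharedData record lives 0x40 bytes
 * past the start of the event record itself, since the low-order
 * AFTER_TRIGGER_OFFSET bits (here 0x40) hold the offset used by
 * GetTriggerSharedData() below.
 */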
3246 
3247 typedef struct AfterTriggerSharedData *AfterTriggerShared;
3248 
3249 typedef struct AfterTriggerSharedData
3250 {
3251  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3252  Oid ats_tgoid; /* the trigger's ID */
3253  Oid ats_relid; /* the relation it's on */
3254  CommandId ats_firing_id; /* ID for firing cycle */
3255  struct AfterTriggersTableData *ats_table; /* transition table access */
3256  Bitmapset *ats_modifiedcols; /* modified columns */
3257 } AfterTriggerSharedData;
3258 
3259 typedef struct AfterTriggerEventData *AfterTriggerEvent;
3260 
3261 typedef struct AfterTriggerEventData
3262 {
3263  TriggerFlags ate_flags; /* status bits and offset to shared data */
3264  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3265  ItemPointerData ate_ctid2; /* new updated tuple */
3266 } AfterTriggerEventData;
3267 
3268 /* AfterTriggerEventData, minus ate_ctid2 */
3269 typedef struct AfterTriggerEventDataOneCtid
3270 {
3271  TriggerFlags ate_flags; /* status bits and offset to shared data */
3272  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3273 } AfterTriggerEventDataOneCtid;
3274 
3275 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
3276 typedef struct AfterTriggerEventDataZeroCtids
3277 {
3278  TriggerFlags ate_flags; /* status bits and offset to shared data */
3279 } AfterTriggerEventDataZeroCtids;
3280 
3281 #define SizeofTriggerEvent(evt) \
3282  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3283  sizeof(AfterTriggerEventData) : \
3284  ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3285  sizeof(AfterTriggerEventDataOneCtid) : \
3286  sizeof(AfterTriggerEventDataZeroCtids))
3287 
3288 #define GetTriggerSharedData(evt) \
3289  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3290 
3291 /*
3292  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3293  * larger chunks (a slightly more sophisticated version of an expansible
3294  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3295  * AfterTriggerEventData records; the space between endfree and endptr is
3296  * occupied by AfterTriggerSharedData records.
3297  */
3298 typedef struct AfterTriggerEventChunk
3299 {
3300  struct AfterTriggerEventChunk *next; /* list link */
3301  char *freeptr; /* start of free space in chunk */
3302  char *endfree; /* end of free space in chunk */
3303  char *endptr; /* end of chunk */
3304  /* event data follows here */
3305 } AfterTriggerEventChunk;
3306 
3307 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3308 
3309 /* A list of events */
3310 typedef struct AfterTriggerEventList
3311 {
3312  AfterTriggerEventChunk *head;
3313  AfterTriggerEventChunk *tail;
3314  char *tailfree; /* freeptr of tail chunk */
3315 } AfterTriggerEventList;
3316 
3317 /* Macros to help in iterating over a list of events */
3318 #define for_each_chunk(cptr, evtlist) \
3319  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3320 #define for_each_event(eptr, cptr) \
3321  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3322  (char *) eptr < (cptr)->freeptr; \
3323  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3324 /* Use this if no special per-chunk processing is needed */
3325 #define for_each_event_chunk(eptr, cptr, evtlist) \
3326  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3327 
3328 /* Macros for iterating from a start point that might not be list start */
3329 #define for_each_chunk_from(cptr) \
3330  for (; cptr != NULL; cptr = cptr->next)
3331 #define for_each_event_from(eptr, cptr) \
3332  for (; \
3333  (char *) eptr < (cptr)->freeptr; \
3334  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3335 
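
/*
 * Editor's sketch (not part of the original file): the iteration macros above
 * are typically used as shown in this hypothetical helper, which walks every
 * queued event in a list and looks up its shared data.
 */
static void
example_walk_events(AfterTriggerEventList *events)
{
	AfterTriggerEventChunk *chunk;
	AfterTriggerEvent event;

	for_each_event_chunk(event, chunk, *events)
	{
		AfterTriggerShared evtshared = GetTriggerSharedData(event);

		/* For instance, report which trigger queued this event */
		elog(DEBUG1, "queued event for trigger %u on relation %u",
			 evtshared->ats_tgoid, evtshared->ats_relid);
	}
}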
3336 
3337 /*
3338  * All per-transaction data for the AFTER TRIGGERS module.
3339  *
3340  * AfterTriggersData has the following fields:
3341  *
3342  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3343  * We mark firable events with the current firing cycle's ID so that we can
3344  * tell which ones to work on. This ensures sane behavior if a trigger
3345  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3346  * only fire those events that weren't already scheduled for firing.
3347  *
3348  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3349  * This is saved and restored across failed subtransactions.
3350  *
3351  * events is the current list of deferred events. This is global across
3352  * all subtransactions of the current transaction. In a subtransaction
3353  * abort, we know that the events added by the subtransaction are at the
3354  * end of the list, so it is relatively easy to discard them. The event
3355  * list chunks themselves are stored in event_cxt.
3356  *
3357  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3358  * (-1 when the stack is empty).
3359  *
3360  * query_stack[query_depth] is the per-query-level data, including these fields:
3361  *
3362  * events is a list of AFTER trigger events queued by the current query.
3363  * None of these are valid until the matching AfterTriggerEndQuery call
3364  * occurs. At that point we fire immediate-mode triggers, and append any
3365  * deferred events to the main events list.
3366  *
3367  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3368  * needed by events queued by the current query. (Note: we use just one
3369  * tuplestore even though more than one foreign table might be involved.
3370  * This is okay because tuplestores don't really care what's in the tuples
3371  * they store; but it's possible that someday it'd break.)
3372  *
3373  * tables is a List of AfterTriggersTableData structs for target tables
3374  * of the current query (see below).
3375  *
3376  * maxquerydepth is just the allocated length of query_stack.
3377  *
3378  * trans_stack holds per-subtransaction data, including these fields:
3379  *
3380  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3381  * state data. Each subtransaction level that modifies that state first
3382  * saves a copy, which we use to restore the state if we abort.
3383  *
3384  * events is a copy of the events head/tail pointers,
3385  * which we use to restore those values during subtransaction abort.
3386  *
3387  * query_depth is the subtransaction-start-time value of query_depth,
3388  * which we similarly use to clean up at subtransaction abort.
3389  *
3390  * firing_counter is the subtransaction-start-time value of firing_counter.
3391  * We use this to recognize which deferred triggers were fired (or marked
3392  * for firing) within an aborted subtransaction.
3393  *
3394  * We use GetCurrentTransactionNestLevel() to determine the correct array
3395  * index in trans_stack. maxtransdepth is the number of allocated entries in
3396  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3397  * in cases where errors during subxact abort cause multiple invocations
3398  * of AfterTriggerEndSubXact() at the same nesting depth.)
3399  *
3400  * We create an AfterTriggersTableData struct for each target table of the
3401  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3402  * either transition tables or statement-level triggers. This is used to
3403  * hold the relevant transition tables, as well as info tracking whether
3404  * we already queued the statement triggers. (We use that info to prevent
3405  * firing the same statement triggers more than once per statement, or really
3406  * once per transition table set.) These structs, along with the transition
3407  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3408  * That's sufficient lifespan because we don't allow transition tables to be
3409  * used by deferrable triggers, so they only need to survive until
3410  * AfterTriggerEndQuery.
3411  */
3412 typedef struct AfterTriggersQueryData AfterTriggersQueryData;
3413 typedef struct AfterTriggersTransData AfterTriggersTransData;
3414 typedef struct AfterTriggersTableData AfterTriggersTableData;
3415 
3416 typedef struct AfterTriggersData
3417 {
3418  CommandId firing_counter; /* next firing ID to assign */
3419  SetConstraintState state; /* the active S C state */
3420  AfterTriggerEventList events; /* deferred-event list */
3421  MemoryContext event_cxt; /* memory context for events, if any */
3422 
3423  /* per-query-level data: */
3424  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3425  int query_depth; /* current index in above array */
3426  int maxquerydepth; /* allocated len of above array */
3427 
3428  /* per-subtransaction-level data: */
3429  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3430  int maxtransdepth; /* allocated len of above array */
3431 } AfterTriggersData;
3432 
3433 struct AfterTriggersQueryData
3434 {
3435  AfterTriggerEventList events; /* events pending from this query */
3436  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3437  List *tables; /* list of AfterTriggersTableData, see below */
3438 };
3439 
3440 struct AfterTriggersTransData
3441 {
3442  /* these fields are just for resetting at subtrans abort: */
3443  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3444  AfterTriggerEventList events; /* saved list pointer */
3445  int query_depth; /* saved query_depth */
3446  CommandId firing_counter; /* saved firing_counter */
3447 };
3448 
3449 struct AfterTriggersTableData
3450 {
3451  /* relid + cmdType form the lookup key for these structs: */
3452  Oid relid; /* target table's OID */
3453  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3454  bool closed; /* true when no longer OK to add tuples */
3455  bool before_trig_done; /* did we already queue BS triggers? */
3456  bool after_trig_done; /* did we already queue AS triggers? */
3457  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3458  Tuplestorestate *old_tuplestore; /* "old" transition table, if any */
3459  Tuplestorestate *new_tuplestore; /* "new" transition table, if any */
3460  TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3461 };
3462 
3463 static AfterTriggersData afterTriggers;
3464 
3465 static void AfterTriggerExecute(EState *estate,
3466  AfterTriggerEvent event,
3467  ResultRelInfo *relInfo,
3468  TriggerDesc *trigdesc,
3469  FmgrInfo *finfo,
3470  Instrumentation *instr,
3471  MemoryContext per_tuple_context,
3472  TupleTableSlot *trig_tuple_slot1,
3473  TupleTableSlot *trig_tuple_slot2);
3474 static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid,
3475  CmdType cmdType);
3476 static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs);
3477 static SetConstraintState SetConstraintStateCreate(int numalloc);
3478 static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
3479 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3480  Oid tgoid, bool tgisdeferred);
3481 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3482 
3483 
3484 /*
3485  * Get the FDW tuplestore for the current trigger query level, creating it
3486  * if necessary.
3487  */
3488 static Tuplestorestate *
3489 GetCurrentFDWTuplestore(void)
3490 {
3491  Tuplestorestate *ret;
3492 
3493  ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
3494  if (ret == NULL)
3495  {
3496  MemoryContext oldcxt;
3497  ResourceOwner saveResourceOwner;
3498 
3499  /*
3500  * Make the tuplestore valid until end of subtransaction. We really
3501  * only need it until AfterTriggerEndQuery().
3502  */
3503  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
3504  saveResourceOwner = CurrentResourceOwner;
3505  CurrentResourceOwner = CurTransactionResourceOwner;
3506 
3507  ret = tuplestore_begin_heap(false, false, work_mem);
3508 
3509  CurrentResourceOwner = saveResourceOwner;
3510  MemoryContextSwitchTo(oldcxt);
3511 
3512  afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
3513  }
3514 
3515  return ret;
3516 }
3517 
3518 /* ----------
3519  * afterTriggerCheckState()
3520  *
3521  * Returns true if the trigger event is actually in state DEFERRED.
3522  * ----------
3523  */
3524 static bool
3525 afterTriggerCheckState(AfterTriggerShared evtshared)
3526 {
3527  Oid tgoid = evtshared->ats_tgoid;
3528  SetConstraintState state = afterTriggers.state;
3529  int i;
3530 
3531  /*
3532  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3533  * constraints declared NOT DEFERRABLE), the state is always false.
3534  */
3535  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3536  return false;
3537 
3538  /*
3539  * If constraint state exists, SET CONSTRAINTS might have been executed
3540  * either for this trigger or for all triggers.
3541  */
3542  if (state != NULL)
3543  {
3544  /* Check for SET CONSTRAINTS for this specific trigger. */
3545  for (i = 0; i < state->numstates; i++)
3546  {
3547  if (state->trigstates[i].sct_tgoid == tgoid)
3548  return state->trigstates[i].sct_tgisdeferred;
3549  }
3550 
3551  /* Check for SET CONSTRAINTS ALL. */
3552  if (state->all_isset)
3553  return state->all_isdeferred;
3554  }
3555 
3556  /*
3557  * Otherwise return the default state for the trigger.
3558  */
3559  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3560 }
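/*
 * Illustrative, standalone sketch (not part of trigger.c): the precedence
 * that afterTriggerCheckState() applies above -- a per-trigger
 * SET CONSTRAINTS entry wins, then SET CONSTRAINTS ALL, then the trigger's
 * declared INITIALLY DEFERRED/IMMEDIATE default; NOT DEFERRABLE triggers
 * are always immediate.  All names below are hypothetical.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct DemoTrigState
{
	unsigned int tgoid;
	bool		isdeferred;
} DemoTrigState;

typedef struct DemoSetConstraints
{
	bool		all_isset;		/* was SET CONSTRAINTS ALL executed? */
	bool		all_isdeferred;
	int			numstates;		/* per-trigger overrides */
	DemoTrigState trigstates[8];
} DemoSetConstraints;

static bool
demo_is_deferred(const DemoSetConstraints *sc, unsigned int tgoid,
				 bool deferrable, bool initdeferred)
{
	if (!deferrable)			/* NOT DEFERRABLE: always immediate */
		return false;
	if (sc != NULL)
	{
		/* a per-trigger SET CONSTRAINTS entry takes precedence */
		for (int i = 0; i < sc->numstates; i++)
			if (sc->trigstates[i].tgoid == tgoid)
				return sc->trigstates[i].isdeferred;
		/* then SET CONSTRAINTS ALL, if it was issued */
		if (sc->all_isset)
			return sc->all_isdeferred;
	}
	return initdeferred;		/* otherwise the declared default decides */
}

int
main(void)
{
	DemoSetConstraints sc = {
		.all_isset = true,
		.all_isdeferred = true,
		.numstates = 1,
		.trigstates = {{.tgoid = 42, .isdeferred = false}}
	};

	assert(!demo_is_deferred(&sc, 42, true, true));	/* entry beats ALL */
	assert(demo_is_deferred(&sc, 43, true, false));	/* ALL covers the rest */
	assert(demo_is_deferred(NULL, 43, true, true));	/* default otherwise */
	return 0;
}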
3561 
3562 
3563 /* ----------
3564  * afterTriggerAddEvent()
3565  *
3566  * Add a new trigger event to the specified queue.
3567  * The passed-in event data is copied.
3568  * ----------
3569  */
3570 static void
3571 afterTriggerAddEvent(AfterTriggerEventList *events,
3572  AfterTriggerEvent event, AfterTriggerShared evtshared)
3573 {
3574  Size eventsize = SizeofTriggerEvent(event);
3575  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3576  AfterTriggerEventChunk *chunk;
3577  AfterTriggerShared newshared;
3578  AfterTriggerEvent newevent;
3579 
3580  /*
3581  * If empty list or not enough room in the tail chunk, make a new chunk.
3582  * We assume here that a new shared record will always be needed.
3583  */
3584  chunk = events->tail;
3585  if (chunk == NULL ||
3586  chunk->endfree - chunk->freeptr < needed)
3587  {
3588  Size chunksize;
3589 
3590  /* Create event context if we didn't already */
3591  if (afterTriggers.event_cxt == NULL)
3592  afterTriggers.event_cxt =
3593  AllocSetContextCreate(TopTransactionContext,
3594  "AfterTriggerEvents",
3595  ALLOCSET_DEFAULT_SIZES);
3596 
3597  /*
3598  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3599  * These numbers are fairly arbitrary, though there is a hard limit at
3600  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3601  * shared records using the available space in ate_flags. Another
3602  * constraint is that if the chunk size gets too huge, the search loop
3603  * below would get slow given a (not too common) usage pattern with
3604  * many distinct event types in a chunk. Therefore, we double the
3605  * preceding chunk size only if there weren't too many shared records
3606  * in the preceding chunk; otherwise we halve it. This gives us some
3607  * ability to adapt to the actual usage pattern of the current query
3608  * while still having large chunk sizes in typical usage. All chunk
3609  * sizes used should be MAXALIGN multiples, to ensure that the shared
3610  * records will be aligned safely.
3611  */
3612 #define MIN_CHUNK_SIZE 1024
3613 #define MAX_CHUNK_SIZE (1024*1024)
3614 
3615 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3616 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3617 #endif
3618 
3619  if (chunk == NULL)
3620  chunksize = MIN_CHUNK_SIZE;
3621  else
3622  {
3623  /* preceding chunk size... */
3624  chunksize = chunk->endptr - (char *) chunk;
3625  /* check number of shared records in preceding chunk */
3626  if ((chunk->endptr - chunk->endfree) <=
3627  (100 * sizeof(AfterTriggerSharedData)))
3628  chunksize *= 2; /* okay, double it */
3629  else
3630  chunksize /= 2; /* too many shared records */
3631  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
3632  }
3633  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
3634  chunk->next = NULL;
3635  chunk->freeptr = CHUNK_DATA_START(chunk);
3636  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
3637  Assert(chunk->endfree - chunk->freeptr >= needed);
3638 
3639  if (events->head == NULL)
3640  events->head = chunk;
3641  else
3642  events->tail->next = chunk;
3643  events->tail = chunk;
3644  /* events->tailfree is now out of sync, but we'll fix it below */
3645  }
3646 
3647  /*
3648  * Try to locate a matching shared-data record already in the chunk. If
3649  * none, make a new one.
3650  */
3651  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
3652  (char *) newshared >= chunk->endfree;
3653  newshared--)
3654  {
3655  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
3656  newshared->ats_relid == evtshared->ats_relid &&
3657  newshared->ats_event == evtshared->ats_event &&
3658  newshared->ats_table == evtshared->ats_table &&
3659  newshared->ats_firing_id == 0)
3660  break;
3661  }
3662  if ((char *) newshared < chunk->endfree)
3663  {
3664  *newshared = *evtshared;
3665  newshared->ats_firing_id = 0; /* just to be sure */
3666  chunk->endfree = (char *) newshared;
3667  }
3668 
3669  /* Insert the data */
3670  newevent = (AfterTriggerEvent) chunk->freeptr;
3671  memcpy(newevent, event, eventsize);
3672  /* ... and link the new event to its shared record */
3673  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
3674  newevent->ate_flags |= (char *) newshared - (char *) newevent;
3675 
3676  chunk->freeptr += eventsize;
3677  events->tailfree = chunk->freeptr;
3678 }
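/*
 * Illustrative, standalone sketch (not part of trigger.c): the adaptive
 * chunk-size rule described in the comment above.  A new chunk doubles the
 * previous chunk's size when that chunk held few shared records, halves it
 * otherwise, and never exceeds MAX_CHUNK_SIZE.  The constants and the
 * 100-shared-record threshold mirror the code above; the names and the
 * stand-in record size are assumptions.
 */
#include <stddef.h>
#include <stdio.h>

#define DEMO_MIN_CHUNK_SIZE 1024
#define DEMO_MAX_CHUNK_SIZE (1024 * 1024)
#define DEMO_SHARED_SIZE 32		/* stand-in for sizeof(AfterTriggerSharedData) */

static size_t
demo_next_chunk_size(size_t prev_size, size_t prev_shared_bytes)
{
	size_t		chunksize;

	if (prev_size == 0)			/* first chunk of the list */
		return DEMO_MIN_CHUNK_SIZE;

	if (prev_shared_bytes <= 100 * DEMO_SHARED_SIZE)
		chunksize = prev_size * 2;	/* few shared records: grow */
	else
		chunksize = prev_size / 2;	/* many shared records: shrink */

	if (chunksize > DEMO_MAX_CHUNK_SIZE)
		chunksize = DEMO_MAX_CHUNK_SIZE;
	return chunksize;
}

int
main(void)
{
	size_t		size = 0;

	/* a query reusing one shared record grows its chunks toward 1MB */
	for (int i = 0; i < 12; i++)
	{
		size = demo_next_chunk_size(size, DEMO_SHARED_SIZE);
		printf("chunk %d: %zu bytes\n", i, size);
	}
	return 0;
}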
3679 
3680 /* ----------
3681  * afterTriggerFreeEventList()
3682  *
3683  * Free all the event storage in the given list.
3684  * ----------
3685  */
3686 static void
3687 afterTriggerFreeEventList(AfterTriggerEventList *events)
3688 {
3689  AfterTriggerEventChunk *chunk;
3690 
3691  while ((chunk = events->head) != NULL)
3692  {
3693  events->head = chunk->next;
3694  pfree(chunk);
3695  }
3696  events->tail = NULL;
3697  events->tailfree = NULL;
3698 }
3699 
3700 /* ----------
3701  * afterTriggerRestoreEventList()
3702  *
3703  * Restore an event list to its prior length, removing all the events
3704  * added since it had the value old_events.
3705  * ----------
3706  */
3707 static void
3708 afterTriggerRestoreEventList(AfterTriggerEventList *events,
3709  const AfterTriggerEventList *old_events)
3710 {
3711  AfterTriggerEventChunk *chunk;
3712  AfterTriggerEventChunk *next_chunk;
3713 
3714  if (old_events->tail == NULL)
3715  {
3716  /* restoring to a completely empty state, so free everything */
3717  afterTriggerFreeEventList(events);
3718  }
3719  else
3720  {
3721  *events = *old_events;
3722  /* free any chunks after the last one we want to keep */
3723  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3724  {
3725  next_chunk = chunk->next;
3726  pfree(chunk);
3727  }
3728  /* and clean up the tail chunk to be the right length */
3729  events->tail->next = NULL;
3730  events->tail->freeptr = events->tailfree;
3731 
3732  /*
3733  * We don't make any effort to remove now-unused shared data records.
3734  * They might still be useful, anyway.
3735  */
3736  }
3737 }
3738 
3739 /* ----------
3740  * afterTriggerDeleteHeadEventChunk()
3741  *
3742  * Remove the first chunk of events from the query level's event list.
3743  * Keep any event list pointers elsewhere in the query level's data
3744  * structures in sync.
3745  * ----------
3746  */
3747 static void
3748 afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
3749 {
3750  AfterTriggerEventChunk *target = qs->events.head;
3751  ListCell *lc;
3752 
3753  Assert(target && target->next);
3754 
3755  /*
3756  * First, update any pointers in the per-table data, so that they won't be
3757  * dangling. Resetting obsoleted pointers to NULL will make
3758  * cancel_prior_stmt_triggers start from the list head, which is fine.
3759  */
3760  foreach(lc, qs->tables)
3761  {
3762  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
3763 
3764  if (table->after_trig_done &&
3765  table->after_trig_events.tail == target)
3766  {
3767  table->after_trig_events.head = NULL;
3768  table->after_trig_events.tail = NULL;
3769  table->after_trig_events.tailfree = NULL;
3770  }
3771  }
3772 
3773  /* Now we can flush the head chunk */
3774  qs->events.head = target->next;
3775  pfree(target);
3776 }
3777 
3778 
3779 /* ----------
3780  * AfterTriggerExecute()
3781  *
3782  * Fetch the required tuples back from the heap and fire one
3783  * single trigger function.
3784  *
3785  * Frequently, this will be fired many times in a row for triggers of
3786  * a single relation. Therefore, we cache the open relation and provide
3787  * fmgr lookup cache space at the caller level. (For triggers fired at
3788  * the end of a query, we can even piggyback on the executor's state.)
3789  *
3790  * event: event currently being fired.
3791  * rel: open relation for event.
3792  * trigdesc: working copy of rel's trigger info.
3793  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
3794  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
3795  * or NULL if no instrumentation is wanted.
3796  * per_tuple_context: memory context to call trigger function in.
3797  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
3798  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
3799  * ----------
3800  */
3801 static void
3802 AfterTriggerExecute(EState *estate,
3803  AfterTriggerEvent event,
3804  ResultRelInfo *relInfo,
3805  TriggerDesc *trigdesc,
3806  FmgrInfo *finfo, Instrumentation *instr,
3807  MemoryContext per_tuple_context,
3808  TupleTableSlot *trig_tuple_slot1,
3809  TupleTableSlot *trig_tuple_slot2)
3810 {
3811  Relation rel = relInfo->ri_RelationDesc;
3812  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3813  Oid tgoid = evtshared->ats_tgoid;
3814  TriggerData LocTriggerData = {0};
3815  HeapTuple rettuple;
3816  int tgindx;
3817  bool should_free_trig = false;
3818  bool should_free_new = false;
3819 
3820  /*
3821  * Locate trigger in trigdesc.
3822  */
3823  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
3824  {
3825  if (trigdesc->triggers[tgindx].tgoid == tgoid)
3826  {
3827  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
3828  break;
3829  }
3830  }
3831  if (LocTriggerData.tg_trigger == NULL)
3832  elog(ERROR, "could not find trigger %u", tgoid);
3833 
3834  /*
3835  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
3836  * to include time spent re-fetching tuples in the trigger cost.
3837  */
3838  if (instr)
3839  InstrStartNode(instr + tgindx);
3840 
3841  /*
3842  * Fetch the required tuple(s).
3843  */
3844  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
3845  {
3846  case AFTER_TRIGGER_FDW_FETCH:
3847  {
3848  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
3849 
3850  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
3851  trig_tuple_slot1))
3852  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3853 
3854  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3855  TRIGGER_EVENT_UPDATE &&
3856  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
3857  trig_tuple_slot2))
3858  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3859  }
3860  /* fall through */
3861  case AFTER_TRIGGER_FDW_REUSE:
3862 
3863  /*
3864  * Store tuple in the slot so that tg_trigtuple does not reference
3865  * tuplestore memory. (It is formally possible for the trigger
3866  * function to queue trigger events that add to the same
3867  * tuplestore, which can push other tuples out of memory.) The
3868  * distinction is academic, because we start with a minimal tuple
3869  * that is stored as a heap tuple, constructed in a different memory
3870  * context, in the slot anyway.
3871  */
3872  LocTriggerData.tg_trigslot = trig_tuple_slot1;
3873  LocTriggerData.tg_trigtuple =
3874  ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
3875 
3876  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3877  TRIGGER_EVENT_UPDATE)
3878  {
3879  LocTriggerData.tg_newslot = trig_tuple_slot2;
3880  LocTriggerData.tg_newtuple =
3881  ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
3882  }
3883  else
3884  {
3885  LocTriggerData.tg_newtuple = NULL;
3886  }
3887  break;
3888 
3889  default:
3890  if (ItemPointerIsValid(&(event->ate_ctid1)))
3891  {
3892  LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
3893 
3894  if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid1),
3895  SnapshotAny,
3896  LocTriggerData.tg_trigslot))
3897  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3898  LocTriggerData.tg_trigtuple =
3899  ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
3900  }
3901  else
3902  {
3903  LocTriggerData.tg_trigtuple = NULL;
3904  }
3905 
3906  /* don't touch ctid2 if not there */
3907  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
3908  AFTER_TRIGGER_2CTID &&
3909  ItemPointerIsValid(&(event->ate_ctid2)))
3910  {
3911  LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
3912 
3913  if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid2),
3914  SnapshotAny,
3915  LocTriggerData.tg_newslot))
3916  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3917  LocTriggerData.tg_newtuple =
3918  ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
3919  }
3920  else
3921  {
3922  LocTriggerData.tg_newtuple = NULL;
3923  }
3924  }
3925 
3926  /*
3927  * Set up the tuplestore information to let the trigger have access to
3928  * transition tables. When we first make a transition table available to
3929  * a trigger, mark it "closed" so that it cannot change anymore. If any
3930  * additional events of the same type get queued in the current trigger
3931  * query level, they'll go into new transition tables.
3932  */
3933  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
3934  if (evtshared->ats_table)
3935  {
3936  if (LocTriggerData.tg_trigger->tgoldtable)
3937  {
3938  LocTriggerData.tg_oldtable = evtshared->ats_table->old_tuplestore;
3939  evtshared->ats_table->closed = true;
3940  }
3941 
3942  if (LocTriggerData.tg_trigger->tgnewtable)
3943  {
3944  LocTriggerData.tg_newtable = evtshared->ats_table->new_tuplestore;
3945  evtshared->ats_table->closed = true;
3946  }
3947  }
3948 
3949  /*
3950  * Setup the remaining trigger information
3951  */
3952  LocTriggerData.type = T_TriggerData;
3953  LocTriggerData.tg_event =
3954  evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
3955  LocTriggerData.tg_relation = rel;
3956  if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
3957  LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
3958 
3959  MemoryContextReset(per_tuple_context);
3960 
3961  /*
3962  * Call the trigger and throw away any possibly returned updated tuple.
3963  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
3964  */
3965  rettuple = ExecCallTriggerFunc(&LocTriggerData,
3966  tgindx,
3967  finfo,
3968  NULL,
3969  per_tuple_context);
3970  if (rettuple != NULL &&
3971  rettuple != LocTriggerData.tg_trigtuple &&
3972  rettuple != LocTriggerData.tg_newtuple)
3973  heap_freetuple(rettuple);
3974 
3975  /*
3976  * Release resources
3977  */
3978  if (should_free_trig)
3979  heap_freetuple(LocTriggerData.tg_trigtuple);
3980  if (should_free_new)
3981  heap_freetuple(LocTriggerData.tg_newtuple);
3982 
3983  /* don't clear slots' contents if foreign table */
3984  if (trig_tuple_slot1 == NULL)
3985  {
3986  if (LocTriggerData.tg_trigslot)
3987  ExecClearTuple(LocTriggerData.tg_trigslot);
3988  if (LocTriggerData.tg_newslot)
3989  ExecClearTuple(LocTriggerData.tg_newslot);
3990  }
3991 
3992  /*
3993  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
3994  * one "tuple returned" (really the number of firings).
3995  */
3996  if (instr)
3997  InstrStopNode(instr + tgindx, 1);
3998 }
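/*
 * For context, a hedged sketch (not code from trigger.c) of a user-level
 * C trigger function consuming the TriggerData fields populated above
 * (tg_event, tg_relation, tg_trigtuple, tg_newtuple).  It would live in an
 * extension and be attached with CREATE TRIGGER ... EXECUTE FUNCTION; the
 * function name is an assumption for illustration.
 */
#include "postgres.h"
#include "commands/trigger.h"
#include "fmgr.h"
#include "utils/rel.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(demo_audit_trig);

Datum
demo_audit_trig(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "demo_audit_trig: not called by trigger manager");

	if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		elog(NOTICE, "AFTER UPDATE on \"%s\": old and new rows available",
			 RelationGetRelationName(trigdata->tg_relation));
	else if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
		elog(NOTICE, "AFTER INSERT on \"%s\": tg_trigtuple is the new row",
			 RelationGetRelationName(trigdata->tg_relation));
	else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
		elog(NOTICE, "AFTER DELETE on \"%s\": tg_trigtuple is the old row",
			 RelationGetRelationName(trigdata->tg_relation));

	/* the return value of an AFTER ROW trigger is ignored */
	return PointerGetDatum(NULL);
}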
3999 
4000 
4001 /*
4002  * afterTriggerMarkEvents()
4003  *
4004  * Scan the given event list for not yet invoked events. Mark the ones
4005  * that can be invoked now with the current firing ID.
4006  *
4007  * If move_list isn't NULL, events that are not to be invoked now are
4008  * transferred to move_list.
4009  *
4010  * When immediate_only is true, do not invoke currently-deferred triggers.
4011  * (This will be false only at main transaction exit.)
4012  *
4013  * Returns true if any invokable events were found.
4014  */
4015 static bool
4016 afterTriggerMarkEvents(AfterTriggerEventList *events,
4017  AfterTriggerEventList *move_list,
4018  bool immediate_only)
4019 {
4020  bool found = false;
4021  AfterTriggerEvent event;
4022  AfterTriggerEventChunk *chunk;
4023 
4024  for_each_event_chunk(event, chunk, *events)
4025  {
4026  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4027  bool defer_it = false;
4028 
4029  if (!(event->ate_flags &
4030  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
4031  {
4032  /*
4033  * This trigger hasn't been called or scheduled yet. Check if we
4034  * should call it now.
4035  */
4036  if (immediate_only && afterTriggerCheckState(evtshared))
4037  {
4038  defer_it = true;
4039  }
4040  else
4041  {
4042  /*
4043  * Mark it as to be fired in this firing cycle.
4044  */
4045  evtshared->ats_firing_id = afterTriggers.firing_counter;
4046  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4047  found = true;
4048  }
4049  }
4050 
4051  /*
4052  * If it's deferred, move it to move_list, if requested.
4053  */
4054  if (defer_it && move_list != NULL)
4055  {
4056  /* add it to move_list */
4057  afterTriggerAddEvent(move_list, event, evtshared);
4058  /* mark original copy "done" so we don't do it again */
4059  event->ate_flags |= AFTER_TRIGGER_DONE;
4060  }
4061  }
4062 
4063  return found;
4064 }
4065 
4066 /*
4067  * afterTriggerInvokeEvents()
4068  *
4069  * Scan the given event list for events that are marked as to be fired
4070  * in the current firing cycle, and fire them.
4071  *
4072  * If estate isn't NULL, we use its result relation info to avoid repeated
4073  * openings and closing of trigger target relations. If it is NULL, we
4074  * make one locally to cache the info in case there are multiple trigger
4075  * events per rel.
4076  *
4077  * When delete_ok is true, it's safe to delete fully-processed events.
4078  * (We are not very tense about that: we simply reset a chunk to be empty
4079  * if all its events got fired. The objective here is just to avoid useless
4080  * rescanning of events when a trigger queues new events during transaction
4081  * end, so it's not necessary to worry much about the case where only
4082  * some events are fired.)
4083  *
4084  * Returns true if no unfired events remain in the list (this allows us
4085  * to avoid repeating afterTriggerMarkEvents).
4086  */
4087 static bool
4088 afterTriggerInvokeEvents(AfterTriggerEventList *events,
4089  CommandId firing_id,
4090  EState *estate,
4091  bool delete_ok)
4092 {
4093  bool all_fired = true;
4094  AfterTriggerEventChunk *chunk;
4095  MemoryContext per_tuple_context;
4096  bool local_estate = false;
4097  ResultRelInfo *rInfo = NULL;
4098  Relation rel = NULL;
4099  TriggerDesc *trigdesc = NULL;
4100  FmgrInfo *finfo = NULL;
4101  Instrumentation *instr = NULL;
4102  TupleTableSlot *slot1 = NULL,
4103  *slot2 = NULL;
4104 
4105  /* Make a local EState if need be */
4106  if (estate == NULL)
4107  {
4108  estate = CreateExecutorState();
4109  local_estate = true;
4110  }
4111 
4112  /* Make a per-tuple memory context for trigger function calls */
4113  per_tuple_context =
4114  AllocSetContextCreate(CurrentMemoryContext,
4115  "AfterTriggerTupleContext",
4116  ALLOCSET_DEFAULT_SIZES);
4117 
4118  for_each_chunk(chunk, *events)
4119  {
4120  AfterTriggerEvent event;
4121  bool all_fired_in_chunk = true;
4122 
4123  for_each_event(event, chunk)
4124  {
4125  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4126 
4127  /*
4128  * Is it one for me to fire?
4129  */
4130  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4131  evtshared->ats_firing_id == firing_id)
4132  {
4133  /*
4134  * So let's fire it... but first, find the correct relation if
4135  * this is not the same relation as before.
4136  */
4137  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4138  {
4139  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
4140  rel = rInfo->ri_RelationDesc;
4141  trigdesc = rInfo->ri_TrigDesc;
4142  finfo = rInfo->ri_TrigFunctions;
4143  instr = rInfo->ri_TrigInstrument;
4144  if (slot1 != NULL)
4145  {
4146  ExecDropSingleTupleTableSlot(slot1);
4147  ExecDropSingleTupleTableSlot(slot2);
4148  slot1 = slot2 = NULL;
4149  }
4150  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4151  {
4152  slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4153  &TTSOpsMinimalTuple);
4154  slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4155  &TTSOpsMinimalTuple);
4156  }
4157  if (trigdesc == NULL) /* should not happen */
4158  elog(ERROR, "relation %u has no triggers",
4159  evtshared->ats_relid);
4160  }
4161 
4162  /*
4163  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4164  * still set, so recursive examinations of the event list
4165  * won't try to re-fire it.
4166  */
4167  AfterTriggerExecute(estate, event, rInfo, trigdesc, finfo, instr,
4168  per_tuple_context, slot1, slot2);
4169 
4170  /*
4171  * Mark the event as done.
4172  */
4173  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4174  event->ate_flags |= AFTER_TRIGGER_DONE;
4175  }
4176  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4177  {
4178  /* something remains to be done */
4179  all_fired = all_fired_in_chunk = false;
4180  }
4181  }
4182 
4183  /* Clear the chunk if delete_ok and nothing left of interest */
4184  if (delete_ok && all_fired_in_chunk)
4185  {
4186  chunk->freeptr = CHUNK_DATA_START(chunk);
4187  chunk->endfree = chunk->endptr;
4188 
4189  /*
4190  * If it's last chunk, must sync event list's tailfree too. Note
4191  * that delete_ok must NOT be passed as true if there could be
4192  * additional AfterTriggerEventList values pointing at this event
4193  * list, since we'd fail to fix their copies of tailfree.
4194  */
4195  if (chunk == events->tail)
4196  events->tailfree = chunk->freeptr;
4197  }
4198  }
4199  if (slot1 != NULL)
4200  {
4201  ExecDropSingleTupleTableSlot(slot1);
4202  ExecDropSingleTupleTableSlot(slot2);
4203  }
4204 
4205  /* Release working resources */
4206  MemoryContextDelete(per_tuple_context);
4207 
4208  if (local_estate)
4209  {
4210  ExecCloseResultRelations(estate);
4211  ExecResetTupleTable(estate->es_tupleTable, false);
4212  FreeExecutorState(estate);
4213  }
4214 
4215  return all_fired;
4216 }
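/*
 * Illustrative, standalone sketch (not part of trigger.c): the
 * mark-then-invoke protocol implemented by the two functions above.  Each
 * pass stamps the currently pending events with a fresh firing ID and then
 * fires only events carrying that stamp, so events queued while firing wait
 * for the next pass.  All names below are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAX_EVENTS 16

typedef struct DemoEvent
{
	bool		done;
	bool		in_progress;
	unsigned	firing_id;
} DemoEvent;

static DemoEvent demo_events[DEMO_MAX_EVENTS];
static int	demo_nevents = 2;	/* two events queued by the "query" */
static unsigned demo_firing_counter = 1;

static bool
demo_mark(unsigned firing_id)
{
	bool		found = false;

	for (int i = 0; i < demo_nevents; i++)
		if (!demo_events[i].done && !demo_events[i].in_progress)
		{
			demo_events[i].in_progress = true;
			demo_events[i].firing_id = firing_id;
			found = true;
		}
	return found;
}

static bool
demo_invoke(unsigned firing_id)
{
	bool		all_fired = true;

	for (int i = 0; i < demo_nevents; i++)
	{
		DemoEvent  *ev = &demo_events[i];

		if (ev->in_progress && ev->firing_id == firing_id)
		{
			/* "fire" it; the first firing queues one more event */
			if (i == 0 && demo_nevents < DEMO_MAX_EVENTS)
				demo_nevents++;
			ev->in_progress = false;
			ev->done = true;
		}
		else if (!ev->done)
			all_fired = false;	/* left for a later pass */
	}
	return all_fired;
}

int
main(void)
{
	int			passes = 0;

	while (demo_mark(demo_firing_counter))
	{
		unsigned	firing_id = demo_firing_counter++;

		passes++;
		if (demo_invoke(firing_id))
			break;
	}
	printf("fired %d events in %d passes\n", demo_nevents, passes);
	return 0;
}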
4217 
4218 
4219 /*
4220  * GetAfterTriggersTableData
4221  *
4222  * Find or create an AfterTriggersTableData struct for the specified
4223  * trigger event (relation + operation type). Ignore existing structs
4224  * marked "closed"; we don't want to put any additional tuples into them,
4225  * nor change their stmt-triggers-fired state.
4226  *
4227  * Note: the AfterTriggersTableData list is allocated in the current
4228  * (sub)transaction's CurTransactionContext. This is OK because
4229  * we don't need it to live past AfterTriggerEndQuery.
4230  */
4231 static AfterTriggersTableData *
4232 GetAfterTriggersTableData(Oid relid, CmdType cmdType)
4233 {
4234  AfterTriggersTableData *table;
4235  AfterTriggersQueryData *qs;
4236  MemoryContext oldcxt;
4237  ListCell *lc;
4238 
4239  /* Caller should have ensured query_depth is OK. */
4240  Assert(afterTriggers.query_depth >= 0 &&
4241  afterTriggers.query_depth < afterTriggers.maxquerydepth);
4242  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4243 
4244  foreach(lc, qs->tables)
4245  {
4246  table = (AfterTriggersTableData *) lfirst(lc);
4247  if (table->relid == relid && table->cmdType == cmdType &&
4248  !table->closed)
4249  return table;
4250  }
4251 
4252  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4253 
4254  table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
4255  table->relid = relid;
4256  table->cmdType = cmdType;
4257  qs->tables = lappend(qs->tables, table);
4258 
4259  MemoryContextSwitchTo(oldcxt);
4260 
4261  return table;
4262 }
4263 
4264 
4265 /*
4266  * MakeTransitionCaptureState
4267  *
4268  * Make a TransitionCaptureState object for the given TriggerDesc, target
4269  * relation, and operation type. The TCS object holds all the state needed
4270  * to decide whether to capture tuples in transition tables.
4271  *
4272  * If there are no triggers in 'trigdesc' that request relevant transition
4273  * tables, then return NULL.
4274  *
4275  * The resulting object can be passed to the ExecAR* functions. When
4276  * dealing with child tables, the caller can set tcs_original_insert_tuple
4277  * to avoid having to reconstruct the original tuple in the root table's
4278  * format.
4279  *
4280  * Note that we copy the flags from a parent table into this struct (rather
4281  * than subsequently using the relation's TriggerDesc directly) so that we can
4282  * use it to control collection of transition tuples from child tables.
4283  *
4284  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4285  * on the same table during one query should share one transition table.
4286  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4287  * looked up using the table OID + CmdType, and are merely referenced by
4288  * the TransitionCaptureState objects we hand out to callers.
4289  */
4290 TransitionCaptureState *
4291 MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
4292 {
4293  TransitionCaptureState *state;
4294  bool need_old,
4295  need_new;
4296  AfterTriggersTableData *table;
4297  MemoryContext oldcxt;
4298  ResourceOwner saveResourceOwner;
4299 
4300  if (trigdesc == NULL)
4301  return NULL;
4302 
4303  /* Detect which table(s) we need. */
4304  switch (cmdType)
4305  {
4306  case CMD_INSERT:
4307  need_old = false;
4308  need_new = trigdesc->trig_insert_new_table;
4309  break;
4310  case CMD_UPDATE:
4311  need_old = trigdesc->trig_update_old_table;
4312  need_new = trigdesc->trig_update_new_table;
4313  break;
4314  case CMD_DELETE:
4315  need_old = trigdesc->trig_delete_old_table;
4316  need_new = false;
4317  break;
4318  default:
4319  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4320  need_old = need_new = false; /* keep compiler quiet */
4321  break;
4322  }
4323  if (!need_old && !need_new)
4324  return NULL;
4325 
4326  /* Check state, like AfterTriggerSaveEvent. */
4327  if (afterTriggers.query_depth < 0)
4328  elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4329 
4330  /* Be sure we have enough space to record events at this query depth. */
4331  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4332  AfterTriggerEnlargeQueryState();
4333 
4334  /*
4335  * Find or create an AfterTriggersTableData struct to hold the
4336  * tuplestore(s). If there's a matching struct but it's marked closed,
4337  * ignore it; we need a newer one.
4338  *
4339  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4340  * allocated in the current (sub)transaction's CurTransactionContext, and
4341  * the tuplestores are managed by the (sub)transaction's resource owner.
4342  * This is sufficient lifespan because we do not allow triggers using
4343  * transition tables to be deferrable; they will be fired during
4344  * AfterTriggerEndQuery, after which it's okay to delete the data.
4345  */
4346  table = GetAfterTriggersTableData(relid, cmdType);
4347 
4348  /* Now create required tuplestore(s), if we don't have them already. */
4349  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4350  saveResourceOwner = CurrentResourceOwner;
4351  CurrentResourceOwner = CurTransactionResourceOwner;
4352 
4353  if (need_old && table->old_tuplestore == NULL)
4354  table->old_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4355  if (need_new && table->new_tuplestore == NULL)
4356  table->new_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4357 
4358  CurrentResourceOwner = saveResourceOwner;
4359  MemoryContextSwitchTo(oldcxt);
4360 
4361  /* Now build the TransitionCaptureState struct, in caller's context */
4362  state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState));
4363  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4364  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4365  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4366  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4367  state->tcs_private = table;
4368 
4369  return state;
4370 }
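/*
 * For context, a hedged sketch (not code from trigger.c) of an AFTER
 * STATEMENT trigger function written in C that reads a transition table
 * captured by the machinery above.  SPI_register_trigger_data() exposes
 * tg_oldtable/tg_newtable to SPI under the names given in the trigger's
 * REFERENCING clause; the function name and the "new_rows" table name below
 * are assumptions for illustration.
 */
#include "postgres.h"
#include "commands/trigger.h"
#include "executor/spi.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(demo_count_new_rows);

Datum
demo_count_new_rows(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	if (!CALLED_AS_TRIGGER(fcinfo) ||
		!TRIGGER_FIRED_FOR_STATEMENT(trigdata->tg_event))
		elog(ERROR, "demo_count_new_rows: must be an AFTER STATEMENT trigger");

	if (SPI_connect() != SPI_OK_CONNECT)
		elog(ERROR, "SPI_connect failed");

	/* make tg_oldtable/tg_newtable visible to SPI queries by name */
	if (SPI_register_trigger_data(trigdata) != SPI_OK_TD_REGISTER)
		elog(ERROR, "SPI_register_trigger_data failed");

	/* "new_rows" must match REFERENCING NEW TABLE AS new_rows */
	if (SPI_execute("SELECT count(*) FROM new_rows", true, 0) != SPI_OK_SELECT ||
		SPI_processed != 1)
		elog(ERROR, "transition table query failed");

	{
		bool		isnull;
		Datum		d = SPI_getbinval(SPI_tuptable->vals[0],
									  SPI_tuptable->tupdesc, 1, &isnull);

		elog(NOTICE, "statement captured %ld new row(s)",
			 (long) DatumGetInt64(d));
	}

	SPI_finish();

	/* the return value of an AFTER trigger is ignored */
	return PointerGetDatum(NULL);
}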
4371 
4372 
4373 /* ----------
4374  * AfterTriggerBeginXact()
4375  *
4376  * Called at transaction start (either BEGIN or implicit for single
4377  * statement outside of transaction block).
4378  * ----------
4379  */
4380 void
4381 AfterTriggerBeginXact(void)
4382 {
4383  /*
4384  * Initialize after-trigger state structure to empty
4385  */
4386  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4387  afterTriggers.query_depth = -1;
4388 
4389  /*
4390  * Verify that there is no leftover state remaining. If these assertions
4391  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4392  * up properly.
4393  */
4394  Assert(afterTriggers.state == NULL);
4395  Assert(afterTriggers.query_stack == NULL);
4396  Assert(afterTriggers.maxquerydepth == 0);
4397  Assert(afterTriggers.event_cxt == NULL);
4398  Assert(afterTriggers.events.head == NULL);
4399  Assert(afterTriggers.trans_stack == NULL);
4400  Assert(afterTriggers.maxtransdepth == 0);
4401 }
4402 
4403 
4404 /* ----------
4405  * AfterTriggerBeginQuery()
4406  *
4407  * Called just before we start processing a single query within a
4408  * transaction (or subtransaction). Most of the real work gets deferred
4409  * until somebody actually tries to queue a trigger event.
4410  * ----------
4411  */
4412 void
4413 AfterTriggerBeginQuery(void)
4414 {
4415  /* Increase the query stack depth */
4416  afterTriggers.query_depth++;
4417 }
4418 
4419 
4420 /* ----------
4421  * AfterTriggerEndQuery()
4422  *
4423  * Called after one query has been completely processed. At this time
4424  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4425  * transfer deferred trigger events to the global deferred-trigger list.
4426  *
4427  * Note that this must be called BEFORE closing down the executor
4428  * with ExecutorEnd, because we make use of the EState's info about
4429  * target relations. Normally it is called from ExecutorFinish.
4430  * ----------
4431  */
4432 void
4433 AfterTriggerEndQuery(EState *estate)
4434 {
4435  AfterTriggersQueryData *qs;
4436 
4437  /* Must be inside a query, too */
4438  Assert(afterTriggers.query_depth >= 0);
4439 
4440  /*
4441  * If we never even got as far as initializing the event stack, there
4442  * certainly won't be any events, so exit quickly.
4443  */
4444  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4445  {
4446  afterTriggers.query_depth--;
4447  return;
4448  }
4449 
4450  /*
4451  * Process all immediate-mode triggers queued by the query, and move the
4452  * deferred ones to the main list of deferred events.
4453  *
4454  * Notice that we decide which ones will be fired, and put the deferred
4455  * ones on the main list, before anything is actually fired. This ensures
4456  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4457  * IMMEDIATE: all events we have decided to defer will be available for it
4458  * to fire.
4459  *
4460  * We loop in case a trigger queues more events at the same query level.
4461  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4462  * will instead fire any triggers in a dedicated query level. Foreign key
4463  * enforcement triggers do add to the current query level, thanks to their
4464  * passing fire_triggers = false to SPI_execute_snapshot(). Other
4465  * C-language triggers might do likewise.
4466  *
4467  * If we find no firable events, we don't have to increment
4468  * firing_counter.
4469  */
4470  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4471 
4472  for (;;)
4473  {
4474  if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
4475  {
4476  CommandId firing_id = afterTriggers.firing_counter++;
4477  AfterTriggerEventChunk *oldtail = qs->events.tail;
4478 
4479  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
4480  break; /* all fired */
4481 
4482  /*
4483  * Firing a trigger could result in query_stack being repalloc'd,
4484  * so we must recalculate qs after each afterTriggerInvokeEvents
4485  * call. Furthermore, it's unsafe to pass delete_ok = true here,
4486  * because that could cause afterTriggerInvokeEvents to try to
4487  * access qs->events after the stack has been repalloc'd.
4488  */
4489  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4490 
4491  /*
4492  * We'll need to scan the events list again. To reduce the cost
4493  * of doing so, get rid of completely-fired chunks. We know that
4494  * all events were marked IN_PROGRESS or DONE at the conclusion of
4495  * afterTriggerMarkEvents, so any still-interesting events must
4496  * have been added after that, and so must be in the chunk that
4497  * was then the tail chunk, or in later chunks. So, zap all
4498  * chunks before oldtail. This is approximately the same set of
4499  * events we would have gotten rid of by passing delete_ok = true.
4500  */
4501  Assert(oldtail != NULL);
4502  while (qs->events.head != oldtail)
4503  afterTriggerDeleteHeadEventChunk(qs);
4504  }
4505  else
4506  break;
4507  }
4508 
4509  /* Release query-level-local storage, including tuplestores if any */
4510  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
4511 
4512  afterTriggers.query_depth--;
4513 }
4514 
4515 
4516 /*
4517  * AfterTriggerFreeQuery
4518  * Release subsidiary storage for a trigger query level.
4519  * This includes closing down tuplestores.
4520  * Note: it's important for this to be safe if interrupted by an error
4521  * and then called again for the same query level.
4522  */
4523 static void
4524 AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
4525 {
4526  Tuplestorestate *ts;
4527  List *tables;
4528  ListCell *lc;
4529 
4530  /* Drop the trigger events */
4531  afterTriggerFreeEventList(&qs->events);
4532 
4533  /* Drop FDW tuplestore if any */
4534  ts = qs->fdw_tuplestore;
4535  qs->fdw_tuplestore = NULL;
4536  if (ts)
4537  tuplestore_end(ts);
4538 
4539  /* Release per-table subsidiary storage */
4540  tables = qs->tables;
4541  foreach(lc, tables)
4542  {
4543  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4544 
4545  ts = table->old_tuplestore;
4546  table->old_tuplestore = NULL;
4547  if (ts)
4548  tuplestore_end(ts);
4549  ts = table->new_tuplestore;
4550  table->new_tuplestore = NULL;
4551  if (ts)
4552  tuplestore_end(ts);
4553  }
4554 
4555  /*
4556  * Now free the AfterTriggersTableData structs and list cells. Reset list
4557  * pointer first; if list_free_deep somehow gets an error, better to leak
4558  * that storage than have an infinite loop.
4559  */
4560  qs->tables = NIL;
4561  list_free_deep(tables);
4562 }
4563 
4564 
4565 /* ----------
4566  * AfterTriggerFireDeferred()
4567  *
4568  * Called just before the current transaction is committed. At this
4569  * time we invoke all pending DEFERRED triggers.
4570  *
4571  * It is possible for other modules to queue additional deferred triggers
4572  * during pre-commit processing; therefore xact.c may have to call this
4573  * multiple times.
4574  * ----------
4575  */
4576 void
4577 AfterTriggerFireDeferred(void)
4578 {
4579  AfterTriggerEventList *events;
4580  bool snap_pushed = false;
4581 
4582  /* Must not be inside a query */
4583  Assert(afterTriggers.query_depth == -1);
4584 
4585  /*
4586  * If there are any triggers to fire, make sure we have set a snapshot for
4587  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
4588  * can't assume ActiveSnapshot is valid on entry.)
4589  */
4590  events = &afterTriggers.events;
4591  if (events->head != NULL)
4592  {
4593  PushActiveSnapshot(GetTransactionSnapshot());
4594  snap_pushed = true;
4595  }
4596 
4597  /*
4598  * Run all the remaining triggers. Loop until they are all gone, in case
4599  * some trigger queues more for us to do.
4600  */
4601  while (afterTriggerMarkEvents(events, NULL, false))
4602  {
4603  CommandId firing_id = afterTriggers.firing_counter++;
4604 
4605  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4606  break; /* all fired */
4607  }
4608 
4609  /*
4610  * We don't bother freeing the event list, since it will go away anyway
4611  * (and more efficiently than via pfree) in AfterTriggerEndXact.
4612  */
4613 
4614  if (snap_pushed)
4615  PopActiveSnapshot();
4616 }
4617 
4618 
4619 /* ----------
4620  * AfterTriggerEndXact()
4621  *
4622  * The current transaction is finishing.
4623  *
4624  * Any unfired triggers are canceled so we simply throw
4625  * away anything we know.
4626  *
4627  * Note: it is possible for this to be called repeatedly in case of
4628  * error during transaction abort; therefore, do not complain if
4629  * already closed down.
4630  * ----------
4631  */
4632 void
4633 AfterTriggerEndXact(bool isCommit)
4634 {
4635  /*
4636  * Forget the pending-events list.
4637  *
4638  * Since all the info is in TopTransactionContext or children thereof, we
4639  * don't really need to do anything to reclaim memory. However, the
4640  * pending-events list could be large, and so it's useful to discard it as
4641  * soon as possible --- especially if we are aborting because we ran out
4642  * of memory for the list!
4643  */
4644  if (afterTriggers.event_cxt)
4645  {
4646  MemoryContextDelete(afterTriggers.event_cxt);
4647  afterTriggers.event_cxt = NULL;
4648  afterTriggers.events.head = NULL;
4649  afterTriggers.events.tail = NULL;
4650  afterTriggers.events.tailfree = NULL;
4651  }
4652 
4653  /*
4654  * Forget any subtransaction state as well. Since this can't be very
4655  * large, we let the eventual reset of TopTransactionContext free the
4656  * memory instead of doing it here.
4657  */
4658  afterTriggers.trans_stack = NULL;
4659  afterTriggers.maxtransdepth = 0;
4660 
4661 
4662  /*
4663  * Forget the query stack and constraint-related state information. As
4664  * with the subtransaction state information, we don't bother freeing the
4665  * memory here.
4666  */
4667  afterTriggers.query_stack = NULL;
4668  afterTriggers.maxquerydepth = 0;
4669  afterTriggers.state = NULL;
4670 
4671  /* No more afterTriggers manipulation until next transaction starts. */
4672  afterTriggers.query_depth = -1;
4673 }
4674 
4675 /*
4676  * AfterTriggerBeginSubXact()
4677  *
4678  * Start a subtransaction.
4679  */
4680 void
4681 AfterTriggerBeginSubXact(void)
4682 {
4683  int my_level = GetCurrentTransactionNestLevel();
4684 
4685  /*
4686  * Allocate more space in the trans_stack if needed. (Note: because the
4687  * minimum nest level of a subtransaction is 2, we waste the first couple
4688  * entries of the array; not worth the notational effort to avoid it.)
4689  */
4690  while (my_level >= afterTriggers.maxtransdepth)
4691  {
4692  if (afterTriggers.maxtransdepth == 0)
4693  {
4694  /* Arbitrarily initialize for max of 8 subtransaction levels */
4695  afterTriggers.trans_stack = (AfterTriggersTransData *)
4696  MemoryContextAlloc(TopTransactionContext,
4697  8 * sizeof(AfterTriggersTransData));
4698  afterTriggers.maxtransdepth = 8;
4699  }
4700  else
4701  {
4702  /* repalloc will keep the stack in the same context */
4703  int new_alloc = afterTriggers.maxtransdepth * 2;
4704 
4705  afterTriggers.trans_stack = (AfterTriggersTransData *)
4706  repalloc(afterTriggers.trans_stack,
4707  new_alloc * sizeof(AfterTriggersTransData));
4708  afterTriggers.maxtransdepth = new_alloc;
4709  }
4710  }
4711 
4712  /*
4713  * Push the current information into the stack. The SET CONSTRAINTS state
4714  * is not saved until/unless changed. Likewise, we don't make a
4715  * per-subtransaction event context until needed.
4716  */
4717  afterTriggers.trans_stack[my_level].state = NULL;
4718  afterTriggers.trans_stack[my_level].events = afterTriggers.events;
4719  afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
4720  afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
4721 }
4722 
4723 /*
4724  * AfterTriggerEndSubXact()
4725  *
4726  * The current subtransaction is ending.
4727  */
4728 void
4729 AfterTriggerEndSubXact(bool isCommit)
4730 {
4731  int my_level = GetCurrentTransactionNestLevel();
4732  SetConstraintState state;
4733  AfterTriggerEvent event;
4734  AfterTriggerEventChunk *chunk;
4735  CommandId subxact_firing_id;
4736 
4737  /*
4738  * Pop the prior state if needed.
4739  */
4740  if (isCommit)
4741  {
4742  Assert(my_level < afterTriggers.maxtransdepth);
4743  /* If we saved a prior state, we don't need it anymore */
4744  state = afterTriggers.trans_stack[my_level].state;
4745  if (state != NULL)
4746  pfree(state);
4747  /* this avoids double pfree if error later: */
4748  afterTriggers.trans_stack[my_level].state = NULL;
4749  Assert(afterTriggers.query_depth ==
4750  afterTriggers.trans_stack[my_level].query_depth);
4751  }
4752  else
4753  {
4754  /*
4755  * Aborting. It is possible subxact start failed before calling
4756  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
4757  * trans_stack levels that aren't there.
4758  */
4759  if (my_level >= afterTriggers.maxtransdepth)
4760  return;
4761 
4762  /*
4763  * Release query-level storage for queries being aborted, and restore
4764  * query_depth to its pre-subxact value. This assumes that a
4765  * subtransaction will not add events to query levels started in an
4766  * earlier transaction state.
4767  */
4768  while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
4769  {
4770  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
4771  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
4772  afterTriggers.query_depth--;
4773  }
4774  Assert(afterTriggers.query_depth ==
4775  afterTriggers.trans_stack[my_level].query_depth);
4776 
4777  /*
4778  * Restore the global deferred-event list to its former length,
4779  * discarding any events queued by the subxact.
4780  */
4781  afterTriggerRestoreEventList(&afterTriggers.events,
4782  &afterTriggers.trans_stack[my_level].events);
4783 
4784  /*
4785  * Restore the trigger state. If the saved state is NULL, then this
4786  * subxact didn't save it, so it doesn't need restoring.
4787  */
4788  state = afterTriggers.trans_stack[my_level].state;
4789  if (state != NULL)
4790  {
4791  pfree(afterTriggers.state);
4792  afterTriggers.state = state;
4793  }
4794  /* this avoids double pfree if error later: */
4795  afterTriggers.trans_stack[my_level].state = NULL;
4796 
4797  /*
4798  * Scan for any remaining deferred events that were marked DONE or IN
4799  * PROGRESS by this subxact or a child, and un-mark them. We can
4800  * recognize such events because they have a firing ID greater than or
4801  * equal to the firing_counter value we saved at subtransaction start.
4802  * (This essentially assumes that the current subxact includes all
4803  * subxacts started after it.)
4804  */
4805  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
4806  for_each_event_chunk(event, chunk, afterTriggers.events)
4807  {
4808  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4809 
4810  if (event->ate_flags &
4811  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
4812  {
4813  if (evtshared->ats_firing_id >= subxact_firing_id)
4814  event->ate_flags &=
4815  ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
4816  }
4817  }
4818  }
4819 }
4820 
4821 /* ----------
4822  * AfterTriggerEnlargeQueryState()
4823  *
4824  * Prepare the necessary state so that we can record AFTER trigger events
4825  * queued by a query. It is allowed to have nested queries within a
4826  * (sub)transaction, so we need to have separate state for each query
4827  * nesting level.
4828  * ----------
4829  */
4830 static void
4831 AfterTriggerEnlargeQueryState(void)
4832 {
4833  int init_depth = afterTriggers.maxquerydepth;
4834 
4835  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
4836 
4837  if (afterTriggers.maxquerydepth == 0)
4838  {
4839  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
4840 
4841  afterTriggers.query_stack = (AfterTriggersQueryData *)
4842  MemoryContextAlloc(TopTransactionContext,
4843  new_alloc * sizeof(AfterTriggersQueryData));
4844  afterTriggers.maxquerydepth = new_alloc;
4845  }
4846  else
4847  {
4848  /* repalloc will keep the stack in the same context */
4849  int old_alloc = afterTriggers.maxquerydepth;
4850  int new_alloc = Max(afterTriggers.query_depth + 1,
4851  old_alloc * 2);
4852 
4853  afterTriggers.query_stack = (AfterTriggersQueryData *)
4854  repalloc(afterTriggers.query_stack,
4855  new_alloc * sizeof(AfterTriggersQueryData));
4856  afterTriggers.maxquerydepth = new_alloc;
4857  }
4858 
4859  /* Initialize new array entries to empty */
4860  while (init_depth < afterTriggers.maxquerydepth)
4861  {
4862  AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth];
4863 
4864  qs->events.head = NULL;
4865  qs->events.tail = NULL;
4866  qs->events.tailfree = NULL;
4867  qs->fdw_tuplestore = NULL;
4868  qs->tables = NIL;
4869 
4870  ++init_depth;
4871  }
4872 }
4873 
4874 /*
4875  * Create an empty SetConstraintState with room for numalloc trigstates
4876  */
4877 static SetConstraintState
4878 SetConstraintStateCreate(int numalloc)
4879 {
4880  SetConstraintState state;
4881 
4882  /* Behave sanely with numalloc == 0 */
4883  if (numalloc <= 0)
4884  numalloc = 1;
4885 
4886  /*
4887  * We assume that zeroing will correctly initialize the state values.
4888  */
4889  state = (SetConstraintState)
4890  MemoryContextAllocZero(TopTransactionContext,
4891  offsetof(SetConstraintStateData, trigstates) +
4892  numalloc * sizeof(SetConstraintTriggerData));
4893 
4894  state->numalloc = numalloc;
4895 
4896  return state;
4897 }
4898 
4899 /*
4900  * Copy a SetConstraintState
4901  */
4902 static SetConstraintState
4903 SetConstraintStateCopy(SetConstraintState origstate)
4904 {
4905  SetConstraintState state;
4906 
4907  state = SetConstraintStateCreate(origstate->numstates);
4908 
4909  state->all_isset = origstate->all_isset;
4910  state->all_isdeferred = origstate->all_isdeferred;
4911  state->numstates = origstate->numstates;
4912  memcpy(state->trigstates, origstate->trigstates,
4913  origstate->numstates * sizeof(SetConstraintTriggerData));
4914 
4915  return state;
4916 }
4917 
4918 /*
4919  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
4920  * pointer to the state object (it will change if we have to repalloc).
4921  */
4922 static SetConstraintState
4923 SetConstraintStateAddItem(SetConstraintState state,
4924  Oid tgoid, bool tgisdeferred)
4925 {
4926  if (state->numstates >= state->numalloc)
4927  {
4928  int newalloc = state->numalloc * 2;
4929 
4930  newalloc = Max(newalloc, 8); /* in case original has size 0 */
4931  state = (SetConstraintState)
4932  repalloc(state,
4933  offsetof(SetConstraintStateData, trigstates) +
4934  newalloc * sizeof(SetConstraintTriggerData));
4935  state->numalloc = newalloc;
4936  Assert(state->numstates < state->numalloc);
4937  }
4938 
4939  state->trigstates[state->numstates].sct_tgoid = tgoid;
4940  state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
4941  state->numstates++;
4942 
4943  return state;
4944 }
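/*
 * Illustrative, standalone sketch (not part of trigger.c): the
 * doubling-growth, return-the-possibly-moved-pointer idiom used by
 * SetConstraintStateAddItem() above, with plain realloc standing in for the
 * backend's repalloc.  All names below are hypothetical.
 */
#include <stddef.h>
#include <stdlib.h>

typedef struct DemoState
{
	int			numalloc;
	int			numstates;
	unsigned int tgoids[];		/* flexible array member */
} DemoState;

static DemoState *
demo_add_item(DemoState *state, unsigned int tgoid)
{
	if (state->numstates >= state->numalloc)
	{
		int			newalloc = state->numalloc * 2;
		DemoState  *grown;

		if (newalloc < 8)
			newalloc = 8;		/* in case the original had size 0 */
		grown = realloc(state, offsetof(DemoState, tgoids) +
						newalloc * sizeof(unsigned int));
		if (grown == NULL)
			abort();
		state = grown;
		state->numalloc = newalloc;
	}
	state->tgoids[state->numstates++] = tgoid;
	return state;				/* pointer may have moved */
}

int
main(void)
{
	DemoState  *state = calloc(1, offsetof(DemoState, tgoids));

	if (state == NULL)
		return 1;
	for (unsigned int i = 0; i < 1000; i++)
		state = demo_add_item(state, i);
	free(state);
	return 0;
}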
4945 
4946 /* ----------
4947  * AfterTriggerSetState()
4948  *
4949  * Execute the SET CONSTRAINTS ... utility command.
4950  * ----------
4951  */
4952 void
4953 AfterTriggerSetState(ConstraintsSetStmt *stmt)
4954 {
4955  int my_level = GetCurrentTransactionNestLevel();
4956 
4957  /* If we haven't already done so, initialize our state. */
4958  if (afterTriggers.state == NULL)
4959  afterTriggers.state = SetConstraintStateCreate(8);
4960 
4961  /*
4962  * If in a subtransaction, and we didn't save the current state already,
4963  * save it so it can be restored if the subtransaction aborts.
4964  */
4965  if (my_level > 1 &&
4966  afterTriggers.trans_stack[my_level].state == NULL)
4967  {
4968  afterTriggers.trans_stack[my_level].state =
4969  SetConstraintStateCopy(afterTriggers.state);
4970  }
4971 
4972  /*
4973  * Handle SET CONSTRAINTS ALL ...
4974  */
4975  if (stmt->constraints == NIL)
4976  {
4977  /*
4978  * Forget any previous SET CONSTRAINTS commands in this transaction.
4979  */
4980  afterTriggers.state->numstates = 0;
4981 
4982  /*
4983  * Set the per-transaction ALL state to known.
4984  */
4985  afterTriggers.state->all_isset = true;
4986  afterTriggers.state->all_isdeferred = stmt->deferred;
4987  }
4988  else
4989  {
4990  Relation conrel;
4991  Relation tgrel;
4992  List *conoidlist = NIL;
4993  List *tgoidlist = NIL;
4994  ListCell *lc;
4995 
4996  /*
4997  * Handle SET CONSTRAINTS constraint-name [, ...]
4998  *
4999  * First, identify all the named constraints and make a list of their
5000  * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
5001  * the same name within a schema, the specifications are not
5002  * necessarily unique. Our strategy is to target all matching
5003  * constraints within the first search-path schema that has any
5004  * matches, but disregard matches in schemas beyond the first match.
5005  * (This is a bit odd but it's the historical behavior.)
5006  *
5007  * A constraint in a partitioned table may have corresponding
5008  * constraints in the partitions. Grab those too.
5009  */
5010  conrel = table_open(ConstraintRelationId, AccessShareLock);
5011 
5012  foreach(lc, stmt->constraints)
5013  {
5014  RangeVar *constraint = lfirst(lc);
5015  bool found;
5016  List *namespacelist;
5017  ListCell *nslc;
5018 
5019  if (constraint->catalogname)
5020  {
5021  if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
5022  ereport(ERROR,
5023  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5024  errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
5025  constraint->catalogname, constraint->schemaname,
5026  constraint->relname)));
5027  }
5028 
5029  /*
5030  * If we're given the schema name with the constraint, look only
5031  * in that schema. If given a bare constraint name, use the
5032  * search path to find the first matching constraint.
5033  */
5034  if (constraint->schemaname)
5035  {
5036  Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
5037  false);
5038 
5039  namespacelist = list_make1_oid(namespaceId);
5040  }
5041  else
5042  {
5043  namespacelist = fetch_search_path(true);
5044  }
5045 
5046  found = false;
5047  foreach(nslc, namespacelist)
5048  {
5049  Oid namespaceId = lfirst_oid(nslc);
5050  SysScanDesc conscan;
5051  ScanKeyData skey[2];
5052  HeapTuple tup;
5053 
5054  ScanKeyInit(&skey[0],
5055  Anum_pg_constraint_conname,
5056  BTEqualStrategyNumber, F_NAMEEQ,
5057  CStringGetDatum(constraint->relname));
5058  ScanKeyInit(&skey[1],
5059  Anum_pg_constraint_connamespace,
5060  BTEqualStrategyNumber, F_OIDEQ,
5061  ObjectIdGetDatum(namespaceId));
5062 
5063  conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
5064  true, NULL, 2, skey);
5065 
5066  while (HeapTupleIsValid(tup = systable_getnext(conscan)))
5067  {
5068  Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
5069 
5070  if (con->condeferrable)
5071  conoidlist = lappend_oid(conoidlist, con->oid);
5072  else if (stmt->deferred)
5073  ereport(ERROR,
5074  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
5075  errmsg("constraint \"%s\" is not deferrable",
5076  constraint->relname)));
5077  found = true;
5078  }
5079 
5080  systable_endscan(conscan);
5081 
5082  /*
5083  * Once we've found a matching constraint we do not search
5084  * later parts of the search path.
5085  */
5086  if (found)
5087  break;
5088  }
5089 
5090  list_free(namespacelist);
5091 
5092  /*
5093  * Not found ?
5094  */
5095  if (!found)
5096  ereport(ERROR,
5097  (errcode(ERRCODE_UNDEFINED_OBJECT),
5098  errmsg("constraint \"%s\" does not exist",
5099  constraint->relname)));
5100  }
5101 
5102  /*
5103  * Scan for any possible descendants of the constraints. We append
5104  * whatever we find to the same list that we're scanning; this has the
5105  * effect that we create new scans for those, too, so if there are
5106  * further descendants, we'll also catch them.
5107  */
5108  foreach(lc, conoidlist)
5109  {
5110  Oid parent = lfirst_oid(lc);
5111  ScanKeyData key;
5112  SysScanDesc scan;
5113  HeapTuple tuple;
5114 
5115  ScanKeyInit(&key,
5116  Anum_pg_constraint_conparentid,
5117  BTEqualStrategyNumber, F_OIDEQ,
5118  ObjectIdGetDatum(parent));
5119 
5120  scan = systable_beginscan(conrel, ConstraintParentIndexId, true, NULL, 1, &key);
5121 
5122  while (HeapTupleIsValid(tuple = systable_getnext(scan)))
5123  {
5124  Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple);
5125 
5126  conoidlist = lappend_oid(conoidlist, con->oid);
5127  }
5128 
5129  systable_endscan(scan);
5130  }
5131 
5132  table_close(conrel, AccessShareLock);
5133 
5134  /*
5135  * Now, locate the trigger(s) implementing each of these constraints,
5136  * and make a list of their OIDs.
5137  */
5138  tgrel = table_open(TriggerRelationId, AccessShareLock);
5139 
5140  foreach(lc, conoidlist)
5141  {
5142  Oid conoid = lfirst_oid(lc);
5143  ScanKeyData skey;
5144  SysScanDesc tgscan;
5145  HeapTuple htup;
5146 
5147  ScanKeyInit(&skey,
5148  Anum_pg_trigger_tgconstraint,
5149  BTEqualStrategyNumber, F_OIDEQ,
5150  ObjectIdGetDatum(conoid));
5151 
5152  tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
5153  NULL, 1, &skey);
5154 
5155  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
5156  {
5157  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
5158 
5159  /*
5160  * Silently skip triggers that are marked as non-deferrable in
5161  * pg_trigger. This is not an error condition, since a
5162  * deferrable RI constraint may have some non-deferrable
5163  * actions.
5164  */
5165  if (pg_trigger->tgdeferrable)
5166  tgoidlist = lappend_oid(tgoidlist, pg_trigger->oid);
5167  }
5168 
5169  systable_endscan(tgscan);
5170  }
5171 
5172  table_close(tgrel, AccessShareLock);
5173 
5174  /*
5175  * Now we can set the trigger states of individual triggers for this
5176  * xact.
5177  */
5178  foreach(lc, tgoidlist)
5179  {
5180  Oid tgoid = lfirst_oid(lc);
5181  SetConstraintState state = afterTriggers.state;
5182  bool found = false;
5183  int i;
5184 
5185  for (i = 0; i < state->numstates; i++)
5186  {
5187  if (state->trigstates[i].sct_tgoid == tgoid)
5188  {
5189  state->trigstates[i].sct_tgisdeferred = stmt->deferred;
5190  found = true;
5191  break;
5192  }
5193  }
5194  if (!found)
5195  {
5196  afterTriggers.state =
5197  SetConstraintStateAddItem(state, tgoid, stmt->deferred);
5198  }
5199  }
5200  }
5201 
5202  /*
5203  * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
5204  * checks against that constraint must be made when the SET CONSTRAINTS
5205  * command is executed -- i.e. the effects of the SET CONSTRAINTS command
5206  * apply retroactively. We've updated the constraints state, so scan the
5207  * list of previously deferred events to fire any that have now become
5208  * immediate.
5209  *
5210  * Obviously, if this was SET ... DEFERRED then it can't have converted
5211  * any unfired events to immediate, so we need do nothing in that case.
5212  */
5213  if (!stmt->deferred)
5214  {
5215  AfterTriggerEventList *events = &afterTriggers.events;
5216  bool snapshot_set = false;
5217 
5218  while (afterTriggerMarkEvents(events, NULL, true))
5219  {
5220  CommandId firing_id = afterTriggers.firing_counter++;
5221 
5222  /*
5223  * Make sure a snapshot has been established in case trigger
5224  * functions need one. Note that we avoid setting a snapshot if
5225  * we don't find at least one trigger that has to be fired now.
5226  * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
5227  * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
5228  * at the start of a transaction it's not possible for any trigger
5229  * events to be queued yet.)
5230  */
5231  if (!snapshot_set)
5232  {
5233  PushActiveSnapshot(GetTransactionSnapshot());
5234  snapshot_set = true;
5235  }
5236 
5237  /*
5238  * We can delete fired events if we are at top transaction level,
5239  * but we'd better not if inside a subtransaction, since the
5240  * subtransaction could later get rolled back.
5241  */
5242  if (afterTriggerInvokeEvents(events, firing_id, NULL,
5243  !IsSubTransaction()))
5244  break; /* all fired */
5245  }
5246 
5247  if (snapshot_set)
5248  PopActiveSnapshot();
5249  }
5250 }
5251 
5252 /* ----------
5253  * AfterTriggerPendingOnRel()
5254  * Test to see if there are any pending after-trigger events for rel.
5255  *
5256  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5257  * it is unsafe to perform major surgery on a relation. Note that only
5258  * local pending events are examined. We assume that having exclusive lock
5259  * on a rel guarantees there are no unserviced events in other backends ---
5260  * but having a lock does not prevent there being such events in our own.
5261  *
5262  * In some scenarios it'd be reasonable to remove pending events (more
5263  * specifically, mark them DONE by the current subxact) but without a lot
5264  * of knowledge of the trigger semantics we can't do this in general.
5265  * ----------
5266  */
5267 bool
5268 AfterTriggerPendingOnRel(Oid relid)
5269 {
5270  AfterTriggerEvent event;
5271  AfterTriggerEventChunk *chunk;
5272  int depth;
5273 
5274  /* Scan queued events */
5275  for_each_event_chunk(event, chunk, afterTriggers.events)
5276  {
5277  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5278 
5279  /*
5280  * We can ignore completed events. (Even if a DONE flag is rolled
5281  * back by subxact abort, it's OK because the effects of the TRUNCATE
5282  * or whatever must get rolled back too.)
5283  */
5284  if (event->ate_flags & AFTER_TRIGGER_DONE)
5285  continue;
5286 
5287  if (evtshared->ats_relid == relid)
5288  return true;
5289  }
5290 
5291  /*
5292  * Also scan events queued by incomplete queries. This could only matter
5293  * if TRUNCATE/etc is executed by a function or trigger within an updating
5294  * query on the same relation, which is pretty perverse, but let's check.
5295  */
5296  for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
5297  {
5298  for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events)
5299  {
5300  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5301 
5302  if (event->ate_flags & AFTER_TRIGGER_DONE)
5303  continue;
5304 
5305  if (evtshared->ats_relid == relid)
5306  return true;
5307  }
5308  }
5309 
5310  return false;
5311 }
5312 
5313 
5314 /* ----------
5315  * AfterTriggerSaveEvent()
5316  *
5317  * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5318  * be fired for an event.
5319  *
5320  * NOTE: this is called whenever there are any triggers associated with
5321  * the event (even if they are disabled). This function decides which
5322  * triggers actually need to be queued. It is also called after each row,
5323  * even if there are no triggers for that event, if there are any AFTER
5324  * STATEMENT triggers for the statement which use transition tables, so that
5325  * the transition tuplestores can be built. Furthermore, if the transition
5326  * capture is happening for UPDATEd rows being moved to another partition due
5327  * to the partition-key being changed, then this function is called once when
5328  * the row is deleted (to capture OLD row), and once when the row is inserted
5329  * into another partition (to capture NEW row). This is done separately because
5330  * DELETE and INSERT happen on different tables.
5331  *
5332  * Transition tuplestores are built now, rather than when events are pulled
5333  * off the queue, because AFTER ROW triggers are allowed to select from the
5334  * transition tables for the statement.
5335  * ----------
5336  */
5337 static void
5338 AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
5339  int event, bool row_trigger,
5340  TupleTableSlot *oldslot, TupleTableSlot *newslot,
5341  List *recheckIndexes, Bitmapset *modifiedCols,
5342  TransitionCaptureState *transition_capture)
5343 {
5344  Relation rel = relinfo->ri_RelationDesc;
5345  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5346  AfterTriggerEventData new_event;
5347  AfterTriggerSharedData new_shared;
5348  char relkind = rel->rd_rel->relkind;
5349  int tgtype_event;
5350  int tgtype_level;
5351  int i;
5352  Tuplestorestate *fdw_tuplestore = NULL;
5353 
5354  /*
5355  * Check state. We use a normal test not Assert because it is possible to
5356  * reach here in the wrong state given misconfigured RI triggers, in
5357  * particular deferring a cascade action trigger.
5358  */
5359  if (afterTriggers.query_depth < 0)
5360  elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
5361 
5362  /* Be sure we have enough space to record events at this query depth. */
5363  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5364  AfterTriggerEnlargeQueryState();
5365 
5366  /*
5367  * If the directly named relation has any triggers with transition tables,
5368  * then we need to capture transition tuples.
5369  */
5370  if (row_trigger && transition_capture != NULL)
5371  {
5372  TupleTableSlot *original_insert_tuple = transition_capture->tcs_original_insert_tuple;
5373  TupleConversionMap *map = relinfo->ri_ChildToRootMap;
5374  bool delete_old_table = transition_capture->tcs_delete_old_table;
5375  bool update_old_table = transition_capture->tcs_update_old_table;
5376  bool update_new_table = transition_capture->tcs_update_new_table;
5377  bool insert_new_table = transition_capture->tcs_insert_new_table;
5378 
5379  /*
5380  * For INSERT events NEW should be non-NULL, for DELETE events OLD
5381  * should be non-NULL, whereas for UPDATE events normally both OLD and
5382  * NEW are non-NULL. But for UPDATE events fired for capturing
5383  * transition tuples during UPDATE partition-key row movement, OLD is
5384  * NULL when the event is for a row being inserted, whereas NEW is
5385  * NULL when the event is for a row being deleted.
5386  */
5387  Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5388  TupIsNull(oldslot)));
5389  Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5390  TupIsNull(newslot)));
5391 
5392  if (!TupIsNull(oldslot) &&
5393  ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
5394  (event == TRIGGER_EVENT_UPDATE && update_old_table)))
5395  {
5396  Tuplestorestate *old_tuplestore;
5397 
5398  old_tuplestore = transition_capture->tcs_private->old_tuplestore;
5399 
5400  if (map != NULL)
5401  {
5402  TupleTableSlot *storeslot;
5403 
5404  storeslot = transition_capture->tcs_private->storeslot;
5405  if (!storeslot)
5406  {
5407  storeslot = ExecAllocTableSlot(&estate->es_tupleTable,
5408  map->outdesc,
5409  &TTSOpsVirtual);
5410  transition_capture->tcs_private->storeslot = storeslot;
5411  }
5412 
5413  execute_attr_map_slot(map->attrMap, oldslot, storeslot);
5414  tuplestore_puttupleslot(old_tuplestore, storeslot);
5415  }
5416  else
5417  tuplestore_puttupleslot(old_tuplestore, oldslot);
5418  }
5419  if (!TupIsNull(newslot) &&
5420  ((event == TRIGGER_EVENT_INSERT && insert_new_table) ||
5421  (event == TRIGGER_EVENT_UPDATE && update_new_table)))
5422  {
5423  Tuplestorestate *new_tuplestore;
5424 
5425  new_tuplestore = transition_capture->tcs_private->new_tuplestore;
5426 
5427  if (original_insert_tuple != NULL)
5428  tuplestore_puttupleslot(new_tuplestore,
5429  original_insert_tuple);
5430  else if (map != NULL)
5431  {
5432  TupleTableSlot *storeslot;
5433 
5434  storeslot = transition_capture->tcs_private->storeslot;
5435 
5436  if (!storeslot)
5437  {
5438  storeslot = ExecAllocTableSlot(&estate->es_tupleTable,
5439  map->outdesc,
5440  &TTSOpsVirtual);
5441  transition_capture->tcs_private->storeslot = storeslot;
5442  }
5443 
5444  execute_attr_map_slot(map->attrMap, newslot, storeslot);
5445  tuplestore_puttupleslot(new_tuplestore, storeslot);
5446  }
5447  else
5448  tuplestore_puttupleslot(new_tuplestore, newslot);
5449  }
5450 
5451  /*
5452  * If transition tables are the only reason we're here, return.  As
5453  * mentioned above, we can also be here during update tuple routing in
5454  * the presence of transition tables, in which case this function is
5455  * called separately for the old and new tuples, so we expect exactly
5456  * one of oldslot and newslot to be NULL.
5457  */
5458  if (trigdesc == NULL ||
5459  (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
5460  (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
5461  (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) ||
5462  (event == TRIGGER_EVENT_UPDATE && (TupIsNull(oldslot) ^ TupIsNull(newslot))))
5463  return;
5464  }
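 /*
  * Illustrative example (not part of this file): the capture logic above
  * is what fills the transition tables named in a trigger's REFERENCING
  * clause, e.g. for a hypothetical statement-level trigger such as
  *
  *    CREATE TRIGGER summarize_upd
  *        AFTER UPDATE ON accounts
  *        REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
  *        FOR EACH STATEMENT EXECUTE FUNCTION summarize_update();
  *
  * each updated row is appended to both tuplestores here, converted via
  * ri_ChildToRootMap when it comes from a partition whose row type differs
  * from the root's.  Table and function names are invented.
  */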
5465 
5466  /*
5467  * Validate the event code and collect the associated tuple CTIDs.
5468  *
5469  * The event code will be used both as a bitmask and an array offset, so
5470  * validation is important to make sure we don't walk off the edge of our
5471  * arrays.
5472  *
5473  * Also, if we're considering statement-level triggers, check whether we
5474  * already queued a set of them for this event, and cancel the prior set
5475  * if so. This preserves the behavior that statement-level triggers fire
5476  * just once per statement and fire after row-level triggers.
5477  */
5478  switch (event)
5479  {
5480  case TRIGGER_EVENT_INSERT:
5481  tgtype_event = TRIGGER_TYPE_INSERT;
5482  if (row_trigger)
5483  {
5484  Assert(oldslot == NULL);
5485  Assert(newslot != NULL);
5486  ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid1));
5487  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5488  }
5489  else
5490  {
5491  Assert(oldslot == NULL);
5492  Assert(newslot == NULL);
5493  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5494  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5495  cancel_prior_stmt_triggers(RelationGetRelid(rel),
5496  CMD_INSERT, event);
5497  }
5498  break;
5499  case TRIGGER_EVENT_DELETE:
5500  tgtype_event = TRIGGER_TYPE_DELETE;
5501  if (row_trigger)
5502  {
5503  Assert(oldslot != NULL);
5504  Assert(newslot == NULL);
5505  ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
5506  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5507  }
5508  else
5509  {
5510  Assert(oldslot == NULL);
5511  Assert(newslot == NULL);
5512  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5513  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5514  cancel_prior_stmt_triggers(RelationGetRelid(rel),
5515  CMD_DELETE, event);
5516  }
5517  break;
5518  case TRIGGER_EVENT_UPDATE:
5519  tgtype_event = TRIGGER_TYPE_UPDATE;
5520  if (row_trigger)
5521  {
5522  Assert(oldslot != NULL);
5523  Assert(newslot != NULL);
5524  ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
5525  ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid2));
5526  }
5527  else
5528  {
5529  Assert(oldslot == NULL);
5530  Assert(newslot == NULL);
5531  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5532  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5533  cancel_prior_stmt_triggers(RelationGetRelid(rel),
5534  CMD_UPDATE, event);
5535  }
5536  break;
5537  case TRIGGER_EVENT_TRUNCATE:
5538  tgtype_event = TRIGGER_TYPE_TRUNCATE;
5539  Assert(oldslot == NULL);
5540  Assert(newslot == NULL);
5541  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5542  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5543  break;
5544  default:
5545  elog(ERROR, "invalid after-trigger event code: %d", event);
5546  tgtype_event = 0; /* keep compiler quiet */
5547  break;
5548  }
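 /*
  * Illustrative scenario (not part of this file) for the
  * cancel_prior_stmt_triggers() calls above: if several FK enforcement
  * triggers each run an UPDATE or DELETE on the same child table within one
  * outer statement (say, an ON UPDATE CASCADE action fired once per modified
  * parent row), each pass queues the child's AFTER STATEMENT triggers anew
  * and cancels the previously queued set, so those statement triggers still
  * fire only once for the outer statement, and after the row-level events
  * queued so far.
  */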
5549 
5550  if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
5551  new_event.ate_flags = (row_trigger && event == TRIGGER_EVENT_UPDATE) ?
5552  AFTER_TRIGGER_2CTID : AFTER_TRIGGER_1CTID;
5553  /* else, we'll initialize ate_flags for each trigger */
5554 
5555  tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
5556 
5557  for (i = 0; i < trigdesc->numtriggers; i++)
5558  {
5559  Trigger *trigger = &trigdesc->triggers[i];
5560 
5561  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
5562  tgtype_level,
5563  TRIGGER_TYPE_AFTER,
5564  tgtype_event))
5565  continue;
5566  if (!TriggerEnabled(estate, relinfo, trigger, event,
5567  modifiedCols, oldslot, newslot))
5568  continue;
5569 
5570  if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
5571  {
5572  if (fdw_tuplestore == NULL)
5573  {
5574  fdw_tuplestore = GetCurrentFDWTuplestore();
5575  new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
5576  }
5577  else
5578  /* subsequent event for the same tuple */
5579  new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
5580  }
5581 
5582  /*
5583  * If the trigger is a foreign key enforcement trigger, there are
5584  * certain cases where we can skip queueing the event because we can
5585  * tell by inspection that the FK constraint will still pass.
5586  */
5587  if (TRIGGER_FIRED_BY_UPDATE(event) || TRIGGER_FIRED_BY_DELETE(event))
5588  {
5589  switch (RI_FKey_trigger_type(trigger->tgfoid))
5590  {
5591  case RI_TRIGGER_PK:
5592  /* Update or delete on trigger's PK table */
5593  if (!RI_FKey_pk_upd_check_required(trigger, rel,
5594  oldslot, newslot))
5595  {
5596  /* skip queuing this event */
5597  continue;
5598  }
5599  break;
5600 
5601  case RI_TRIGGER_FK:
5602  /* Update on trigger's FK table */
5603  if (!RI_FKey_fk_upd_check_required(trigger, rel,
5604  oldslot, newslot))
5605  {
5606  /* skip queuing this event */
5607  continue;
5608  }
5609  break;
5610 
5611  case RI_TRIGGER_NONE:
5612  /* Not an FK trigger */
5613  break;
5614  }
5615  }
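 /*
  * Illustrative example (not part of this file) of the skip logic above:
  * given a hypothetical schema
  *
  *    CREATE TABLE pk (id int PRIMARY KEY, note text);
  *    CREATE TABLE fk (id int REFERENCES pk);
  *
  * an UPDATE such as "UPDATE pk SET note = 'x'" leaves the key column
  * untouched, so RI_FKey_pk_upd_check_required() reports that no referential
  * check is needed and the corresponding RI trigger event is never queued.
  */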
5616 
5617  /*
5618  * If the trigger is a deferred unique constraint check trigger, only
5619  * queue it if the unique constraint was potentially violated, which
5620  * we know from index insertion time.
5621  */
5622  if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
5623  {
5624  if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
5625  continue; /* Uniqueness definitely not violated */
5626  }
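 /*
  * Illustrative example (not part of this file) of the recheck filter above:
  * for a hypothetical
  *
  *    CREATE TABLE t (k int UNIQUE DEFERRABLE INITIALLY DEFERRED);
  *
  * only rows whose index insertion detected a possible duplicate appear in
  * recheckIndexes, so conflict-free inserts never queue the deferred
  * unique-key recheck trigger at all.
  */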
5627 
5628  /*
5629  * Fill in event structure and add it to the current query's queue.
5630  * Note we set ats_table to NULL whenever this trigger doesn't use
5631  * transition tables, to improve sharability of the shared event data.
5632  */
5633  new_shared.ats_event =
5634  (event & TRIGGER_EVENT_OPMASK) |
5635  (row_trigger ? TRIGGER_EVENT_ROW : 0) |
5636  (trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
5637  (trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
5638  new_shared.ats_tgoid = trigger->tgoid;
5639  new_shared.ats_relid = RelationGetRelid(rel);
5640  new_shared.ats_firing_id = 0;
5641  if ((trigger->tgoldtable || trigger->tgnewtable) &&
5642  transition_capture != NULL)
5643  new_shared.ats_table = transition_capture->tcs_private;
5644  else
5645  new_shared.ats_table = NULL;
5646  new_shared.ats_modifiedcols = modifiedCols;
5647 
5648  afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth].events,
5649  &new_event, &new_shared);
5650  }
5651 
5652  /*
5653  * Finally, spool any foreign tuple(s). The tuplestore squashes them to
5654  * minimal tuples, so this loses any system columns. The executor lost
5655  * those columns before us, for an unrelated reason, so this is fine.
5656  */
5657  if (fdw_tuplestore)
5658  {
5659  if (oldslot != NULL)
5660  tuplestore_puttupleslot(fdw_tuplestore, oldslot);
5661  if (newslot != NULL)
5662  tuplestore_puttupleslot(fdw_tuplestore, newslot);
5663  }
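 /*
  * Illustrative note (not part of this file): this spooling path is taken
  * for AFTER ROW triggers on foreign tables, e.g. a hypothetical
  *
  *    CREATE TRIGGER ft_audit AFTER UPDATE ON remote_orders
  *        FOR EACH ROW EXECUTE FUNCTION audit_remote();
  *
  * Foreign tuples have no ctid to re-fetch later, so the old/new rows are
  * stored here and replayed from the tuplestore (AFTER_TRIGGER_FDW_FETCH /
  * AFTER_TRIGGER_FDW_REUSE) when the trigger is eventually called.
  */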
5664 }
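/*
 * Illustrative example (not part of this file) of the partition-key row
 * movement described in AfterTriggerSaveEvent's header comment: given a
 * hypothetical
 *
 *    CREATE TABLE m (p int, v text) PARTITION BY LIST (p);
 *    CREATE TABLE m1 PARTITION OF m FOR VALUES IN (1);
 *    CREATE TABLE m2 PARTITION OF m FOR VALUES IN (2);
 *    CREATE TRIGGER m_upd AFTER UPDATE ON m
 *        REFERENCING OLD TABLE AS o NEW TABLE AS n
 *        FOR EACH STATEMENT EXECUTE FUNCTION report_moves();
 *
 * "UPDATE m SET p = 2 WHERE p = 1" moves rows from m1 to m2, and each moved
 * row is captured in two calls: OLD when it is deleted from m1 and NEW when
 * it is inserted into m2, which is why exactly one of oldslot/newslot is
 * expected to be NULL in that case.
 */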
5665 
5666 /*
5667  * Detect whether we already queued BEFORE STATEMENT triggers for the given
5668  * relation + operation, and set the flag so the next call will report "true".
5669  */
5670 static bool
5671 before_stmt_triggers_fired(Oid relid, CmdType cmdType)
5672 {
5673  bool result;
5674  AfterTriggersTableData *table;
5675 
5676  /* Check state, like AfterTriggerSaveEvent. */
5677  if (afterTriggers.query_depth < 0)
5678  elog(ERROR, "before_stmt_triggers_fired() called outside of query");
5679 
5680  /* Be sure we have enough space to record events at this query depth. */
5681  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5682  AfterTriggerEnlargeQueryState();
5683 
5684  /*
5685  * We keep this state in the AfterTriggersTableData that also holds
5686  * transition tables for the relation + operation. In this way, if we are
5687  * forced to make a new set of transition tables because more tuples get
5688  * entered after we've already fired triggers, we will allow a new set of
5689  * statement triggers to get queued.
5690  */
5691  table = GetAfterTriggersTableData(relid, cmdType);
5692  result = table->before_trig_done;
5693  table->before_trig_done = true;
5694  return result;
5695 }
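/*
 * Illustrative caller-side sketch (not part of this file): the ExecBS*Triggers
 * functions are expected to consult this before firing BEFORE STATEMENT
 * triggers, roughly along the lines of
 *
 *    if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
 *                                   CMD_INSERT))
 *        return;
 *
 * so that a second firing attempt within the same statement (for example,
 * from another sub-statement touching the same table) becomes a no-op.
 */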
5696 
5697 /*
5698  * If we previously queued a set of AFTER STATEMENT triggers for the given
5699  * relation + operation, and they've not been fired yet, cancel them. The
5700  * caller will queue a fresh set that's after any row-level triggers that may
5701  * have been queued by the current sub-statement, preserving (as much as
5702  * possible) the property that AFTER ROW triggers fire before AFTER STATEMENT
5703  * triggers, and that the latter only fire once. This deals with the
5704  * situation where several FK enforcement triggers sequentially queue triggers
5705  * for the same table into the same trigger query level. We can't fully
5706  * prevent odd behavior though: if there are AFTER ROW triggers taking
5707  * transition tables, we don't want to change the transition tables once the
5708  * first such trigger has seen them. In such a case, any additional events
5709  * will result in creating new transition tables and allowing new firings of
5710  * statement triggers.
5711  *
5712  * This also saves the current event list location so that a later invocation
5713  * of this function can cheaply find the triggers we're about to queue and
5714  * cancel them.
5715  */
5716 static void
5717 cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent)
5718 {
5719  AfterTriggersTableData *table;
5720  AfterTriggersQueryData *qs = &afterTriggers.query_stack[afterTriggers.query_depth];
5721 
5722  /*
5723  * We keep this state in the AfterTriggersTableData that also holds
5724  * transition tables for the relation + operation. In this way, if we are
5725  * forced to make a new set of transition tables because more tuples get
5726  * entered after we've already fired triggers, we will allow a new set of
5727  * statement triggers to get queued without canceling the old ones.
5728  */
5729  table = GetAfterTriggersTableData(relid, cmdType);
5730 
5731  if (table->after_trig_done)
5732  {
5733  /*
5734  * We want to start scanning from the tail location that existed just
5735  * before we inserted any statement triggers. But the events list
5736  * might've been entirely empty then, in which case scan from the
5737  * current head.
5738  */
5739  AfterTriggerEvent event;