trigger.c
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/index.h"
26 #include "catalog/indexing.h"
27 #include "catalog/objectaccess.h"
28 #include "catalog/partition.h"
29 #include "catalog/pg_constraint.h"
30 #include "catalog/pg_inherits.h"
31 #include "catalog/pg_proc.h"
32 #include "catalog/pg_trigger.h"
33 #include "catalog/pg_type.h"
34 #include "commands/dbcommands.h"
35 #include "commands/defrem.h"
36 #include "commands/trigger.h"
37 #include "executor/executor.h"
38 #include "executor/execPartition.h"
39 #include "miscadmin.h"
40 #include "nodes/bitmapset.h"
41 #include "nodes/makefuncs.h"
42 #include "optimizer/optimizer.h"
43 #include "parser/parse_clause.h"
44 #include "parser/parse_collate.h"
45 #include "parser/parse_func.h"
46 #include "parser/parse_relation.h"
47 #include "parser/parsetree.h"
48 #include "partitioning/partdesc.h"
49 #include "pgstat.h"
50 #include "rewrite/rewriteManip.h"
51 #include "storage/bufmgr.h"
52 #include "storage/lmgr.h"
53 #include "tcop/utility.h"
54 #include "utils/acl.h"
55 #include "utils/builtins.h"
56 #include "utils/bytea.h"
57 #include "utils/fmgroids.h"
58 #include "utils/inval.h"
59 #include "utils/lsyscache.h"
60 #include "utils/memutils.h"
61 #include "utils/rel.h"
62 #include "utils/snapmgr.h"
63 #include "utils/syscache.h"
64 #include "utils/tuplestore.h"
65 
66 
67 /* GUC variables */
68 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
69 
70 /* How many levels deep into trigger execution are we? */
71 static int MyTriggerDepth = 0;
72 
73 /*
74  * The authoritative version of this macro is in executor/execMain.c. Be sure
75  * to keep everything in sync.
76  */
77 #define GetAllUpdatedColumns(relinfo, estate) \
78  (bms_union(exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->updatedCols, \
79  exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->extraUpdatedCols))
80 
81 /* Local function prototypes */
82 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
83 static bool GetTupleForTrigger(EState *estate,
84  EPQState *epqstate,
85  ResultRelInfo *relinfo,
86  ItemPointer tid,
87  LockTupleMode lockmode,
88  TupleTableSlot *oldslot,
89  TupleTableSlot **newSlot);
90 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
91  Trigger *trigger, TriggerEvent event,
92  Bitmapset *modifiedCols,
93  TupleTableSlot *oldslot, TupleTableSlot *newslot);
94 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
95  int tgindx,
96  FmgrInfo *finfo,
97  Instrumentation *instr,
98  MemoryContext per_tuple_context);
99 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
100  int event, bool row_trigger,
101  TupleTableSlot *oldtup, TupleTableSlot *newtup,
102  List *recheckIndexes, Bitmapset *modifiedCols,
103  TransitionCaptureState *transition_capture);
104 static void AfterTriggerEnlargeQueryState(void);
105 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
106 
107 
108 /*
109  * Create a trigger. Returns the address of the created trigger.
110  *
111  * queryString is the source text of the CREATE TRIGGER command.
112  * This must be supplied if a whenClause is specified, else it can be NULL.
113  *
114  * relOid, if nonzero, is the relation on which the trigger should be
115  * created. If zero, the name provided in the statement will be looked up.
116  *
117  * refRelOid, if nonzero, is the relation to which the constraint trigger
118  * refers. If zero, the constraint relation name provided in the statement
119  * will be looked up as needed.
120  *
121  * constraintOid, if nonzero, says that this trigger is being created
122  * internally to implement that constraint. A suitable pg_depend entry will
123  * be made to link the trigger to that constraint. constraintOid is zero when
124  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
125  * TRIGGER, we build a pg_constraint entry internally.)
126  *
127  * indexOid, if nonzero, is the OID of an index associated with the constraint.
128  * We do nothing with this except store it into pg_trigger.tgconstrindid;
129  * but when creating a trigger for a deferrable unique constraint on a
130  * partitioned table, its children are looked up. Note we don't cope with
131  * invalid indexes in that case.
132  *
133  * funcoid, if nonzero, is the OID of the function to invoke. When this is
134  * given, stmt->funcname is ignored.
135  *
136  * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
137  * if that trigger is dropped, this one should be too. (This is passed as
138  * Invalid by most callers; it's set here when recursing on a partition.)
139  *
140  * If whenClause is passed, it is an already-transformed expression for
141  * WHEN. In this case, we ignore any that may come in stmt->whenClause.
142  *
143  * If isInternal is true then this is an internally-generated trigger.
144  * This argument sets the tgisinternal field of the pg_trigger entry, and
145  * if true causes us to modify the given trigger name to ensure uniqueness.
146  *
147  * When isInternal is not true we require ACL_TRIGGER permissions on the
148  * relation, as well as ACL_EXECUTE on the trigger function. For internal
149  * triggers the caller must apply any required permission checks.
150  *
151  * When called on partitioned tables, this function recurses to create the
152  * trigger on all the partitions, except if isInternal is true, in which
153  * case caller is expected to execute recursion on its own. in_partition
154  * indicates such a recursive call; outside callers should pass "false"
155  * (but see CloneRowTriggersToPartition).
156  */
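/*
 * Illustrative sketch (not part of the original file): for an ordinary
 * user-level command such as
 *
 *     CREATE TRIGGER t_audit
 *         AFTER UPDATE ON accounts
 *         FOR EACH ROW EXECUTE FUNCTION audit_fn();
 *
 * utility processing is expected to call this function with relOid,
 * refRelOid, constraintOid, indexOid, funcoid and parentTriggerOid all
 * InvalidOid, whenClause NULL, and isInternal/in_partition false, so every
 * optional parameter described above is resolved from the statement itself.
 * The table and function names here are invented for the example.
 */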
157 ObjectAddress
158 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
159  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
160  Oid funcoid, Oid parentTriggerOid, Node *whenClause,
161  bool isInternal, bool in_partition)
162 {
163  int16 tgtype;
164  int ncolumns;
165  int16 *columns;
166  int2vector *tgattr;
167  List *whenRtable;
168  char *qual;
169  Datum values[Natts_pg_trigger];
170  bool nulls[Natts_pg_trigger];
171  Relation rel;
172  AclResult aclresult;
173  Relation tgrel;
174  Relation pgrel;
175  HeapTuple tuple = NULL;
176  Oid funcrettype;
177  Oid trigoid = InvalidOid;
178  char internaltrigname[NAMEDATALEN];
179  char *trigname;
180  Oid constrrelid = InvalidOid;
181  ObjectAddress myself,
182  referenced;
183  char *oldtablename = NULL;
184  char *newtablename = NULL;
185  bool partition_recurse;
186  bool trigger_exists = false;
187  Oid existing_constraint_oid = InvalidOid;
188  bool existing_isInternal = false;
189 
190  if (OidIsValid(relOid))
191  rel = table_open(relOid, ShareRowExclusiveLock);
192  else
193  rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
194 
195  /*
196  * Triggers must be on tables or views, and there are additional
197  * relation-type-specific restrictions.
198  */
199  if (rel->rd_rel->relkind == RELKIND_RELATION)
200  {
201  /* Tables can't have INSTEAD OF triggers */
202  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
203  stmt->timing != TRIGGER_TYPE_AFTER)
204  ereport(ERROR,
205  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
206  errmsg("\"%s\" is a table",
207  RelationGetRelationName(rel)),
208  errdetail("Tables cannot have INSTEAD OF triggers.")));
209  }
210  else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
211  {
212  /* Partitioned tables can't have INSTEAD OF triggers */
213  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
214  stmt->timing != TRIGGER_TYPE_AFTER)
215  ereport(ERROR,
216  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
217  errmsg("\"%s\" is a table",
218  RelationGetRelationName(rel)),
219  errdetail("Tables cannot have INSTEAD OF triggers.")));
220 
221  /*
222  * FOR EACH ROW triggers have further restrictions
223  */
224  if (stmt->row)
225  {
226  /*
227  * Disallow use of transition tables.
228  *
229  * Note that we have another restriction about transition tables
230  * in partitions; search for 'has_superclass' below for an
231  * explanation. The check here is just to protect from the fact
232  * that if we allowed it here, the creation would succeed for a
233  * partitioned table with no partitions, but would be blocked by
234  * the other restriction when the first partition was created,
235  * which is very unfriendly behavior.
236  */
237  if (stmt->transitionRels != NIL)
238  ereport(ERROR,
239  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
240  errmsg("\"%s\" is a partitioned table",
241  RelationGetRelationName(rel)),
242  errdetail("Triggers on partitioned tables cannot have transition tables.")));
243  }
244  }
245  else if (rel->rd_rel->relkind == RELKIND_VIEW)
246  {
247  /*
248  * Views can have INSTEAD OF triggers (which we check below are
249  * row-level), or statement-level BEFORE/AFTER triggers.
250  */
251  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
252  ereport(ERROR,
253  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
254  errmsg("\"%s\" is a view",
255  RelationGetRelationName(rel)),
256  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
257  /* Disallow TRUNCATE triggers on VIEWs */
258  if (TRIGGER_FOR_TRUNCATE(stmt->events))
259  ereport(ERROR,
260  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
261  errmsg("\"%s\" is a view",
262  RelationGetRelationName(rel)),
263  errdetail("Views cannot have TRUNCATE triggers.")));
264  }
265  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
266  {
267  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
268  stmt->timing != TRIGGER_TYPE_AFTER)
269  ereport(ERROR,
270  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
271  errmsg("\"%s\" is a foreign table",
272  RelationGetRelationName(rel)),
273  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
274 
275  if (TRIGGER_FOR_TRUNCATE(stmt->events))
276  ereport(ERROR,
277  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
278  errmsg("\"%s\" is a foreign table",
279  RelationGetRelationName(rel)),
280  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
281 
282  /*
283  * We disallow constraint triggers to protect the assumption that
284  * triggers on FKs can't be deferred. See notes with AfterTriggers
285  * data structures, below.
286  */
287  if (stmt->isconstraint)
288  ereport(ERROR,
289  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
290  errmsg("\"%s\" is a foreign table",
291  RelationGetRelationName(rel)),
292  errdetail("Foreign tables cannot have constraint triggers.")));
293  }
294  else
295  ereport(ERROR,
296  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
297  errmsg("\"%s\" is not a table or view",
298  RelationGetRelationName(rel))));
299 
300  if (!allowSystemTableMods && IsSystemRelation(rel))
301  ereport(ERROR,
302  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
303  errmsg("permission denied: \"%s\" is a system catalog",
304  RelationGetRelationName(rel))));
305 
306  if (stmt->isconstraint)
307  {
308  /*
309  * We must take a lock on the target relation to protect against
310  * concurrent drop. It's not clear that AccessShareLock is strong
311  * enough, but we certainly need at least that much... otherwise, we
312  * might end up creating a pg_constraint entry referencing a
313  * nonexistent table.
314  */
315  if (OidIsValid(refRelOid))
316  {
317  LockRelationOid(refRelOid, AccessShareLock);
318  constrrelid = refRelOid;
319  }
320  else if (stmt->constrrel != NULL)
321  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
322  false);
323  }
324 
325  /* permission checks */
326  if (!isInternal)
327  {
328  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
329  ACL_TRIGGER);
330  if (aclresult != ACLCHECK_OK)
331  aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
332  RelationGetRelationName(rel));
333 
334  if (OidIsValid(constrrelid))
335  {
336  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
337  ACL_TRIGGER);
338  if (aclresult != ACLCHECK_OK)
339  aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
340  get_rel_name(constrrelid));
341  }
342  }
343 
344  /*
345  * When called on a partitioned table to create a FOR EACH ROW trigger
346  * that's not internal, we create one trigger for each partition, too.
347  *
348  * For that, we'd better hold lock on all of them ahead of time.
349  */
350  partition_recurse = !isInternal && stmt->row &&
351  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
352  if (partition_recurse)
353  list_free(find_all_inheritors(RelationGetRelid(rel),
354  ShareRowExclusiveLock, NULL));
355 
356  /* Compute tgtype */
357  TRIGGER_CLEAR_TYPE(tgtype);
358  if (stmt->row)
359  TRIGGER_SETT_ROW(tgtype);
360  tgtype |= stmt->timing;
361  tgtype |= stmt->events;
362 
363  /* Disallow ROW-level TRUNCATE triggers */
364  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
365  ereport(ERROR,
366  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
367  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
368 
369  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
370  if (TRIGGER_FOR_INSTEAD(tgtype))
371  {
372  if (!TRIGGER_FOR_ROW(tgtype))
373  ereport(ERROR,
374  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
375  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
376  if (stmt->whenClause)
377  ereport(ERROR,
378  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
379  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
380  if (stmt->columns != NIL)
381  ereport(ERROR,
382  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
383  errmsg("INSTEAD OF triggers cannot have column lists")));
384  }
385 
386  /*
387  * We don't yet support naming ROW transition variables, but the parser
388  * recognizes the syntax so we can give a nicer message here.
389  *
390  * Per standard, REFERENCING TABLE names are only allowed on AFTER
391  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
392  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
393  * only allowed once. Per standard, OLD may not be specified when
394  * creating a trigger only for INSERT, and NEW may not be specified when
395  * creating a trigger only for DELETE.
396  *
397  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
398  * reference both ROW and TABLE transition data.
399  */
400  if (stmt->transitionRels != NIL)
401  {
402  List *varList = stmt->transitionRels;
403  ListCell *lc;
404 
405  foreach(lc, varList)
406  {
407  TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
408 
409  if (!(tt->isTable))
410  ereport(ERROR,
411  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
412  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
413  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
414 
415  /*
416  * Because of the above test, we omit further ROW-related testing
417  * below. If we later allow naming OLD and NEW ROW variables,
418  * adjustments will be needed below.
419  */
420 
421  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
422  ereport(ERROR,
423  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
424  errmsg("\"%s\" is a foreign table",
425  RelationGetRelationName(rel)),
426  errdetail("Triggers on foreign tables cannot have transition tables.")));
427 
428  if (rel->rd_rel->relkind == RELKIND_VIEW)
429  ereport(ERROR,
430  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
431  errmsg("\"%s\" is a view",
432  RelationGetRelationName(rel)),
433  errdetail("Triggers on views cannot have transition tables.")));
434 
435  /*
436  * We currently don't allow row-level triggers with transition
437  * tables on partition or inheritance children. Such triggers
438  * would somehow need to see tuples converted to the format of the
439  * table they're attached to, and it's not clear which subset of
440  * tuples each child should see. See also the prohibitions in
441  * ATExecAttachPartition() and ATExecAddInherit().
442  */
443  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
444  {
445  /* Use appropriate error message. */
446  if (rel->rd_rel->relispartition)
447  ereport(ERROR,
448  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
449  errmsg("ROW triggers with transition tables are not supported on partitions")));
450  else
451  ereport(ERROR,
452  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
453  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
454  }
455 
456  if (stmt->timing != TRIGGER_TYPE_AFTER)
457  ereport(ERROR,
458  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
459  errmsg("transition table name can only be specified for an AFTER trigger")));
460 
461  if (TRIGGER_FOR_TRUNCATE(tgtype))
462  ereport(ERROR,
463  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
464  errmsg("TRUNCATE triggers with transition tables are not supported")));
465 
466  /*
467  * We currently don't allow multi-event triggers ("INSERT OR
468  * UPDATE") with transition tables, because it's not clear how to
469  * handle INSERT ... ON CONFLICT statements which can fire both
470  * INSERT and UPDATE triggers. We show the inserted tuples to
471  * INSERT triggers and the updated tuples to UPDATE triggers, but
472  * it's not yet clear what INSERT OR UPDATE trigger should see.
473  * This restriction could be lifted if we can decide on the right
474  * semantics in a later release.
475  */
476  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
477  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
478  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
479  ereport(ERROR,
480  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
481  errmsg("transition tables cannot be specified for triggers with more than one event")));
482 
483  /*
484  * We currently don't allow column-specific triggers with
485  * transition tables. Per spec, that seems to require
486  * accumulating separate transition tables for each combination of
487  * columns, which is a lot of work for a rather marginal feature.
488  */
489  if (stmt->columns != NIL)
490  ereport(ERROR,
491  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
492  errmsg("transition tables cannot be specified for triggers with column lists")));
493 
494  /*
495  * We disallow constraint triggers with transition tables, to
496  * protect the assumption that such triggers can't be deferred.
497  * See notes with AfterTriggers data structures, below.
498  *
499  * Currently this is enforced by the grammar, so just Assert here.
500  */
501  Assert(!stmt->isconstraint);
502 
503  if (tt->isNew)
504  {
505  if (!(TRIGGER_FOR_INSERT(tgtype) ||
506  TRIGGER_FOR_UPDATE(tgtype)))
507  ereport(ERROR,
508  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
509  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
510 
511  if (newtablename != NULL)
512  ereport(ERROR,
513  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
514  errmsg("NEW TABLE cannot be specified multiple times")));
515 
516  newtablename = tt->name;
517  }
518  else
519  {
520  if (!(TRIGGER_FOR_DELETE(tgtype) ||
521  TRIGGER_FOR_UPDATE(tgtype)))
522  ereport(ERROR,
523  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
524  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
525 
526  if (oldtablename != NULL)
527  ereport(ERROR,
528  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
529  errmsg("OLD TABLE cannot be specified multiple times")));
530 
531  oldtablename = tt->name;
532  }
533  }
534 
535  if (newtablename != NULL && oldtablename != NULL &&
536  strcmp(newtablename, oldtablename) == 0)
537  ereport(ERROR,
538  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
539  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
540  }
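/*
 * A hedged example of a REFERENCING clause that satisfies all of the checks
 * above (AFTER trigger, single event, no column list, distinct transition
 * table names); the relation and function names are assumptions of the
 * example only:
 *
 *     CREATE TRIGGER t_summarize
 *         AFTER UPDATE ON accounts
 *         REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
 *         FOR EACH STATEMENT EXECUTE FUNCTION summarize_fn();
 */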
541 
542  /*
543  * Parse the WHEN clause, if any and we weren't passed an already
544  * transformed one.
545  *
546  * Note that as a side effect, we fill whenRtable when parsing. If we got
547  * an already parsed clause, this does not occur, which is what we want --
548  * no point in adding redundant dependencies below.
549  */
550  if (!whenClause && stmt->whenClause)
551  {
552  ParseState *pstate;
553  ParseNamespaceItem *nsitem;
554  List *varList;
555  ListCell *lc;
556 
557  /* Set up a pstate to parse with */
558  pstate = make_parsestate(NULL);
559  pstate->p_sourcetext = queryString;
560 
561  /*
562  * Set up nsitems for OLD and NEW references.
563  *
564  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
565  */
566  nsitem = addRangeTableEntryForRelation(pstate, rel,
567  AccessShareLock,
568  makeAlias("old", NIL),
569  false, false);
570  addNSItemToQuery(pstate, nsitem, false, true, true);
571  nsitem = addRangeTableEntryForRelation(pstate, rel,
572  AccessShareLock,
573  makeAlias("new", NIL),
574  false, false);
575  addNSItemToQuery(pstate, nsitem, false, true, true);
576 
577  /* Transform expression. Copy to be sure we don't modify original */
578  whenClause = transformWhereClause(pstate,
579  copyObject(stmt->whenClause),
580  EXPR_KIND_TRIGGER_WHEN,
581  "WHEN");
582  /* we have to fix its collations too */
583  assign_expr_collations(pstate, whenClause);
584 
585  /*
586  * Check for disallowed references to OLD/NEW.
587  *
588  * NB: pull_var_clause is okay here only because we don't allow
589  * subselects in WHEN clauses; it would fail to examine the contents
590  * of subselects.
591  */
592  varList = pull_var_clause(whenClause, 0);
593  foreach(lc, varList)
594  {
595  Var *var = (Var *) lfirst(lc);
596 
597  switch (var->varno)
598  {
599  case PRS2_OLD_VARNO:
600  if (!TRIGGER_FOR_ROW(tgtype))
601  ereport(ERROR,
602  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
603  errmsg("statement trigger's WHEN condition cannot reference column values"),
604  parser_errposition(pstate, var->location)));
605  if (TRIGGER_FOR_INSERT(tgtype))
606  ereport(ERROR,
607  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
608  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
609  parser_errposition(pstate, var->location)));
610  /* system columns are okay here */
611  break;
612  case PRS2_NEW_VARNO:
613  if (!TRIGGER_FOR_ROW(tgtype))
614  ereport(ERROR,
615  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
616  errmsg("statement trigger's WHEN condition cannot reference column values"),
617  parser_errposition(pstate, var->location)));
618  if (TRIGGER_FOR_DELETE(tgtype))
619  ereport(ERROR,
620  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
621  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
622  parser_errposition(pstate, var->location)));
623  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
624  ereport(ERROR,
625  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
626  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
627  parser_errposition(pstate, var->location)));
628  if (TRIGGER_FOR_BEFORE(tgtype) &&
629  var->varattno == 0 &&
630  RelationGetDescr(rel)->constr &&
631  RelationGetDescr(rel)->constr->has_generated_stored)
632  ereport(ERROR,
633  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
634  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
635  errdetail("A whole-row reference is used and the table contains generated columns."),
636  parser_errposition(pstate, var->location)));
637  if (TRIGGER_FOR_BEFORE(tgtype) &&
638  var->varattno > 0 &&
639  TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
640  ereport(ERROR,
641  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
642  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
643  errdetail("Column \"%s\" is a generated column.",
644  NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
645  parser_errposition(pstate, var->location)));
646  break;
647  default:
648  /* can't happen without add_missing_from, so just elog */
649  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
650  break;
651  }
652  }
653 
654  /* we'll need the rtable for recordDependencyOnExpr */
655  whenRtable = pstate->p_rtable;
656 
657  qual = nodeToString(whenClause);
658 
659  free_parsestate(pstate);
660  }
661  else if (!whenClause)
662  {
663  whenClause = NULL;
664  whenRtable = NIL;
665  qual = NULL;
666  }
667  else
668  {
669  qual = nodeToString(whenClause);
670  whenRtable = NIL;
671  }
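/*
 * For illustration, a WHEN condition that passes the checks above (row-level
 * UPDATE trigger, so OLD and NEW are both in scope, and no system or
 * generated columns are referenced); table, column and function names are
 * assumptions of this sketch:
 *
 *     CREATE TRIGGER t_balance_change
 *         BEFORE UPDATE ON accounts
 *         FOR EACH ROW
 *         WHEN (OLD.balance IS DISTINCT FROM NEW.balance)
 *         EXECUTE FUNCTION log_balance_fn();
 *
 * A statement-level trigger with WHEN (NEW.balance > 0), by contrast, would
 * be rejected here because its WHEN condition references column values.
 */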
672 
673  /*
674  * Find and validate the trigger function.
675  */
676  if (!OidIsValid(funcoid))
677  funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
678  if (!isInternal)
679  {
680  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
681  if (aclresult != ACLCHECK_OK)
682  aclcheck_error(aclresult, OBJECT_FUNCTION,
683  NameListToString(stmt->funcname));
684  }
685  funcrettype = get_func_rettype(funcoid);
686  if (funcrettype != TRIGGEROID)
687  ereport(ERROR,
688  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
689  errmsg("function %s must return type %s",
690  NameListToString(stmt->funcname), "trigger")));
691 
692  /*
693  * Scan pg_trigger to see if there is already a trigger of the same name.
694  * Skip this for internally generated triggers, since we'll modify the
695  * name to be unique below.
696  *
697  * NOTE that this is cool only because we have ShareRowExclusiveLock on
698  * the relation, so the trigger set won't be changing underneath us.
699  */
700  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
701  if (!isInternal)
702  {
703  ScanKeyData skeys[2];
704  SysScanDesc tgscan;
705 
706  ScanKeyInit(&skeys[0],
707  Anum_pg_trigger_tgrelid,
708  BTEqualStrategyNumber, F_OIDEQ,
709  ObjectIdGetDatum(RelationGetRelid(rel)));
710 
711  ScanKeyInit(&skeys[1],
712  Anum_pg_trigger_tgname,
713  BTEqualStrategyNumber, F_NAMEEQ,
714  CStringGetDatum(stmt->trigname));
715 
716  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
717  NULL, 2, skeys);
718 
719  /* There should be at most one matching tuple */
720  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
721  {
722  Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
723 
724  trigoid = oldtrigger->oid;
725  existing_constraint_oid = oldtrigger->tgconstraint;
726  existing_isInternal = oldtrigger->tgisinternal;
727  trigger_exists = true;
728  /* copy the tuple to use in CatalogTupleUpdate() */
729  tuple = heap_copytuple(tuple);
730  }
731  systable_endscan(tgscan);
732  }
733 
734  if (!trigger_exists)
735  {
736  /* Generate the OID for the new trigger. */
737  trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
738  Anum_pg_trigger_oid);
739  }
740  else
741  {
742  /*
743  * If OR REPLACE was specified, we'll replace the old trigger;
744  * otherwise complain about the duplicate name.
745  */
746  if (!stmt->replace)
747  ereport(ERROR,
748  (errcode(ERRCODE_DUPLICATE_OBJECT),
749  errmsg("trigger \"%s\" for relation \"%s\" already exists",
750  stmt->trigname, RelationGetRelationName(rel))));
751 
752  /*
753  * An internal trigger cannot be replaced by a user-defined trigger.
754  * However, skip this test when in_partition, because then we're
755  * recursing from a partitioned table and the check was made at the
756  * parent level. Child triggers will always be marked "internal" (so
757  * this test does protect us from the user trying to replace a child
758  * trigger directly).
759  */
760  if (existing_isInternal && !isInternal && !in_partition)
761  ereport(ERROR,
762  (errcode(ERRCODE_DUPLICATE_OBJECT),
763  errmsg("trigger \"%s\" for relation \"%s\" is an internal trigger",
764  stmt->trigname, RelationGetRelationName(rel))));
765 
766  /*
767  * It is not allowed to replace with a constraint trigger; gram.y
768  * should have enforced this already.
769  */
770  Assert(!stmt->isconstraint);
771 
772  /*
773  * It is not allowed to replace an existing constraint trigger,
774  * either. (The reason for these restrictions is partly that it seems
775  * difficult to deal with pending trigger events in such cases, and
776  * partly that the command might imply changing the constraint's
777  * properties as well, which doesn't seem nice.)
778  */
779  if (OidIsValid(existing_constraint_oid))
780  ereport(ERROR,
781  (errcode(ERRCODE_DUPLICATE_OBJECT),
782  errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
783  stmt->trigname, RelationGetRelationName(rel))));
784  }
785 
786  /*
787  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
788  * corresponding pg_constraint entry.
789  */
790  if (stmt->isconstraint && !OidIsValid(constraintOid))
791  {
792  /* Internal callers should have made their own constraints */
793  Assert(!isInternal);
794  constraintOid = CreateConstraintEntry(stmt->trigname,
795  RelationGetNamespace(rel),
796  CONSTRAINT_TRIGGER,
797  stmt->deferrable,
798  stmt->initdeferred,
799  true,
800  InvalidOid, /* no parent */
801  RelationGetRelid(rel),
802  NULL, /* no conkey */
803  0,
804  0,
805  InvalidOid, /* no domain */
806  InvalidOid, /* no index */
807  InvalidOid, /* no foreign key */
808  NULL,
809  NULL,
810  NULL,
811  NULL,
812  0,
813  ' ',
814  ' ',
815  ' ',
816  NULL, /* no exclusion */
817  NULL, /* no check constraint */
818  NULL,
819  true, /* islocal */
820  0, /* inhcount */
821  true, /* noinherit */
822  isInternal); /* is_internal */
823  }
824 
825  /*
826  * If trigger is internally generated, modify the provided trigger name to
827  * ensure uniqueness by appending the trigger OID. (Callers will usually
828  * supply a simple constant trigger name in these cases.)
829  */
830  if (isInternal)
831  {
832  snprintf(internaltrigname, sizeof(internaltrigname),
833  "%s_%u", stmt->trigname, trigoid);
834  trigname = internaltrigname;
835  }
836  else
837  {
838  /* user-defined trigger; use the specified trigger name as-is */
839  trigname = stmt->trigname;
840  }
841 
842  /*
843  * Build the new pg_trigger tuple.
844  *
845  * When we're creating a trigger in a partition, we mark it as internal,
846  * even though we don't do the isInternal magic in this function. This
847  * makes the triggers in partitions identical to the ones in the
848  * partitioned tables, except that they are marked internal.
849  */
850  memset(nulls, false, sizeof(nulls));
851 
852  values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
853  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
854  values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
855  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
856  CStringGetDatum(trigname));
857  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
858  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
859  values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(TRIGGER_FIRES_ON_ORIGIN);
860  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal || in_partition);
861  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
862  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
863  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
864  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
865  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
866 
867  if (stmt->args)
868  {
869  ListCell *le;
870  char *args;
871  int16 nargs = list_length(stmt->args);
872  int len = 0;
873 
874  foreach(le, stmt->args)
875  {
876  char *ar = strVal(lfirst(le));
877 
878  len += strlen(ar) + 4;
879  for (; *ar; ar++)
880  {
881  if (*ar == '\\')
882  len++;
883  }
884  }
885  args = (char *) palloc(len + 1);
886  args[0] = '\0';
887  foreach(le, stmt->args)
888  {
889  char *s = strVal(lfirst(le));
890  char *d = args + strlen(args);
891 
892  while (*s)
893  {
894  if (*s == '\\')
895  *d++ = '\\';
896  *d++ = *s++;
897  }
898  strcpy(d, "\\000");
899  }
900  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
901  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
902  CStringGetDatum(args));
903  }
904  else
905  {
906  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
907  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
908  CStringGetDatum(""));
909  }
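/*
 * Worked example (not from the original source) of the tgargs encoding built
 * above: for trigger arguments 'a' and 'b\c' the loop produces the C string
 *
 *     a\000b\\c\000
 *
 * i.e. each backslash is doubled and every argument is followed by a literal
 * "\000"; byteain() then turns those escapes into real NUL separators, so the
 * stored bytea contains the bytes 61 00 62 5c 63 00.
 */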
910 
911  /* build column number array if it's a column-specific trigger */
912  ncolumns = list_length(stmt->columns);
913  if (ncolumns == 0)
914  columns = NULL;
915  else
916  {
917  ListCell *cell;
918  int i = 0;
919 
920  columns = (int16 *) palloc(ncolumns * sizeof(int16));
921  foreach(cell, stmt->columns)
922  {
923  char *name = strVal(lfirst(cell));
924  int16 attnum;
925  int j;
926 
927  /* Lookup column name. System columns are not allowed */
928  attnum = attnameAttNum(rel, name, false);
929  if (attnum == InvalidAttrNumber)
930  ereport(ERROR,
931  (errcode(ERRCODE_UNDEFINED_COLUMN),
932  errmsg("column \"%s\" of relation \"%s\" does not exist",
933  name, RelationGetRelationName(rel))));
934 
935  /* Check for duplicates */
936  for (j = i - 1; j >= 0; j--)
937  {
938  if (columns[j] == attnum)
939  ereport(ERROR,
940  (errcode(ERRCODE_DUPLICATE_COLUMN),
941  errmsg("column \"%s\" specified more than once",
942  name)));
943  }
944 
945  columns[i++] = attnum;
946  }
947  }
948  tgattr = buildint2vector(columns, ncolumns);
949  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
950 
951  /* set tgqual if trigger has WHEN clause */
952  if (qual)
953  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
954  else
955  nulls[Anum_pg_trigger_tgqual - 1] = true;
956 
957  if (oldtablename)
958  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
959  CStringGetDatum(oldtablename));
960  else
961  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
962  if (newtablename)
963  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
964  CStringGetDatum(newtablename));
965  else
966  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
967 
968  /*
969  * Insert or replace tuple in pg_trigger.
970  */
971  if (!trigger_exists)
972  {
973  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
974  CatalogTupleInsert(tgrel, tuple);
975  }
976  else
977  {
978  HeapTuple newtup;
979 
980  newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
981  CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
982  heap_freetuple(newtup);
983  }
984 
985  heap_freetuple(tuple); /* free either original or new tuple */
986  table_close(tgrel, RowExclusiveLock);
987 
988  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
989  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
990  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
991  if (oldtablename)
992  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
993  if (newtablename)
994  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
995 
996  /*
997  * Update relation's pg_class entry, if necessary; and if not, send an SI
998  * message to make other backends (and this one) rebuild relcache entries.
999  */
1000  pgrel = table_open(RelationRelationId, RowExclusiveLock);
1001  tuple = SearchSysCacheCopy1(RELOID,
1002  ObjectIdGetDatum(RelationGetRelid(rel)));
1003  if (!HeapTupleIsValid(tuple))
1004  elog(ERROR, "cache lookup failed for relation %u",
1005  RelationGetRelid(rel));
1006  if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1007  {
1008  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1009 
1010  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1011 
1012  CommandCounterIncrement();
1013  }
1014  else
1015  CacheInvalidateRelcacheByTuple(tuple);
1016 
1017  heap_freetuple(tuple);
1018  table_close(pgrel, RowExclusiveLock);
1019 
1020  /*
1021  * If we're replacing a trigger, flush all the old dependencies before
1022  * recording new ones.
1023  */
1024  if (trigger_exists)
1025  deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1026 
1027  /*
1028  * Record dependencies for trigger. Always place a normal dependency on
1029  * the function.
1030  */
1031  myself.classId = TriggerRelationId;
1032  myself.objectId = trigoid;
1033  myself.objectSubId = 0;
1034 
1035  referenced.classId = ProcedureRelationId;
1036  referenced.objectId = funcoid;
1037  referenced.objectSubId = 0;
1038  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1039 
1040  if (isInternal && OidIsValid(constraintOid))
1041  {
1042  /*
1043  * Internally-generated trigger for a constraint, so make it an
1044  * internal dependency of the constraint. We can skip depending on
1045  * the relation(s), as there'll be an indirect dependency via the
1046  * constraint.
1047  */
1048  referenced.classId = ConstraintRelationId;
1049  referenced.objectId = constraintOid;
1050  referenced.objectSubId = 0;
1051  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1052  }
1053  else
1054  {
1055  /*
1056  * User CREATE TRIGGER, so place dependencies. We make trigger be
1057  * auto-dropped if its relation is dropped or if the FK relation is
1058  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1059  */
1060  referenced.classId = RelationRelationId;
1061  referenced.objectId = RelationGetRelid(rel);
1062  referenced.objectSubId = 0;
1063  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1064 
1065  if (OidIsValid(constrrelid))
1066  {
1067  referenced.classId = RelationRelationId;
1068  referenced.objectId = constrrelid;
1069  referenced.objectSubId = 0;
1070  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1071  }
1072  /* Not possible to have an index dependency in this case */
1073  Assert(!OidIsValid(indexOid));
1074 
1075  /*
1076  * If it's a user-specified constraint trigger, make the constraint
1077  * internally dependent on the trigger instead of vice versa.
1078  */
1079  if (OidIsValid(constraintOid))
1080  {
1081  referenced.classId = ConstraintRelationId;
1082  referenced.objectId = constraintOid;
1083  referenced.objectSubId = 0;
1084  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1085  }
1086 
1087  /*
1088  * If it's a partition trigger, create the partition dependencies.
1089  */
1090  if (OidIsValid(parentTriggerOid))
1091  {
1092  ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1093  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1094  ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1095  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1096  }
1097  }
1098 
1099  /* If column-specific trigger, add normal dependencies on columns */
1100  if (columns != NULL)
1101  {
1102  int i;
1103 
1104  referenced.classId = RelationRelationId;
1105  referenced.objectId = RelationGetRelid(rel);
1106  for (i = 0; i < ncolumns; i++)
1107  {
1108  referenced.objectSubId = columns[i];
1109  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1110  }
1111  }
1112 
1113  /*
1114  * If it has a WHEN clause, add dependencies on objects mentioned in the
1115  * expression (eg, functions, as well as any columns used).
1116  */
1117  if (whenRtable != NIL)
1118  recordDependencyOnExpr(&myself, whenClause, whenRtable,
1119  DEPENDENCY_NORMAL);
1120 
1121  /* Post creation hook for new trigger */
1122  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1123  isInternal);
1124 
1125  /*
1126  * Lastly, create the trigger on child relations, if needed.
1127  */
1128  if (partition_recurse)
1129  {
1130  PartitionDesc partdesc = RelationGetPartitionDesc(rel);
1131  List *idxs = NIL;
1132  List *childTbls = NIL;
1133  ListCell *l;
1134  int i;
1135  MemoryContext oldcxt,
1136  perChildCxt;
1137 
1138  perChildCxt = AllocSetContextCreate(CurrentMemoryContext,
1139  "part trig clone",
1140  ALLOCSET_SMALL_SIZES);
1141 
1142  /*
1143  * When a trigger is being created associated with an index, we'll
1144  * need to associate the trigger in each child partition with the
1145  * corresponding index on it.
1146  */
1147  if (OidIsValid(indexOid))
1148  {
1149  ListCell *l;
1150  List *idxs = NIL;
1151 
1152  idxs = find_inheritance_children(indexOid, ShareRowExclusiveLock);
1153  foreach(l, idxs)
1154  childTbls = lappend_oid(childTbls,
1155  IndexGetRelation(lfirst_oid(l),
1156  false));
1157  }
1158 
1159  oldcxt = MemoryContextSwitchTo(perChildCxt);
1160 
1161  /* Iterate to create the trigger on each existing partition */
1162  for (i = 0; i < partdesc->nparts; i++)
1163  {
1164  Oid indexOnChild = InvalidOid;
1165  ListCell *l2;
1166  CreateTrigStmt *childStmt;
1167  Relation childTbl;
1168  Node *qual;
1169 
1170  childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1171 
1172  /* Find which of the child indexes is the one on this partition */
1173  if (OidIsValid(indexOid))
1174  {
1175  forboth(l, idxs, l2, childTbls)
1176  {
1177  if (lfirst_oid(l2) == partdesc->oids[i])
1178  {
1179  indexOnChild = lfirst_oid(l);
1180  break;
1181  }
1182  }
1183  if (!OidIsValid(indexOnChild))
1184  elog(ERROR, "failed to find index matching index \"%s\" in partition \"%s\"",
1185  get_rel_name(indexOid),
1186  get_rel_name(partdesc->oids[i]));
1187  }
1188 
1189  /*
1190  * Initialize our fabricated parse node by copying the original
1191  * one, then resetting fields that we pass separately.
1192  */
1193  childStmt = (CreateTrigStmt *) copyObject(stmt);
1194  childStmt->funcname = NIL;
1195  childStmt->whenClause = NULL;
1196 
1197  /* If there is a WHEN clause, create a modified copy of it */
1198  qual = copyObject(whenClause);
1199  qual = (Node *)
1200  map_partition_varattnos((List *) qual, PRS2_OLD_VARNO,
1201  childTbl, rel);
1202  qual = (Node *)
1203  map_partition_varattnos((List *) qual, PRS2_NEW_VARNO,
1204  childTbl, rel);
1205 
1206  CreateTrigger(childStmt, queryString,
1207  partdesc->oids[i], refRelOid,
1208  InvalidOid, indexOnChild,
1209  funcoid, trigoid, qual,
1210  isInternal, true);
1211 
1212  table_close(childTbl, NoLock);
1213 
1214  MemoryContextReset(perChildCxt);
1215  }
1216 
1217  MemoryContextSwitchTo(oldcxt);
1218  MemoryContextDelete(perChildCxt);
1219  list_free(idxs);
1220  list_free(childTbls);
1221  }
1222 
1223  /* Keep lock on target rel until end of xact */
1224  table_close(rel, NoLock);
1225 
1226  return myself;
1227 }
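/*
 * Behavioral sketch, assuming a partitioned table "measurement" with
 * partitions "measurement_y2020" and "measurement_y2021" (names invented for
 * the example): a non-internal statement such as
 *
 *     CREATE TRIGGER t_row AFTER INSERT ON measurement
 *         FOR EACH ROW EXECUTE FUNCTION log_insert_fn();
 *
 * takes the partition_recurse path above, so each partition gets its own
 * pg_trigger row whose tgparentid points at the parent's trigger and whose
 * tgisinternal is true, which is why such a clone cannot be replaced
 * directly on the partition.
 */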
1228 
1229 
1230 /*
1231  * Guts of trigger deletion.
1232  */
1233 void
1234 RemoveTriggerById(Oid trigOid)
1235 {
1236  Relation tgrel;
1237  SysScanDesc tgscan;
1238  ScanKeyData skey[1];
1239  HeapTuple tup;
1240  Oid relid;
1241  Relation rel;
1242 
1243  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1244 
1245  /*
1246  * Find the trigger to delete.
1247  */
1248  ScanKeyInit(&skey[0],
1249  Anum_pg_trigger_oid,
1250  BTEqualStrategyNumber, F_OIDEQ,
1251  ObjectIdGetDatum(trigOid));
1252 
1253  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1254  NULL, 1, skey);
1255 
1256  tup = systable_getnext(tgscan);
1257  if (!HeapTupleIsValid(tup))
1258  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1259 
1260  /*
1261  * Open and exclusive-lock the relation the trigger belongs to.
1262  */
1263  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1264 
1265  rel = table_open(relid, AccessExclusiveLock);
1266 
1267  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1268  rel->rd_rel->relkind != RELKIND_VIEW &&
1269  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1270  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1271  ereport(ERROR,
1272  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1273  errmsg("\"%s\" is not a table, view, or foreign table",
1274  RelationGetRelationName(rel))));
1275 
1276  if (!allowSystemTableMods && IsSystemRelation(rel))
1277  ereport(ERROR,
1278  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1279  errmsg("permission denied: \"%s\" is a system catalog",
1280  RelationGetRelationName(rel))));
1281 
1282  /*
1283  * Delete the pg_trigger tuple.
1284  */
1285  CatalogTupleDelete(tgrel, &tup->t_self);
1286 
1287  systable_endscan(tgscan);
1288  table_close(tgrel, RowExclusiveLock);
1289 
1290  /*
1291  * We do not bother to try to determine whether any other triggers remain,
1292  * which would be needed in order to decide whether it's safe to clear the
1293  * relation's relhastriggers. (In any case, there might be a concurrent
1294  * process adding new triggers.) Instead, just force a relcache inval to
1295  * make other backends (and this one too!) rebuild their relcache entries.
1296  * There's no great harm in leaving relhastriggers true even if there are
1297  * no triggers left.
1298  */
1299  CacheInvalidateRelcache(rel);
1300 
1301  /* Keep lock on trigger's rel until end of xact */
1302  table_close(rel, NoLock);
1303 }
1304 
1305 /*
1306  * get_trigger_oid - Look up a trigger by name to find its OID.
1307  *
1308  * If missing_ok is false, throw an error if trigger not found. If
1309  * true, just return InvalidOid.
1310  */
1311 Oid
1312 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1313 {
1314  Relation tgrel;
1315  ScanKeyData skey[2];
1316  SysScanDesc tgscan;
1317  HeapTuple tup;
1318  Oid oid;
1319 
1320  /*
1321  * Find the trigger, verify permissions, set up object address
1322  */
1323  tgrel = table_open(TriggerRelationId, AccessShareLock);
1324 
1325  ScanKeyInit(&skey[0],
1326  Anum_pg_trigger_tgrelid,
1327  BTEqualStrategyNumber, F_OIDEQ,
1328  ObjectIdGetDatum(relid));
1329  ScanKeyInit(&skey[1],
1330  Anum_pg_trigger_tgname,
1331  BTEqualStrategyNumber, F_NAMEEQ,
1332  CStringGetDatum(trigname));
1333 
1334  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1335  NULL, 2, skey);
1336 
1337  tup = systable_getnext(tgscan);
1338 
1339  if (!HeapTupleIsValid(tup))
1340  {
1341  if (!missing_ok)
1342  ereport(ERROR,
1343  (errcode(ERRCODE_UNDEFINED_OBJECT),
1344  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1345  trigname, get_rel_name(relid))));
1346  oid = InvalidOid;
1347  }
1348  else
1349  {
1350  oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1351  }
1352 
1353  systable_endscan(tgscan);
1354  table_close(tgrel, AccessShareLock);
1355  return oid;
1356 }
1357 
1358 /*
1359  * Perform permissions and integrity checks before acquiring a relation lock.
1360  */
1361 static void
1362 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1363  void *arg)
1364 {
1365  HeapTuple tuple;
1366  Form_pg_class form;
1367 
1368  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1369  if (!HeapTupleIsValid(tuple))
1370  return; /* concurrently dropped */
1371  form = (Form_pg_class) GETSTRUCT(tuple);
1372 
1373  /* only tables and views can have triggers */
1374  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1375  form->relkind != RELKIND_FOREIGN_TABLE &&
1376  form->relkind != RELKIND_PARTITIONED_TABLE)
1377  ereport(ERROR,
1378  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1379  errmsg("\"%s\" is not a table, view, or foreign table",
1380  rv->relname)));
1381 
1382  /* you must own the table to rename one of its triggers */
1383  if (!pg_class_ownercheck(relid, GetUserId()))
1384  aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname);
1385  if (!allowSystemTableMods && IsSystemClass(relid, form))
1386  ereport(ERROR,
1387  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1388  errmsg("permission denied: \"%s\" is a system catalog",
1389  rv->relname)));
1390 
1391  ReleaseSysCache(tuple);
1392 }
1393 
1394 /*
1395  * renametrig - changes the name of a trigger on a relation
1396  *
1397  * trigger name is changed in trigger catalog.
1398  * No record of the previous name is kept.
1399  *
1400  * get proper relrelation from relation catalog (if not arg)
1401  * scan trigger catalog
1402  * for name conflict (within rel)
1403  * for original trigger (if not arg)
1404  * modify tgname in trigger tuple
1405  * update row in catalog
1406  */
1407 ObjectAddress
1408 renametrig(RenameStmt *stmt)
1409 {
1410  Oid tgoid;
1411  Relation targetrel;
1412  Relation tgrel;
1413  HeapTuple tuple;
1414  SysScanDesc tgscan;
1415  ScanKeyData key[2];
1416  Oid relid;
1417  ObjectAddress address;
1418 
1419  /*
1420  * Look up name, check permissions, and acquire lock (which we will NOT
1421  * release until end of transaction).
1422  */
1423  relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1424  0,
1425  RangeVarCallbackForRenameTrigger,
1426  NULL);
1427 
1428  /* Have lock already, so just need to build relcache entry. */
1429  targetrel = relation_open(relid, NoLock);
1430 
1431  /*
1432  * Scan pg_trigger twice for existing triggers on relation. We do this in
1433  * order to ensure a trigger does not exist with newname (The unique index
1434  * on tgrelid/tgname would complain anyway) and to ensure a trigger does
1435  * exist with oldname.
1436  *
1437  * NOTE that this is cool only because we have AccessExclusiveLock on the
1438  * relation, so the trigger set won't be changing underneath us.
1439  */
1440  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1441 
1442  /*
1443  * First pass -- look for name conflict
1444  */
1445  ScanKeyInit(&key[0],
1446  Anum_pg_trigger_tgrelid,
1447  BTEqualStrategyNumber, F_OIDEQ,
1448  ObjectIdGetDatum(relid));
1449  ScanKeyInit(&key[1],
1450  Anum_pg_trigger_tgname,
1451  BTEqualStrategyNumber, F_NAMEEQ,
1452  PointerGetDatum(stmt->newname));
1453  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1454  NULL, 2, key);
1455  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1456  ereport(ERROR,
1457  (errcode(ERRCODE_DUPLICATE_OBJECT),
1458  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1459  stmt->newname, RelationGetRelationName(targetrel))));
1460  systable_endscan(tgscan);
1461 
1462  /*
1463  * Second pass -- look for trigger existing with oldname and update
1464  */
1465  ScanKeyInit(&key[0],
1466  Anum_pg_trigger_tgrelid,
1467  BTEqualStrategyNumber, F_OIDEQ,
1468  ObjectIdGetDatum(relid));
1469  ScanKeyInit(&key[1],
1470  Anum_pg_trigger_tgname,
1471  BTEqualStrategyNumber, F_NAMEEQ,
1472  PointerGetDatum(stmt->subname));
1473  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1474  NULL, 2, key);
1475  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1476  {
1477  Form_pg_trigger trigform;
1478 
1479  /*
1480  * Update pg_trigger tuple with new tgname.
1481  */
1482  tuple = heap_copytuple(tuple); /* need a modifiable copy */
1483  trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1484  tgoid = trigform->oid;
1485 
1486  namestrcpy(&trigform->tgname,
1487  stmt->newname);
1488 
1489  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1490 
1491  InvokeObjectPostAlterHook(TriggerRelationId,
1492  tgoid, 0);
1493 
1494  /*
1495  * Invalidate relation's relcache entry so that other backends (and
1496  * this one too!) are sent SI message to make them rebuild relcache
1497  * entries. (Ideally this should happen automatically...)
1498  */
1499  CacheInvalidateRelcache(targetrel);
1500  }
1501  else
1502  {
1503  ereport(ERROR,
1504  (errcode(ERRCODE_UNDEFINED_OBJECT),
1505  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1506  stmt->subname, RelationGetRelationName(targetrel))));
1507  }
1508 
1509  ObjectAddressSet(address, TriggerRelationId, tgoid);
1510 
1511  systable_endscan(tgscan);
1512 
1513  table_close(tgrel, RowExclusiveLock);
1514 
1515  /*
1516  * Close rel, but keep exclusive lock!
1517  */
1518  relation_close(targetrel, NoLock);
1519 
1520  return address;
1521 }
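/*
 * Illustrative usage of the path above (relation and trigger names are
 * assumptions of the example):
 *
 *     ALTER TRIGGER t_audit ON accounts RENAME TO t_audit_old;
 *
 * The first scan rejects the command if "t_audit_old" already exists on
 * "accounts"; the second locates "t_audit" and rewrites its tgname in place.
 */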
1522 
1523 
1524 /*
1525  * EnableDisableTrigger()
1526  *
1527  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1528  * to change 'tgenabled' field for the specified trigger(s)
1529  *
1530  * rel: relation to process (caller must hold suitable lock on it)
1531  * tgname: trigger to process, or NULL to scan all triggers
1532  * fires_when: new value for tgenabled field. In addition to generic
1533  * enablement/disablement, this also defines when the trigger
1534  * should be fired in session replication roles.
1535  * skip_system: if true, skip "system" triggers (constraint triggers)
1536  *
1537  * Caller should have checked permissions for the table; here we also
1538  * enforce that superuser privilege is required to alter the state of
1539  * system triggers
1540  */
1541 void
1542 EnableDisableTrigger(Relation rel, const char *tgname,
1543  char fires_when, bool skip_system, LOCKMODE lockmode)
1544 {
1545  Relation tgrel;
1546  int nkeys;
1547  ScanKeyData keys[2];
1548  SysScanDesc tgscan;
1549  HeapTuple tuple;
1550  bool found;
1551  bool changed;
1552 
1553  /* Scan the relevant entries in pg_trigger */
1554  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1555 
1556  ScanKeyInit(&keys[0],
1557  Anum_pg_trigger_tgrelid,
1558  BTEqualStrategyNumber, F_OIDEQ,
1559  ObjectIdGetDatum(RelationGetRelid(rel)));
1560  if (tgname)
1561  {
1562  ScanKeyInit(&keys[1],
1563  Anum_pg_trigger_tgname,
1564  BTEqualStrategyNumber, F_NAMEEQ,
1565  CStringGetDatum(tgname));
1566  nkeys = 2;
1567  }
1568  else
1569  nkeys = 1;
1570 
1571  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1572  NULL, nkeys, keys);
1573 
1574  found = changed = false;
1575 
1576  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1577  {
1578  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1579 
1580  if (oldtrig->tgisinternal)
1581  {
1582  /* system trigger ... ok to process? */
1583  if (skip_system)
1584  continue;
1585  if (!superuser())
1586  ereport(ERROR,
1587  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1588  errmsg("permission denied: \"%s\" is a system trigger",
1589  NameStr(oldtrig->tgname))));
1590  }
1591 
1592  found = true;
1593 
1594  if (oldtrig->tgenabled != fires_when)
1595  {
1596  /* need to change this one ... make a copy to scribble on */
1597  HeapTuple newtup = heap_copytuple(tuple);
1598  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1599 
1600  newtrig->tgenabled = fires_when;
1601 
1602  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1603 
1604  heap_freetuple(newtup);
1605 
1606  changed = true;
1607  }
1608 
1609  InvokeObjectPostAlterHook(TriggerRelationId,
1610  oldtrig->oid, 0);
1611  }
1612 
1613  systable_endscan(tgscan);
1614 
1615  table_close(tgrel, RowExclusiveLock);
1616 
1617  if (tgname && !found)
1618  ereport(ERROR,
1619  (errcode(ERRCODE_UNDEFINED_OBJECT),
1620  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1621  tgname, RelationGetRelationName(rel))));
1622 
1623  /*
1624  * If we changed anything, broadcast a SI inval message to force each
1625  * backend (including our own!) to rebuild relation's relcache entry.
1626  * Otherwise they will fail to apply the change promptly.
1627  */
1628  if (changed)
1629  CacheInvalidateRelcache(rel);
1630 }
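/*
 * For illustration, commands that reach this function (table and trigger
 * names invented for the example):
 *
 *     ALTER TABLE accounts DISABLE TRIGGER t_audit;        -- tgenabled = 'D'
 *     ALTER TABLE accounts ENABLE REPLICA TRIGGER t_audit; -- tgenabled = 'R'
 *     ALTER TABLE accounts ENABLE TRIGGER USER;            -- all user triggers
 *
 * The ALL and USER forms pass tgname = NULL so every trigger on the table is
 * scanned; USER additionally sets skip_system, leaving constraint (internal)
 * triggers untouched.
 */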
1631 
1632 
1633 /*
1634  * Build trigger data to attach to the given relcache entry.
1635  *
1636  * Note that trigger data attached to a relcache entry must be stored in
1637  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1638  * But we should be running in a less long-lived working context. To avoid
1639  * leaking cache memory if this routine fails partway through, we build a
1640  * temporary TriggerDesc in working memory and then copy the completed
1641  * structure into cache memory.
1642  */
1643 void
1644 RelationBuildTriggers(Relation relation)
1645 {
1646  TriggerDesc *trigdesc;
1647  int numtrigs;
1648  int maxtrigs;
1649  Trigger *triggers;
1650  Relation tgrel;
1651  ScanKeyData skey;
1652  SysScanDesc tgscan;
1653  HeapTuple htup;
1654  MemoryContext oldContext;
1655  int i;
1656 
1657  /*
1658  * Allocate a working array to hold the triggers (the array is extended if
1659  * necessary)
1660  */
1661  maxtrigs = 16;
1662  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1663  numtrigs = 0;
1664 
1665  /*
1666  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1667  * be reading the triggers in name order, except possibly during
1668  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1669  * ensures that triggers will be fired in name order.
1670  */
1671  ScanKeyInit(&skey,
1672  Anum_pg_trigger_tgrelid,
1673  BTEqualStrategyNumber, F_OIDEQ,
1674  ObjectIdGetDatum(RelationGetRelid(relation)));
1675 
1676  tgrel = table_open(TriggerRelationId, AccessShareLock);
1677  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1678  NULL, 1, &skey);
1679 
1680  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1681  {
1682  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1683  Trigger *build;
1684  Datum datum;
1685  bool isnull;
1686 
1687  if (numtrigs >= maxtrigs)
1688  {
1689  maxtrigs *= 2;
1690  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1691  }
1692  build = &(triggers[numtrigs]);
1693 
1694  build->tgoid = pg_trigger->oid;
1695  build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1696  NameGetDatum(&pg_trigger->tgname)));
1697  build->tgfoid = pg_trigger->tgfoid;
1698  build->tgtype = pg_trigger->tgtype;
1699  build->tgenabled = pg_trigger->tgenabled;
1700  build->tgisinternal = pg_trigger->tgisinternal;
1701  build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1702  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1703  build->tgconstrindid = pg_trigger->tgconstrindid;
1704  build->tgconstraint = pg_trigger->tgconstraint;
1705  build->tgdeferrable = pg_trigger->tgdeferrable;
1706  build->tginitdeferred = pg_trigger->tginitdeferred;
1707  build->tgnargs = pg_trigger->tgnargs;
1708  /* tgattr is first var-width field, so OK to access directly */
1709  build->tgnattr = pg_trigger->tgattr.dim1;
1710  if (build->tgnattr > 0)
1711  {
1712  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1713  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1714  build->tgnattr * sizeof(int16));
1715  }
1716  else
1717  build->tgattr = NULL;
1718  if (build->tgnargs > 0)
1719  {
1720  bytea *val;
1721  char *p;
1722 
1723  val = DatumGetByteaPP(fastgetattr(htup,
1724  Anum_pg_trigger_tgargs,
1725  tgrel->rd_att, &isnull));
1726  if (isnull)
1727  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1728  RelationGetRelationName(relation));
1729  p = (char *) VARDATA_ANY(val);
1730  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1731  for (i = 0; i < build->tgnargs; i++)
1732  {
1733  build->tgargs[i] = pstrdup(p);
1734  p += strlen(p) + 1;
1735  }
1736  }
1737  else
1738  build->tgargs = NULL;
1739 
1740  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1741  tgrel->rd_att, &isnull);
1742  if (!isnull)
1743  build->tgoldtable =
1744  DatumGetCString(DirectFunctionCall1(nameout, datum));
1745  else
1746  build->tgoldtable = NULL;
1747 
1748  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1749  tgrel->rd_att, &isnull);
1750  if (!isnull)
1751  build->tgnewtable =
1752  DatumGetCString(DirectFunctionCall1(nameout, datum));
1753  else
1754  build->tgnewtable = NULL;
1755 
1756  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1757  tgrel->rd_att, &isnull);
1758  if (!isnull)
1759  build->tgqual = TextDatumGetCString(datum);
1760  else
1761  build->tgqual = NULL;
1762 
1763  numtrigs++;
1764  }
1765 
1766  systable_endscan(tgscan);
1767  table_close(tgrel, AccessShareLock);
1768 
1769  /* There might not be any triggers */
1770  if (numtrigs == 0)
1771  {
1772  pfree(triggers);
1773  return;
1774  }
1775 
1776  /* Build trigdesc */
1777  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1778  trigdesc->triggers = triggers;
1779  trigdesc->numtriggers = numtrigs;
1780  for (i = 0; i < numtrigs; i++)
1781  SetTriggerFlags(trigdesc, &(triggers[i]));
1782 
1783  /* Copy completed trigdesc into cache storage */
1784  oldContext = MemoryContextSwitchTo(CacheMemoryContext);
1785  relation->trigdesc = CopyTriggerDesc(trigdesc);
1786  MemoryContextSwitchTo(oldContext);
1787 
1788  /* Release working memory */
1789  FreeTriggerDesc(trigdesc);
1790 }
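/*
 * Editor's note: an illustrative sketch, not part of trigger.c.  It restates
 * the build-then-copy discipline documented above RelationBuildTriggers():
 * assemble the TriggerDesc in short-lived working memory, then copy the
 * finished structure into CacheMemoryContext.  "build_trigdesc_in_workmem"
 * is a hypothetical helper standing in for the scan-and-palloc code above.
 */
#ifdef TRIGGER_DOC_EXAMPLE
{
	TriggerDesc *tmp = build_trigdesc_in_workmem(relation);	/* working memory */
	MemoryContext oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	relation->trigdesc = CopyTriggerDesc(tmp);	/* long-lived copy */
	MemoryContextSwitchTo(oldcxt);
	FreeTriggerDesc(tmp);		/* discard working copy */
}
#endif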
1791 
1792 /*
1793  * Update the TriggerDesc's hint flags to include the specified trigger
1794  */
1795 static void
1796 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
1797 {
1798  int16 tgtype = trigger->tgtype;
1799 
1800  trigdesc->trig_insert_before_row |=
1801  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1802  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1803  trigdesc->trig_insert_after_row |=
1804  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1805  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1806  trigdesc->trig_insert_instead_row |=
1807  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1808  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
1809  trigdesc->trig_insert_before_statement |=
1810  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1811  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1812  trigdesc->trig_insert_after_statement |=
1813  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1814  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1815  trigdesc->trig_update_before_row |=
1816  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1817  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1818  trigdesc->trig_update_after_row |=
1819  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1820  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1821  trigdesc->trig_update_instead_row |=
1822  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1823  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
1824  trigdesc->trig_update_before_statement |=
1825  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1826  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1827  trigdesc->trig_update_after_statement |=
1828  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1829  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1830  trigdesc->trig_delete_before_row |=
1831  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1832  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1833  trigdesc->trig_delete_after_row |=
1834  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1835  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1836  trigdesc->trig_delete_instead_row |=
1837  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1838  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
1839  trigdesc->trig_delete_before_statement |=
1840  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1841  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1842  trigdesc->trig_delete_after_statement |=
1843  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1844  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1845  /* there are no row-level truncate triggers */
1846  trigdesc->trig_truncate_before_statement |=
1847  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1848  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
1849  trigdesc->trig_truncate_after_statement |=
1850  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1851  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
1852 
1853  trigdesc->trig_insert_new_table |=
1854  (TRIGGER_FOR_INSERT(tgtype) &&
1855  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1856  trigdesc->trig_update_old_table |=
1857  (TRIGGER_FOR_UPDATE(tgtype) &&
1858  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1859  trigdesc->trig_update_new_table |=
1860  (TRIGGER_FOR_UPDATE(tgtype) &&
1861  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1862  trigdesc->trig_delete_old_table |=
1863  (TRIGGER_FOR_DELETE(tgtype) &&
1864  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1865 }
1866 
1867 /*
1868  * Copy a TriggerDesc data structure.
1869  *
1870  * The copy is allocated in the current memory context.
1871  */
1872 TriggerDesc *
1873 CopyTriggerDesc(TriggerDesc *trigdesc)
1874 {
1875  TriggerDesc *newdesc;
1876  Trigger *trigger;
1877  int i;
1878 
1879  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
1880  return NULL;
1881 
1882  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
1883  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
1884 
1885  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
1886  memcpy(trigger, trigdesc->triggers,
1887  trigdesc->numtriggers * sizeof(Trigger));
1888  newdesc->triggers = trigger;
1889 
1890  for (i = 0; i < trigdesc->numtriggers; i++)
1891  {
1892  trigger->tgname = pstrdup(trigger->tgname);
1893  if (trigger->tgnattr > 0)
1894  {
1895  int16 *newattr;
1896 
1897  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
1898  memcpy(newattr, trigger->tgattr,
1899  trigger->tgnattr * sizeof(int16));
1900  trigger->tgattr = newattr;
1901  }
1902  if (trigger->tgnargs > 0)
1903  {
1904  char **newargs;
1905  int16 j;
1906 
1907  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
1908  for (j = 0; j < trigger->tgnargs; j++)
1909  newargs[j] = pstrdup(trigger->tgargs[j]);
1910  trigger->tgargs = newargs;
1911  }
1912  if (trigger->tgqual)
1913  trigger->tgqual = pstrdup(trigger->tgqual);
1914  if (trigger->tgoldtable)
1915  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
1916  if (trigger->tgnewtable)
1917  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
1918  trigger++;
1919  }
1920 
1921  return newdesc;
1922 }
1923 
1924 /*
1925  * Free a TriggerDesc data structure.
1926  */
1927 void
1928 FreeTriggerDesc(TriggerDesc *trigdesc)
1929 {
1930  Trigger *trigger;
1931  int i;
1932 
1933  if (trigdesc == NULL)
1934  return;
1935 
1936  trigger = trigdesc->triggers;
1937  for (i = 0; i < trigdesc->numtriggers; i++)
1938  {
1939  pfree(trigger->tgname);
1940  if (trigger->tgnattr > 0)
1941  pfree(trigger->tgattr);
1942  if (trigger->tgnargs > 0)
1943  {
1944  while (--(trigger->tgnargs) >= 0)
1945  pfree(trigger->tgargs[trigger->tgnargs]);
1946  pfree(trigger->tgargs);
1947  }
1948  if (trigger->tgqual)
1949  pfree(trigger->tgqual);
1950  if (trigger->tgoldtable)
1951  pfree(trigger->tgoldtable);
1952  if (trigger->tgnewtable)
1953  pfree(trigger->tgnewtable);
1954  trigger++;
1955  }
1956  pfree(trigdesc->triggers);
1957  pfree(trigdesc);
1958 }
1959 
1960 /*
1961  * Compare two TriggerDesc structures for logical equality.
1962  */
1963 #ifdef NOT_USED
1964 bool
1965 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
1966 {
1967  int i,
1968  j;
1969 
1970  /*
1971  * We need not examine the hint flags, just the trigger array itself; if
1972  * we have the same triggers with the same types, the flags should match.
1973  *
1974  * As of 7.3 we assume trigger set ordering is significant in the
1975  * comparison; so we just compare corresponding slots of the two sets.
1976  *
1977  * Note: comparing the stringToNode forms of the WHEN clauses means that
1978  * parse column locations will affect the result. This is okay as long as
1979  * this function is only used for detecting exact equality, as for example
1980  * in checking for staleness of a cache entry.
1981  */
1982  if (trigdesc1 != NULL)
1983  {
1984  if (trigdesc2 == NULL)
1985  return false;
1986  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
1987  return false;
1988  for (i = 0; i < trigdesc1->numtriggers; i++)
1989  {
1990  Trigger *trig1 = trigdesc1->triggers + i;
1991  Trigger *trig2 = trigdesc2->triggers + i;
1992 
1993  if (trig1->tgoid != trig2->tgoid)
1994  return false;
1995  if (strcmp(trig1->tgname, trig2->tgname) != 0)
1996  return false;
1997  if (trig1->tgfoid != trig2->tgfoid)
1998  return false;
1999  if (trig1->tgtype != trig2->tgtype)
2000  return false;
2001  if (trig1->tgenabled != trig2->tgenabled)
2002  return false;
2003  if (trig1->tgisinternal != trig2->tgisinternal)
2004  return false;
2005  if (trig1->tgisclone != trig2->tgisclone)
2006  return false;
2007  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
2008  return false;
2009  if (trig1->tgconstrindid != trig2->tgconstrindid)
2010  return false;
2011  if (trig1->tgconstraint != trig2->tgconstraint)
2012  return false;
2013  if (trig1->tgdeferrable != trig2->tgdeferrable)
2014  return false;
2015  if (trig1->tginitdeferred != trig2->tginitdeferred)
2016  return false;
2017  if (trig1->tgnargs != trig2->tgnargs)
2018  return false;
2019  if (trig1->tgnattr != trig2->tgnattr)
2020  return false;
2021  if (trig1->tgnattr > 0 &&
2022  memcmp(trig1->tgattr, trig2->tgattr,
2023  trig1->tgnattr * sizeof(int16)) != 0)
2024  return false;
2025  for (j = 0; j < trig1->tgnargs; j++)
2026  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
2027  return false;
2028  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
2029  /* ok */ ;
2030  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
2031  return false;
2032  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
2033  return false;
2034  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
2035  /* ok */ ;
2036  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
2037  return false;
2038  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
2039  return false;
2040  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
2041  /* ok */ ;
2042  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
2043  return false;
2044  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
2045  return false;
2046  }
2047  }
2048  else if (trigdesc2 != NULL)
2049  return false;
2050  return true;
2051 }
2052 #endif /* NOT_USED */
2053 
2054 /*
2055  * Check if there is a row-level trigger with transition tables that prevents
2056  * a table from becoming an inheritance child or partition. Return the name
2057  * of the first such incompatible trigger, or NULL if there is none.
2058  */
2059 const char *
2060 FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
2061 {
2062  if (trigdesc != NULL)
2063  {
2064  int i;
2065 
2066  for (i = 0; i < trigdesc->numtriggers; ++i)
2067  {
2068  Trigger *trigger = &trigdesc->triggers[i];
2069 
2070  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2071  return trigger->tgname;
2072  }
2073  }
2074 
2075  return NULL;
2076 }
2077 
2078 /*
2079  * Call a trigger function.
2080  *
2081  * trigdata: trigger descriptor.
2082  * tgindx: trigger's index in finfo and instr arrays.
2083  * finfo: array of cached trigger function call information.
2084  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2085  * per_tuple_context: memory context to execute the function in.
2086  *
2087  * Returns the tuple (or NULL) as returned by the function.
2088  */
2089 static HeapTuple
2090 ExecCallTriggerFunc(TriggerData *trigdata,
2091  int tgindx,
2092  FmgrInfo *finfo,
2093  Instrumentation *instr,
2094  MemoryContext per_tuple_context)
2095 {
2096  LOCAL_FCINFO(fcinfo, 0);
2097  PgStat_FunctionCallUsage fcusage;
2098  Datum result;
2099  MemoryContext oldContext;
2100 
2101  /*
2102  * Protect against code paths that may fail to initialize transition table
2103  * info.
2104  */
2105  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2106  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2107  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2108  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2109  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2110  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2111  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2112 
2113  finfo += tgindx;
2114 
2115  /*
2116  * We cache fmgr lookup info, to avoid making the lookup again on each
2117  * call.
2118  */
2119  if (finfo->fn_oid == InvalidOid)
2120  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2121 
2122  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2123 
2124  /*
2125  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2126  */
2127  if (instr)
2128  InstrStartNode(instr + tgindx);
2129 
2130  /*
2131  * Do the function evaluation in the per-tuple memory context, so that
2132  * leaked memory will be reclaimed once per tuple. Note in particular that
2133  * any new tuple created by the trigger function will live till the end of
2134  * the tuple cycle.
2135  */
2136  oldContext = MemoryContextSwitchTo(per_tuple_context);
2137 
2138  /*
2139  * Call the function, passing no arguments but setting a context.
2140  */
2141  InitFunctionCallInfoData(*fcinfo, finfo, 0,
2142  InvalidOid, (Node *) trigdata, NULL);
2143 
2144  pgstat_init_function_usage(fcinfo, &fcusage);
2145 
2146  MyTriggerDepth++;
2147  PG_TRY();
2148  {
2149  result = FunctionCallInvoke(fcinfo);
2150  }
2151  PG_FINALLY();
2152  {
2153  MyTriggerDepth--;
2154  }
2155  PG_END_TRY();
2156 
2157  pgstat_end_function_usage(&fcusage, true);
2158 
2159  MemoryContextSwitchTo(oldContext);
2160 
2161  /*
2162  * Trigger protocol allows function to return a null pointer, but NOT to
2163  * set the isnull result flag.
2164  */
2165  if (fcinfo->isnull)
2166  ereport(ERROR,
2167  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2168  errmsg("trigger function %u returned null value",
2169  fcinfo->flinfo->fn_oid)));
2170 
2171  /*
2172  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2173  * one "tuple returned" (really the number of firings).
2174  */
2175  if (instr)
2176  InstrStopNode(instr + tgindx, 1);
2177 
2178  return (HeapTuple) DatumGetPointer(result);
2179 }
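/*
 * Editor's note: an illustrative sketch, not part of trigger.c.  A minimal C
 * trigger function that follows the calling protocol enforced by
 * ExecCallTriggerFunc() above: it is invoked with a TriggerData node as the
 * call context, returns a HeapTuple (or NULL to skip the operation), and
 * never sets the isnull flag.  The function name is hypothetical; a real
 * module would also include "postgres.h", fmgr.h and commands/trigger.h.
 */
#ifdef TRIGGER_DOC_EXAMPLE
PG_FUNCTION_INFO_V1(noop_row_trigger);

Datum
noop_row_trigger(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "noop_row_trigger: not called by trigger manager");

	/* For row-level UPDATE, returning tg_newtuple keeps the proposed row. */
	if (TRIGGER_FIRED_FOR_ROW(trigdata->tg_event) &&
		TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		return PointerGetDatum(trigdata->tg_newtuple);

	return PointerGetDatum(trigdata->tg_trigtuple);
}
#endif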
2180 
2181 void
2182 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2183 {
2184  TriggerDesc *trigdesc;
2185  int i;
2186  TriggerData LocTriggerData = {0};
2187 
2188  trigdesc = relinfo->ri_TrigDesc;
2189 
2190  if (trigdesc == NULL)
2191  return;
2192  if (!trigdesc->trig_insert_before_statement)
2193  return;
2194 
2195  /* no-op if we already fired BS triggers in this context */
2196  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2197  CMD_INSERT))
2198  return;
2199 
2200  LocTriggerData.type = T_TriggerData;
2201  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2202  TRIGGER_EVENT_BEFORE;
2203  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2204  for (i = 0; i < trigdesc->numtriggers; i++)
2205  {
2206  Trigger *trigger = &trigdesc->triggers[i];
2207  HeapTuple newtuple;
2208 
2209  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2210  TRIGGER_TYPE_STATEMENT,
2211  TRIGGER_TYPE_BEFORE,
2212  TRIGGER_TYPE_INSERT))
2213  continue;
2214  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2215  NULL, NULL, NULL))
2216  continue;
2217 
2218  LocTriggerData.tg_trigger = trigger;
2219  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2220  i,
2221  relinfo->ri_TrigFunctions,
2222  relinfo->ri_TrigInstrument,
2223  GetPerTupleMemoryContext(estate));
2224 
2225  if (newtuple)
2226  ereport(ERROR,
2227  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2228  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2229  }
2230 }
2231 
2232 void
2233 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2234  TransitionCaptureState *transition_capture)
2235 {
2236  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2237 
2238  if (trigdesc && trigdesc->trig_insert_after_statement)
2239  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2240  false, NULL, NULL, NIL, NULL, transition_capture);
2241 }
2242 
2243 bool
2244 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2245  TupleTableSlot *slot)
2246 {
2247  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2248  HeapTuple newtuple = NULL;
2249  bool should_free;
2250  TriggerData LocTriggerData = {0};
2251  int i;
2252 
2253  LocTriggerData.type = T_TriggerData;
2254  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2255  TRIGGER_EVENT_ROW |
2256  TRIGGER_EVENT_BEFORE;
2257  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2258  for (i = 0; i < trigdesc->numtriggers; i++)
2259  {
2260  Trigger *trigger = &trigdesc->triggers[i];
2261  HeapTuple oldtuple;
2262 
2263  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2264  TRIGGER_TYPE_ROW,
2265  TRIGGER_TYPE_BEFORE,
2266  TRIGGER_TYPE_INSERT))
2267  continue;
2268  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2269  NULL, NULL, slot))
2270  continue;
2271 
2272  if (!newtuple)
2273  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2274 
2275  LocTriggerData.tg_trigslot = slot;
2276  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2277  LocTriggerData.tg_trigger = trigger;
2278  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2279  i,
2280  relinfo->ri_TrigFunctions,
2281  relinfo->ri_TrigInstrument,
2282  GetPerTupleMemoryContext(estate));
2283  if (newtuple == NULL)
2284  {
2285  if (should_free)
2286  heap_freetuple(oldtuple);
2287  return false; /* "do nothing" */
2288  }
2289  else if (newtuple != oldtuple)
2290  {
2291  ExecForceStoreHeapTuple(newtuple, slot, false);
2292 
2293  /*
2294  * After a tuple in a partition goes through a trigger, the user
2295  * could have changed the partition key enough that the tuple no
2296  * longer fits the partition. Verify that.
2297  */
2298  if (trigger->tgisclone &&
2299  !ExecPartitionCheck(relinfo, slot, estate, false))
2300  ereport(ERROR,
2301  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2302  errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2303  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2304  trigger->tgname,
2305  get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
2306  RelationGetRelationName(relinfo->ri_RelationDesc))));
2307 
2308  if (should_free)
2309  heap_freetuple(oldtuple);
2310 
2311  /* signal tuple should be re-fetched if used */
2312  newtuple = NULL;
2313  }
2314  }
2315 
2316  return true;
2317 }
2318 
2319 void
2320 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2321  TupleTableSlot *slot, List *recheckIndexes,
2322  TransitionCaptureState *transition_capture)
2323 {
2324  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2325 
2326  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2327  (transition_capture && transition_capture->tcs_insert_new_table))
2328  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2329  true, NULL, slot,
2330  recheckIndexes, NULL,
2331  transition_capture);
2332 }
2333 
2334 bool
2335 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2336  TupleTableSlot *slot)
2337 {
2338  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2339  HeapTuple newtuple = NULL;
2340  bool should_free;
2341  TriggerData LocTriggerData = {0};
2342  int i;
2343 
2344  LocTriggerData.type = T_TriggerData;
2345  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2346  TRIGGER_EVENT_ROW |
2347  TRIGGER_EVENT_INSTEAD;
2348  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2349  for (i = 0; i < trigdesc->numtriggers; i++)
2350  {
2351  Trigger *trigger = &trigdesc->triggers[i];
2352  HeapTuple oldtuple;
2353 
2354  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2355  TRIGGER_TYPE_ROW,
2356  TRIGGER_TYPE_INSTEAD,
2357  TRIGGER_TYPE_INSERT))
2358  continue;
2359  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2360  NULL, NULL, slot))
2361  continue;
2362 
2363  if (!newtuple)
2364  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2365 
2366  LocTriggerData.tg_trigslot = slot;
2367  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2368  LocTriggerData.tg_trigger = trigger;
2369  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2370  i,
2371  relinfo->ri_TrigFunctions,
2372  relinfo->ri_TrigInstrument,
2373  GetPerTupleMemoryContext(estate));
2374  if (newtuple == NULL)
2375  {
2376  if (should_free)
2377  heap_freetuple(oldtuple);
2378  return false; /* "do nothing" */
2379  }
2380  else if (newtuple != oldtuple)
2381  {
2382  ExecForceStoreHeapTuple(newtuple, slot, false);
2383 
2384  if (should_free)
2385  heap_freetuple(oldtuple);
2386 
2387  /* signal tuple should be re-fetched if used */
2388  newtuple = NULL;
2389  }
2390  }
2391 
2392  return true;
2393 }
2394 
2395 void
2396 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2397 {
2398  TriggerDesc *trigdesc;
2399  int i;
2400  TriggerData LocTriggerData = {0};
2401 
2402  trigdesc = relinfo->ri_TrigDesc;
2403 
2404  if (trigdesc == NULL)
2405  return;
2406  if (!trigdesc->trig_delete_before_statement)
2407  return;
2408 
2409  /* no-op if we already fired BS triggers in this context */
2410  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2411  CMD_DELETE))
2412  return;
2413 
2414  LocTriggerData.type = T_TriggerData;
2415  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2416  TRIGGER_EVENT_BEFORE;
2417  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2418  for (i = 0; i < trigdesc->numtriggers; i++)
2419  {
2420  Trigger *trigger = &trigdesc->triggers[i];
2421  HeapTuple newtuple;
2422 
2423  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2424  TRIGGER_TYPE_STATEMENT,
2425  TRIGGER_TYPE_BEFORE,
2426  TRIGGER_TYPE_DELETE))
2427  continue;
2428  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2429  NULL, NULL, NULL))
2430  continue;
2431 
2432  LocTriggerData.tg_trigger = trigger;
2433  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2434  i,
2435  relinfo->ri_TrigFunctions,
2436  relinfo->ri_TrigInstrument,
2437  GetPerTupleMemoryContext(estate));
2438 
2439  if (newtuple)
2440  ereport(ERROR,
2441  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2442  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2443  }
2444 }
2445 
2446 void
2447 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2448  TransitionCaptureState *transition_capture)
2449 {
2450  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2451 
2452  if (trigdesc && trigdesc->trig_delete_after_statement)
2453  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2454  false, NULL, NULL, NIL, NULL, transition_capture);
2455 }
2456 
2457 /*
2458  * Execute BEFORE ROW DELETE triggers.
2459  *
2460  * A true result indicates the caller can proceed with the delete. A false
2461  * result means the caller must suppress the delete; in addition, if an
2462  * epqslot was requested, we pass back the concurrently updated tuple, if any.
2463  */
2464 bool
2465 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2466  ResultRelInfo *relinfo,
2467  ItemPointer tupleid,
2468  HeapTuple fdw_trigtuple,
2469  TupleTableSlot **epqslot)
2470 {
2471  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2472  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2473  bool result = true;
2474  TriggerData LocTriggerData = {0};
2475  HeapTuple trigtuple;
2476  bool should_free = false;
2477  int i;
2478 
2479  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2480  if (fdw_trigtuple == NULL)
2481  {
2482  TupleTableSlot *epqslot_candidate = NULL;
2483 
2484  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2485  LockTupleExclusive, slot, &epqslot_candidate))
2486  return false;
2487 
2488  /*
2489  * If the tuple was concurrently updated and the caller of this
2490  * function requested the updated tuple, skip the trigger
2491  * execution.
2492  */
2493  if (epqslot_candidate != NULL && epqslot != NULL)
2494  {
2495  *epqslot = epqslot_candidate;
2496  return false;
2497  }
2498 
2499  trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2500 
2501  }
2502  else
2503  {
2504  trigtuple = fdw_trigtuple;
2505  ExecForceStoreHeapTuple(trigtuple, slot, false);
2506  }
2507 
2508  LocTriggerData.type = T_TriggerData;
2509  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2510  TRIGGER_EVENT_ROW |
2511  TRIGGER_EVENT_BEFORE;
2512  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2513  for (i = 0; i < trigdesc->numtriggers; i++)
2514  {
2515  HeapTuple newtuple;
2516  Trigger *trigger = &trigdesc->triggers[i];
2517 
2518  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2519  TRIGGER_TYPE_ROW,
2520  TRIGGER_TYPE_BEFORE,
2521  TRIGGER_TYPE_DELETE))
2522  continue;
2523  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2524  NULL, slot, NULL))
2525  continue;
2526 
2527  LocTriggerData.tg_trigslot = slot;
2528  LocTriggerData.tg_trigtuple = trigtuple;
2529  LocTriggerData.tg_trigger = trigger;
2530  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2531  i,
2532  relinfo->ri_TrigFunctions,
2533  relinfo->ri_TrigInstrument,
2534  GetPerTupleMemoryContext(estate));
2535  if (newtuple == NULL)
2536  {
2537  result = false; /* tell caller to suppress delete */
2538  break;
2539  }
2540  if (newtuple != trigtuple)
2541  heap_freetuple(newtuple);
2542  }
2543  if (should_free)
2544  heap_freetuple(trigtuple);
2545 
2546  return result;
2547 }
2548 
2549 void
2550 ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2551  ItemPointer tupleid,
2552  HeapTuple fdw_trigtuple,
2553  TransitionCaptureState *transition_capture)
2554 {
2555  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2556  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2557 
2558  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2559  (transition_capture && transition_capture->tcs_delete_old_table))
2560  {
2561  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2562  if (fdw_trigtuple == NULL)
2563  GetTupleForTrigger(estate,
2564  NULL,
2565  relinfo,
2566  tupleid,
2567  LockTupleExclusive,
2568  slot,
2569  NULL);
2570  else
2571  ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2572 
2573  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2574  true, slot, NULL, NIL, NULL,
2575  transition_capture);
2576  }
2577 }
2578 
2579 bool
2580 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2581  HeapTuple trigtuple)
2582 {
2583  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2584  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2585  TriggerData LocTriggerData = {0};
2586  int i;
2587 
2588  LocTriggerData.type = T_TriggerData;
2589  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2590  TRIGGER_EVENT_ROW |
2591  TRIGGER_EVENT_INSTEAD;
2592  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2593 
2594  ExecForceStoreHeapTuple(trigtuple, slot, false);
2595 
2596  for (i = 0; i < trigdesc->numtriggers; i++)
2597  {
2598  HeapTuple rettuple;
2599  Trigger *trigger = &trigdesc->triggers[i];
2600 
2601  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2602  TRIGGER_TYPE_ROW,
2603  TRIGGER_TYPE_INSTEAD,
2604  TRIGGER_TYPE_DELETE))
2605  continue;
2606  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2607  NULL, slot, NULL))
2608  continue;
2609 
2610  LocTriggerData.tg_trigslot = slot;
2611  LocTriggerData.tg_trigtuple = trigtuple;
2612  LocTriggerData.tg_trigger = trigger;
2613  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2614  i,
2615  relinfo->ri_TrigFunctions,
2616  relinfo->ri_TrigInstrument,
2617  GetPerTupleMemoryContext(estate));
2618  if (rettuple == NULL)
2619  return false; /* Delete was suppressed */
2620  if (rettuple != trigtuple)
2621  heap_freetuple(rettuple);
2622  }
2623  return true;
2624 }
2625 
2626 void
2627 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2628 {
2629  TriggerDesc *trigdesc;
2630  int i;
2631  TriggerData LocTriggerData = {0};
2632  Bitmapset *updatedCols;
2633 
2634  trigdesc = relinfo->ri_TrigDesc;
2635 
2636  if (trigdesc == NULL)
2637  return;
2638  if (!trigdesc->trig_update_before_statement)
2639  return;
2640 
2641  /* no-op if we already fired BS triggers in this context */
2642  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2643  CMD_UPDATE))
2644  return;
2645 
2646  updatedCols = GetAllUpdatedColumns(relinfo, estate);
2647 
2648  LocTriggerData.type = T_TriggerData;
2649  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2650  TRIGGER_EVENT_BEFORE;
2651  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2652  LocTriggerData.tg_updatedcols = updatedCols;
2653  for (i = 0; i < trigdesc->numtriggers; i++)
2654  {
2655  Trigger *trigger = &trigdesc->triggers[i];
2656  HeapTuple newtuple;
2657 
2658  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2659  TRIGGER_TYPE_STATEMENT,
2660  TRIGGER_TYPE_BEFORE,
2661  TRIGGER_TYPE_UPDATE))
2662  continue;
2663  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2664  updatedCols, NULL, NULL))
2665  continue;
2666 
2667  LocTriggerData.tg_trigger = trigger;
2668  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2669  i,
2670  relinfo->ri_TrigFunctions,
2671  relinfo->ri_TrigInstrument,
2672  GetPerTupleMemoryContext(estate));
2673 
2674  if (newtuple)
2675  ereport(ERROR,
2676  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2677  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2678  }
2679 }
2680 
2681 void
2682 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2683  TransitionCaptureState *transition_capture)
2684 {
2685  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2686 
2687  if (trigdesc && trigdesc->trig_update_after_statement)
2688  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2689  false, NULL, NULL, NIL,
2690  GetAllUpdatedColumns(relinfo, estate),
2691  transition_capture);
2692 }
2693 
2694 bool
2695 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2696  ResultRelInfo *relinfo,
2697  ItemPointer tupleid,
2698  HeapTuple fdw_trigtuple,
2699  TupleTableSlot *newslot)
2700 {
2701  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2702  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2703  HeapTuple newtuple = NULL;
2704  HeapTuple trigtuple;
2705  bool should_free_trig = false;
2706  bool should_free_new = false;
2707  TriggerData LocTriggerData = {0};
2708  int i;
2709  Bitmapset *updatedCols;
2710  LockTupleMode lockmode;
2711 
2712  /* Determine lock mode to use */
2713  lockmode = ExecUpdateLockMode(estate, relinfo);
2714 
2715  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2716  if (fdw_trigtuple == NULL)
2717  {
2718  TupleTableSlot *epqslot_candidate = NULL;
2719 
2720  /* get a copy of the on-disk tuple we are planning to update */
2721  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2722  lockmode, oldslot, &epqslot_candidate))
2723  return false; /* cancel the update action */
2724 
2725  /*
2726  * In READ COMMITTED isolation level it's possible that target tuple
2727  * was changed due to concurrent update. In that case we have a raw
2728  * subplan output tuple in epqslot_candidate, and need to run it
2729  * through the junk filter to produce an insertable tuple.
2730  *
2731  * Caution: more than likely, the passed-in slot is the same as the
2732  * junkfilter's output slot, so we are clobbering the original value
2733  * of slottuple by doing the filtering. This is OK since neither we
2734  * nor our caller have any more interest in the prior contents of that
2735  * slot.
2736  */
2737  if (epqslot_candidate != NULL)
2738  {
2739  TupleTableSlot *epqslot_clean;
2740 
2741  epqslot_clean = ExecFilterJunk(relinfo->ri_junkFilter, epqslot_candidate);
2742 
2743  if (newslot != epqslot_clean)
2744  ExecCopySlot(newslot, epqslot_clean);
2745  }
2746 
2747  trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
2748  }
2749  else
2750  {
2751  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
2752  trigtuple = fdw_trigtuple;
2753  }
2754 
2755  LocTriggerData.type = T_TriggerData;
2756  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2757  TRIGGER_EVENT_ROW |
2758  TRIGGER_EVENT_BEFORE;
2759  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2760  updatedCols = GetAllUpdatedColumns(relinfo, estate);
2761  LocTriggerData.tg_updatedcols = updatedCols;
2762  for (i = 0; i < trigdesc->numtriggers; i++)
2763  {
2764  Trigger *trigger = &trigdesc->triggers[i];
2765  HeapTuple oldtuple;
2766 
2767  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2768  TRIGGER_TYPE_ROW,
2769  TRIGGER_TYPE_BEFORE,
2770  TRIGGER_TYPE_UPDATE))
2771  continue;
2772  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2773  updatedCols, oldslot, newslot))
2774  continue;
2775 
2776  if (!newtuple)
2777  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
2778 
2779  LocTriggerData.tg_trigslot = oldslot;
2780  LocTriggerData.tg_trigtuple = trigtuple;
2781  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2782  LocTriggerData.tg_newslot = newslot;
2783  LocTriggerData.tg_trigger = trigger;
2784  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2785  i,
2786  relinfo->ri_TrigFunctions,
2787  relinfo->ri_TrigInstrument,
2788  GetPerTupleMemoryContext(estate));
2789 
2790  if (newtuple == NULL)
2791  {
2792  if (should_free_trig)
2793  heap_freetuple(trigtuple);
2794  if (should_free_new)
2795  heap_freetuple(oldtuple);
2796  return false; /* "do nothing" */
2797  }
2798  else if (newtuple != oldtuple)
2799  {
2800  ExecForceStoreHeapTuple(newtuple, newslot, false);
2801 
2802  if (trigger->tgisclone &&
2803  !ExecPartitionCheck(relinfo, newslot, estate, false))
2804  ereport(ERROR,
2805  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2806  errmsg("moving row to another partition during a BEFORE trigger is not supported"),
2807  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2808  trigger->tgname,
2809  get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
2810  RelationGetRelationName(relinfo->ri_RelationDesc))));
2811 
2812  /*
2813  * If the tuple being stored (i.e. the one returned by the trigger) is
2814  * the old row version, and the heap tuple passed to the trigger was
2815  * allocated locally, materialize the slot; otherwise we might free
2816  * the tuple while it is still referenced by the slot.
2817  */
2818  if (should_free_trig && newtuple == trigtuple)
2819  ExecMaterializeSlot(newslot);
2820 
2821  if (should_free_new)
2822  heap_freetuple(oldtuple);
2823 
2824  /* signal tuple should be re-fetched if used */
2825  newtuple = NULL;
2826  }
2827  }
2828  if (should_free_trig)
2829  heap_freetuple(trigtuple);
2830 
2831  return true;
2832 }
2833 
2834 void
2835 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2836  ItemPointer tupleid,
2837  HeapTuple fdw_trigtuple,
2838  TupleTableSlot *newslot,
2839  List *recheckIndexes,
2840  TransitionCaptureState *transition_capture)
2841 {
2842  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2843  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2844 
2845  ExecClearTuple(oldslot);
2846 
2847  if ((trigdesc && trigdesc->trig_update_after_row) ||
2848  (transition_capture &&
2849  (transition_capture->tcs_update_old_table ||
2850  transition_capture->tcs_update_new_table)))
2851  {
2852  /*
2853  * Note: if the UPDATE is converted into a DELETE+INSERT as part of
2854  * update-partition-key operation, then this function is also called
2855  * separately for DELETE and INSERT to capture transition table rows.
2856  * In such a case, either the old tuple or the new tuple can be NULL.
2857  */
2858  if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
2859  GetTupleForTrigger(estate,
2860  NULL,
2861  relinfo,
2862  tupleid,
2863  LockTupleExclusive,
2864  oldslot,
2865  NULL);
2866  else if (fdw_trigtuple != NULL)
2867  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
2868 
2869  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2870  true, oldslot, newslot, recheckIndexes,
2871  GetAllUpdatedColumns(relinfo, estate),
2872  transition_capture);
2873  }
2874 }
2875 
2876 bool
2877 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2878  HeapTuple trigtuple, TupleTableSlot *newslot)
2879 {
2880  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2881  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2882  HeapTuple newtuple = NULL;
2883  bool should_free;
2884  TriggerData LocTriggerData = {0};
2885  int i;
2886 
2887  LocTriggerData.type = T_TriggerData;
2888  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2889  TRIGGER_EVENT_ROW |
2890  TRIGGER_EVENT_INSTEAD;
2891  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2892 
2893  ExecForceStoreHeapTuple(trigtuple, oldslot, false);
2894 
2895  for (i = 0; i < trigdesc->numtriggers; i++)
2896  {
2897  Trigger *trigger = &trigdesc->triggers[i];
2898  HeapTuple oldtuple;
2899 
2900  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2901  TRIGGER_TYPE_ROW,
2902  TRIGGER_TYPE_INSTEAD,
2903  TRIGGER_TYPE_UPDATE))
2904  continue;
2905  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2906  NULL, oldslot, newslot))
2907  continue;
2908 
2909  if (!newtuple)
2910  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
2911 
2912  LocTriggerData.tg_trigslot = oldslot;
2913  LocTriggerData.tg_trigtuple = trigtuple;
2914  LocTriggerData.tg_newslot = newslot;
2915  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2916 
2917  LocTriggerData.tg_trigger = trigger;
2918  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2919  i,
2920  relinfo->ri_TrigFunctions,
2921  relinfo->ri_TrigInstrument,
2922  GetPerTupleMemoryContext(estate));
2923  if (newtuple == NULL)
2924  {
2925  return false; /* "do nothing" */
2926  }
2927  else if (newtuple != oldtuple)
2928  {
2929  ExecForceStoreHeapTuple(newtuple, newslot, false);
2930 
2931  if (should_free)
2932  heap_freetuple(oldtuple);
2933 
2934  /* signal tuple should be re-fetched if used */
2935  newtuple = NULL;
2936  }
2937  }
2938 
2939  return true;
2940 }
2941 
2942 void
2943 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2944 {
2945  TriggerDesc *trigdesc;
2946  int i;
2947  TriggerData LocTriggerData = {0};
2948 
2949  trigdesc = relinfo->ri_TrigDesc;
2950 
2951  if (trigdesc == NULL)
2952  return;
2953  if (!trigdesc->trig_truncate_before_statement)
2954  return;
2955 
2956  LocTriggerData.type = T_TriggerData;
2957  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
2958  TRIGGER_EVENT_BEFORE;
2959  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2960 
2961  for (i = 0; i < trigdesc->numtriggers; i++)
2962  {
2963  Trigger *trigger = &trigdesc->triggers[i];
2964  HeapTuple newtuple;
2965 
2966  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2967  TRIGGER_TYPE_STATEMENT,
2968  TRIGGER_TYPE_BEFORE,
2969  TRIGGER_TYPE_TRUNCATE))
2970  continue;
2971  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2972  NULL, NULL, NULL))
2973  continue;
2974 
2975  LocTriggerData.tg_trigger = trigger;
2976  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2977  i,
2978  relinfo->ri_TrigFunctions,
2979  relinfo->ri_TrigInstrument,
2980  GetPerTupleMemoryContext(estate));
2981 
2982  if (newtuple)
2983  ereport(ERROR,
2984  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2985  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2986  }
2987 }
2988 
2989 void
2990 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2991 {
2992  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2993 
2994  if (trigdesc && trigdesc->trig_truncate_after_statement)
2995  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_TRUNCATE,
2996  false, NULL, NULL, NIL, NULL, NULL);
2997 }
2998 
2999 
3000 /*
3001  * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3002  */
3003 static bool
3004 GetTupleForTrigger(EState *estate,
3005  EPQState *epqstate,
3006  ResultRelInfo *relinfo,
3007  ItemPointer tid,
3008  LockTupleMode lockmode,
3009  TupleTableSlot *oldslot,
3010  TupleTableSlot **epqslot)
3011 {
3012  Relation relation = relinfo->ri_RelationDesc;
3013 
3014  if (epqslot != NULL)
3015  {
3016  TM_Result test;
3017  TM_FailureData tmfd;
3018  int lockflags = 0;
3019 
3020  *epqslot = NULL;
3021 
3022  /* caller must pass an epqstate if EvalPlanQual is possible */
3023  Assert(epqstate != NULL);
3024 
3025  /*
3026  * lock tuple for update
3027  */
3028  if (!IsolationUsesXactSnapshot())
3029  lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
3030  test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
3031  estate->es_output_cid,
3032  lockmode, LockWaitBlock,
3033  lockflags,
3034  &tmfd);
3035 
3036  switch (test)
3037  {
3038  case TM_SelfModified:
3039 
3040  /*
3041  * The target tuple was already updated or deleted by the
3042  * current command, or by a later command in the current
3043  * transaction. We ignore the tuple in the former case, and
3044  * throw error in the latter case, for the same reasons
3045  * enumerated in ExecUpdate and ExecDelete in
3046  * nodeModifyTable.c.
3047  */
3048  if (tmfd.cmax != estate->es_output_cid)
3049  ereport(ERROR,
3050  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3051  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3052  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3053 
3054  /* treat it as deleted; do not process */
3055  return false;
3056 
3057  case TM_Ok:
3058  if (tmfd.traversed)
3059  {
3060  *epqslot = EvalPlanQual(epqstate,
3061  relation,
3062  relinfo->ri_RangeTableIndex,
3063  oldslot);
3064 
3065  /*
3066  * If PlanQual failed for updated tuple - we must not
3067  * process this tuple!
3068  */
3069  if (TupIsNull(*epqslot))
3070  {
3071  *epqslot = NULL;
3072  return false;
3073  }
3074  }
3075  break;
3076 
3077  case TM_Updated:
3078  if (IsolationUsesXactSnapshot())
3079  ereport(ERROR,
3080  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3081  errmsg("could not serialize access due to concurrent update")));
3082  elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3083  break;
3084 
3085  case TM_Deleted:
3086  if (IsolationUsesXactSnapshot())
3087  ereport(ERROR,
3088  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3089  errmsg("could not serialize access due to concurrent delete")));
3090  /* tuple was deleted */
3091  return false;
3092 
3093  case TM_Invisible:
3094  elog(ERROR, "attempted to lock invisible tuple");
3095  break;
3096 
3097  default:
3098  elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3099  return false; /* keep compiler quiet */
3100  }
3101  }
3102  else
3103  {
3104  /*
3105  * We expect the tuple to be present, thus very simple error handling
3106  * suffices.
3107  */
3108  if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3109  oldslot))
3110  elog(ERROR, "failed to fetch tuple for trigger");
3111  }
3112 
3113  return true;
3114 }
3115 
3116 /*
3117  * Is trigger enabled to fire?
3118  */
3119 static bool
3120 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3121  Trigger *trigger, TriggerEvent event,
3122  Bitmapset *modifiedCols,
3123  TupleTableSlot *oldslot, TupleTableSlot *newslot)
3124 {
3125  /* Check replication-role-dependent enable state */
3126  if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3127  {
3128  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3129  trigger->tgenabled == TRIGGER_DISABLED)
3130  return false;
3131  }
3132  else /* ORIGIN or LOCAL role */
3133  {
3134  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3135  trigger->tgenabled == TRIGGER_DISABLED)
3136  return false;
3137  }
3138 
3139  /*
3140  * Check for column-specific trigger (only possible for UPDATE, and in
3141  * fact we *must* ignore tgattr for other event types)
3142  */
3143  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3144  {
3145  int i;
3146  bool modified;
3147 
3148  modified = false;
3149  for (i = 0; i < trigger->tgnattr; i++)
3150  {
3151  if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3152  modifiedCols))
3153  {
3154  modified = true;
3155  break;
3156  }
3157  }
3158  if (!modified)
3159  return false;
3160  }
3161 
3162  /* Check for WHEN clause */
3163  if (trigger->tgqual)
3164  {
3165  ExprState **predicate;
3166  ExprContext *econtext;
3167  MemoryContext oldContext;
3168  int i;
3169 
3170  Assert(estate != NULL);
3171 
3172  /*
3173  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3174  * matching element of relinfo->ri_TrigWhenExprs[]
3175  */
3176  i = trigger - relinfo->ri_TrigDesc->triggers;
3177  predicate = &relinfo->ri_TrigWhenExprs[i];
3178 
3179  /*
3180  * If first time through for this WHEN expression, build expression
3181  * nodetrees for it. Keep them in the per-query memory context so
3182  * they'll survive throughout the query.
3183  */
3184  if (*predicate == NULL)
3185  {
3186  Node *tgqual;
3187 
3188  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3189  tgqual = stringToNode(trigger->tgqual);
3190  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3191  ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3192  ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3193  /* ExecPrepareQual wants implicit-AND form */
3194  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3195  *predicate = ExecPrepareQual((List *) tgqual, estate);
3196  MemoryContextSwitchTo(oldContext);
3197  }
3198 
3199  /*
3200  * We will use the EState's per-tuple context for evaluating WHEN
3201  * expressions (creating it if it's not already there).
3202  */
3203  econtext = GetPerTupleExprContext(estate);
3204 
3205  /*
3206  * Finally evaluate the expression, making the old and/or new tuples
3207  * available as INNER_VAR/OUTER_VAR respectively.
3208  */
3209  econtext->ecxt_innertuple = oldslot;
3210  econtext->ecxt_outertuple = newslot;
3211  if (!ExecQual(*predicate, econtext))
3212  return false;
3213  }
3214 
3215  return true;
3216 }
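/*
 * Editor's note: an illustrative sketch, not part of trigger.c.  It isolates
 * the column test used by TriggerEnabled() above for UPDATE OF column-list
 * triggers: attribute numbers are offset by FirstLowInvalidHeapAttributeNumber
 * so that system columns can be members of the bitmapset as well.  The helper
 * name is hypothetical.
 */
#ifdef TRIGGER_DOC_EXAMPLE
static bool
trigger_column_was_updated(int16 attnum, Bitmapset *modifiedCols)
{
	return bms_is_member(attnum - FirstLowInvalidHeapAttributeNumber,
						 modifiedCols);
}
#endif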
3217 
3218 
3219 /* ----------
3220  * After-trigger stuff
3221  *
3222  * The AfterTriggersData struct holds data about pending AFTER trigger events
3223  * during the current transaction tree. (BEFORE triggers are fired
3224  * immediately so we don't need any persistent state about them.) The struct
3225  * and most of its subsidiary data are kept in TopTransactionContext; however
3226  * some data that can be discarded sooner appears in the CurTransactionContext
3227  * of the relevant subtransaction. Also, the individual event records are
3228  * kept in a separate sub-context of TopTransactionContext. This is done
3229  * mainly so that it's easy to tell from a memory context dump how much space
3230  * is being eaten by trigger events.
3231  *
3232  * Because the list of pending events can grow large, we go to some
3233  * considerable effort to minimize per-event memory consumption. The event
3234  * records are grouped into chunks and common data for similar events in the
3235  * same chunk is only stored once.
3236  *
3237  * XXX We need to be able to save the per-event data in a file if it grows too
3238  * large.
3239  * ----------
3240  */
3241 
3242 /* Per-trigger SET CONSTRAINT status */
3243 typedef struct SetConstraintTriggerData
3244 {
3245  Oid sct_tgoid;
3246  bool sct_tgisdeferred;
3247 } SetConstraintTriggerData;
3248 
3249 typedef struct SetConstraintTriggerData *SetConstraintTrigger;
3250 
3251 /*
3252  * SET CONSTRAINT intra-transaction status.
3253  *
3254  * We make this a single palloc'd object so it can be copied and freed easily.
3255  *
3256  * all_isset and all_isdeferred are used to keep track
3257  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3258  *
3259  * trigstates[] stores per-trigger tgisdeferred settings.
3260  */
3261 typedef struct SetConstraintStateData
3262 {
3263  bool all_isset;
3264  bool all_isdeferred;
3265  int numstates; /* number of trigstates[] entries in use */
3266  int numalloc; /* allocated size of trigstates[] */
3267  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3268 } SetConstraintStateData;
3269 
3270 typedef SetConstraintStateData *SetConstraintState;
3271 
3272 
3273 /*
3274  * Per-trigger-event data
3275  *
3276  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3277  * status bits and up to two tuple CTIDs. Each event record also has an
3278  * associated AfterTriggerSharedData that is shared across all instances of
3279  * similar events within a "chunk".
3280  *
3281  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3282  * fields. Updates of regular tables use two; inserts and deletes of regular
3283  * tables use one; foreign tables always use zero and save the tuple(s) to a
3284  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3285  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3286  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3287  * tuple(s). This permits storing tuples once regardless of the number of
3288  * row-level triggers on a foreign table.
3289  *
3290  * Note that we need triggers on foreign tables to be fired in exactly the
3291  * order they were queued, so that the tuples come out of the tuplestore in
3292  * the right order. To ensure that, we forbid deferrable (constraint)
3293  * triggers on foreign tables. This also ensures that such triggers do not
3294  * get deferred into outer trigger query levels, meaning that it's okay to
3295  * destroy the tuplestore at the end of the query level.
3296  *
3297  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3298  * require no ctid field. We lack the flag bit space to neatly represent that
3299  * distinct case, and it seems unlikely to be worth much trouble.
3300  *
3301  * Note: ats_firing_id is initially zero and is set to something else when
3302  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3303  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3304  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3305  * because all instances of the same type of event in a given event list will
3306  * be fired at the same time, if they were queued between the same firing
3307  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3308  * a new event to an existing AfterTriggerSharedData record.
3309  */
3310 typedef uint32 TriggerFlags;
3311 
3312 #define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order bits */
3313 #define AFTER_TRIGGER_DONE 0x10000000
3314 #define AFTER_TRIGGER_IN_PROGRESS 0x20000000
3315 /* bits describing the size and tuple sources of this event */
3316 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3317 #define AFTER_TRIGGER_FDW_FETCH 0x80000000
3318 #define AFTER_TRIGGER_1CTID 0x40000000
3319 #define AFTER_TRIGGER_2CTID 0xC0000000
3320 #define AFTER_TRIGGER_TUP_BITS 0xC0000000
3321 
3322 typedef struct AfterTriggerSharedData *AfterTriggerShared;
3323 
3324 typedef struct AfterTriggerSharedData
3325 {
3326  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3327  Oid ats_tgoid; /* the trigger's ID */
3328  Oid ats_relid; /* the relation it's on */
3329  CommandId ats_firing_id; /* ID for firing cycle */
3330  struct AfterTriggersTableData *ats_table; /* transition table access */
3331  Bitmapset *ats_modifiedcols; /* modified columns */
3332 } AfterTriggerSharedData;
3333 
3334 typedef struct AfterTriggerEventData *AfterTriggerEvent;
3335 
3336 typedef struct AfterTriggerEventData
3337 {
3338  TriggerFlags ate_flags; /* status bits and offset to shared data */
3339  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3340  ItemPointerData ate_ctid2; /* new updated tuple */
3341 } AfterTriggerEventData;
3342 
3343 /* AfterTriggerEventData, minus ate_ctid2 */
3344 typedef struct AfterTriggerEventDataOneCtid
3345 {
3346  TriggerFlags ate_flags; /* status bits and offset to shared data */
3347  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3348 } AfterTriggerEventDataOneCtid;
3349 
3350 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
3351 typedef struct AfterTriggerEventDataZeroCtids
3352 {
3353  TriggerFlags ate_flags; /* status bits and offset to shared data */
3354 } AfterTriggerEventDataZeroCtids;
3355 
3356 #define SizeofTriggerEvent(evt) \
3357  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3358  sizeof(AfterTriggerEventData) : \
3359  ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3360  sizeof(AfterTriggerEventDataOneCtid) : \
3361  sizeof(AfterTriggerEventDataZeroCtids))
3362 
3363 #define GetTriggerSharedData(evt) \
3364  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3365 
3366 /*
3367  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3368  * larger chunks (a slightly more sophisticated version of an expansible
3369  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3370  * AfterTriggerEventData records; the space between endfree and endptr is
3371  * occupied by AfterTriggerSharedData records.
3372  */
3373 typedef struct AfterTriggerEventChunk
3374 {
3375  struct AfterTriggerEventChunk *next; /* list link */
3376  char *freeptr; /* start of free space in chunk */
3377  char *endfree; /* end of free space in chunk */
3378  char *endptr; /* end of chunk */
3379  /* event data follows here */
3380 } AfterTriggerEventChunk;
3381 
3382 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3383 
3384 /* A list of events */
3385 typedef struct AfterTriggerEventList
3386 {
3387  AfterTriggerEventChunk *head;
3388  AfterTriggerEventChunk *tail;
3389  char *tailfree; /* freeptr of tail chunk */
3390 } AfterTriggerEventList;
3391 
3392 /* Macros to help in iterating over a list of events */
3393 #define for_each_chunk(cptr, evtlist) \
3394  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3395 #define for_each_event(eptr, cptr) \
3396  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3397  (char *) eptr < (cptr)->freeptr; \
3398  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3399 /* Use this if no special per-chunk processing is needed */
3400 #define for_each_event_chunk(eptr, cptr, evtlist) \
3401  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3402 
3403 /* Macros for iterating from a start point that might not be list start */
3404 #define for_each_chunk_from(cptr) \
3405  for (; cptr != NULL; cptr = cptr->next)
3406 #define for_each_event_from(eptr, cptr) \
3407  for (; \
3408  (char *) eptr < (cptr)->freeptr; \
3409  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3410 
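/*
 * Editor's note: an illustrative sketch, not part of trigger.c.  Typical use
 * of the iteration macros above: walk every queued event in an
 * AfterTriggerEventList (here assumed to be a variable named "events") and
 * look at its shared data.
 */
#ifdef TRIGGER_DOC_EXAMPLE
{
	AfterTriggerEventChunk *chunk;
	AfterTriggerEvent event;

	for_each_event_chunk(event, chunk, events)
	{
		AfterTriggerShared evtshared = GetTriggerSharedData(event);

		if (evtshared->ats_firing_id == 0)
			continue;			/* not yet scheduled for firing */
	}
}
#endif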
3411 
3412 /*
3413  * All per-transaction data for the AFTER TRIGGERS module.
3414  *
3415  * AfterTriggersData has the following fields:
3416  *
3417  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3418  * We mark firable events with the current firing cycle's ID so that we can
3419  * tell which ones to work on. This ensures sane behavior if a trigger
3420  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3421  * only fire those events that weren't already scheduled for firing.
3422  *
3423  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3424  * This is saved and restored across failed subtransactions.
3425  *
3426  * events is the current list of deferred events. This is global across
3427  * all subtransactions of the current transaction. In a subtransaction
3428  * abort, we know that the events added by the subtransaction are at the
3429  * end of the list, so it is relatively easy to discard them. The event
3430  * list chunks themselves are stored in event_cxt.
3431  *
3432  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3433  * (-1 when the stack is empty).
3434  *
3435  * query_stack[query_depth] is the per-query-level data, including these fields:
3436  *
3437  * events is a list of AFTER trigger events queued by the current query.
3438  * None of these are valid until the matching AfterTriggerEndQuery call
3439  * occurs. At that point we fire immediate-mode triggers, and append any
3440  * deferred events to the main events list.
3441  *
3442  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3443  * needed by events queued by the current query. (Note: we use just one
3444  * tuplestore even though more than one foreign table might be involved.
3445  * This is okay because tuplestores don't really care what's in the tuples
3446  * they store; but it's possible that someday it'd break.)
3447  *
3448  * tables is a List of AfterTriggersTableData structs for target tables
3449  * of the current query (see below).
3450  *
3451  * maxquerydepth is just the allocated length of query_stack.
3452  *
3453  * trans_stack holds per-subtransaction data, including these fields:
3454  *
3455  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3456  * state data. Each subtransaction level that modifies that state first
3457  * saves a copy, which we use to restore the state if we abort.
3458  *
3459  * events is a copy of the events head/tail pointers,
3460  * which we use to restore those values during subtransaction abort.
3461  *
3462  * query_depth is the subtransaction-start-time value of query_depth,
3463  * which we similarly use to clean up at subtransaction abort.
3464  *
3465  * firing_counter is the subtransaction-start-time value of firing_counter.
3466  * We use this to recognize which deferred triggers were fired (or marked
3467  * for firing) within an aborted subtransaction.
3468  *
3469  * We use GetCurrentTransactionNestLevel() to determine the correct array
3470  * index in trans_stack. maxtransdepth is the number of allocated entries in
3471  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3472  * in cases where errors during subxact abort cause multiple invocations
3473  * of AfterTriggerEndSubXact() at the same nesting depth.)
3474  *
3475  * We create an AfterTriggersTableData struct for each target table of the
3476  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3477  * either transition tables or statement-level triggers. This is used to
3478  * hold the relevant transition tables, as well as info tracking whether
3479  * we already queued the statement triggers. (We use that info to prevent
3480  * firing the same statement triggers more than once per statement, or really
3481  * once per transition table set.) These structs, along with the transition
3482  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3483  * That's sufficient lifespan because we don't allow transition tables to be
3484  * used by deferrable triggers, so they only need to survive until
3485  * AfterTriggerEndQuery.
3486  */
3487 typedef struct AfterTriggersQueryData AfterTriggersQueryData;
3488 typedef struct AfterTriggersTransData AfterTriggersTransData;
3489 typedef struct AfterTriggersTableData AfterTriggersTableData;
3490 
3491 typedef struct AfterTriggersData
3492 {
3493  CommandId firing_counter; /* next firing ID to assign */
3494  SetConstraintState state; /* the active S C state */
3495  AfterTriggerEventList events; /* deferred-event list */
3496  MemoryContext event_cxt; /* memory context for events, if any */
3497 
3498  /* per-query-level data: */
3499  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3500  int query_depth; /* current index in above array */
3501  int maxquerydepth; /* allocated len of above array */
3502 
3503  /* per-subtransaction-level data: */
3504  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3505  int maxtransdepth; /* allocated len of above array */
3506 } AfterTriggersData;
3507 
3508 struct AfterTriggersQueryData
3509 {
3510  AfterTriggerEventList events; /* events pending from this query */
3511  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3512  List *tables; /* list of AfterTriggersTableData, see below */
3513 };
3514 
3515 struct AfterTriggersTransData
3516 {
3517  /* these fields are just for resetting at subtrans abort: */
3518  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3519  AfterTriggerEventList events; /* saved list pointer */
3520  int query_depth; /* saved query_depth */
3521  CommandId firing_counter; /* saved firing_counter */
3522 };
3523 
3524 struct AfterTriggersTableData
3525 {
3526  /* relid + cmdType form the lookup key for these structs: */
3527  Oid relid; /* target table's OID */
3528  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3529  bool closed; /* true when no longer OK to add tuples */
3530  bool before_trig_done; /* did we already queue BS triggers? */
3531  bool after_trig_done; /* did we already queue AS triggers? */
3532  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3533  Tuplestorestate *old_tuplestore; /* "old" transition table, if any */
3534  Tuplestorestate *new_tuplestore; /* "new" transition table, if any */
3535  TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3536 };
3537 
3538 static AfterTriggersData afterTriggers;
3539 
3540 static void AfterTriggerExecute(EState *estate,
3541  AfterTriggerEvent event,
3542  ResultRelInfo *relInfo,
3543  TriggerDesc *trigdesc,
3544  FmgrInfo *finfo,
3545  Instrumentation *instr,
3546  MemoryContext per_tuple_context,
3547  TupleTableSlot *trig_tuple_slot1,
3548  TupleTableSlot *trig_tuple_slot2);
3549 static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid,
3550  CmdType cmdType);
3551 static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs);
3552 static SetConstraintState SetConstraintStateCreate(int numalloc);
3553 static SetConstraintState SetConstraintStateCopy(SetConstraintState origstate);
3554 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3555  Oid tgoid, bool tgisdeferred);
3556 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3557 
3558 
3559 /*
3560  * Get the FDW tuplestore for the current trigger query level, creating it
3561  * if necessary.
3562  */
3563 static Tuplestorestate *
3564 GetCurrentFDWTuplestore(void)
3565 {
3566  Tuplestorestate *ret;
3567 
3568  ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
3569  if (ret == NULL)
3570  {
3571  MemoryContext oldcxt;
3572  ResourceOwner saveResourceOwner;
3573 
3574  /*
3575  * Make the tuplestore valid until end of subtransaction. We really
3576  * only need it until AfterTriggerEndQuery().
3577  */
3578  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
3579  saveResourceOwner = CurrentResourceOwner;
3580  CurrentResourceOwner = CurTransactionResourceOwner;
3581 
3582  ret = tuplestore_begin_heap(false, false, work_mem);
3583 
3584  CurrentResourceOwner = saveResourceOwner;
3585  MemoryContextSwitchTo(oldcxt);
3586 
3587  afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
3588  }
3589 
3590  return ret;
3591 }
3592 
3593 /* ----------
3594  * afterTriggerCheckState()
3595  *
3596  * Returns true if the trigger event is actually in state DEFERRED.
3597  * ----------
3598  */
3599 static bool
3600 afterTriggerCheckState(AfterTriggerShared evtshared)
3601 {
3602  Oid tgoid = evtshared->ats_tgoid;
3603  SetConstraintState state = afterTriggers.state;
3604  int i;
3605 
3606  /*
3607  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3608  * constraints declared NOT DEFERRABLE), the state is always false.
3609  */
3610  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3611  return false;
3612 
3613  /*
3614  * If constraint state exists, SET CONSTRAINTS might have been executed
3615  * either for this trigger or for all triggers.
3616  */
3617  if (state != NULL)
3618  {
3619  /* Check for SET CONSTRAINTS for this specific trigger. */
3620  for (i = 0; i < state->numstates; i++)
3621  {
3622  if (state->trigstates[i].sct_tgoid == tgoid)
3623  return state->trigstates[i].sct_tgisdeferred;
3624  }
3625 
3626  /* Check for SET CONSTRAINTS ALL. */
3627  if (state->all_isset)
3628  return state->all_isdeferred;
3629  }
3630 
3631  /*
3632  * Otherwise return the default state for the trigger.
3633  */
3634  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3635 }
3636 
3637 
3638 /* ----------
3639  * afterTriggerAddEvent()
3640  *
3641  * Add a new trigger event to the specified queue.
3642  * The passed-in event data is copied.
3643  * ----------
3644  */
3645 static void
3646 afterTriggerAddEvent(AfterTriggerEventList *events,
3647  AfterTriggerEvent event, AfterTriggerShared evtshared)
3648 {
3649  Size eventsize = SizeofTriggerEvent(event);
3650  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3651  AfterTriggerEventChunk *chunk;
3652  AfterTriggerShared newshared;
3653  AfterTriggerEvent newevent;
3654 
3655  /*
3656  * If empty list or not enough room in the tail chunk, make a new chunk.
3657  * We assume here that a new shared record will always be needed.
3658  */
3659  chunk = events->tail;
3660  if (chunk == NULL ||
3661  chunk->endfree - chunk->freeptr < needed)
3662  {
3663  Size chunksize;
3664 
3665  /* Create event context if we didn't already */
3666  if (afterTriggers.event_cxt == NULL)
3667  afterTriggers.event_cxt =
3668  AllocSetContextCreate(TopTransactionContext,
3669  "AfterTriggerEvents",
3670  ALLOCSET_DEFAULT_SIZES);
3671 
3672  /*
3673  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3674  * These numbers are fairly arbitrary, though there is a hard limit at
3675  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3676  * shared records using the available space in ate_flags. Another
3677  * constraint is that if the chunk size gets too huge, the search loop
3678  * below would get slow given a (not too common) usage pattern with
3679  * many distinct event types in a chunk. Therefore, we double the
3680  * preceding chunk size only if there weren't too many shared records
3681  * in the preceding chunk; otherwise we halve it. This gives us some
3682  * ability to adapt to the actual usage pattern of the current query
3683  * while still having large chunk sizes in typical usage. All chunk
3684  * sizes used should be MAXALIGN multiples, to ensure that the shared
3685  * records will be aligned safely.
3686  */
3687 #define MIN_CHUNK_SIZE 1024
3688 #define MAX_CHUNK_SIZE (1024*1024)
3689 
3690 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3691 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3692 #endif
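/*
 * Worked example of the sizing rule below (editorial illustration only):
 * suppose the preceding chunk is 16 kB.  If the shared-record area at its
 * tail occupies no more than 100 * sizeof(AfterTriggerSharedData), the next
 * chunk is allocated at 32 kB; if it occupies more than that (many distinct
 * trigger events in the chunk), the next chunk shrinks to 8 kB.  Either
 * result is then clamped to MAX_CHUNK_SIZE, and the very first chunk always
 * starts at MIN_CHUNK_SIZE.
 */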
3693 
3694  if (chunk == NULL)
3695  chunksize = MIN_CHUNK_SIZE;
3696  else
3697  {
3698  /* preceding chunk size... */
3699  chunksize = chunk->endptr - (char *) chunk;
3700  /* check number of shared records in preceding chunk */
3701  if ((chunk->endptr - chunk->endfree) <=
3702  (100 * sizeof(AfterTriggerSharedData)))
3703  chunksize *= 2; /* okay, double it */
3704  else
3705  chunksize /= 2; /* too many shared records */
3706  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
3707  }
3708  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
3709  chunk->next = NULL;
3710  chunk->freeptr = CHUNK_DATA_START(chunk);
3711  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
3712  Assert(chunk->endfree - chunk->freeptr >= needed);
3713 
3714  if (events->head == NULL)
3715  events->head = chunk;
3716  else
3717  events->tail->next = chunk;
3718  events->tail = chunk;
3719  /* events->tailfree is now out of sync, but we'll fix it below */
3720  }
3721 
3722  /*
3723  * Try to locate a matching shared-data record already in the chunk. If
3724  * none, make a new one.
3725  */
3726  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
3727  (char *) newshared >= chunk->endfree;
3728  newshared--)
3729  {
3730  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
3731  newshared->ats_relid == evtshared->ats_relid &&
3732  newshared->ats_event == evtshared->ats_event &&
3733  newshared->ats_table == evtshared->ats_table &&
3734  newshared->ats_firing_id == 0)
3735  break;
3736  }
3737  if ((char *) newshared < chunk->endfree)
3738  {
3739  *newshared = *evtshared;
3740  newshared->ats_firing_id = 0; /* just to be sure */
3741  chunk->endfree = (char *) newshared;
3742  }
3743 
3744  /* Insert the data */
3745  newevent = (AfterTriggerEvent) chunk->freeptr;
3746  memcpy(newevent, event, eventsize);
3747  /* ... and link the new event to its shared record */
3748  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
3749  newevent->ate_flags |= (char *) newshared - (char *) newevent;
3750 
3751  chunk->freeptr += eventsize;
3752  events->tailfree = chunk->freeptr;
3753 }
3754 
3755 /* ----------
3756  * afterTriggerFreeEventList()
3757  *
3758  * Free all the event storage in the given list.
3759  * ----------
3760  */
3761 static void
3762 afterTriggerFreeEventList(AfterTriggerEventList *events)
3763 {
3764  AfterTriggerEventChunk *chunk;
3765 
3766  while ((chunk = events->head) != NULL)
3767  {
3768  events->head = chunk->next;
3769  pfree(chunk);
3770  }
3771  events->tail = NULL;
3772  events->tailfree = NULL;
3773 }
3774 
3775 /* ----------
3776  * afterTriggerRestoreEventList()
3777  *
3778  * Restore an event list to its prior length, removing all the events
3779  * added since it had the value old_events.
3780  * ----------
3781  */
3782 static void
3783 afterTriggerRestoreEventList(AfterTriggerEventList *events,
3784  const AfterTriggerEventList *old_events)
3785 {
3786  AfterTriggerEventChunk *chunk;
3787  AfterTriggerEventChunk *next_chunk;
3788 
3789  if (old_events->tail == NULL)
3790  {
3791  /* restoring to a completely empty state, so free everything */
3792  afterTriggerFreeEventList(events);
3793  }
3794  else
3795  {
3796  *events = *old_events;
3797  /* free any chunks after the last one we want to keep */
3798  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3799  {
3800  next_chunk = chunk->next;
3801  pfree(chunk);
3802  }
3803  /* and clean up the tail chunk to be the right length */
3804  events->tail->next = NULL;
3805  events->tail->freeptr = events->tailfree;
3806 
3807  /*
3808  * We don't make any effort to remove now-unused shared data records.
3809  * They might still be useful, anyway.
3810  */
3811  }
3812 }
3813 
3814 /* ----------
3815  * afterTriggerDeleteHeadEventChunk()
3816  *
3817  * Remove the first chunk of events from the query level's event list.
3818  * Keep any event list pointers elsewhere in the query level's data
3819  * structures in sync.
3820  * ----------
3821  */
3822 static void
3823 afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
3824 {
3825  AfterTriggerEventChunk *target = qs->events.head;
3826  ListCell *lc;
3827 
3828  Assert(target && target->next);
3829 
3830  /*
3831  * First, update any pointers in the per-table data, so that they won't be
3832  * dangling. Resetting obsoleted pointers to NULL will make
3833  * cancel_prior_stmt_triggers start from the list head, which is fine.
3834  */
3835  foreach(lc, qs->tables)
3836  {
3837  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
3838 
3839  if (table->after_trig_done &&
3840  table->after_trig_events.tail == target)
3841  {
3842  table->after_trig_events.head = NULL;
3843  table->after_trig_events.tail = NULL;
3844  table->after_trig_events.tailfree = NULL;
3845  }
3846  }
3847 
3848  /* Now we can flush the head chunk */
3849  qs->events.head = target->next;
3850  pfree(target);
3851 }
3852 
3853 
3854 /* ----------
3855  * AfterTriggerExecute()
3856  *
3857  * Fetch the required tuples back from the heap and fire one
3858  * single trigger function.
3859  *
3860  * Frequently, this will be fired many times in a row for triggers of
3861  * a single relation. Therefore, we cache the open relation and provide
3862  * fmgr lookup cache space at the caller level. (For triggers fired at
3863  * the end of a query, we can even piggyback on the executor's state.)
3864  *
3865  * event: event currently being fired.
3866  * relInfo: result relation for event (the open relation is taken from it).
3867  * trigdesc: working copy of rel's trigger info.
3868  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
3869  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
3870  * or NULL if no instrumentation is wanted.
3871  * per_tuple_context: memory context to call trigger function in.
3872  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
3873  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
3874  * ----------
3875  */
3876 static void
3877 AfterTriggerExecute(EState *estate,
3878  AfterTriggerEvent event,
3879  ResultRelInfo *relInfo,
3880  TriggerDesc *trigdesc,
3881  FmgrInfo *finfo, Instrumentation *instr,
3882  MemoryContext per_tuple_context,
3883  TupleTableSlot *trig_tuple_slot1,
3884  TupleTableSlot *trig_tuple_slot2)
3885 {
3886  Relation rel = relInfo->ri_RelationDesc;
3887  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3888  Oid tgoid = evtshared->ats_tgoid;
3889  TriggerData LocTriggerData = {0};
3890  HeapTuple rettuple;
3891  int tgindx;
3892  bool should_free_trig = false;
3893  bool should_free_new = false;
3894 
3895  /*
3896  * Locate trigger in trigdesc.
3897  */
3898  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
3899  {
3900  if (trigdesc->triggers[tgindx].tgoid == tgoid)
3901  {
3902  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
3903  break;
3904  }
3905  }
3906  if (LocTriggerData.tg_trigger == NULL)
3907  elog(ERROR, "could not find trigger %u", tgoid);
3908 
3909  /*
3910  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
3911  * to include time spent re-fetching tuples in the trigger cost.
3912  */
3913  if (instr)
3914  InstrStartNode(instr + tgindx);
3915 
3916  /*
3917  * Fetch the required tuple(s).
3918  */
3919  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
3920  {
3921  case AFTER_TRIGGER_FDW_FETCH:
3922  {
3923  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
3924 
3925  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
3926  trig_tuple_slot1))
3927  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3928 
3929  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3930  TRIGGER_EVENT_UPDATE &&
3931  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
3932  trig_tuple_slot2))
3933  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3934  }
3935  /* fall through */
3936  case AFTER_TRIGGER_FDW_REUSE:
3937 
3938  /*
3939  * Store tuple in the slot so that tg_trigtuple does not reference
3940  * tuplestore memory. (It is formally possible for the trigger
3941  * function to queue trigger events that add to the same
3942  * tuplestore, which can push other tuples out of memory.) The
3943  * distinction is academic, because we start with a minimal tuple
3944  * that is stored as a heap tuple, constructed in different memory
3945  * context, in the slot anyway.
3946  */
3947  LocTriggerData.tg_trigslot = trig_tuple_slot1;
3948  LocTriggerData.tg_trigtuple =
3949  ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
3950 
3951  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3952  TRIGGER_EVENT_UPDATE)
3953  {
3954  LocTriggerData.tg_newslot = trig_tuple_slot2;
3955  LocTriggerData.tg_newtuple =
3956  ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
3957  }
3958  else
3959  {
3960  LocTriggerData.tg_newtuple = NULL;
3961  }
3962  break;
3963 
3964  default:
3965  if (ItemPointerIsValid(&(event->ate_ctid1)))
3966  {
3967  LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
3968 
3969  if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid1),
3970  SnapshotAny,
3971  LocTriggerData.tg_trigslot))
3972  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3973  LocTriggerData.tg_trigtuple =
3974  ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
3975  }
3976  else
3977  {
3978  LocTriggerData.tg_trigtuple = NULL;
3979  }
3980 
3981  /* don't touch ctid2 if not there */
3982  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
3983  AFTER_TRIGGER_2CTID &&
3984  ItemPointerIsValid(&(event->ate_ctid2)))
3985  {
3986  LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
3987 
3988  if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid2),
3989  SnapshotAny,
3990  LocTriggerData.tg_newslot))
3991  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3992  LocTriggerData.tg_newtuple =
3993  ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
3994  }
3995  else
3996  {
3997  LocTriggerData.tg_newtuple = NULL;
3998  }
3999  }
4000 
4001  /*
4002  * Set up the tuplestore information to let the trigger have access to
4003  * transition tables. When we first make a transition table available to
4004  * a trigger, mark it "closed" so that it cannot change anymore. If any
4005  * additional events of the same type get queued in the current trigger
4006  * query level, they'll go into new transition tables.
4007  */
4008  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4009  if (evtshared->ats_table)
4010  {
4011  if (LocTriggerData.tg_trigger->tgoldtable)
4012  {
4013  LocTriggerData.tg_oldtable = evtshared->ats_table->old_tuplestore;
4014  evtshared->ats_table->closed = true;
4015  }
4016 
4017  if (LocTriggerData.tg_trigger->tgnewtable)
4018  {
4019  LocTriggerData.tg_newtable = evtshared->ats_table->new_tuplestore;
4020  evtshared->ats_table->closed = true;
4021  }
4022  }
4023 
4024  /*
4025  * Setup the remaining trigger information
4026  */
4027  LocTriggerData.type = T_TriggerData;
4028  LocTriggerData.tg_event =
4029  evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
4030  LocTriggerData.tg_relation = rel;
4031  if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4032  LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4033 
4034  MemoryContextReset(per_tuple_context);
4035 
4036  /*
4037  * Call the trigger and throw away any possibly returned updated tuple.
4038  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4039  */
4040  rettuple = ExecCallTriggerFunc(&LocTriggerData,
4041  tgindx,
4042  finfo,
4043  NULL,
4044  per_tuple_context);
4045  if (rettuple != NULL &&
4046  rettuple != LocTriggerData.tg_trigtuple &&
4047  rettuple != LocTriggerData.tg_newtuple)
4048  heap_freetuple(rettuple);
4049 
4050  /*
4051  * Release resources
4052  */
4053  if (should_free_trig)
4054  heap_freetuple(LocTriggerData.tg_trigtuple);
4055  if (should_free_new)
4056  heap_freetuple(LocTriggerData.tg_newtuple);
4057 
4058  /* don't clear slots' contents if foreign table */
4059  if (trig_tuple_slot1 == NULL)
4060  {
4061  if (LocTriggerData.tg_trigslot)
4062  ExecClearTuple(LocTriggerData.tg_trigslot);
4063  if (LocTriggerData.tg_newslot)
4064  ExecClearTuple(LocTriggerData.tg_newslot);
4065  }
4066 
4067  /*
4068  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4069  * one "tuple returned" (really the number of firings).
4070  */
4071  if (instr)
4072  InstrStopNode(instr + tgindx, 1);
4073 }
4074 
4075 
4076 /*
4077  * afterTriggerMarkEvents()
4078  *
4079  * Scan the given event list for not yet invoked events. Mark the ones
4080  * that can be invoked now with the current firing ID.
4081  *
4082  * If move_list isn't NULL, events that are not to be invoked now are
4083  * transferred to move_list.
4084  *
4085  * When immediate_only is true, do not invoke currently-deferred triggers.
4086  * (This will be false only at main transaction exit.)
4087  *
4088  * Returns true if any invokable events were found.
4089  */
4090 static bool
4091 afterTriggerMarkEvents(AfterTriggerEventList *events,
4092  AfterTriggerEventList *move_list,
4093  bool immediate_only)
4094 {
4095  bool found = false;
4096  bool deferred_found = false;
4097  AfterTriggerEvent event;
4098  AfterTriggerEventChunk *chunk;
4099 
4100  for_each_event_chunk(event, chunk, *events)
4101  {
4102  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4103  bool defer_it = false;
4104 
4105  if (!(event->ate_flags &
4106  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
4107  {
4108  /*
4109  * This trigger hasn't been called or scheduled yet. Check if we
4110  * should call it now.
4111  */
4112  if (immediate_only && afterTriggerCheckState(evtshared))
4113  {
4114  defer_it = true;
4115  }
4116  else
4117  {
4118  /*
4119  * Mark it as to be fired in this firing cycle.
4120  */
4121  evtshared->ats_firing_id = afterTriggers.firing_counter;
4122  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4123  found = true;
4124  }
4125  }
4126 
4127  /*
4128  * If it's deferred, move it to move_list, if requested.
4129  */
4130  if (defer_it && move_list != NULL)
4131  {
4132  deferred_found = true;
4133  /* add it to move_list */
4134  afterTriggerAddEvent(move_list, event, evtshared);
4135  /* mark original copy "done" so we don't do it again */
4136  event->ate_flags |= AFTER_TRIGGER_DONE;
4137  }
4138  }
4139 
4140  /*
4141  * We could allow deferred triggers if, before the end of the
4142  * security-restricted operation, we were to verify that a SET CONSTRAINTS
4143  * ... IMMEDIATE has fired all such triggers. For now, don't bother.
4144  */
4145  if (deferred_found && InSecurityRestrictedOperation())
4146  ereport(ERROR,
4147  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
4148  errmsg("cannot fire deferred trigger within security-restricted operation")));
4149 
4150  return found;
4151 }
4152 
4153 /*
4154  * afterTriggerInvokeEvents()
4155  *
4156  * Scan the given event list for events that are marked as to be fired
4157  * in the current firing cycle, and fire them.
4158  *
4159  * If estate isn't NULL, we use its result relation info to avoid repeated
4160  * openings and closing of trigger target relations. If it is NULL, we
4161  * make one locally to cache the info in case there are multiple trigger
4162  * events per rel.
4163  *
4164  * When delete_ok is true, it's safe to delete fully-processed events.
4165  * (We are not very tense about that: we simply reset a chunk to be empty
4166  * if all its events got fired. The objective here is just to avoid useless
4167  * rescanning of events when a trigger queues new events during transaction
4168  * end, so it's not necessary to worry much about the case where only
4169  * some events are fired.)
4170  *
4171  * Returns true if no unfired events remain in the list (this allows us
4172  * to avoid repeating afterTriggerMarkEvents).
4173  */
4174 static bool
4175 afterTriggerInvokeEvents(AfterTriggerEventList *events,
4176  CommandId firing_id,
4177  EState *estate,
4178  bool delete_ok)
4179 {
4180  bool all_fired = true;
4181  AfterTriggerEventChunk *chunk;
4182  MemoryContext per_tuple_context;
4183  bool local_estate = false;
4184  ResultRelInfo *rInfo = NULL;
4185  Relation rel = NULL;
4186  TriggerDesc *trigdesc = NULL;
4187  FmgrInfo *finfo = NULL;
4188  Instrumentation *instr = NULL;
4189  TupleTableSlot *slot1 = NULL,
4190  *slot2 = NULL;
4191 
4192  /* Make a local EState if need be */
4193  if (estate == NULL)
4194  {
4195  estate = CreateExecutorState();
4196  local_estate = true;
4197  }
4198 
4199  /* Make a per-tuple memory context for trigger function calls */
4200  per_tuple_context =
4201  AllocSetContextCreate(CurrentMemoryContext,
4202  "AfterTriggerTupleContext",
4203  ALLOCSET_DEFAULT_SIZES);
4204 
4205  for_each_chunk(chunk, *events)
4206  {
4207  AfterTriggerEvent event;
4208  bool all_fired_in_chunk = true;
4209 
4210  for_each_event(event, chunk)
4211  {
4212  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4213 
4214  /*
4215  * Is it one for me to fire?
4216  */
4217  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4218  evtshared->ats_firing_id == firing_id)
4219  {
4220  /*
4221  * So let's fire it... but first, find the correct relation if
4222  * this is not the same relation as before.
4223  */
4224  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4225  {
4226  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
4227  rel = rInfo->ri_RelationDesc;
4228  trigdesc = rInfo->ri_TrigDesc;
4229  finfo = rInfo->ri_TrigFunctions;
4230  instr = rInfo->ri_TrigInstrument;
4231  if (slot1 != NULL)
4232  {
4233  ExecDropSingleTupleTableSlot(slot1);
4234  ExecDropSingleTupleTableSlot(slot2);
4235  slot1 = slot2 = NULL;
4236  }
4237  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4238  {
4239  slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4240  &TTSOpsMinimalTuple);
4241  slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4242  &TTSOpsMinimalTuple);
4243  }
4244  if (trigdesc == NULL) /* should not happen */
4245  elog(ERROR, "relation %u has no triggers",
4246  evtshared->ats_relid);
4247  }
4248 
4249  /*
4250  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4251  * still set, so recursive examinations of the event list
4252  * won't try to re-fire it.
4253  */
4254  AfterTriggerExecute(estate, event, rInfo, trigdesc, finfo, instr,
4255  per_tuple_context, slot1, slot2);
4256 
4257  /*
4258  * Mark the event as done.
4259  */
4260  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4261  event->ate_flags |= AFTER_TRIGGER_DONE;
4262  }
4263  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4264  {
4265  /* something remains to be done */
4266  all_fired = all_fired_in_chunk = false;
4267  }
4268  }
4269 
4270  /* Clear the chunk if delete_ok and nothing left of interest */
4271  if (delete_ok && all_fired_in_chunk)
4272  {
4273  chunk->freeptr = CHUNK_DATA_START(chunk);
4274  chunk->endfree = chunk->endptr;
4275 
4276  /*
4277  * If it's last chunk, must sync event list's tailfree too. Note
4278  * that delete_ok must NOT be passed as true if there could be
4279  * additional AfterTriggerEventList values pointing at this event
4280  * list, since we'd fail to fix their copies of tailfree.
4281  */
4282  if (chunk == events->tail)
4283  events->tailfree = chunk->freeptr;
4284  }
4285  }
4286  if (slot1 != NULL)
4287  {
4288  ExecDropSingleTupleTableSlot(slot1);
4289  ExecDropSingleTupleTableSlot(slot2);
4290  }
4291 
4292  /* Release working resources */
4293  MemoryContextDelete(per_tuple_context);
4294 
4295  if (local_estate)
4296  {
4297  ExecCloseResultRelations(estate);
4298  ExecResetTupleTable(estate->es_tupleTable, false);
4299  FreeExecutorState(estate);
4300  }
4301 
4302  return all_fired;
4303 }
4304 
4305 
4306 /*
4307  * GetAfterTriggersTableData
4308  *
4309  * Find or create an AfterTriggersTableData struct for the specified
4310  * trigger event (relation + operation type). Ignore existing structs
4311  * marked "closed"; we don't want to put any additional tuples into them,
4312  * nor change their stmt-triggers-fired state.
4313  *
4314  * Note: the AfterTriggersTableData list is allocated in the current
4315  * (sub)transaction's CurTransactionContext. This is OK because
4316  * we don't need it to live past AfterTriggerEndQuery.
4317  */
4318 static AfterTriggersTableData *
4319 GetAfterTriggersTableData(Oid relid, CmdType cmdType)
4320 {
4321  AfterTriggersTableData *table;
4322  AfterTriggersQueryData *qs;
4323  MemoryContext oldcxt;
4324  ListCell *lc;
4325 
4326  /* Caller should have ensured query_depth is OK. */
4327  Assert(afterTriggers.query_depth >= 0 &&
4328  afterTriggers.query_depth < afterTriggers.maxquerydepth);
4329  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4330 
4331  foreach(lc, qs->tables)
4332  {
4333  table = (AfterTriggersTableData *) lfirst(lc);
4334  if (table->relid == relid && table->cmdType == cmdType &&
4335  !table->closed)
4336  return table;
4337  }
4338 
4339  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4340 
4341  table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
4342  table->relid = relid;
4343  table->cmdType = cmdType;
4344  qs->tables = lappend(qs->tables, table);
4345 
4346  MemoryContextSwitchTo(oldcxt);
4347 
4348  return table;
4349 }
4350 
4351 
4352 /*
4353  * MakeTransitionCaptureState
4354  *
4355  * Make a TransitionCaptureState object for the given TriggerDesc, target
4356  * relation, and operation type. The TCS object holds all the state needed
4357  * to decide whether to capture tuples in transition tables.
4358  *
4359  * If there are no triggers in 'trigdesc' that request relevant transition
4360  * tables, then return NULL.
4361  *
4362  * The resulting object can be passed to the ExecAR* functions. When
4363  * dealing with child tables, the caller can set tcs_original_insert_tuple
4364  * to avoid having to reconstruct the original tuple in the root table's
4365  * format.
4366  *
4367  * Note that we copy the flags from a parent table into this struct (rather
4368  * than subsequently using the relation's TriggerDesc directly) so that we can
4369  * use it to control collection of transition tuples from child tables.
4370  *
4371  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4372  * on the same table during one query should share one transition table.
4373  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4374  * looked up using the table OID + CmdType, and are merely referenced by
4375  * the TransitionCaptureState objects we hand out to callers.
4376  */
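/*
 * Illustrative example (editorial addition, not from the upstream source):
 * a statement-level trigger declared as
 *
 *     CREATE TRIGGER tab_audit
 *         AFTER UPDATE ON tab
 *         REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
 *         FOR EACH STATEMENT EXECUTE FUNCTION audit_fn();
 *
 * sets trig_update_old_table and trig_update_new_table in tab's TriggerDesc,
 * so an UPDATE of tab reaches this function with cmdType == CMD_UPDATE and
 * both the "old" and "new" tuplestores get created.  The trigger, table, and
 * function names above are hypothetical.
 */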
4377 TransitionCaptureState *
4378 MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
4379 {
4380  TransitionCaptureState *state;
4381  bool need_old,
4382  need_new;
4383  AfterTriggersTableData *table;
4384  MemoryContext oldcxt;
4385  ResourceOwner saveResourceOwner;
4386 
4387  if (trigdesc == NULL)
4388  return NULL;
4389 
4390  /* Detect which table(s) we need. */
4391  switch (cmdType)
4392  {
4393  case CMD_INSERT:
4394  need_old = false;
4395  need_new = trigdesc->trig_insert_new_table;
4396  break;
4397  case CMD_UPDATE:
4398  need_old = trigdesc->trig_update_old_table;
4399  need_new = trigdesc->trig_update_new_table;
4400  break;
4401  case CMD_DELETE:
4402  need_old = trigdesc->trig_delete_old_table;
4403  need_new = false;
4404  break;
4405  default:
4406  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4407  need_old = need_new = false; /* keep compiler quiet */
4408  break;
4409  }
4410  if (!need_old && !need_new)
4411  return NULL;
4412 
4413  /* Check state, like AfterTriggerSaveEvent. */
4414  if (afterTriggers.query_depth < 0)
4415  elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4416 
4417  /* Be sure we have enough space to record events at this query depth. */
4418  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4419  AfterTriggerEnlargeQueryState();
4420 
4421  /*
4422  * Find or create an AfterTriggersTableData struct to hold the
4423  * tuplestore(s). If there's a matching struct but it's marked closed,
4424  * ignore it; we need a newer one.
4425  *
4426  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4427  * allocated in the current (sub)transaction's CurTransactionContext, and
4428  * the tuplestores are managed by the (sub)transaction's resource owner.
4429  * This is sufficient lifespan because we do not allow triggers using
4430  * transition tables to be deferrable; they will be fired during
4431  * AfterTriggerEndQuery, after which it's okay to delete the data.
4432  */
4433  table = GetAfterTriggersTableData(relid, cmdType);
4434 
4435  /* Now create required tuplestore(s), if we don't have them already. */
4436  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4437  saveResourceOwner = CurrentResourceOwner;
4438  CurrentResourceOwner = CurTransactionResourceOwner;
4439 
4440  if (need_old && table->old_tuplestore == NULL)
4441  table->old_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4442  if (need_new && table->new_tuplestore == NULL)
4443  table->new_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4444 
4445  CurrentResourceOwner = saveResourceOwner;
4446  MemoryContextSwitchTo(oldcxt);
4447 
4448  /* Now build the TransitionCaptureState struct, in caller's context */
4449  state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState));
4450  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4451  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4452  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4453  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4454  state->tcs_private = table;
4455 
4456  return state;
4457 }
4458 
4459 
4460 /* ----------
4461  * AfterTriggerBeginXact()
4462  *
4463  * Called at transaction start (either BEGIN or implicit for single
4464  * statement outside of transaction block).
4465  * ----------
4466  */
4467 void
4468 AfterTriggerBeginXact(void)
4469 {
4470  /*
4471  * Initialize after-trigger state structure to empty
4472  */
4473  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4474  afterTriggers.query_depth = -1;
4475 
4476  /*
4477  * Verify that there is no leftover state remaining. If these assertions
4478  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4479  * up properly.
4480  */
4481  Assert(afterTriggers.state == NULL);
4482  Assert(afterTriggers.query_stack == NULL);
4483  Assert(afterTriggers.maxquerydepth == 0);
4484  Assert(afterTriggers.event_cxt == NULL);
4485  Assert(afterTriggers.events.head == NULL);
4486  Assert(afterTriggers.trans_stack == NULL);
4487  Assert(afterTriggers.maxtransdepth == 0);
4488 }
4489 
4490 
4491 /* ----------
4492  * AfterTriggerBeginQuery()
4493  *
4494  * Called just before we start processing a single query within a
4495  * transaction (or subtransaction). Most of the real work gets deferred
4496  * until somebody actually tries to queue a trigger event.
4497  * ----------
4498  */
4499 void
4500 AfterTriggerBeginQuery(void)
4501 {
4502  /* Increase the query stack depth */
4503  afterTriggers.query_depth++;
4504 }
4505 
4506 
4507 /* ----------
4508  * AfterTriggerEndQuery()
4509  *
4510  * Called after one query has been completely processed. At this time
4511  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4512  * transfer deferred trigger events to the global deferred-trigger list.
4513  *
4514  * Note that this must be called BEFORE closing down the executor
4515  * with ExecutorEnd, because we make use of the EState's info about
4516  * target relations. Normally it is called from ExecutorFinish.
4517  * ----------
4518  */
4519 void
4520 AfterTriggerEndQuery(EState *estate)
4521 {
4522  AfterTriggersQueryData *qs;
4523 
4524  /* Must be inside a query, too */
4525  Assert(afterTriggers.query_depth >= 0);
4526 
4527  /*
4528  * If we never even got as far as initializing the event stack, there
4529  * certainly won't be any events, so exit quickly.
4530  */
4531  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4532  {
4533  afterTriggers.query_depth--;
4534  return;
4535  }
4536 
4537  /*
4538  * Process all immediate-mode triggers queued by the query, and move the
4539  * deferred ones to the main list of deferred events.
4540  *
4541  * Notice that we decide which ones will be fired, and put the deferred
4542  * ones on the main list, before anything is actually fired. This ensures
4543  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4544  * IMMEDIATE: all events we have decided to defer will be available for it
4545  * to fire.
4546  *
4547  * We loop in case a trigger queues more events at the same query level.
4548  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4549  * will instead fire any triggers in a dedicated query level. Foreign key
4550  * enforcement triggers do add to the current query level, thanks to their
4551  * passing fire_triggers = false to SPI_execute_snapshot(). Other
4552  * C-language triggers might do likewise.
4553  *
4554  * If we find no firable events, we don't have to increment
4555  * firing_counter.
4556  */
4557  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4558 
4559  for (;;)
4560  {
4561  if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
4562  {
4563  CommandId firing_id = afterTriggers.firing_counter++;
4564  AfterTriggerEventChunk *oldtail = qs->events.tail;
4565 
4566  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
4567  break; /* all fired */
4568 
4569  /*
4570  * Firing a trigger could result in query_stack being repalloc'd,
4571  * so we must recalculate qs after each afterTriggerInvokeEvents
4572  * call. Furthermore, it's unsafe to pass delete_ok = true here,
4573  * because that could cause afterTriggerInvokeEvents to try to
4574  * access qs->events after the stack has been repalloc'd.
4575  */
4576  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4577 
4578  /*
4579  * We'll need to scan the events list again. To reduce the cost
4580  * of doing so, get rid of completely-fired chunks. We know that
4581  * all events were marked IN_PROGRESS or DONE at the conclusion of
4582  * afterTriggerMarkEvents, so any still-interesting events must
4583  * have been added after that, and so must be in the chunk that
4584  * was then the tail chunk, or in later chunks. So, zap all
4585  * chunks before oldtail. This is approximately the same set of
4586  * events we would have gotten rid of by passing delete_ok = true.
4587  */
4588  Assert(oldtail != NULL);
4589  while (qs->events.head != oldtail)
4590  afterTriggerDeleteHeadEventChunk(qs);
4591  }
4592  else
4593  break;
4594  }
4595 
4596  /* Release query-level-local storage, including tuplestores if any */
4597  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
4598 
4599  afterTriggers.query_depth--;
4600 }
4601 
4602 
4603 /*
4604  * AfterTriggerFreeQuery
4605  * Release subsidiary storage for a trigger query level.
4606  * This includes closing down tuplestores.
4607  * Note: it's important for this to be safe if interrupted by an error
4608  * and then called again for the same query level.
4609  */
4610 static void
4611 AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
4612 {
4613  Tuplestorestate *ts;
4614  List *tables;
4615  ListCell *lc;
4616 
4617  /* Drop the trigger events */
4618  afterTriggerFreeEventList(&qs->events);
4619 
4620  /* Drop FDW tuplestore if any */
4621  ts = qs->fdw_tuplestore;
4622  qs->fdw_tuplestore = NULL;
4623  if (ts)
4624  tuplestore_end(ts);
4625 
4626  /* Release per-table subsidiary storage */
4627  tables = qs->tables;
4628  foreach(lc, tables)
4629  {
4630  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4631 
4632  ts = table->old_tuplestore;
4633  table->old_tuplestore = NULL;
4634  if (ts)
4635  tuplestore_end(ts);
4636  ts = table->new_tuplestore;
4637  table->new_tuplestore = NULL;
4638  if (ts)
4639  tuplestore_end(ts);
4640  }
4641 
4642  /*
4643  * Now free the AfterTriggersTableData structs and list cells. Reset list
4644  * pointer first; if list_free_deep somehow gets an error, better to leak
4645  * that storage than have an infinite loop.
4646  */
4647  qs->tables = NIL;
4648  list_free_deep(tables);
4649 }
4650 
4651 
4652 /* ----------
4653  * AfterTriggerFireDeferred()
4654  *
4655  * Called just before the current transaction is committed. At this
4656  * time we invoke all pending DEFERRED triggers.
4657  *
4658  * It is possible for other modules to queue additional deferred triggers
4659  * during pre-commit processing; therefore xact.c may have to call this
4660  * multiple times.
4661  * ----------
4662  */
4663 void
4664 AfterTriggerFireDeferred(void)
4665 {
4666  AfterTriggerEventList *events;
4667  bool snap_pushed = false;
4668 
4669  /* Must not be inside a query */
4670  Assert(afterTriggers.query_depth == -1);
4671 
4672  /*
4673  * If there are any triggers to fire, make sure we have set a snapshot for
4674  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
4675  * can't assume ActiveSnapshot is valid on entry.)
4676  */
4677  events = &afterTriggers.events;
4678  if (events->head != NULL)
4679  {
4680  PushActiveSnapshot(GetTransactionSnapshot());
4681  snap_pushed = true;
4682  }
4683 
4684  /*
4685  * Run all the remaining triggers. Loop until they are all gone, in case
4686  * some trigger queues more for us to do.
4687  */
4688  while (afterTriggerMarkEvents(events, NULL, false))
4689  {
4690  CommandId firing_id = afterTriggers.firing_counter++;
4691 
4692  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4693  break; /* all fired */
4694  }
4695 
4696  /*
4697  * We don't bother freeing the event list, since it will go away anyway
4698  * (and more efficiently than via pfree) in AfterTriggerEndXact.
4699  */
4700 
4701  if (snap_pushed)
4702  PopActiveSnapshot();
4703 }
4704 
4705 
4706 /* ----------
4707  * AfterTriggerEndXact()
4708  *
4709  * The current transaction is finishing.
4710  *
4711  * Any unfired triggers are canceled so we simply throw
4712  * away anything we know.
4713  *
4714  * Note: it is possible for this to be called repeatedly in case of
4715  * error during transaction abort; therefore, do not complain if
4716  * already closed down.
4717  * ----------
4718  */
4719 void
4720 AfterTriggerEndXact(bool isCommit)
4721 {
4722  /*
4723  * Forget the pending-events list.
4724  *
4725  * Since all the info is in TopTransactionContext or children thereof, we
4726  * don't really need to do anything to reclaim memory. However, the
4727  * pending-events list could be large, and so it's useful to discard it as
4728  * soon as possible --- especially if we are aborting because we ran out
4729  * of memory for the list!
4730  */
4731  if (afterTriggers.event_cxt)
4732  {
4733  MemoryContextDelete(afterTriggers.event_cxt);
4734  afterTriggers.event_cxt = NULL;
4735  afterTriggers.events.head = NULL;
4736  afterTriggers.events.tail = NULL;
4737  afterTriggers.events.tailfree = NULL;
4738  }
4739 
4740  /*
4741  * Forget any subtransaction state as well. Since this can't be very
4742  * large, we let the eventual reset of TopTransactionContext free the
4743  * memory instead of doing it here.
4744  */
4745  afterTriggers.trans_stack = NULL;
4746  afterTriggers.maxtransdepth = 0;
4747 
4748 
4749  /*
4750  * Forget the query stack and constraint-related state information. As
4751  * with the subtransaction state information, we don't bother freeing the
4752  * memory here.
4753  */
4754  afterTriggers.query_stack = NULL;
4755  afterTriggers.maxquerydepth = 0;
4756  afterTriggers.state = NULL;
4757 
4758  /* No more afterTriggers manipulation until next transaction starts. */
4759  afterTriggers.query_depth = -1;
4760 }
4761 
4762 /*
4763  * AfterTriggerBeginSubXact()
4764  *
4765  * Start a subtransaction.
4766  */
4767 void
4768 AfterTriggerBeginSubXact(void)
4769 {
4770  int my_level = GetCurrentTransactionNestLevel();
4771 
4772  /*
4773  * Allocate more space in the trans_stack if needed. (Note: because the
4774  * minimum nest level of a subtransaction is 2, we waste the first couple
4775  * entries of the array; not worth the notational effort to avoid it.)
4776  */
4777  while (my_level >= afterTriggers.maxtransdepth)
4778  {
4779  if (afterTriggers.maxtransdepth == 0)
4780  {
4781  /* Arbitrarily initialize for max of 8 subtransaction levels */
4782  afterTriggers.trans_stack = (AfterTriggersTransData *)
4783  MemoryContextAlloc(TopTransactionContext,
4784  8 * sizeof(AfterTriggersTransData));
4785  afterTriggers.maxtransdepth = 8;
4786  }
4787  else
4788  {
4789  /* repalloc will keep the stack in the same context */
4790  int new_alloc = afterTriggers.maxtransdepth * 2;
4791 
4792  afterTriggers.trans_stack = (AfterTriggersTransData *)
4793  repalloc(afterTriggers.trans_stack,
4794  new_alloc * sizeof(AfterTriggersTransData));
4795  afterTriggers.maxtransdepth = new_alloc;
4796  }
4797  }
4798 
4799  /*
4800  * Push the current information into the stack. The SET CONSTRAINTS state
4801  * is not saved until/unless changed. Likewise, we don't make a
4802  * per-subtransaction event context until needed.
4803  */
4804  afterTriggers.trans_stack[my_level].state = NULL;
4805  afterTriggers.trans_stack[my_level].events = afterTriggers.events;
4806  afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
4807  afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
4808 }
4809 
4810 /*
4811  * AfterTriggerEndSubXact()
4812  *
4813  * The current subtransaction is ending.
4814  */
4815 void
4816 AfterTriggerEndSubXact(bool isCommit)
4817 {
4818  int my_level = GetCurrentTransactionNestLevel();
4819  SetConstraintState state;
4820  AfterTriggerEvent event;
4821  AfterTriggerEventChunk *chunk;
4822  CommandId subxact_firing_id;
4823 
4824  /*
4825  * Pop the prior state if needed.
4826  */
4827  if (isCommit)
4828  {
4829  Assert(my_level < afterTriggers.maxtransdepth);
4830  /* If we saved a prior state, we don't need it anymore */
4831  state = afterTriggers.trans_stack[my_level].state;
4832  if (state != NULL)
4833  pfree(state);
4834  /* this avoids double pfree if error later: */
4835  afterTriggers.trans_stack[my_level].state = NULL;
4836  Assert(afterTriggers.query_depth ==
4837  afterTriggers.trans_stack[my_level].query_depth);
4838  }
4839  else
4840  {
4841  /*
4842  * Aborting. It is possible subxact start failed before calling
4843  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
4844  * trans_stack levels that aren't there.
4845  */
4846  if (my_level >= afterTriggers.maxtransdepth)
4847  return;
4848 
4849  /*
4850  * Release query-level storage for queries being aborted, and restore
4851  * query_depth to its pre-subxact value. This assumes that a
4852  * subtransaction will not add events to query levels started in an
4853  * earlier transaction state.
4854  */
4855  while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
4856  {
4857  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
4858  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
4859  afterTriggers.query_depth--;
4860  }
4861  Assert(afterTriggers.query_depth ==
4862  afterTriggers.trans_stack[my_level].query_depth);
4863 
4864  /*
4865  * Restore the global deferred-event list to its former length,
4866  * discarding any events queued by the subxact.
4867  */
4868  afterTriggerRestoreEventList(&afterTriggers.events,
4869  &afterTriggers.trans_stack[my_level].events);
4870 
4871  /*
4872  * Restore the trigger state. If the saved state is NULL, then this
4873  * subxact didn't save it, so it doesn't need restoring.
4874  */
4875  state = afterTriggers.trans_stack[my_level].state;
4876  if (state != NULL)
4877  {
4878  pfree(afterTriggers.state);
4879  afterTriggers.state = state;
4880  }
4881  /* this avoids double pfree if error later: */
4882  afterTriggers.trans_stack[my_level].state = NULL;
4883 
4884  /*
4885  * Scan for any remaining deferred events that were marked DONE or IN
4886  * PROGRESS by this subxact or a child, and un-mark them. We can
4887  * recognize such events because they have a firing ID greater than or
4888  * equal to the firing_counter value we saved at subtransaction start.
4889  * (This essentially assumes that the current subxact includes all
4890  * subxacts started after it.)
4891  */
4892  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
4893  for_each_event_chunk(event, chunk, afterTriggers.events)
4894  {
4895  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4896 
4897  if (event->ate_flags &
4898  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
4899  {
4900  if (evtshared->ats_firing_id >= subxact_firing_id)
4901  event->ate_flags &=
4902  ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
4903  }
4904  }
4905  }
4906 }
4907 
4908 /* ----------
4909  * AfterTriggerEnlargeQueryState()
4910  *
4911  * Prepare the necessary state so that we can record AFTER trigger events
4912  * queued by a query. It is allowed to have nested queries within a
4913  * (sub)transaction, so we need to have separate state for each query
4914  * nesting level.
4915  * ----------
4916  */
4917 static void
4918 AfterTriggerEnlargeQueryState(void)
4919 {
4920  int init_depth = afterTriggers.maxquerydepth;
4921 
4922  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
4923 
4924  if (afterTriggers.maxquerydepth == 0)
4925  {
4926  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
4927 
4928  afterTriggers.query_stack = (AfterTriggersQueryData *)
4929  MemoryContextAlloc(TopTransactionContext,
4930  new_alloc * sizeof(AfterTriggersQueryData));
4931  afterTriggers.maxquerydepth = new_alloc;
4932  }
4933  else
4934  {
4935  /* repalloc will keep the stack in the same context */
4936  int old_alloc = afterTriggers.maxquerydepth;
4937  int new_alloc = Max(afterTriggers.query_depth + 1,
4938  old_alloc * 2);
4939 
4940  afterTriggers.query_stack = (AfterTriggersQueryData *)
4941  repalloc(afterTriggers.query_stack,
4942  new_alloc * sizeof(AfterTriggersQueryData));
4943  afterTriggers.maxquerydepth = new_alloc;
4944  }
4945 
4946  /* Initialize new array entries to empty */
4947  while (init_depth < afterTriggers.maxquerydepth)
4948  {
4949  AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth];
4950 
4951  qs->events.head = NULL;
4952  qs->events.tail = NULL;
4953  qs->events.tailfree = NULL;
4954  qs->fdw_tuplestore = NULL;
4955  qs->tables = NIL;
4956 
4957  ++init_depth;
4958  }
4959 }
4960 
4961 /*
4962  * Create an empty SetConstraintState with room for numalloc trigstates
4963  */
4964 static SetConstraintState
4965 SetConstraintStateCreate(int numalloc)
4966 {
4967  SetConstraintState state;
4968 
4969  /* Behave sanely with numalloc == 0 */
4970  if (numalloc <= 0)
4971  numalloc = 1;
4972 
4973  /*
4974  * We assume that zeroing will correctly initialize the state values.
4975  */
4976  state = (SetConstraintState)
4977  MemoryContextAllocZero(TopTransactionContext,
4978  offsetof(SetConstraintStateData, trigstates) +
4979  numalloc * sizeof(SetConstraintTriggerData));
4980 
4981  state->numalloc = numalloc;
4982 
4983  return state;
4984 }
4985 
4986 /*
4987  * Copy a SetConstraintState
4988  */
4989 static SetConstraintState
4990 SetConstraintStateCopy(SetConstraintState origstate)
4991 {
4992  SetConstraintState state;
4993 
4994  state = SetConstraintStateCreate(origstate->numstates);
4995 
4996  state->all_isset = origstate->all_isset;
4997  state->all_isdeferred = origstate->all_isdeferred;
4998  state->numstates = origstate->numstates;
4999  memcpy(state->trigstates, origstate->trigstates,
5000  origstate->numstates * sizeof(SetConstraintTriggerData));
5001 
5002  return state;
5003 }
5004 
5005 /*
5006  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
5007  * pointer to the state object (it will change if we have to repalloc).
5008  */
5009 static SetConstraintState
5010 SetConstraintStateAddItem(SetConstraintState state,
5011  Oid tgoid, bool tgisdeferred)
5012 {
5013  if (state->numstates >= state->numalloc)
5014  {
5015  int newalloc = state->numalloc * 2;
5016 
5017  newalloc = Max(newalloc, 8); /* in case original has size 0 */
5018  state = (SetConstraintState)
5019  repalloc(state,
5020  offsetof(SetConstraintStateData, trigstates) +
5021  newalloc * sizeof(SetConstraintTriggerData));
5022  state->numalloc = newalloc;
5023  Assert(state->numstates < state->numalloc);
5024  }
5025 
5026  state->trigstates[state->numstates].sct_tgoid = tgoid;
5027  state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
5028  state->numstates++;
5029 
5030  return state;
5031 }
5032 
5033 /* ----------
5034  * AfterTriggerSetState()
5035  *
5036  * Execute the SET CONSTRAINTS ... utility command.
5037  * ----------
5038  */
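/*
 * Usage sketch (editorial addition): within a transaction block,
 *
 *     SET CONSTRAINTS ALL DEFERRED;
 *     SET CONSTRAINTS my_fk, other_fk IMMEDIATE;
 *
 * The first form (stmt->constraints == NIL) just sets the per-transaction
 * "ALL" state; the second looks up the named constraints, including any
 * descendant constraints on partitions, and then fires any already-queued
 * events that have now become immediate.  The constraint names shown are
 * hypothetical.
 */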
5039 void
5040 AfterTriggerSetState(ConstraintsSetStmt *stmt)
5041 {
5042  int my_level = GetCurrentTransactionNestLevel();
5043 
5044  /* If we haven't already done so, initialize our state. */
5045  if (afterTriggers.state == NULL)
5046  afterTriggers.state = SetConstraintStateCreate(8);
5047 
5048  /*
5049  * If in a subtransaction, and we didn't save the current state already,
5050  * save it so it can be restored if the subtransaction aborts.
5051  */
5052  if (my_level > 1 &&
5053  afterTriggers.trans_stack[my_level].state == NULL)
5054  {
5055  afterTriggers.trans_stack[my_level].state =
5056  SetConstraintStateCopy(afterTriggers.state);
5057  }
5058 
5059  /*
5060  * Handle SET CONSTRAINTS ALL ...
5061  */
5062  if (stmt->constraints == NIL)
5063  {
5064  /*
5065  * Forget any previous SET CONSTRAINTS commands in this transaction.
5066  */
5067  afterTriggers.state->numstates = 0;
5068 
5069  /*
5070  * Set the per-transaction ALL state to known.
5071  */
5072  afterTriggers.state->all_isset = true;
5073  afterTriggers.state->all_isdeferred = stmt->deferred;
5074  }
5075  else
5076  {
5077  Relation conrel;
5078  Relation tgrel;
5079  List *conoidlist = NIL;
5080  List *tgoidlist = NIL;
5081  ListCell *lc;
5082 
5083  /*
5084  * Handle SET CONSTRAINTS constraint-name [, ...]
5085  *
5086  * First, identify all the named constraints and make a list of their
5087  * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
5088  * the same name within a schema, the specifications are not
5089  * necessarily unique. Our strategy is to target all matching
5090  * constraints within the first search-path schema that has any
5091  * matches, but disregard matches in schemas beyond the first match.
5092  * (This is a bit odd but it's the historical behavior.)
5093  *
5094  * A constraint in a partitioned table may have corresponding
5095  * constraints in the partitions. Grab those too.
5096  */
5097  conrel = table_open(ConstraintRelationId, AccessShareLock);
5098 
5099  foreach(lc, stmt->constraints)
5100  {
5101  RangeVar *constraint = lfirst(lc);
5102  bool found;
5103  List *namespacelist;
5104  ListCell *nslc;
5105 
5106  if (constraint->catalogname)
5107  {
5108  if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
5109  ereport(ERROR,
5110  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5111  errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
5112  constraint->catalogname, constraint->schemaname,
5113  constraint->relname)));
5114  }
5115 
5116  /*
5117  * If we're given the schema name with the constraint, look only
5118  * in that schema. If given a bare constraint name, use the
5119  * search path to find the first matching constraint.
5120  */
5121  if (constraint->schemaname)
5122  {
5123  Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
5124  false);
5125 
5126  namespacelist = list_make1_oid(namespaceId);
5127  }
5128  else
5129  {
5130  namespacelist = fetch_search_path(true);
5131  }
5132 
5133  found = false;
5134  foreach(nslc, namespacelist)
5135  {
5136  Oid namespaceId = lfirst_oid(nslc);
5137  SysScanDesc conscan;
5138  ScanKeyData skey[2];
5139  HeapTuple tup;
5140 
5141  ScanKeyInit(&skey[0],
5142  Anum_pg_constraint_conname,
5143  BTEqualStrategyNumber, F_NAMEEQ,
5144  CStringGetDatum(constraint->relname));
5145  ScanKeyInit(&skey[1],
5146  Anum_pg_constraint_connamespace,
5147  BTEqualStrategyNumber, F_OIDEQ,
5148  ObjectIdGetDatum(namespaceId));
5149 
5150  conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
5151  true, NULL, 2, skey);
5152 
5153  while (HeapTupleIsValid(tup = systable_getnext(conscan)))
5154  {
5155  Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
5156 
5157  if (con->condeferrable)
5158  conoidlist = lappend_oid(conoidlist, con->oid);
5159  else if (stmt->deferred)
5160  ereport(ERROR,
5161  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
5162  errmsg("constraint \"%s\" is not deferrable",
5163  constraint->relname)));
5164  found = true;
5165  }
5166 
5167  systable_endscan(conscan);
5168 
5169  /*
5170  * Once we've found a matching constraint we do not search
5171  * later parts of the search path.
5172  */
5173  if (found)
5174  break;
5175  }
5176 
5177  list_free(namespacelist);
5178 
5179  /*
5180  * Not found ?
5181  */
5182  if (!found)
5183  ereport(ERROR,
5184  (errcode(ERRCODE_UNDEFINED_OBJECT),
5185  errmsg("constraint \"%s\" does not exist",
5186  constraint->relname)));
5187  }
5188 
5189  /*
5190  * Scan for any possible descendants of the constraints. We append
5191  * whatever we find to the same list that we're scanning; this has the
5192  * effect that we create new scans for those, too, so if there are
5193  * further descendants, we'll also catch them.
5194  */
5195  foreach(lc, conoidlist)
5196  {
5197  Oid parent = lfirst_oid(lc);
5198  ScanKeyData key;
5199  SysScanDesc scan;
5200  HeapTuple tuple;
5201 
5202  ScanKeyInit(&key,
5203  Anum_pg_constraint_conparentid,
5204  BTEqualStrategyNumber, F_OIDEQ,
5205  ObjectIdGetDatum(parent));
5206 
5207  scan = systable_beginscan(conrel, ConstraintParentIndexId, true, NULL, 1, &key);
5208 
5209  while (HeapTupleIsValid(tuple = systable_getnext(scan)))
5210  {
5211  Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple);
5212 
5213  conoidlist = lappend_oid(conoidlist, con->oid);
5214  }
5215 
5216  systable_endscan(scan);
5217  }
5218 
5219  table_close(conrel, AccessShareLock);
5220 
5221  /*
5222  * Now, locate the trigger(s) implementing each of these constraints,
5223  * and make a list of their OIDs.
5224  */
5225  tgrel = table_open(TriggerRelationId, AccessShareLock);
5226 
5227  foreach(lc, conoidlist)
5228  {
5229  Oid conoid = lfirst_oid(lc);
5230  ScanKeyData skey;
5231  SysScanDesc tgscan;
5232  HeapTuple htup;
5233 
5234  ScanKeyInit(&skey,
5235  Anum_pg_trigger_tgconstraint,
5236  BTEqualStrategyNumber, F_OIDEQ,
5237  ObjectIdGetDatum(conoid));
5238 
5239  tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
5240  NULL, 1, &skey);
5241 
5242  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
5243  {
5244  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
5245 
5246  /*
5247  * Silently skip triggers that are marked as non-deferrable in
5248  * pg_trigger. This is not an error condition, since a
5249  * deferrable RI constraint may have some non-deferrable
5250  * actions.
5251  */
5252  if (pg_trigger->tgdeferrable)
5253  tgoidlist = lappend_oid(tgoidlist, pg_trigger->oid);
5254  }
5255 
5256  systable_endscan(tgscan);
5257  }
5258 
5259  table_close(tgrel, AccessShareLock);
5260 
5261  /*
5262  * Now we can set the trigger states of individual triggers for this
5263  * xact.
5264  */
5265  foreach(lc, tgoidlist)
5266  {
5267  Oid tgoid = lfirst_oid(lc);
5268  SetConstraintState state = afterTriggers.state;
5269  bool found = false;
5270  int i;
5271 
5272  for (i = 0; i < state->numstates; i++)
5273  {
5274  if (state->trigstates[i].sct_tgoid == tgoid)
5275  {
5276  state->trigstates[i].sct_tgisdeferred = stmt->deferred;
5277  found = true;
5278  break;
5279  }
5280  }
5281  if (!found)
5282  {
5283  afterTriggers.state =
5284  SetConstraintStateAddItem(state, tgoid, stmt->deferred);
5285  }
5286  }
5287  }
5288 
5289  /*
5290  * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
5291  * checks against that constraint must be made when the SET CONSTRAINTS
5292  * command is executed -- i.e. the effects of the SET CONSTRAINTS command
5293  * apply retroactively. We've updated the constraints state, so scan the
5294  * list of previously deferred events to fire any that have now become
5295  * immediate.
5296  *
5297  * Obviously, if this was SET ... DEFERRED then it can't have converted
5298  * any unfired events to immediate, so we need do nothing in that case.
5299  */
5300  if (!stmt->deferred)
5301  {
5302  AfterTriggerEventList *events = &afterTriggers.events;
5303  bool snapshot_set = false;
5304 
5305  while (afterTriggerMarkEvents(events, NULL, true))
5306  {
5307  CommandId firing_id = afterTriggers.firing_counter++;
5308 
5309  /*
5310  * Make sure a snapshot has been established in case trigger
5311  * functions need one. Note that we avoid setting a snapshot if
5312  * we don't find at least one trigger that has to be fired now.
5313  * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
5314  * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
5315  * at the start of a transaction it's not possible for any trigger
5316  * events to be queued yet.)
5317  */
5318  if (!snapshot_set)
5319  {
5320  PushActiveSnapshot(GetTransactionSnapshot());
5321  snapshot_set = true;
5322  }
5323 
5324  /*
5325  * We can delete fired events if we are at top transaction level,
5326  * but we'd better not if inside a subtransaction, since the
5327  * subtransaction could later get rolled back.
5328  */
5329  if (afterTriggerInvokeEvents(events, firing_id, NULL,
5330  !IsSubTransaction()))
5331  break; /* all fired */
5332  }
5333 
5334  if (snapshot_set)
5335  PopActiveSnapshot();
5336 }
5337 }
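The loop above establishes a snapshot lazily: it pushes one only once it knows at least one deferred event must actually fire now, and pops it exactly once afterwards. A minimal standalone sketch of that push-once/pop-once idiom, with the real mark/invoke machinery replaced by a simple loop body:

static void
drain_queue_sketch(int nitems)
{
    bool        snapshot_set = false;
    int         i;

    for (i = 0; i < nitems; i++)
    {
        /* Set a snapshot only once we know something will actually run. */
        if (!snapshot_set)
        {
            PushActiveSnapshot(GetTransactionSnapshot());
            snapshot_set = true;
        }
        /* ... invoke one unit of work here ... */
    }

    /* Pop exactly once, matching the single conditional push above. */
    if (snapshot_set)
        PopActiveSnapshot();
}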
5338 
5339 /* ----------
5340  * AfterTriggerPendingOnRel()
5341  * Test to see if there are any pending after-trigger events for rel.
5342  *
5343  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5344  * it is unsafe to perform major surgery on a relation. Note that only
5345  * local pending events are examined. We assume that having exclusive lock
5346  * on a rel guarantees there are no unserviced events in other backends ---
5347  * but having a lock does not prevent there being such events in our own.
5348  *
5349  * In some scenarios it'd be reasonable to remove pending events (more
5350  * specifically, mark them DONE by the current subxact) but without a lot
5351  * of knowledge of the trigger semantics we can't do this in general.
5352  * ----------
5353  */
5354 bool
5355 AfterTriggerPendingOnRel(Oid relid)
5356 {
5357  AfterTriggerEvent event;
5358  AfterTriggerEventChunk *chunk;
5359  int depth;
5360 
5361  /* Scan queued events */
5362  for_each_event_chunk(event, chunk, afterTriggers.events)
5363  {
5364  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5365 
5366  /*
5367  * We can ignore completed events. (Even if a DONE flag is rolled
5368  * back by subxact abort, it's OK because the effects of the TRUNCATE
5369  * or whatever must get rolled back too.)
5370  */
5371  if (event->ate_flags & AFTER_TRIGGER_DONE)
5372  continue;
5373 
5374  if (evtshared->ats_relid == relid)
5375  return true;
5376  }
5377 
5378  /*
5379  * Also scan events queued by incomplete queries. This could only matter
5380  * if TRUNCATE/etc is executed by a function or trigger within an updating
5381  * query on the same relation, which is pretty perverse, but let's check.
5382  */
5383  for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
5384  {
5385  for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events)
5386  {
5387  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5388 
5389  if (event->ate_flags & AFTER_TRIGGER_DONE)
5390  continue;
5391 
5392  if (evtshared->ats_relid == relid)
5393  return true;
5394  }
5395  }
5396 
5397  return false;
5398 }
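For context, a sketch of how a utility command can use this check before performing "major surgery" on a relation. The error message mirrors the one PostgreSQL's TRUNCATE path raises, but the surrounding function name here is hypothetical:

/* Illustrative caller-side guard (function name is hypothetical). */
static void
check_no_pending_trigger_events(Relation rel)
{
    if (AfterTriggerPendingOnRel(RelationGetRelid(rel)))
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_IN_USE),
                 errmsg("cannot truncate table \"%s\" because it has pending trigger events",
                        RelationGetRelationName(rel))));
}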
5399 
5400 
5401 /* ----------
5402  * AfterTriggerSaveEvent()
5403  *
5404  * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5405  * be fired for an event.
5406  *
5407  * NOTE: this is called whenever there are any triggers associated with
5408  * the event (even if they are disabled). This function decides which
5409  * triggers actually need to be queued. It is also called after each row,
5410  * even if there are no triggers for that event, if there are any AFTER
5411  * STATEMENT triggers for the statement which use transition tables, so that
5412  * the transition tuplestores can be built. Furthermore, if the transition
5413  * capture is happening for UPDATEd rows being moved to another partition due
5414  * to the partition-key being changed, then this function is called once when
5415  * the row is deleted (to capture OLD row), and once when the row is inserted
5416  * into another partition (to capture NEW row). This is done separately because
5417  * DELETE and INSERT happen on different tables.
5418  *
5419  * Transition tuplestores are built now, rather than when events are pulled
5420  * off of the queue because AFTER ROW triggers are allowed to select from the
5421  * transition tables for the statement.
5422  * ----------
5423  */
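To illustrate the NOTE above, here is a sketch of the shape of a row-level ExecAR* caller: it queues events either when AFTER ROW INSERT triggers exist or when an INSERT transition table must be populated even though no row trigger will fire. It is modeled on the ExecARInsertTriggers() wrapper elsewhere in this file and assumes trigger.c's own headers; treat the exact parameter list as an approximation rather than a definitive quote:

void
ExecARInsertTriggers_sketch(EState *estate, ResultRelInfo *relinfo,
                            TupleTableSlot *slot, List *recheckIndexes,
                            TransitionCaptureState *transition_capture)
{
    TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

    /*
     * Call AfterTriggerSaveEvent() if there are AFTER ROW triggers, or if
     * a transition table needs this row despite no row trigger firing.
     */
    if ((trigdesc && trigdesc->trig_insert_after_row) ||
        (transition_capture && transition_capture->tcs_insert_new_table))
        AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
                              true, NULL, slot,
                              recheckIndexes, NULL,
                              transition_capture);
}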
5424 static void
5425 AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
5426  int event, bool row_trigger,
5427  TupleTableSlot *oldslot, TupleTableSlot *newslot,
5428  List *recheckIndexes, Bitmapset *modifiedCols,
5429  TransitionCaptureState *transition_capture)
5430 {
5431  Relation rel = relinfo->ri_RelationDesc;
5432  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5433  AfterTriggerEventData new_event;
5434  AfterTriggerSharedData new_shared;
5435  char relkind = rel->rd_rel->relkind;
5436  int tgtype_event;
5437  int tgtype_level;
5438  int i;
5439  Tuplestorestate *fdw_tuplestore = NULL;
5440 
5441  /*
5442  * Check state. We use a normal test not Assert because it is possible to
5443  * reach here in the wrong state given misconfigured RI triggers, in
5444  * particular deferring a cascade action trigger.
5445  */
5446  if (afterTriggers.query_depth < 0)
5447  elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
5448 
5449  /* Be sure we have enough space to record events at this query depth. */
 5450  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
 5451  AfterTriggerEnlargeQueryState();
 5452 
5453  /*
5454  * If the directly named relation has any triggers with transition tables,
5455  * then we need to capture transition tuples.
5456  */
5457  if (row_trigger && transition_capture != NULL)
5458  {
5459  TupleTableSlot *original_insert_tuple = transition_capture->tcs_original_insert_tuple;
5460  TupleConversionMap *map = relinfo->ri_ChildToRootMap;
5461  bool delete_old_table = transition_capture->tcs_delete_old_table;
5462  bool update_old_table = transition_capture->tcs_update_old_table;
5463  bool update_new_table = transition_capture->tcs_update_new_table;
5464  bool insert_new_table = transition_capture->tcs_insert_new_table;
5465 
5466  /*
5467  * For INSERT events NEW should be non-NULL, for DELETE events OLD
5468  * should be non-NULL, whereas for UPDATE events normally both OLD and
5469  * NEW are non-NULL. But for UPDATE events fired for capturing
5470  * transition tuples during UPDATE partition-key row movement, OLD is
5471  * NULL when the event is for a row being inserted, whereas NEW is
5472  * NULL when the event is for a row being deleted.
5473  */
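 /*
  * For example, with a table partitioned on "status", an UPDATE that
  * changes a row's "status" so that it must move from partition A to
  * partition B reaches this code twice: once for the DELETE from A
  * (only oldslot is set) and once for the INSERT into B (only newslot
  * is set).
  */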
5474  Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5475  TupIsNull(oldslot)));
5476  Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5477  TupIsNull(newslot)));
5478 
5479  if (!TupIsNull(oldslot) &&
5480  ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
5481  (event == TRIGGER_EVENT_UPDATE && update_old_table)))
5482  {
5483  Tuplestorestate *old_tuplestore;
5484 
5485  old_tuplestore = transition_capture->tcs_private->old_tuplestore;
5486 
5487  if (map != NULL)
5488  {
5489  TupleTableSlot *storeslot;
5490 
5491  storeslot = transition_capture->tcs_private->storeslot;
5492  if (!storeslot)
5493  {
5494  storeslot = ExecAllocTableSlot(&estate->es_tupleTable,
5495  map->outdesc,
5496  &TTSOpsVirtual);
5497  transition_capture->tcs_private->storeslot = storeslot;
5498  }
5499 
5500  execute_attr_map_slot(map->attrMap, oldslot, storeslot);
5501  tuplestore_puttupleslot(old_tuplestore, storeslot);
5502  }
5503  else
5504  tuplestore_puttupleslot(old_tuplestore, oldslot);
5505  }
5506  if (!TupIsNull(newslot) &&
5507  ((event == TRIGGER_EVENT_INSERT && insert_new_table) ||
5508  (event == TRIGGER_EVENT_UPDATE && update_new_table)))
5509  {
5510  Tuplestorestate *new_tuplestore;
5511 
5512  new_tuplestore = transition_capture->tcs_private->new_tuplestore;
5513 
5514  if (original_insert_tuple != NULL)
5515  tuplestore_puttupleslot(new_tuplestore,
5516  original_insert_tuple);
5517  else if (map != NULL)
5518  {
5519  TupleTableSlot *storeslot;
5520 
5521  storeslot = transition_capture->tcs_private->storeslot;
5522 
5523  if (!storeslot)
5524  {
5525  storeslot = ExecAllocTableSlot(&estate->es_tupleTable,
5526  map->outdesc,
5527  &TTSOpsVirtual);
5528  transition_capture->tcs_private->storeslot = storeslot;
5529  }
5530 
5531  execute_attr_map_slot(map->attrMap, newslot, storeslot);
5532  tuplestore_puttupleslot(new_tuplestore, storeslot);
5533  }
5534  else
5535  tuplestore_puttupleslot(new_tuplestore, newslot);
5536  }
5537 
5538  /*
5539  * If transition tables are the only reason we're here, return. As
5540  * mentioned above, we can also be here during update tuple routing in
5541  * presence of transition tables, in which case this function is
5542  * called separately for oldtup and newtup, so we expect exactly one
5543  * of them to be NULL.
5544  */
5545  if (trigdesc == NULL ||
5546  (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
5547  (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
5548  (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) ||
5549  (event == TRIGGER_EVENT_UPDATE && (TupIsNull(oldslot) ^ TupIsNull(newslot))))
5550  return;
5551  }
5552 
5553  /*
5554  * Validate the event code and collect the associated tuple CTIDs.
5555  *
5556  * The event code will be used both as a bitmask and an array offset, so
5557  * validation is important to make sure we don't walk off the edge of our
5558  * arrays.
5559  *
5560  * Also, if we're considering statement-level triggers, check whether we
5561  * already queued a set of them for this event, and cancel the prior set
5562  * if so. This preserves the behavior that statement-level triggers fire
5563  * just once per statement and fire after row-level triggers.
5564  */
5565  switch (event)
5566  {
5567  case TRIGGER_EVENT_INSERT:
5568  tgtype_event = TRIGGER_TYPE_INSERT;
5569  if (row_trigger)
5570  {
5571  Assert(oldslot == NULL);
5572  Assert(newslot != NULL);
5573  ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid1));
5574  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5575  }
5576  else
5577  {
5578  Assert(oldslot == NULL);
5579  Assert(newslot == NULL);
5580  ItemPointerSetInvalid(&(new_event.ate_ctid1));
 5581  ItemPointerSetInvalid(&(new_event.ate_ctid2));
 5582  cancel_prior_stmt_triggers(RelationGetRelid(rel),
 5583  CMD_INSERT, event);
5584  }
5585  break;
5586  case TRIGGER_EVENT_DELETE:
5587  tgtype_event = TRIGGER_TYPE_DELETE;
5588  if (row_trigger)
5589  {
5590  Assert(oldslot != NULL);
5591  Assert(newslot == NULL);
5592  ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
5593  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5594  }
5595  else
5596  {
5597  Assert(oldslot == NULL);
5598  Assert(newslot == NULL);
5599  ItemPointerSetInvalid(&(new_event.ate_ctid1));
 5600  ItemPointerSetInvalid(&(new_event.ate_ctid2));
 5601  cancel_prior_stmt_triggers(RelationGetRelid(rel),
 5602  CMD_DELETE, event);
5603  }
5604  break;
5605  case TRIGGER_EVENT_UPDATE:
5606  tgtype_event = TRIGGER_TYPE_UPDATE;
5607  if (row_trigger)
5608  {
5609  Assert(oldslot != NULL);
5610  Assert(newslot != NULL);
5611  ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
5612  ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid2));
5613  }
5614  else
5615  {
5616  Assert(oldslot == NULL);
5617  Assert(newslot == NULL);
5618  ItemPointerSetInvalid(&(new_event.ate_ctid1));
 5619  ItemPointerSetInvalid(&(new_event.ate_ctid2));
 5620  cancel_prior_stmt_triggers(RelationGetRelid(rel),
 5621  CMD_UPDATE, event);
5622  }
 5623  break;
 5624  case TRIGGER_EVENT_TRUNCATE:
 5625  tgtype_event = TRIGGER_TYPE_TRUNCATE;
5626  Assert(oldslot == NULL);
5627  Assert(newslot == NULL);
5628  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5629  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5630  break;
5631  default:
5632  elog(ERROR, "invalid after-trigger event code: %d", event);
5633  tgtype_event = 0; /* keep compiler quiet */
5634  break;
5635  }
5636 
5637  if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
 5638  new_event.ate_flags = (row_trigger && event == TRIGGER_EVENT_UPDATE) ?
 5639  AFTER_TRIGGER_2CTID : AFTER_TRIGGER_1CTID;
 5640  /* else, we'll initialize ate_flags for each trigger */
5641 
5642  tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
5643 
5644  for (i = 0; i < trigdesc->numtriggers; i++)
5645  {
5646  Trigger *trigger = &trigdesc->triggers[i];
5647 
5648  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
5649  tgtype_level,
5650  TRIGGER_TYPE_AFTER,
5651  tgtype_event))
5652  continue;
5653  if (!TriggerEnabled(estate, relinfo, trigger, event,
5654  modifiedCols, oldslot, newslot))
5655  continue;
5656 
5657  if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
5658  {
5659  if (fdw_tuplestore == NULL)
5660  {
5661  fdw_tuplestore = GetCurrentFDWTuplestore();
5662  new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
5663  }
5664  else
5665  /* subsequent event for the same tuple */
5666  new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
5667  }
5668 
5669  /*
5670  * If the trigger is a foreign key enforcement trigger, there are
5671  * certain cases where we can skip queueing the event because we can
5672  * tell by inspection that the FK constraint will still pass.
5673  */
5674  if (TRIGGER_FIRED_BY_UPDATE(event) || TRIGGER_FIRED_BY_DELETE(event))
5675  {
5676  switch (RI_FKey_trigger_type(trigger->tgfoid))
5677  {
5678  case RI_TRIGGER_PK:
5679  /* Update or delete on trigger's PK table */
5680  if (!RI_FKey_pk_upd_check_required(trigger, rel,
5681  oldslot, newslot))
5682  {
5683  /* skip queuing this event */
5684  continue;
5685  }
5686  break;
5687 
5688  case RI_TRIGGER_FK:
5689  /* Update on trigger's FK table */
5690  if (!RI_FKey_fk_upd_check_required(trigger, rel,
5691  oldslot, newslot))
5692  {
5693  /* skip queuing this event */
5694  continue;
5695  }
5696  break;
5697 
5698  case RI_TRIGGER_NONE:
5699  /* Not an FK trigger */
5700  break;
5701  }
5702  }
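 /*
  * For example, "UPDATE pk_table SET non_key_col = 0" does not change
  * any referenced key column, so RI_FKey_pk_upd_check_required()
  * reports that no check is needed and no referential-action event is
  * queued at all.
  */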
5703 
5704  /*
5705  * If the trigger is a deferred unique constraint check trigger, only
5706  * queue it if the unique constraint was potentially violated, which
5707  * we know from index insertion time.
5708  */
5709  if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
5710  {
5711  if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
5712  continue; /* Uniqueness definitely not violated */
5713  }
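 /*
  * For example, an INSERT into a table with a DEFERRABLE UNIQUE
  * constraint queues the recheck trigger only for rows whose index
  * insertion reported a possible conflict; in that case the index's OID
  * appears in recheckIndexes, otherwise the event is skipped here.
  */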
5714 
5715  /*
5716  * Fill in event structure and add it to the current query's queue.
5717  * Note we set ats_table to NULL whenever this trigger doesn't use
5718  * transition tables, to improve sharability of the shared event data.
5719  */
5720  new_shared.ats_event =
5721  (event & TRIGGER_EVENT_OPMASK) |
5722  (row_trigger ? TRIGGER_EVENT_ROW : 0) |
5723  (trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
5724  (trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
5725  new_shared.ats_tgoid = trigger->tgoid;
5726  new_shared.ats_relid = RelationGetRelid(rel);
5727  new_shared.ats_firing_id = 0;
5728  if ((trigger->tgoldtable || trigger->tgnewtable) &&
5729  transition_capture != NULL)
5730  new_shared.ats_table = transition_capture->tcs_private;
5731  else
5732  new_shared.ats_table = NULL;
5733  new_shared.ats_modifiedcols = modifiedCols;
5734 
5735  afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth].events,
 5736  &new_event, &new_shared);