PostgreSQL Source Code (git master)
trigger.c
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/heapam.h"
18 #include "access/sysattr.h"
19 #include "access/htup_details.h"
20 #include "access/xact.h"
21 #include "catalog/catalog.h"
22 #include "catalog/dependency.h"
23 #include "catalog/indexing.h"
24 #include "catalog/objectaccess.h"
25 #include "catalog/pg_constraint.h"
26 #include "catalog/pg_constraint_fn.h"
27 #include "catalog/pg_inherits_fn.h"
28 #include "catalog/pg_proc.h"
29 #include "catalog/pg_trigger.h"
30 #include "catalog/pg_type.h"
31 #include "commands/dbcommands.h"
32 #include "commands/defrem.h"
33 #include "commands/trigger.h"
34 #include "executor/executor.h"
35 #include "miscadmin.h"
36 #include "nodes/bitmapset.h"
37 #include "nodes/makefuncs.h"
38 #include "optimizer/clauses.h"
39 #include "optimizer/var.h"
40 #include "parser/parse_clause.h"
41 #include "parser/parse_collate.h"
42 #include "parser/parse_func.h"
43 #include "parser/parse_relation.h"
44 #include "parser/parsetree.h"
45 #include "pgstat.h"
46 #include "rewrite/rewriteManip.h"
47 #include "storage/bufmgr.h"
48 #include "storage/lmgr.h"
49 #include "tcop/utility.h"
50 #include "utils/acl.h"
51 #include "utils/builtins.h"
52 #include "utils/bytea.h"
53 #include "utils/fmgroids.h"
54 #include "utils/inval.h"
55 #include "utils/lsyscache.h"
56 #include "utils/memutils.h"
57 #include "utils/rel.h"
58 #include "utils/snapmgr.h"
59 #include "utils/syscache.h"
60 #include "utils/tqual.h"
61 #include "utils/tuplestore.h"
62 
63 
64 /* GUC variables */
65 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
66 
67 /* How many levels deep into trigger execution are we? */
68 static int MyTriggerDepth = 0;
69 
70 /*
71  * Note that similar macros also exist in executor/execMain.c. There does not
72  * appear to be any good header to put them into, given the structures that
73  * they use, so we let them be duplicated. Be sure to update all if one needs
74  * to be changed, however.
75  */
76 #define GetUpdatedColumns(relinfo, estate) \
77  (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
78 
79 /* Local function prototypes */
80 static void ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid);
81 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
82 static HeapTuple GetTupleForTrigger(EState *estate,
83  EPQState *epqstate,
84  ResultRelInfo *relinfo,
85  ItemPointer tid,
86  LockTupleMode lockmode,
87  TupleTableSlot **newSlot);
88 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
89  Trigger *trigger, TriggerEvent event,
90  Bitmapset *modifiedCols,
91  HeapTuple oldtup, HeapTuple newtup);
92 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
93  int tgindx,
94  FmgrInfo *finfo,
95  Instrumentation *instr,
96  MemoryContext per_tuple_context);
97 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
98  int event, bool row_trigger,
99  HeapTuple oldtup, HeapTuple newtup,
100  List *recheckIndexes, Bitmapset *modifiedCols,
101  TransitionCaptureState *transition_capture);
102 static void AfterTriggerEnlargeQueryState(void);
103 
104 
105 /*
106  * Create a trigger. Returns the address of the created trigger.
107  *
108  * queryString is the source text of the CREATE TRIGGER command.
109  * This must be supplied if a whenClause is specified, else it can be NULL.
110  *
111  * relOid, if nonzero, is the relation on which the trigger should be
112  * created. If zero, the name provided in the statement will be looked up.
113  *
114  * refRelOid, if nonzero, is the relation to which the constraint trigger
115  * refers. If zero, the constraint relation name provided in the statement
116  * will be looked up as needed.
117  *
118  * constraintOid, if nonzero, says that this trigger is being created
119  * internally to implement that constraint. A suitable pg_depend entry will
120  * be made to link the trigger to that constraint. constraintOid is zero when
121  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
122  * TRIGGER, we build a pg_constraint entry internally.)
123  *
124  * indexOid, if nonzero, is the OID of an index associated with the constraint.
125  * We do nothing with this except store it into pg_trigger.tgconstrindid.
126  *
127  * If isInternal is true then this is an internally-generated trigger.
128  * This argument sets the tgisinternal field of the pg_trigger entry, and
129  * if TRUE causes us to modify the given trigger name to ensure uniqueness.
130  *
131  * When isInternal is not true we require ACL_TRIGGER permissions on the
132  * relation, as well as ACL_EXECUTE on the trigger function. For internal
133  * triggers the caller must apply any required permission checks.
134  *
135  * Note: can return InvalidObjectAddress if we decided to not create a trigger
136  * at all, but a foreign-key constraint. This is a kluge for backwards
137  * compatibility.
138  */
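 /*
  * Illustrative only (not part of trigger.c): the kind of SQL that arrives
  * here.  A plain CREATE TRIGGER comes in with constraintOid = 0, while a
  * user-entered CREATE CONSTRAINT TRIGGER causes a pg_constraint entry to be
  * built further below.  Table and function names are hypothetical.
  *
  *   CREATE TRIGGER audit_upd
  *       AFTER UPDATE ON accounts
  *       FOR EACH ROW
  *       EXECUTE PROCEDURE audit_row_change();
  *
  *   CREATE CONSTRAINT TRIGGER check_balance
  *       AFTER INSERT OR UPDATE ON accounts
  *       DEFERRABLE INITIALLY DEFERRED
  *       FOR EACH ROW
  *       EXECUTE PROCEDURE check_balance();
  */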
139 ObjectAddress
140 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
141  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
142  bool isInternal)
143 {
144  int16 tgtype;
145  int ncolumns;
146  int16 *columns;
147  int2vector *tgattr;
148  Node *whenClause;
149  List *whenRtable;
150  char *qual;
151  Datum values[Natts_pg_trigger];
152  bool nulls[Natts_pg_trigger];
153  Relation rel;
154  AclResult aclresult;
155  Relation tgrel;
156  SysScanDesc tgscan;
157  ScanKeyData key;
158  Relation pgrel;
159  HeapTuple tuple;
160  Oid fargtypes[1]; /* dummy */
161  Oid funcoid;
162  Oid funcrettype;
163  Oid trigoid;
164  char internaltrigname[NAMEDATALEN];
165  char *trigname;
166  Oid constrrelid = InvalidOid;
167  ObjectAddress myself,
168  referenced;
169  char *oldtablename = NULL;
170  char *newtablename = NULL;
171 
172  if (OidIsValid(relOid))
173  rel = heap_open(relOid, ShareRowExclusiveLock);
174  else
175  rel = heap_openrv(stmt->relation, ShareRowExclusiveLock);
176 
177  /*
178  * Triggers must be on tables or views, and there are additional
179  * relation-type-specific restrictions.
180  */
181  if (rel->rd_rel->relkind == RELKIND_RELATION ||
182  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
183  {
184  /* Tables can't have INSTEAD OF triggers */
185  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
186  stmt->timing != TRIGGER_TYPE_AFTER)
187  ereport(ERROR,
188  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
189  errmsg("\"%s\" is a table",
190  RelationGetRelationName(rel)),
191  errdetail("Tables cannot have INSTEAD OF triggers.")));
192  /* Disallow ROW triggers on partitioned tables */
193  if (stmt->row && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
194  ereport(ERROR,
195  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
196  errmsg("\"%s\" is a partitioned table",
197  RelationGetRelationName(rel)),
198  errdetail("Partitioned tables cannot have ROW triggers.")));
199  }
200  else if (rel->rd_rel->relkind == RELKIND_VIEW)
201  {
202  /*
203  * Views can have INSTEAD OF triggers (which we check below are
204  * row-level), or statement-level BEFORE/AFTER triggers.
205  */
206  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
207  ereport(ERROR,
208  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
209  errmsg("\"%s\" is a view",
210  RelationGetRelationName(rel)),
211  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
212  /* Disallow TRUNCATE triggers on VIEWs */
213  if (TRIGGER_FOR_TRUNCATE(stmt->events))
214  ereport(ERROR,
215  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
216  errmsg("\"%s\" is a view",
217  RelationGetRelationName(rel)),
218  errdetail("Views cannot have TRUNCATE triggers.")));
219  }
220  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
221  {
222  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
223  stmt->timing != TRIGGER_TYPE_AFTER)
224  ereport(ERROR,
225  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
226  errmsg("\"%s\" is a foreign table",
227  RelationGetRelationName(rel)),
228  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
229 
230  if (TRIGGER_FOR_TRUNCATE(stmt->events))
231  ereport(ERROR,
232  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
233  errmsg("\"%s\" is a foreign table",
234  RelationGetRelationName(rel)),
235  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
236 
237  if (stmt->isconstraint)
238  ereport(ERROR,
239  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
240  errmsg("\"%s\" is a foreign table",
241  RelationGetRelationName(rel)),
242  errdetail("Foreign tables cannot have constraint triggers.")));
243  }
244  else
245  ereport(ERROR,
246  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
247  errmsg("\"%s\" is not a table or view",
248  RelationGetRelationName(rel))));
249 
250  if (!allowSystemTableMods && IsSystemRelation(rel))
251  ereport(ERROR,
252  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
253  errmsg("permission denied: \"%s\" is a system catalog",
254  RelationGetRelationName(rel))));
255 
256  if (stmt->isconstraint)
257  {
258  /*
259  * We must take a lock on the target relation to protect against
260  * concurrent drop. It's not clear that AccessShareLock is strong
261  * enough, but we certainly need at least that much... otherwise, we
262  * might end up creating a pg_constraint entry referencing a
263  * nonexistent table.
264  */
265  if (OidIsValid(refRelOid))
266  {
267  LockRelationOid(refRelOid, AccessShareLock);
268  constrrelid = refRelOid;
269  }
270  else if (stmt->constrrel != NULL)
271  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
272  false);
273  }
274 
275  /* permission checks */
276  if (!isInternal)
277  {
278  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
279  ACL_TRIGGER);
280  if (aclresult != ACLCHECK_OK)
281  aclcheck_error(aclresult, ACL_KIND_CLASS,
282  RelationGetRelationName(rel));
283 
284  if (OidIsValid(constrrelid))
285  {
286  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
287  ACL_TRIGGER);
288  if (aclresult != ACLCHECK_OK)
289  aclcheck_error(aclresult, ACL_KIND_CLASS,
290  get_rel_name(constrrelid));
291  }
292  }
293 
294  /* Compute tgtype */
295  TRIGGER_CLEAR_TYPE(tgtype);
296  if (stmt->row)
297  TRIGGER_SETT_ROW(tgtype);
298  tgtype |= stmt->timing;
299  tgtype |= stmt->events;
300 
301  /* Disallow ROW-level TRUNCATE triggers */
302  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
303  ereport(ERROR,
304  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
305  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
306 
307  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
308  if (TRIGGER_FOR_INSTEAD(tgtype))
309  {
310  if (!TRIGGER_FOR_ROW(tgtype))
311  ereport(ERROR,
312  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
313  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
314  if (stmt->whenClause)
315  ereport(ERROR,
316  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
317  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
318  if (stmt->columns != NIL)
319  ereport(ERROR,
320  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
321  errmsg("INSTEAD OF triggers cannot have column lists")));
322  }
323 
324  /*
325  * We don't yet support naming ROW transition variables, but the parser
326  * recognizes the syntax so we can give a nicer message here.
327  *
328  * Per standard, REFERENCING TABLE names are only allowed on AFTER
329  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
330  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
331  * only allowed once. Per standard, OLD may not be specified when
332  * creating a trigger only for INSERT, and NEW may not be specified when
333  * creating a trigger only for DELETE.
334  *
335  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
336  * reference both ROW and TABLE transition data.
337  */
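 /*
  * Illustrative only (not part of trigger.c): a REFERENCING clause that
  * satisfies the rules above -- an AFTER statement-level trigger naming both
  * transition tables.  Table and function names are hypothetical.
  *
  *   CREATE TRIGGER audit_stmt
  *       AFTER UPDATE ON accounts
  *       REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
  *       FOR EACH STATEMENT
  *       EXECUTE PROCEDURE audit_update_stmt();
  */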
338  if (stmt->transitionRels != NIL)
339  {
340  List *varList = stmt->transitionRels;
341  ListCell *lc;
342 
343  foreach(lc, varList)
344  {
345  TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
346 
347  if (!(tt->isTable))
348  ereport(ERROR,
349  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
350  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
351  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
352 
353  /*
354  * Because of the above test, we omit further ROW-related testing
355  * below. If we later allow naming OLD and NEW ROW variables,
356  * adjustments will be needed below.
357  */
358 
359  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
360  ereport(ERROR,
361  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
362  errmsg("\"%s\" is a foreign table",
363  RelationGetRelationName(rel)),
364  errdetail("Triggers on foreign tables cannot have transition tables.")));
365 
366  if (rel->rd_rel->relkind == RELKIND_VIEW)
367  ereport(ERROR,
368  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
369  errmsg("\"%s\" is a view",
370  RelationGetRelationName(rel)),
371  errdetail("Triggers on views cannot have transition tables.")));
372 
373  /*
374  * We currently don't allow row-level triggers with transition
375  * tables on partition or inheritance children. Such triggers
376  * would somehow need to see tuples converted to the format of the
377  * table they're attached to, and it's not clear which subset of
378  * tuples each child should see. See also the prohibitions in
379  * ATExecAttachPartition() and ATExecAddInherit().
380  */
381  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
382  {
383  /* Use appropriate error message. */
384  if (rel->rd_rel->relispartition)
385  ereport(ERROR,
386  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
387  errmsg("ROW triggers with transition tables are not supported on partitions")));
388  else
389  ereport(ERROR,
390  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
391  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
392  }
393 
394  if (stmt->timing != TRIGGER_TYPE_AFTER)
395  ereport(ERROR,
396  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
397  errmsg("transition table name can only be specified for an AFTER trigger")));
398 
399  if (TRIGGER_FOR_TRUNCATE(tgtype))
400  ereport(ERROR,
401  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
402  errmsg("TRUNCATE triggers with transition tables are not supported")));
403 
404  /*
405  * We currently don't allow multi-event triggers ("INSERT OR
406  * UPDATE") with transition tables, because it's not clear how to
407  * handle INSERT ... ON CONFLICT statements which can fire both
408  * INSERT and UPDATE triggers. We show the inserted tuples to
409  * INSERT triggers and the updated tuples to UPDATE triggers, but
410  * it's not yet clear what INSERT OR UPDATE trigger should see.
411  * This restriction could be lifted if we can decide on the right
412  * semantics in a later release.
413  */
414  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
415  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
416  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
417  ereport(ERROR,
418  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
419  errmsg("Transition tables cannot be specified for triggers with more than one event")));
420 
421  if (tt->isNew)
422  {
423  if (!(TRIGGER_FOR_INSERT(tgtype) ||
424  TRIGGER_FOR_UPDATE(tgtype)))
425  ereport(ERROR,
426  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
427  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
428 
429  if (newtablename != NULL)
430  ereport(ERROR,
431  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
432  errmsg("NEW TABLE cannot be specified multiple times")));
433 
434  newtablename = tt->name;
435  }
436  else
437  {
438  if (!(TRIGGER_FOR_DELETE(tgtype) ||
439  TRIGGER_FOR_UPDATE(tgtype)))
440  ereport(ERROR,
441  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
442  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
443 
444  if (oldtablename != NULL)
445  ereport(ERROR,
446  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
447  errmsg("OLD TABLE cannot be specified multiple times")));
448 
449  oldtablename = tt->name;
450  }
451  }
452 
453  if (newtablename != NULL && oldtablename != NULL &&
454  strcmp(newtablename, oldtablename) == 0)
455  ereport(ERROR,
456  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
457  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
458  }
459 
460  /*
461  * Parse the WHEN clause, if any
462  */
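 /*
  * Illustrative only (not part of trigger.c): a WHEN condition of the kind
  * parsed here, where OLD and NEW refer to the row-level transition rows.
  * The column name is hypothetical.
  *
  *   CREATE TRIGGER log_balance_change
  *       AFTER UPDATE ON accounts
  *       FOR EACH ROW
  *       WHEN (OLD.balance IS DISTINCT FROM NEW.balance)
  *       EXECUTE PROCEDURE log_balance();
  */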
463  if (stmt->whenClause)
464  {
465  ParseState *pstate;
466  RangeTblEntry *rte;
467  List *varList;
468  ListCell *lc;
469 
470  /* Set up a pstate to parse with */
471  pstate = make_parsestate(NULL);
472  pstate->p_sourcetext = queryString;
473 
474  /*
475  * Set up RTEs for OLD and NEW references.
476  *
477  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
478  */
479  rte = addRangeTableEntryForRelation(pstate, rel,
480  makeAlias("old", NIL),
481  false, false);
482  addRTEtoQuery(pstate, rte, false, true, true);
483  rte = addRangeTableEntryForRelation(pstate, rel,
484  makeAlias("new", NIL),
485  false, false);
486  addRTEtoQuery(pstate, rte, false, true, true);
487 
488  /* Transform expression. Copy to be sure we don't modify original */
489  whenClause = transformWhereClause(pstate,
490  copyObject(stmt->whenClause),
491  EXPR_KIND_TRIGGER_WHEN,
492  "WHEN");
493  /* we have to fix its collations too */
494  assign_expr_collations(pstate, whenClause);
495 
496  /*
497  * Check for disallowed references to OLD/NEW.
498  *
499  * NB: pull_var_clause is okay here only because we don't allow
500  * subselects in WHEN clauses; it would fail to examine the contents
501  * of subselects.
502  */
503  varList = pull_var_clause(whenClause, 0);
504  foreach(lc, varList)
505  {
506  Var *var = (Var *) lfirst(lc);
507 
508  switch (var->varno)
509  {
510  case PRS2_OLD_VARNO:
511  if (!TRIGGER_FOR_ROW(tgtype))
512  ereport(ERROR,
513  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
514  errmsg("statement trigger's WHEN condition cannot reference column values"),
515  parser_errposition(pstate, var->location)));
516  if (TRIGGER_FOR_INSERT(tgtype))
517  ereport(ERROR,
518  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
519  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
520  parser_errposition(pstate, var->location)));
521  /* system columns are okay here */
522  break;
523  case PRS2_NEW_VARNO:
524  if (!TRIGGER_FOR_ROW(tgtype))
525  ereport(ERROR,
526  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
527  errmsg("statement trigger's WHEN condition cannot reference column values"),
528  parser_errposition(pstate, var->location)));
529  if (TRIGGER_FOR_DELETE(tgtype))
530  ereport(ERROR,
531  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
532  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
533  parser_errposition(pstate, var->location)));
534  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
535  ereport(ERROR,
536  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
537  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
538  parser_errposition(pstate, var->location)));
539  break;
540  default:
541  /* can't happen without add_missing_from, so just elog */
542  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
543  break;
544  }
545  }
546 
547  /* we'll need the rtable for recordDependencyOnExpr */
548  whenRtable = pstate->p_rtable;
549 
550  qual = nodeToString(whenClause);
551 
552  free_parsestate(pstate);
553  }
554  else
555  {
556  whenClause = NULL;
557  whenRtable = NIL;
558  qual = NULL;
559  }
560 
561  /*
562  * Find and validate the trigger function.
563  */
564  funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false);
565  if (!isInternal)
566  {
567  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
568  if (aclresult != ACLCHECK_OK)
569  aclcheck_error(aclresult, ACL_KIND_PROC,
570  NameListToString(stmt->funcname));
571  }
572  funcrettype = get_func_rettype(funcoid);
573  if (funcrettype != TRIGGEROID)
574  {
575  /*
576  * We allow OPAQUE just so we can load old dump files. When we see a
577  * trigger function declared OPAQUE, change it to TRIGGER.
578  */
579  if (funcrettype == OPAQUEOID)
580  {
581  ereport(WARNING,
582  (errmsg("changing return type of function %s from %s to %s",
583  NameListToString(stmt->funcname),
584  "opaque", "trigger")));
585  SetFunctionReturnType(funcoid, TRIGGEROID);
586  }
587  else
588  ereport(ERROR,
589  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
590  errmsg("function %s must return type %s",
591  NameListToString(stmt->funcname), "trigger")));
592  }
593 
594  /*
595  * If the command is a user-entered CREATE CONSTRAINT TRIGGER command that
596  * references one of the built-in RI_FKey trigger functions, assume it is
597  * from a dump of a pre-7.3 foreign key constraint, and take steps to
598  * convert this legacy representation into a regular foreign key
599  * constraint. Ugly, but necessary for loading old dump files.
600  */
601  if (stmt->isconstraint && !isInternal &&
602  list_length(stmt->args) >= 6 &&
603  (list_length(stmt->args) % 2) == 0 &&
604  RI_FKey_trigger_type(funcoid) != RI_TRIGGER_NONE)
605  {
606  /* Keep lock on target rel until end of xact */
607  heap_close(rel, NoLock);
608 
609  ConvertTriggerToFK(stmt, funcoid);
610 
611  return InvalidObjectAddress;
612  }
613 
614  /*
615  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
616  * corresponding pg_constraint entry.
617  */
618  if (stmt->isconstraint && !OidIsValid(constraintOid))
619  {
620  /* Internal callers should have made their own constraints */
621  Assert(!isInternal);
622  constraintOid = CreateConstraintEntry(stmt->trigname,
623  RelationGetNamespace(rel),
624  CONSTRAINT_TRIGGER,
625  stmt->deferrable,
626  stmt->initdeferred,
627  true,
628  RelationGetRelid(rel),
629  NULL, /* no conkey */
630  0,
631  InvalidOid, /* no domain */
632  InvalidOid, /* no index */
633  InvalidOid, /* no foreign key */
634  NULL,
635  NULL,
636  NULL,
637  NULL,
638  0,
639  ' ',
640  ' ',
641  ' ',
642  NULL, /* no exclusion */
643  NULL, /* no check constraint */
644  NULL,
645  NULL,
646  true, /* islocal */
647  0, /* inhcount */
648  true, /* isnoinherit */
649  isInternal); /* is_internal */
650  }
651 
652  /*
653  * Generate the trigger's OID now, so that we can use it in the name if
654  * needed.
655  */
656  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
657 
658  trigoid = GetNewOid(tgrel);
659 
660  /*
661  * If trigger is internally generated, modify the provided trigger name to
662  * ensure uniqueness by appending the trigger OID. (Callers will usually
663  * supply a simple constant trigger name in these cases.)
664  */
665  if (isInternal)
666  {
667  snprintf(internaltrigname, sizeof(internaltrigname),
668  "%s_%u", stmt->trigname, trigoid);
669  trigname = internaltrigname;
670  }
671  else
672  {
673  /* user-defined trigger; use the specified trigger name as-is */
674  trigname = stmt->trigname;
675  }
676 
677  /*
678  * Scan pg_trigger for existing triggers on relation. We do this only to
679  * give a nice error message if there's already a trigger of the same
680  * name. (The unique index on tgrelid/tgname would complain anyway.) We
681  * can skip this for internally generated triggers, since the name
682  * modification above should be sufficient.
683  *
684  * NOTE that this is cool only because we have ShareRowExclusiveLock on
685  * the relation, so the trigger set won't be changing underneath us.
686  */
687  if (!isInternal)
688  {
689  ScanKeyInit(&key,
690  Anum_pg_trigger_tgrelid,
691  BTEqualStrategyNumber, F_OIDEQ,
692  ObjectIdGetDatum(RelationGetRelid(rel)));
693  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
694  NULL, 1, &key);
695  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
696  {
697  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
698 
699  if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
700  ereport(ERROR,
701  (errcode(ERRCODE_DUPLICATE_OBJECT),
702  errmsg("trigger \"%s\" for relation \"%s\" already exists",
703  trigname, RelationGetRelationName(rel))));
704  }
705  systable_endscan(tgscan);
706  }
707 
708  /*
709  * Build the new pg_trigger tuple.
710  */
711  memset(nulls, false, sizeof(nulls));
712 
713  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
714  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
715  CStringGetDatum(trigname));
716  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
717  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
718  values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(TRIGGER_FIRES_ON_ORIGIN);
719  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
720  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
721  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
722  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
723  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
724  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
725 
726  if (stmt->args)
727  {
728  ListCell *le;
729  char *args;
730  int16 nargs = list_length(stmt->args);
731  int len = 0;
732 
733  foreach(le, stmt->args)
734  {
735  char *ar = strVal(lfirst(le));
736 
737  len += strlen(ar) + 4;
738  for (; *ar; ar++)
739  {
740  if (*ar == '\\')
741  len++;
742  }
743  }
744  args = (char *) palloc(len + 1);
745  args[0] = '\0';
746  foreach(le, stmt->args)
747  {
748  char *s = strVal(lfirst(le));
749  char *d = args + strlen(args);
750 
751  while (*s)
752  {
753  if (*s == '\\')
754  *d++ = '\\';
755  *d++ = *s++;
756  }
757  strcpy(d, "\\000");
758  }
759  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
760  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
761  CStringGetDatum(args));
762  }
763  else
764  {
765  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
766  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
767  CStringGetDatum(""));
768  }
769 
770  /* build column number array if it's a column-specific trigger */
771  ncolumns = list_length(stmt->columns);
772  if (ncolumns == 0)
773  columns = NULL;
774  else
775  {
776  ListCell *cell;
777  int i = 0;
778 
779  columns = (int16 *) palloc(ncolumns * sizeof(int16));
780  foreach(cell, stmt->columns)
781  {
782  char *name = strVal(lfirst(cell));
783  int16 attnum;
784  int j;
785 
786  /* Lookup column name. System columns are not allowed */
787  attnum = attnameAttNum(rel, name, false);
788  if (attnum == InvalidAttrNumber)
789  ereport(ERROR,
790  (errcode(ERRCODE_UNDEFINED_COLUMN),
791  errmsg("column \"%s\" of relation \"%s\" does not exist",
792  name, RelationGetRelationName(rel))));
793 
794  /* Check for duplicates */
795  for (j = i - 1; j >= 0; j--)
796  {
797  if (columns[j] == attnum)
798  ereport(ERROR,
799  (errcode(ERRCODE_DUPLICATE_COLUMN),
800  errmsg("column \"%s\" specified more than once",
801  name)));
802  }
803 
804  columns[i++] = attnum;
805  }
806  }
807  tgattr = buildint2vector(columns, ncolumns);
808  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
809 
810  /* set tgqual if trigger has WHEN clause */
811  if (qual)
812  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
813  else
814  nulls[Anum_pg_trigger_tgqual - 1] = true;
815 
816  if (oldtablename)
817  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
818  CStringGetDatum(oldtablename));
819  else
820  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
821  if (newtablename)
822  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
823  CStringGetDatum(newtablename));
824  else
825  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
826 
827  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
828 
829  /* force tuple to have the desired OID */
830  HeapTupleSetOid(tuple, trigoid);
831 
832  /*
833  * Insert tuple into pg_trigger.
834  */
835  CatalogTupleInsert(tgrel, tuple);
836 
837  heap_freetuple(tuple);
838  heap_close(tgrel, RowExclusiveLock);
839 
840  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
841  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
842  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
843  if (oldtablename)
844  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
845  if (newtablename)
846  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
847 
848  /*
849  * Update relation's pg_class entry. Crucial side-effect: other backends
850  * (and this one too!) are sent SI message to make them rebuild relcache
851  * entries.
852  */
853  pgrel = heap_open(RelationRelationId, RowExclusiveLock);
854  tuple = SearchSysCacheCopy1(RELOID,
855  ObjectIdGetDatum(RelationGetRelid(rel)));
856  if (!HeapTupleIsValid(tuple))
857  elog(ERROR, "cache lookup failed for relation %u",
858  RelationGetRelid(rel));
859 
860  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
861 
862  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
863 
864  heap_freetuple(tuple);
865  heap_close(pgrel, RowExclusiveLock);
866 
867  /*
868  * We used to try to update the rel's relcache entry here, but that's
869  * fairly pointless since it will happen as a byproduct of the upcoming
870  * CommandCounterIncrement...
871  */
872 
873  /*
874  * Record dependencies for trigger. Always place a normal dependency on
875  * the function.
876  */
877  myself.classId = TriggerRelationId;
878  myself.objectId = trigoid;
879  myself.objectSubId = 0;
880 
881  referenced.classId = ProcedureRelationId;
882  referenced.objectId = funcoid;
883  referenced.objectSubId = 0;
884  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
885 
886  if (isInternal && OidIsValid(constraintOid))
887  {
888  /*
889  * Internally-generated trigger for a constraint, so make it an
890  * internal dependency of the constraint. We can skip depending on
891  * the relation(s), as there'll be an indirect dependency via the
892  * constraint.
893  */
894  referenced.classId = ConstraintRelationId;
895  referenced.objectId = constraintOid;
896  referenced.objectSubId = 0;
897  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
898  }
899  else
900  {
901  /*
902  * User CREATE TRIGGER, so place dependencies. We make trigger be
903  * auto-dropped if its relation is dropped or if the FK relation is
904  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
905  */
906  referenced.classId = RelationRelationId;
907  referenced.objectId = RelationGetRelid(rel);
908  referenced.objectSubId = 0;
909  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
910  if (OidIsValid(constrrelid))
911  {
912  referenced.classId = RelationRelationId;
913  referenced.objectId = constrrelid;
914  referenced.objectSubId = 0;
915  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
916  }
917  /* Not possible to have an index dependency in this case */
918  Assert(!OidIsValid(indexOid));
919 
920  /*
921  * If it's a user-specified constraint trigger, make the constraint
922  * internally dependent on the trigger instead of vice versa.
923  */
924  if (OidIsValid(constraintOid))
925  {
926  referenced.classId = ConstraintRelationId;
927  referenced.objectId = constraintOid;
928  referenced.objectSubId = 0;
929  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
930  }
931  }
932 
933  /* If column-specific trigger, add normal dependencies on columns */
934  if (columns != NULL)
935  {
936  int i;
937 
938  referenced.classId = RelationRelationId;
939  referenced.objectId = RelationGetRelid(rel);
940  for (i = 0; i < ncolumns; i++)
941  {
942  referenced.objectSubId = columns[i];
943  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
944  }
945  }
946 
947  /*
948  * If it has a WHEN clause, add dependencies on objects mentioned in the
949  * expression (eg, functions, as well as any columns used).
950  */
951  if (whenClause != NULL)
952  recordDependencyOnExpr(&myself, whenClause, whenRtable,
953  DEPENDENCY_NORMAL);
954 
955  /* Post creation hook for new trigger */
956  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
957  isInternal);
958 
959  /* Keep lock on target rel until end of xact */
960  heap_close(rel, NoLock);
961 
962  return myself;
963 }
964 
965 
966 /*
967  * Convert legacy (pre-7.3) CREATE CONSTRAINT TRIGGER commands into
968  * full-fledged foreign key constraints.
969  *
970  * The conversion is complex because a pre-7.3 foreign key involved three
971  * separate triggers, which were reported separately in dumps. While the
972  * single trigger on the referencing table adds no new information, we need
973  * to know the trigger functions of both of the triggers on the referenced
974  * table to build the constraint declaration. Also, due to lack of proper
975  * dependency checking pre-7.3, it is possible that the source database had
976  * an incomplete set of triggers resulting in an only partially enforced
977  * FK constraint. (This would happen if one of the tables had been dropped
978  * and re-created, but only if the DB had been affected by a 7.0 pg_dump bug
979  * that caused loss of tgconstrrelid information.) We choose to translate to
980  * an FK constraint only when we've seen all three triggers of a set. This is
981  * implemented by storing unmatched items in a list in TopMemoryContext.
982  * We match triggers together by comparing the trigger arguments (which
983  * include constraint name, table and column names, so should be good enough).
984  */
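 /*
  * Illustrative only (not part of trigger.c): a pre-7.3 dump emitted one
  * constraint trigger per RI check, roughly of this shape.  The trigger
  * arguments (constraint name, FK table, PK table, match type, then
  * alternating FK/PK column names) are exactly what the code below parses.
  * Table and column names are hypothetical.
  *
  *   CREATE CONSTRAINT TRIGGER "<unnamed>"
  *       AFTER INSERT OR UPDATE ON orders
  *       FROM customers NOT DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW
  *       EXECUTE PROCEDURE "RI_FKey_check_ins" ('<unnamed>', 'orders',
  *                                              'customers', 'UNSPECIFIED',
  *                                              'customer_id', 'id');
  */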
985 typedef struct
986 {
987  List *args; /* list of (T_String) Values or NIL */
988  Oid funcoids[3]; /* OIDs of trigger functions */
989  /* The three function OIDs are stored in the order update, delete, child */
990 } OldTriggerInfo;
991 
992 static void
993 ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid)
994 {
995  static List *info_list = NIL;
996 
997  static const char *const funcdescr[3] = {
998  gettext_noop("Found referenced table's UPDATE trigger."),
999  gettext_noop("Found referenced table's DELETE trigger."),
1000  gettext_noop("Found referencing table's trigger.")
1001  };
1002 
1003  char *constr_name;
1004  char *fk_table_name;
1005  char *pk_table_name;
1006  char fk_matchtype = FKCONSTR_MATCH_SIMPLE;
1007  List *fk_attrs = NIL;
1008  List *pk_attrs = NIL;
1009  StringInfoData buf;
1010  int funcnum;
1011  OldTriggerInfo *info = NULL;
1012  ListCell *l;
1013  int i;
1014 
1015  /* Parse out the trigger arguments */
1016  constr_name = strVal(linitial(stmt->args));
1017  fk_table_name = strVal(lsecond(stmt->args));
1018  pk_table_name = strVal(lthird(stmt->args));
1019  i = 0;
1020  foreach(l, stmt->args)
1021  {
1022  Value *arg = (Value *) lfirst(l);
1023 
1024  i++;
1025  if (i < 4) /* skip constraint and table names */
1026  continue;
1027  if (i == 4) /* handle match type */
1028  {
1029  if (strcmp(strVal(arg), "FULL") == 0)
1030  fk_matchtype = FKCONSTR_MATCH_FULL;
1031  else
1032  fk_matchtype = FKCONSTR_MATCH_SIMPLE;
1033  continue;
1034  }
1035  if (i % 2)
1036  fk_attrs = lappend(fk_attrs, arg);
1037  else
1038  pk_attrs = lappend(pk_attrs, arg);
1039  }
1040 
1041  /* Prepare description of constraint for use in messages */
1042  initStringInfo(&buf);
1043  appendStringInfo(&buf, "FOREIGN KEY %s(",
1044  quote_identifier(fk_table_name));
1045  i = 0;
1046  foreach(l, fk_attrs)
1047  {
1048  Value *arg = (Value *) lfirst(l);
1049 
1050  if (i++ > 0)
1051  appendStringInfoChar(&buf, ',');
1052  appendStringInfoString(&buf, quote_identifier(strVal(arg)));
1053  }
1054  appendStringInfo(&buf, ") REFERENCES %s(",
1055  quote_identifier(pk_table_name));
1056  i = 0;
1057  foreach(l, pk_attrs)
1058  {
1059  Value *arg = (Value *) lfirst(l);
1060 
1061  if (i++ > 0)
1062  appendStringInfoChar(&buf, ',');
1063  appendStringInfoString(&buf, quote_identifier(strVal(arg)));
1064  }
1065  appendStringInfoChar(&buf, ')');
1066 
1067  /* Identify class of trigger --- update, delete, or referencing-table */
1068  switch (funcoid)
1069  {
1070  case F_RI_FKEY_CASCADE_UPD:
1071  case F_RI_FKEY_RESTRICT_UPD:
1072  case F_RI_FKEY_SETNULL_UPD:
1073  case F_RI_FKEY_SETDEFAULT_UPD:
1074  case F_RI_FKEY_NOACTION_UPD:
1075  funcnum = 0;
1076  break;
1077 
1078  case F_RI_FKEY_CASCADE_DEL:
1079  case F_RI_FKEY_RESTRICT_DEL:
1080  case F_RI_FKEY_SETNULL_DEL:
1081  case F_RI_FKEY_SETDEFAULT_DEL:
1082  case F_RI_FKEY_NOACTION_DEL:
1083  funcnum = 1;
1084  break;
1085 
1086  default:
1087  funcnum = 2;
1088  break;
1089  }
1090 
1091  /* See if we have a match to this trigger */
1092  foreach(l, info_list)
1093  {
1094  info = (OldTriggerInfo *) lfirst(l);
1095  if (info->funcoids[funcnum] == InvalidOid &&
1096  equal(info->args, stmt->args))
1097  {
1098  info->funcoids[funcnum] = funcoid;
1099  break;
1100  }
1101  }
1102 
1103  if (l == NULL)
1104  {
1105  /* First trigger of set, so create a new list entry */
1106  MemoryContext oldContext;
1107 
1108  ereport(NOTICE,
1109  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1110  constr_name, buf.data),
1111  errdetail_internal("%s", _(funcdescr[funcnum]))));
1112  oldContext = MemoryContextSwitchTo(TopMemoryContext);
1113  info = (OldTriggerInfo *) palloc0(sizeof(OldTriggerInfo));
1114  info->args = copyObject(stmt->args);
1115  info->funcoids[funcnum] = funcoid;
1116  info_list = lappend(info_list, info);
1117  MemoryContextSwitchTo(oldContext);
1118  }
1119  else if (info->funcoids[0] == InvalidOid ||
1120  info->funcoids[1] == InvalidOid ||
1121  info->funcoids[2] == InvalidOid)
1122  {
1123  /* Second trigger of set */
1124  ereport(NOTICE,
1125  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1126  constr_name, buf.data),
1127  errdetail_internal("%s", _(funcdescr[funcnum]))));
1128  }
1129  else
1130  {
1131  /* OK, we have a set, so make the FK constraint ALTER TABLE cmd */
1132  AlterTableStmt *atstmt = makeNode(AlterTableStmt);
1133  AlterTableCmd *atcmd = makeNode(AlterTableCmd);
1134  Constraint *fkcon = makeNode(Constraint);
1135  PlannedStmt *wrapper = makeNode(PlannedStmt);
1136 
1137  ereport(NOTICE,
1138  (errmsg("converting trigger group into constraint \"%s\" %s",
1139  constr_name, buf.data),
1140  errdetail_internal("%s", _(funcdescr[funcnum]))));
1141  fkcon->contype = CONSTR_FOREIGN;
1142  fkcon->location = -1;
1143  if (funcnum == 2)
1144  {
1145  /* This trigger is on the FK table */
1146  atstmt->relation = stmt->relation;
1147  if (stmt->constrrel)
1148  fkcon->pktable = stmt->constrrel;
1149  else
1150  {
1151  /* Work around ancient pg_dump bug that omitted constrrel */
1152  fkcon->pktable = makeRangeVar(NULL, pk_table_name, -1);
1153  }
1154  }
1155  else
1156  {
1157  /* This trigger is on the PK table */
1158  fkcon->pktable = stmt->relation;
1159  if (stmt->constrrel)
1160  atstmt->relation = stmt->constrrel;
1161  else
1162  {
1163  /* Work around ancient pg_dump bug that omitted constrrel */
1164  atstmt->relation = makeRangeVar(NULL, fk_table_name, -1);
1165  }
1166  }
1167  atstmt->cmds = list_make1(atcmd);
1168  atstmt->relkind = OBJECT_TABLE;
1169  atcmd->subtype = AT_AddConstraint;
1170  atcmd->def = (Node *) fkcon;
1171  if (strcmp(constr_name, "<unnamed>") == 0)
1172  fkcon->conname = NULL;
1173  else
1174  fkcon->conname = constr_name;
1175  fkcon->fk_attrs = fk_attrs;
1176  fkcon->pk_attrs = pk_attrs;
1177  fkcon->fk_matchtype = fk_matchtype;
1178  switch (info->funcoids[0])
1179  {
1180  case F_RI_FKEY_NOACTION_UPD:
1181  fkcon->fk_upd_action = FKCONSTR_ACTION_NOACTION;
1182  break;
1183  case F_RI_FKEY_CASCADE_UPD:
1184  fkcon->fk_upd_action = FKCONSTR_ACTION_CASCADE;
1185  break;
1186  case F_RI_FKEY_RESTRICT_UPD:
1187  fkcon->fk_upd_action = FKCONSTR_ACTION_RESTRICT;
1188  break;
1189  case F_RI_FKEY_SETNULL_UPD:
1190  fkcon->fk_upd_action = FKCONSTR_ACTION_SETNULL;
1191  break;
1192  case F_RI_FKEY_SETDEFAULT_UPD:
1193  fkcon->fk_upd_action = FKCONSTR_ACTION_SETDEFAULT;
1194  break;
1195  default:
1196  /* can't get here because of earlier checks */
1197  elog(ERROR, "confused about RI update function");
1198  }
1199  switch (info->funcoids[1])
1200  {
1201  case F_RI_FKEY_NOACTION_DEL:
1202  fkcon->fk_del_action = FKCONSTR_ACTION_NOACTION;
1203  break;
1204  case F_RI_FKEY_CASCADE_DEL:
1205  fkcon->fk_del_action = FKCONSTR_ACTION_CASCADE;
1206  break;
1207  case F_RI_FKEY_RESTRICT_DEL:
1208  fkcon->fk_del_action = FKCONSTR_ACTION_RESTRICT;
1209  break;
1210  case F_RI_FKEY_SETNULL_DEL:
1211  fkcon->fk_del_action = FKCONSTR_ACTION_SETNULL;
1212  break;
1213  case F_RI_FKEY_SETDEFAULT_DEL:
1214  fkcon->fk_del_action = FKCONSTR_ACTION_SETDEFAULT;
1215  break;
1216  default:
1217  /* can't get here because of earlier checks */
1218  elog(ERROR, "confused about RI delete function");
1219  }
1220  fkcon->deferrable = stmt->deferrable;
1221  fkcon->initdeferred = stmt->initdeferred;
1222  fkcon->skip_validation = false;
1223  fkcon->initially_valid = true;
1224 
1225  /* finally, wrap it in a dummy PlannedStmt */
1226  wrapper->commandType = CMD_UTILITY;
1227  wrapper->canSetTag = false;
1228  wrapper->utilityStmt = (Node *) atstmt;
1229  wrapper->stmt_location = -1;
1230  wrapper->stmt_len = -1;
1231 
1232  /* ... and execute it */
1233  ProcessUtility(wrapper,
1234  "(generated ALTER TABLE ADD FOREIGN KEY command)",
1235  PROCESS_UTILITY_SUBCOMMAND, NULL, NULL,
1236  None_Receiver, NULL);
1237 
1238  /* Remove the matched item from the list */
1239  info_list = list_delete_ptr(info_list, info);
1240  pfree(info);
1241  /* We leak the copied args ... not worth worrying about */
1242  }
1243 }
1244 
1245 /*
1246  * Guts of trigger deletion.
1247  */
1248 void
1249 RemoveTriggerById(Oid trigOid)
1250 {
1251  Relation tgrel;
1252  SysScanDesc tgscan;
1253  ScanKeyData skey[1];
1254  HeapTuple tup;
1255  Oid relid;
1256  Relation rel;
1257 
1258  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1259 
1260  /*
1261  * Find the trigger to delete.
1262  */
1263  ScanKeyInit(&skey[0],
1264  ObjectIdAttributeNumber,
1265  BTEqualStrategyNumber, F_OIDEQ,
1266  ObjectIdGetDatum(trigOid));
1267 
1268  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1269  NULL, 1, skey);
1270 
1271  tup = systable_getnext(tgscan);
1272  if (!HeapTupleIsValid(tup))
1273  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1274 
1275  /*
1276  * Open and exclusive-lock the relation the trigger belongs to.
1277  */
1278  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1279 
1280  rel = heap_open(relid, AccessExclusiveLock);
1281 
1282  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1283  rel->rd_rel->relkind != RELKIND_VIEW &&
1284  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1285  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1286  ereport(ERROR,
1287  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1288  errmsg("\"%s\" is not a table, view, or foreign table",
1289  RelationGetRelationName(rel))));
1290 
1291  if (!allowSystemTableMods && IsSystemRelation(rel))
1292  ereport(ERROR,
1293  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1294  errmsg("permission denied: \"%s\" is a system catalog",
1295  RelationGetRelationName(rel))));
1296 
1297  /*
1298  * Delete the pg_trigger tuple.
1299  */
1300  CatalogTupleDelete(tgrel, &tup->t_self);
1301 
1302  systable_endscan(tgscan);
1303  heap_close(tgrel, RowExclusiveLock);
1304 
1305  /*
1306  * We do not bother to try to determine whether any other triggers remain,
1307  * which would be needed in order to decide whether it's safe to clear the
1308  * relation's relhastriggers. (In any case, there might be a concurrent
1309  * process adding new triggers.) Instead, just force a relcache inval to
1310  * make other backends (and this one too!) rebuild their relcache entries.
1311  * There's no great harm in leaving relhastriggers true even if there are
1312  * no triggers left.
1313  */
1314  CacheInvalidateRelcache(rel);
1315 
1316  /* Keep lock on trigger's rel until end of xact */
1317  heap_close(rel, NoLock);
1318 }
1319 
1320 /*
1321  * get_trigger_oid - Look up a trigger by name to find its OID.
1322  *
1323  * If missing_ok is false, throw an error if trigger not found. If
1324  * true, just return InvalidOid.
1325  */
1326 Oid
1327 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1328 {
1329  Relation tgrel;
1330  ScanKeyData skey[2];
1331  SysScanDesc tgscan;
1332  HeapTuple tup;
1333  Oid oid;
1334 
1335  /*
1336  * Find the trigger, verify permissions, set up object address
1337  */
1338  tgrel = heap_open(TriggerRelationId, AccessShareLock);
1339 
1340  ScanKeyInit(&skey[0],
1341  Anum_pg_trigger_tgrelid,
1342  BTEqualStrategyNumber, F_OIDEQ,
1343  ObjectIdGetDatum(relid));
1344  ScanKeyInit(&skey[1],
1345  Anum_pg_trigger_tgname,
1346  BTEqualStrategyNumber, F_NAMEEQ,
1347  CStringGetDatum(trigname));
1348 
1349  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1350  NULL, 2, skey);
1351 
1352  tup = systable_getnext(tgscan);
1353 
1354  if (!HeapTupleIsValid(tup))
1355  {
1356  if (!missing_ok)
1357  ereport(ERROR,
1358  (errcode(ERRCODE_UNDEFINED_OBJECT),
1359  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1360  trigname, get_rel_name(relid))));
1361  oid = InvalidOid;
1362  }
1363  else
1364  {
1365  oid = HeapTupleGetOid(tup);
1366  }
1367 
1368  systable_endscan(tgscan);
1369  heap_close(tgrel, AccessShareLock);
1370  return oid;
1371 }
1372 
1373 /*
1374  * Perform permissions and integrity checks before acquiring a relation lock.
1375  */
1376 static void
1377 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1378  void *arg)
1379 {
1380  HeapTuple tuple;
1381  Form_pg_class form;
1382 
1383  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1384  if (!HeapTupleIsValid(tuple))
1385  return; /* concurrently dropped */
1386  form = (Form_pg_class) GETSTRUCT(tuple);
1387 
1388  /* only tables and views can have triggers */
1389  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1390  form->relkind != RELKIND_FOREIGN_TABLE &&
1391  form->relkind != RELKIND_PARTITIONED_TABLE)
1392  ereport(ERROR,
1393  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1394  errmsg("\"%s\" is not a table, view, or foreign table",
1395  rv->relname)));
1396 
1397  /* you must own the table to rename one of its triggers */
1398  if (!pg_class_ownercheck(relid, GetUserId()))
1399  aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname);
1400  if (!allowSystemTableMods && IsSystemClass(relid, form))
1401  ereport(ERROR,
1402  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1403  errmsg("permission denied: \"%s\" is a system catalog",
1404  rv->relname)));
1405 
1406  ReleaseSysCache(tuple);
1407 }
1408 
1409 /*
1410  * renametrig - changes the name of a trigger on a relation
1411  *
1412  * trigger name is changed in trigger catalog.
1413  * No record of the previous name is kept.
1414  *
1415  * get proper relrelation from relation catalog (if not arg)
1416  * scan trigger catalog
1417  * for name conflict (within rel)
1418  * for original trigger (if not arg)
1419  * modify tgname in trigger tuple
1420  * update row in catalog
1421  */
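 /*
  * Illustrative only (not part of trigger.c): the command handled here is
  *
  *   ALTER TRIGGER log_balance_change ON accounts RENAME TO log_change;
  *
  * (trigger and table names are hypothetical).
  */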
1422 ObjectAddress
1423 renametrig(RenameStmt *stmt)
1424 {
1425  Oid tgoid;
1426  Relation targetrel;
1427  Relation tgrel;
1428  HeapTuple tuple;
1429  SysScanDesc tgscan;
1430  ScanKeyData key[2];
1431  Oid relid;
1432  ObjectAddress address;
1433 
1434  /*
1435  * Look up name, check permissions, and acquire lock (which we will NOT
1436  * release until end of transaction).
1437  */
1438  relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1439  false, false,
1440  RangeVarCallbackForRenameTrigger,
1441  NULL);
1442 
1443  /* Have lock already, so just need to build relcache entry. */
1444  targetrel = relation_open(relid, NoLock);
1445 
1446  /*
1447  * Scan pg_trigger twice for existing triggers on relation. We do this in
1448  * order to ensure a trigger does not exist with newname (The unique index
1449  * on tgrelid/tgname would complain anyway) and to ensure a trigger does
1450  * exist with oldname.
1451  *
1452  * NOTE that this is cool only because we have AccessExclusiveLock on the
1453  * relation, so the trigger set won't be changing underneath us.
1454  */
1455  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1456 
1457  /*
1458  * First pass -- look for name conflict
1459  */
1460  ScanKeyInit(&key[0],
1461  Anum_pg_trigger_tgrelid,
1462  BTEqualStrategyNumber, F_OIDEQ,
1463  ObjectIdGetDatum(relid));
1464  ScanKeyInit(&key[1],
1465  Anum_pg_trigger_tgname,
1466  BTEqualStrategyNumber, F_NAMEEQ,
1467  PointerGetDatum(stmt->newname));
1468  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1469  NULL, 2, key);
1470  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1471  ereport(ERROR,
1472  (errcode(ERRCODE_DUPLICATE_OBJECT),
1473  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1474  stmt->newname, RelationGetRelationName(targetrel))));
1475  systable_endscan(tgscan);
1476 
1477  /*
1478  * Second pass -- look for trigger existing with oldname and update
1479  */
1480  ScanKeyInit(&key[0],
1481  Anum_pg_trigger_tgrelid,
1482  BTEqualStrategyNumber, F_OIDEQ,
1483  ObjectIdGetDatum(relid));
1484  ScanKeyInit(&key[1],
1485  Anum_pg_trigger_tgname,
1486  BTEqualStrategyNumber, F_NAMEEQ,
1487  PointerGetDatum(stmt->subname));
1488  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1489  NULL, 2, key);
1490  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1491  {
1492  tgoid = HeapTupleGetOid(tuple);
1493 
1494  /*
1495  * Update pg_trigger tuple with new tgname.
1496  */
1497  tuple = heap_copytuple(tuple); /* need a modifiable copy */
1498 
1499  namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname,
1500  stmt->newname);
1501 
1502  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1503 
1504  InvokeObjectPostAlterHook(TriggerRelationId,
1505  HeapTupleGetOid(tuple), 0);
1506 
1507  /*
1508  * Invalidate relation's relcache entry so that other backends (and
1509  * this one too!) are sent SI message to make them rebuild relcache
1510  * entries. (Ideally this should happen automatically...)
1511  */
1512  CacheInvalidateRelcache(targetrel);
1513  }
1514  else
1515  {
1516  ereport(ERROR,
1517  (errcode(ERRCODE_UNDEFINED_OBJECT),
1518  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1519  stmt->subname, RelationGetRelationName(targetrel))));
1520  }
1521 
1522  ObjectAddressSet(address, TriggerRelationId, tgoid);
1523 
1524  systable_endscan(tgscan);
1525 
1526  heap_close(tgrel, RowExclusiveLock);
1527 
1528  /*
1529  * Close rel, but keep exclusive lock!
1530  */
1531  relation_close(targetrel, NoLock);
1532 
1533  return address;
1534 }
1535 
1536 
1537 /*
1538  * EnableDisableTrigger()
1539  *
1540  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1541  * to change 'tgenabled' field for the specified trigger(s)
1542  *
1543  * rel: relation to process (caller must hold suitable lock on it)
1544  * tgname: trigger to process, or NULL to scan all triggers
1545  * fires_when: new value for tgenabled field. In addition to generic
1546  * enablement/disablement, this also defines when the trigger
1547  * should be fired in session replication roles.
1548  * skip_system: if true, skip "system" triggers (constraint triggers)
1549  *
1550  * Caller should have checked permissions for the table; here we also
1551  * enforce that superuser privilege is required to alter the state of
1552  * system triggers
1553  */
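 /*
  * Illustrative only (not part of trigger.c): ALTER TABLE forms that end up
  * here, each mapping to a different pg_trigger.tgenabled value (table and
  * trigger names are hypothetical).
  *
  *   ALTER TABLE accounts DISABLE TRIGGER log_change;        -- 'D'
  *   ALTER TABLE accounts ENABLE TRIGGER log_change;         -- 'O'
  *   ALTER TABLE accounts ENABLE REPLICA TRIGGER log_change; -- 'R'
  *   ALTER TABLE accounts ENABLE ALWAYS TRIGGER log_change;  -- 'A'
  */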
1554 void
1555 EnableDisableTrigger(Relation rel, const char *tgname,
1556  char fires_when, bool skip_system)
1557 {
1558  Relation tgrel;
1559  int nkeys;
1560  ScanKeyData keys[2];
1561  SysScanDesc tgscan;
1562  HeapTuple tuple;
1563  bool found;
1564  bool changed;
1565 
1566  /* Scan the relevant entries in pg_triggers */
1567  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1568 
1569  ScanKeyInit(&keys[0],
1570  Anum_pg_trigger_tgrelid,
1571  BTEqualStrategyNumber, F_OIDEQ,
1572  ObjectIdGetDatum(RelationGetRelid(rel)));
1573  if (tgname)
1574  {
1575  ScanKeyInit(&keys[1],
1576  Anum_pg_trigger_tgname,
1577  BTEqualStrategyNumber, F_NAMEEQ,
1578  CStringGetDatum(tgname));
1579  nkeys = 2;
1580  }
1581  else
1582  nkeys = 1;
1583 
1584  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1585  NULL, nkeys, keys);
1586 
1587  found = changed = false;
1588 
1589  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1590  {
1591  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1592 
1593  if (oldtrig->tgisinternal)
1594  {
1595  /* system trigger ... ok to process? */
1596  if (skip_system)
1597  continue;
1598  if (!superuser())
1599  ereport(ERROR,
1600  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1601  errmsg("permission denied: \"%s\" is a system trigger",
1602  NameStr(oldtrig->tgname))));
1603  }
1604 
1605  found = true;
1606 
1607  if (oldtrig->tgenabled != fires_when)
1608  {
1609  /* need to change this one ... make a copy to scribble on */
1610  HeapTuple newtup = heap_copytuple(tuple);
1611  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1612 
1613  newtrig->tgenabled = fires_when;
1614 
1615  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1616 
1617  heap_freetuple(newtup);
1618 
1619  changed = true;
1620  }
1621 
1622  InvokeObjectPostAlterHook(TriggerRelationId,
1623  HeapTupleGetOid(tuple), 0);
1624  }
1625 
1626  systable_endscan(tgscan);
1627 
1628  heap_close(tgrel, RowExclusiveLock);
1629 
1630  if (tgname && !found)
1631  ereport(ERROR,
1632  (errcode(ERRCODE_UNDEFINED_OBJECT),
1633  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1634  tgname, RelationGetRelationName(rel))));
1635 
1636  /*
1637  * If we changed anything, broadcast a SI inval message to force each
1638  * backend (including our own!) to rebuild relation's relcache entry.
1639  * Otherwise they will fail to apply the change promptly.
1640  */
1641  if (changed)
1642  CacheInvalidateRelcache(rel);
1643 }
1644 
1645 
1646 /*
1647  * Build trigger data to attach to the given relcache entry.
1648  *
1649  * Note that trigger data attached to a relcache entry must be stored in
1650  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1651  * But we should be running in a less long-lived working context. To avoid
1652  * leaking cache memory if this routine fails partway through, we build a
1653  * temporary TriggerDesc in working memory and then copy the completed
1654  * structure into cache memory.
1655  */
1656 void
1657 RelationBuildTriggers(Relation relation)
1658 {
1659  TriggerDesc *trigdesc;
1660  int numtrigs;
1661  int maxtrigs;
1662  Trigger *triggers;
1663  Relation tgrel;
1664  ScanKeyData skey;
1665  SysScanDesc tgscan;
1666  HeapTuple htup;
1667  MemoryContext oldContext;
1668  int i;
1669 
1670  /*
1671  * Allocate a working array to hold the triggers (the array is extended if
1672  * necessary)
1673  */
1674  maxtrigs = 16;
1675  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1676  numtrigs = 0;
1677 
1678  /*
1679  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1680  * be reading the triggers in name order, except possibly during
1681  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1682  * ensures that triggers will be fired in name order.
1683  */
1684  ScanKeyInit(&skey,
1685  Anum_pg_trigger_tgrelid,
1686  BTEqualStrategyNumber, F_OIDEQ,
1687  ObjectIdGetDatum(RelationGetRelid(relation)));
1688 
1689  tgrel = heap_open(TriggerRelationId, AccessShareLock);
1690  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1691  NULL, 1, &skey);
1692 
1693  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1694  {
1695  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1696  Trigger *build;
1697  Datum datum;
1698  bool isnull;
1699 
1700  if (numtrigs >= maxtrigs)
1701  {
1702  maxtrigs *= 2;
1703  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1704  }
1705  build = &(triggers[numtrigs]);
1706 
1707  build->tgoid = HeapTupleGetOid(htup);
1708  build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1709  NameGetDatum(&pg_trigger->tgname)));
1710  build->tgfoid = pg_trigger->tgfoid;
1711  build->tgtype = pg_trigger->tgtype;
1712  build->tgenabled = pg_trigger->tgenabled;
1713  build->tgisinternal = pg_trigger->tgisinternal;
1714  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1715  build->tgconstrindid = pg_trigger->tgconstrindid;
1716  build->tgconstraint = pg_trigger->tgconstraint;
1717  build->tgdeferrable = pg_trigger->tgdeferrable;
1718  build->tginitdeferred = pg_trigger->tginitdeferred;
1719  build->tgnargs = pg_trigger->tgnargs;
1720  /* tgattr is first var-width field, so OK to access directly */
1721  build->tgnattr = pg_trigger->tgattr.dim1;
1722  if (build->tgnattr > 0)
1723  {
1724  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1725  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1726  build->tgnattr * sizeof(int16));
1727  }
1728  else
1729  build->tgattr = NULL;
1730  if (build->tgnargs > 0)
1731  {
1732  bytea *val;
1733  char *p;
1734 
1735  val = DatumGetByteaPP(fastgetattr(htup,
1736  Anum_pg_trigger_tgargs,
1737  tgrel->rd_att, &isnull));
1738  if (isnull)
1739  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1740  RelationGetRelationName(relation));
1741  p = (char *) VARDATA_ANY(val);
1742  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1743  for (i = 0; i < build->tgnargs; i++)
1744  {
1745  build->tgargs[i] = pstrdup(p);
1746  p += strlen(p) + 1;
1747  }
1748  }
1749  else
1750  build->tgargs = NULL;
1751 
1752  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1753  tgrel->rd_att, &isnull);
1754  if (!isnull)
1755  build->tgoldtable =
1756  DatumGetCString(DirectFunctionCall1(nameout, datum));
1757  else
1758  build->tgoldtable = NULL;
1759 
1760  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1761  tgrel->rd_att, &isnull);
1762  if (!isnull)
1763  build->tgnewtable =
1764  DatumGetCString(DirectFunctionCall1(nameout, datum));
1765  else
1766  build->tgnewtable = NULL;
1767 
1768  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1769  tgrel->rd_att, &isnull);
1770  if (!isnull)
1771  build->tgqual = TextDatumGetCString(datum);
1772  else
1773  build->tgqual = NULL;
1774 
1775  numtrigs++;
1776  }
1777 
1778  systable_endscan(tgscan);
1779  heap_close(tgrel, AccessShareLock);
1780 
1781  /* There might not be any triggers */
1782  if (numtrigs == 0)
1783  {
1784  pfree(triggers);
1785  return;
1786  }
1787 
1788  /* Build trigdesc */
1789  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1790  trigdesc->triggers = triggers;
1791  trigdesc->numtriggers = numtrigs;
1792  for (i = 0; i < numtrigs; i++)
1793  SetTriggerFlags(trigdesc, &(triggers[i]));
1794 
1795  /* Copy completed trigdesc into cache storage */
1796  oldContext = MemoryContextSwitchTo(CacheMemoryContext);
1797  relation->trigdesc = CopyTriggerDesc(trigdesc);
1798  MemoryContextSwitchTo(oldContext);
1799 
1800  /* Release working memory */
1801  FreeTriggerDesc(trigdesc);
1802 }
1803 
1804 /*
1805  * Update the TriggerDesc's hint flags to include the specified trigger
1806  */
1807 static void
1808 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
1809 {
1810  int16 tgtype = trigger->tgtype;
1811 
1812  trigdesc->trig_insert_before_row |=
1813  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1814  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1815  trigdesc->trig_insert_after_row |=
1816  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1817  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1818  trigdesc->trig_insert_instead_row |=
1819  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1820  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
1821  trigdesc->trig_insert_before_statement |=
1822  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1823  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1824  trigdesc->trig_insert_after_statement |=
1825  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1826  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1827  trigdesc->trig_update_before_row |=
1828  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1829  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1830  trigdesc->trig_update_after_row |=
1831  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1832  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1833  trigdesc->trig_update_instead_row |=
1834  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1835  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
1836  trigdesc->trig_update_before_statement |=
1837  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1838  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1839  trigdesc->trig_update_after_statement |=
1840  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1841  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1842  trigdesc->trig_delete_before_row |=
1843  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1844  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1845  trigdesc->trig_delete_after_row |=
1846  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1847  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1848  trigdesc->trig_delete_instead_row |=
1849  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1850  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
1851  trigdesc->trig_delete_before_statement |=
1852  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1853  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1854  trigdesc->trig_delete_after_statement |=
1855  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1856  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1857  /* there are no row-level truncate triggers */
1858  trigdesc->trig_truncate_before_statement |=
1859  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1860  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
1861  trigdesc->trig_truncate_after_statement |=
1862  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1863  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
1864 
1865  trigdesc->trig_insert_new_table |=
1866  (TRIGGER_FOR_INSERT(tgtype) &&
1867  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1868  trigdesc->trig_update_old_table |=
1869  (TRIGGER_FOR_UPDATE(tgtype) &&
1870  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1871  trigdesc->trig_update_new_table |=
1872  (TRIGGER_FOR_UPDATE(tgtype) &&
1873  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1874  trigdesc->trig_delete_old_table |=
1875  (TRIGGER_FOR_DELETE(tgtype) &&
1876  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1877 }
1878 
1879 /*
1880  * Copy a TriggerDesc data structure.
1881  *
1882  * The copy is allocated in the current memory context.
1883  */
1884 TriggerDesc *
1885 CopyTriggerDesc(TriggerDesc *trigdesc)
1886 {
1887  TriggerDesc *newdesc;
1888  Trigger *trigger;
1889  int i;
1890 
1891  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
1892  return NULL;
1893 
1894  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
1895  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
1896 
1897  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
1898  memcpy(trigger, trigdesc->triggers,
1899  trigdesc->numtriggers * sizeof(Trigger));
1900  newdesc->triggers = trigger;
1901 
1902  for (i = 0; i < trigdesc->numtriggers; i++)
1903  {
1904  trigger->tgname = pstrdup(trigger->tgname);
1905  if (trigger->tgnattr > 0)
1906  {
1907  int16 *newattr;
1908 
1909  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
1910  memcpy(newattr, trigger->tgattr,
1911  trigger->tgnattr * sizeof(int16));
1912  trigger->tgattr = newattr;
1913  }
1914  if (trigger->tgnargs > 0)
1915  {
1916  char **newargs;
1917  int16 j;
1918 
1919  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
1920  for (j = 0; j < trigger->tgnargs; j++)
1921  newargs[j] = pstrdup(trigger->tgargs[j]);
1922  trigger->tgargs = newargs;
1923  }
1924  if (trigger->tgqual)
1925  trigger->tgqual = pstrdup(trigger->tgqual);
1926  if (trigger->tgoldtable)
1927  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
1928  if (trigger->tgnewtable)
1929  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
1930  trigger++;
1931  }
1932 
1933  return newdesc;
1934 }
1935 
1936 /*
1937  * Free a TriggerDesc data structure.
1938  */
1939 void
1940 FreeTriggerDesc(TriggerDesc *trigdesc)
1941 {
1942  Trigger *trigger;
1943  int i;
1944 
1945  if (trigdesc == NULL)
1946  return;
1947 
1948  trigger = trigdesc->triggers;
1949  for (i = 0; i < trigdesc->numtriggers; i++)
1950  {
1951  pfree(trigger->tgname);
1952  if (trigger->tgnattr > 0)
1953  pfree(trigger->tgattr);
1954  if (trigger->tgnargs > 0)
1955  {
1956  while (--(trigger->tgnargs) >= 0)
1957  pfree(trigger->tgargs[trigger->tgnargs]);
1958  pfree(trigger->tgargs);
1959  }
1960  if (trigger->tgqual)
1961  pfree(trigger->tgqual);
1962  if (trigger->tgoldtable)
1963  pfree(trigger->tgoldtable);
1964  if (trigger->tgnewtable)
1965  pfree(trigger->tgnewtable);
1966  trigger++;
1967  }
1968  pfree(trigdesc->triggers);
1969  pfree(trigdesc);
1970 }
1971 
1972 /*
1973  * Compare two TriggerDesc structures for logical equality.
1974  */
1975 #ifdef NOT_USED
1976 bool
1977 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
1978 {
1979  int i,
1980  j;
1981 
1982  /*
1983  * We need not examine the hint flags, just the trigger array itself; if
1984  * we have the same triggers with the same types, the flags should match.
1985  *
1986  * As of 7.3 we assume trigger set ordering is significant in the
1987  * comparison; so we just compare corresponding slots of the two sets.
1988  *
1989  * Note: comparing the stringToNode forms of the WHEN clauses means that
1990  * parse column locations will affect the result. This is okay as long as
1991  * this function is only used for detecting exact equality, as for example
1992  * in checking for staleness of a cache entry.
1993  */
1994  if (trigdesc1 != NULL)
1995  {
1996  if (trigdesc2 == NULL)
1997  return false;
1998  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
1999  return false;
2000  for (i = 0; i < trigdesc1->numtriggers; i++)
2001  {
2002  Trigger *trig1 = trigdesc1->triggers + i;
2003  Trigger *trig2 = trigdesc2->triggers + i;
2004 
2005  if (trig1->tgoid != trig2->tgoid)
2006  return false;
2007  if (strcmp(trig1->tgname, trig2->tgname) != 0)
2008  return false;
2009  if (trig1->tgfoid != trig2->tgfoid)
2010  return false;
2011  if (trig1->tgtype != trig2->tgtype)
2012  return false;
2013  if (trig1->tgenabled != trig2->tgenabled)
2014  return false;
2015  if (trig1->tgisinternal != trig2->tgisinternal)
2016  return false;
2017  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
2018  return false;
2019  if (trig1->tgconstrindid != trig2->tgconstrindid)
2020  return false;
2021  if (trig1->tgconstraint != trig2->tgconstraint)
2022  return false;
2023  if (trig1->tgdeferrable != trig2->tgdeferrable)
2024  return false;
2025  if (trig1->tginitdeferred != trig2->tginitdeferred)
2026  return false;
2027  if (trig1->tgnargs != trig2->tgnargs)
2028  return false;
2029  if (trig1->tgnattr != trig2->tgnattr)
2030  return false;
2031  if (trig1->tgnattr > 0 &&
2032  memcmp(trig1->tgattr, trig2->tgattr,
2033  trig1->tgnattr * sizeof(int16)) != 0)
2034  return false;
2035  for (j = 0; j < trig1->tgnargs; j++)
2036  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
2037  return false;
2038  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
2039  /* ok */ ;
2040  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
2041  return false;
2042  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
2043  return false;
2044  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
2045  /* ok */ ;
2046  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
2047  return false;
2048  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
2049  return false;
2050  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
2051  /* ok */ ;
2052  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
2053  return false;
2054  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
2055  return false;
2056  }
2057  }
2058  else if (trigdesc2 != NULL)
2059  return false;
2060  return true;
2061 }
2062 #endif /* NOT_USED */
2063 
2064 /*
2065  * Check if there is a row-level trigger with transition tables that prevents
2066  * a table from becoming an inheritance child or partition. Return the name
2067  * of the first such incompatible trigger, or NULL if there is none.
2068  */
2069 const char *
2070 FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
2071 {
2072  if (trigdesc != NULL)
2073  {
2074  int i;
2075 
2076  for (i = 0; i < trigdesc->numtriggers; ++i)
2077  {
2078  Trigger *trigger = &trigdesc->triggers[i];
2079 
2080  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2081  return trigger->tgname;
2082  }
2083  }
2084 
2085  return NULL;
2086 }
2087 
2088 /*
2089  * Make a TransitionCaptureState object from a given TriggerDesc. The
2090  * resulting object holds the flags which control whether transition tuples
2091  * are collected when tables are modified, and the tuplestores themselves.
2092  * Note that we copy the flags from a parent table into this struct (rather
2093  * than using each relation's TriggerDesc directly) so that we can use it to
2094  * control the collection of transition tuples from child tables.
2095  *
2096  * If there are no triggers with transition tables configured for 'trigdesc',
2097  * then return NULL.
2098  *
2099  * The resulting object can be passed to the ExecAR* functions. The caller
2100  * should set tcs_map or tcs_original_insert_tuple as appropriate when dealing
2101  * with child tables.
2102  */
2103 TransitionCaptureState *
2104 MakeTransitionCaptureState(TriggerDesc *trigdesc)
2105 {
2106  TransitionCaptureState *state = NULL;
2107 
2108  if (trigdesc != NULL &&
2109  (trigdesc->trig_delete_old_table || trigdesc->trig_update_old_table ||
2110  trigdesc->trig_update_new_table || trigdesc->trig_insert_new_table))
2111  {
2112  MemoryContext oldcxt;
2113  ResourceOwner saveResourceOwner;
2114 
2115  /*
2116  * Normally DestroyTransitionCaptureState should be called after
2117  * executing all AFTER triggers for the current statement.
2118  *
2119  * To handle error cleanup, TransitionCaptureState and the tuplestores
2120  * it contains will live in the current [sub]transaction's memory
2121  * context. Likewise for the current resource owner, because we also
2122  * want to clean up temporary files spilled to disk by the tuplestore
2123  * in that scenario. This scope is sufficient, because AFTER triggers
2124  * with transition tables cannot be deferred (only constraint triggers
2125  * can be deferred, and constraint triggers cannot have transition
2126  * tables). The AFTER trigger queue may contain pointers to this
2127  * TransitionCaptureState, but any such entries will be processed or
2128  * discarded before the end of the current [sub]transaction.
2129  *
2130  * If a future release allows deferred triggers with transition
2131  * tables, we'll need to reconsider the scope of the
2132  * TransitionCaptureState object.
2133  */
2134  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
2135  saveResourceOwner = CurrentResourceOwner;
2136 
2137  state = (TransitionCaptureState *)
2138  palloc0(sizeof(TransitionCaptureState));
2139  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
2140  state->tcs_update_old_table = trigdesc->trig_update_old_table;
2141  state->tcs_update_new_table = trigdesc->trig_update_new_table;
2142  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
2143  PG_TRY();
2144  {
2145  CurrentResourceOwner = CurTransactionResourceOwner;
2146  if (trigdesc->trig_delete_old_table || trigdesc->trig_update_old_table)
2147  state->tcs_old_tuplestore = tuplestore_begin_heap(false, false, work_mem);
2148  if (trigdesc->trig_insert_new_table)
2149  state->tcs_insert_tuplestore = tuplestore_begin_heap(false, false, work_mem);
2150  if (trigdesc->trig_update_new_table)
2151  state->tcs_update_tuplestore = tuplestore_begin_heap(false, false, work_mem);
2152  }
2153  PG_CATCH();
2154  {
2155  CurrentResourceOwner = saveResourceOwner;
2156  PG_RE_THROW();
2157  }
2158  PG_END_TRY();
2159  CurrentResourceOwner = saveResourceOwner;
2160  MemoryContextSwitchTo(oldcxt);
2161  }
2162 
2163  return state;
2164 }
2165 
2166 void
2167 DestroyTransitionCaptureState(TransitionCaptureState *tcs)
2168 {
2169  if (tcs->tcs_insert_tuplestore != NULL)
2170  tuplestore_end(tcs->tcs_insert_tuplestore);
2171  if (tcs->tcs_update_tuplestore != NULL)
2172  tuplestore_end(tcs->tcs_update_tuplestore);
2173  if (tcs->tcs_old_tuplestore != NULL)
2174  tuplestore_end(tcs->tcs_old_tuplestore);
2175  pfree(tcs);
2176 }
2177 
2178 /*
2179  * Call a trigger function.
2180  *
2181  * trigdata: trigger descriptor.
2182  * tgindx: trigger's index in finfo and instr arrays.
2183  * finfo: array of cached trigger function call information.
2184  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2185  * per_tuple_context: memory context to execute the function in.
2186  *
2187  * Returns the tuple (or NULL) as returned by the function.
2188  */
2189 static HeapTuple
2190 ExecCallTriggerFunc(TriggerData *trigdata,
2191  int tgindx,
2192  FmgrInfo *finfo,
2193  Instrumentation *instr,
2194  MemoryContext per_tuple_context)
2195 {
2196  FunctionCallInfoData fcinfo;
2197  PgStat_FunctionCallUsage fcusage;
2198  Datum result;
2199  MemoryContext oldContext;
2200 
2201  /*
2202  * Protect against code paths that may fail to initialize transition table
2203  * info.
2204  */
2205  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2206  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2207  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2208  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2209  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2210  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2211  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2212 
2213  finfo += tgindx;
2214 
2215  /*
2216  * We cache fmgr lookup info, to avoid making the lookup again on each
2217  * call.
2218  */
2219  if (finfo->fn_oid == InvalidOid)
2220  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2221 
2222  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2223 
2224  /*
2225  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2226  */
2227  if (instr)
2228  InstrStartNode(instr + tgindx);
2229 
2230  /*
2231  * Do the function evaluation in the per-tuple memory context, so that
2232  * leaked memory will be reclaimed once per tuple. Note in particular that
2233  * any new tuple created by the trigger function will live till the end of
2234  * the tuple cycle.
2235  */
2236  oldContext = MemoryContextSwitchTo(per_tuple_context);
2237 
2238  /*
2239  * Call the function, passing no arguments but setting a context.
2240  */
2241  InitFunctionCallInfoData(fcinfo, finfo, 0,
2242  InvalidOid, (Node *) trigdata, NULL);
2243 
2244  pgstat_init_function_usage(&fcinfo, &fcusage);
2245 
2246  MyTriggerDepth++;
2247  PG_TRY();
2248  {
2249  result = FunctionCallInvoke(&fcinfo);
2250  }
2251  PG_CATCH();
2252  {
2253  MyTriggerDepth--;
2254  PG_RE_THROW();
2255  }
2256  PG_END_TRY();
2257  MyTriggerDepth--;
2258 
2259  pgstat_end_function_usage(&fcusage, true);
2260 
2261  MemoryContextSwitchTo(oldContext);
2262 
2263  /*
2264  * Trigger protocol allows function to return a null pointer, but NOT to
2265  * set the isnull result flag.
2266  */
2267  if (fcinfo.isnull)
2268  ereport(ERROR,
2269  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2270  errmsg("trigger function %u returned null value",
2271  fcinfo.flinfo->fn_oid)));
2272 
2273  /*
2274  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2275  * one "tuple returned" (really the number of firings).
2276  */
2277  if (instr)
2278  InstrStopNode(instr + tgindx, 1);
2279 
2280  return (HeapTuple) DatumGetPointer(result);
2281 }
2282 
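To illustrate the calling protocol enforced above, here is a minimal sketch of a user-supplied C trigger function as it might appear in an extension (the function name and behavior are hypothetical, not part of trigger.c). It returns a tuple pointer, never sets the isnull flag, and returns a null pointer from a BEFORE ROW trigger to request "do nothing":

#include "postgres.h"
#include "fmgr.h"
#include "commands/trigger.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(example_row_trigger);

Datum
example_row_trigger(PG_FUNCTION_ARGS)
{
    TriggerData *trigdata;

    /* fcinfo->context carries the TriggerData built by the trigger manager */
    if (!CALLED_AS_TRIGGER(fcinfo))
        elog(ERROR, "example_row_trigger: not called by trigger manager");
    trigdata = (TriggerData *) fcinfo->context;

    /* Returning a null pointer from a BEFORE ROW trigger suppresses the operation. */
    if (TRIGGER_FIRED_BEFORE(trigdata->tg_event) &&
        TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
        return PointerGetDatum(NULL);

    /* For UPDATE, the row about to be stored is tg_newtuple ... */
    if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
        return PointerGetDatum(trigdata->tg_newtuple);

    /* ... otherwise pass tg_trigtuple through unchanged. */
    return PointerGetDatum(trigdata->tg_trigtuple);
}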
2283 void
2284 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2285 {
2286  TriggerDesc *trigdesc;
2287  int i;
2288  TriggerData LocTriggerData;
2289 
2290  trigdesc = relinfo->ri_TrigDesc;
2291 
2292  if (trigdesc == NULL)
2293  return;
2294  if (!trigdesc->trig_insert_before_statement)
2295  return;
2296 
2297  LocTriggerData.type = T_TriggerData;
2298  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2299  TRIGGER_EVENT_BEFORE;
2300  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2301  LocTriggerData.tg_trigtuple = NULL;
2302  LocTriggerData.tg_newtuple = NULL;
2303  LocTriggerData.tg_oldtable = NULL;
2304  LocTriggerData.tg_newtable = NULL;
2305  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2306  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2307  for (i = 0; i < trigdesc->numtriggers; i++)
2308  {
2309  Trigger *trigger = &trigdesc->triggers[i];
2310  HeapTuple newtuple;
2311 
2312  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2316  continue;
2317  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2318  NULL, NULL, NULL))
2319  continue;
2320 
2321  LocTriggerData.tg_trigger = trigger;
2322  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2323  i,
2324  relinfo->ri_TrigFunctions,
2325  relinfo->ri_TrigInstrument,
2326  GetPerTupleMemoryContext(estate));
2327 
2328  if (newtuple)
2329  ereport(ERROR,
2330  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2331  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2332  }
2333 }
2334 
2335 void
2336 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2337  TransitionCaptureState *transition_capture)
2338 {
2339  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2340 
2341  if (trigdesc && trigdesc->trig_insert_after_statement)
2343  false, NULL, NULL, NIL, NULL, transition_capture);
2344 }
2345 
2346 TupleTableSlot *
2347 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2348  TupleTableSlot *slot)
2349 {
2350  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2351  HeapTuple slottuple = ExecMaterializeSlot(slot);
2352  HeapTuple newtuple = slottuple;
2353  HeapTuple oldtuple;
2354  TriggerData LocTriggerData;
2355  int i;
2356 
2357  LocTriggerData.type = T_TriggerData;
2358  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2361  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2362  LocTriggerData.tg_newtuple = NULL;
2363  LocTriggerData.tg_oldtable = NULL;
2364  LocTriggerData.tg_newtable = NULL;
2365  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2366  for (i = 0; i < trigdesc->numtriggers; i++)
2367  {
2368  Trigger *trigger = &trigdesc->triggers[i];
2369 
2370  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2374  continue;
2375  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2376  NULL, NULL, newtuple))
2377  continue;
2378 
2379  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2380  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2381  LocTriggerData.tg_trigger = trigger;
2382  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2383  i,
2384  relinfo->ri_TrigFunctions,
2385  relinfo->ri_TrigInstrument,
2386  GetPerTupleMemoryContext(estate));
2387  if (oldtuple != newtuple && oldtuple != slottuple)
2388  heap_freetuple(oldtuple);
2389  if (newtuple == NULL)
2390  return NULL; /* "do nothing" */
2391  }
2392 
2393  if (newtuple != slottuple)
2394  {
2395  /*
2396  * Return the modified tuple using the es_trig_tuple_slot. We assume
2397  * the tuple was allocated in per-tuple memory context, and therefore
2398  * will go away by itself. The tuple table slot should not try to
2399  * clear it.
2400  */
2401  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2402  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2403 
2404  if (newslot->tts_tupleDescriptor != tupdesc)
2405  ExecSetSlotDescriptor(newslot, tupdesc);
2406  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2407  slot = newslot;
2408  }
2409  return slot;
2410 }
2411 
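The loop above folds the candidate tuple through every matching BEFORE ROW trigger: each trigger sees the result of the previous one, intermediate tuples are freed, and the first NULL return cancels the whole insert. A self-contained toy model of that chaining (plain C with a fake "row" type, not backend code):

#include <stdio.h>
#include <stdlib.h>

typedef int Row;                     /* toy "row": just an integer */
typedef Row *(*ToyTrigger) (Row *row);

static Row *
add_ten(Row *row)
{
    Row *copy = malloc(sizeof(Row));

    *copy = *row + 10;               /* replace the row with a modified copy */
    return copy;
}

static Row *
reject_negative(Row *row)
{
    return (*row < 0) ? NULL : row;  /* NULL means "do nothing" */
}

/* Fold a row through the trigger chain, freeing intermediate results. */
static Row *
run_before_row_triggers(ToyTrigger *chain, int n, Row *original)
{
    Row *newrow = original;

    for (int i = 0; i < n; i++)
    {
        Row *oldrow = newrow;

        newrow = chain[i](oldrow);
        if (oldrow != newrow && oldrow != original)
            free(oldrow);
        if (newrow == NULL)
            return NULL;             /* operation suppressed */
    }
    return newrow;
}

int
main(void)
{
    ToyTrigger chain[] = {add_ten, reject_negative};
    Row original = 32;
    Row *result = run_before_row_triggers(chain, 2, &original);

    if (result)
        printf("insert proceeds with value %d\n", *result);
    else
        printf("insert suppressed\n");
    if (result && result != &original)
        free(result);
    return 0;
}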
2412 void
2413 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2414  HeapTuple trigtuple, List *recheckIndexes,
2415  TransitionCaptureState *transition_capture)
2416 {
2417  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2418 
2419  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2420  (transition_capture && transition_capture->tcs_insert_new_table))
2422  true, NULL, trigtuple,
2423  recheckIndexes, NULL,
2424  transition_capture);
2425 }
2426 
2427 TupleTableSlot *
2428 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2429  TupleTableSlot *slot)
2430 {
2431  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2432  HeapTuple slottuple = ExecMaterializeSlot(slot);
2433  HeapTuple newtuple = slottuple;
2434  HeapTuple oldtuple;
2435  TriggerData LocTriggerData;
2436  int i;
2437 
2438  LocTriggerData.type = T_TriggerData;
2439  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2442  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2443  LocTriggerData.tg_newtuple = NULL;
2444  LocTriggerData.tg_oldtable = NULL;
2445  LocTriggerData.tg_newtable = NULL;
2446  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2447  for (i = 0; i < trigdesc->numtriggers; i++)
2448  {
2449  Trigger *trigger = &trigdesc->triggers[i];
2450 
2451  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2455  continue;
2456  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2457  NULL, NULL, newtuple))
2458  continue;
2459 
2460  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2461  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2462  LocTriggerData.tg_trigger = trigger;
2463  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2464  i,
2465  relinfo->ri_TrigFunctions,
2466  relinfo->ri_TrigInstrument,
2467  GetPerTupleMemoryContext(estate));
2468  if (oldtuple != newtuple && oldtuple != slottuple)
2469  heap_freetuple(oldtuple);
2470  if (newtuple == NULL)
2471  return NULL; /* "do nothing" */
2472  }
2473 
2474  if (newtuple != slottuple)
2475  {
2476  /*
2477  * Return the modified tuple using the es_trig_tuple_slot. We assume
2478  * the tuple was allocated in per-tuple memory context, and therefore
2479  * will go away by itself. The tuple table slot should not try to
2480  * clear it.
2481  */
2482  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2483  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2484 
2485  if (newslot->tts_tupleDescriptor != tupdesc)
2486  ExecSetSlotDescriptor(newslot, tupdesc);
2487  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2488  slot = newslot;
2489  }
2490  return slot;
2491 }
2492 
2493 void
2494 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2495 {
2496  TriggerDesc *trigdesc;
2497  int i;
2498  TriggerData LocTriggerData;
2499 
2500  trigdesc = relinfo->ri_TrigDesc;
2501 
2502  if (trigdesc == NULL)
2503  return;
2504  if (!trigdesc->trig_delete_before_statement)
2505  return;
2506 
2507  LocTriggerData.type = T_TriggerData;
2508  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2510  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2511  LocTriggerData.tg_trigtuple = NULL;
2512  LocTriggerData.tg_newtuple = NULL;
2513  LocTriggerData.tg_oldtable = NULL;
2514  LocTriggerData.tg_newtable = NULL;
2515  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2516  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2517  for (i = 0; i < trigdesc->numtriggers; i++)
2518  {
2519  Trigger *trigger = &trigdesc->triggers[i];
2520  HeapTuple newtuple;
2521 
2522  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2526  continue;
2527  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2528  NULL, NULL, NULL))
2529  continue;
2530 
2531  LocTriggerData.tg_trigger = trigger;
2532  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2533  i,
2534  relinfo->ri_TrigFunctions,
2535  relinfo->ri_TrigInstrument,
2536  GetPerTupleMemoryContext(estate));
2537 
2538  if (newtuple)
2539  ereport(ERROR,
2540  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2541  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2542  }
2543 }
2544 
2545 void
2546 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2547  TransitionCaptureState *transition_capture)
2548 {
2549  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2550 
2551  if (trigdesc && trigdesc->trig_delete_after_statement)
2553  false, NULL, NULL, NIL, NULL, transition_capture);
2554 }
2555 
2556 bool
2557 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2558  ResultRelInfo *relinfo,
2559  ItemPointer tupleid,
2560  HeapTuple fdw_trigtuple)
2561 {
2562  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2563  bool result = true;
2564  TriggerData LocTriggerData;
2565  HeapTuple trigtuple;
2566  HeapTuple newtuple;
2567  TupleTableSlot *newSlot;
2568  int i;
2569 
2570  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2571  if (fdw_trigtuple == NULL)
2572  {
2573  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2574  LockTupleExclusive, &newSlot);
2575  if (trigtuple == NULL)
2576  return false;
2577  }
2578  else
2579  trigtuple = fdw_trigtuple;
2580 
2581  LocTriggerData.type = T_TriggerData;
2582  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2585  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2586  LocTriggerData.tg_newtuple = NULL;
2587  LocTriggerData.tg_oldtable = NULL;
2588  LocTriggerData.tg_newtable = NULL;
2589  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2590  for (i = 0; i < trigdesc->numtriggers; i++)
2591  {
2592  Trigger *trigger = &trigdesc->triggers[i];
2593 
2594  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2598  continue;
2599  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2600  NULL, trigtuple, NULL))
2601  continue;
2602 
2603  LocTriggerData.tg_trigtuple = trigtuple;
2604  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2605  LocTriggerData.tg_trigger = trigger;
2606  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2607  i,
2608  relinfo->ri_TrigFunctions,
2609  relinfo->ri_TrigInstrument,
2610  GetPerTupleMemoryContext(estate));
2611  if (newtuple == NULL)
2612  {
2613  result = false; /* tell caller to suppress delete */
2614  break;
2615  }
2616  if (newtuple != trigtuple)
2617  heap_freetuple(newtuple);
2618  }
2619  if (trigtuple != fdw_trigtuple)
2620  heap_freetuple(trigtuple);
2621 
2622  return result;
2623 }
2624 
2625 void
2626 ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2627  ItemPointer tupleid,
2628  HeapTuple fdw_trigtuple,
2629  TransitionCaptureState *transition_capture)
2630 {
2631  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2632 
2633  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2634  (transition_capture && transition_capture->tcs_delete_old_table))
2635  {
2636  HeapTuple trigtuple;
2637 
2638  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2639  if (fdw_trigtuple == NULL)
2640  trigtuple = GetTupleForTrigger(estate,
2641  NULL,
2642  relinfo,
2643  tupleid,
2644  LockTupleExclusive,
2645  NULL);
2646  else
2647  trigtuple = fdw_trigtuple;
2648 
2650  true, trigtuple, NULL, NIL, NULL,
2651  transition_capture);
2652  if (trigtuple != fdw_trigtuple)
2653  heap_freetuple(trigtuple);
2654  }
2655 }
2656 
2657 bool
2658 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2659  HeapTuple trigtuple)
2660 {
2661  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2662  TriggerData LocTriggerData;
2663  HeapTuple rettuple;
2664  int i;
2665 
2666  LocTriggerData.type = T_TriggerData;
2667  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2670  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2671  LocTriggerData.tg_newtuple = NULL;
2672  LocTriggerData.tg_oldtable = NULL;
2673  LocTriggerData.tg_newtable = NULL;
2674  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2675  for (i = 0; i < trigdesc->numtriggers; i++)
2676  {
2677  Trigger *trigger = &trigdesc->triggers[i];
2678 
2679  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2683  continue;
2684  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2685  NULL, trigtuple, NULL))
2686  continue;
2687 
2688  LocTriggerData.tg_trigtuple = trigtuple;
2689  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2690  LocTriggerData.tg_trigger = trigger;
2691  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2692  i,
2693  relinfo->ri_TrigFunctions,
2694  relinfo->ri_TrigInstrument,
2695  GetPerTupleMemoryContext(estate));
2696  if (rettuple == NULL)
2697  return false; /* Delete was suppressed */
2698  if (rettuple != trigtuple)
2699  heap_freetuple(rettuple);
2700  }
2701  return true;
2702 }
2703 
2704 void
2705 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2706 {
2707  TriggerDesc *trigdesc;
2708  int i;
2709  TriggerData LocTriggerData;
2710  Bitmapset *updatedCols;
2711 
2712  trigdesc = relinfo->ri_TrigDesc;
2713 
2714  if (trigdesc == NULL)
2715  return;
2716  if (!trigdesc->trig_update_before_statement)
2717  return;
2718 
2719  updatedCols = GetUpdatedColumns(relinfo, estate);
2720 
2721  LocTriggerData.type = T_TriggerData;
2722  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2724  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2725  LocTriggerData.tg_trigtuple = NULL;
2726  LocTriggerData.tg_newtuple = NULL;
2727  LocTriggerData.tg_oldtable = NULL;
2728  LocTriggerData.tg_newtable = NULL;
2729  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2730  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2731  for (i = 0; i < trigdesc->numtriggers; i++)
2732  {
2733  Trigger *trigger = &trigdesc->triggers[i];
2734  HeapTuple newtuple;
2735 
2736  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2740  continue;
2741  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2742  updatedCols, NULL, NULL))
2743  continue;
2744 
2745  LocTriggerData.tg_trigger = trigger;
2746  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2747  i,
2748  relinfo->ri_TrigFunctions,
2749  relinfo->ri_TrigInstrument,
2750  GetPerTupleMemoryContext(estate));
2751 
2752  if (newtuple)
2753  ereport(ERROR,
2754  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2755  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2756  }
2757 }
2758 
2759 void
2760 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2761  TransitionCaptureState *transition_capture)
2762 {
2763  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2764 
2765  if (trigdesc && trigdesc->trig_update_after_statement)
2767  false, NULL, NULL, NIL,
2768  GetUpdatedColumns(relinfo, estate),
2769  transition_capture);
2770 }
2771 
2772 TupleTableSlot *
2773 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2774  ResultRelInfo *relinfo,
2775  ItemPointer tupleid,
2776  HeapTuple fdw_trigtuple,
2777  TupleTableSlot *slot)
2778 {
2779  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2780  HeapTuple slottuple = ExecMaterializeSlot(slot);
2781  HeapTuple newtuple = slottuple;
2782  TriggerData LocTriggerData;
2783  HeapTuple trigtuple;
2784  HeapTuple oldtuple;
2785  TupleTableSlot *newSlot;
2786  int i;
2787  Bitmapset *updatedCols;
2788  LockTupleMode lockmode;
2789 
2790  /* Determine lock mode to use */
2791  lockmode = ExecUpdateLockMode(estate, relinfo);
2792 
2793  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2794  if (fdw_trigtuple == NULL)
2795  {
2796  /* get a copy of the on-disk tuple we are planning to update */
2797  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2798  lockmode, &newSlot);
2799  if (trigtuple == NULL)
2800  return NULL; /* cancel the update action */
2801  }
2802  else
2803  {
2804  trigtuple = fdw_trigtuple;
2805  newSlot = NULL;
2806  }
2807 
2808  /*
2809  * In READ COMMITTED isolation level it's possible that target tuple was
2810  * changed due to concurrent update. In that case we have a raw subplan
2811  * output tuple in newSlot, and need to run it through the junk filter to
2812  * produce an insertable tuple.
2813  *
2814  * Caution: more than likely, the passed-in slot is the same as the
2815  * junkfilter's output slot, so we are clobbering the original value of
2816  * slottuple by doing the filtering. This is OK since neither we nor our
2817  * caller have any more interest in the prior contents of that slot.
2818  */
2819  if (newSlot != NULL)
2820  {
2821  slot = ExecFilterJunk(relinfo->ri_junkFilter, newSlot);
2822  slottuple = ExecMaterializeSlot(slot);
2823  newtuple = slottuple;
2824  }
2825 
2826 
2827  LocTriggerData.type = T_TriggerData;
2828  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2831  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2832  LocTriggerData.tg_oldtable = NULL;
2833  LocTriggerData.tg_newtable = NULL;
2834  updatedCols = GetUpdatedColumns(relinfo, estate);
2835  for (i = 0; i < trigdesc->numtriggers; i++)
2836  {
2837  Trigger *trigger = &trigdesc->triggers[i];
2838 
2839  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2843  continue;
2844  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2845  updatedCols, trigtuple, newtuple))
2846  continue;
2847 
2848  LocTriggerData.tg_trigtuple = trigtuple;
2849  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2850  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2851  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2852  LocTriggerData.tg_trigger = trigger;
2853  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2854  i,
2855  relinfo->ri_TrigFunctions,
2856  relinfo->ri_TrigInstrument,
2857  GetPerTupleMemoryContext(estate));
2858  if (oldtuple != newtuple && oldtuple != slottuple)
2859  heap_freetuple(oldtuple);
2860  if (newtuple == NULL)
2861  {
2862  if (trigtuple != fdw_trigtuple)
2863  heap_freetuple(trigtuple);
2864  return NULL; /* "do nothing" */
2865  }
2866  }
2867  if (trigtuple != fdw_trigtuple)
2868  heap_freetuple(trigtuple);
2869 
2870  if (newtuple != slottuple)
2871  {
2872  /*
2873  * Return the modified tuple using the es_trig_tuple_slot. We assume
2874  * the tuple was allocated in per-tuple memory context, and therefore
2875  * will go away by itself. The tuple table slot should not try to
2876  * clear it.
2877  */
2878  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2879  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2880 
2881  if (newslot->tts_tupleDescriptor != tupdesc)
2882  ExecSetSlotDescriptor(newslot, tupdesc);
2883  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2884  slot = newslot;
2885  }
2886  return slot;
2887 }
2888 
2889 void
2890 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2891  ItemPointer tupleid,
2892  HeapTuple fdw_trigtuple,
2893  HeapTuple newtuple,
2894  List *recheckIndexes,
2895  TransitionCaptureState *transition_capture)
2896 {
2897  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2898 
2899  if ((trigdesc && trigdesc->trig_update_after_row) ||
2900  (transition_capture &&
2901  (transition_capture->tcs_update_old_table ||
2902  transition_capture->tcs_update_new_table)))
2903  {
2904  HeapTuple trigtuple;
2905 
2906  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2907  if (fdw_trigtuple == NULL)
2908  trigtuple = GetTupleForTrigger(estate,
2909  NULL,
2910  relinfo,
2911  tupleid,
2912  LockTupleExclusive,
2913  NULL);
2914  else
2915  trigtuple = fdw_trigtuple;
2916 
2918  true, trigtuple, newtuple, recheckIndexes,
2919  GetUpdatedColumns(relinfo, estate),
2920  transition_capture);
2921  if (trigtuple != fdw_trigtuple)
2922  heap_freetuple(trigtuple);
2923  }
2924 }
2925 
2926 TupleTableSlot *
2927 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2928  HeapTuple trigtuple, TupleTableSlot *slot)
2929 {
2930  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2931  HeapTuple slottuple = ExecMaterializeSlot(slot);
2932  HeapTuple newtuple = slottuple;
2933  TriggerData LocTriggerData;
2934  HeapTuple oldtuple;
2935  int i;
2936 
2937  LocTriggerData.type = T_TriggerData;
2938  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2941  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2942  LocTriggerData.tg_oldtable = NULL;
2943  LocTriggerData.tg_newtable = NULL;
2944  for (i = 0; i < trigdesc->numtriggers; i++)
2945  {
2946  Trigger *trigger = &trigdesc->triggers[i];
2947 
2948  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2952  continue;
2953  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2954  NULL, trigtuple, newtuple))
2955  continue;
2956 
2957  LocTriggerData.tg_trigtuple = trigtuple;
2958  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2959  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2960  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2961  LocTriggerData.tg_trigger = trigger;
2962  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2963  i,
2964  relinfo->ri_TrigFunctions,
2965  relinfo->ri_TrigInstrument,
2966  GetPerTupleMemoryContext(estate));
2967  if (oldtuple != newtuple && oldtuple != slottuple)
2968  heap_freetuple(oldtuple);
2969  if (newtuple == NULL)
2970  return NULL; /* "do nothing" */
2971  }
2972 
2973  if (newtuple != slottuple)
2974  {
2975  /*
2976  * Return the modified tuple using the es_trig_tuple_slot. We assume
2977  * the tuple was allocated in per-tuple memory context, and therefore
2978  * will go away by itself. The tuple table slot should not try to
2979  * clear it.
2980  */
2981  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2982  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2983 
2984  if (newslot->tts_tupleDescriptor != tupdesc)
2985  ExecSetSlotDescriptor(newslot, tupdesc);
2986  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2987  slot = newslot;
2988  }
2989  return slot;
2990 }
2991 
2992 void
2993 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2994 {
2995  TriggerDesc *trigdesc;
2996  int i;
2997  TriggerData LocTriggerData;
2998 
2999  trigdesc = relinfo->ri_TrigDesc;
3000 
3001  if (trigdesc == NULL)
3002  return;
3003  if (!trigdesc->trig_truncate_before_statement)
3004  return;
3005 
3006  LocTriggerData.type = T_TriggerData;
3007  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3009  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3010  LocTriggerData.tg_trigtuple = NULL;
3011  LocTriggerData.tg_newtuple = NULL;
3012  LocTriggerData.tg_oldtable = NULL;
3013  LocTriggerData.tg_newtable = NULL;
3014  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3015  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3016  for (i = 0; i < trigdesc->numtriggers; i++)
3017  {
3018  Trigger *trigger = &trigdesc->triggers[i];
3019  HeapTuple newtuple;
3020 
3021  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3025  continue;
3026  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3027  NULL, NULL, NULL))
3028  continue;
3029 
3030  LocTriggerData.tg_trigger = trigger;
3031  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3032  i,
3033  relinfo->ri_TrigFunctions,
3034  relinfo->ri_TrigInstrument,
3035  GetPerTupleMemoryContext(estate));
3036 
3037  if (newtuple)
3038  ereport(ERROR,
3039  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3040  errmsg("BEFORE STATEMENT trigger cannot return a value")));
3041  }
3042 }
3043 
3044 void
3045 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3046 {
3047  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3048 
3049  if (trigdesc && trigdesc->trig_truncate_after_statement)
3051  false, NULL, NULL, NIL, NULL, NULL);
3052 }
3053 
3054 
3055 static HeapTuple
3056 GetTupleForTrigger(EState *estate,
3057  EPQState *epqstate,
3058  ResultRelInfo *relinfo,
3059  ItemPointer tid,
3060  LockTupleMode lockmode,
3061  TupleTableSlot **newSlot)
3062 {
3063  Relation relation = relinfo->ri_RelationDesc;
3064  HeapTupleData tuple;
3065  HeapTuple result;
3066  Buffer buffer;
3067 
3068  if (newSlot != NULL)
3069  {
3070  HTSU_Result test;
3071  HeapUpdateFailureData hufd;
3072 
3073  *newSlot = NULL;
3074 
3075  /* caller must pass an epqstate if EvalPlanQual is possible */
3076  Assert(epqstate != NULL);
3077 
3078  /*
3079  * lock tuple for update
3080  */
3081 ltrmark:;
3082  tuple.t_self = *tid;
3083  test = heap_lock_tuple(relation, &tuple,
3084  estate->es_output_cid,
3085  lockmode, LockWaitBlock,
3086  false, &buffer, &hufd);
3087  switch (test)
3088  {
3089  case HeapTupleSelfUpdated:
3090 
3091  /*
3092  * The target tuple was already updated or deleted by the
3093  * current command, or by a later command in the current
3094  * transaction. We ignore the tuple in the former case, and
3095  * throw error in the latter case, for the same reasons
3096  * enumerated in ExecUpdate and ExecDelete in
3097  * nodeModifyTable.c.
3098  */
3099  if (hufd.cmax != estate->es_output_cid)
3100  ereport(ERROR,
3101  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3102  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3103  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3104 
3105  /* treat it as deleted; do not process */
3106  ReleaseBuffer(buffer);
3107  return NULL;
3108 
3109  case HeapTupleMayBeUpdated:
3110  break;
3111 
3112  case HeapTupleUpdated:
3113  ReleaseBuffer(buffer);
3114  if (IsolationUsesXactSnapshot())
3115  ereport(ERROR,
3116  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3117  errmsg("could not serialize access due to concurrent update")));
3118  if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
3119  {
3120  /* it was updated, so look at the updated version */
3121  TupleTableSlot *epqslot;
3122 
3123  epqslot = EvalPlanQual(estate,
3124  epqstate,
3125  relation,
3126  relinfo->ri_RangeTableIndex,
3127  lockmode,
3128  &hufd.ctid,
3129  hufd.xmax);
3130  if (!TupIsNull(epqslot))
3131  {
3132  *tid = hufd.ctid;
3133  *newSlot = epqslot;
3134 
3135  /*
3136  * EvalPlanQual already locked the tuple, but we
3137  * re-call heap_lock_tuple anyway as an easy way of
3138  * re-fetching the correct tuple. Speed is hardly a
3139  * criterion in this path anyhow.
3140  */
3141  goto ltrmark;
3142  }
3143  }
3144 
3145  /*
3146  * if tuple was deleted or PlanQual failed for updated tuple -
3147  * we must not process this tuple!
3148  */
3149  return NULL;
3150 
3151  case HeapTupleInvisible:
3152  elog(ERROR, "attempted to lock invisible tuple");
3153 
3154  default:
3155  ReleaseBuffer(buffer);
3156  elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
3157  return NULL; /* keep compiler quiet */
3158  }
3159  }
3160  else
3161  {
3162  Page page;
3163  ItemId lp;
3164 
3165  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3166 
3167  /*
3168  * Although we already know this tuple is valid, we must lock the
3169  * buffer to ensure that no one has a buffer cleanup lock; otherwise
3170  * they might move the tuple while we try to copy it. But we can
3171  * release the lock before actually doing the heap_copytuple call,
3172  * since holding pin is sufficient to prevent anyone from getting a
3173  * cleanup lock they don't already hold.
3174  */
3175  LockBuffer(buffer, BUFFER_LOCK_SHARE);
3176 
3177  page = BufferGetPage(buffer);
3178  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3179 
3180  Assert(ItemIdIsNormal(lp));
3181 
3182  tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3183  tuple.t_len = ItemIdGetLength(lp);
3184  tuple.t_self = *tid;
3185  tuple.t_tableOid = RelationGetRelid(relation);
3186 
3187  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3188  }
3189 
3190  result = heap_copytuple(&tuple);
3191  ReleaseBuffer(buffer);
3192 
3193  return result;
3194 }
3195 
3196 /*
3197  * Is trigger enabled to fire?
3198  */
3199 static bool
3200 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3201  Trigger *trigger, TriggerEvent event,
3202  Bitmapset *modifiedCols,
3203  HeapTuple oldtup, HeapTuple newtup)
3204 {
3205  /* Check replication-role-dependent enable state */
3206  if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3207  {
3208  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3209  trigger->tgenabled == TRIGGER_DISABLED)
3210  return false;
3211  }
3212  else /* ORIGIN or LOCAL role */
3213  {
3214  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3215  trigger->tgenabled == TRIGGER_DISABLED)
3216  return false;
3217  }
3218 
3219  /*
3220  * Check for column-specific trigger (only possible for UPDATE, and in
3221  * fact we *must* ignore tgattr for other event types)
3222  */
3223  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3224  {
3225  int i;
3226  bool modified;
3227 
3228  modified = false;
3229  for (i = 0; i < trigger->tgnattr; i++)
3230  {
3231  if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3232  modifiedCols))
3233  {
3234  modified = true;
3235  break;
3236  }
3237  }
3238  if (!modified)
3239  return false;
3240  }
3241 
3242  /* Check for WHEN clause */
3243  if (trigger->tgqual)
3244  {
3245  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
3246  ExprState **predicate;
3247  ExprContext *econtext;
3248  TupleTableSlot *oldslot = NULL;
3249  TupleTableSlot *newslot = NULL;
3250  MemoryContext oldContext;
3251  int i;
3252 
3253  Assert(estate != NULL);
3254 
3255  /*
3256  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3257  * matching element of relinfo->ri_TrigWhenExprs[]
3258  */
3259  i = trigger - relinfo->ri_TrigDesc->triggers;
3260  predicate = &relinfo->ri_TrigWhenExprs[i];
3261 
3262  /*
3263  * If first time through for this WHEN expression, build expression
3264  * nodetrees for it. Keep them in the per-query memory context so
3265  * they'll survive throughout the query.
3266  */
3267  if (*predicate == NULL)
3268  {
3269  Node *tgqual;
3270 
3271  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3272  tgqual = stringToNode(trigger->tgqual);
3273  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3274  ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3275  ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3276  /* ExecPrepareQual wants implicit-AND form */
3277  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3278  *predicate = ExecPrepareQual((List *) tgqual, estate);
3279  MemoryContextSwitchTo(oldContext);
3280  }
3281 
3282  /*
3283  * We will use the EState's per-tuple context for evaluating WHEN
3284  * expressions (creating it if it's not already there).
3285  */
3286  econtext = GetPerTupleExprContext(estate);
3287 
3288  /*
3289  * Put OLD and NEW tuples into tupleslots for expression evaluation.
3290  * These slots can be shared across the whole estate, but be careful
3291  * that they have the current resultrel's tupdesc.
3292  */
3293  if (HeapTupleIsValid(oldtup))
3294  {
3295  if (estate->es_trig_oldtup_slot == NULL)
3296  {
3297  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3298  estate->es_trig_oldtup_slot = ExecInitExtraTupleSlot(estate);
3299  MemoryContextSwitchTo(oldContext);
3300  }
3301  oldslot = estate->es_trig_oldtup_slot;
3302  if (oldslot->tts_tupleDescriptor != tupdesc)
3303  ExecSetSlotDescriptor(oldslot, tupdesc);
3304  ExecStoreTuple(oldtup, oldslot, InvalidBuffer, false);
3305  }
3306  if (HeapTupleIsValid(newtup))
3307  {
3308  if (estate->es_trig_newtup_slot == NULL)
3309  {
3310  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3311  estate->es_trig_newtup_slot = ExecInitExtraTupleSlot(estate);
3312  MemoryContextSwitchTo(oldContext);
3313  }
3314  newslot = estate->es_trig_newtup_slot;
3315  if (newslot->tts_tupleDescriptor != tupdesc)
3316  ExecSetSlotDescriptor(newslot, tupdesc);
3317  ExecStoreTuple(newtup, newslot, InvalidBuffer, false);
3318  }
3319 
3320  /*
3321  * Finally evaluate the expression, making the old and/or new tuples
3322  * available as INNER_VAR/OUTER_VAR respectively.
3323  */
3324  econtext->ecxt_innertuple = oldslot;
3325  econtext->ecxt_outertuple = newslot;
3326  if (!ExecQual(*predicate, econtext))
3327  return false;
3328  }
3329 
3330  return true;
3331 }
3332 
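The column-specific check above fires an UPDATE trigger only if at least one attribute listed in tgattr is in the statement's set of modified columns; the backend uses a Bitmapset and offsets attribute numbers by FirstLowInvalidHeapAttributeNumber. A self-contained toy model of the same decision, using a plain bitmask instead of a Bitmapset:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Fire only if some column listed by the trigger was modified. */
static bool
column_trigger_fires(const int *tgattr, int tgnattr, uint64_t modified_cols)
{
    for (int i = 0; i < tgnattr; i++)
    {
        if (modified_cols & (UINT64_C(1) << tgattr[i]))
            return true;        /* a listed column was modified */
    }
    return false;               /* none of the listed columns changed */
}

int
main(void)
{
    int      tgattr[] = {2, 5};  /* trigger declared on columns 2 and 5 */
    uint64_t update_cols = (UINT64_C(1) << 3) | (UINT64_C(1) << 5);

    printf("fires: %s\n",
           column_trigger_fires(tgattr, 2, update_cols) ? "yes" : "no");
    return 0;
}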
3333 
3334 /* ----------
3335  * After-trigger stuff
3336  *
3337  * The AfterTriggersData struct holds data about pending AFTER trigger events
3338  * during the current transaction tree. (BEFORE triggers are fired
3339  * immediately so we don't need any persistent state about them.) The struct
3340  * and most of its subsidiary data are kept in TopTransactionContext; however
3341  * the individual event records are kept in a separate sub-context. This is
3342  * done mainly so that it's easy to tell from a memory context dump how much
3343  * space is being eaten by trigger events.
3344  *
3345  * Because the list of pending events can grow large, we go to some
3346  * considerable effort to minimize per-event memory consumption. The event
3347  * records are grouped into chunks and common data for similar events in the
3348  * same chunk is only stored once.
3349  *
3350  * XXX We need to be able to save the per-event data in a file if it grows too
3351  * large.
3352  * ----------
3353  */
3354 
3355 /* Per-trigger SET CONSTRAINT status */
3356 typedef struct SetConstraintTriggerData
3357 {
3358  Oid sct_tgoid;
3359  bool sct_tgisdeferred;
3360 } SetConstraintTriggerData;
3361 
3362 typedef struct SetConstraintTriggerData *SetConstraintTrigger;
3363 
3364 /*
3365  * SET CONSTRAINT intra-transaction status.
3366  *
3367  * We make this a single palloc'd object so it can be copied and freed easily.
3368  *
3369  * all_isset and all_isdeferred are used to keep track
3370  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3371  *
3372  * trigstates[] stores per-trigger tgisdeferred settings.
3373  */
3374 typedef struct SetConstraintStateData
3375 {
3376  bool all_isset;
3377  bool all_isdeferred;
3378  int numstates; /* number of trigstates[] entries in use */
3379  int numalloc; /* allocated size of trigstates[] */
3380  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3381 } SetConstraintStateData;
3382 
3383 typedef SetConstraintStateData *SetConstraintState;
3384 
3385 
3386 /*
3387  * Per-trigger-event data
3388  *
3389  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3390  * status bits and up to two tuple CTIDs. Each event record also has an
3391  * associated AfterTriggerSharedData that is shared across all instances of
3392  * similar events within a "chunk".
3393  *
3394  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3395  * fields. Updates of regular tables use two; inserts and deletes of regular
3396  * tables use one; foreign tables always use zero and save the tuple(s) to a
3397  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3398  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3399  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3400  * tuple(s). This permits storing tuples once regardless of the number of
3401  * row-level triggers on a foreign table.
3402  *
3403  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3404  * require no ctid field. We lack the flag bit space to neatly represent that
3405  * distinct case, and it seems unlikely to be worth much trouble.
3406  *
3407  * Note: ats_firing_id is initially zero and is set to something else when
3408  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3409  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3410  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3411  * because all instances of the same type of event in a given event list will
3412  * be fired at the same time, if they were queued between the same firing
3413  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3414  * a new event to an existing AfterTriggerSharedData record.
3415  */
3417 
3418 #define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order bits */
3419 #define AFTER_TRIGGER_DONE 0x10000000
3420 #define AFTER_TRIGGER_IN_PROGRESS 0x20000000
3421 /* bits describing the size and tuple sources of this event */
3422 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3423 #define AFTER_TRIGGER_FDW_FETCH 0x80000000
3424 #define AFTER_TRIGGER_1CTID 0x40000000
3425 #define AFTER_TRIGGER_2CTID 0xC0000000
3426 #define AFTER_TRIGGER_TUP_BITS 0xC0000000
3427 
3429 
3431 {
3432  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3433  Oid ats_tgoid; /* the trigger's ID */
3434  Oid ats_relid; /* the relation it's on */
3435  CommandId ats_firing_id; /* ID for firing cycle */
3438 
3440 
3442 {
3443  TriggerFlags ate_flags; /* status bits and offset to shared data */
3444  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3445  ItemPointerData ate_ctid2; /* new updated tuple */
3447 
3448 /* AfterTriggerEventData, minus ate_ctid2 */
3450 {
3451  TriggerFlags ate_flags; /* status bits and offset to shared data */
3452  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3454 
3455 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
3457 {
3458  TriggerFlags ate_flags; /* status bits and offset to shared data */
3460 
3461 #define SizeofTriggerEvent(evt) \
3462  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3463  sizeof(AfterTriggerEventData) : \
3464  ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3465  sizeof(AfterTriggerEventDataOneCtid) : \
3466  sizeof(AfterTriggerEventDataZeroCtids))
3467 
3468 #define GetTriggerSharedData(evt) \
3469  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3470 
3471 /*
3472  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3473  * larger chunks (a slightly more sophisticated version of an expansible
3474  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3475  * AfterTriggerEventData records; the space between endfree and endptr is
3476  * occupied by AfterTriggerSharedData records.
3477  */
3479 {
3480  struct AfterTriggerEventChunk *next; /* list link */
3481  char *freeptr; /* start of free space in chunk */
3482  char *endfree; /* end of free space in chunk */
3483  char *endptr; /* end of chunk */
3484  /* event data follows here */
3486 
3487 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3488 
3489 /* A list of events */
3491 {
3494  char *tailfree; /* freeptr of tail chunk */
3496 
3497 /* Macros to help in iterating over a list of events */
3498 #define for_each_chunk(cptr, evtlist) \
3499  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3500 #define for_each_event(eptr, cptr) \
3501  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3502  (char *) eptr < (cptr)->freeptr; \
3503  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3504 /* Use this if no special per-chunk processing is needed */
3505 #define for_each_event_chunk(eptr, cptr, evtlist) \
3506  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3507 
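The chunk layout described above can be modeled outside the backend. The following self-contained sketch (toy types and sizes, not the real structures) packs events from the front of a chunk and shared records from the back, with each event storing the byte offset to its shared record; the real code also folds status and size bits into the same flag word, and searches all shared records in the chunk rather than only the last one:

#include <stdio.h>
#include <stdint.h>

typedef struct ToyShared
{
    int         trigger_oid;       /* stands in for the shared trigger data */
} ToyShared;

typedef struct ToyEvent
{
    uint32_t    flags;             /* offset from this event to its ToyShared */
} ToyEvent;

typedef struct ToyChunk
{
    char       *freeptr;           /* next free byte at the front */
    char       *endfree;           /* first used byte at the back */
    char       *endptr;            /* end of the chunk */
    char        data[1024];
} ToyChunk;

static void
add_event(ToyChunk *chunk, int trigger_oid)
{
    ToyShared  *shared = (ToyShared *) chunk->endfree;
    ToyEvent   *event;

    /* Reuse the most recent shared record if it matches, else add one. */
    if ((char *) shared >= chunk->endptr || shared->trigger_oid != trigger_oid)
    {
        shared = ((ToyShared *) chunk->endfree) - 1;
        shared->trigger_oid = trigger_oid;
        chunk->endfree = (char *) shared;
    }

    event = (ToyEvent *) chunk->freeptr;
    event->flags = (uint32_t) ((char *) shared - (char *) event);
    chunk->freeptr += sizeof(ToyEvent);
}

int
main(void)
{
    ToyChunk    chunk;

    chunk.freeptr = chunk.data;
    chunk.endfree = chunk.endptr = chunk.data + sizeof(chunk.data);

    add_event(&chunk, 1001);
    add_event(&chunk, 1001);       /* shares the first ToyShared record */
    add_event(&chunk, 1002);

    for (char *p = chunk.data; p < chunk.freeptr; p += sizeof(ToyEvent))
    {
        ToyEvent   *event = (ToyEvent *) p;
        ToyShared  *shared = (ToyShared *) ((char *) event + event->flags);

        printf("event at offset %ld fires trigger %d\n",
               (long) (p - chunk.data), shared->trigger_oid);
    }
    return 0;
}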
3508 
3509 /*
3510  * All per-transaction data for the AFTER TRIGGERS module.
3511  *
3512  * AfterTriggersData has the following fields:
3513  *
3514  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3515  * We mark firable events with the current firing cycle's ID so that we can
3516  * tell which ones to work on. This ensures sane behavior if a trigger
3517  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3518  * only fire those events that weren't already scheduled for firing.
3519  *
3520  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3521  * This is saved and restored across failed subtransactions.
3522  *
3523  * events is the current list of deferred events. This is global across
3524  * all subtransactions of the current transaction. In a subtransaction
3525  * abort, we know that the events added by the subtransaction are at the
3526  * end of the list, so it is relatively easy to discard them. The event
3527  * list chunks themselves are stored in event_cxt.
3528  *
3529  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3530  * (-1 when the stack is empty).
3531  *
3532  * query_stack[query_depth] is a list of AFTER trigger events queued by the
3533  * current query (and the query_stack entries below it are lists of trigger
3534  * events queued by calling queries). None of these are valid until the
3535  * matching AfterTriggerEndQuery call occurs. At that point we fire
3536  * immediate-mode triggers, and append any deferred events to the main events
3537  * list.
3538  *
3539  * fdw_tuplestores[query_depth] is a tuplestore containing the foreign tuples
3540  * needed for the current query.
3541  *
3542  * maxquerydepth is just the allocated length of query_stack and the
3543  * tuplestores.
3544  *
3545  * state_stack is a stack of pointers to saved copies of the SET CONSTRAINTS
3546  * state data; each subtransaction level that modifies that state first
3547  * saves a copy, which we use to restore the state if we abort.
3548  *
3549  * events_stack is a stack of copies of the events head/tail pointers,
3550  * which we use to restore those values during subtransaction abort.
3551  *
3552  * depth_stack is a stack of copies of subtransaction-start-time query_depth,
3553  * which we similarly use to clean up at subtransaction abort.
3554  *
3555  * firing_stack is a stack of copies of subtransaction-start-time
3556  * firing_counter. We use this to recognize which deferred triggers were
3557  * fired (or marked for firing) within an aborted subtransaction.
3558  *
3559  * We use GetCurrentTransactionNestLevel() to determine the correct array
3560  * index in these stacks. maxtransdepth is the number of allocated entries in
3561  * each stack. (By not keeping our own stack pointer, we can avoid trouble
3562  * in cases where errors during subxact abort cause multiple invocations
3563  * of AfterTriggerEndSubXact() at the same nesting depth.)
3564  */
3565 typedef struct AfterTriggersData
3566 {
3567  CommandId firing_counter; /* next firing ID to assign */
3568  SetConstraintState state; /* the active S C state */
3569  AfterTriggerEventList events; /* deferred-event list */
3570  int query_depth; /* current query list index */
3571  AfterTriggerEventList *query_stack; /* events pending from each query */
3572  Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from
3573  * each query */
3574  int maxquerydepth; /* allocated len of above array */
3575  MemoryContext event_cxt; /* memory context for events, if any */
3576 
3577  /* these fields are just for resetting at subtrans abort: */
3578 
3579  SetConstraintState *state_stack; /* stacked S C states */
3580  AfterTriggerEventList *events_stack; /* stacked list pointers */
3581  int *depth_stack; /* stacked query_depths */
3582  CommandId *firing_stack; /* stacked firing_counters */
3583  int maxtransdepth; /* allocated len of above arrays */
3584 } AfterTriggersData;
3585 
3586 static AfterTriggersData afterTriggers;
3587 
3588 static void AfterTriggerExecute(AfterTriggerEvent event,
3589  Relation rel, TriggerDesc *trigdesc,
3590  FmgrInfo *finfo,
3591  Instrumentation *instr,
3592  MemoryContext per_tuple_context,
3593  TupleTableSlot *trig_tuple_slot1,
3594  TupleTableSlot *trig_tuple_slot2,
3595  TransitionCaptureState *transition_capture);
3596 static SetConstraintState SetConstraintStateCreate(int numalloc);
3597 static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
3598 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3599  Oid tgoid, bool tgisdeferred);
3600 
3601 
3602 /*
3603  * Gets a current query transition tuplestore and initializes it if necessary.
3604  */
3605 static Tuplestorestate *
3607 {
3608  Tuplestorestate *ret;
3609 
3610  ret = tss[afterTriggers.query_depth];
3611  if (ret == NULL)
3612  {
3613  MemoryContext oldcxt;
3614  ResourceOwner saveResourceOwner;
3615 
3616  /*
3617  * Make the tuplestore valid until end of transaction. This is the
3618  * allocation lifespan of the associated events list, but we really
3619  * only need it until AfterTriggerEndQuery().
3620  */
3621  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
3622  saveResourceOwner = CurrentResourceOwner;
3623  PG_TRY();
3624  {
3625  CurrentResourceOwner = CurTransactionResourceOwner;
3626  ret = tuplestore_begin_heap(false, false, work_mem);
3627  }
3628  PG_CATCH();
3629  {
3630  CurrentResourceOwner = saveResourceOwner;
3631  PG_RE_THROW();
3632  }
3633  PG_END_TRY();
3634  CurrentResourceOwner = saveResourceOwner;
3635  MemoryContextSwitchTo(oldcxt);
3636 
3637  tss[afterTriggers.query_depth] = ret;
3638  }
3639 
3640  return ret;
3641 }
3642 
3643 /* ----------
3644  * afterTriggerCheckState()
3645  *
3646  * Returns true if the trigger event is actually in state DEFERRED.
3647  * ----------
3648  */
3649 static bool
3650 afterTriggerCheckState(AfterTriggerShared evtshared)
3651 {
3652  Oid tgoid = evtshared->ats_tgoid;
3653  SetConstraintState state = afterTriggers.state;
3654  int i;
3655 
3656  /*
3657  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3658  * constraints declared NOT DEFERRABLE), the state is always false.
3659  */
3660  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3661  return false;
3662 
3663  /*
3664  * If constraint state exists, SET CONSTRAINTS might have been executed
3665  * either for this trigger or for all triggers.
3666  */
3667  if (state != NULL)
3668  {
3669  /* Check for SET CONSTRAINTS for this specific trigger. */
3670  for (i = 0; i < state->numstates; i++)
3671  {
3672  if (state->trigstates[i].sct_tgoid == tgoid)
3673  return state->trigstates[i].sct_tgisdeferred;
3674  }
3675 
3676  /* Check for SET CONSTRAINTS ALL. */
3677  if (state->all_isset)
3678  return state->all_isdeferred;
3679  }
3680 
3681  /*
3682  * Otherwise return the default state for the trigger.
3683  */
3684  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3685 }
3686 
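The resolution order implemented above is: a non-deferrable trigger is never deferred, an explicit per-trigger SET CONSTRAINTS setting wins next, then SET CONSTRAINTS ALL, and otherwise the trigger's INITIALLY DEFERRED default applies. A self-contained toy model of that decision (plain C, not backend code):

#include <stdio.h>
#include <stdbool.h>

typedef struct ToyConstraintState
{
    bool        has_trigger_setting;   /* SET CONSTRAINTS named this trigger */
    bool        trigger_isdeferred;
    bool        all_isset;             /* SET CONSTRAINTS ALL was executed */
    bool        all_isdeferred;
} ToyConstraintState;

static bool
event_is_deferred(bool deferrable, bool initdeferred,
                  const ToyConstraintState *state)
{
    if (!deferrable)
        return false;                  /* never deferred */
    if (state != NULL)
    {
        if (state->has_trigger_setting)
            return state->trigger_isdeferred;
        if (state->all_isset)
            return state->all_isdeferred;
    }
    return initdeferred;               /* trigger's declared default */
}

int
main(void)
{
    ToyConstraintState all_deferred = {false, false, true, true};

    /* DEFERRABLE INITIALLY IMMEDIATE after SET CONSTRAINTS ALL DEFERRED */
    printf("%d\n", event_is_deferred(true, false, &all_deferred));   /* 1 */
    /* NOT DEFERRABLE trigger is never deferred */
    printf("%d\n", event_is_deferred(false, false, &all_deferred));  /* 0 */
    return 0;
}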
3687 
3688 /* ----------
3689  * afterTriggerAddEvent()
3690  *
3691  * Add a new trigger event to the specified queue.
3692  * The passed-in event data is copied.
3693  * ----------
3694  */
3695 static void
3696 afterTriggerAddEvent(AfterTriggerEventList *events,
3697  AfterTriggerEvent event, AfterTriggerShared evtshared)
3698 {
3699  Size eventsize = SizeofTriggerEvent(event);
3700  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3701  AfterTriggerEventChunk *chunk;
3702  AfterTriggerShared newshared;
3703  AfterTriggerEvent newevent;
3704 
3705  /*
3706  * If empty list or not enough room in the tail chunk, make a new chunk.
3707  * We assume here that a new shared record will always be needed.
3708  */
3709  chunk = events->tail;
3710  if (chunk == NULL ||
3711  chunk->endfree - chunk->freeptr < needed)
3712  {
3713  Size chunksize;
3714 
3715  /* Create event context if we didn't already */
3716  if (afterTriggers.event_cxt == NULL)
3717  afterTriggers.event_cxt =
3719  "AfterTriggerEvents",
3721 
3722  /*
3723  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3724  * These numbers are fairly arbitrary, though there is a hard limit at
3725  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3726  * shared records using the available space in ate_flags. Another
3727  * constraint is that if the chunk size gets too huge, the search loop
3728  * below would get slow given a (not too common) usage pattern with
3729  * many distinct event types in a chunk. Therefore, we double the
3730  * preceding chunk size only if there weren't too many shared records
3731  * in the preceding chunk; otherwise we halve it. This gives us some
3732  * ability to adapt to the actual usage pattern of the current query
3733  * while still having large chunk sizes in typical usage. All chunk
3734  * sizes used should be MAXALIGN multiples, to ensure that the shared
3735  * records will be aligned safely.
3736  */
3737 #define MIN_CHUNK_SIZE 1024
3738 #define MAX_CHUNK_SIZE (1024*1024)
3739 
3740 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3741 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3742 #endif
3743 
3744  if (chunk == NULL)
3745  chunksize = MIN_CHUNK_SIZE;
3746  else
3747  {
3748  /* preceding chunk size... */
3749  chunksize = chunk->endptr - (char *) chunk;
3750  /* check number of shared records in preceding chunk */
3751  if ((chunk->endptr - chunk->endfree) <=
3752  (100 * sizeof(AfterTriggerSharedData)))
3753  chunksize *= 2; /* okay, double it */
3754  else
3755  chunksize /= 2; /* too many shared records */
3756  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
3757  }
3758  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
3759  chunk->next = NULL;
3760  chunk->freeptr = CHUNK_DATA_START(chunk);
3761  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
3762  Assert(chunk->endfree - chunk->freeptr >= needed);
3763 
3764  if (events->head == NULL)
3765  events->head = chunk;
3766  else
3767  events->tail->next = chunk;
3768  events->tail = chunk;
3769  /* events->tailfree is now out of sync, but we'll fix it below */
3770  }
3771 
3772  /*
3773  * Try to locate a matching shared-data record already in the chunk. If
3774  * none, make a new one.
3775  */
3776  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
3777  (char *) newshared >= chunk->endfree;
3778  newshared--)
3779  {
3780  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
3781  newshared->ats_relid == evtshared->ats_relid &&
3782  newshared->ats_event == evtshared->ats_event &&
3783  newshared->ats_transition_capture == evtshared->ats_transition_capture &&
3784  newshared->ats_firing_id == 0)
3785  break;
3786  }
3787  if ((char *) newshared < chunk->endfree)
3788  {
3789  *newshared = *evtshared;
3790  newshared->ats_firing_id = 0; /* just to be sure */
3791  chunk->endfree = (char *) newshared;
3792  }
3793 
3794  /* Insert the data */
3795  newevent = (AfterTriggerEvent) chunk->freeptr;
3796  memcpy(newevent, event, eventsize);
3797  /* ... and link the new event to its shared record */
3798  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
3799  newevent->ate_flags |= (char *) newshared - (char *) newevent;
3800 
3801  chunk->freeptr += eventsize;
3802  events->tailfree = chunk->freeptr;
3803 }
3804 
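afterTriggerAddEvent packs two kinds of records into a single chunk: fixed-size events grow forward from the front while shared records grow backward from the end, and each event remembers its shared record as a byte offset so no pointer field is needed. A much-simplified standalone sketch of that layout follows (invented Chunk/Event/Shared types; MAXALIGN, flag bits, and chunk sizing are omitted).

/* Illustrative standalone sketch; not part of trigger.c. */
#include <stdio.h>

typedef struct { unsigned shared_offset; int payload; } Event;
typedef struct { unsigned tgoid; } Shared;

typedef struct
{
    char   *freeptr;            /* next free byte for events */
    char   *endfree;            /* last free byte + 1; shared recs above it */
    char   *endptr;             /* end of chunk */
    char    data[1024];
} Chunk;

static void
add_event(Chunk *chunk, int payload, unsigned tgoid)
{
    Shared *shared;
    Event  *event;

    /* reuse a matching shared record if one exists, else allocate one */
    for (shared = (Shared *) chunk->endfree;
         (char *) shared < chunk->endptr;
         shared++)
        if (shared->tgoid == tgoid)
            break;
    if ((char *) shared >= chunk->endptr)
    {
        chunk->endfree -= sizeof(Shared);
        shared = (Shared *) chunk->endfree;
        shared->tgoid = tgoid;
    }

    /* append the event at the front and link it by byte offset */
    event = (Event *) chunk->freeptr;
    event->payload = payload;
    event->shared_offset = (unsigned) ((char *) shared - (char *) event);
    chunk->freeptr += sizeof(Event);
}

int
main(void)
{
    Chunk   chunk;
    Event  *ev;

    chunk.freeptr = chunk.data;
    chunk.endfree = chunk.endptr = chunk.data + sizeof(chunk.data);

    add_event(&chunk, 1, 42);
    add_event(&chunk, 2, 42);   /* shares the same shared record */

    ev = (Event *) chunk.data;
    printf("tgoid via offset: %u\n",
           ((Shared *) ((char *) ev + ev->shared_offset))->tgoid);
    return 0;
}
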
3805 /* ----------
3806  * afterTriggerFreeEventList()
3807  *
3808  * Free all the event storage in the given list.
3809  * ----------
3810  */
3811 static void
3813 {
3814  AfterTriggerEventChunk *chunk;
3815  AfterTriggerEventChunk *next_chunk;
3816 
3817  for (chunk = events->head; chunk != NULL; chunk = next_chunk)
3818  {
3819  next_chunk = chunk->next;
3820  pfree(chunk);
3821  }
3822  events->head = NULL;
3823  events->tail = NULL;
3824  events->tailfree = NULL;
3825 }
3826 
3827 /* ----------
3828  * afterTriggerRestoreEventList()
3829  *
3830  * Restore an event list to its prior length, removing all the events
3831  * added since it had the value old_events.
3832  * ----------
3833  */
3834 static void
3836  const AfterTriggerEventList *old_events)
3837 {
3838  AfterTriggerEventChunk *chunk;
3839  AfterTriggerEventChunk *next_chunk;
3840 
3841  if (old_events->tail == NULL)
3842  {
3843  /* restoring to a completely empty state, so free everything */
3844  afterTriggerFreeEventList(events);
3845  }
3846  else
3847  {
3848  *events = *old_events;
3849  /* free any chunks after the last one we want to keep */
3850  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3851  {
3852  next_chunk = chunk->next;
3853  pfree(chunk);
3854  }
3855  /* and clean up the tail chunk to be the right length */
3856  events->tail->next = NULL;
3857  events->tail->freeptr = events->tailfree;
3858 
3859  /*
3860  * We don't make any effort to remove now-unused shared data records.
3861  * They might still be useful, anyway.
3862  */
3863  }
3864 }
3865 
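Restoring works by snapshotting the list header (the tail chunk plus its fill pointer), then later freeing any chunks linked after that tail and truncating the tail back to the saved mark. A standalone sketch of the same idea for a generic chunked list follows, assuming the snapshot was taken while the list was non-empty (the empty case is simply "free everything", as above).

/* Illustrative standalone sketch; not part of trigger.c. */
#include <stdio.h>
#include <stdlib.h>

typedef struct Chunk
{
    struct Chunk *next;
    int     nused;              /* how many items are stored in the chunk */
} Chunk;

typedef struct { Chunk *head; Chunk *tail; int tail_nused; } List;

static void
list_restore(List *list, const List *saved)
{
    Chunk  *chunk, *next;

    *list = *saved;
    for (chunk = list->tail->next; chunk != NULL; chunk = next)
    {
        next = chunk->next;     /* free everything added after the mark */
        free(chunk);
    }
    list->tail->next = NULL;
    list->tail->nused = list->tail_nused;   /* truncate the old tail */
}

int
main(void)
{
    Chunk  *c1 = calloc(1, sizeof(Chunk));
    Chunk  *c2 = calloc(1, sizeof(Chunk));
    List    list, saved;

    c1->nused = 3;
    list.head = list.tail = c1;
    list.tail_nused = 3;
    saved = list;               /* take the snapshot */

    c1->nused = 5;              /* add items, then a whole new chunk */
    c1->next = c2;
    list.tail = c2;

    list_restore(&list, &saved);
    printf("tail items after restore: %d\n", list.tail->nused);
    free(c1);
    return 0;
}
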
3866 
3867 /* ----------
3868  * AfterTriggerExecute()
3869  *
3870  * Fetch the required tuples back from the heap and fire one
3871  * single trigger function.
3872  *
3873  * Frequently, this will be fired many times in a row for triggers of
3874  * a single relation. Therefore, we cache the open relation and provide
3875  * fmgr lookup cache space at the caller level. (For triggers fired at
3876  * the end of a query, we can even piggyback on the executor's state.)
3877  *
3878  * event: event currently being fired.
3879  * rel: open relation for event.
3880  * trigdesc: working copy of rel's trigger info.
3881  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
3882  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
3883  * or NULL if no instrumentation is wanted.
3884  * per_tuple_context: memory context to call trigger function in.
3885  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
3886  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
3887  * ----------
3888  */
3889 static void
3890 AfterTriggerExecute(AfterTriggerEvent event,
3891  Relation rel, TriggerDesc *trigdesc,
3892  FmgrInfo *finfo, Instrumentation *instr,
3893  MemoryContext per_tuple_context,
3894  TupleTableSlot *trig_tuple_slot1,
3895  TupleTableSlot *trig_tuple_slot2,
3896  TransitionCaptureState *transition_capture)
3897 {
3898  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3899  Oid tgoid = evtshared->ats_tgoid;
3900  TriggerData LocTriggerData;
3901  HeapTupleData tuple1;
3902  HeapTupleData tuple2;
3903  HeapTuple rettuple;
3904  Buffer buffer1 = InvalidBuffer;
3905  Buffer buffer2 = InvalidBuffer;
3906  int tgindx;
3907 
3908  /*
3909  * Locate trigger in trigdesc.
3910  */
3911  LocTriggerData.tg_trigger = NULL;
3912  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
3913  {
3914  if (trigdesc->triggers[tgindx].tgoid == tgoid)
3915  {
3916  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
3917  break;
3918  }
3919  }
3920  if (LocTriggerData.tg_trigger == NULL)
3921  elog(ERROR, "could not find trigger %u", tgoid);
3922 
3923  /*
3924  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
3925  * to include time spent re-fetching tuples in the trigger cost.
3926  */
3927  if (instr)
3928  InstrStartNode(instr + tgindx);
3929 
3930  /*
3931  * Fetch the required tuple(s).
3932  */
3933  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
3934  {
3936  {
3937  Tuplestorestate *fdw_tuplestore =
3939  (afterTriggers.fdw_tuplestores);
3940 
3941  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
3942  trig_tuple_slot1))
3943  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3944 
3945  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3947  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
3948  trig_tuple_slot2))
3949  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3950  }
3951  /* fall through */
3953 
3954  /*
3955  * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple()
3956  * ensures that tg_trigtuple does not reference tuplestore memory.
3957  * (It is formally possible for the trigger function to queue
3958  * trigger events that add to the same tuplestore, which can push
3959  * other tuples out of memory.) The distinction is academic,
3960  * because we start with a minimal tuple that ExecFetchSlotTuple()
3961  * must materialize anyway.
3962  */
3963  LocTriggerData.tg_trigtuple =
3964  ExecMaterializeSlot(trig_tuple_slot1);
3965  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3966 
3967  LocTriggerData.tg_newtuple =
3968  ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3970  ExecMaterializeSlot(trig_tuple_slot2) : NULL;
3971  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3972 
3973  break;
3974 
3975  default:
3976  if (ItemPointerIsValid(&(event->ate_ctid1)))
3977  {
3978  ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
3979  if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer1, false, NULL))
3980  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3981  LocTriggerData.tg_trigtuple = &tuple1;
3982  LocTriggerData.tg_trigtuplebuf = buffer1;
3983  }
3984  else
3985  {
3986  LocTriggerData.tg_trigtuple = NULL;
3987  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3988  }
3989 
3990  /* don't touch ctid2 if not there */
3991  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
3993  ItemPointerIsValid(&(event->ate_ctid2)))
3994  {
3995  ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
3996  if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer2, false, NULL))
3997  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3998  LocTriggerData.tg_newtuple = &tuple2;
3999  LocTriggerData.tg_newtuplebuf = buffer2;
4000  }
4001  else
4002  {
4003  LocTriggerData.tg_newtuple = NULL;
4004  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
4005  }
4006  }
4007 
4008  /*
4009  * Set up the tuplestore information.
4010  */
4011  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4012  if (transition_capture != NULL)
4013  {
4014  if (LocTriggerData.tg_trigger->tgoldtable)
4015  LocTriggerData.tg_oldtable = transition_capture->tcs_old_tuplestore;
4016  if (LocTriggerData.tg_trigger->tgnewtable)
4017  {
4018  /*
4019  * Currently a trigger with transition tables may only be defined
4020  * for a single event type (here AFTER INSERT or AFTER UPDATE, but
4021  * not AFTER INSERT OR ...).
4022  */
4023  Assert((TRIGGER_FOR_INSERT(LocTriggerData.tg_trigger->tgtype) != 0) ^
4024  (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype) != 0));
4025 
4026  /*
4027  * Show either the insert or update new tuple images, depending on
4028  * which event type the trigger was registered for. A single
4029  * statement may have produced both in the case of INSERT ... ON
4030  * CONFLICT ... DO UPDATE, and in that case the event determines
4031  * which tuplestore the trigger sees as the NEW TABLE.
4032  */
4033  if (TRIGGER_FOR_INSERT(LocTriggerData.tg_trigger->tgtype))
4034  LocTriggerData.tg_newtable =
4035  transition_capture->tcs_insert_tuplestore;
4036  else
4037  LocTriggerData.tg_newtable =
4038  transition_capture->tcs_update_tuplestore;
4039  }
4040  }
4041 
4042  /*
4043  * Setup the remaining trigger information
4044  */
4045  LocTriggerData.type = T_TriggerData;
4046  LocTriggerData.tg_event =
4048  LocTriggerData.tg_relation = rel;
4049 
4050  MemoryContextReset(per_tuple_context);
4051 
4052  /*
4053  * Call the trigger and throw away any possibly returned updated tuple.
4054  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4055  */
4056  rettuple = ExecCallTriggerFunc(&LocTriggerData,
4057  tgindx,
4058  finfo,
4059  NULL,
4060  per_tuple_context);
4061  if (rettuple != NULL &&
4062  rettuple != LocTriggerData.tg_trigtuple &&
4063  rettuple != LocTriggerData.tg_newtuple)
4064  heap_freetuple(rettuple);
4065 
4066  /*
4067  * Release buffers
4068  */
4069  if (buffer1 != InvalidBuffer)
4070  ReleaseBuffer(buffer1);
4071  if (buffer2 != InvalidBuffer)
4072  ReleaseBuffer(buffer2);
4073 
4074  /*
4075  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4076  * one "tuple returned" (really the number of firings).
4077  */
4078  if (instr)
4079  InstrStopNode(instr + tgindx, 1);
4080 }
4081 
4082 
4083 /*
4084  * afterTriggerMarkEvents()
4085  *
4086  * Scan the given event list for not yet invoked events. Mark the ones
4087  * that can be invoked now with the current firing ID.
4088  *
4089  * If move_list isn't NULL, events that are not to be invoked now are
4090  * transferred to move_list.
4091  *
4092  * When immediate_only is TRUE, do not invoke currently-deferred triggers.
4093  * (This will be FALSE only at main transaction exit.)
4094  *
4095  * Returns TRUE if any invokable events were found.
4096  */
4097 static bool
4099  AfterTriggerEventList *move_list,
4100  bool immediate_only)
4101 {
4102  bool found = false;
4103  AfterTriggerEvent event;
4104  AfterTriggerEventChunk *chunk;
4105 
4106  for_each_event_chunk(event, chunk, *events)
4107  {
4108  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4109  bool defer_it = false;
4110 
4111  if (!(event->ate_flags &
4113  {
4114  /*
4115  * This trigger hasn't been called or scheduled yet. Check if we
4116  * should call it now.
4117  */
4118  if (immediate_only && afterTriggerCheckState(evtshared))
4119  {
4120  defer_it = true;
4121  }
4122  else
4123  {
4124  /*
4125  * Mark it as to be fired in this firing cycle.
4126  */
4127  evtshared->ats_firing_id = afterTriggers.firing_counter;
4128  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4129  found = true;
4130  }
4131  }
4132 
4133  /*
4134  * If it's deferred, move it to move_list, if requested.
4135  */
4136  if (defer_it && move_list != NULL)
4137  {
4138  /* add it to move_list */
4139  afterTriggerAddEvent(move_list, event, evtshared);
4140  /* mark original copy "done" so we don't do it again */
4141  event->ate_flags |= AFTER_TRIGGER_DONE;
4142  }
4143  }
4144 
4145  return found;
4146 }
4147 
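The mark phase stamps each undecided event with the current firing id or, if it must stay deferred, copies it to the move list and marks the original done so it is not revisited. Below is a standalone sketch of that decision, with a plain array standing in for the chunked event list and invented flag names.

/* Illustrative standalone sketch; not part of trigger.c. */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_IN_PROGRESS  0x1
#define FLAG_DONE         0x2

typedef struct { int flags; unsigned firing_id; bool deferred; } Ev;

static bool
mark_events(Ev *events, int n, Ev *moved, int *nmoved, unsigned firing_id,
            bool immediate_only)
{
    bool    found = false;
    int     i;

    for (i = 0; i < n; i++)
    {
        Ev *ev = &events[i];

        if (ev->flags & (FLAG_IN_PROGRESS | FLAG_DONE))
            continue;           /* already handled in an earlier cycle */
        if (immediate_only && ev->deferred)
        {
            moved[(*nmoved)++] = *ev;   /* keep it for transaction end */
            ev->flags |= FLAG_DONE;
        }
        else
        {
            ev->firing_id = firing_id;  /* fire it in this cycle */
            ev->flags |= FLAG_IN_PROGRESS;
            found = true;
        }
    }
    return found;
}

int
main(void)
{
    Ev      events[2] = {{0, 0, false}, {0, 0, true}};
    Ev      moved[2];
    int     nmoved = 0;

    printf("fire now: %d, deferred: %d\n",
           mark_events(events, 2, moved, &nmoved, 1, true), nmoved);
    return 0;
}
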
4148 /*
4149  * afterTriggerInvokeEvents()
4150  *
4151  * Scan the given event list for events that are marked as to be fired
4152  * in the current firing cycle, and fire them.
4153  *
4154  * If estate isn't NULL, we use its result relation info to avoid repeatedly
4155  * opening and closing trigger target relations. If it is NULL, we
4156  * make one locally to cache the info in case there are multiple trigger
4157  * events per rel.
4158  *
4159  * When delete_ok is TRUE, it's safe to delete fully-processed events.
4160  * (We are not very strict about that: we simply reset a chunk to be empty
4161  * if all its events got fired. The objective here is just to avoid useless
4162  * rescanning of events when a trigger queues new events during transaction
4163  * end, so it's not necessary to worry much about the case where only
4164  * some events are fired.)
4165  *
4166  * Returns TRUE if no unfired events remain in the list (this allows us
4167  * to avoid repeating afterTriggerMarkEvents).
4168  */
4169 static bool
4171  CommandId firing_id,
4172  EState *estate,
4173  bool delete_ok)
4174 {
4175  bool all_fired = true;
4176  AfterTriggerEventChunk *chunk;
4177  MemoryContext per_tuple_context;
4178  bool local_estate = false;
4179  Relation rel = NULL;
4180  TriggerDesc *trigdesc = NULL;
4181  FmgrInfo *finfo = NULL;
4182  Instrumentation *instr = NULL;
4183  TupleTableSlot *slot1 = NULL,
4184  *slot2 = NULL;
4185 
4186  /* Make a local EState if need be */
4187  if (estate == NULL)
4188  {
4189  estate = CreateExecutorState();
4190  local_estate = true;
4191  }
4192 
4193  /* Make a per-tuple memory context for trigger function calls */
4194  per_tuple_context =
4196  "AfterTriggerTupleContext",
4198 
4199  for_each_chunk(chunk, *events)
4200  {
4201  AfterTriggerEvent event;
4202  bool all_fired_in_chunk = true;
4203 
4204  for_each_event(event, chunk)
4205  {
4206  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4207 
4208  /*
4209  * Is it one for me to fire?
4210  */
4211  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4212  evtshared->ats_firing_id == firing_id)
4213  {
4214  /*
4215  * So let's fire it... but first, find the correct relation if
4216  * this is not the same relation as before.
4217  */
4218  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4219  {
4220  ResultRelInfo *rInfo;
4221 
4222  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
4223  rel = rInfo->ri_RelationDesc;
4224  trigdesc = rInfo->ri_TrigDesc;
4225  finfo = rInfo->ri_TrigFunctions;
4226  instr = rInfo->ri_TrigInstrument;
4227  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4228  {
4229  if (slot1 != NULL)
4230  {
4233  }
4234  slot1 = MakeSingleTupleTableSlot(rel->rd_att);
4235  slot2 = MakeSingleTupleTableSlot(rel->rd_att);
4236  }
4237  if (trigdesc == NULL) /* should not happen */
4238  elog(ERROR, "relation %u has no triggers",
4239  evtshared->ats_relid);
4240  }
4241 
4242  /*
4243  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4244  * still set, so recursive examinations of the event list
4245  * won't try to re-fire it.
4246  */
4247  AfterTriggerExecute(event, rel, trigdesc, finfo, instr,
4248  per_tuple_context, slot1, slot2,
4249  evtshared->ats_transition_capture);
4250 
4251  /*
4252  * Mark the event as done.
4253  */
4254  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4255  event->ate_flags |= AFTER_TRIGGER_DONE;
4256  }
4257  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4258  {
4259  /* something remains to be done */
4260  all_fired = all_fired_in_chunk = false;
4261  }
4262  }
4263 
4264  /* Clear the chunk if delete_ok and nothing left of interest */
4265  if (delete_ok && all_fired_in_chunk)
4266  {
4267  chunk->freeptr = CHUNK_DATA_START(chunk);
4268  chunk->endfree = chunk->endptr;
4269 
4270  /*
4271  * If it's last chunk, must sync event list's tailfree too. Note
4272  * that delete_ok must NOT be passed as true if there could be
4273  * stacked AfterTriggerEventList values pointing at this event
4274  * list, since we'd fail to fix their copies of tailfree.
4275  */
4276  if (chunk == events->tail)
4277  events->tailfree = chunk->freeptr;
4278  }
4279  }
4280  if (slot1 != NULL)
4281  {
4284  }
4285 
4286  /* Release working resources */
4287  MemoryContextDelete(per_tuple_context);
4288 
4289  if (local_estate)
4290  {
4291  ExecCleanUpTriggerState(estate);
4292  FreeExecutorState(estate);
4293  }
4294 
4295  return all_fired;
4296 }
4297 
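While firing, the loop above re-resolves relation, trigger, and fmgr information only when an event's relation id differs from the previous one, since events for a single relation tend to arrive in runs. A toy standalone sketch of that cache-by-previous-id idea follows; RelInfo and get_rel_info are invented names, and the "lookup" is just a counter.

/* Illustrative standalone sketch; not part of trigger.c. */
#include <stdio.h>

typedef struct { unsigned relid; int lookups; } RelInfo;

static RelInfo *
get_rel_info(unsigned relid, RelInfo *cache)
{
    static int total_lookups = 0;

    if (cache == NULL || cache->relid != relid)
    {
        static RelInfo info;

        info.relid = relid;         /* pretend this is an expensive lookup */
        info.lookups = ++total_lookups;
        return &info;
    }
    return cache;                   /* same relation as last time: reuse */
}

int
main(void)
{
    unsigned    event_relids[] = {10, 10, 10, 20, 20};
    RelInfo    *cache = NULL;
    int         i;

    for (i = 0; i < 5; i++)
        cache = get_rel_info(event_relids[i], cache);
    printf("lookups performed: %d\n", cache->lookups);  /* 2, not 5 */
    return 0;
}
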
4298 
4299 /* ----------
4300  * AfterTriggerBeginXact()
4301  *
4302  * Called at transaction start (either BEGIN or implicit for single
4303  * statement outside of transaction block).
4304  * ----------
4305  */
4306 void
4308 {
4309  /*
4310  * Initialize after-trigger state structure to empty
4311  */
4312  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4313  afterTriggers.query_depth = -1;
4314 
4315  /*
4316  * Verify that there is no leftover state remaining. If these assertions
4317  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4318  * up properly.
4319  */
4320  Assert(afterTriggers.state == NULL);
4321  Assert(afterTriggers.query_stack == NULL);
4322  Assert(afterTriggers.fdw_tuplestores == NULL);
4323  Assert(afterTriggers.maxquerydepth == 0);
4324  Assert(afterTriggers.event_cxt == NULL);
4325  Assert(afterTriggers.events.head == NULL);
4326  Assert(afterTriggers.state_stack == NULL);
4327  Assert(afterTriggers.events_stack == NULL);
4328  Assert(afterTriggers.depth_stack == NULL);
4329  Assert(afterTriggers.firing_stack == NULL);
4330  Assert(afterTriggers.maxtransdepth == 0);
4331 }
4332 
4333 
4334 /* ----------
4335  * AfterTriggerBeginQuery()
4336  *
4337  * Called just before we start processing a single query within a
4338  * transaction (or subtransaction). Most of the real work gets deferred
4339  * until somebody actually tries to queue a trigger event.
4340  * ----------
4341  */
4342 void
4344 {
4345  /* Increase the query stack depth */
4346  afterTriggers.query_depth++;
4347 }
4348 
4349 
4350 /* ----------
4351  * AfterTriggerEndQuery()
4352  *
4353  * Called after one query has been completely processed. At this time
4354  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4355  * transfer deferred trigger events to the global deferred-trigger list.
4356  *
4357  * Note that this must be called BEFORE closing down the executor
4358  * with ExecutorEnd, because we make use of the EState's info about
4359  * target relations. Normally it is called from ExecutorFinish.
4360  * ----------
4361  */
4362 void
4364 {
4365  AfterTriggerEventList *events;
4366  Tuplestorestate *fdw_tuplestore;
4367 
4368  /* Must be inside a query, too */
4369  Assert(afterTriggers.query_depth >= 0);
4370 
4371  /*
4372  * If we never even got as far as initializing the event stack, there
4373  * certainly won't be any events, so exit quickly.
4374  */
4375  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4376  {
4377  afterTriggers.query_depth--;
4378  return;
4379  }
4380 
4381  /*
4382  * Process all immediate-mode triggers queued by the query, and move the
4383  * deferred ones to the main list of deferred events.
4384  *
4385  * Notice that we decide which ones will be fired, and put the deferred
4386  * ones on the main list, before anything is actually fired. This ensures
4387  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4388  * IMMEDIATE: all events we have decided to defer will be available for it
4389  * to fire.
4390  *
4391  * We loop in case a trigger queues more events at the same query level.
4392  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4393  * will instead fire any triggers in a dedicated query level. Foreign key
4394  * enforcement triggers do add to the current query level, thanks to their
4395  * passing fire_triggers = false to SPI_execute_snapshot(). Other
4396  * C-language triggers might do likewise. Be careful here: firing a
4397  * trigger could result in query_stack being repalloc'd, so we can't save
4398  * its address across afterTriggerInvokeEvents calls.
4399  *
4400  * If we find no firable events, we don't have to increment
4401  * firing_counter.
4402  */
4403  for (;;)
4404  {
4405  events = &afterTriggers.query_stack[afterTriggers.query_depth];
4406  if (afterTriggerMarkEvents(events, &afterTriggers.events, true))
4407  {
4408  CommandId firing_id = afterTriggers.firing_counter++;
4409 
4410  /* OK to delete the immediate events after processing them */
4411  if (afterTriggerInvokeEvents(events, firing_id, estate, true))
4412  break; /* all fired */
4413  }
4414  else
4415  break;
4416  }
4417 
4418  /* Release query-local storage for events, including tuplestore if any */
4419  fdw_tuplestore = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4420  if (fdw_tuplestore)
4421  {
4422  tuplestore_end(fdw_tuplestore);
4423  afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4424  }
4425  afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4426 
4427  afterTriggers.query_depth--;
4428 }
4429 
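The loop above must repeat because firing one batch of triggers can queue more events at the same query level; it keeps marking and invoking until a cycle reports that everything fired. Below is a standalone sketch of that mark/invoke retry loop, with stub functions standing in for afterTriggerMarkEvents and afterTriggerInvokeEvents.

/* Illustrative standalone sketch; not part of trigger.c. */
#include <stdbool.h>
#include <stdio.h>

static int pending = 3;                 /* events waiting to fire */

static bool
mark_events(void)
{
    return pending > 0;                 /* anything to fire this cycle? */
}

static bool
invoke_events(void)
{
    static bool queued_followup = false;

    pending = 0;                        /* all marked events fire */
    if (!queued_followup)
    {
        queued_followup = true;
        pending = 1;                    /* a fired trigger queued a new event */
        return false;                   /* so not everything is fired yet */
    }
    return true;                        /* all fired, nothing new queued */
}

int
main(void)
{
    int     cycles = 0;

    for (;;)
    {
        if (!mark_events())
            break;                      /* nothing left to fire */
        cycles++;
        if (invoke_events())
            break;                      /* all fired */
    }
    printf("firing cycles: %d\n", cycles);
    return 0;
}
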
4430 
4431 /* ----------
4432  * AfterTriggerFireDeferred()
4433  *
4434  * Called just before the current transaction is committed. At this
4435  * time we invoke all pending DEFERRED triggers.
4436  *
4437  * It is possible for other modules to queue additional deferred triggers
4438  * during pre-commit processing; therefore xact.c may have to call this
4439  * multiple times.
4440  * ----------
4441  */
4442 void
4444 {
4445  AfterTriggerEventList *events;
4446  bool snap_pushed = false;
4447 
4448  /* Must not be inside a query */
4449  Assert(afterTriggers.query_depth == -1);
4450 
4451  /*
4452  * If there are any triggers to fire, make sure we have set a snapshot for
4453  * them to use. (Since PortalRunUtility doesn't set a snapshot for COMMIT, we
4454  * can't assume ActiveSnapshot is valid on entry.)
4455  */
4456  events = &afterTriggers.events;
4457  if (events->head != NULL)
4458  {
4460  snap_pushed = true;
4461  }
4462 
4463  /*
4464  * Run all the remaining triggers. Loop until they are all gone, in case
4465  * some trigger queues more for us to do.
4466  */
4467  while (afterTriggerMarkEvents(events, NULL, false))
4468  {
4469  CommandId firing_id = afterTriggers.firing_counter++;
4470 
4471  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4472  break; /* all fired */
4473  }
4474 
4475  /*
4476  * We don't bother freeing the event list, since it will go away anyway
4477  * (and more efficiently than via pfree) in AfterTriggerEndXact.
4478  */
4479 
4480  if (snap_pushed)
4482 }
4483 
4484 
4485 /* ----------
4486  * AfterTriggerEndXact()
4487  *
4488  * The current transaction is finishing.
4489  *
4490  * Any unfired triggers are canceled so we simply throw
4491  * away anything we know.
4492  *
4493  * Note: it is possible for this to be called repeatedly in case of
4494  * error during transaction abort; therefore, do not complain if
4495  * already closed down.
4496  * ----------
4497  */
4498 void
4499 AfterTriggerEndXact(bool isCommit)
4500 {
4501  /*
4502  * Forget the pending-events list.
4503  *
4504  * Since all the info is in TopTransactionContext or children thereof, we
4505  * don't really need to do anything to reclaim memory. However, the
4506  * pending-events list could be large, and so it's useful to discard it as
4507  * soon as possible --- especially if we are aborting because we ran out
4508  * of memory for the list!
4509  */
4510  if (afterTriggers.event_cxt)
4511  {
4512  MemoryContextDelete(afterTriggers.event_cxt);
4513  afterTriggers.event_cxt = NULL;
4514  afterTriggers.events.head = NULL;
4515  afterTriggers.events.tail = NULL;
4516  afterTriggers.events.tailfree = NULL;
4517  }
4518 
4519  /*
4520  * Forget any subtransaction state as well. Since this can't be very
4521  * large, we let the eventual reset of TopTransactionContext free the
4522  * memory instead of doing it here.
4523  */
4524  afterTriggers.state_stack = NULL;
4525  afterTriggers.events_stack = NULL;
4526  afterTriggers.depth_stack = NULL;
4527  afterTriggers.firing_stack = NULL;
4528  afterTriggers.maxtransdepth = 0;
4529 
4530 
4531  /*
4532  * Forget the query stack and constraint-related state information. As
4533  * with the subtransaction state information, we don't bother freeing the
4534  * memory here.
4535  */
4536  afterTriggers.query_stack = NULL;
4537  afterTriggers.fdw_tuplestores = NULL;
4538  afterTriggers.maxquerydepth = 0;
4539  afterTriggers.state = NULL;
4540 
4541  /* No more afterTriggers manipulation until next transaction starts. */
4542  afterTriggers.query_depth = -1;
4543 }
4544 
4545 /*
4546  * AfterTriggerBeginSubXact()
4547  *
4548  * Start a subtransaction.
4549  */
4550 void
4552 {
4553  int my_level = GetCurrentTransactionNestLevel();
4554 
4555  /*
4556  * Allocate more space in the stacks if needed. (Note: because the
4557  * minimum nest level of a subtransaction is 2, we waste the first couple of
4558  * entries of each array; not worth the notational effort to avoid it.)
4559  */
4560  while (my_level >= afterTriggers.maxtransdepth)
4561  {
4562  if (afterTriggers.maxtransdepth == 0)
4563  {
4564  MemoryContext old_cxt;
4565 
4567 
4568 #define DEFTRIG_INITALLOC 8
4569  afterTriggers.state_stack = (SetConstraintState *)
4570  palloc(DEFTRIG_INITALLOC * sizeof(SetConstraintState));
4571  afterTriggers.events_stack = (AfterTriggerEventList *)
4573  afterTriggers.depth_stack = (int *)
4574  palloc(DEFTRIG_INITALLOC * sizeof(int));
4575  afterTriggers.firing_stack = (CommandId *)
4576  palloc(DEFTRIG_INITALLOC * sizeof(CommandId));
4577  afterTriggers.maxtransdepth = DEFTRIG_INITALLOC;
4578 
4579  MemoryContextSwitchTo(old_cxt);
4580  }
4581  else
4582  {
4583  /* repalloc will keep the stacks in the same context */
4584  int new_alloc = afterTriggers.maxtransdepth * 2;
4585 
4586  afterTriggers.state_stack = (SetConstraintState *)
4587  repalloc(afterTriggers.state_stack,
4588  new_alloc * sizeof(SetConstraintState));
4589  afterTriggers.events_stack = (AfterTriggerEventList *)
4590  repalloc(afterTriggers.events_stack,
4591  new_alloc * sizeof(AfterTriggerEventList));
4592  afterTriggers.depth_stack = (int *)
4593  repalloc(afterTriggers.depth_stack,
4594  new_alloc * sizeof(int));
4595  afterTriggers.firing_stack = (CommandId *)
4596  repalloc(afterTriggers.firing_stack,
4597  new_alloc * sizeof(CommandId));
4598  afterTriggers.maxtransdepth = new_alloc;
4599  }
4600  }
4601 
4602  /*
4603  * Push the current information into the stack. The SET CONSTRAINTS state
4604  * is not saved until/unless changed. Likewise, we don't make a
4605  * per-subtransaction event context until needed.
4606  */
4607  afterTriggers.state_stack[my_level] = NULL;
4608  afterTriggers.events_stack[my_level] = afterTriggers.events;
4609  afterTriggers.depth_stack[my_level] = afterTriggers.query_depth;
4610  afterTriggers.firing_stack[my_level] = afterTriggers.firing_counter;
4611 }
4612 
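The stacks above start at a small fixed size and are doubled with repalloc whenever a deeper nesting level is entered, then indexed directly by nesting level. A standalone sketch of that growth pattern follows, using plain realloc and only the depth stack; error handling is omitted for brevity.

/* Illustrative standalone sketch; not part of trigger.c. */
#include <stdio.h>
#include <stdlib.h>

static int *depth_stack = NULL;
static int  max_depth = 0;

static void
begin_level(int my_level, int current_depth)
{
    while (my_level >= max_depth)
    {
        int     new_alloc = (max_depth == 0) ? 8 : max_depth * 2;

        depth_stack = realloc(depth_stack, new_alloc * sizeof(int));
        max_depth = new_alloc;
    }
    depth_stack[my_level] = current_depth;      /* save state to restore */
}

int
main(void)
{
    begin_level(2, 0);
    begin_level(9, 4);          /* forces one doubling: 8 -> 16 */
    printf("allocated levels: %d, saved depth: %d\n",
           max_depth, depth_stack[9]);
    free(depth_stack);
    return 0;
}
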
4613 /*
4614  * AfterTriggerEndSubXact()
4615  *
4616  * The current subtransaction is ending.
4617  */
4618 void
4620 {
4621  int my_level = GetCurrentTransactionNestLevel();
4622  SetConstraintState state;
4623  AfterTriggerEvent event;
4624  AfterTriggerEventChunk *chunk;
4625  CommandId subxact_firing_id;
4626 
4627  /*
4628  * Pop the prior state if needed.
4629  */
4630  if (isCommit)
4631  {
4632  Assert(my_level < afterTriggers.maxtransdepth);
4633  /* If we saved a prior state, we don't need it anymore */
4634  state = afterTriggers.state_stack[my_level];
4635  if (state != NULL)
4636  pfree(state);
4637  /* this avoids double pfree if error later: */
4638  afterTriggers.state_stack[my_level] = NULL;
4639  Assert(afterTriggers.query_depth ==
4640  afterTriggers.depth_stack[my_level]);
4641  }
4642  else
4643  {
4644  /*
4645  * Aborting. It is possible subxact start failed before calling
4646  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
4647  * stack levels that aren't there.
4648  */
4649  if (my_level >= afterTriggers.maxtransdepth)
4650  return;
4651 
4652  /*
4653  * Release any event lists from queries being aborted, and restore
4654  * query_depth to its pre-subxact value. This assumes that a
4655  * subtransaction will not add events to query levels started in an
4656  * earlier transaction state.
4657  */
4658  while (afterTriggers.query_depth > afterTriggers.depth_stack[my_level])
4659  {
4660  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
4661  {
4662  Tuplestorestate *ts;
4663 
4664  ts = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4665  if (ts)
4666  {
4667  tuplestore_end(ts);
4668  afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4669  }
4670 
4671  afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4672  }
4673 
4674  afterTriggers.query_depth--;
4675  }
4676  Assert(afterTriggers.query_depth ==
4677  afterTriggers.depth_stack[my_level]);
4678 
4679  /*
4680  * Restore the global deferred-event list to its former length,
4681  * discarding any events queued by the subxact.
4682  */
4683  afterTriggerRestoreEventList(&afterTriggers.events,
4684  &afterTriggers.events_stack[my_level]);
4685 
4686  /*
4687  * Restore the trigger state. If the saved state is NULL, then this
4688  * subxact didn't save it, so it doesn't need restoring.
4689  */
4690  state = afterTriggers.state_stack[my_level];
4691  if (state != NULL)
4692  {
4693  pfree(afterTriggers.state);
4694  afterTriggers.state = state;
4695  }
4696  /* this avoids double pfree if error later: */
4697  afterTriggers.state_stack[my_level] = NULL;
4698 
4699  /*
4700  * Scan for any remaining deferred events that were marked DONE or IN
4701  * PROGRESS by this subxact or a child, and un-mark them. We can
4702  * recognize such events because they have a firing ID greater than or
4703  * equal to the firing_counter value we saved at subtransaction start.
4704  * (This essentially assumes that the current subxact includes all
4705  * subxacts started after it.)
4706  */
4707  subxact_firing_id = afterTriggers.firing_stack[my_level];
4708  for_each_event_chunk(event, chunk, afterTriggers.events)
4709  {
4710  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4711 
4712  if (event->ate_flags &
4714  {
4715  if (evtshared->ats_firing_id >= subxact_firing_id)
4716  event->ate_flags &=
4718  }
4719  }
4720  }
4721 }
4722 
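On abort, the firing counter saved at subtransaction start acts as a watermark: any DONE or IN PROGRESS event whose firing id is at or above it must have been marked inside the aborted subtransaction, so its flags are cleared again. A standalone sketch of that un-marking pass over a plain array of events follows.

/* Illustrative standalone sketch; not part of trigger.c. */
#include <stdio.h>

#define FLAG_IN_PROGRESS  0x1
#define FLAG_DONE         0x2

typedef struct { int flags; unsigned firing_id; } Ev;

static void
unmark_after_abort(Ev *events, int n, unsigned subxact_firing_id)
{
    int     i;

    for (i = 0; i < n; i++)
        if ((events[i].flags & (FLAG_DONE | FLAG_IN_PROGRESS)) &&
            events[i].firing_id >= subxact_firing_id)
            events[i].flags &= ~(FLAG_DONE | FLAG_IN_PROGRESS);
}

int
main(void)
{
    /* first event fired before the subxact (id 3), second inside it (id 7) */
    Ev      events[2] = {{FLAG_DONE, 3}, {FLAG_DONE, 7}};

    unmark_after_abort(events, 2, 5);   /* counter was 5 at subxact start */
    printf("flags: %d %d\n", events[0].flags, events[1].flags);
    return 0;
}
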
4723 /* ----------
4724  * AfterTriggerEnlargeQueryState()
4725  *
4726  * Prepare the necessary state so that we can record AFTER trigger events
4727  * queued by a query. It is allowed to have nested queries within a
4728  * (sub)transaction, so we need to have separate state for each query
4729  * nesting level.
4730  * ----------
4731  */
4732 static void
4734 {
4735  int init_depth = afterTriggers.maxquerydepth;
4736 
4737  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
4738 
4739  if (afterTriggers.maxquerydepth == 0)
4740  {
4741  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
4742 
4743  afterTriggers.query_stack = (AfterTriggerEventList *)
4745  new_alloc * sizeof(AfterTriggerEventList));
4746  afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4748  new_alloc * sizeof(Tuplestorestate *));
4749  afterTriggers.maxquerydepth = new_alloc;
4750  }
4751  else
4752  {
4753  /* repalloc will keep the stack in the same context */
4754  int old_alloc = afterTriggers.maxquerydepth;
4755  int new_alloc = Max(afterTriggers.query_depth + 1,
4756  old_alloc * 2);
4757 
4758  afterTriggers.query_stack = (AfterTriggerEventList *)
4759  repalloc(afterTriggers.query_stack,
4760  new_alloc * sizeof(AfterTriggerEventList));
4761  afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4762  repalloc(afterTriggers.fdw_tuplestores,
4763  new_alloc * sizeof(Tuplestorestate *));
4764  /* Clear newly-allocated slots for subsequent lazy initialization. */
4765  memset(afterTriggers.fdw_tuplestores + old_alloc,
4766  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4767  afterTriggers.maxquerydepth = new_alloc;
4768  }
4769 
4770  /* Initialize new query lists to empty */
4771  while (init_depth < afterTriggers.maxquerydepth)
4772  {
4773  AfterTriggerEventList *events;
4774 
4775  events = &afterTriggers.query_stack[init_depth];
4776  events->head = NULL;
4777  events->tail = NULL;
4778  events->tailfree = NULL;
4779 
4780  ++init_depth;
4781  }
4782 }
4783 
4784 /*
4785  * Create an empty SetConstraintState with room for numalloc trigstates
4786  */
4787 static SetConstraintState
4789 {
4790  SetConstraintState state;
4791 
4792  /* Behave sanely with numalloc == 0 */
4793  if (numalloc <= 0)
4794  numalloc = 1;
4795 
4796  /*
4797  * We assume that zeroing will correctly initialize the state values.
4798  */
4799  state = (SetConstraintState)
4801  offsetof(SetConstraintStateData, trigstates) +
4802  numalloc * sizeof(SetConstraintTriggerData));
4803 
4804  state->numalloc = numalloc;
4805 
4806  return state;
4807 }
4808 
4809 /*
4810  * Copy a SetConstraintState
4811  */
4812 static SetConstraintState
4813 SetConstraintStateCopy(SetConstraintState origstate)
4814 {
4815  SetConstraintState state;
4816 
4817  state = SetConstraintStateCreate(origstate->numstates);
4818 
4819  state->all_isset = origstate->all_isset;
4820  state->all_isdeferred = origstate->all_isdeferred;
4821  state->numstates = origstate->numstates;
4822  memcpy(state->trigstates, origstate->trigstates,
4823  origstate->numstates * sizeof(SetConstraintTriggerData));
4824 
4825  return state;
4826 }
4827 
4828 /*
4829  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
4830  * pointer to the state object (it will change if we have to repalloc).
4831  */
4832 static SetConstraintState
4834  Oid tgoid, bool tgisdeferred)
4835 {
4836  if (state->numstates >= state->numalloc)
4837  {
4838  int newalloc = state->numalloc * 2;
4839 
4840  newalloc = Max(newalloc, 8); /* in case original has size 0 */
4841  state = (SetConstraintState)
4842  repalloc(state,
4843  offsetof(SetConstraintStateData, trigstates) +
4844  newalloc * sizeof(SetConstraintTriggerData));
4845  state->numalloc = newalloc;
4846  Assert(state->numstates < state->numalloc);
4847  }
4848 
4849  state->trigstates[state->numstates].sct_tgoid = tgoid;
4850  state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
4851  state->numstates++;
4852 
4853  return state;
4854 }
4855 
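SetConstraintState keeps its per-trigger entries in a trailing array inside the same allocation, so adding an entry may repalloc the whole object and hand back a different pointer that the caller must store. A standalone sketch of the same pattern with a C99 flexible array member and realloc follows; State and Item are invented names.

/* Illustrative standalone sketch; not part of trigger.c. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

typedef struct { unsigned tgoid; int deferred; } Item;

typedef struct
{
    int     numstates;
    int     numalloc;
    Item    items[];            /* flexible array member */
} State;

static State *
state_create(int numalloc)
{
    State  *s = calloc(1, offsetof(State, items) + numalloc * sizeof(Item));

    s->numalloc = numalloc;
    return s;
}

static State *
state_add_item(State *s, unsigned tgoid, int deferred)
{
    if (s->numstates >= s->numalloc)
    {
        s->numalloc *= 2;
        s = realloc(s, offsetof(State, items) + s->numalloc * sizeof(Item));
    }
    s->items[s->numstates].tgoid = tgoid;
    s->items[s->numstates].deferred = deferred;
    s->numstates++;
    return s;                   /* caller must use the returned pointer */
}

int
main(void)
{
    State  *s = state_create(1);

    s = state_add_item(s, 42, 1);
    s = state_add_item(s, 43, 0);       /* forces a realloc */
    printf("%d items, capacity %d\n", s->numstates, s->numalloc);
    free(s);
    return 0;
}
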
4856 /* ----------
4857  * AfterTriggerSetState()
4858  *
4859  * Execute the SET CONSTRAINTS ... utility command.
4860  * ----------
4861  */
4862 void
4864 {
4865  int my_level = GetCurrentTransactionNestLevel();
4866 
4867  /* If we haven't already done so, initialize our state. */
4868  if (afterTriggers.state == NULL)
4869  afterTriggers.state = SetConstraintStateCreate(8);
4870 
4871  /*
4872  * If in a subtransaction, and we didn't save the current state already,
4873  * save it so it can be restored if the subtransaction aborts.
4874  */
4875  if (my_level > 1 &&
4876  afterTriggers.state_stack[my_level] == NULL)
4877  {
4878  afterTriggers.state_stack[my_level] =
4879  SetConstraintStateCopy(afterTriggers.state);
4880  }
4881 
4882  /*
4883  * Handle SET CONSTRAINTS ALL ...
4884  */
4885  if (stmt->constraints == NIL)
4886  {
4887  /*
4888  * Forget any previous SET CONSTRAINTS commands in this transaction.
4889  */
4890  afterTriggers.state->numstates = 0;
4891 
4892  /*
4893  * Set the per-transaction ALL state to known.
4894  */
4895  afterTriggers.state->all_isset = true;
4896  afterTriggers.state->all_isdeferred = stmt->deferred;
4897  }
4898  else
4899  {
4900  Relation conrel;
4901  Relation tgrel;
4902  List *conoidlist = NIL;
4903  List *tgoidlist = NIL;
4904  ListCell *lc;
4905 
4906  /*
4907  * Handle SET CONSTRAINTS constraint-name [, ...]
4908  *
4909  * First, identify all the named constraints and make a list of their
4910  * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
4911  * the same name within a schema, the specifications are not
4912  * necessarily unique. Our strategy is to target all matching
4913  * constraints within the first search-path schema that has any
4914  * matches, but disregard matches in schemas beyond the first match.
4915  * (This is a bit odd but it's the historical behavior.)
4916  */
4918 
4919  foreach(lc, stmt->constraints)
4920  {
4921  RangeVar *constraint = lfirst(lc);
4922  bool found;
4923  List *namespacelist;
4924  ListCell *nslc;
4925 
4926  if (constraint->catalogname)
4927  {
4928  if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
4929  ereport(ERROR,
4930  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4931  errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
4932  constraint->catalogname, constraint->schemaname,
4933  constraint->relname)));
4934  }
4935 
4936  /*
4937  * If we're given the schema name with the constraint, look only
4938  * in that schema. If given a bare constraint name, use the
4939  * search path to find the first matching constraint.
4940  */
4941  if (constraint->schemaname)
4942  {
4943  Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
4944  false);
4945 
4946  namespacelist = list_make1_oid(namespaceId);
4947  }
4948  else
4949  {
4950  namespacelist = fetch_search_path(true);
4951  }
4952 
4953  found = false;
4954  foreach(nslc, namespacelist)
4955  {
4956  Oid namespaceId = lfirst_oid(nslc);
4957  SysScanDesc conscan;
4958  ScanKeyData skey[2];
4959  HeapTuple tup;
4960 
4961  ScanKeyInit(&skey[0],
4963  BTEqualStrategyNumber, F_NAMEEQ,
4964  CStringGetDatum(constraint->relname));
4965  ScanKeyInit(&skey[1],
4967  BTEqualStrategyNumber, F_OIDEQ,
4968  ObjectIdGetDatum(namespaceId));
4969 
4970  conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
4971  true, NULL, 2, skey);
4972 
4973  while (HeapTupleIsValid(tup = systable_getnext(conscan)))
4974  {
4976 
4977  if (con->condeferrable)
4978  conoidlist = lappend_oid(conoidlist,
4979  HeapTupleGetOid(tup));
4980  else if (stmt->deferred)
4981  ereport(ERROR,
4982  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
4983  errmsg("constraint \"%s\" is not deferrable",
4984  constraint->relname)));
4985  found = true;
4986  }
4987 
4988  systable_endscan(conscan);
4989 
4990  /*
4991  * Once we've found a matching constraint we do not search
4992  * later parts of the search path.
4993  */
4994  if (found)
4995  break;
4996  }
4997 
4998  list_free(namespacelist);
4999 
5000  /*
5001  * Not found?
5002  */
5003  if (!found)
5004  ereport(ERROR,
5005  (errcode(ERRCODE_UNDEFINED_OBJECT),
5006  errmsg("constraint \"%s\" does not exist",
5007  constraint->relname)));
5008  }
5009 
5010  heap_close(conrel, AccessShareLock);
5011 
5012  /*
5013  * Now, locate the trigger(s) implementing each of these constraints,
5014  * and make a list of their OIDs.
5015  */
5017 
5018  foreach(lc, conoidlist)
5019  {
5020  Oid conoid = lfirst_oid(lc);
5021  bool found;
5022  ScanKeyData skey;
5023  SysScanDesc tgscan;
5024  HeapTuple htup;
5025 
5026  found = false;
5027 
5028  ScanKeyInit(&skey,
5030  BTEqualStrategyNumber, F_OIDEQ,
5031  ObjectIdGetDatum(conoid));
5032 
5033  tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
5034  NULL, 1, &skey);
5035 
5036  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
5037  {
5038  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
5039 
5040  /*
5041  * Silently skip triggers that are marked as non-deferrable in
5042  * pg_trigger. This is not an error condition, since a
5043  * deferrable RI constraint may have some non-deferrable
5044  * actions.
5045  */
5046  if (pg_trigger->tgdeferrable)
5047  tgoidlist = lappend_oid(tgoidlist,
5048  HeapTupleGetOid(htup));
5049 
5050  found = true;
5051  }
5052 
5053  systable_endscan(tgscan);
5054 
5055  /* Safety check: a deferrable constraint should have triggers */
5056  if (!found)
5057  elog(ERROR, "no triggers found for constraint with OID %u",
5058  conoid);
5059  }
5060 
5061  heap_close(tgrel, AccessShareLock);
5062 
5063  /*
5064  * Now we can set the trigger states of individual triggers for this
5065  * xact.
5066  */
5067  foreach(lc, tgoidlist)
5068  {
5069  Oid tgoid = lfirst_oid(lc);
5070  SetConstraintState state = afterTriggers.state;
5071  bool found = false;
5072  int i;
5073 
5074  for (i = 0; i < state->numstates; i++)
5075  {
5076  if (state->trigstates[i].sct_tgoid == tgoid)
5077  {
5078  state->trigstates[i].sct_tgisdeferred = stmt->deferred;
5079  found = true;
5080  break;
5081  }
5082  }
5083  if (!found)
5084  {
5085  afterTriggers.state =
5086  SetConstraintStateAddItem(state, tgoid, stmt->deferred);
5087  }
5088  }
5089  }
5090 
5091  /*
5092  * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
5093  * checks against that constraint must be made when the SET CONSTRAINTS
5094  * command is executed -- i.e. the effects of the SET CONSTRAINTS command
5095  * apply retroactively. We've updated the constraints state, so scan the
5096  * list of previously deferred events to fire any that have now become
5097  * immediate.
5098  *
5099  * Obviously, if this was SET ... DEFERRED then it can't have converted
5100  * any unfired events to immediate, so we need do nothing in that case.
5101  */
5102  if (!stmt->deferred)
5103  {
5104  AfterTriggerEventList *events = &afterTriggers.events;
5105  bool snapshot_set = false;
5106 
5107  while (afterTriggerMarkEvents(events, NULL, true))
5108  {
5109  CommandId firing_id = afterTriggers.firing_counter++;
5110 
5111  /*
5112  * Make sure a snapshot has been established in case trigger
5113  * functions need one. Note that we avoid setting a snapshot if
5114  * we don't find at least one trigger that has to be fired now.
5115  * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
5116  * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
5117  * at the start of a transaction it's not possible for any trigger
5118  * events to be queued yet.)
5119  */
5120  if (!snapshot_set)
5121  {
5123  snapshot_set = true;
5124  }
5125 
5126  /*
5127  * We can delete fired events if we are at top transaction level,
5128  * but we'd better not if inside a subtransaction, since the
5129  * subtransaction could later get rolled back.
5130  */
5131  if (afterTriggerInvokeEvents(events, firing_id, NULL,
5132  !IsSubTransaction()))
5133  break; /* all fired */
5134  }
5135 
5136  if (snapshot_set)
5138  }
5139 }
5140 
5141 /* ----------
5142  * AfterTriggerPendingOnRel()
5143  * Test to see if there are any pending after-trigger events for rel.
5144  *
5145  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5146  * it is unsafe to perform major surgery on a relation. Note that only
5147  * local pending events are examined. We assume that having exclusive lock
5148  * on a rel guarantees there are no unserviced events in other backends ---
5149  * but having a lock does not prevent there being such events in our own.
5150  *
5151  * In some scenarios it'd be reasonable to remove pending events (more
5152  * specifically, mark them DONE by the current subxact) but without a lot
5153  * of knowledge of the trigger semantics we can't do this in general.
5154  * ----------
5155  */
5156 bool
5158 {
5159  AfterTriggerEvent event;
5160  AfterTriggerEventChunk *chunk;
5161  int depth;
5162 
5163  /* Scan queued events */
5164  for_each_event_chunk(event, chunk, afterTriggers.events)
5165  {
5166  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5167 
5168  /*
5169  * We can ignore completed events. (Even if a DONE flag is rolled
5170  * back by subxact abort, it's OK because the effects of the TRUNCATE
5171  * or whatever must get rolled back too.)
5172  */
5173  if (event->ate_flags & AFTER_TRIGGER_DONE)
5174  continue;
5175 
5176  if (evtshared->ats_relid == relid)
5177  return true;
5178  }
5179 
5180  /*
5181  * Also scan events queued by incomplete queries. This could only matter
5182  * if TRUNCATE/etc is executed by a function or trigger within an updating
5183  * query on the same relation, which is pretty perverse, but let's check.
5184  */
5185  for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
5186  {
5187  for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth])
5188  {
5189  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5190 
5191  if (event->ate_flags & AFTER_TRIGGER_DONE)
5192  continue;
5193 
5194  if (evtshared->ats_relid == relid)
5195  return true;
5196  }
5197  }
5198 
5199  return false;
5200 }
5201 
5202 
5203 /* ----------
5204  * AfterTriggerSaveEvent()
5205  *
5206  * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5207  * be fired for an event.
5208  *
5209  * NOTE: this is called whenever there are any triggers associated with
5210  * the event (even if they are disabled). This function decides which
5211  * triggers actually need to be queued. It is also called after each row,
5212  * even if there are no triggers for that event, if there are any AFTER
5213  * STATEMENT triggers for the statement that use transition tables, so that
5214  * the transition tuplestores can be built.
5215  *
5216  * Transition tuplestores are built now, rather than when events are pulled
5217  * off the queue, because AFTER ROW triggers are allowed to select from the
5218  * transition tables for the statement.
5219  * ----------
5220  */
5221 static void
5223  int event, bool row_trigger,
5224  HeapTuple oldtup, HeapTuple newtup,
5225  List *recheckIndexes, Bitmapset *modifiedCols,
5226  TransitionCaptureState *transition_capture)
5227 {
5228  Relation rel = relinfo->ri_RelationDesc;
5229  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5230  AfterTriggerEventData new_event;
5231  AfterTriggerSharedData new_shared;
5232  char relkind = relinfo->ri_RelationDesc->rd_rel->relkind;
5233  int tgtype_event;
5234  int tgtype_level;
5235  int i;
5236  Tuplestorestate *fdw_tuplestore = NULL;
5237 
5238  /*
5239  * Check state. We use a normal test not Assert because it is possible to
5240  * reach here in the wrong state given misconfigured RI triggers, in
5241  * particular deferring a cascade action trigger.
5242  */
5243  if (afterTriggers.query_depth < 0)
5244  elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
5245 
5246  /* Be sure we have enough space to record events at this query depth. */
5247  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5249 
5250  /*
5251  * If the directly named relation has any triggers with transition tables,
5252  * then we need to capture transition tuples.
5253  */
5254  if (row_trigger && transition_capture != NULL)
5255  {
5256  HeapTuple original_insert_tuple = transition_capture->tcs_original_insert_tuple;
5257  TupleConversionMap *map = transition_capture->tcs_map;
5258  bool delete_old_table = transition_capture->tcs_delete_old_table;
5259  bool update_old_table = transition_capture->tcs_update_old_table;
5260  bool update_new_table = transition_capture->tcs_update_new_table;
5261  bool insert_new_table = transition_capture->tcs_insert_new_table;
5262 
5263  if ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
5264  (event == TRIGGER_EVENT_UPDATE && update_old_table))
5265  {
5266  Tuplestorestate *old_tuplestore;
5267 
5268  Assert(oldtup != NULL);
5269  old_tuplestore = transition_capture->tcs_old_tuplestore;
5270 
5271  if (map != NULL)
5272  {
5273  HeapTuple converted = do_convert_tuple(oldtup, map);
5274 
5275  tuplestore_puttuple(old_tuplestore, converted);
5276  pfree(converted);
5277  }
5278  else
5279  tuplestore_puttuple(old_tuplestore, oldtup);
5280  }
5281  if ((event == TRIGGER_EVENT_INSERT && insert_new_table) ||
5282  (event == TRIGGER_EVENT_UPDATE && update_new_table))
5283  {
5284  Tuplestorestate *new_tuplestore;
5285 
5286  Assert(newtup != NULL);
5287  if (event == TRIGGER_EVENT_INSERT)
5288  new_tuplestore = transition_capture->tcs_insert_tuplestore;
5289  else
5290  new_tuplestore = transition_capture->tcs_update_tuplestore;
5291 
5292  if (original_insert_tuple != NULL)
5293  tuplestore_puttuple(new_tuplestore, original_insert_tuple);
5294  else if (map != NULL)
5295  {
5296  HeapTuple converted = do_convert_tuple(newtup, map);
5297 
5298  tuplestore_puttuple(new_tuplestore, converted);
5299  pfree(converted);
5300  }
5301  else
5302  tuplestore_puttuple(new_tuplestore, newtup);
5303  }
5304 
5305  /* If transition tables are the only reason we're here, return. */
5306  if (trigdesc == NULL ||
5307  (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
5308  (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
5309  (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
5310  return;
5311  }
5312 
5313  /*
5314  * Validate the event code and collect the associated tuple CTIDs.
5315  *
5316  * The event code will be used both as a bitmask and an array offset, so
5317  * validation is important to make sure we don't walk off the edge of our
5318  * arrays.
5319  */
5320  switch (event)
5321  {
5322  case TRIGGER_EVENT_INSERT:
5323  tgtype_event = TRIGGER_TYPE_INSERT;
5324  if (row_trigger)
5325  {
5326  Assert(oldtup == NULL);
5327  Assert(newtup != NULL);
5328  ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid1));
5329  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5330  }
5331  else
5332  {
5333  Assert(oldtup == NULL);
5334  Assert(newtup == NULL);
5335  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5336  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5337  }
5338  break;
5339  case TRIGGER_EVENT_DELETE:
5340  tgtype_event = TRIGGER_TYPE_DELETE;
5341  if (row_trigger)
5342  {
5343  Assert(oldtup != NULL);
5344  Assert(newtup == NULL);
5345  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5346  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5347  }
5348  else
5349  {
5350  Assert(oldtup == NULL);
5351  Assert(newtup == NULL);
5352  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5353  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5354  }
5355  break;
5356  case TRIGGER_EVENT_UPDATE:
5357  tgtype_event = TRIGGER_TYPE_UPDATE;
5358  if (row_trigger)
5359  {
5360  Assert(oldtup != NULL);
5361  Assert(newtup != NULL);
5362  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5363  ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid2));
5364  }
5365  else
5366  {
5367  Assert(oldtup == NULL);
5368  Assert(newtup == NULL);
5369  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5370  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5371  }
5372  break;
5374  tgtype_event = TRIGGER_TYPE_TRUNCATE;
5375  Assert(oldtup == NULL);
5376  Assert(newtup == NULL);
5377  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5378  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5379  break;
5380  default:
5381  elog(ERROR, "invalid after-trigger event code: %d", event);
5382  tgtype_event = 0; /* keep compiler quiet */
5383  break;
5384  }
5385 
5386  if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
5387  new_event.ate_flags = (row_trigger && event == TRIGGER_EVENT_UPDATE) ?
5389  /* else, we'll initialize ate_flags for each trigger */
5390 
5391  tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
5392 
5393  for (i = 0; i < trigdesc->numtriggers; i++)
5394  {
5395  Trigger *trigger = &trigdesc->triggers[i];
5396 
5397  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
5398  tgtype_level,
5400  tgtype_event))
5401  continue;
5402  if (!TriggerEnabled(estate, relinfo, trigger, event,
5403  modifiedCols, oldtup, newtup))
5404  continue;
5405 
5406  if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
5407  {
5408  if (fdw_tuplestore == NULL)
5409  {
5410  fdw_tuplestore =
5412  (afterTriggers.fdw_tuplestores);
5413  new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
5414  }
5415  else
5416  /* subsequent event for the same tuple */
5417  new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
5418  }
5419 
5420  /*
5421  * If the trigger is a foreign key enforcement trigger, there are
5422  * certain cases where we can skip queueing the event because we can
5423  * tell by inspection that the FK constraint will still pass.
5424  */
5425  if (TRIGGER_FIRED_BY_UPDATE(event))
5426  {
5427  switch (RI_FKey_trigger_type(trigger->tgfoid))
5428  {
5429  case RI_TRIGGER_PK:
5430  /* Update on trigger's PK table */
5431  if (!RI_FKey_pk_upd_check_required(trigger, rel,
5432  oldtup, newtup))
5433  {
5434  /* skip queuing this event */
5435  continue;
5436  }
5437  break;
5438 
5439  case RI_TRIGGER_FK:
5440  /* Update on trigger's FK table */
5441  if (!RI_FKey_fk_upd_check_required(trigger, rel,
5442  oldtup, newtup))
5443  {
5444  /* skip queuing this event */
5445  continue;
5446  }
5447  break;
5448 
5449  case RI_TRIGGER_NONE:
5450  /* Not an FK trigger */
5451  break;
5452  }
5453  }
5454 
5455  /*
5456  * If the trigger is a deferred unique constraint check trigger, only
5457  * queue it if the unique constraint was potentially violated, which
5458  * we know from index insertion time.
5459  */
5460  if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
5461  {
5462  if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
5463  continue; /* Uniqueness definitely not violated */
5464  }
5465 
5466  /*
5467  * Fill in event structure and add it to the current query's queue.
5468  */
5469  new_shared.ats_event =
5470  (event & TRIGGER_EVENT_OPMASK) |
5471  (row_trigger ? TRIGGER_EVENT_ROW : 0) |
5472  (trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
5473  (trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
5474  new_shared.ats_tgoid = trigger->tgoid;
5475  new_shared.ats_relid = RelationGetRelid(rel);
5476  new_shared.ats_firing_id = 0;
5477  new_shared.ats_transition_capture = tr