trigger.c
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/heapam.h"
18 #include "access/sysattr.h"
19 #include "access/htup_details.h"
20 #include "access/xact.h"
21 #include "catalog/catalog.h"
22 #include "catalog/dependency.h"
23 #include "catalog/indexing.h"
24 #include "catalog/objectaccess.h"
25 #include "catalog/pg_constraint.h"
26 #include "catalog/pg_constraint_fn.h"
27 #include "catalog/pg_inherits_fn.h"
28 #include "catalog/pg_proc.h"
29 #include "catalog/pg_trigger.h"
30 #include "catalog/pg_type.h"
31 #include "commands/dbcommands.h"
32 #include "commands/defrem.h"
33 #include "commands/trigger.h"
34 #include "executor/executor.h"
35 #include "miscadmin.h"
36 #include "nodes/bitmapset.h"
37 #include "nodes/makefuncs.h"
38 #include "optimizer/clauses.h"
39 #include "optimizer/var.h"
40 #include "parser/parse_clause.h"
41 #include "parser/parse_collate.h"
42 #include "parser/parse_func.h"
43 #include "parser/parse_relation.h"
44 #include "parser/parsetree.h"
45 #include "pgstat.h"
46 #include "rewrite/rewriteManip.h"
47 #include "storage/bufmgr.h"
48 #include "storage/lmgr.h"
49 #include "tcop/utility.h"
50 #include "utils/acl.h"
51 #include "utils/builtins.h"
52 #include "utils/bytea.h"
53 #include "utils/fmgroids.h"
54 #include "utils/inval.h"
55 #include "utils/lsyscache.h"
56 #include "utils/memutils.h"
57 #include "utils/rel.h"
58 #include "utils/snapmgr.h"
59 #include "utils/syscache.h"
60 #include "utils/tqual.h"
61 #include "utils/tuplestore.h"
62 
63 
64 /* GUC variables */
65 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
66 
67 /* How many levels deep into trigger execution are we? */
68 static int MyTriggerDepth = 0;
69 
70 /*
71  * Note that similar macros also exist in executor/execMain.c. There does not
72  * appear to be any good header to put them into, given the structures that
73  * they use, so we let them be duplicated. Be sure to update all if one needs
74  * to be changed, however.
75  */
76 #define GetUpdatedColumns(relinfo, estate) \
77  (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
78 
79 /* Local function prototypes */
80 static void ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid);
81 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
82 static HeapTuple GetTupleForTrigger(EState *estate,
83  EPQState *epqstate,
84  ResultRelInfo *relinfo,
85  ItemPointer tid,
86  LockTupleMode lockmode,
87  TupleTableSlot **newSlot);
88 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
89  Trigger *trigger, TriggerEvent event,
90  Bitmapset *modifiedCols,
91  HeapTuple oldtup, HeapTuple newtup);
92 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
93  int tgindx,
94  FmgrInfo *finfo,
95  Instrumentation *instr,
96  MemoryContext per_tuple_context);
97 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
98  int event, bool row_trigger,
99  HeapTuple oldtup, HeapTuple newtup,
100  List *recheckIndexes, Bitmapset *modifiedCols,
101  TransitionCaptureState *transition_capture);
102 static void AfterTriggerEnlargeQueryState(void);
103 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
104 
105 
106 /*
107  * Create a trigger. Returns the address of the created trigger.
108  *
109  * queryString is the source text of the CREATE TRIGGER command.
110  * This must be supplied if a whenClause is specified, else it can be NULL.
111  *
112  * relOid, if nonzero, is the relation on which the trigger should be
113  * created. If zero, the name provided in the statement will be looked up.
114  *
115  * refRelOid, if nonzero, is the relation to which the constraint trigger
116  * refers. If zero, the constraint relation name provided in the statement
117  * will be looked up as needed.
118  *
119  * constraintOid, if nonzero, says that this trigger is being created
120  * internally to implement that constraint. A suitable pg_depend entry will
121  * be made to link the trigger to that constraint. constraintOid is zero when
122  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
123  * TRIGGER, we build a pg_constraint entry internally.)
124  *
125  * indexOid, if nonzero, is the OID of an index associated with the constraint.
126  * We do nothing with this except store it into pg_trigger.tgconstrindid.
127  *
128  * If isInternal is true then this is an internally-generated trigger.
129  * This argument sets the tgisinternal field of the pg_trigger entry, and
130  * if TRUE causes us to modify the given trigger name to ensure uniqueness.
131  *
132  * When isInternal is not true we require ACL_TRIGGER permissions on the
133  * relation, as well as ACL_EXECUTE on the trigger function. For internal
134  * triggers the caller must apply any required permission checks.
135  *
136  * Note: can return InvalidObjectAddress if we decided to not create a trigger
137  * at all, but a foreign-key constraint. This is a kluge for backwards
138  * compatibility.
139  */
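 /*
  * For illustration, a typical user-level command handled here might be
  *
  *     CREATE TRIGGER my_audit_trg
  *         AFTER UPDATE ON my_table
  *         FOR EACH ROW
  *         EXECUTE PROCEDURE my_audit_func();
  *
  * which arrives with isInternal = false and constraintOid = indexOid =
  * InvalidOid; the trigger name, timing, events and function come from the
  * CreateTrigStmt.  (The table and function names above are placeholders.)
  */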
140 ObjectAddress
141 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
142  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
143  bool isInternal)
144 {
145  int16 tgtype;
146  int ncolumns;
147  int16 *columns;
148  int2vector *tgattr;
149  Node *whenClause;
150  List *whenRtable;
151  char *qual;
152  Datum values[Natts_pg_trigger];
153  bool nulls[Natts_pg_trigger];
154  Relation rel;
155  AclResult aclresult;
156  Relation tgrel;
157  SysScanDesc tgscan;
158  ScanKeyData key;
159  Relation pgrel;
160  HeapTuple tuple;
161  Oid fargtypes[1]; /* dummy */
162  Oid funcoid;
163  Oid funcrettype;
164  Oid trigoid;
165  char internaltrigname[NAMEDATALEN];
166  char *trigname;
167  Oid constrrelid = InvalidOid;
168  ObjectAddress myself,
169  referenced;
170  char *oldtablename = NULL;
171  char *newtablename = NULL;
172 
173  if (OidIsValid(relOid))
174  rel = heap_open(relOid, ShareRowExclusiveLock);
175  else
176  rel = heap_openrv(stmt->relation, ShareRowExclusiveLock);
177 
178  /*
179  * Triggers must be on tables or views, and there are additional
180  * relation-type-specific restrictions.
181  */
182  if (rel->rd_rel->relkind == RELKIND_RELATION ||
183  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
184  {
185  /* Tables can't have INSTEAD OF triggers */
186  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
187  stmt->timing != TRIGGER_TYPE_AFTER)
188  ereport(ERROR,
189  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
190  errmsg("\"%s\" is a table",
191  RelationGetRelationName(rel)),
192  errdetail("Tables cannot have INSTEAD OF triggers.")));
193  /* Disallow ROW triggers on partitioned tables */
194  if (stmt->row && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
195  ereport(ERROR,
196  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
197  errmsg("\"%s\" is a partitioned table",
198  RelationGetRelationName(rel)),
199  errdetail("Partitioned tables cannot have ROW triggers.")));
200  }
201  else if (rel->rd_rel->relkind == RELKIND_VIEW)
202  {
203  /*
204  * Views can have INSTEAD OF triggers (which we check below are
205  * row-level), or statement-level BEFORE/AFTER triggers.
206  */
207  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
208  ereport(ERROR,
209  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
210  errmsg("\"%s\" is a view",
211  RelationGetRelationName(rel)),
212  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
213  /* Disallow TRUNCATE triggers on VIEWs */
214  if (TRIGGER_FOR_TRUNCATE(stmt->events))
215  ereport(ERROR,
216  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
217  errmsg("\"%s\" is a view",
218  RelationGetRelationName(rel)),
219  errdetail("Views cannot have TRUNCATE triggers.")));
220  }
221  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
222  {
223  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
224  stmt->timing != TRIGGER_TYPE_AFTER)
225  ereport(ERROR,
226  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
227  errmsg("\"%s\" is a foreign table",
228  RelationGetRelationName(rel)),
229  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
230 
231  if (TRIGGER_FOR_TRUNCATE(stmt->events))
232  ereport(ERROR,
233  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
234  errmsg("\"%s\" is a foreign table",
235  RelationGetRelationName(rel)),
236  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
237 
238  /*
239  * We disallow constraint triggers to protect the assumption that
240  * triggers on FKs can't be deferred. See notes with AfterTriggers
241  * data structures, below.
242  */
243  if (stmt->isconstraint)
244  ereport(ERROR,
245  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
246  errmsg("\"%s\" is a foreign table",
247  RelationGetRelationName(rel)),
248  errdetail("Foreign tables cannot have constraint triggers.")));
249  }
250  else
251  ereport(ERROR,
252  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
253  errmsg("\"%s\" is not a table or view",
254  RelationGetRelationName(rel))));
255 
256  if (!allowSystemTableMods && IsSystemRelation(rel))
257  ereport(ERROR,
258  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
259  errmsg("permission denied: \"%s\" is a system catalog",
260  RelationGetRelationName(rel))));
261 
262  if (stmt->isconstraint)
263  {
264  /*
265  * We must take a lock on the target relation to protect against
266  * concurrent drop. It's not clear that AccessShareLock is strong
267  * enough, but we certainly need at least that much... otherwise, we
268  * might end up creating a pg_constraint entry referencing a
269  * nonexistent table.
270  */
271  if (OidIsValid(refRelOid))
272  {
273  LockRelationOid(refRelOid, AccessShareLock);
274  constrrelid = refRelOid;
275  }
276  else if (stmt->constrrel != NULL)
277  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
278  false);
279  }
280 
281  /* permission checks */
282  if (!isInternal)
283  {
284  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
285  ACL_TRIGGER);
286  if (aclresult != ACLCHECK_OK)
287  aclcheck_error(aclresult, ACL_KIND_CLASS,
288  RelationGetRelationName(rel));
289 
290  if (OidIsValid(constrrelid))
291  {
292  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
293  ACL_TRIGGER);
294  if (aclresult != ACLCHECK_OK)
295  aclcheck_error(aclresult, ACL_KIND_CLASS,
296  get_rel_name(constrrelid));
297  }
298  }
299 
300  /* Compute tgtype */
301  TRIGGER_CLEAR_TYPE(tgtype);
302  if (stmt->row)
303  TRIGGER_SETT_ROW(tgtype);
304  tgtype |= stmt->timing;
305  tgtype |= stmt->events;
306 
307  /* Disallow ROW-level TRUNCATE triggers */
308  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
309  ereport(ERROR,
310  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
311  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
312 
313  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
314  if (TRIGGER_FOR_INSTEAD(tgtype))
315  {
316  if (!TRIGGER_FOR_ROW(tgtype))
317  ereport(ERROR,
318  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
319  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
320  if (stmt->whenClause)
321  ereport(ERROR,
322  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
323  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
324  if (stmt->columns != NIL)
325  ereport(ERROR,
326  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
327  errmsg("INSTEAD OF triggers cannot have column lists")));
328  }
329 
330  /*
331  * We don't yet support naming ROW transition variables, but the parser
332  * recognizes the syntax so we can give a nicer message here.
333  *
334  * Per standard, REFERENCING TABLE names are only allowed on AFTER
335  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
336  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
337  * only allowed once. Per standard, OLD may not be specified when
338  * creating a trigger only for INSERT, and NEW may not be specified when
339  * creating a trigger only for DELETE.
340  *
341  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
342  * reference both ROW and TABLE transition data.
343  */
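 /*
  * For illustration, a statement-level trigger with transition tables might
  * be created by something like (names are placeholders):
  *
  *     CREATE TRIGGER my_stmt_trg
  *         AFTER UPDATE ON my_table
  *         REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
  *         FOR EACH STATEMENT
  *         EXECUTE PROCEDURE my_stmt_func();
  *
  * Here "old_rows" and "new_rows" end up in oldtablename and newtablename
  * below.
  */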
344  if (stmt->transitionRels != NIL)
345  {
346  List *varList = stmt->transitionRels;
347  ListCell *lc;
348 
349  foreach(lc, varList)
350  {
351  TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
352 
353  if (!(tt->isTable))
354  ereport(ERROR,
355  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
356  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
357  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
358 
359  /*
360  * Because of the above test, we omit further ROW-related testing
361  * below. If we later allow naming OLD and NEW ROW variables,
362  * adjustments will be needed below.
363  */
364 
365  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
366  ereport(ERROR,
367  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
368  errmsg("\"%s\" is a foreign table",
369  RelationGetRelationName(rel)),
370  errdetail("Triggers on foreign tables cannot have transition tables.")));
371 
372  if (rel->rd_rel->relkind == RELKIND_VIEW)
373  ereport(ERROR,
374  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
375  errmsg("\"%s\" is a view",
376  RelationGetRelationName(rel)),
377  errdetail("Triggers on views cannot have transition tables.")));
378 
379  /*
380  * We currently don't allow row-level triggers with transition
381  * tables on partition or inheritance children. Such triggers
382  * would somehow need to see tuples converted to the format of the
383  * table they're attached to, and it's not clear which subset of
384  * tuples each child should see. See also the prohibitions in
385  * ATExecAttachPartition() and ATExecAddInherit().
386  */
387  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
388  {
389  /* Use appropriate error message. */
390  if (rel->rd_rel->relispartition)
391  ereport(ERROR,
392  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
393  errmsg("ROW triggers with transition tables are not supported on partitions")));
394  else
395  ereport(ERROR,
396  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
397  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
398  }
399 
400  if (stmt->timing != TRIGGER_TYPE_AFTER)
401  ereport(ERROR,
402  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
403  errmsg("transition table name can only be specified for an AFTER trigger")));
404 
405  if (TRIGGER_FOR_TRUNCATE(tgtype))
406  ereport(ERROR,
407  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
408  errmsg("TRUNCATE triggers with transition tables are not supported")));
409 
410  /*
411  * We currently don't allow multi-event triggers ("INSERT OR
412  * UPDATE") with transition tables, because it's not clear how to
413  * handle INSERT ... ON CONFLICT statements which can fire both
414  * INSERT and UPDATE triggers. We show the inserted tuples to
415  * INSERT triggers and the updated tuples to UPDATE triggers, but
416  * it's not yet clear what INSERT OR UPDATE trigger should see.
417  * This restriction could be lifted if we can decide on the right
418  * semantics in a later release.
419  */
420  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
421  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
422  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
423  ereport(ERROR,
424  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
425  errmsg("transition tables cannot be specified for triggers with more than one event")));
426 
427  /*
428  * We currently don't allow column-specific triggers with
429  * transition tables. Per spec, that seems to require
430  * accumulating separate transition tables for each combination of
431  * columns, which is a lot of work for a rather marginal feature.
432  */
433  if (stmt->columns != NIL)
434  ereport(ERROR,
435  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
436  errmsg("transition tables cannot be specified for triggers with column lists")));
437 
438  /*
439  * We disallow constraint triggers with transition tables, to
440  * protect the assumption that such triggers can't be deferred.
441  * See notes with AfterTriggers data structures, below.
442  *
443  * Currently this is enforced by the grammar, so just Assert here.
444  */
445  Assert(!stmt->isconstraint);
446 
447  if (tt->isNew)
448  {
449  if (!(TRIGGER_FOR_INSERT(tgtype) ||
450  TRIGGER_FOR_UPDATE(tgtype)))
451  ereport(ERROR,
452  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
453  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
454 
455  if (newtablename != NULL)
456  ereport(ERROR,
457  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
458  errmsg("NEW TABLE cannot be specified multiple times")));
459 
460  newtablename = tt->name;
461  }
462  else
463  {
464  if (!(TRIGGER_FOR_DELETE(tgtype) ||
465  TRIGGER_FOR_UPDATE(tgtype)))
466  ereport(ERROR,
467  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
468  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
469 
470  if (oldtablename != NULL)
471  ereport(ERROR,
472  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
473  errmsg("OLD TABLE cannot be specified multiple times")));
474 
475  oldtablename = tt->name;
476  }
477  }
478 
479  if (newtablename != NULL && oldtablename != NULL &&
480  strcmp(newtablename, oldtablename) == 0)
481  ereport(ERROR,
482  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
483  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
484  }
485 
486  /*
487  * Parse the WHEN clause, if any
488  */
489  if (stmt->whenClause)
490  {
491  ParseState *pstate;
492  RangeTblEntry *rte;
493  List *varList;
494  ListCell *lc;
495 
496  /* Set up a pstate to parse with */
497  pstate = make_parsestate(NULL);
498  pstate->p_sourcetext = queryString;
499 
500  /*
501  * Set up RTEs for OLD and NEW references.
502  *
503  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
504  */
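 /*
  * For example, a clause such as
  *
  *     WHEN (OLD.balance IS DISTINCT FROM NEW.balance)
  *
  * is parsed here with OLD resolving to range table entry 1 and NEW to
  * range table entry 2 ("balance" is just a placeholder column name).
  */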
505  rte = addRangeTableEntryForRelation(pstate, rel,
506  makeAlias("old", NIL),
507  false, false);
508  addRTEtoQuery(pstate, rte, false, true, true);
509  rte = addRangeTableEntryForRelation(pstate, rel,
510  makeAlias("new", NIL),
511  false, false);
512  addRTEtoQuery(pstate, rte, false, true, true);
513 
514  /* Transform expression. Copy to be sure we don't modify original */
515  whenClause = transformWhereClause(pstate,
516  copyObject(stmt->whenClause),
517  EXPR_KIND_TRIGGER_WHEN,
518  "WHEN");
519  /* we have to fix its collations too */
520  assign_expr_collations(pstate, whenClause);
521 
522  /*
523  * Check for disallowed references to OLD/NEW.
524  *
525  * NB: pull_var_clause is okay here only because we don't allow
526  * subselects in WHEN clauses; it would fail to examine the contents
527  * of subselects.
528  */
529  varList = pull_var_clause(whenClause, 0);
530  foreach(lc, varList)
531  {
532  Var *var = (Var *) lfirst(lc);
533 
534  switch (var->varno)
535  {
536  case PRS2_OLD_VARNO:
537  if (!TRIGGER_FOR_ROW(tgtype))
538  ereport(ERROR,
539  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
540  errmsg("statement trigger's WHEN condition cannot reference column values"),
541  parser_errposition(pstate, var->location)));
542  if (TRIGGER_FOR_INSERT(tgtype))
543  ereport(ERROR,
544  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
545  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
546  parser_errposition(pstate, var->location)));
547  /* system columns are okay here */
548  break;
549  case PRS2_NEW_VARNO:
550  if (!TRIGGER_FOR_ROW(tgtype))
551  ereport(ERROR,
552  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
553  errmsg("statement trigger's WHEN condition cannot reference column values"),
554  parser_errposition(pstate, var->location)));
555  if (TRIGGER_FOR_DELETE(tgtype))
556  ereport(ERROR,
557  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
558  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
559  parser_errposition(pstate, var->location)));
560  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
561  ereport(ERROR,
562  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
563  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
564  parser_errposition(pstate, var->location)));
565  break;
566  default:
567  /* can't happen without add_missing_from, so just elog */
568  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
569  break;
570  }
571  }
572 
573  /* we'll need the rtable for recordDependencyOnExpr */
574  whenRtable = pstate->p_rtable;
575 
576  qual = nodeToString(whenClause);
577 
578  free_parsestate(pstate);
579  }
580  else
581  {
582  whenClause = NULL;
583  whenRtable = NIL;
584  qual = NULL;
585  }
586 
587  /*
588  * Find and validate the trigger function.
589  */
590  funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false);
591  if (!isInternal)
592  {
593  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
594  if (aclresult != ACLCHECK_OK)
595  aclcheck_error(aclresult, ACL_KIND_PROC,
596  NameListToString(stmt->funcname));
597  }
598  funcrettype = get_func_rettype(funcoid);
599  if (funcrettype != TRIGGEROID)
600  {
601  /*
602  * We allow OPAQUE just so we can load old dump files. When we see a
603  * trigger function declared OPAQUE, change it to TRIGGER.
604  */
605  if (funcrettype == OPAQUEOID)
606  {
607  ereport(WARNING,
608  (errmsg("changing return type of function %s from %s to %s",
609  NameListToString(stmt->funcname),
610  "opaque", "trigger")));
611  SetFunctionReturnType(funcoid, TRIGGEROID);
612  }
613  else
614  ereport(ERROR,
615  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
616  errmsg("function %s must return type %s",
617  NameListToString(stmt->funcname), "trigger")));
618  }
619 
620  /*
621  * If the command is a user-entered CREATE CONSTRAINT TRIGGER command that
622  * references one of the built-in RI_FKey trigger functions, assume it is
623  * from a dump of a pre-7.3 foreign key constraint, and take steps to
624  * convert this legacy representation into a regular foreign key
625  * constraint. Ugly, but necessary for loading old dump files.
626  */
627  if (stmt->isconstraint && !isInternal &&
628  list_length(stmt->args) >= 6 &&
629  (list_length(stmt->args) % 2) == 0 &&
630  RI_FKey_trigger_type(funcoid) != RI_TRIGGER_NONE)
631  {
632  /* Keep lock on target rel until end of xact */
633  heap_close(rel, NoLock);
634 
635  ConvertTriggerToFK(stmt, funcoid);
636 
637  return InvalidObjectAddress;
638  }
639 
640  /*
641  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
642  * corresponding pg_constraint entry.
643  */
644  if (stmt->isconstraint && !OidIsValid(constraintOid))
645  {
646  /* Internal callers should have made their own constraints */
647  Assert(!isInternal);
648  constraintOid = CreateConstraintEntry(stmt->trigname,
649  RelationGetNamespace(rel),
650  CONSTRAINT_TRIGGER,
651  stmt->deferrable,
652  stmt->initdeferred,
653  true,
654  RelationGetRelid(rel),
655  NULL, /* no conkey */
656  0,
657  InvalidOid, /* no domain */
658  InvalidOid, /* no index */
659  InvalidOid, /* no foreign key */
660  NULL,
661  NULL,
662  NULL,
663  NULL,
664  0,
665  ' ',
666  ' ',
667  ' ',
668  NULL, /* no exclusion */
669  NULL, /* no check constraint */
670  NULL,
671  NULL,
672  true, /* islocal */
673  0, /* inhcount */
674  true, /* isnoinherit */
675  isInternal); /* is_internal */
676  }
677 
678  /*
679  * Generate the trigger's OID now, so that we can use it in the name if
680  * needed.
681  */
682  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
683 
684  trigoid = GetNewOid(tgrel);
685 
686  /*
687  * If trigger is internally generated, modify the provided trigger name to
688  * ensure uniqueness by appending the trigger OID. (Callers will usually
689  * supply a simple constant trigger name in these cases.)
690  */
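 /*
  * For example, an internally-generated foreign-key trigger passed in with
  * the constant name "RI_ConstraintTrigger_a" would typically be stored
  * under a name such as "RI_ConstraintTrigger_a_16384", the suffix being
  * whatever OID was just generated.
  */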
691  if (isInternal)
692  {
693  snprintf(internaltrigname, sizeof(internaltrigname),
694  "%s_%u", stmt->trigname, trigoid);
695  trigname = internaltrigname;
696  }
697  else
698  {
699  /* user-defined trigger; use the specified trigger name as-is */
700  trigname = stmt->trigname;
701  }
702 
703  /*
704  * Scan pg_trigger for existing triggers on relation. We do this only to
705  * give a nice error message if there's already a trigger of the same
706  * name. (The unique index on tgrelid/tgname would complain anyway.) We
707  * can skip this for internally generated triggers, since the name
708  * modification above should be sufficient.
709  *
710  * NOTE that this is cool only because we have ShareRowExclusiveLock on
711  * the relation, so the trigger set won't be changing underneath us.
712  */
713  if (!isInternal)
714  {
715  ScanKeyInit(&key,
716  Anum_pg_trigger_tgrelid,
717  BTEqualStrategyNumber, F_OIDEQ,
718  ObjectIdGetDatum(RelationGetRelid(rel)));
719  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
720  NULL, 1, &key);
721  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
722  {
723  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
724 
725  if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
726  ereport(ERROR,
727  (errcode(ERRCODE_DUPLICATE_OBJECT),
728  errmsg("trigger \"%s\" for relation \"%s\" already exists",
729  trigname, RelationGetRelationName(rel))));
730  }
731  systable_endscan(tgscan);
732  }
733 
734  /*
735  * Build the new pg_trigger tuple.
736  */
737  memset(nulls, false, sizeof(nulls));
738 
739  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
740  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
741  CStringGetDatum(trigname));
742  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
743  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
744  values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(TRIGGER_FIRES_ON_ORIGIN);
745  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
746  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
747  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
748  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
749  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
750  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
751 
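 /*
  * Trigger arguments are stored in tgargs as a single bytea: in the loop
  * below, any backslash in an argument is doubled and each argument is
  * terminated with the four characters \000, so an argument list such as
  * ('a\b', 'c') becomes roughly the text a\\b\000c\000, which byteain then
  * converts into arguments separated by NUL bytes, the form decoded again
  * in RelationBuildTriggers().
  */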
752  if (stmt->args)
753  {
754  ListCell *le;
755  char *args;
756  int16 nargs = list_length(stmt->args);
757  int len = 0;
758 
759  foreach(le, stmt->args)
760  {
761  char *ar = strVal(lfirst(le));
762 
763  len += strlen(ar) + 4;
764  for (; *ar; ar++)
765  {
766  if (*ar == '\\')
767  len++;
768  }
769  }
770  args = (char *) palloc(len + 1);
771  args[0] = '\0';
772  foreach(le, stmt->args)
773  {
774  char *s = strVal(lfirst(le));
775  char *d = args + strlen(args);
776 
777  while (*s)
778  {
779  if (*s == '\\')
780  *d++ = '\\';
781  *d++ = *s++;
782  }
783  strcpy(d, "\\000");
784  }
785  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
786  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
787  CStringGetDatum(args));
788  }
789  else
790  {
791  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
792  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
793  CStringGetDatum(""));
794  }
795 
796  /* build column number array if it's a column-specific trigger */
797  ncolumns = list_length(stmt->columns);
798  if (ncolumns == 0)
799  columns = NULL;
800  else
801  {
802  ListCell *cell;
803  int i = 0;
804 
805  columns = (int16 *) palloc(ncolumns * sizeof(int16));
806  foreach(cell, stmt->columns)
807  {
808  char *name = strVal(lfirst(cell));
809  int16 attnum;
810  int j;
811 
812  /* Lookup column name. System columns are not allowed */
813  attnum = attnameAttNum(rel, name, false);
814  if (attnum == InvalidAttrNumber)
815  ereport(ERROR,
816  (errcode(ERRCODE_UNDEFINED_COLUMN),
817  errmsg("column \"%s\" of relation \"%s\" does not exist",
818  name, RelationGetRelationName(rel))));
819 
820  /* Check for duplicates */
821  for (j = i - 1; j >= 0; j--)
822  {
823  if (columns[j] == attnum)
824  ereport(ERROR,
825  (errcode(ERRCODE_DUPLICATE_COLUMN),
826  errmsg("column \"%s\" specified more than once",
827  name)));
828  }
829 
830  columns[i++] = attnum;
831  }
832  }
833  tgattr = buildint2vector(columns, ncolumns);
834  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
835 
836  /* set tgqual if trigger has WHEN clause */
837  if (qual)
838  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
839  else
840  nulls[Anum_pg_trigger_tgqual - 1] = true;
841 
842  if (oldtablename)
843  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
844  CStringGetDatum(oldtablename));
845  else
846  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
847  if (newtablename)
848  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
849  CStringGetDatum(newtablename));
850  else
851  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
852 
853  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
854 
855  /* force tuple to have the desired OID */
856  HeapTupleSetOid(tuple, trigoid);
857 
858  /*
859  * Insert tuple into pg_trigger.
860  */
861  CatalogTupleInsert(tgrel, tuple);
862 
863  heap_freetuple(tuple);
864  heap_close(tgrel, RowExclusiveLock);
865 
866  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
867  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
868  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
869  if (oldtablename)
870  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
871  if (newtablename)
872  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
873 
874  /*
875  * Update relation's pg_class entry. Crucial side-effect: other backends
876  * (and this one too!) are sent SI message to make them rebuild relcache
877  * entries.
878  */
879  pgrel = heap_open(RelationRelationId, RowExclusiveLock);
880  tuple = SearchSysCacheCopy1(RELOID,
881  ObjectIdGetDatum(RelationGetRelid(rel)));
882  if (!HeapTupleIsValid(tuple))
883  elog(ERROR, "cache lookup failed for relation %u",
884  RelationGetRelid(rel));
885 
886  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
887 
888  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
889 
890  heap_freetuple(tuple);
891  heap_close(pgrel, RowExclusiveLock);
892 
893  /*
894  * We used to try to update the rel's relcache entry here, but that's
895  * fairly pointless since it will happen as a byproduct of the upcoming
896  * CommandCounterIncrement...
897  */
898 
899  /*
900  * Record dependencies for trigger. Always place a normal dependency on
901  * the function.
902  */
903  myself.classId = TriggerRelationId;
904  myself.objectId = trigoid;
905  myself.objectSubId = 0;
906 
907  referenced.classId = ProcedureRelationId;
908  referenced.objectId = funcoid;
909  referenced.objectSubId = 0;
910  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
911 
912  if (isInternal && OidIsValid(constraintOid))
913  {
914  /*
915  * Internally-generated trigger for a constraint, so make it an
916  * internal dependency of the constraint. We can skip depending on
917  * the relation(s), as there'll be an indirect dependency via the
918  * constraint.
919  */
920  referenced.classId = ConstraintRelationId;
921  referenced.objectId = constraintOid;
922  referenced.objectSubId = 0;
923  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
924  }
925  else
926  {
927  /*
928  * User CREATE TRIGGER, so place dependencies. We make trigger be
929  * auto-dropped if its relation is dropped or if the FK relation is
930  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
931  */
932  referenced.classId = RelationRelationId;
933  referenced.objectId = RelationGetRelid(rel);
934  referenced.objectSubId = 0;
935  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
936  if (OidIsValid(constrrelid))
937  {
938  referenced.classId = RelationRelationId;
939  referenced.objectId = constrrelid;
940  referenced.objectSubId = 0;
941  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
942  }
943  /* Not possible to have an index dependency in this case */
944  Assert(!OidIsValid(indexOid));
945 
946  /*
947  * If it's a user-specified constraint trigger, make the constraint
948  * internally dependent on the trigger instead of vice versa.
949  */
950  if (OidIsValid(constraintOid))
951  {
952  referenced.classId = ConstraintRelationId;
953  referenced.objectId = constraintOid;
954  referenced.objectSubId = 0;
955  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
956  }
957  }
958 
959  /* If column-specific trigger, add normal dependencies on columns */
960  if (columns != NULL)
961  {
962  int i;
963 
964  referenced.classId = RelationRelationId;
965  referenced.objectId = RelationGetRelid(rel);
966  for (i = 0; i < ncolumns; i++)
967  {
968  referenced.objectSubId = columns[i];
969  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
970  }
971  }
972 
973  /*
974  * If it has a WHEN clause, add dependencies on objects mentioned in the
975  * expression (eg, functions, as well as any columns used).
976  */
977  if (whenClause != NULL)
978  recordDependencyOnExpr(&myself, whenClause, whenRtable,
979  DEPENDENCY_NORMAL);
980 
981  /* Post creation hook for new trigger */
982  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
983  isInternal);
984 
985  /* Keep lock on target rel until end of xact */
986  heap_close(rel, NoLock);
987 
988  return myself;
989 }
990 
991 
992 /*
993  * Convert legacy (pre-7.3) CREATE CONSTRAINT TRIGGER commands into
994  * full-fledged foreign key constraints.
995  *
996  * The conversion is complex because a pre-7.3 foreign key involved three
997  * separate triggers, which were reported separately in dumps. While the
998  * single trigger on the referencing table adds no new information, we need
999  * to know the trigger functions of both of the triggers on the referenced
1000  * table to build the constraint declaration. Also, due to lack of proper
1001  * dependency checking pre-7.3, it is possible that the source database had
1002  * an incomplete set of triggers resulting in an only partially enforced
1003  * FK constraint. (This would happen if one of the tables had been dropped
1004  * and re-created, but only if the DB had been affected by a 7.0 pg_dump bug
1005  * that caused loss of tgconstrrelid information.) We choose to translate to
1006  * an FK constraint only when we've seen all three triggers of a set. This is
1007  * implemented by storing unmatched items in a list in TopMemoryContext.
1008  * We match triggers together by comparing the trigger arguments (which
1009  * include constraint name, table and column names, so should be good enough).
1010  */
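 /*
  * For illustration, the three commands emitted by such an old dump for one
  * foreign key looked roughly like this (names and options abbreviated):
  *
  *     CREATE CONSTRAINT TRIGGER "<unnamed>" AFTER INSERT OR UPDATE ON fk_tab
  *         FROM pk_tab ... FOR EACH ROW EXECUTE PROCEDURE
  *         "RI_FKey_check_ins"('<unnamed>','fk_tab','pk_tab','UNSPECIFIED','fk_col','pk_col');
  *     CREATE CONSTRAINT TRIGGER "<unnamed>" AFTER DELETE ON pk_tab
  *         FROM fk_tab ... EXECUTE PROCEDURE "RI_FKey_noaction_del"(<same args>);
  *     CREATE CONSTRAINT TRIGGER "<unnamed>" AFTER UPDATE ON pk_tab
  *         FROM fk_tab ... EXECUTE PROCEDURE "RI_FKey_noaction_upd"(<same args>);
  *
  * The shared argument list is what lets the three triggers be matched up.
  */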
1011 typedef struct
1012 {
1013  List *args; /* list of (T_String) Values or NIL */
1014  Oid funcoids[3]; /* OIDs of trigger functions */
1015  /* The three function OIDs are stored in the order update, delete, child */
1016 } OldTriggerInfo;
1017 
1018 static void
1019 ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid)
1020 {
1021  static List *info_list = NIL;
1022 
1023  static const char *const funcdescr[3] = {
1024  gettext_noop("Found referenced table's UPDATE trigger."),
1025  gettext_noop("Found referenced table's DELETE trigger."),
1026  gettext_noop("Found referencing table's trigger.")
1027  };
1028 
1029  char *constr_name;
1030  char *fk_table_name;
1031  char *pk_table_name;
1032  char fk_matchtype = FKCONSTR_MATCH_SIMPLE;
1033  List *fk_attrs = NIL;
1034  List *pk_attrs = NIL;
1035  StringInfoData buf;
1036  int funcnum;
1037  OldTriggerInfo *info = NULL;
1038  ListCell *l;
1039  int i;
1040 
1041  /* Parse out the trigger arguments */
1042  constr_name = strVal(linitial(stmt->args));
1043  fk_table_name = strVal(lsecond(stmt->args));
1044  pk_table_name = strVal(lthird(stmt->args));
1045  i = 0;
1046  foreach(l, stmt->args)
1047  {
1048  Value *arg = (Value *) lfirst(l);
1049 
1050  i++;
1051  if (i < 4) /* skip constraint and table names */
1052  continue;
1053  if (i == 4) /* handle match type */
1054  {
1055  if (strcmp(strVal(arg), "FULL") == 0)
1056  fk_matchtype = FKCONSTR_MATCH_FULL;
1057  else
1058  fk_matchtype = FKCONSTR_MATCH_SIMPLE;
1059  continue;
1060  }
1061  if (i % 2)
1062  fk_attrs = lappend(fk_attrs, arg);
1063  else
1064  pk_attrs = lappend(pk_attrs, arg);
1065  }
1066 
1067  /* Prepare description of constraint for use in messages */
1068  initStringInfo(&buf);
1069  appendStringInfo(&buf, "FOREIGN KEY %s(",
1070  quote_identifier(fk_table_name));
1071  i = 0;
1072  foreach(l, fk_attrs)
1073  {
1074  Value *arg = (Value *) lfirst(l);
1075 
1076  if (i++ > 0)
1077  appendStringInfoChar(&buf, ',');
1078  appendStringInfoString(&buf, quote_identifier(strVal(arg)));
1079  }
1080  appendStringInfo(&buf, ") REFERENCES %s(",
1081  quote_identifier(pk_table_name));
1082  i = 0;
1083  foreach(l, pk_attrs)
1084  {
1085  Value *arg = (Value *) lfirst(l);
1086 
1087  if (i++ > 0)
1088  appendStringInfoChar(&buf, ',');
1089  appendStringInfoString(&buf, quote_identifier(strVal(arg)));
1090  }
1091  appendStringInfoChar(&buf, ')');
1092 
1093  /* Identify class of trigger --- update, delete, or referencing-table */
1094  switch (funcoid)
1095  {
1096  case F_RI_FKEY_CASCADE_UPD:
1097  case F_RI_FKEY_RESTRICT_UPD:
1098  case F_RI_FKEY_SETNULL_UPD:
1099  case F_RI_FKEY_SETDEFAULT_UPD:
1100  case F_RI_FKEY_NOACTION_UPD:
1101  funcnum = 0;
1102  break;
1103 
1104  case F_RI_FKEY_CASCADE_DEL:
1105  case F_RI_FKEY_RESTRICT_DEL:
1106  case F_RI_FKEY_SETNULL_DEL:
1107  case F_RI_FKEY_SETDEFAULT_DEL:
1108  case F_RI_FKEY_NOACTION_DEL:
1109  funcnum = 1;
1110  break;
1111 
1112  default:
1113  funcnum = 2;
1114  break;
1115  }
1116 
1117  /* See if we have a match to this trigger */
1118  foreach(l, info_list)
1119  {
1120  info = (OldTriggerInfo *) lfirst(l);
1121  if (info->funcoids[funcnum] == InvalidOid &&
1122  equal(info->args, stmt->args))
1123  {
1124  info->funcoids[funcnum] = funcoid;
1125  break;
1126  }
1127  }
1128 
1129  if (l == NULL)
1130  {
1131  /* First trigger of set, so create a new list entry */
1132  MemoryContext oldContext;
1133 
1134  ereport(NOTICE,
1135  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1136  constr_name, buf.data),
1137  errdetail_internal("%s", _(funcdescr[funcnum]))));
1138  oldContext = MemoryContextSwitchTo(TopMemoryContext);
1139  info = (OldTriggerInfo *) palloc0(sizeof(OldTriggerInfo));
1140  info->args = copyObject(stmt->args);
1141  info->funcoids[funcnum] = funcoid;
1142  info_list = lappend(info_list, info);
1143  MemoryContextSwitchTo(oldContext);
1144  }
1145  else if (info->funcoids[0] == InvalidOid ||
1146  info->funcoids[1] == InvalidOid ||
1147  info->funcoids[2] == InvalidOid)
1148  {
1149  /* Second trigger of set */
1150  ereport(NOTICE,
1151  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1152  constr_name, buf.data),
1153  errdetail_internal("%s", _(funcdescr[funcnum]))));
1154  }
1155  else
1156  {
1157  /* OK, we have a set, so make the FK constraint ALTER TABLE cmd */
1158  AlterTableStmt *atstmt = makeNode(AlterTableStmt);
1159  AlterTableCmd *atcmd = makeNode(AlterTableCmd);
1160  Constraint *fkcon = makeNode(Constraint);
1161  PlannedStmt *wrapper = makeNode(PlannedStmt);
1162 
1163  ereport(NOTICE,
1164  (errmsg("converting trigger group into constraint \"%s\" %s",
1165  constr_name, buf.data),
1166  errdetail_internal("%s", _(funcdescr[funcnum]))));
1167  fkcon->contype = CONSTR_FOREIGN;
1168  fkcon->location = -1;
1169  if (funcnum == 2)
1170  {
1171  /* This trigger is on the FK table */
1172  atstmt->relation = stmt->relation;
1173  if (stmt->constrrel)
1174  fkcon->pktable = stmt->constrrel;
1175  else
1176  {
1177  /* Work around ancient pg_dump bug that omitted constrrel */
1178  fkcon->pktable = makeRangeVar(NULL, pk_table_name, -1);
1179  }
1180  }
1181  else
1182  {
1183  /* This trigger is on the PK table */
1184  fkcon->pktable = stmt->relation;
1185  if (stmt->constrrel)
1186  atstmt->relation = stmt->constrrel;
1187  else
1188  {
1189  /* Work around ancient pg_dump bug that omitted constrrel */
1190  atstmt->relation = makeRangeVar(NULL, fk_table_name, -1);
1191  }
1192  }
1193  atstmt->cmds = list_make1(atcmd);
1194  atstmt->relkind = OBJECT_TABLE;
1195  atcmd->subtype = AT_AddConstraint;
1196  atcmd->def = (Node *) fkcon;
1197  if (strcmp(constr_name, "<unnamed>") == 0)
1198  fkcon->conname = NULL;
1199  else
1200  fkcon->conname = constr_name;
1201  fkcon->fk_attrs = fk_attrs;
1202  fkcon->pk_attrs = pk_attrs;
1203  fkcon->fk_matchtype = fk_matchtype;
1204  switch (info->funcoids[0])
1205  {
1206  case F_RI_FKEY_NOACTION_UPD:
1207  fkcon->fk_upd_action = FKCONSTR_ACTION_NOACTION;
1208  break;
1209  case F_RI_FKEY_CASCADE_UPD:
1210  fkcon->fk_upd_action = FKCONSTR_ACTION_CASCADE;
1211  break;
1212  case F_RI_FKEY_RESTRICT_UPD:
1213  fkcon->fk_upd_action = FKCONSTR_ACTION_RESTRICT;
1214  break;
1215  case F_RI_FKEY_SETNULL_UPD:
1216  fkcon->fk_upd_action = FKCONSTR_ACTION_SETNULL;
1217  break;
1218  case F_RI_FKEY_SETDEFAULT_UPD:
1219  fkcon->fk_upd_action = FKCONSTR_ACTION_SETDEFAULT;
1220  break;
1221  default:
1222  /* can't get here because of earlier checks */
1223  elog(ERROR, "confused about RI update function");
1224  }
1225  switch (info->funcoids[1])
1226  {
1227  case F_RI_FKEY_NOACTION_DEL:
1228  fkcon->fk_del_action = FKCONSTR_ACTION_NOACTION;
1229  break;
1230  case F_RI_FKEY_CASCADE_DEL:
1231  fkcon->fk_del_action = FKCONSTR_ACTION_CASCADE;
1232  break;
1233  case F_RI_FKEY_RESTRICT_DEL:
1234  fkcon->fk_del_action = FKCONSTR_ACTION_RESTRICT;
1235  break;
1236  case F_RI_FKEY_SETNULL_DEL:
1237  fkcon->fk_del_action = FKCONSTR_ACTION_SETNULL;
1238  break;
1239  case F_RI_FKEY_SETDEFAULT_DEL:
1240  fkcon->fk_del_action = FKCONSTR_ACTION_SETDEFAULT;
1241  break;
1242  default:
1243  /* can't get here because of earlier checks */
1244  elog(ERROR, "confused about RI delete function");
1245  }
1246  fkcon->deferrable = stmt->deferrable;
1247  fkcon->initdeferred = stmt->initdeferred;
1248  fkcon->skip_validation = false;
1249  fkcon->initially_valid = true;
1250 
1251  /* finally, wrap it in a dummy PlannedStmt */
1252  wrapper->commandType = CMD_UTILITY;
1253  wrapper->canSetTag = false;
1254  wrapper->utilityStmt = (Node *) atstmt;
1255  wrapper->stmt_location = -1;
1256  wrapper->stmt_len = -1;
1257 
1258  /* ... and execute it */
1259  ProcessUtility(wrapper,
1260  "(generated ALTER TABLE ADD FOREIGN KEY command)",
1261  PROCESS_UTILITY_SUBCOMMAND, NULL, NULL,
1262  None_Receiver, NULL);
1263 
1264  /* Remove the matched item from the list */
1265  info_list = list_delete_ptr(info_list, info);
1266  pfree(info);
1267  /* We leak the copied args ... not worth worrying about */
1268  }
1269 }
1270 
1271 /*
1272  * Guts of trigger deletion.
1273  */
1274 void
1275 RemoveTriggerById(Oid trigOid)
1276 {
1277  Relation tgrel;
1278  SysScanDesc tgscan;
1279  ScanKeyData skey[1];
1280  HeapTuple tup;
1281  Oid relid;
1282  Relation rel;
1283 
1284  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1285 
1286  /*
1287  * Find the trigger to delete.
1288  */
1289  ScanKeyInit(&skey[0],
1290  ObjectIdAttributeNumber,
1291  BTEqualStrategyNumber, F_OIDEQ,
1292  ObjectIdGetDatum(trigOid));
1293 
1294  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1295  NULL, 1, skey);
1296 
1297  tup = systable_getnext(tgscan);
1298  if (!HeapTupleIsValid(tup))
1299  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1300 
1301  /*
1302  * Open and exclusive-lock the relation the trigger belongs to.
1303  */
1304  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1305 
1306  rel = heap_open(relid, AccessExclusiveLock);
1307 
1308  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1309  rel->rd_rel->relkind != RELKIND_VIEW &&
1310  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1311  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1312  ereport(ERROR,
1313  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1314  errmsg("\"%s\" is not a table, view, or foreign table",
1315  RelationGetRelationName(rel))));
1316 
1317  if (!allowSystemTableMods && IsSystemRelation(rel))
1318  ereport(ERROR,
1319  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1320  errmsg("permission denied: \"%s\" is a system catalog",
1321  RelationGetRelationName(rel))));
1322 
1323  /*
1324  * Delete the pg_trigger tuple.
1325  */
1326  CatalogTupleDelete(tgrel, &tup->t_self);
1327 
1328  systable_endscan(tgscan);
1329  heap_close(tgrel, RowExclusiveLock);
1330 
1331  /*
1332  * We do not bother to try to determine whether any other triggers remain,
1333  * which would be needed in order to decide whether it's safe to clear the
1334  * relation's relhastriggers. (In any case, there might be a concurrent
1335  * process adding new triggers.) Instead, just force a relcache inval to
1336  * make other backends (and this one too!) rebuild their relcache entries.
1337  * There's no great harm in leaving relhastriggers true even if there are
1338  * no triggers left.
1339  */
1340  CacheInvalidateRelcache(rel);
1341 
1342  /* Keep lock on trigger's rel until end of xact */
1343  heap_close(rel, NoLock);
1344 }
1345 
1346 /*
1347  * get_trigger_oid - Look up a trigger by name to find its OID.
1348  *
1349  * If missing_ok is false, throw an error if trigger not found. If
1350  * true, just return InvalidOid.
1351  */
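 /*
  * For example, get_trigger_oid(relid, "my_trigger", true) quietly returns
  * InvalidOid when no such trigger exists, while passing false would raise
  * an error instead.
  */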
1352 Oid
1353 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1354 {
1355  Relation tgrel;
1356  ScanKeyData skey[2];
1357  SysScanDesc tgscan;
1358  HeapTuple tup;
1359  Oid oid;
1360 
1361  /*
1362  * Find the trigger, verify permissions, set up object address
1363  */
1364  tgrel = heap_open(TriggerRelationId, AccessShareLock);
1365 
1366  ScanKeyInit(&skey[0],
1367  Anum_pg_trigger_tgrelid,
1368  BTEqualStrategyNumber, F_OIDEQ,
1369  ObjectIdGetDatum(relid));
1370  ScanKeyInit(&skey[1],
1371  Anum_pg_trigger_tgname,
1372  BTEqualStrategyNumber, F_NAMEEQ,
1373  CStringGetDatum(trigname));
1374 
1375  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1376  NULL, 2, skey);
1377 
1378  tup = systable_getnext(tgscan);
1379 
1380  if (!HeapTupleIsValid(tup))
1381  {
1382  if (!missing_ok)
1383  ereport(ERROR,
1384  (errcode(ERRCODE_UNDEFINED_OBJECT),
1385  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1386  trigname, get_rel_name(relid))));
1387  oid = InvalidOid;
1388  }
1389  else
1390  {
1391  oid = HeapTupleGetOid(tup);
1392  }
1393 
1394  systable_endscan(tgscan);
1395  heap_close(tgrel, AccessShareLock);
1396  return oid;
1397 }
1398 
1399 /*
1400  * Perform permissions and integrity checks before acquiring a relation lock.
1401  */
1402 static void
1403 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1404  void *arg)
1405 {
1406  HeapTuple tuple;
1407  Form_pg_class form;
1408 
1409  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1410  if (!HeapTupleIsValid(tuple))
1411  return; /* concurrently dropped */
1412  form = (Form_pg_class) GETSTRUCT(tuple);
1413 
1414  /* only tables and views can have triggers */
1415  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1416  form->relkind != RELKIND_FOREIGN_TABLE &&
1417  form->relkind != RELKIND_PARTITIONED_TABLE)
1418  ereport(ERROR,
1419  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1420  errmsg("\"%s\" is not a table, view, or foreign table",
1421  rv->relname)));
1422 
1423  /* you must own the table to rename one of its triggers */
1424  if (!pg_class_ownercheck(relid, GetUserId()))
1425  aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname);
1426  if (!allowSystemTableMods && IsSystemClass(relid, form))
1427  ereport(ERROR,
1428  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1429  errmsg("permission denied: \"%s\" is a system catalog",
1430  rv->relname)));
1431 
1432  ReleaseSysCache(tuple);
1433 }
1434 
1435 /*
1436  * renametrig - changes the name of a trigger on a relation
1437  *
1438  * trigger name is changed in trigger catalog.
1439  * No record of the previous name is kept.
1440  *
1441  * get proper relrelation from relation catalog (if not arg)
1442  * scan trigger catalog
1443  * for name conflict (within rel)
1444  * for original trigger (if not arg)
1445  * modify tgname in trigger tuple
1446  * update row in catalog
1447  */
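 /*
  * This is reached for, e.g., ALTER TRIGGER my_trigger ON my_table RENAME TO
  * my_new_trigger (names are placeholders); stmt->subname carries the old
  * trigger name and stmt->newname the new one.
  */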
1448 ObjectAddress
1449 renametrig(RenameStmt *stmt)
1450 {
1451  Oid tgoid;
1452  Relation targetrel;
1453  Relation tgrel;
1454  HeapTuple tuple;
1455  SysScanDesc tgscan;
1456  ScanKeyData key[2];
1457  Oid relid;
1458  ObjectAddress address;
1459 
1460  /*
1461  * Look up name, check permissions, and acquire lock (which we will NOT
1462  * release until end of transaction).
1463  */
1464  relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1465  false, false,
1466  RangeVarCallbackForRenameTrigger,
1467  NULL);
1468 
1469  /* Have lock already, so just need to build relcache entry. */
1470  targetrel = relation_open(relid, NoLock);
1471 
1472  /*
1473  * Scan pg_trigger twice for existing triggers on relation. We do this in
1474  * order to ensure a trigger does not exist with newname (The unique index
1475  * on tgrelid/tgname would complain anyway) and to ensure a trigger does
1476  * exist with oldname.
1477  *
1478  * NOTE that this is cool only because we have AccessExclusiveLock on the
1479  * relation, so the trigger set won't be changing underneath us.
1480  */
1481  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1482 
1483  /*
1484  * First pass -- look for name conflict
1485  */
1486  ScanKeyInit(&key[0],
1487  Anum_pg_trigger_tgrelid,
1488  BTEqualStrategyNumber, F_OIDEQ,
1489  ObjectIdGetDatum(relid));
1490  ScanKeyInit(&key[1],
1491  Anum_pg_trigger_tgname,
1492  BTEqualStrategyNumber, F_NAMEEQ,
1493  PointerGetDatum(stmt->newname));
1494  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1495  NULL, 2, key);
1496  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1497  ereport(ERROR,
1498  (errcode(ERRCODE_DUPLICATE_OBJECT),
1499  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1500  stmt->newname, RelationGetRelationName(targetrel))));
1501  systable_endscan(tgscan);
1502 
1503  /*
1504  * Second pass -- look for trigger existing with oldname and update
1505  */
1506  ScanKeyInit(&key[0],
1507  Anum_pg_trigger_tgrelid,
1508  BTEqualStrategyNumber, F_OIDEQ,
1509  ObjectIdGetDatum(relid));
1510  ScanKeyInit(&key[1],
1511  Anum_pg_trigger_tgname,
1512  BTEqualStrategyNumber, F_NAMEEQ,
1513  PointerGetDatum(stmt->subname));
1514  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1515  NULL, 2, key);
1516  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1517  {
1518  tgoid = HeapTupleGetOid(tuple);
1519 
1520  /*
1521  * Update pg_trigger tuple with new tgname.
1522  */
1523  tuple = heap_copytuple(tuple); /* need a modifiable copy */
1524 
1525  namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname,
1526  stmt->newname);
1527 
1528  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1529 
1530  InvokeObjectPostAlterHook(TriggerRelationId,
1531  HeapTupleGetOid(tuple), 0);
1532 
1533  /*
1534  * Invalidate relation's relcache entry so that other backends (and
1535  * this one too!) are sent SI message to make them rebuild relcache
1536  * entries. (Ideally this should happen automatically...)
1537  */
1538  CacheInvalidateRelcache(targetrel);
1539  }
1540  else
1541  {
1542  ereport(ERROR,
1543  (errcode(ERRCODE_UNDEFINED_OBJECT),
1544  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1545  stmt->subname, RelationGetRelationName(targetrel))));
1546  }
1547 
1548  ObjectAddressSet(address, TriggerRelationId, tgoid);
1549 
1550  systable_endscan(tgscan);
1551 
1552  heap_close(tgrel, RowExclusiveLock);
1553 
1554  /*
1555  * Close rel, but keep exclusive lock!
1556  */
1557  relation_close(targetrel, NoLock);
1558 
1559  return address;
1560 }
1561 
1562 
1563 /*
1564  * EnableDisableTrigger()
1565  *
1566  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1567  * to change 'tgenabled' field for the specified trigger(s)
1568  *
1569  * rel: relation to process (caller must hold suitable lock on it)
1570  * tgname: trigger to process, or NULL to scan all triggers
1571  * fires_when: new value for tgenabled field. In addition to generic
1572  * enablement/disablement, this also defines when the trigger
1573  * should be fired in session replication roles.
1574  * skip_system: if true, skip "system" triggers (constraint triggers)
1575  *
1576  * Caller should have checked permissions for the table; here we also
1577  * enforce that superuser privilege is required to alter the state of
1578  * system triggers
1579  */
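 /*
  * For example, ALTER TABLE my_table DISABLE TRIGGER my_trigger arrives here
  * with fires_when = TRIGGER_DISABLED, while ENABLE REPLICA TRIGGER and
  * ENABLE ALWAYS TRIGGER pass TRIGGER_FIRES_ON_REPLICA and
  * TRIGGER_FIRES_ALWAYS respectively (constants from catalog/pg_trigger.h).
  */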
1580 void
1581 EnableDisableTrigger(Relation rel, const char *tgname,
1582  char fires_when, bool skip_system)
1583 {
1584  Relation tgrel;
1585  int nkeys;
1586  ScanKeyData keys[2];
1587  SysScanDesc tgscan;
1588  HeapTuple tuple;
1589  bool found;
1590  bool changed;
1591 
1592  /* Scan the relevant entries in pg_trigger */
1593  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1594 
1595  ScanKeyInit(&keys[0],
1596  Anum_pg_trigger_tgrelid,
1597  BTEqualStrategyNumber, F_OIDEQ,
1598  ObjectIdGetDatum(RelationGetRelid(rel)));
1599  if (tgname)
1600  {
1601  ScanKeyInit(&keys[1],
1602  Anum_pg_trigger_tgname,
1603  BTEqualStrategyNumber, F_NAMEEQ,
1604  CStringGetDatum(tgname));
1605  nkeys = 2;
1606  }
1607  else
1608  nkeys = 1;
1609 
1610  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1611  NULL, nkeys, keys);
1612 
1613  found = changed = false;
1614 
1615  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1616  {
1617  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1618 
1619  if (oldtrig->tgisinternal)
1620  {
1621  /* system trigger ... ok to process? */
1622  if (skip_system)
1623  continue;
1624  if (!superuser())
1625  ereport(ERROR,
1626  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1627  errmsg("permission denied: \"%s\" is a system trigger",
1628  NameStr(oldtrig->tgname))));
1629  }
1630 
1631  found = true;
1632 
1633  if (oldtrig->tgenabled != fires_when)
1634  {
1635  /* need to change this one ... make a copy to scribble on */
1636  HeapTuple newtup = heap_copytuple(tuple);
1637  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1638 
1639  newtrig->tgenabled = fires_when;
1640 
1641  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1642 
1643  heap_freetuple(newtup);
1644 
1645  changed = true;
1646  }
1647 
1648  InvokeObjectPostAlterHook(TriggerRelationId,
1649  HeapTupleGetOid(tuple), 0);
1650  }
1651 
1652  systable_endscan(tgscan);
1653 
1654  heap_close(tgrel, RowExclusiveLock);
1655 
1656  if (tgname && !found)
1657  ereport(ERROR,
1658  (errcode(ERRCODE_UNDEFINED_OBJECT),
1659  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1660  tgname, RelationGetRelationName(rel))));
1661 
1662  /*
1663  * If we changed anything, broadcast a SI inval message to force each
1664  * backend (including our own!) to rebuild relation's relcache entry.
1665  * Otherwise they will fail to apply the change promptly.
1666  */
1667  if (changed)
1668  CacheInvalidateRelcache(rel);
1669 }
1670 
1671 
1672 /*
1673  * Build trigger data to attach to the given relcache entry.
1674  *
1675  * Note that trigger data attached to a relcache entry must be stored in
1676  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1677  * But we should be running in a less long-lived working context. To avoid
1678  * leaking cache memory if this routine fails partway through, we build a
1679  * temporary TriggerDesc in working memory and then copy the completed
1680  * structure into cache memory.
1681  */
1682 void
1683 RelationBuildTriggers(Relation relation)
1684 {
1685  TriggerDesc *trigdesc;
1686  int numtrigs;
1687  int maxtrigs;
1688  Trigger *triggers;
1689  Relation tgrel;
1690  ScanKeyData skey;
1691  SysScanDesc tgscan;
1692  HeapTuple htup;
1693  MemoryContext oldContext;
1694  int i;
1695 
1696  /*
1697  * Allocate a working array to hold the triggers (the array is extended if
1698  * necessary)
1699  */
1700  maxtrigs = 16;
1701  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1702  numtrigs = 0;
1703 
1704  /*
1705  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1706  * be reading the triggers in name order, except possibly during
1707  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1708  * ensures that triggers will be fired in name order.
1709  */
1710  ScanKeyInit(&skey,
1711  Anum_pg_trigger_tgrelid,
1712  BTEqualStrategyNumber, F_OIDEQ,
1713  ObjectIdGetDatum(RelationGetRelid(relation)));
1714 
1715  tgrel = heap_open(TriggerRelationId, AccessShareLock);
1716  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1717  NULL, 1, &skey);
1718 
1719  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1720  {
1721  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1722  Trigger *build;
1723  Datum datum;
1724  bool isnull;
1725 
1726  if (numtrigs >= maxtrigs)
1727  {
1728  maxtrigs *= 2;
1729  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1730  }
1731  build = &(triggers[numtrigs]);
1732 
1733  build->tgoid = HeapTupleGetOid(htup);
1734  build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1735  NameGetDatum(&pg_trigger->tgname)));
1736  build->tgfoid = pg_trigger->tgfoid;
1737  build->tgtype = pg_trigger->tgtype;
1738  build->tgenabled = pg_trigger->tgenabled;
1739  build->tgisinternal = pg_trigger->tgisinternal;
1740  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1741  build->tgconstrindid = pg_trigger->tgconstrindid;
1742  build->tgconstraint = pg_trigger->tgconstraint;
1743  build->tgdeferrable = pg_trigger->tgdeferrable;
1744  build->tginitdeferred = pg_trigger->tginitdeferred;
1745  build->tgnargs = pg_trigger->tgnargs;
1746  /* tgattr is first var-width field, so OK to access directly */
1747  build->tgnattr = pg_trigger->tgattr.dim1;
1748  if (build->tgnattr > 0)
1749  {
1750  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1751  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1752  build->tgnattr * sizeof(int16));
1753  }
1754  else
1755  build->tgattr = NULL;
1756  if (build->tgnargs > 0)
1757  {
1758  bytea *val;
1759  char *p;
1760 
1761  val = DatumGetByteaPP(fastgetattr(htup,
1762  Anum_pg_trigger_tgargs,
1763  tgrel->rd_att, &isnull));
1764  if (isnull)
1765  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1766  RelationGetRelationName(relation));
1767  p = (char *) VARDATA_ANY(val);
1768  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1769  for (i = 0; i < build->tgnargs; i++)
1770  {
1771  build->tgargs[i] = pstrdup(p);
1772  p += strlen(p) + 1;
1773  }
1774  }
1775  else
1776  build->tgargs = NULL;
1777 
1778  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1779  tgrel->rd_att, &isnull);
1780  if (!isnull)
1781  build->tgoldtable =
1782  DatumGetCString(DirectFunctionCall1(nameout, datum));
1783  else
1784  build->tgoldtable = NULL;
1785 
1786  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1787  tgrel->rd_att, &isnull);
1788  if (!isnull)
1789  build->tgnewtable =
1790  DatumGetCString(DirectFunctionCall1(nameout, datum));
1791  else
1792  build->tgnewtable = NULL;
1793 
1794  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1795  tgrel->rd_att, &isnull);
1796  if (!isnull)
1797  build->tgqual = TextDatumGetCString(datum);
1798  else
1799  build->tgqual = NULL;
1800 
1801  numtrigs++;
1802  }
1803 
1804  systable_endscan(tgscan);
1805  heap_close(tgrel, AccessShareLock);
1806 
1807  /* There might not be any triggers */
1808  if (numtrigs == 0)
1809  {
1810  pfree(triggers);
1811  return;
1812  }
1813 
1814  /* Build trigdesc */
1815  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1816  trigdesc->triggers = triggers;
1817  trigdesc->numtriggers = numtrigs;
1818  for (i = 0; i < numtrigs; i++)
1819  SetTriggerFlags(trigdesc, &(triggers[i]));
1820 
1821  /* Copy completed trigdesc into cache storage */
1822  oldContext = MemoryContextSwitchTo(CacheMemoryContext);
1823  relation->trigdesc = CopyTriggerDesc(trigdesc);
1824  MemoryContextSwitchTo(oldContext);
1825 
1826  /* Release working memory */
1827  FreeTriggerDesc(trigdesc);
1828 }
1829 
1830 /*
1831  * Update the TriggerDesc's hint flags to include the specified trigger
1832  */
1833 static void
1834 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
1835 {
1836  int16 tgtype = trigger->tgtype;
1837 
1838  trigdesc->trig_insert_before_row |=
1839  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1840  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1841  trigdesc->trig_insert_after_row |=
1842  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1843  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1844  trigdesc->trig_insert_instead_row |=
1845  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1846  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
1847  trigdesc->trig_insert_before_statement |=
1848  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1849  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1850  trigdesc->trig_insert_after_statement |=
1851  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1852  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1853  trigdesc->trig_update_before_row |=
1854  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1855  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1856  trigdesc->trig_update_after_row |=
1857  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1858  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1859  trigdesc->trig_update_instead_row |=
1860  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1861  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
1862  trigdesc->trig_update_before_statement |=
1863  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1864  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1865  trigdesc->trig_update_after_statement |=
1866  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1867  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1868  trigdesc->trig_delete_before_row |=
1869  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1870  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1871  trigdesc->trig_delete_after_row |=
1872  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1873  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1874  trigdesc->trig_delete_instead_row |=
1875  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1876  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
1877  trigdesc->trig_delete_before_statement |=
1878  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1879  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1880  trigdesc->trig_delete_after_statement |=
1881  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1882  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1883  /* there are no row-level truncate triggers */
1884  trigdesc->trig_truncate_before_statement |=
1885  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1886  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
1887  trigdesc->trig_truncate_after_statement |=
1888  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1889  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
1890 
1891  trigdesc->trig_insert_new_table |=
1892  (TRIGGER_FOR_INSERT(tgtype) &&
1893  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1894  trigdesc->trig_update_old_table |=
1895  (TRIGGER_FOR_UPDATE(tgtype) &&
1896  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1897  trigdesc->trig_update_new_table |=
1898  (TRIGGER_FOR_UPDATE(tgtype) &&
1899  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1900  trigdesc->trig_delete_old_table |=
1901  (TRIGGER_FOR_DELETE(tgtype) &&
1902  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1903 }
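
For orientation, a minimal caller-side sketch of how these precomputed hint flags are meant to be consumed (modeled on the checks in nodeModifyTable.c, not a verbatim excerpt of that file), so the per-trigger loops in the ExecBR* routines below are only entered when a matching trigger actually exists:

    /* Illustrative sketch only: test the cheap hint flag first. */
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->trig_insert_before_row)
    {
        slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
        if (slot == NULL)        /* a trigger asked to "do nothing" */
            return NULL;
    }
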
1904 
1905 /*
1906  * Copy a TriggerDesc data structure.
1907  *
1908  * The copy is allocated in the current memory context.
1909  */
1910 TriggerDesc *
1911 CopyTriggerDesc(TriggerDesc *trigdesc)
1912 {
1913  TriggerDesc *newdesc;
1914  Trigger *trigger;
1915  int i;
1916 
1917  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
1918  return NULL;
1919 
1920  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
1921  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
1922 
1923  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
1924  memcpy(trigger, trigdesc->triggers,
1925  trigdesc->numtriggers * sizeof(Trigger));
1926  newdesc->triggers = trigger;
1927 
1928  for (i = 0; i < trigdesc->numtriggers; i++)
1929  {
1930  trigger->tgname = pstrdup(trigger->tgname);
1931  if (trigger->tgnattr > 0)
1932  {
1933  int16 *newattr;
1934 
1935  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
1936  memcpy(newattr, trigger->tgattr,
1937  trigger->tgnattr * sizeof(int16));
1938  trigger->tgattr = newattr;
1939  }
1940  if (trigger->tgnargs > 0)
1941  {
1942  char **newargs;
1943  int16 j;
1944 
1945  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
1946  for (j = 0; j < trigger->tgnargs; j++)
1947  newargs[j] = pstrdup(trigger->tgargs[j]);
1948  trigger->tgargs = newargs;
1949  }
1950  if (trigger->tgqual)
1951  trigger->tgqual = pstrdup(trigger->tgqual);
1952  if (trigger->tgoldtable)
1953  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
1954  if (trigger->tgnewtable)
1955  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
1956  trigger++;
1957  }
1958 
1959  return newdesc;
1960 }
1961 
1962 /*
1963  * Free a TriggerDesc data structure.
1964  */
1965 void
1966 FreeTriggerDesc(TriggerDesc *trigdesc)
1967 {
1968  Trigger *trigger;
1969  int i;
1970 
1971  if (trigdesc == NULL)
1972  return;
1973 
1974  trigger = trigdesc->triggers;
1975  for (i = 0; i < trigdesc->numtriggers; i++)
1976  {
1977  pfree(trigger->tgname);
1978  if (trigger->tgnattr > 0)
1979  pfree(trigger->tgattr);
1980  if (trigger->tgnargs > 0)
1981  {
1982  while (--(trigger->tgnargs) >= 0)
1983  pfree(trigger->tgargs[trigger->tgnargs]);
1984  pfree(trigger->tgargs);
1985  }
1986  if (trigger->tgqual)
1987  pfree(trigger->tgqual);
1988  if (trigger->tgoldtable)
1989  pfree(trigger->tgoldtable);
1990  if (trigger->tgnewtable)
1991  pfree(trigger->tgnewtable);
1992  trigger++;
1993  }
1994  pfree(trigdesc->triggers);
1995  pfree(trigdesc);
1996 }
1997 
1998 /*
1999  * Compare two TriggerDesc structures for logical equality.
2000  */
2001 #ifdef NOT_USED
2002 bool
2003 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
2004 {
2005  int i,
2006  j;
2007 
2008  /*
2009  * We need not examine the hint flags, just the trigger array itself; if
2010  * we have the same triggers with the same types, the flags should match.
2011  *
2012  * As of 7.3 we assume trigger set ordering is significant in the
2013  * comparison; so we just compare corresponding slots of the two sets.
2014  *
2015  * Note: comparing the stringToNode forms of the WHEN clauses means that
2016  * parse column locations will affect the result. This is okay as long as
2017  * this function is only used for detecting exact equality, as for example
2018  * in checking for staleness of a cache entry.
2019  */
2020  if (trigdesc1 != NULL)
2021  {
2022  if (trigdesc2 == NULL)
2023  return false;
2024  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
2025  return false;
2026  for (i = 0; i < trigdesc1->numtriggers; i++)
2027  {
2028  Trigger *trig1 = trigdesc1->triggers + i;
2029  Trigger *trig2 = trigdesc2->triggers + i;
2030 
2031  if (trig1->tgoid != trig2->tgoid)
2032  return false;
2033  if (strcmp(trig1->tgname, trig2->tgname) != 0)
2034  return false;
2035  if (trig1->tgfoid != trig2->tgfoid)
2036  return false;
2037  if (trig1->tgtype != trig2->tgtype)
2038  return false;
2039  if (trig1->tgenabled != trig2->tgenabled)
2040  return false;
2041  if (trig1->tgisinternal != trig2->tgisinternal)
2042  return false;
2043  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
2044  return false;
2045  if (trig1->tgconstrindid != trig2->tgconstrindid)
2046  return false;
2047  if (trig1->tgconstraint != trig2->tgconstraint)
2048  return false;
2049  if (trig1->tgdeferrable != trig2->tgdeferrable)
2050  return false;
2051  if (trig1->tginitdeferred != trig2->tginitdeferred)
2052  return false;
2053  if (trig1->tgnargs != trig2->tgnargs)
2054  return false;
2055  if (trig1->tgnattr != trig2->tgnattr)
2056  return false;
2057  if (trig1->tgnattr > 0 &&
2058  memcmp(trig1->tgattr, trig2->tgattr,
2059  trig1->tgnattr * sizeof(int16)) != 0)
2060  return false;
2061  for (j = 0; j < trig1->tgnargs; j++)
2062  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
2063  return false;
2064  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
2065  /* ok */ ;
2066  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
2067  return false;
2068  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
2069  return false;
2070  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
2071  /* ok */ ;
2072  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
2073  return false;
2074  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
2075  return false;
2076  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
2077  /* ok */ ;
2078  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
2079  return false;
2080  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
2081  return false;
2082  }
2083  }
2084  else if (trigdesc2 != NULL)
2085  return false;
2086  return true;
2087 }
2088 #endif /* NOT_USED */
2089 
2090 /*
2091  * Check if there is a row-level trigger with transition tables that prevents
2092  * a table from becoming an inheritance child or partition. Return the name
2093  * of the first such incompatible trigger, or NULL if there is none.
2094  */
2095 const char *
2096 FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
2097 {
2098  if (trigdesc != NULL)
2099  {
2100  int i;
2101 
2102  for (i = 0; i < trigdesc->numtriggers; ++i)
2103  {
2104  Trigger *trigger = &trigdesc->triggers[i];
2105 
2106  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2107  return trigger->tgname;
2108  }
2109  }
2110 
2111  return NULL;
2112 }
2113 
2114 /*
2115  * Call a trigger function.
2116  *
2117  * trigdata: trigger descriptor.
2118  * tgindx: trigger's index in finfo and instr arrays.
2119  * finfo: array of cached trigger function call information.
2120  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2121  * per_tuple_context: memory context to execute the function in.
2122  *
2123  * Returns the tuple (or NULL) as returned by the function.
2124  */
2125 static HeapTuple
2126 ExecCallTriggerFunc(TriggerData *trigdata,
2127  int tgindx,
2128  FmgrInfo *finfo,
2129  Instrumentation *instr,
2130  MemoryContext per_tuple_context)
2131 {
2132  FunctionCallInfoData fcinfo;
2133  PgStat_FunctionCallUsage fcusage;
2134  Datum result;
2135  MemoryContext oldContext;
2136 
2137  /*
2138  * Protect against code paths that may fail to initialize transition table
2139  * info.
2140  */
2141  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2142  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2143  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2144  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2145  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2146  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2147  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2148 
2149  finfo += tgindx;
2150 
2151  /*
2152  * We cache fmgr lookup info, to avoid making the lookup again on each
2153  * call.
2154  */
2155  if (finfo->fn_oid == InvalidOid)
2156  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2157 
2158  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2159 
2160  /*
2161  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2162  */
2163  if (instr)
2164  InstrStartNode(instr + tgindx);
2165 
2166  /*
2167  * Do the function evaluation in the per-tuple memory context, so that
2168  * leaked memory will be reclaimed once per tuple. Note in particular that
2169  * any new tuple created by the trigger function will live till the end of
2170  * the tuple cycle.
2171  */
2172  oldContext = MemoryContextSwitchTo(per_tuple_context);
2173 
2174  /*
2175  * Call the function, passing no arguments but setting a context.
2176  */
2177  InitFunctionCallInfoData(fcinfo, finfo, 0,
2178  InvalidOid, (Node *) trigdata, NULL);
2179 
2180  pgstat_init_function_usage(&fcinfo, &fcusage);
2181 
2182  MyTriggerDepth++;
2183  PG_TRY();
2184  {
2185  result = FunctionCallInvoke(&fcinfo);
2186  }
2187  PG_CATCH();
2188  {
2189  MyTriggerDepth--;
2190  PG_RE_THROW();
2191  }
2192  PG_END_TRY();
2193  MyTriggerDepth--;
2194 
2195  pgstat_end_function_usage(&fcusage, true);
2196 
2197  MemoryContextSwitchTo(oldContext);
2198 
2199  /*
2200  * Trigger protocol allows function to return a null pointer, but NOT to
2201  * set the isnull result flag.
2202  */
2203  if (fcinfo.isnull)
2204  ereport(ERROR,
2205  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2206  errmsg("trigger function %u returned null value",
2207  fcinfo.flinfo->fn_oid)));
2208 
2209  /*
2210  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2211  * one "tuple returned" (really the number of firings).
2212  */
2213  if (instr)
2214  InstrStopNode(instr + tgindx, 1);
2215 
2216  return (HeapTuple) DatumGetPointer(result);
2217 }
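
For reference, a minimal C trigger function that satisfies the calling protocol enforced above (the function name noop_trigger is hypothetical; the sketch assumes the standard V1 calling convention and the TriggerData interface from commands/trigger.h):

    #include "postgres.h"
    #include "fmgr.h"
    #include "commands/trigger.h"

    PG_MODULE_MAGIC;
    PG_FUNCTION_INFO_V1(noop_trigger);

    Datum
    noop_trigger(PG_FUNCTION_ARGS)
    {
        TriggerData *trigdata = (TriggerData *) fcinfo->context;

        if (!CALLED_AS_TRIGGER(fcinfo))
            elog(ERROR, "noop_trigger: not called by trigger manager");

        /*
         * Return a HeapTuple, never the SQL null flag.  For a row-level
         * UPDATE the candidate new row is tg_newtuple; otherwise
         * tg_trigtuple is the row being inserted or deleted.  Returning it
         * unchanged lets the operation proceed; returning NULL (a null
         * pointer, not isnull) would suppress it for this row.
         */
        if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
            return PointerGetDatum(trigdata->tg_newtuple);
        return PointerGetDatum(trigdata->tg_trigtuple);
    }
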
2218 
2219 void
2220 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2221 {
2222  TriggerDesc *trigdesc;
2223  int i;
2224  TriggerData LocTriggerData;
2225 
2226  trigdesc = relinfo->ri_TrigDesc;
2227 
2228  if (trigdesc == NULL)
2229  return;
2230  if (!trigdesc->trig_insert_before_statement)
2231  return;
2232 
2233  /* no-op if we already fired BS triggers in this context */
2234  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2235  CMD_INSERT))
2236  return;
2237 
2238  LocTriggerData.type = T_TriggerData;
2239  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2240  TRIGGER_EVENT_BEFORE;
2241  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2242  LocTriggerData.tg_trigtuple = NULL;
2243  LocTriggerData.tg_newtuple = NULL;
2244  LocTriggerData.tg_oldtable = NULL;
2245  LocTriggerData.tg_newtable = NULL;
2246  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2247  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2248  for (i = 0; i < trigdesc->numtriggers; i++)
2249  {
2250  Trigger *trigger = &trigdesc->triggers[i];
2251  HeapTuple newtuple;
2252 
2253  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2254  TRIGGER_TYPE_STATEMENT,
2255  TRIGGER_TYPE_BEFORE,
2256  TRIGGER_TYPE_INSERT))
2257  continue;
2258  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2259  NULL, NULL, NULL))
2260  continue;
2261 
2262  LocTriggerData.tg_trigger = trigger;
2263  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2264  i,
2265  relinfo->ri_TrigFunctions,
2266  relinfo->ri_TrigInstrument,
2267  GetPerTupleMemoryContext(estate));
2268 
2269  if (newtuple)
2270  ereport(ERROR,
2271  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2272  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2273  }
2274 }
2275 
2276 void
2277 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2278  TransitionCaptureState *transition_capture)
2279 {
2280  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2281 
2282  if (trigdesc && trigdesc->trig_insert_after_statement)
2283  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2284  false, NULL, NULL, NIL, NULL, transition_capture);
2285 }
2286 
2287 TupleTableSlot *
2288 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2289  TupleTableSlot *slot)
2290 {
2291  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2292  HeapTuple slottuple = ExecMaterializeSlot(slot);
2293  HeapTuple newtuple = slottuple;
2294  HeapTuple oldtuple;
2295  TriggerData LocTriggerData;
2296  int i;
2297 
2298  LocTriggerData.type = T_TriggerData;
2299  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2300  TRIGGER_EVENT_ROW |
2301  TRIGGER_EVENT_BEFORE;
2302  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2303  LocTriggerData.tg_newtuple = NULL;
2304  LocTriggerData.tg_oldtable = NULL;
2305  LocTriggerData.tg_newtable = NULL;
2306  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2307  for (i = 0; i < trigdesc->numtriggers; i++)
2308  {
2309  Trigger *trigger = &trigdesc->triggers[i];
2310 
2311  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2312  TRIGGER_TYPE_ROW,
2313  TRIGGER_TYPE_BEFORE,
2314  TRIGGER_TYPE_INSERT))
2315  continue;
2316  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2317  NULL, NULL, newtuple))
2318  continue;
2319 
2320  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2321  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2322  LocTriggerData.tg_trigger = trigger;
2323  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2324  i,
2325  relinfo->ri_TrigFunctions,
2326  relinfo->ri_TrigInstrument,
2327  GetPerTupleMemoryContext(estate));
2328  if (oldtuple != newtuple && oldtuple != slottuple)
2329  heap_freetuple(oldtuple);
2330  if (newtuple == NULL)
2331  return NULL; /* "do nothing" */
2332  }
2333 
2334  if (newtuple != slottuple)
2335  {
2336  /*
2337  * Return the modified tuple using the es_trig_tuple_slot. We assume
2338  * the tuple was allocated in per-tuple memory context, and therefore
2339  * will go away by itself. The tuple table slot should not try to
2340  * clear it.
2341  */
2342  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2343  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2344 
2345  if (newslot->tts_tupleDescriptor != tupdesc)
2346  ExecSetSlotDescriptor(newslot, tupdesc);
2347  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2348  slot = newslot;
2349  }
2350  return slot;
2351 }
2352 
2353 void
2354 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2355  HeapTuple trigtuple, List *recheckIndexes,
2356  TransitionCaptureState *transition_capture)
2357 {
2358  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2359 
2360  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2361  (transition_capture && transition_capture->tcs_insert_new_table))
2362  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2363  true, NULL, trigtuple,
2364  recheckIndexes, NULL,
2365  transition_capture);
2366 }
2367 
2368 TupleTableSlot *
2369 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2370  TupleTableSlot *slot)
2371 {
2372  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2373  HeapTuple slottuple = ExecMaterializeSlot(slot);
2374  HeapTuple newtuple = slottuple;
2375  HeapTuple oldtuple;
2376  TriggerData LocTriggerData;
2377  int i;
2378 
2379  LocTriggerData.type = T_TriggerData;
2380  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2381  TRIGGER_EVENT_ROW |
2382  TRIGGER_EVENT_INSTEAD;
2383  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2384  LocTriggerData.tg_newtuple = NULL;
2385  LocTriggerData.tg_oldtable = NULL;
2386  LocTriggerData.tg_newtable = NULL;
2387  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2388  for (i = 0; i < trigdesc->numtriggers; i++)
2389  {
2390  Trigger *trigger = &trigdesc->triggers[i];
2391 
2392  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2393  TRIGGER_TYPE_ROW,
2394  TRIGGER_TYPE_INSTEAD,
2395  TRIGGER_TYPE_INSERT))
2396  continue;
2397  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2398  NULL, NULL, newtuple))
2399  continue;
2400 
2401  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2402  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2403  LocTriggerData.tg_trigger = trigger;
2404  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2405  i,
2406  relinfo->ri_TrigFunctions,
2407  relinfo->ri_TrigInstrument,
2408  GetPerTupleMemoryContext(estate));
2409  if (oldtuple != newtuple && oldtuple != slottuple)
2410  heap_freetuple(oldtuple);
2411  if (newtuple == NULL)
2412  return NULL; /* "do nothing" */
2413  }
2414 
2415  if (newtuple != slottuple)
2416  {
2417  /*
2418  * Return the modified tuple using the es_trig_tuple_slot. We assume
2419  * the tuple was allocated in per-tuple memory context, and therefore
2420  * will go away by itself. The tuple table slot should not try to
2421  * clear it.
2422  */
2423  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2424  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2425 
2426  if (newslot->tts_tupleDescriptor != tupdesc)
2427  ExecSetSlotDescriptor(newslot, tupdesc);
2428  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2429  slot = newslot;
2430  }
2431  return slot;
2432 }
2433 
2434 void
2435 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2436 {
2437  TriggerDesc *trigdesc;
2438  int i;
2439  TriggerData LocTriggerData;
2440 
2441  trigdesc = relinfo->ri_TrigDesc;
2442 
2443  if (trigdesc == NULL)
2444  return;
2445  if (!trigdesc->trig_delete_before_statement)
2446  return;
2447 
2448  /* no-op if we already fired BS triggers in this context */
2449  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2450  CMD_DELETE))
2451  return;
2452 
2453  LocTriggerData.type = T_TriggerData;
2454  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2455  TRIGGER_EVENT_BEFORE;
2456  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2457  LocTriggerData.tg_trigtuple = NULL;
2458  LocTriggerData.tg_newtuple = NULL;
2459  LocTriggerData.tg_oldtable = NULL;
2460  LocTriggerData.tg_newtable = NULL;
2461  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2462  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2463  for (i = 0; i < trigdesc->numtriggers; i++)
2464  {
2465  Trigger *trigger = &trigdesc->triggers[i];
2466  HeapTuple newtuple;
2467 
2468  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2469  TRIGGER_TYPE_STATEMENT,
2470  TRIGGER_TYPE_BEFORE,
2471  TRIGGER_TYPE_DELETE))
2472  continue;
2473  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2474  NULL, NULL, NULL))
2475  continue;
2476 
2477  LocTriggerData.tg_trigger = trigger;
2478  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2479  i,
2480  relinfo->ri_TrigFunctions,
2481  relinfo->ri_TrigInstrument,
2482  GetPerTupleMemoryContext(estate));
2483 
2484  if (newtuple)
2485  ereport(ERROR,
2486  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2487  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2488  }
2489 }
2490 
2491 void
2492 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2493  TransitionCaptureState *transition_capture)
2494 {
2495  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2496 
2497  if (trigdesc && trigdesc->trig_delete_after_statement)
2498  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2499  false, NULL, NULL, NIL, NULL, transition_capture);
2500 }
2501 
2502 bool
2503 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2504  ResultRelInfo *relinfo,
2505  ItemPointer tupleid,
2506  HeapTuple fdw_trigtuple)
2507 {
2508  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2509  bool result = true;
2510  TriggerData LocTriggerData;
2511  HeapTuple trigtuple;
2512  HeapTuple newtuple;
2513  TupleTableSlot *newSlot;
2514  int i;
2515 
2516  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2517  if (fdw_trigtuple == NULL)
2518  {
2519  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2520  LockTupleExclusive, &newSlot);
2521  if (trigtuple == NULL)
2522  return false;
2523  }
2524  else
2525  trigtuple = fdw_trigtuple;
2526 
2527  LocTriggerData.type = T_TriggerData;
2528  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2529  TRIGGER_EVENT_ROW |
2530  TRIGGER_EVENT_BEFORE;
2531  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2532  LocTriggerData.tg_newtuple = NULL;
2533  LocTriggerData.tg_oldtable = NULL;
2534  LocTriggerData.tg_newtable = NULL;
2535  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2536  for (i = 0; i < trigdesc->numtriggers; i++)
2537  {
2538  Trigger *trigger = &trigdesc->triggers[i];
2539 
2540  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2541  TRIGGER_TYPE_ROW,
2542  TRIGGER_TYPE_BEFORE,
2543  TRIGGER_TYPE_DELETE))
2544  continue;
2545  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2546  NULL, trigtuple, NULL))
2547  continue;
2548 
2549  LocTriggerData.tg_trigtuple = trigtuple;
2550  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2551  LocTriggerData.tg_trigger = trigger;
2552  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2553  i,
2554  relinfo->ri_TrigFunctions,
2555  relinfo->ri_TrigInstrument,
2556  GetPerTupleMemoryContext(estate));
2557  if (newtuple == NULL)
2558  {
2559  result = false; /* tell caller to suppress delete */
2560  break;
2561  }
2562  if (newtuple != trigtuple)
2563  heap_freetuple(newtuple);
2564  }
2565  if (trigtuple != fdw_trigtuple)
2566  heap_freetuple(trigtuple);
2567 
2568  return result;
2569 }
2570 
2571 void
2572 ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2573  ItemPointer tupleid,
2574  HeapTuple fdw_trigtuple,
2575  TransitionCaptureState *transition_capture)
2576 {
2577  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2578 
2579  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2580  (transition_capture && transition_capture->tcs_delete_old_table))
2581  {
2582  HeapTuple trigtuple;
2583 
2584  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2585  if (fdw_trigtuple == NULL)
2586  trigtuple = GetTupleForTrigger(estate,
2587  NULL,
2588  relinfo,
2589  tupleid,
2590  LockTupleExclusive,
2591  NULL);
2592  else
2593  trigtuple = fdw_trigtuple;
2594 
2595  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2596  true, trigtuple, NULL, NIL, NULL,
2597  transition_capture);
2598  if (trigtuple != fdw_trigtuple)
2599  heap_freetuple(trigtuple);
2600  }
2601 }
2602 
2603 bool
2604 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2605  HeapTuple trigtuple)
2606 {
2607  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2608  TriggerData LocTriggerData;
2609  HeapTuple rettuple;
2610  int i;
2611 
2612  LocTriggerData.type = T_TriggerData;
2613  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2614  TRIGGER_EVENT_ROW |
2615  TRIGGER_EVENT_INSTEAD;
2616  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2617  LocTriggerData.tg_newtuple = NULL;
2618  LocTriggerData.tg_oldtable = NULL;
2619  LocTriggerData.tg_newtable = NULL;
2620  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2621  for (i = 0; i < trigdesc->numtriggers; i++)
2622  {
2623  Trigger *trigger = &trigdesc->triggers[i];
2624 
2625  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2626  TRIGGER_TYPE_ROW,
2627  TRIGGER_TYPE_INSTEAD,
2628  TRIGGER_TYPE_DELETE))
2629  continue;
2630  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2631  NULL, trigtuple, NULL))
2632  continue;
2633 
2634  LocTriggerData.tg_trigtuple = trigtuple;
2635  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2636  LocTriggerData.tg_trigger = trigger;
2637  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2638  i,
2639  relinfo->ri_TrigFunctions,
2640  relinfo->ri_TrigInstrument,
2641  GetPerTupleMemoryContext(estate));
2642  if (rettuple == NULL)
2643  return false; /* Delete was suppressed */
2644  if (rettuple != trigtuple)
2645  heap_freetuple(rettuple);
2646  }
2647  return true;
2648 }
2649 
2650 void
2651 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2652 {
2653  TriggerDesc *trigdesc;
2654  int i;
2655  TriggerData LocTriggerData;
2656  Bitmapset *updatedCols;
2657 
2658  trigdesc = relinfo->ri_TrigDesc;
2659 
2660  if (trigdesc == NULL)
2661  return;
2662  if (!trigdesc->trig_update_before_statement)
2663  return;
2664 
2665  /* no-op if we already fired BS triggers in this context */
2666  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2667  CMD_UPDATE))
2668  return;
2669 
2670  updatedCols = GetUpdatedColumns(relinfo, estate);
2671 
2672  LocTriggerData.type = T_TriggerData;
2673  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2674  TRIGGER_EVENT_BEFORE;
2675  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2676  LocTriggerData.tg_trigtuple = NULL;
2677  LocTriggerData.tg_newtuple = NULL;
2678  LocTriggerData.tg_oldtable = NULL;
2679  LocTriggerData.tg_newtable = NULL;
2680  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2681  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2682  for (i = 0; i < trigdesc->numtriggers; i++)
2683  {
2684  Trigger *trigger = &trigdesc->triggers[i];
2685  HeapTuple newtuple;
2686 
2687  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2688  TRIGGER_TYPE_STATEMENT,
2689  TRIGGER_TYPE_BEFORE,
2690  TRIGGER_TYPE_UPDATE))
2691  continue;
2692  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2693  updatedCols, NULL, NULL))
2694  continue;
2695 
2696  LocTriggerData.tg_trigger = trigger;
2697  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2698  i,
2699  relinfo->ri_TrigFunctions,
2700  relinfo->ri_TrigInstrument,
2701  GetPerTupleMemoryContext(estate));
2702 
2703  if (newtuple)
2704  ereport(ERROR,
2705  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2706  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2707  }
2708 }
2709 
2710 void
2711 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2712  TransitionCaptureState *transition_capture)
2713 {
2714  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2715 
2716  if (trigdesc && trigdesc->trig_update_after_statement)
2717  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2718  false, NULL, NULL, NIL,
2719  GetUpdatedColumns(relinfo, estate),
2720  transition_capture);
2721 }
2722 
2723 TupleTableSlot *
2724 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2725  ResultRelInfo *relinfo,
2726  ItemPointer tupleid,
2727  HeapTuple fdw_trigtuple,
2728  TupleTableSlot *slot)
2729 {
2730  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2731  HeapTuple slottuple = ExecMaterializeSlot(slot);
2732  HeapTuple newtuple = slottuple;
2733  TriggerData LocTriggerData;
2734  HeapTuple trigtuple;
2735  HeapTuple oldtuple;
2736  TupleTableSlot *newSlot;
2737  int i;
2738  Bitmapset *updatedCols;
2739  LockTupleMode lockmode;
2740 
2741  /* Determine lock mode to use */
2742  lockmode = ExecUpdateLockMode(estate, relinfo);
2743 
2744  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2745  if (fdw_trigtuple == NULL)
2746  {
2747  /* get a copy of the on-disk tuple we are planning to update */
2748  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2749  lockmode, &newSlot);
2750  if (trigtuple == NULL)
2751  return NULL; /* cancel the update action */
2752  }
2753  else
2754  {
2755  trigtuple = fdw_trigtuple;
2756  newSlot = NULL;
2757  }
2758 
2759  /*
2760  * In READ COMMITTED isolation level it's possible that target tuple was
2761  * changed due to concurrent update. In that case we have a raw subplan
2762  * output tuple in newSlot, and need to run it through the junk filter to
2763  * produce an insertable tuple.
2764  *
2765  * Caution: more than likely, the passed-in slot is the same as the
2766  * junkfilter's output slot, so we are clobbering the original value of
2767  * slottuple by doing the filtering. This is OK since neither we nor our
2768  * caller have any more interest in the prior contents of that slot.
2769  */
2770  if (newSlot != NULL)
2771  {
2772  slot = ExecFilterJunk(relinfo->ri_junkFilter, newSlot);
2773  slottuple = ExecMaterializeSlot(slot);
2774  newtuple = slottuple;
2775  }
2776 
2777 
2778  LocTriggerData.type = T_TriggerData;
2779  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2780  TRIGGER_EVENT_ROW |
2781  TRIGGER_EVENT_BEFORE;
2782  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2783  LocTriggerData.tg_oldtable = NULL;
2784  LocTriggerData.tg_newtable = NULL;
2785  updatedCols = GetUpdatedColumns(relinfo, estate);
2786  for (i = 0; i < trigdesc->numtriggers; i++)
2787  {
2788  Trigger *trigger = &trigdesc->triggers[i];
2789 
2790  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2791  TRIGGER_TYPE_ROW,
2792  TRIGGER_TYPE_BEFORE,
2793  TRIGGER_TYPE_UPDATE))
2794  continue;
2795  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2796  updatedCols, trigtuple, newtuple))
2797  continue;
2798 
2799  LocTriggerData.tg_trigtuple = trigtuple;
2800  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2801  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2802  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2803  LocTriggerData.tg_trigger = trigger;
2804  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2805  i,
2806  relinfo->ri_TrigFunctions,
2807  relinfo->ri_TrigInstrument,
2808  GetPerTupleMemoryContext(estate));
2809  if (oldtuple != newtuple && oldtuple != slottuple)
2810  heap_freetuple(oldtuple);
2811  if (newtuple == NULL)
2812  {
2813  if (trigtuple != fdw_trigtuple)
2814  heap_freetuple(trigtuple);
2815  return NULL; /* "do nothing" */
2816  }
2817  }
2818  if (trigtuple != fdw_trigtuple)
2819  heap_freetuple(trigtuple);
2820 
2821  if (newtuple != slottuple)
2822  {
2823  /*
2824  * Return the modified tuple using the es_trig_tuple_slot. We assume
2825  * the tuple was allocated in per-tuple memory context, and therefore
2826  * will go away by itself. The tuple table slot should not try to
2827  * clear it.
2828  */
2829  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2830  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2831 
2832  if (newslot->tts_tupleDescriptor != tupdesc)
2833  ExecSetSlotDescriptor(newslot, tupdesc);
2834  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2835  slot = newslot;
2836  }
2837  return slot;
2838 }
2839 
2840 void
2841 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2842  ItemPointer tupleid,
2843  HeapTuple fdw_trigtuple,
2844  HeapTuple newtuple,
2845  List *recheckIndexes,
2846  TransitionCaptureState *transition_capture)
2847 {
2848  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2849 
2850  if ((trigdesc && trigdesc->trig_update_after_row) ||
2851  (transition_capture &&
2852  (transition_capture->tcs_update_old_table ||
2853  transition_capture->tcs_update_new_table)))
2854  {
2855  HeapTuple trigtuple;
2856 
2857  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2858  if (fdw_trigtuple == NULL)
2859  trigtuple = GetTupleForTrigger(estate,
2860  NULL,
2861  relinfo,
2862  tupleid,
2863  LockTupleExclusive,
2864  NULL);
2865  else
2866  trigtuple = fdw_trigtuple;
2867 
2868  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2869  true, trigtuple, newtuple, recheckIndexes,
2870  GetUpdatedColumns(relinfo, estate),
2871  transition_capture);
2872  if (trigtuple != fdw_trigtuple)
2873  heap_freetuple(trigtuple);
2874  }
2875 }
2876 
2877 TupleTableSlot *
2878 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2879  HeapTuple trigtuple, TupleTableSlot *slot)
2880 {
2881  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2882  HeapTuple slottuple = ExecMaterializeSlot(slot);
2883  HeapTuple newtuple = slottuple;
2884  TriggerData LocTriggerData;
2885  HeapTuple oldtuple;
2886  int i;
2887 
2888  LocTriggerData.type = T_TriggerData;
2889  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2890  TRIGGER_EVENT_ROW |
2891  TRIGGER_EVENT_INSTEAD;
2892  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2893  LocTriggerData.tg_oldtable = NULL;
2894  LocTriggerData.tg_newtable = NULL;
2895  for (i = 0; i < trigdesc->numtriggers; i++)
2896  {
2897  Trigger *trigger = &trigdesc->triggers[i];
2898 
2899  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2900  TRIGGER_TYPE_ROW,
2901  TRIGGER_TYPE_INSTEAD,
2902  TRIGGER_TYPE_UPDATE))
2903  continue;
2904  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2905  NULL, trigtuple, newtuple))
2906  continue;
2907 
2908  LocTriggerData.tg_trigtuple = trigtuple;
2909  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2910  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2911  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2912  LocTriggerData.tg_trigger = trigger;
2913  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2914  i,
2915  relinfo->ri_TrigFunctions,
2916  relinfo->ri_TrigInstrument,
2917  GetPerTupleMemoryContext(estate));
2918  if (oldtuple != newtuple && oldtuple != slottuple)
2919  heap_freetuple(oldtuple);
2920  if (newtuple == NULL)
2921  return NULL; /* "do nothing" */
2922  }
2923 
2924  if (newtuple != slottuple)
2925  {
2926  /*
2927  * Return the modified tuple using the es_trig_tuple_slot. We assume
2928  * the tuple was allocated in per-tuple memory context, and therefore
2929  * will go away by itself. The tuple table slot should not try to
2930  * clear it.
2931  */
2932  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2933  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2934 
2935  if (newslot->tts_tupleDescriptor != tupdesc)
2936  ExecSetSlotDescriptor(newslot, tupdesc);
2937  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2938  slot = newslot;
2939  }
2940  return slot;
2941 }
2942 
2943 void
2944 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2945 {
2946  TriggerDesc *trigdesc;
2947  int i;
2948  TriggerData LocTriggerData;
2949 
2950  trigdesc = relinfo->ri_TrigDesc;
2951 
2952  if (trigdesc == NULL)
2953  return;
2954  if (!trigdesc->trig_truncate_before_statement)
2955  return;
2956 
2957  LocTriggerData.type = T_TriggerData;
2958  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
2959  TRIGGER_EVENT_BEFORE;
2960  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2961  LocTriggerData.tg_trigtuple = NULL;
2962  LocTriggerData.tg_newtuple = NULL;
2963  LocTriggerData.tg_oldtable = NULL;
2964  LocTriggerData.tg_newtable = NULL;
2965  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2966  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2967  for (i = 0; i < trigdesc->numtriggers; i++)
2968  {
2969  Trigger *trigger = &trigdesc->triggers[i];
2970  HeapTuple newtuple;
2971 
2972  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2973  TRIGGER_TYPE_STATEMENT,
2974  TRIGGER_TYPE_BEFORE,
2975  TRIGGER_TYPE_TRUNCATE))
2976  continue;
2977  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2978  NULL, NULL, NULL))
2979  continue;
2980 
2981  LocTriggerData.tg_trigger = trigger;
2982  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2983  i,
2984  relinfo->ri_TrigFunctions,
2985  relinfo->ri_TrigInstrument,
2986  GetPerTupleMemoryContext(estate));
2987 
2988  if (newtuple)
2989  ereport(ERROR,
2990  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2991  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2992  }
2993 }
2994 
2995 void
2996 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2997 {
2998  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2999 
3000  if (trigdesc && trigdesc->trig_truncate_after_statement)
3001  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_TRUNCATE,
3002  false, NULL, NULL, NIL, NULL, NULL);
3003 }
3004 
3005 
3006 static HeapTuple
3007 GetTupleForTrigger(EState *estate,
3008  EPQState *epqstate,
3009  ResultRelInfo *relinfo,
3010  ItemPointer tid,
3011  LockTupleMode lockmode,
3012  TupleTableSlot **newSlot)
3013 {
3014  Relation relation = relinfo->ri_RelationDesc;
3015  HeapTupleData tuple;
3016  HeapTuple result;
3017  Buffer buffer;
3018 
3019  if (newSlot != NULL)
3020  {
3021  HTSU_Result test;
3022  HeapUpdateFailureData hufd;
3023 
3024  *newSlot = NULL;
3025 
3026  /* caller must pass an epqstate if EvalPlanQual is possible */
3027  Assert(epqstate != NULL);
3028 
3029  /*
3030  * lock tuple for update
3031  */
3032 ltrmark:;
3033  tuple.t_self = *tid;
3034  test = heap_lock_tuple(relation, &tuple,
3035  estate->es_output_cid,
3036  lockmode, LockWaitBlock,
3037  false, &buffer, &hufd);
3038  switch (test)
3039  {
3040  case HeapTupleSelfUpdated:
3041 
3042  /*
3043  * The target tuple was already updated or deleted by the
3044  * current command, or by a later command in the current
3045  * transaction. We ignore the tuple in the former case, and
3046  * throw error in the latter case, for the same reasons
3047  * enumerated in ExecUpdate and ExecDelete in
3048  * nodeModifyTable.c.
3049  */
3050  if (hufd.cmax != estate->es_output_cid)
3051  ereport(ERROR,
3052  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3053  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3054  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3055 
3056  /* treat it as deleted; do not process */
3057  ReleaseBuffer(buffer);
3058  return NULL;
3059 
3060  case HeapTupleMayBeUpdated:
3061  break;
3062 
3063  case HeapTupleUpdated:
3064  ReleaseBuffer(buffer);
3065  if (IsolationUsesXactSnapshot())
3066  ereport(ERROR,
3067  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3068  errmsg("could not serialize access due to concurrent update")));
3069  if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
3070  {
3071  /* it was updated, so look at the updated version */
3072  TupleTableSlot *epqslot;
3073 
3074  epqslot = EvalPlanQual(estate,
3075  epqstate,
3076  relation,
3077  relinfo->ri_RangeTableIndex,
3078  lockmode,
3079  &hufd.ctid,
3080  hufd.xmax);
3081  if (!TupIsNull(epqslot))
3082  {
3083  *tid = hufd.ctid;
3084  *newSlot = epqslot;
3085 
3086  /*
3087  * EvalPlanQual already locked the tuple, but we
3088  * re-call heap_lock_tuple anyway as an easy way of
3089  * re-fetching the correct tuple. Speed is hardly a
3090  * criterion in this path anyhow.
3091  */
3092  goto ltrmark;
3093  }
3094  }
3095 
3096  /*
3097  * if tuple was deleted or PlanQual failed for updated tuple -
3098  * we must not process this tuple!
3099  */
3100  return NULL;
3101 
3102  case HeapTupleInvisible:
3103  elog(ERROR, "attempted to lock invisible tuple");
3104 
3105  default:
3106  ReleaseBuffer(buffer);
3107  elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
3108  return NULL; /* keep compiler quiet */
3109  }
3110  }
3111  else
3112  {
3113  Page page;
3114  ItemId lp;
3115 
3116  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3117 
3118  /*
3119  * Although we already know this tuple is valid, we must lock the
3120  * buffer to ensure that no one has a buffer cleanup lock; otherwise
3121  * they might move the tuple while we try to copy it. But we can
3122  * release the lock before actually doing the heap_copytuple call,
3123  * since holding pin is sufficient to prevent anyone from getting a
3124  * cleanup lock they don't already hold.
3125  */
3126  LockBuffer(buffer, BUFFER_LOCK_SHARE);
3127 
3128  page = BufferGetPage(buffer);
3129  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3130 
3131  Assert(ItemIdIsNormal(lp));
3132 
3133  tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3134  tuple.t_len = ItemIdGetLength(lp);
3135  tuple.t_self = *tid;
3136  tuple.t_tableOid = RelationGetRelid(relation);
3137 
3138  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3139  }
3140 
3141  result = heap_copytuple(&tuple);
3142  ReleaseBuffer(buffer);
3143 
3144  return result;
3145 }
3146 
3147 /*
3148  * Is trigger enabled to fire?
3149  */
3150 static bool
3151 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3152  Trigger *trigger, TriggerEvent event,
3153  Bitmapset *modifiedCols,
3154  HeapTuple oldtup, HeapTuple newtup)
3155 {
3156  /* Check replication-role-dependent enable state */
3157  if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3158  {
3159  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3160  trigger->tgenabled == TRIGGER_DISABLED)
3161  return false;
3162  }
3163  else /* ORIGIN or LOCAL role */
3164  {
3165  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3166  trigger->tgenabled == TRIGGER_DISABLED)
3167  return false;
3168  }
3169 
3170  /*
3171  * Check for column-specific trigger (only possible for UPDATE, and in
3172  * fact we *must* ignore tgattr for other event types)
3173  */
3174  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3175  {
3176  int i;
3177  bool modified;
3178 
3179  modified = false;
3180  for (i = 0; i < trigger->tgnattr; i++)
3181  {
3182  if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3183  modifiedCols))
3184  {
3185  modified = true;
3186  break;
3187  }
3188  }
3189  if (!modified)
3190  return false;
3191  }
3192 
3193  /* Check for WHEN clause */
3194  if (trigger->tgqual)
3195  {
3196  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
3197  ExprState **predicate;
3198  ExprContext *econtext;
3199  TupleTableSlot *oldslot = NULL;
3200  TupleTableSlot *newslot = NULL;
3201  MemoryContext oldContext;
3202  int i;
3203 
3204  Assert(estate != NULL);
3205 
3206  /*
3207  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3208  * matching element of relinfo->ri_TrigWhenExprs[]
3209  */
3210  i = trigger - relinfo->ri_TrigDesc->triggers;
3211  predicate = &relinfo->ri_TrigWhenExprs[i];
3212 
3213  /*
3214  * If first time through for this WHEN expression, build expression
3215  * nodetrees for it. Keep them in the per-query memory context so
3216  * they'll survive throughout the query.
3217  */
3218  if (*predicate == NULL)
3219  {
3220  Node *tgqual;
3221 
3222  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3223  tgqual = stringToNode(trigger->tgqual);
3224  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3225  ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3226  ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3227  /* ExecPrepareQual wants implicit-AND form */
3228  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3229  *predicate = ExecPrepareQual((List *) tgqual, estate);
3230  MemoryContextSwitchTo(oldContext);
3231  }
3232 
3233  /*
3234  * We will use the EState's per-tuple context for evaluating WHEN
3235  * expressions (creating it if it's not already there).
3236  */
3237  econtext = GetPerTupleExprContext(estate);
3238 
3239  /*
3240  * Put OLD and NEW tuples into tupleslots for expression evaluation.
3241  * These slots can be shared across the whole estate, but be careful
3242  * that they have the current resultrel's tupdesc.
3243  */
3244  if (HeapTupleIsValid(oldtup))
3245  {
3246  if (estate->es_trig_oldtup_slot == NULL)
3247  {
3248  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3249  estate->es_trig_oldtup_slot = ExecInitExtraTupleSlot(estate);
3250  MemoryContextSwitchTo(oldContext);
3251  }
3252  oldslot = estate->es_trig_oldtup_slot;
3253  if (oldslot->tts_tupleDescriptor != tupdesc)
3254  ExecSetSlotDescriptor(oldslot, tupdesc);
3255  ExecStoreTuple(oldtup, oldslot, InvalidBuffer, false);
3256  }
3257  if (HeapTupleIsValid(newtup))
3258  {
3259  if (estate->es_trig_newtup_slot == NULL)
3260  {
3261  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3262  estate->es_trig_newtup_slot = ExecInitExtraTupleSlot(estate);
3263  MemoryContextSwitchTo(oldContext);
3264  }
3265  newslot = estate->es_trig_newtup_slot;
3266  if (newslot->tts_tupleDescriptor != tupdesc)
3267  ExecSetSlotDescriptor(newslot, tupdesc);
3268  ExecStoreTuple(newtup, newslot, InvalidBuffer, false);
3269  }
3270 
3271  /*
3272  * Finally evaluate the expression, making the old and/or new tuples
3273  * available as INNER_VAR/OUTER_VAR respectively.
3274  */
3275  econtext->ecxt_innertuple = oldslot;
3276  econtext->ecxt_outertuple = newslot;
3277  if (!ExecQual(*predicate, econtext))
3278  return false;
3279  }
3280 
3281  return true;
3282 }
3283 
3284 
3285 /* ----------
3286  * After-trigger stuff
3287  *
3288  * The AfterTriggersData struct holds data about pending AFTER trigger events
3289  * during the current transaction tree. (BEFORE triggers are fired
3290  * immediately so we don't need any persistent state about them.) The struct
3291  * and most of its subsidiary data are kept in TopTransactionContext; however
3292  * some data that can be discarded sooner appears in the CurTransactionContext
3293  * of the relevant subtransaction. Also, the individual event records are
3294  * kept in a separate sub-context of TopTransactionContext. This is done
3295  * mainly so that it's easy to tell from a memory context dump how much space
3296  * is being eaten by trigger events.
3297  *
3298  * Because the list of pending events can grow large, we go to some
3299  * considerable effort to minimize per-event memory consumption. The event
3300  * records are grouped into chunks and common data for similar events in the
3301  * same chunk is only stored once.
3302  *
3303  * XXX We need to be able to save the per-event data in a file if it grows too
3304  * large.
3305  * ----------
3306  */
3307 
3308 /* Per-trigger SET CONSTRAINT status */
3309 typedef struct SetConstraintTriggerData
3310 {
3311  Oid sct_tgoid;
3312  bool sct_tgisdeferred;
3313 } SetConstraintTriggerData;
3314 
3315 typedef SetConstraintTriggerData *SetConstraintTrigger;
3316 
3317 /*
3318  * SET CONSTRAINT intra-transaction status.
3319  *
3320  * We make this a single palloc'd object so it can be copied and freed easily.
3321  *
3322  * all_isset and all_isdeferred are used to keep track
3323  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3324  *
3325  * trigstates[] stores per-trigger tgisdeferred settings.
3326  */
3327 typedef struct SetConstraintStateData
3328 {
3329  bool all_isset;
3330  bool all_isdeferred;
3331  int numstates; /* number of trigstates[] entries in use */
3332  int numalloc; /* allocated size of trigstates[] */
3333  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3334 } SetConstraintStateData;
3335 
3336 typedef SetConstraintStateData *SetConstraintState;
3337 
3338 
3339 /*
3340  * Per-trigger-event data
3341  *
3342  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3343  * status bits and up to two tuple CTIDs. Each event record also has an
3344  * associated AfterTriggerSharedData that is shared across all instances of
3345  * similar events within a "chunk".
3346  *
3347  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3348  * fields. Updates of regular tables use two; inserts and deletes of regular
3349  * tables use one; foreign tables always use zero and save the tuple(s) to a
3350  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3351  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3352  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3353  * tuple(s). This permits storing tuples once regardless of the number of
3354  * row-level triggers on a foreign table.
3355  *
3356  * Note that we need triggers on foreign tables to be fired in exactly the
3357  * order they were queued, so that the tuples come out of the tuplestore in
3358  * the right order. To ensure that, we forbid deferrable (constraint)
3359  * triggers on foreign tables. This also ensures that such triggers do not
3360  * get deferred into outer trigger query levels, meaning that it's okay to
3361  * destroy the tuplestore at the end of the query level.
3362  *
3363  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3364  * require no ctid field. We lack the flag bit space to neatly represent that
3365  * distinct case, and it seems unlikely to be worth much trouble.
3366  *
3367  * Note: ats_firing_id is initially zero and is set to something else when
3368  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3369  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3370  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3371  * because all instances of the same type of event in a given event list will
3372  * be fired at the same time, if they were queued between the same firing
3373  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3374  * a new event to an existing AfterTriggerSharedData record.
3375  */
3376 typedef uint32 TriggerFlags;
3377 
3378 #define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order bits */
3379 #define AFTER_TRIGGER_DONE 0x10000000
3380 #define AFTER_TRIGGER_IN_PROGRESS 0x20000000
3381 /* bits describing the size and tuple sources of this event */
3382 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3383 #define AFTER_TRIGGER_FDW_FETCH 0x80000000
3384 #define AFTER_TRIGGER_1CTID 0x40000000
3385 #define AFTER_TRIGGER_2CTID 0xC0000000
3386 #define AFTER_TRIGGER_TUP_BITS 0xC0000000
3387 
3388 typedef struct AfterTriggerSharedData *AfterTriggerShared;
3389 
3390 typedef struct AfterTriggerSharedData
3391 {
3392  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3393  Oid ats_tgoid; /* the trigger's ID */
3394  Oid ats_relid; /* the relation it's on */
3395  CommandId ats_firing_id; /* ID for firing cycle */
3396  struct AfterTriggersTableData *ats_table; /* transition table access */
3397 } AfterTriggerSharedData;
3398 
3399 typedef struct AfterTriggerEventData *AfterTriggerEvent;
3400 
3401 typedef struct AfterTriggerEventData
3402 {
3403  TriggerFlags ate_flags; /* status bits and offset to shared data */
3404  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3405  ItemPointerData ate_ctid2; /* new updated tuple */
3406 } AfterTriggerEventData;
3407 
3408 /* AfterTriggerEventData, minus ate_ctid2 */
3409 typedef struct AfterTriggerEventDataOneCtid
3410 {
3411  TriggerFlags ate_flags; /* status bits and offset to shared data */
3412  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3413 } AfterTriggerEventDataOneCtid;
3414 
3415 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
3416 typedef struct AfterTriggerEventDataZeroCtids
3417 {
3418  TriggerFlags ate_flags; /* status bits and offset to shared data */
3419 } AfterTriggerEventDataZeroCtids;
3420 
3421 #define SizeofTriggerEvent(evt) \
3422  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3423  sizeof(AfterTriggerEventData) : \
3424  ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3425  sizeof(AfterTriggerEventDataOneCtid) : \
3426  sizeof(AfterTriggerEventDataZeroCtids))
3427 
3428 #define GetTriggerSharedData(evt) \
3429  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3430 
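
A sketch of the arithmetic these macros rely on (illustrative only; the real linkage is performed later, in afterTriggerAddEvent): shared records grow downward from the end of a chunk, so each event stores just the byte offset to its shared record in the low-order bits of ate_flags. Here 'event' and 'shared' are assumed to live in the same chunk:

    event->ate_flags &= ~AFTER_TRIGGER_OFFSET;
    event->ate_flags |= (TriggerFlags) ((char *) shared - (char *) event);
    Assert(GetTriggerSharedData(event) == shared);
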
3431 /*
3432  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3433  * larger chunks (a slightly more sophisticated version of an expansible
3434  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3435  * AfterTriggerEventData records; the space between endfree and endptr is
3436  * occupied by AfterTriggerSharedData records.
3437  */
3438 typedef struct AfterTriggerEventChunk
3439 {
3440  struct AfterTriggerEventChunk *next; /* list link */
3441  char *freeptr; /* start of free space in chunk */
3442  char *endfree; /* end of free space in chunk */
3443  char *endptr; /* end of chunk */
3444  /* event data follows here */
3445 } AfterTriggerEventChunk;
3446 
3447 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3448 
3449 /* A list of events */
3450 typedef struct AfterTriggerEventList
3451 {
3452  AfterTriggerEventChunk *head;
3453  AfterTriggerEventChunk *tail;
3454  char *tailfree; /* freeptr of tail chunk */
3455 } AfterTriggerEventList;
3456 
3457 /* Macros to help in iterating over a list of events */
3458 #define for_each_chunk(cptr, evtlist) \
3459  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3460 #define for_each_event(eptr, cptr) \
3461  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3462  (char *) eptr < (cptr)->freeptr; \
3463  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3464 /* Use this if no special per-chunk processing is needed */
3465 #define for_each_event_chunk(eptr, cptr, evtlist) \
3466  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3467 
3468 /* Macros for iterating from a start point that might not be list start */
3469 #define for_each_chunk_from(cptr) \
3470  for (; cptr != NULL; cptr = cptr->next)
3471 #define for_each_event_from(eptr, cptr) \
3472  for (; \
3473  (char *) eptr < (cptr)->freeptr; \
3474  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3475 
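
For orientation, a hedged sketch of how these iteration macros are typically used when scanning the queue (variable names are illustrative; the real loop in afterTriggerInvokeEvents additionally handles firing and chunk cleanup):

    AfterTriggerEventChunk *chunk;
    AfterTriggerEvent event;

    for_each_event_chunk(event, chunk, afterTriggers.events)
    {
        AfterTriggerShared evtshared = GetTriggerSharedData(event);

        if (event->ate_flags & AFTER_TRIGGER_DONE)
            continue;            /* already fired */
        /* ... decide whether evtshared->ats_tgoid should fire in this cycle ... */
    }
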
3476 
3477 /*
3478  * All per-transaction data for the AFTER TRIGGERS module.
3479  *
3480  * AfterTriggersData has the following fields:
3481  *
3482  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3483  * We mark firable events with the current firing cycle's ID so that we can
3484  * tell which ones to work on. This ensures sane behavior if a trigger
3485  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3486  * only fire those events that weren't already scheduled for firing.
3487  *
3488  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3489  * This is saved and restored across failed subtransactions.
3490  *
3491  * events is the current list of deferred events. This is global across
3492  * all subtransactions of the current transaction. In a subtransaction
3493  * abort, we know that the events added by the subtransaction are at the
3494  * end of the list, so it is relatively easy to discard them. The event
3495  * list chunks themselves are stored in event_cxt.
3496  *
3497  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3498  * (-1 when the stack is empty).
3499  *
3500  * query_stack[query_depth] is the per-query-level data, including these fields:
3501  *
3502  * events is a list of AFTER trigger events queued by the current query.
3503  * None of these are valid until the matching AfterTriggerEndQuery call
3504  * occurs. At that point we fire immediate-mode triggers, and append any
3505  * deferred events to the main events list.
3506  *
3507  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3508  * needed by events queued by the current query. (Note: we use just one
3509  * tuplestore even though more than one foreign table might be involved.
3510  * This is okay because tuplestores don't really care what's in the tuples
3511  * they store; but it's possible that someday it'd break.)
3512  *
3513  * tables is a List of AfterTriggersTableData structs for target tables
3514  * of the current query (see below).
3515  *
3516  * maxquerydepth is just the allocated length of query_stack.
3517  *
3518  * trans_stack holds per-subtransaction data, including these fields:
3519  *
3520  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3521  * state data. Each subtransaction level that modifies that state first
3522  * saves a copy, which we use to restore the state if we abort.
3523  *
3524  * events is a copy of the events head/tail pointers,
3525  * which we use to restore those values during subtransaction abort.
3526  *
3527  * query_depth is the subtransaction-start-time value of query_depth,
3528  * which we similarly use to clean up at subtransaction abort.
3529  *
3530  * firing_counter is the subtransaction-start-time value of firing_counter.
3531  * We use this to recognize which deferred triggers were fired (or marked
3532  * for firing) within an aborted subtransaction.
3533  *
3534  * We use GetCurrentTransactionNestLevel() to determine the correct array
3535  * index in trans_stack. maxtransdepth is the number of allocated entries in
3536  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3537  * in cases where errors during subxact abort cause multiple invocations
3538  * of AfterTriggerEndSubXact() at the same nesting depth.)
3539  *
3540  * We create an AfterTriggersTableData struct for each target table of the
3541  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3542  * either transition tables or statement-level triggers. This is used to
3543  * hold the relevant transition tables, as well as info tracking whether
3544  * we already queued the statement triggers. (We use that info to prevent
3545  * firing the same statement triggers more than once per statement, or really
3546  * once per transition table set.) These structs, along with the transition
3547  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3548  * That's sufficient lifespan because we don't allow transition tables to be
3549  * used by deferrable triggers, so they only need to survive until
3550  * AfterTriggerEndQuery.
3551  */
3552 typedef struct AfterTriggersQueryData AfterTriggersQueryData;
3553 typedef struct AfterTriggersTransData AfterTriggersTransData;
3554 typedef struct AfterTriggersTableData AfterTriggersTableData;
3555 
3556 typedef struct AfterTriggersData
3557 {
3558  CommandId firing_counter; /* next firing ID to assign */
3559  SetConstraintState state; /* the active S C state */
3560  AfterTriggerEventList events; /* deferred-event list */
3561  MemoryContext event_cxt; /* memory context for events, if any */
3562 
3563  /* per-query-level data: */
3564  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3565  int query_depth; /* current index in above array */
3566  int maxquerydepth; /* allocated len of above array */
3567 
3568  /* per-subtransaction-level data: */
3569  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3570  int maxtransdepth; /* allocated len of above array */
3571 } AfterTriggersData;
3572 
3573 struct AfterTriggersQueryData
3574 {
3575  AfterTriggerEventList events; /* events pending from this query */
3576  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3577  List *tables; /* list of AfterTriggersTableData, see below */
3578 };
3579 
3580 struct AfterTriggersTransData
3581 {
3582  /* these fields are just for resetting at subtrans abort: */
3583  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3584  AfterTriggerEventList events; /* saved list pointer */
3585  int query_depth; /* saved query_depth */
3586  CommandId firing_counter; /* saved firing_counter */
3587 };
3588 
3589 struct AfterTriggersTableData
3590 {
3591  /* relid + cmdType form the lookup key for these structs: */
3592  Oid relid; /* target table's OID */
3593  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3594  bool closed; /* true when no longer OK to add tuples */
3595  bool before_trig_done; /* did we already queue BS triggers? */
3596  bool after_trig_done; /* did we already queue AS triggers? */
3597  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3598  Tuplestorestate *old_tuplestore; /* "old" transition table, if any */
3599  Tuplestorestate *new_tuplestore; /* "new" transition table, if any */
3600 };
3601 
3602 static AfterTriggersData afterTriggers;
3603 
3604 static void AfterTriggerExecute(AfterTriggerEvent event,
3605  Relation rel, TriggerDesc *trigdesc,
3606  FmgrInfo *finfo,
3607  Instrumentation *instr,
3608  MemoryContext per_tuple_context,
3609  TupleTableSlot *trig_tuple_slot1,
3610  TupleTableSlot *trig_tuple_slot2);
3611 static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid,
3612  CmdType cmdType);
3614 static SetConstraintState SetConstraintStateCreate(int numalloc);
3615 static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
3616 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3617  Oid tgoid, bool tgisdeferred);
3618 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3619 
3620 
3621 /*
3622  * Get the FDW tuplestore for the current trigger query level, creating it
3623  * if necessary.
3624  */
3625 static Tuplestorestate *
3626 GetCurrentFDWTuplestore(void)
3627 {
3628  Tuplestorestate *ret;
3629 
3630  ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
3631  if (ret == NULL)
3632  {
3633  MemoryContext oldcxt;
3634  ResourceOwner saveResourceOwner;
3635 
3636  /*
3637  * Make the tuplestore valid until end of subtransaction. We really
3638  * only need it until AfterTriggerEndQuery().
3639  */
3640  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
3641  saveResourceOwner = CurrentResourceOwner;
3642  CurrentResourceOwner = CurTransactionResourceOwner;
3643 
3644  ret = tuplestore_begin_heap(false, false, work_mem);
3645 
3646  CurrentResourceOwner = saveResourceOwner;
3647  MemoryContextSwitchTo(oldcxt);
3648 
3649  afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
3650  }
3651 
3652  return ret;
3653 }
3654 
3655 /* ----------
3656  * afterTriggerCheckState()
3657  *
3658  * Returns true if the trigger event is actually in state DEFERRED.
3659  * ----------
3660  */
3661 static bool
3662 afterTriggerCheckState(AfterTriggerShared evtshared)
3663 {
3664  Oid tgoid = evtshared->ats_tgoid;
3665  SetConstraintState state = afterTriggers.state;
3666  int i;
3667 
3668  /*
3669  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3670  * constraints declared NOT DEFERRABLE), the state is always false.
3671  */
3672  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3673  return false;
3674 
3675  /*
3676  * If constraint state exists, SET CONSTRAINTS might have been executed
3677  * either for this trigger or for all triggers.
3678  */
3679  if (state != NULL)
3680  {
3681  /* Check for SET CONSTRAINTS for this specific trigger. */
3682  for (i = 0; i < state->numstates; i++)
3683  {
3684  if (state->trigstates[i].sct_tgoid == tgoid)
3685  return state->trigstates[i].sct_tgisdeferred;
3686  }
3687 
3688  /* Check for SET CONSTRAINTS ALL. */
3689  if (state->all_isset)
3690  return state->all_isdeferred;
3691  }
3692 
3693  /*
3694  * Otherwise return the default state for the trigger.
3695  */
3696  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3697 }
3698 
3699 
3700 /* ----------
3701  * afterTriggerAddEvent()
3702  *
3703  * Add a new trigger event to the specified queue.
3704  * The passed-in event data is copied.
3705  * ----------
3706  */
3707 static void
3708 afterTriggerAddEvent(AfterTriggerEventList *events,
3709  AfterTriggerEvent event, AfterTriggerShared evtshared)
3710 {
3711  Size eventsize = SizeofTriggerEvent(event);
3712  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3713  AfterTriggerEventChunk *chunk;
3714  AfterTriggerShared newshared;
3715  AfterTriggerEvent newevent;
3716 
3717  /*
3718  * If empty list or not enough room in the tail chunk, make a new chunk.
3719  * We assume here that a new shared record will always be needed.
3720  */
3721  chunk = events->tail;
3722  if (chunk == NULL ||
3723  chunk->endfree - chunk->freeptr < needed)
3724  {
3725  Size chunksize;
3726 
3727  /* Create event context if we didn't already */
3728  if (afterTriggers.event_cxt == NULL)
3729  afterTriggers.event_cxt =
3730  AllocSetContextCreate(TopTransactionContext,
3731  "AfterTriggerEvents",
3732  ALLOCSET_DEFAULT_SIZES);
3733 
3734  /*
3735  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3736  * These numbers are fairly arbitrary, though there is a hard limit at
3737  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3738  * shared records using the available space in ate_flags. Another
3739  * constraint is that if the chunk size gets too huge, the search loop
3740  * below would get slow given a (not too common) usage pattern with
3741  * many distinct event types in a chunk. Therefore, we double the
3742  * preceding chunk size only if there weren't too many shared records
3743  * in the preceding chunk; otherwise we halve it. This gives us some
3744  * ability to adapt to the actual usage pattern of the current query
3745  * while still having large chunk sizes in typical usage. All chunk
3746  * sizes used should be MAXALIGN multiples, to ensure that the shared
3747  * records will be aligned safely.
3748  */
3749 #define MIN_CHUNK_SIZE 1024
3750 #define MAX_CHUNK_SIZE (1024*1024)
3751 
3752 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3753 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3754 #endif
3755 
3756  if (chunk == NULL)
3757  chunksize = MIN_CHUNK_SIZE;
3758  else
3759  {
3760  /* preceding chunk size... */
3761  chunksize = chunk->endptr - (char *) chunk;
3762  /* check number of shared records in preceding chunk */
3763  if ((chunk->endptr - chunk->endfree) <=
3764  (100 * sizeof(AfterTriggerSharedData)))
3765  chunksize *= 2; /* okay, double it */
3766  else
3767  chunksize /= 2; /* too many shared records */
3768  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
3769  }
3770  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
3771  chunk->next = NULL;
3772  chunk->freeptr = CHUNK_DATA_START(chunk);
3773  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
3774  Assert(chunk->endfree - chunk->freeptr >= needed);
3775 
3776  if (events->head == NULL)
3777  events->head = chunk;
3778  else
3779  events->tail->next = chunk;
3780  events->tail = chunk;
3781  /* events->tailfree is now out of sync, but we'll fix it below */
3782  }
3783 
3784  /*
3785  * Try to locate a matching shared-data record already in the chunk. If
3786  * none, make a new one.
3787  */
3788  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
3789  (char *) newshared >= chunk->endfree;
3790  newshared--)
3791  {
3792  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
3793  newshared->ats_relid == evtshared->ats_relid &&
3794  newshared->ats_event == evtshared->ats_event &&
3795  newshared->ats_table == evtshared->ats_table &&
3796  newshared->ats_firing_id == 0)
3797  break;
3798  }
3799  if ((char *) newshared < chunk->endfree)
3800  {
3801  *newshared = *evtshared;
3802  newshared->ats_firing_id = 0; /* just to be sure */
3803  chunk->endfree = (char *) newshared;
3804  }
3805 
3806  /* Insert the data */
3807  newevent = (AfterTriggerEvent) chunk->freeptr;
3808  memcpy(newevent, event, eventsize);
3809  /* ... and link the new event to its shared record */
3810  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
3811  newevent->ate_flags |= (char *) newshared - (char *) newevent;
3812 
3813  chunk->freeptr += eventsize;
3814  events->tailfree = chunk->freeptr;
3815 }
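
/*
 * Editor's note: illustrative sketch only, not part of trigger.c.  The chunk
 * sizing rule used above, written as a stand-alone helper: the next chunk
 * doubles while shared records were sparse in the previous chunk and halves
 * once they become dense, never exceeding MAX_CHUNK_SIZE.  "shared_bytes" is
 * the previous chunk's (endptr - endfree).
 */
static Size
next_chunk_size_sketch(Size prev_chunksize, Size shared_bytes)
{
	Size		chunksize = prev_chunksize;

	if (shared_bytes <= 100 * sizeof(AfterTriggerSharedData))
		chunksize *= 2;			/* few distinct shared records: grow */
	else
		chunksize /= 2;			/* many distinct shared records: shrink */
	return Min(chunksize, MAX_CHUNK_SIZE);
}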
3816 
3817 /* ----------
3818  * afterTriggerFreeEventList()
3819  *
3820  * Free all the event storage in the given list.
3821  * ----------
3822  */
3823 static void
3824 afterTriggerFreeEventList(AfterTriggerEventList *events)
3825 {
3826  AfterTriggerEventChunk *chunk;
3827 
3828  while ((chunk = events->head) != NULL)
3829  {
3830  events->head = chunk->next;
3831  pfree(chunk);
3832  }
3833  events->tail = NULL;
3834  events->tailfree = NULL;
3835 }
3836 
3837 /* ----------
3838  * afterTriggerRestoreEventList()
3839  *
3840  * Restore an event list to its prior length, removing all the events
3841  * added since it had the value old_events.
3842  * ----------
3843  */
3844 static void
3845 afterTriggerRestoreEventList(AfterTriggerEventList *events,
3846  const AfterTriggerEventList *old_events)
3847 {
3848  AfterTriggerEventChunk *chunk;
3849  AfterTriggerEventChunk *next_chunk;
3850 
3851  if (old_events->tail == NULL)
3852  {
3853  /* restoring to a completely empty state, so free everything */
3854  afterTriggerFreeEventList(events);
3855  }
3856  else
3857  {
3858  *events = *old_events;
3859  /* free any chunks after the last one we want to keep */
3860  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3861  {
3862  next_chunk = chunk->next;
3863  pfree(chunk);
3864  }
3865  /* and clean up the tail chunk to be the right length */
3866  events->tail->next = NULL;
3867  events->tail->freeptr = events->tailfree;
3868 
3869  /*
3870  * We don't make any effort to remove now-unused shared data records.
3871  * They might still be useful, anyway.
3872  */
3873  }
3874 }
3875 
3876 /* ----------
3877  * afterTriggerDeleteHeadEventChunk()
3878  *
3879  * Remove the first chunk of events from the query level's event list.
3880  * Keep any event list pointers elsewhere in the query level's data
3881  * structures in sync.
3882  * ----------
3883  */
3884 static void
3885 afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
3886 {
3887  AfterTriggerEventChunk *target = qs->events.head;
3888  ListCell *lc;
3889 
3890  Assert(target && target->next);
3891 
3892  /*
3893  * First, update any pointers in the per-table data, so that they won't be
3894  * dangling. Resetting obsoleted pointers to NULL will make
3895  * cancel_prior_stmt_triggers start from the list head, which is fine.
3896  */
3897  foreach(lc, qs->tables)
3898  {
3899  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
3900 
3901  if (table->after_trig_done &&
3902  table->after_trig_events.tail == target)
3903  {
3904  table->after_trig_events.head = NULL;
3905  table->after_trig_events.tail = NULL;
3906  table->after_trig_events.tailfree = NULL;
3907  }
3908  }
3909 
3910  /* Now we can flush the head chunk */
3911  qs->events.head = target->next;
3912  pfree(target);
3913 }
3914 
3915 
3916 /* ----------
3917  * AfterTriggerExecute()
3918  *
3919  * Fetch the required tuples back from the heap and fire one
3920  * single trigger function.
3921  *
3922  * Frequently, this will be fired many times in a row for triggers of
3923  * a single relation. Therefore, we cache the open relation and provide
3924  * fmgr lookup cache space at the caller level. (For triggers fired at
3925  * the end of a query, we can even piggyback on the executor's state.)
3926  *
3927  * event: event currently being fired.
3928  * rel: open relation for event.
3929  * trigdesc: working copy of rel's trigger info.
3930  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
3931  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
3932  * or NULL if no instrumentation is wanted.
3933  * per_tuple_context: memory context to call trigger function in.
3934  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
3935  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
3936  * ----------
3937  */
3938 static void
3939 AfterTriggerExecute(AfterTriggerEvent event,
3940  Relation rel, TriggerDesc *trigdesc,
3941  FmgrInfo *finfo, Instrumentation *instr,
3942  MemoryContext per_tuple_context,
3943  TupleTableSlot *trig_tuple_slot1,
3944  TupleTableSlot *trig_tuple_slot2)
3945 {
3946  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3947  Oid tgoid = evtshared->ats_tgoid;
3948  TriggerData LocTriggerData;
3949  HeapTupleData tuple1;
3950  HeapTupleData tuple2;
3951  HeapTuple rettuple;
3952  Buffer buffer1 = InvalidBuffer;
3953  Buffer buffer2 = InvalidBuffer;
3954  int tgindx;
3955 
3956  /*
3957  * Locate trigger in trigdesc.
3958  */
3959  LocTriggerData.tg_trigger = NULL;
3960  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
3961  {
3962  if (trigdesc->triggers[tgindx].tgoid == tgoid)
3963  {
3964  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
3965  break;
3966  }
3967  }
3968  if (LocTriggerData.tg_trigger == NULL)
3969  elog(ERROR, "could not find trigger %u", tgoid);
3970 
3971  /*
3972  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
3973  * to include time spent re-fetching tuples in the trigger cost.
3974  */
3975  if (instr)
3976  InstrStartNode(instr + tgindx);
3977 
3978  /*
3979  * Fetch the required tuple(s).
3980  */
3981  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
3982  {
3983  case AFTER_TRIGGER_FDW_FETCH:
3984  {
3985  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
3986 
3987  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
3988  trig_tuple_slot1))
3989  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3990 
3991  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3992  TRIGGER_EVENT_UPDATE &&
3993  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
3994  trig_tuple_slot2))
3995  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3996  }
3997  /* fall through */
3998  case AFTER_TRIGGER_FDW_REUSE:
3999 
4000  /*
4001  * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple()
4002  * ensures that tg_trigtuple does not reference tuplestore memory.
4003  * (It is formally possible for the trigger function to queue
4004  * trigger events that add to the same tuplestore, which can push
4005  * other tuples out of memory.) The distinction is academic,
4006  * because we start with a minimal tuple that ExecFetchSlotTuple()
4007  * must materialize anyway.
4008  */
4009  LocTriggerData.tg_trigtuple =
4010  ExecMaterializeSlot(trig_tuple_slot1);
4011  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
4012 
4013  LocTriggerData.tg_newtuple =
4014  ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4015  TRIGGER_EVENT_UPDATE) ?
4016  ExecMaterializeSlot(trig_tuple_slot2) : NULL;
4017  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
4018 
4019  break;
4020 
4021  default:
4022  if (ItemPointerIsValid(&(event->ate_ctid1)))
4023  {
4024  ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
4025  if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer1, false, NULL))
4026  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4027  LocTriggerData.tg_trigtuple = &tuple1;
4028  LocTriggerData.tg_trigtuplebuf = buffer1;
4029  }
4030  else
4031  {
4032  LocTriggerData.tg_trigtuple = NULL;
4033  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
4034  }
4035 
4036  /* don't touch ctid2 if not there */
4037  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
4038  AFTER_TRIGGER_2CTID &&
4039  ItemPointerIsValid(&(event->ate_ctid2)))
4040  {
4041  ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
4042  if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer2, false, NULL))
4043  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4044  LocTriggerData.tg_newtuple = &tuple2;
4045  LocTriggerData.tg_newtuplebuf = buffer2;
4046  }
4047  else
4048  {
4049  LocTriggerData.tg_newtuple = NULL;
4050  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
4051  }
4052  }
4053 
4054  /*
4055  * Set up the tuplestore information to let the trigger have access to
4056  * transition tables. When we first make a transition table available to
4057  * a trigger, mark it "closed" so that it cannot change anymore. If any
4058  * additional events of the same type get queued in the current trigger
4059  * query level, they'll go into new transition tables.
4060  */
4061  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4062  if (evtshared->ats_table)
4063  {
4064  if (LocTriggerData.tg_trigger->tgoldtable)
4065  {
4066  LocTriggerData.tg_oldtable = evtshared->ats_table->old_tuplestore;
4067  evtshared->ats_table->closed = true;
4068  }
4069 
4070  if (LocTriggerData.tg_trigger->tgnewtable)
4071  {
4072  LocTriggerData.tg_newtable = evtshared->ats_table->new_tuplestore;
4073  evtshared->ats_table->closed = true;
4074  }
4075  }
4076 
4077  /*
4078  * Setup the remaining trigger information
4079  */
4080  LocTriggerData.type = T_TriggerData;
4081  LocTriggerData.tg_event =
4082  evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
4083  LocTriggerData.tg_relation = rel;
4084 
4085  MemoryContextReset(per_tuple_context);
4086 
4087  /*
4088  * Call the trigger and throw away any possibly returned updated tuple.
4089  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4090  */
4091  rettuple = ExecCallTriggerFunc(&LocTriggerData,
4092  tgindx,
4093  finfo,
4094  NULL,
4095  per_tuple_context);
4096  if (rettuple != NULL &&
4097  rettuple != LocTriggerData.tg_trigtuple &&
4098  rettuple != LocTriggerData.tg_newtuple)
4099  heap_freetuple(rettuple);
4100 
4101  /*
4102  * Release buffers
4103  */
4104  if (buffer1 != InvalidBuffer)
4105  ReleaseBuffer(buffer1);
4106  if (buffer2 != InvalidBuffer)
4107  ReleaseBuffer(buffer2);
4108 
4109  /*
4110  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4111  * one "tuple returned" (really the number of firings).
4112  */
4113  if (instr)
4114  InstrStopNode(instr + tgindx, 1);
4115 }
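
/*
 * Editor's note: illustrative sketch only, not part of trigger.c (a real
 * example would live in its own extension module).  The TriggerData built
 * above is what the called trigger function ultimately sees; a user-written
 * C trigger receives it through fcinfo->context:
 */
PG_FUNCTION_INFO_V1(example_after_trigger_sketch);

Datum
example_after_trigger_sketch(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "example_after_trigger_sketch: not called by trigger manager");
	trigdata = (TriggerData *) fcinfo->context;

	if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
	{
		/*
		 * tg_trigtuple is the old row and tg_newtuple the new one, exactly
		 * as filled in by AfterTriggerExecute() above.
		 */
	}

	/* The return value of an AFTER trigger is ignored. */
	return PointerGetDatum(NULL);
}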
4116 
4117 
4118 /*
4119  * afterTriggerMarkEvents()
4120  *
4121  * Scan the given event list for not yet invoked events. Mark the ones
4122  * that can be invoked now with the current firing ID.
4123  *
4124  * If move_list isn't NULL, events that are not to be invoked now are
4125  * transferred to move_list.
4126  *
4127  * When immediate_only is TRUE, do not invoke currently-deferred triggers.
4128  * (This will be FALSE only at main transaction exit.)
4129  *
4130  * Returns TRUE if any invokable events were found.
4131  */
4132 static bool
4134  AfterTriggerEventList *move_list,
4135  bool immediate_only)
4136 {
4137  bool found = false;
4138  AfterTriggerEvent event;
4139  AfterTriggerEventChunk *chunk;
4140 
4141  for_each_event_chunk(event, chunk, *events)
4142  {
4143  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4144  bool defer_it = false;
4145 
4146  if (!(event->ate_flags &
4147  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
4148  {
4149  /*
4150  * This trigger hasn't been called or scheduled yet. Check if we
4151  * should call it now.
4152  */
4153  if (immediate_only && afterTriggerCheckState(evtshared))
4154  {
4155  defer_it = true;
4156  }
4157  else
4158  {
4159  /*
4160  * Mark it as to be fired in this firing cycle.
4161  */
4162  evtshared->ats_firing_id = afterTriggers.firing_counter;
4163  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4164  found = true;
4165  }
4166  }
4167 
4168  /*
4169  * If it's deferred, move it to move_list, if requested.
4170  */
4171  if (defer_it && move_list != NULL)
4172  {
4173  /* add it to move_list */
4174  afterTriggerAddEvent(move_list, event, evtshared);
4175  /* mark original copy "done" so we don't do it again */
4176  event->ate_flags |= AFTER_TRIGGER_DONE;
4177  }
4178  }
4179 
4180  return found;
4181 }
4182 
4183 /*
4184  * afterTriggerInvokeEvents()
4185  *
4186  * Scan the given event list for events that are marked as to be fired
4187  * in the current firing cycle, and fire them.
4188  *
4189  * If estate isn't NULL, we use its result relation info to avoid repeated
4190  * openings and closing of trigger target relations. If it is NULL, we
4191  * make one locally to cache the info in case there are multiple trigger
4192  * events per rel.
4193  *
4194  * When delete_ok is TRUE, it's safe to delete fully-processed events.
4195  * (We are not very tense about that: we simply reset a chunk to be empty
4196  * if all its events got fired. The objective here is just to avoid useless
4197  * rescanning of events when a trigger queues new events during transaction
4198  * end, so it's not necessary to worry much about the case where only
4199  * some events are fired.)
4200  *
4201  * Returns TRUE if no unfired events remain in the list (this allows us
4202  * to avoid repeating afterTriggerMarkEvents).
4203  */
4204 static bool
4206  CommandId firing_id,
4207  EState *estate,
4208  bool delete_ok)
4209 {
4210  bool all_fired = true;
4211  AfterTriggerEventChunk *chunk;
4212  MemoryContext per_tuple_context;
4213  bool local_estate = false;
4214  Relation rel = NULL;
4215  TriggerDesc *trigdesc = NULL;
4216  FmgrInfo *finfo = NULL;
4217  Instrumentation *instr = NULL;
4218  TupleTableSlot *slot1 = NULL,
4219  *slot2 = NULL;
4220 
4221  /* Make a local EState if need be */
4222  if (estate == NULL)
4223  {
4224  estate = CreateExecutorState();
4225  local_estate = true;
4226  }
4227 
4228  /* Make a per-tuple memory context for trigger function calls */
4229  per_tuple_context =
4230  AllocSetContextCreate(CurrentMemoryContext,
4231  "AfterTriggerTupleContext",
4232  ALLOCSET_DEFAULT_SIZES);
4233 
4234  for_each_chunk(chunk, *events)
4235  {
4236  AfterTriggerEvent event;
4237  bool all_fired_in_chunk = true;
4238 
4239  for_each_event(event, chunk)
4240  {
4241  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4242 
4243  /*
4244  * Is it one for me to fire?
4245  */
4246  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4247  evtshared->ats_firing_id == firing_id)
4248  {
4249  /*
4250  * So let's fire it... but first, find the correct relation if
4251  * this is not the same relation as before.
4252  */
4253  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4254  {
4255  ResultRelInfo *rInfo;
4256 
4257  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
4258  rel = rInfo->ri_RelationDesc;
4259  trigdesc = rInfo->ri_TrigDesc;
4260  finfo = rInfo->ri_TrigFunctions;
4261  instr = rInfo->ri_TrigInstrument;
4262  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4263  {
4264  if (slot1 != NULL)
4265  {
4266  ExecDropSingleTupleTableSlot(slot1);
4267  ExecDropSingleTupleTableSlot(slot2);
4268  }
4269  slot1 = MakeSingleTupleTableSlot(rel->rd_att);
4270  slot2 = MakeSingleTupleTableSlot(rel->rd_att);
4271  }
4272  if (trigdesc == NULL) /* should not happen */
4273  elog(ERROR, "relation %u has no triggers",
4274  evtshared->ats_relid);
4275  }
4276 
4277  /*
4278  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4279  * still set, so recursive examinations of the event list
4280  * won't try to re-fire it.
4281  */
4282  AfterTriggerExecute(event, rel, trigdesc, finfo, instr,
4283  per_tuple_context, slot1, slot2);
4284 
4285  /*
4286  * Mark the event as done.
4287  */
4288  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4289  event->ate_flags |= AFTER_TRIGGER_DONE;
4290  }
4291  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4292  {
4293  /* something remains to be done */
4294  all_fired = all_fired_in_chunk = false;
4295  }
4296  }
4297 
4298  /* Clear the chunk if delete_ok and nothing left of interest */
4299  if (delete_ok && all_fired_in_chunk)
4300  {
4301  chunk->freeptr = CHUNK_DATA_START(chunk);
4302  chunk->endfree = chunk->endptr;
4303 
4304  /*
4305  * If it's the last chunk, must sync event list's tailfree too. Note
4306  * that delete_ok must NOT be passed as true if there could be
4307  * additional AfterTriggerEventList values pointing at this event
4308  * list, since we'd fail to fix their copies of tailfree.
4309  */
4310  if (chunk == events->tail)
4311  events->tailfree = chunk->freeptr;
4312  }
4313  }
4314  if (slot1 != NULL)
4315  {
4316  ExecDropSingleTupleTableSlot(slot1);
4317  ExecDropSingleTupleTableSlot(slot2);
4318  }
4319 
4320  /* Release working resources */
4321  MemoryContextDelete(per_tuple_context);
4322 
4323  if (local_estate)
4324  {
4325  ExecCleanUpTriggerState(estate);
4326  FreeExecutorState(estate);
4327  }
4328 
4329  return all_fired;
4330 }
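
/*
 * Editor's note: illustrative sketch only, not part of trigger.c.  Marking
 * and invoking are always used as a pair; the callers below
 * (AfterTriggerEndQuery, AfterTriggerFireDeferred, AfterTriggerSetState)
 * all follow this shape, looping because a fired trigger may queue further
 * events onto the same list:
 */
static void
mark_and_invoke_pattern_sketch(AfterTriggerEventList *events, EState *estate)
{
	while (afterTriggerMarkEvents(events, NULL, false))
	{
		CommandId	firing_id = afterTriggers.firing_counter++;

		if (afterTriggerInvokeEvents(events, firing_id, estate, false))
			break;				/* every marked event has been fired */
	}
}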
4331 
4332 
4333 /*
4334  * GetAfterTriggersTableData
4335  *
4336  * Find or create an AfterTriggersTableData struct for the specified
4337  * trigger event (relation + operation type). Ignore existing structs
4338  * marked "closed"; we don't want to put any additional tuples into them,
4339  * nor change their stmt-triggers-fired state.
4340  *
4341  * Note: the AfterTriggersTableData list is allocated in the current
4342  * (sub)transaction's CurTransactionContext. This is OK because
4343  * we don't need it to live past AfterTriggerEndQuery.
4344  */
4345 static AfterTriggersTableData *
4346 GetAfterTriggersTableData(Oid relid, CmdType cmdType)
4347 {
4348  AfterTriggersTableData *table;
4349  AfterTriggersQueryData *qs;
4350  MemoryContext oldcxt;
4351  ListCell *lc;
4352 
4353  /* Caller should have ensured query_depth is OK. */
4354  Assert(afterTriggers.query_depth >= 0 &&
4355  afterTriggers.query_depth < afterTriggers.maxquerydepth);
4356  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4357 
4358  foreach(lc, qs->tables)
4359  {
4360  table = (AfterTriggersTableData *) lfirst(lc);
4361  if (table->relid == relid && table->cmdType == cmdType &&
4362  !table->closed)
4363  return table;
4364  }
4365 
4366  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4367 
4368  table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
4369  table->relid = relid;
4370  table->cmdType = cmdType;
4371  qs->tables = lappend(qs->tables, table);
4372 
4373  MemoryContextSwitchTo(oldcxt);
4374 
4375  return table;
4376 }
4377 
4378 
4379 /*
4380  * MakeTransitionCaptureState
4381  *
4382  * Make a TransitionCaptureState object for the given TriggerDesc, target
4383  * relation, and operation type. The TCS object holds all the state needed
4384  * to decide whether to capture tuples in transition tables.
4385  *
4386  * If there are no triggers in 'trigdesc' that request relevant transition
4387  * tables, then return NULL.
4388  *
4389  * The resulting object can be passed to the ExecAR* functions. The caller
4390  * should set tcs_map or tcs_original_insert_tuple as appropriate when dealing
4391  * with child tables.
4392  *
4393  * Note that we copy the flags from a parent table into this struct (rather
4394  * than subsequently using the relation's TriggerDesc directly) so that we can
4395  * use it to control collection of transition tuples from child tables.
4396  *
4397  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4398  * on the same table during one query should share one transition table.
4399  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4400  * looked up using the table OID + CmdType, and are merely referenced by
4401  * the TransitionCaptureState objects we hand out to callers.
4402  */
4403 TransitionCaptureState *
4404 MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
4405 {
4406  TransitionCaptureState *state;
4407  bool need_old,
4408  need_new;
4409  AfterTriggersTableData *table;
4410  MemoryContext oldcxt;
4411  ResourceOwner saveResourceOwner;
4412 
4413  if (trigdesc == NULL)
4414  return NULL;
4415 
4416  /* Detect which table(s) we need. */
4417  switch (cmdType)
4418  {
4419  case CMD_INSERT:
4420  need_old = false;
4421  need_new = trigdesc->trig_insert_new_table;
4422  break;
4423  case CMD_UPDATE:
4424  need_old = trigdesc->trig_update_old_table;
4425  need_new = trigdesc->trig_update_new_table;
4426  break;
4427  case CMD_DELETE:
4428  need_old = trigdesc->trig_delete_old_table;
4429  need_new = false;
4430  break;
4431  default:
4432  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4433  need_old = need_new = false; /* keep compiler quiet */
4434  break;
4435  }
4436  if (!need_old && !need_new)
4437  return NULL;
4438 
4439  /* Check state, like AfterTriggerSaveEvent. */
4440  if (afterTriggers.query_depth < 0)
4441  elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4442 
4443  /* Be sure we have enough space to record events at this query depth. */
4444  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4445  AfterTriggerEnlargeQueryState();
4446 
4447  /*
4448  * Find or create an AfterTriggersTableData struct to hold the
4449  * tuplestore(s). If there's a matching struct but it's marked closed,
4450  * ignore it; we need a newer one.
4451  *
4452  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4453  * allocated in the current (sub)transaction's CurTransactionContext, and
4454  * the tuplestores are managed by the (sub)transaction's resource owner.
4455  * This is sufficient lifespan because we do not allow triggers using
4456  * transition tables to be deferrable; they will be fired during
4457  * AfterTriggerEndQuery, after which it's okay to delete the data.
4458  */
4459  table = GetAfterTriggersTableData(relid, cmdType);
4460 
4461  /* Now create required tuplestore(s), if we don't have them already. */
4462  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4463  saveResourceOwner = CurrentResourceOwner;
4464  CurrentResourceOwner = CurTransactionResourceOwner;
4465 
4466  if (need_old && table->old_tuplestore == NULL)
4467  table->old_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4468  if (need_new && table->new_tuplestore == NULL)
4469  table->new_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4470 
4471  CurrentResourceOwner = saveResourceOwner;
4472  MemoryContextSwitchTo(oldcxt);
4473 
4474  /* Now build the TransitionCaptureState struct, in caller's context */
4475  state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState));
4476  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4477  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4478  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4479  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4480  state->tcs_private = table;
4481 
4482  return state;
4483 }
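
/*
 * Editor's note: illustrative sketch only, not part of trigger.c.  A caller
 * in the executor would typically build the capture state once per statement
 * for the target relation and pass it to the ExecAR* functions; the helper
 * name below is hypothetical:
 */
static TransitionCaptureState *
make_capture_state_sketch(ResultRelInfo *resultRelInfo, CmdType operation)
{
	/* Returns NULL if no trigger on the relation uses transition tables. */
	return MakeTransitionCaptureState(resultRelInfo->ri_TrigDesc,
									  RelationGetRelid(resultRelInfo->ri_RelationDesc),
									  operation);
}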
4484 
4485 
4486 /* ----------
4487  * AfterTriggerBeginXact()
4488  *
4489  * Called at transaction start (either BEGIN or implicit for single
4490  * statement outside of transaction block).
4491  * ----------
4492  */
4493 void
4494 AfterTriggerBeginXact(void)
4495 {
4496  /*
4497  * Initialize after-trigger state structure to empty
4498  */
4499  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4500  afterTriggers.query_depth = -1;
4501 
4502  /*
4503  * Verify that there is no leftover state remaining. If these assertions
4504  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4505  * up properly.
4506  */
4507  Assert(afterTriggers.state == NULL);
4508  Assert(afterTriggers.query_stack == NULL);
4509  Assert(afterTriggers.maxquerydepth == 0);
4510  Assert(afterTriggers.event_cxt == NULL);
4511  Assert(afterTriggers.events.head == NULL);
4512  Assert(afterTriggers.trans_stack == NULL);
4513  Assert(afterTriggers.maxtransdepth == 0);
4514 }
4515 
4516 
4517 /* ----------
4518  * AfterTriggerBeginQuery()
4519  *
4520  * Called just before we start processing a single query within a
4521  * transaction (or subtransaction). Most of the real work gets deferred
4522  * until somebody actually tries to queue a trigger event.
4523  * ----------
4524  */
4525 void
4526 AfterTriggerBeginQuery(void)
4527 {
4528  /* Increase the query stack depth */
4529  afterTriggers.query_depth++;
4530 }
4531 
4532 
4533 /* ----------
4534  * AfterTriggerEndQuery()
4535  *
4536  * Called after one query has been completely processed. At this time
4537  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4538  * transfer deferred trigger events to the global deferred-trigger list.
4539  *
4540  * Note that this must be called BEFORE closing down the executor
4541  * with ExecutorEnd, because we make use of the EState's info about
4542  * target relations. Normally it is called from ExecutorFinish.
4543  * ----------
4544  */
4545 void
4546 AfterTriggerEndQuery(EState *estate)
4547 {
4548  AfterTriggersQueryData *qs;
4549 
4550  /* Must be inside a query, too */
4551  Assert(afterTriggers.query_depth >= 0);
4552 
4553  /*
4554  * If we never even got as far as initializing the event stack, there
4555  * certainly won't be any events, so exit quickly.
4556  */
4557  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4558  {
4559  afterTriggers.query_depth--;
4560  return;
4561  }
4562 
4563  /*
4564  * Process all immediate-mode triggers queued by the query, and move the
4565  * deferred ones to the main list of deferred events.
4566  *
4567  * Notice that we decide which ones will be fired, and put the deferred
4568  * ones on the main list, before anything is actually fired. This ensures
4569  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4570  * IMMEDIATE: all events we have decided to defer will be available for it
4571  * to fire.
4572  *
4573  * We loop in case a trigger queues more events at the same query level.
4574  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4575  * will instead fire any triggers in a dedicated query level. Foreign key
4576  * enforcement triggers do add to the current query level, thanks to their
4577  * passing fire_triggers = false to SPI_execute_snapshot(). Other
4578  * C-language triggers might do likewise.
4579  *
4580  * If we find no firable events, we don't have to increment
4581  * firing_counter.
4582  */
4583  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4584 
4585  for (;;)
4586  {
4587  if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
4588  {
4589  CommandId firing_id = afterTriggers.firing_counter++;
4590  AfterTriggerEventChunk *oldtail = qs->events.tail;
4591 
4592  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
4593  break; /* all fired */
4594 
4595  /*
4596  * Firing a trigger could result in query_stack being repalloc'd,
4597  * so we must recalculate qs after each afterTriggerInvokeEvents
4598  * call. Furthermore, it's unsafe to pass delete_ok = true here,
4599  * because that could cause afterTriggerInvokeEvents to try to
4600  * access qs->events after the stack has been repalloc'd.
4601  */
4602  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4603 
4604  /*
4605  * We'll need to scan the events list again. To reduce the cost
4606  * of doing so, get rid of completely-fired chunks. We know that
4607  * all events were marked IN_PROGRESS or DONE at the conclusion of
4608  * afterTriggerMarkEvents, so any still-interesting events must
4609  * have been added after that, and so must be in the chunk that
4610  * was then the tail chunk, or in later chunks. So, zap all
4611  * chunks before oldtail. This is approximately the same set of
4612  * events we would have gotten rid of by passing delete_ok = true.
4613  */
4614  Assert(oldtail != NULL);
4615  while (qs->events.head != oldtail)
4616  afterTriggerDeleteHeadEventChunk(qs);
4617  }
4618  else
4619  break;
4620  }
4621 
4622  /* Release query-level-local storage, including tuplestores if any */
4623  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
4624 
4625  afterTriggers.query_depth--;
4626 }
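
/*
 * Editor's note: illustrative sketch only, not part of trigger.c.  Seen from
 * the executor, AfterTriggerBeginQuery()/AfterTriggerEndQuery() bracket one
 * query; with default flags (no EXEC_FLAG_SKIP_TRIGGERS) the standard entry
 * points make these calls themselves:
 */
static void
run_query_with_after_triggers_sketch(QueryDesc *queryDesc)
{
	ExecutorStart(queryDesc, 0);	/* calls AfterTriggerBeginQuery() */
	ExecutorRun(queryDesc, ForwardScanDirection, 0L, true);
	ExecutorFinish(queryDesc);		/* calls AfterTriggerEndQuery(estate) */
	ExecutorEnd(queryDesc);
}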
4627 
4628 
4629 /*
4630  * AfterTriggerFreeQuery
4631  * Release subsidiary storage for a trigger query level.
4632  * This includes closing down tuplestores.
4633  * Note: it's important for this to be safe if interrupted by an error
4634  * and then called again for the same query level.
4635  */
4636 static void
4637 AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
4638 {
4639  Tuplestorestate *ts;
4640  List *tables;
4641  ListCell *lc;
4642 
4643  /* Drop the trigger events */
4644  afterTriggerFreeEventList(&qs->events);
4645 
4646  /* Drop FDW tuplestore if any */
4647  ts = qs->fdw_tuplestore;
4648  qs->fdw_tuplestore = NULL;
4649  if (ts)
4650  tuplestore_end(ts);
4651 
4652  /* Release per-table subsidiary storage */
4653  tables = qs->tables;
4654  foreach(lc, tables)
4655  {
4656  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4657 
4658  ts = table->old_tuplestore;
4659  table->old_tuplestore = NULL;
4660  if (ts)
4661  tuplestore_end(ts);
4662  ts = table->new_tuplestore;
4663  table->new_tuplestore = NULL;
4664  if (ts)
4665  tuplestore_end(ts);
4666  }
4667 
4668  /*
4669  * Now free the AfterTriggersTableData structs and list cells. Reset list
4670  * pointer first; if list_free_deep somehow gets an error, better to leak
4671  * that storage than have an infinite loop.
4672  */
4673  qs->tables = NIL;
4674  list_free_deep(tables);
4675 }
4676 
4677 
4678 /* ----------
4679  * AfterTriggerFireDeferred()
4680  *
4681  * Called just before the current transaction is committed. At this
4682  * time we invoke all pending DEFERRED triggers.
4683  *
4684  * It is possible for other modules to queue additional deferred triggers
4685  * during pre-commit processing; therefore xact.c may have to call this
4686  * multiple times.
4687  * ----------
4688  */
4689 void
4690 AfterTriggerFireDeferred(void)
4691 {
4692  AfterTriggerEventList *events;
4693  bool snap_pushed = false;
4694 
4695  /* Must not be inside a query */
4696  Assert(afterTriggers.query_depth == -1);
4697 
4698  /*
4699  * If there are any triggers to fire, make sure we have set a snapshot for
4700  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
4701  * can't assume ActiveSnapshot is valid on entry.)
4702  */
4703  events = &afterTriggers.events;
4704  if (events->head != NULL)
4705  {
4706  PushActiveSnapshot(GetTransactionSnapshot());
4707  snap_pushed = true;
4708  }
4709 
4710  /*
4711  * Run all the remaining triggers. Loop until they are all gone, in case
4712  * some trigger queues more for us to do.
4713  */
4714  while (afterTriggerMarkEvents(events, NULL, false))
4715  {
4716  CommandId firing_id = afterTriggers.firing_counter++;
4717 
4718  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4719  break; /* all fired */
4720  }
4721 
4722  /*
4723  * We don't bother freeing the event list, since it will go away anyway
4724  * (and more efficiently than via pfree) in AfterTriggerEndXact.
4725  */
4726 
4727  if (snap_pushed)
4728  PopActiveSnapshot();
4729 }
4730 
4731 
4732 /* ----------
4733  * AfterTriggerEndXact()
4734  *
4735  * The current transaction is finishing.
4736  *
4737  * Any unfired triggers are canceled so we simply throw
4738  * away anything we know.
4739  *
4740  * Note: it is possible for this to be called repeatedly in case of
4741  * error during transaction abort; therefore, do not complain if
4742  * already closed down.
4743  * ----------
4744  */
4745 void
4746 AfterTriggerEndXact(bool isCommit)
4747 {
4748  /*
4749  * Forget the pending-events list.
4750  *
4751  * Since all the info is in TopTransactionContext or children thereof, we
4752  * don't really need to do anything to reclaim memory. However, the
4753  * pending-events list could be large, and so it's useful to discard it as
4754  * soon as possible --- especially if we are aborting because we ran out
4755  * of memory for the list!
4756  */
4757  if (afterTriggers.event_cxt)
4758  {
4759  MemoryContextDelete(afterTriggers.event_cxt);
4760  afterTriggers.event_cxt = NULL;
4761  afterTriggers.events.head = NULL;
4762  afterTriggers.events.tail = NULL;
4763  afterTriggers.events.tailfree = NULL;
4764  }
4765 
4766  /*
4767  * Forget any subtransaction state as well. Since this can't be very
4768  * large, we let the eventual reset of TopTransactionContext free the
4769  * memory instead of doing it here.
4770  */
4771  afterTriggers.trans_stack = NULL;
4772  afterTriggers.maxtransdepth = 0;
4773 
4774 
4775  /*
4776  * Forget the query stack and constraint-related state information. As
4777  * with the subtransaction state information, we don't bother freeing the
4778  * memory here.
4779  */
4780  afterTriggers.query_stack = NULL;
4781  afterTriggers.maxquerydepth = 0;
4782  afterTriggers.state = NULL;
4783 
4784  /* No more afterTriggers manipulation until next transaction starts. */
4785  afterTriggers.query_depth = -1;
4786 }
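
/*
 * Editor's note: illustrative sketch only, not part of trigger.c.  xact.c
 * drives the transaction-level entry points in this order (compressed here
 * into one function purely for illustration):
 */
static void
after_trigger_xact_lifecycle_sketch(void)
{
	AfterTriggerBeginXact();	/* at transaction start */

	/* ... each query runs between AfterTriggerBeginQuery/EndQuery ... */

	AfterTriggerFireDeferred(); /* pre-commit: fire remaining DEFERRED events */
	AfterTriggerEndXact(true);	/* commit; abort paths call AfterTriggerEndXact(false) */
}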
4787 
4788 /*
4789  * AfterTriggerBeginSubXact()
4790  *
4791  * Start a subtransaction.
4792  */
4793 void
4794 AfterTriggerBeginSubXact(void)
4795 {
4796  int my_level = GetCurrentTransactionNestLevel();
4797 
4798  /*
4799  * Allocate more space in the trans_stack if needed. (Note: because the
4800  * minimum nest level of a subtransaction is 2, we waste the first couple
4801  * entries of the array; not worth the notational effort to avoid it.)
4802  */
4803  while (my_level >= afterTriggers.maxtransdepth)
4804  {
4805  if (afterTriggers.maxtransdepth == 0)
4806  {
4807  /* Arbitrarily initialize for max of 8 subtransaction levels */
4808  afterTriggers.trans_stack = (AfterTriggersTransData *)
4809  MemoryContextAlloc(TopTransactionContext,
4810  8 * sizeof(AfterTriggersTransData));
4811  afterTriggers.maxtransdepth = 8;
4812  }
4813  else
4814  {
4815  /* repalloc will keep the stack in the same context */
4816  int new_alloc = afterTriggers.maxtransdepth * 2;
4817 
4818  afterTriggers.trans_stack = (AfterTriggersTransData *)
4819  repalloc(afterTriggers.trans_stack,
4820  new_alloc * sizeof(AfterTriggersTransData));
4821  afterTriggers.maxtransdepth = new_alloc;
4822  }
4823  }
4824 
4825  /*
4826  * Push the current information into the stack. The SET CONSTRAINTS state
4827  * is not saved until/unless changed. Likewise, we don't make a
4828  * per-subtransaction event context until needed.
4829  */
4830  afterTriggers.trans_stack[my_level].state = NULL;
4831  afterTriggers.trans_stack[my_level].events = afterTriggers.events;
4832  afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
4833  afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
4834 }
4835 
4836 /*
4837  * AfterTriggerEndSubXact()
4838  *
4839  * The current subtransaction is ending.
4840  */
4841 void
4842 AfterTriggerEndSubXact(bool isCommit)
4843 {
4844  int my_level = GetCurrentTransactionNestLevel();
4845  SetConstraintState state;
4846  AfterTriggerEvent event;
4847  AfterTriggerEventChunk *chunk;
4848  CommandId subxact_firing_id;
4849 
4850  /*
4851  * Pop the prior state if needed.
4852  */
4853  if (isCommit)
4854  {
4855  Assert(my_level < afterTriggers.maxtransdepth);
4856  /* If we saved a prior state, we don't need it anymore */
4857  state = afterTriggers.trans_stack[my_level].state;
4858  if (state != NULL)
4859  pfree(state);
4860  /* this avoids double pfree if error later: */
4861  afterTriggers.trans_stack[my_level].state = NULL;
4862  Assert(afterTriggers.query_depth ==
4863  afterTriggers.trans_stack[my_level].query_depth);
4864  }
4865  else
4866  {
4867  /*
4868  * Aborting. It is possible subxact start failed before calling
4869  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
4870  * trans_stack levels that aren't there.
4871  */
4872  if (my_level >= afterTriggers.maxtransdepth)
4873  return;
4874 
4875  /*
4876  * Release query-level storage for queries being aborted, and restore
4877  * query_depth to its pre-subxact value. This assumes that a
4878  * subtransaction will not add events to query levels started in an
4879  * earlier transaction state.
4880  */
4881  while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
4882  {
4883  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
4884  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
4885  afterTriggers.query_depth--;
4886  }
4887  Assert(afterTriggers.query_depth ==
4888  afterTriggers.trans_stack[my_level].query_depth);
4889 
4890  /*
4891  * Restore the global deferred-event list to its former length,
4892  * discarding any events queued by the subxact.
4893  */
4894  afterTriggerRestoreEventList(&afterTriggers.events,
4895  &afterTriggers.trans_stack[my_level].events);
4896 
4897  /*
4898  * Restore the trigger state. If the saved state is NULL, then this
4899  * subxact didn't save it, so it doesn't need restoring.
4900  */
4901  state = afterTriggers.trans_stack[my_level].state;
4902  if (state != NULL)
4903  {
4904  pfree(afterTriggers.state);
4905  afterTriggers.state = state;
4906  }
4907  /* this avoids double pfree if error later: */
4908  afterTriggers.trans_stack[my_level].state = NULL;
4909 
4910  /*
4911  * Scan for any remaining deferred events that were marked DONE or IN
4912  * PROGRESS by this subxact or a child, and un-mark them. We can
4913  * recognize such events because they have a firing ID greater than or
4914  * equal to the firing_counter value we saved at subtransaction start.
4915  * (This essentially assumes that the current subxact includes all
4916  * subxacts started after it.)
4917  */
4918  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
4919  for_each_event_chunk(event, chunk, afterTriggers.events)
4920  {
4921  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4922 
4923  if (event->ate_flags &
4924  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
4925  {
4926  if (evtshared->ats_firing_id >= subxact_firing_id)
4927  event->ate_flags &=
4928  ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
4929  }
4930  }
4931  }
4932 }
4933 
4934 /* ----------
4935  * AfterTriggerEnlargeQueryState()
4936  *
4937  * Prepare the necessary state so that we can record AFTER trigger events
4938  * queued by a query. It is allowed to have nested queries within a
4939  * (sub)transaction, so we need to have separate state for each query
4940  * nesting level.
4941  * ----------
4942  */
4943 static void
4944 AfterTriggerEnlargeQueryState(void)
4945 {
4946  int init_depth = afterTriggers.maxquerydepth;
4947 
4948  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
4949 
4950  if (afterTriggers.maxquerydepth == 0)
4951  {
4952  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
4953 
4954  afterTriggers.query_stack = (AfterTriggersQueryData *)
4955  MemoryContextAlloc(TopTransactionContext,
4956  new_alloc * sizeof(AfterTriggersQueryData));
4957  afterTriggers.maxquerydepth = new_alloc;
4958  }
4959  else
4960  {
4961  /* repalloc will keep the stack in the same context */
4962  int old_alloc = afterTriggers.maxquerydepth;
4963  int new_alloc = Max(afterTriggers.query_depth + 1,
4964  old_alloc * 2);
4965 
4966  afterTriggers.query_stack = (AfterTriggersQueryData *)
4967  repalloc(afterTriggers.query_stack,
4968  new_alloc * sizeof(AfterTriggersQueryData));
4969  afterTriggers.maxquerydepth = new_alloc;
4970  }
4971 
4972  /* Initialize new array entries to empty */
4973  while (init_depth < afterTriggers.maxquerydepth)
4974  {
4975  AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth];
4976 
4977  qs->events.head = NULL;
4978  qs->events.tail = NULL;
4979  qs->events.tailfree = NULL;
4980  qs->fdw_tuplestore = NULL;
4981  qs->tables = NIL;
4982 
4983  ++init_depth;
4984  }
4985 }
4986 
4987 /*
4988  * Create an empty SetConstraintState with room for numalloc trigstates
4989  */
4990 static SetConstraintState
4991 SetConstraintStateCreate(int numalloc)
4992 {
4993  SetConstraintState state;
4994 
4995  /* Behave sanely with numalloc == 0 */
4996  if (numalloc <= 0)
4997  numalloc = 1;
4998 
4999  /*
5000  * We assume that zeroing will correctly initialize the state values.
5001  */
5002  state = (SetConstraintState)
5003  MemoryContextAllocZero(TopTransactionContext,
5004  offsetof(SetConstraintStateData, trigstates) +
5005  numalloc * sizeof(SetConstraintTriggerData));
5006 
5007  state->numalloc = numalloc;
5008 
5009  return state;
5010 }
5011 
5012 /*
5013  * Copy a SetConstraintState
5014  */
5015 static SetConstraintState
5016 SetConstraintStateCopy(SetConstraintState origstate)
5017 {
5018  SetConstraintState state;
5019 
5020  state = SetConstraintStateCreate(origstate->numstates);
5021 
5022  state->all_isset = origstate->all_isset;
5023  state->all_isdeferred = origstate->all_isdeferred;
5024  state->numstates = origstate->numstates;
5025  memcpy(state->trigstates, origstate->trigstates,
5026  origstate->numstates * sizeof(SetConstraintTriggerData));
5027 
5028  return state;
5029 }
5030 
5031 /*
5032  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
5033  * pointer to the state object (it will change if we have to repalloc).
5034  */
5035 static SetConstraintState
5036 SetConstraintStateAddItem(SetConstraintState state,
5037  Oid tgoid, bool tgisdeferred)
5038 {
5039  if (state->numstates >= state->numalloc)
5040  {
5041  int newalloc = state->numalloc * 2;
5042 
5043  newalloc = Max(newalloc, 8); /* in case original has size 0 */
5044  state = (SetConstraintState)
5045  repalloc(state,
5046  offsetof(SetConstraintStateData, trigstates) +
5047  newalloc * sizeof(SetConstraintTriggerData));
5048  state->numalloc = newalloc;
5049  Assert(state->numstates < state->numalloc);
5050  }
5051 
5052  state->trigstates[state->numstates].sct_tgoid = tgoid;
5053  state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
5054  state->numstates++;
5055 
5056  return state;
5057 }
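
/*
 * Editor's note: illustrative sketch only, not part of trigger.c.  The three
 * helpers above compose as AfterTriggerSetState() below uses them: create on
 * first use, copy before a subtransaction modifies the state, and re-assign
 * the possibly-moved pointer after adding an item:
 */
static void
set_constraint_state_usage_sketch(Oid tgoid, bool tgisdeferred)
{
	SetConstraintState saved;

	if (afterTriggers.state == NULL)
		afterTriggers.state = SetConstraintStateCreate(8);

	/* snapshot that a subtransaction could restore on abort */
	saved = SetConstraintStateCopy(afterTriggers.state);
	(void) saved;				/* the real code stores this in trans_stack */

	/* AddItem may repalloc, so always keep the returned pointer */
	afterTriggers.state =
		SetConstraintStateAddItem(afterTriggers.state, tgoid, tgisdeferred);
}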
5058 
5059 /* ----------
5060  * AfterTriggerSetState()
5061  *
5062  * Execute the SET CONSTRAINTS ... utility command.
5063  * ----------
5064  */
5065 void
5066 AfterTriggerSetState(ConstraintsSetStmt *stmt)
5067 {
5068  int my_level = GetCurrentTransactionNestLevel();
5069 
5070  /* If we haven't already done so, initialize our state. */
5071  if (afterTriggers.state == NULL)
5072  afterTriggers.state = SetConstraintStateCreate(8);
5073 
5074  /*
5075  * If in a subtransaction, and we didn't save the current state already,
5076  * save it so it can be restored if the subtransaction aborts.
5077  */
5078  if (my_level > 1 &&
5079  afterTriggers.trans_stack[my_level].state == NULL)
5080  {
5081  afterTriggers.trans_stack[my_level].state =
5082  SetConstraintStateCopy(afterTriggers.state);
5083  }
5084 
5085  /*
5086  * Handle SET CONSTRAINTS ALL ...
5087  */
5088  if (stmt->constraints == NIL)
5089  {
5090  /*
5091  * Forget any previous SET CONSTRAINTS commands in this transaction.
5092  */
5093  afterTriggers.state->numstates = 0;
5094 
5095  /*
5096  * Set the per-transaction ALL state to known.
5097  */
5098  afterTriggers.state->all_isset = true;
5099  afterTriggers.state->all_isdeferred = stmt->deferred;
5100  }
5101  else
5102  {
5103  Relation conrel;
5104  Relation tgrel;
5105  List *conoidlist = NIL;
5106  List *tgoidlist = NIL;
5107  ListCell *lc;
5108 
5109  /*
5110  * Handle SET CONSTRAINTS constraint-name [, ...]
5111  *
5112  * First, identify all the named constraints and make a list of their
5113  * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
5114  * the same name within a schema, the specifications are not
5115  * necessarily unique. Our strategy is to target all matching
5116  * constraints within the first search-path schema that has any
5117  * matches, but disregard matches in schemas beyond the first match.
5118  * (This is a bit odd but it's the historical behavior.)
5119  */
5120  conrel = heap_open(ConstraintRelationId, AccessShareLock);
5121 
5122  foreach(lc, stmt->constraints)
5123  {
5124  RangeVar *constraint = lfirst(lc);
5125  bool found;
5126  List *namespacelist;
5127  ListCell *nslc;
5128 
5129  if (constraint->catalogname)
5130  {
5131  if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
5132  ereport(ERROR,
5133  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5134  errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
5135  constraint->catalogname, constraint->schemaname,
5136  constraint->relname)));
5137  }
5138 
5139  /*
5140  * If we're given the schema name with the constraint, look only
5141  * in that schema. If given a bare constraint name, use the
5142  * search path to find the first matching constraint.
5143  */
5144  if (constraint->schemaname)
5145  {
5146  Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
5147  false);
5148 
5149  namespacelist = list_make1_oid(namespaceId);
5150  }
5151  else
5152  {
5153  namespacelist = fetch_search_path(true);
5154  }
5155 
5156  found = false;
5157  foreach(nslc, namespacelist)
5158  {
5159  Oid namespaceId = lfirst_oid(nslc);
5160  SysScanDesc conscan;
5161  ScanKeyData skey[2];
5162  HeapTuple tup;
5163 
5164  ScanKeyInit(&skey[0],
5165  Anum_pg_constraint_conname,
5166  BTEqualStrategyNumber, F_NAMEEQ,
5167  CStringGetDatum(constraint->relname));
5168  ScanKeyInit(&skey[1],
5169  Anum_pg_constraint_connamespace,
5170  BTEqualStrategyNumber, F_OIDEQ,
5171  ObjectIdGetDatum(namespaceId));
5172 
5173  conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
5174  true, NULL, 2, skey);
5175 
5176  while (HeapTupleIsValid(tup = systable_getnext(conscan)))
5177  {
5178  Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
5179 
5180  if (con->condeferrable)
5181  conoidlist = lappend_oid(conoidlist,
5182  HeapTupleGetOid(tup));
5183  else if (stmt->deferred)
5184  ereport(ERROR,
5185  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
5186  errmsg("constraint \"%s\" is not deferrable",
5187  constraint->relname)));
5188  found = true;
5189  }
5190 
5191  systable_endscan(conscan);
5192 
5193  /*
5194  * Once we've found a matching constraint we do not search
5195  * later parts of the search path.
5196  */
5197  if (found)
5198  break;
5199  }
5200 
5201  list_free(namespacelist);
5202 
5203  /*
5204  * Not found?
5205  */
5206  if (!found)
5207  ereport(ERROR,
5208  (errcode(ERRCODE_UNDEFINED_OBJECT),
5209  errmsg("constraint \"%s\" does not exist",
5210  constraint->relname)));
5211  }
5212 
5213  heap_close(conrel, AccessShareLock);
5214 
5215  /*
5216  * Now, locate the trigger(s) implementing each of these constraints,
5217  * and make a list of their OIDs.
5218  */
5219  tgrel = heap_open(TriggerRelationId, AccessShareLock);
5220 
5221  foreach(lc, conoidlist)
5222  {
5223  Oid conoid = lfirst_oid(lc);
5224  bool found;
5225  ScanKeyData skey;
5226  SysScanDesc tgscan;
5227  HeapTuple htup;
5228 
5229  found = false;
5230 
5231  ScanKeyInit(&skey,
5232  Anum_pg_trigger_tgconstraint,
5233  BTEqualStrategyNumber, F_OIDEQ,
5234  ObjectIdGetDatum(conoid));
5235 
5236  tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
5237  NULL, 1, &skey);
5238 
5239  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
5240  {
5241  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
5242 
5243  /*
5244  * Silently skip triggers that are marked as non-deferrable in
5245  * pg_trigger. This is not an error condition, since a
5246  * deferrable RI constraint may have some non-deferrable
5247  * actions.
5248  */
5249  if (pg_trigger->tgdeferrable)
5250  tgoidlist = lappend_oid(tgoidlist,
5251  HeapTupleGetOid(htup));
5252 
5253  found = true;
5254  }
5255 
5256  systable_endscan(tgscan);
5257 
5258  /* Safety check: a deferrable constraint should have triggers */
5259  if (!found)
5260  elog(ERROR, "no triggers found for constraint with OID %u",
5261  conoid);
5262  }
5263 
5264  heap_close(tgrel, AccessShareLock);
5265 
5266  /*
5267  * Now we can set the trigger states of individual triggers for this
5268  * xact.
5269  */
5270  foreach(lc, tgoidlist)
5271  {
5272  Oid tgoid = lfirst_oid(lc);
5273  SetConstraintState state = afterTriggers.state;
5274  bool found = false;
5275  int i;
5276 
5277  for (i = 0; i < state->numstates; i++)
5278  {
5279  if (state->trigstates[i].sct_tgoid == tgoid)
5280  {
5281  state->trigstates[i].sct_tgisdeferred = stmt->deferred;
5282  found = true;
5283  break;
5284  }
5285  }
5286  if (!found)
5287  {
5288  afterTriggers.state =
5289  SetConstraintStateAddItem(state, tgoid, stmt->deferred);
5290  }
5291  }
5292  }
5293 
5294  /*
5295  * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
5296  * checks against that constraint must be made when the SET CONSTRAINTS
5297  * command is executed -- i.e. the effects of the SET CONSTRAINTS command
5298  * apply retroactively. We've updated the constraints state, so scan the
5299  * list of previously deferred events to fire any that have now become
5300  * immediate.
5301  *
5302  * Obviously, if this was SET ... DEFERRED then it can't have converted
5303  * any unfired events to immediate, so we need do nothing in that case.
5304  */
5305  if (!stmt->deferred)
5306  {
5307  AfterTriggerEventList *events = &afterTriggers.events;
5308  bool snapshot_set = false;
5309 
5310  while (afterTriggerMarkEvents(events, NULL, true))
5311  {
5312  CommandId firing_id = afterTriggers.firing_counter++;
5313 
5314  /*
5315  * Make sure a snapshot has been established in case trigger
5316  * functions need one. Note that we avoid setting a snapshot if
5317  * we don't find at least one trigger that has to be fired now.
5318  * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
5319  * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
5320  * at the start of a transaction it's not possible for any trigger
5321  * events to be queued yet.)
5322  */
5323  if (!snapshot_set)
5324  {
5325  PushActiveSnapshot(GetTransactionSnapshot());
5326  snapshot_set = true;
5327  }
5328 
5329  /*
5330  * We can delete fired events if we are at top transaction level,
5331  * but we'd better not if inside a subtransaction, since the
5332  * subtransaction could later get rolled back.
5333  */
5334  if (afterTriggerInvokeEvents(events, firing_id, NULL,
5335  !IsSubTransaction()))
5336  break; /* all fired */
5337  }
5338 
5339  if (snapshot_set)
5340  PopActiveSnapshot();
5341  }
5342 }
5343 
5344 /* ----------
5345  * AfterTriggerPendingOnRel()
5346  * Test to see if there are any pending after-trigger events for rel.
5347  *
5348  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5349  * it is unsafe to perform major surgery on a relation. Note that only
5350  * local pending events are examined. We assume that having exclusive lock
5351  * on a rel guarantees there are no unserviced events in other backends ---
5352  * but having a lock does not prevent there being such events in our own.
5353  *
5354  * In some scenarios it'd be reasonable to remove pending events (more
5355  * specifically, mark them DONE by the current subxact) but without a lot
5356  * of knowledge of the trigger semantics we can't do this in general.
5357  * ----------
5358  */
5359 bool
5360 AfterTriggerPendingOnRel(Oid relid)
5361 {
5362  AfterTriggerEvent event;
5363  AfterTriggerEventChunk *chunk;
5364  int depth;
5365 
5366  /* Scan queued events */
5367  for_each_event_chunk(event, chunk, afterTriggers.events)
5368  {
5369  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5370 
5371  /*
5372  * We can ignore completed events. (Even if a DONE flag is rolled
5373  * back by subxact abort, it's OK because the effects of the TRUNCATE
5374  * or whatever must get rolled back too.)
5375  */
5376  if (event->ate_flags & AFTER_TRIGGER_DONE)
5377  continue;
5378 
5379  if (evtshared->ats_relid == relid)
5380  return true;
5381  }
5382 
5383  /*
5384  * Also scan events queued by incomplete queries. This could only matter
5385  * if TRUNCATE/etc is executed by a function or trigger within an updating
5386  * query on the same relation, which is pretty perverse, but let's check.
5387  */
5388  for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
5389  {
5390  for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events)
5391  {
5392  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5393 
5394  if (event->ate_flags & AFTER_TRIGGER_DONE)
5395  continue;
5396 
5397  if (evtshared->ats_relid == relid)
5398  return true;
5399  }
5400  }
5401 
5402  return false;
5403 }
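
/*
 * Editor's note: illustrative sketch only, not part of trigger.c.  Utility
 * commands such as TRUNCATE use this test to refuse operating on a table
 * that still has unfired local trigger events; a guard of roughly this
 * shape appears in their code paths:
 */
static void
check_no_pending_after_triggers_sketch(Relation rel)
{
	if (AfterTriggerPendingOnRel(RelationGetRelid(rel)))
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_IN_USE),
				 errmsg("cannot operate on \"%s\" because it has pending trigger events",
						RelationGetRelationName(rel))));
}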
5404 
5405 
5406 /* ----------
5407  * AfterTriggerSaveEvent()
5408  *
5409  * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5410  * be fired for an event.
5411  *
5412  * NOTE: this is called whenever there are any triggers associated with
5413  * the event (even if they are disabled). This function decides which
5414  * triggers actually need to be queued. It is also called after each row,
5415  * even if there are no triggers for that event, if there are any AFTER
5416  * STATEMENT triggers for the statement which use transition tables, so that
5417  * the transition tuplestores can be built.
5418  *
5419  * Transition tuplestores are built now, rather than when events are pulled
5420  * off of the queue because AFTER ROW triggers are allowed to select from the
5421  * transition tables for the statement.
5422  * ----------
5423  */
5424 static void
5425 AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
5426  int event, bool row_trigger,
5427  HeapTuple oldtup, HeapTuple newtup,
5428  List *recheckIndexes, Bitmapset *modifiedCols,
5429  TransitionCaptureState *transition_capture)
5430 {
5431  Relation rel = relinfo->ri_RelationDesc;
5432  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5433  AfterTriggerEventData new_event;
5434  AfterTriggerSharedData new_shared;
5435  char relkind = rel->rd_rel->relkind;
5436  int tgtype_event;
5437  int tgtype_level;
5438  int i;
5439  Tuplestorestate *fdw_tuplestore = NULL;
5440 
5441  /*
5442  * Check state. We use a normal test not Assert because it is possible to
5443  * reach here in the wrong state given misconfigured RI triggers, in
5444  * particular deferring a cascade action trigger.
5445  */
5446  if (afterTriggers.query_depth < 0)
5447  elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
5448 
5449  /* Be sure we have enough space to record events at this query depth. */
5450  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5451  AfterTriggerEnlargeQueryState();
5452 
5453  /*
5454  * If the directly named relation has any triggers with transition tables,
5455  * then we need to capture transition tuples.
5456  */
5457  if (row_trigger && transition_capture != NULL)
5458  {
5459  HeapTuple original_insert_tuple = transition_capture->tcs_original_insert_tuple;
5460  TupleConversionMap *map = transition_capture->tcs_map;
5461  bool delete_old_table = transition_capture->tcs_delete_old_table;
5462  bool update_old_table = transition_capture->tcs_update_old_table;
5463  bool update_new_table = transition_capture->tcs_update_new_table;
5464  bool insert_new_table = transition_capture->tcs_insert_new_table;
5465 
5466  if ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
5467  (event == TRIGGER_EVENT_UPDATE && update_old_table))
5468  {
5469  Tuplestorestate *old_tuplestore;
5470 
5471  Assert(oldtup != NULL);
5472  old_tuplestore = transition_capture->tcs_private->old_tuplestore;
5473 
5474  if (map != NULL)
5475  {
5476  HeapTuple converted = do_convert_tuple(oldtup, map);
5477 
5478  tuplestore_puttuple(old_tuplestore, converted);
5479  pfree(converted);
5480  }
5481  else
5482  tuplestore_puttuple(old_tuplestore, oldtup);
5483  }
5484  if ((event == TRIGGER_EVENT_INSERT && insert_new_table) ||
5485  (event == TRIGGER_EVENT_UPDATE && update_new_table))
5486  {
5487  Tuplestorestate *new_tuplestore;
5488 
5489  Assert(newtup != NULL);
5490  new_tuplestore = transition_capture->tcs_private->new_tuplestore;
5491 
5492  if (original_insert_tuple != NULL)
5493  tuplestore_puttuple(new_tuplestore, original_insert_tuple);
5494  else if (map != NULL)
5495  {
5496  HeapTuple converted = do_convert_tuple(newtup, map);
5497 
5498  tuplestore_puttuple(new_tuplestore, converted);
5499  pfree(converted);
5500  }
5501  else
5502  tuplestore_puttuple(new_tuplestore, newtup);
5503  }
5504 
5505  /* If transition tables are the only reason we're here, return. */
5506  if (trigdesc == NULL ||
5507  (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
5508  (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
5509  (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
5510  return;
5511  }
5512 
5513  /*
5514  * Validate the event code and collect the associated tuple CTIDs.
5515  *
5516  * The event code will be used both as a bitmask and an array offset, so
5517  * validation is important to make sure we don't walk off the edge of our
5518  * arrays.
5519  *
5520  * Also, if we're considering statement-level triggers, check whether we
5521  * already queued a set of them for this event, and cancel the prior set
5522  * if so. This preserves the behavior that statement-level triggers fire
5523  * just once per statement and fire after row-level triggers.
5524  */
5525  switch (event)
5526  {
5527  case TRIGGER_EVENT_INSERT:
5528  tgtype_event = TRIGGER_TYPE_INSERT;
5529  if (row_trigger)
5530  {
5531  Assert(oldtup == NULL);
5532  Assert(newtup != NULL);
5533  ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid1));
5534  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5535  }
5536  else
5537  {
5538  Assert(oldtup == NULL);
5539  Assert(newtup == NULL);
5540  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5541  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5542  cancel_prior_stmt_triggers(RelationGetRelid(rel),
5543  CMD_INSERT, event);
5544  }
5545  break;
5546  case TRIGGER_EVENT_DELETE:
5547  tgtype_event = TRIGGER_TYPE_DELETE;
5548  if (row_trigger)
5549  {
5550  Assert(oldtup != NULL);
5551  Assert(newtup == NULL);
5552  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5553  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5554  }
5555  else
5556  {
5557  Assert(oldtup == NULL);
5558  Assert(newtup == NULL);
5559  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5560  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5561  cancel_prior_stmt_triggers(RelationGetRelid(rel),
5562  CMD_DELETE, event);
5563  }
5564  break;
5565  case TRIGGER_EVENT_UPDATE:
5566  tgtype_event = TRIGGER_TYPE_UPDATE;
5567  if (row_trigger)
5568  {
5569  Assert(oldtup != NULL);
5570  Assert(newtup != NULL);
5571  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5572  ItemPointerCopy(&(newtup->t_self), &(new_event.