trigger.c
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/heapam.h"
18 #include "access/sysattr.h"
19 #include "access/htup_details.h"
20 #include "access/xact.h"
21 #include "catalog/catalog.h"
22 #include "catalog/dependency.h"
23 #include "catalog/index.h"
24 #include "catalog/indexing.h"
25 #include "catalog/objectaccess.h"
26 #include "catalog/partition.h"
27 #include "catalog/pg_constraint.h"
28 #include "catalog/pg_inherits.h"
29 #include "catalog/pg_proc.h"
30 #include "catalog/pg_trigger.h"
31 #include "catalog/pg_type.h"
32 #include "commands/dbcommands.h"
33 #include "commands/defrem.h"
34 #include "commands/trigger.h"
35 #include "executor/executor.h"
36 #include "miscadmin.h"
37 #include "nodes/bitmapset.h"
38 #include "nodes/makefuncs.h"
39 #include "optimizer/clauses.h"
40 #include "optimizer/var.h"
41 #include "parser/parse_clause.h"
42 #include "parser/parse_collate.h"
43 #include "parser/parse_func.h"
44 #include "parser/parse_relation.h"
45 #include "parser/parsetree.h"
46 #include "pgstat.h"
47 #include "rewrite/rewriteManip.h"
48 #include "storage/bufmgr.h"
49 #include "storage/lmgr.h"
50 #include "tcop/utility.h"
51 #include "utils/acl.h"
52 #include "utils/builtins.h"
53 #include "utils/bytea.h"
54 #include "utils/fmgroids.h"
55 #include "utils/inval.h"
56 #include "utils/lsyscache.h"
57 #include "utils/memutils.h"
58 #include "utils/rel.h"
59 #include "utils/snapmgr.h"
60 #include "utils/syscache.h"
61 #include "utils/tqual.h"
62 #include "utils/tuplestore.h"
63 
64 
65 /* GUC variables */
66 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
67 
68 /* How many levels deep into trigger execution are we? */
69 static int MyTriggerDepth = 0;
70 
71 /*
72  * Note that similar macros also exist in executor/execMain.c. There does not
73  * appear to be any good header to put them into, given the structures that
74  * they use, so we let them be duplicated. Be sure to update all if one needs
75  * to be changed, however.
76  */
77 #define GetUpdatedColumns(relinfo, estate) \
78  (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
79 
80 /* Local function prototypes */
81 static void ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid);
82 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
83 static HeapTuple GetTupleForTrigger(EState *estate,
84  EPQState *epqstate,
85  ResultRelInfo *relinfo,
86  ItemPointer tid,
87  LockTupleMode lockmode,
88  TupleTableSlot **newSlot);
89 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
90  Trigger *trigger, TriggerEvent event,
91  Bitmapset *modifiedCols,
92  HeapTuple oldtup, HeapTuple newtup);
93 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
94  int tgindx,
95  FmgrInfo *finfo,
96  Instrumentation *instr,
97  MemoryContext per_tuple_context);
98 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
99  int event, bool row_trigger,
100  HeapTuple oldtup, HeapTuple newtup,
101  List *recheckIndexes, Bitmapset *modifiedCols,
102  TransitionCaptureState *transition_capture);
103 static void AfterTriggerEnlargeQueryState(void);
104 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
105 
106 
107 /*
108  * Create a trigger. Returns the address of the created trigger.
109  *
110  * queryString is the source text of the CREATE TRIGGER command.
111  * This must be supplied if a whenClause is specified, else it can be NULL.
112  *
113  * relOid, if nonzero, is the relation on which the trigger should be
114  * created. If zero, the name provided in the statement will be looked up.
115  *
116  * refRelOid, if nonzero, is the relation to which the constraint trigger
117  * refers. If zero, the constraint relation name provided in the statement
118  * will be looked up as needed.
119  *
120  * constraintOid, if nonzero, says that this trigger is being created
121  * internally to implement that constraint. A suitable pg_depend entry will
122  * be made to link the trigger to that constraint. constraintOid is zero when
123  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
124  * TRIGGER, we build a pg_constraint entry internally.)
125  *
126  * indexOid, if nonzero, is the OID of an index associated with the constraint.
127  * We do nothing with this except store it into pg_trigger.tgconstrindid;
128  * but when creating a trigger for a deferrable unique constraint on a
129  * partitioned table, its children are looked up. Note we don't cope with
130  * invalid indexes in that case.
131  *
132  * funcoid, if nonzero, is the OID of the function to invoke. When this is
133  * given, stmt->funcname is ignored.
134  *
135  * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
136  * if that trigger is dropped, this one should be too. (This is passed as
137  * Invalid by most callers; it's set here when recursing on a partition.)
138  *
139  * If whenClause is passed, it is an already-transformed expression for
140  * WHEN. In this case, we ignore any that may come in stmt->whenClause.
141  *
142  * If isInternal is true then this is an internally-generated trigger.
143  * This argument sets the tgisinternal field of the pg_trigger entry, and
144  * if true causes us to modify the given trigger name to ensure uniqueness.
145  *
146  * When isInternal is not true we require ACL_TRIGGER permissions on the
147  * relation, as well as ACL_EXECUTE on the trigger function. For internal
148  * triggers the caller must apply any required permission checks.
149  *
150  * When called on partitioned tables, this function recurses to create the
151  * trigger on all the partitions, except if isInternal is true, in which
152  * case caller is expected to execute recursion on its own.
153  *
154  * Note: can return InvalidObjectAddress if we decided to not create a trigger
155  * at all, but a foreign-key constraint. This is a kluge for backwards
156  * compatibility.
157  */
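/*
 * As a rough sketch (argument values here are hypothetical, not taken from
 * any particular caller): an internal caller creating a constraint trigger
 * might invoke this as
 *
 *     (void) CreateTrigger(trigStmt, NULL, RelationGetRelid(rel),
 *                          InvalidOid, constraintOid, indexOid, funcOid,
 *                          InvalidOid, NULL, true, false);
 *
 * i.e. with isInternal = true, so the trigger name is uniquified and no
 * ACL checks are applied, per the rules above.
 */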
158 ObjectAddress
159 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
160  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
161  Oid funcoid, Oid parentTriggerOid, Node *whenClause,
162  bool isInternal, bool in_partition)
163 {
164  int16 tgtype;
165  int ncolumns;
166  int16 *columns;
167  int2vector *tgattr;
168  List *whenRtable;
169  char *qual;
170  Datum values[Natts_pg_trigger];
171  bool nulls[Natts_pg_trigger];
172  Relation rel;
173  AclResult aclresult;
174  Relation tgrel;
175  SysScanDesc tgscan;
176  ScanKeyData key;
177  Relation pgrel;
178  HeapTuple tuple;
179  Oid fargtypes[1]; /* dummy */
180  Oid funcrettype;
181  Oid trigoid;
182  char internaltrigname[NAMEDATALEN];
183  char *trigname;
184  Oid constrrelid = InvalidOid;
185  ObjectAddress myself,
186  referenced;
187  char *oldtablename = NULL;
188  char *newtablename = NULL;
189  bool partition_recurse;
190 
191  if (OidIsValid(relOid))
192  rel = heap_open(relOid, ShareRowExclusiveLock);
193  else
194  rel = heap_openrv(stmt->relation, ShareRowExclusiveLock);
195 
196  /*
197  * Triggers must be on tables or views, and there are additional
198  * relation-type-specific restrictions.
199  */
200  if (rel->rd_rel->relkind == RELKIND_RELATION)
201  {
202  /* Tables can't have INSTEAD OF triggers */
203  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
204  stmt->timing != TRIGGER_TYPE_AFTER)
205  ereport(ERROR,
206  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
207  errmsg("\"%s\" is a table",
208  RelationGetRelationName(rel)),
209  errdetail("Tables cannot have INSTEAD OF triggers.")));
210  }
211  else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
212  {
213  /* Partitioned tables can't have INSTEAD OF triggers */
214  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
215  stmt->timing != TRIGGER_TYPE_AFTER)
216  ereport(ERROR,
217  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
218  errmsg("\"%s\" is a table",
219  RelationGetRelationName(rel)),
220  errdetail("Tables cannot have INSTEAD OF triggers.")));
221 
222  /*
223  * FOR EACH ROW triggers have further restrictions
224  */
225  if (stmt->row)
226  {
227  /*
228  * BEFORE triggers FOR EACH ROW are forbidden, because they would
229  * allow the user to direct the row to another partition, which
230  * isn't implemented in the executor.
231  */
232  if (stmt->timing != TRIGGER_TYPE_AFTER)
233  ereport(ERROR,
234  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
235  errmsg("\"%s\" is a partitioned table",
236  RelationGetRelationName(rel)),
237  errdetail("Partitioned tables cannot have BEFORE / FOR EACH ROW triggers.")));
238 
239  /*
240  * Disallow use of transition tables.
241  *
242  * Note that we have another restriction about transition tables
243  * in partitions; search for 'has_superclass' below for an
244  * explanation. The check here is just to protect from the fact
245  * that if we allowed it here, the creation would succeed for a
246  * partitioned table with no partitions, but would be blocked by
247  * the other restriction when the first partition was created,
248  * which is very unfriendly behavior.
249  */
250  if (stmt->transitionRels != NIL)
251  ereport(ERROR,
252  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
253  errmsg("\"%s\" is a partitioned table",
254  RelationGetRelationName(rel)),
255  errdetail("Triggers on partitioned tables cannot have transition tables.")));
256  }
257  }
258  else if (rel->rd_rel->relkind == RELKIND_VIEW)
259  {
260  /*
261  * Views can have INSTEAD OF triggers (which we check below are
262  * row-level), or statement-level BEFORE/AFTER triggers.
263  */
264  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
265  ereport(ERROR,
266  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
267  errmsg("\"%s\" is a view",
268  RelationGetRelationName(rel)),
269  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
270  /* Disallow TRUNCATE triggers on VIEWs */
271  if (TRIGGER_FOR_TRUNCATE(stmt->events))
272  ereport(ERROR,
273  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
274  errmsg("\"%s\" is a view",
275  RelationGetRelationName(rel)),
276  errdetail("Views cannot have TRUNCATE triggers.")));
277  }
278  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
279  {
280  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
281  stmt->timing != TRIGGER_TYPE_AFTER)
282  ereport(ERROR,
283  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
284  errmsg("\"%s\" is a foreign table",
285  RelationGetRelationName(rel)),
286  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
287 
288  if (TRIGGER_FOR_TRUNCATE(stmt->events))
289  ereport(ERROR,
290  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
291  errmsg("\"%s\" is a foreign table",
292  RelationGetRelationName(rel)),
293  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
294 
295  /*
296  * We disallow constraint triggers to protect the assumption that
297  * triggers on FKs can't be deferred. See notes with AfterTriggers
298  * data structures, below.
299  */
300  if (stmt->isconstraint)
301  ereport(ERROR,
302  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
303  errmsg("\"%s\" is a foreign table",
304  RelationGetRelationName(rel)),
305  errdetail("Foreign tables cannot have constraint triggers.")));
306  }
307  else
308  ereport(ERROR,
309  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
310  errmsg("\"%s\" is not a table or view",
311  RelationGetRelationName(rel))));
312 
313  if (!allowSystemTableMods && IsSystemRelation(rel))
314  ereport(ERROR,
315  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
316  errmsg("permission denied: \"%s\" is a system catalog",
317  RelationGetRelationName(rel))));
318 
319  if (stmt->isconstraint)
320  {
321  /*
322  * We must take a lock on the target relation to protect against
323  * concurrent drop. It's not clear that AccessShareLock is strong
324  * enough, but we certainly need at least that much... otherwise, we
325  * might end up creating a pg_constraint entry referencing a
326  * nonexistent table.
327  */
328  if (OidIsValid(refRelOid))
329  {
330  LockRelationOid(refRelOid, AccessShareLock);
331  constrrelid = refRelOid;
332  }
333  else if (stmt->constrrel != NULL)
334  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
335  false);
336  }
337 
338  /* permission checks */
339  if (!isInternal)
340  {
341  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
342  ACL_TRIGGER);
343  if (aclresult != ACLCHECK_OK)
344  aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
345  RelationGetRelationName(rel));
346 
347  if (OidIsValid(constrrelid))
348  {
349  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
350  ACL_TRIGGER);
351  if (aclresult != ACLCHECK_OK)
352  aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
353  get_rel_name(constrrelid));
354  }
355  }
356 
357  /*
358  * When called on a partitioned table to create a FOR EACH ROW trigger
359  * that's not internal, we create one trigger for each partition, too.
360  *
361  * For that, we'd better hold lock on all of them ahead of time.
362  */
363  partition_recurse = !isInternal && stmt->row &&
364  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
365  if (partition_recurse)
366  list_free(find_all_inheritors(RelationGetRelid(rel),
367  ShareRowExclusiveLock, NULL));
368 
369  /* Compute tgtype */
370  TRIGGER_CLEAR_TYPE(tgtype);
371  if (stmt->row)
372  TRIGGER_SETT_ROW(tgtype);
373  tgtype |= stmt->timing;
374  tgtype |= stmt->events;
375 
376  /* Disallow ROW-level TRUNCATE triggers */
377  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
378  ereport(ERROR,
379  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
380  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
381 
382  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
383  if (TRIGGER_FOR_INSTEAD(tgtype))
384  {
385  if (!TRIGGER_FOR_ROW(tgtype))
386  ereport(ERROR,
387  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
388  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
389  if (stmt->whenClause)
390  ereport(ERROR,
391  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
392  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
393  if (stmt->columns != NIL)
394  ereport(ERROR,
395  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
396  errmsg("INSTEAD OF triggers cannot have column lists")));
397  }
398 
399  /*
400  * We don't yet support naming ROW transition variables, but the parser
401  * recognizes the syntax so we can give a nicer message here.
402  *
403  * Per standard, REFERENCING TABLE names are only allowed on AFTER
404  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
405  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
406  * only allowed once. Per standard, OLD may not be specified when
407  * creating a trigger only for INSERT, and NEW may not be specified when
408  * creating a trigger only for DELETE.
409  *
410  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
411  * reference both ROW and TABLE transition data.
412  */
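/*
 * For illustration, a command that satisfies all of the checks below would
 * look something like (names are examples only):
 *
 *     CREATE TRIGGER t AFTER UPDATE ON tab
 *         REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
 *         FOR EACH STATEMENT EXECUTE PROCEDURE f();
 */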
413  if (stmt->transitionRels != NIL)
414  {
415  List *varList = stmt->transitionRels;
416  ListCell *lc;
417 
418  foreach(lc, varList)
419  {
420  TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
421 
422  if (!(tt->isTable))
423  ereport(ERROR,
424  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
425  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
426  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
427 
428  /*
429  * Because of the above test, we omit further ROW-related testing
430  * below. If we later allow naming OLD and NEW ROW variables,
431  * adjustments will be needed below.
432  */
433 
434  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
435  ereport(ERROR,
436  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
437  errmsg("\"%s\" is a foreign table",
438  RelationGetRelationName(rel)),
439  errdetail("Triggers on foreign tables cannot have transition tables.")));
440 
441  if (rel->rd_rel->relkind == RELKIND_VIEW)
442  ereport(ERROR,
443  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
444  errmsg("\"%s\" is a view",
445  RelationGetRelationName(rel)),
446  errdetail("Triggers on views cannot have transition tables.")));
447 
448  /*
449  * We currently don't allow row-level triggers with transition
450  * tables on partition or inheritance children. Such triggers
451  * would somehow need to see tuples converted to the format of the
452  * table they're attached to, and it's not clear which subset of
453  * tuples each child should see. See also the prohibitions in
454  * ATExecAttachPartition() and ATExecAddInherit().
455  */
456  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
457  {
458  /* Use appropriate error message. */
459  if (rel->rd_rel->relispartition)
460  ereport(ERROR,
461  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
462  errmsg("ROW triggers with transition tables are not supported on partitions")));
463  else
464  ereport(ERROR,
465  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
466  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
467  }
468 
469  if (stmt->timing != TRIGGER_TYPE_AFTER)
470  ereport(ERROR,
471  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
472  errmsg("transition table name can only be specified for an AFTER trigger")));
473 
474  if (TRIGGER_FOR_TRUNCATE(tgtype))
475  ereport(ERROR,
476  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
477  errmsg("TRUNCATE triggers with transition tables are not supported")));
478 
479  /*
480  * We currently don't allow multi-event triggers ("INSERT OR
481  * UPDATE") with transition tables, because it's not clear how to
482  * handle INSERT ... ON CONFLICT statements which can fire both
483  * INSERT and UPDATE triggers. We show the inserted tuples to
484  * INSERT triggers and the updated tuples to UPDATE triggers, but
485  * it's not yet clear what INSERT OR UPDATE trigger should see.
486  * This restriction could be lifted if we can decide on the right
487  * semantics in a later release.
488  */
489  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
490  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
491  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
492  ereport(ERROR,
493  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
494  errmsg("transition tables cannot be specified for triggers with more than one event")));
495 
496  /*
497  * We currently don't allow column-specific triggers with
498  * transition tables. Per spec, that seems to require
499  * accumulating separate transition tables for each combination of
500  * columns, which is a lot of work for a rather marginal feature.
501  */
502  if (stmt->columns != NIL)
503  ereport(ERROR,
504  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
505  errmsg("transition tables cannot be specified for triggers with column lists")));
506 
507  /*
508  * We disallow constraint triggers with transition tables, to
509  * protect the assumption that such triggers can't be deferred.
510  * See notes with AfterTriggers data structures, below.
511  *
512  * Currently this is enforced by the grammar, so just Assert here.
513  */
514  Assert(!stmt->isconstraint);
515 
516  if (tt->isNew)
517  {
518  if (!(TRIGGER_FOR_INSERT(tgtype) ||
519  TRIGGER_FOR_UPDATE(tgtype)))
520  ereport(ERROR,
521  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
522  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
523 
524  if (newtablename != NULL)
525  ereport(ERROR,
526  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
527  errmsg("NEW TABLE cannot be specified multiple times")));
528 
529  newtablename = tt->name;
530  }
531  else
532  {
533  if (!(TRIGGER_FOR_DELETE(tgtype) ||
534  TRIGGER_FOR_UPDATE(tgtype)))
535  ereport(ERROR,
536  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
537  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
538 
539  if (oldtablename != NULL)
540  ereport(ERROR,
541  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
542  errmsg("OLD TABLE cannot be specified multiple times")));
543 
544  oldtablename = tt->name;
545  }
546  }
547 
548  if (newtablename != NULL && oldtablename != NULL &&
549  strcmp(newtablename, oldtablename) == 0)
550  ereport(ERROR,
551  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
552  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
553  }
554 
555  /*
556  * Parse the WHEN clause, if any and we weren't passed an already
557  * transformed one.
558  *
559  * Note that as a side effect, we fill whenRtable when parsing. If we got
560  * an already parsed clause, this does not occur, which is what we want --
561  * no point in adding redundant dependencies below.
562  */
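/*
 * Example (illustrative only): in a command such as
 *
 *     CREATE TRIGGER t BEFORE UPDATE ON tab FOR EACH ROW
 *         WHEN (OLD.balance IS DISTINCT FROM NEW.balance)
 *         EXECUTE PROCEDURE f();
 *
 * the OLD and NEW column references in the WHEN expression are resolved
 * against the two range table entries set up below, with varno 1 and 2
 * respectively.
 */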
563  if (!whenClause && stmt->whenClause)
564  {
565  ParseState *pstate;
566  RangeTblEntry *rte;
567  List *varList;
568  ListCell *lc;
569 
570  /* Set up a pstate to parse with */
571  pstate = make_parsestate(NULL);
572  pstate->p_sourcetext = queryString;
573 
574  /*
575  * Set up RTEs for OLD and NEW references.
576  *
577  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
578  */
579  rte = addRangeTableEntryForRelation(pstate, rel,
580  makeAlias("old", NIL),
581  false, false);
582  addRTEtoQuery(pstate, rte, false, true, true);
583  rte = addRangeTableEntryForRelation(pstate, rel,
584  makeAlias("new", NIL),
585  false, false);
586  addRTEtoQuery(pstate, rte, false, true, true);
587 
588  /* Transform expression. Copy to be sure we don't modify original */
589  whenClause = transformWhereClause(pstate,
590  copyObject(stmt->whenClause),
591  EXPR_KIND_TRIGGER_WHEN,
592  "WHEN");
593  /* we have to fix its collations too */
594  assign_expr_collations(pstate, whenClause);
595 
596  /*
597  * Check for disallowed references to OLD/NEW.
598  *
599  * NB: pull_var_clause is okay here only because we don't allow
600  * subselects in WHEN clauses; it would fail to examine the contents
601  * of subselects.
602  */
603  varList = pull_var_clause(whenClause, 0);
604  foreach(lc, varList)
605  {
606  Var *var = (Var *) lfirst(lc);
607 
608  switch (var->varno)
609  {
610  case PRS2_OLD_VARNO:
611  if (!TRIGGER_FOR_ROW(tgtype))
612  ereport(ERROR,
613  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
614  errmsg("statement trigger's WHEN condition cannot reference column values"),
615  parser_errposition(pstate, var->location)));
616  if (TRIGGER_FOR_INSERT(tgtype))
617  ereport(ERROR,
618  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
619  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
620  parser_errposition(pstate, var->location)));
621  /* system columns are okay here */
622  break;
623  case PRS2_NEW_VARNO:
624  if (!TRIGGER_FOR_ROW(tgtype))
625  ereport(ERROR,
626  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
627  errmsg("statement trigger's WHEN condition cannot reference column values"),
628  parser_errposition(pstate, var->location)));
629  if (TRIGGER_FOR_DELETE(tgtype))
630  ereport(ERROR,
631  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
632  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
633  parser_errposition(pstate, var->location)));
634  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
635  ereport(ERROR,
636  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
637  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
638  parser_errposition(pstate, var->location)));
639  break;
640  default:
641  /* can't happen without add_missing_from, so just elog */
642  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
643  break;
644  }
645  }
646 
647  /* we'll need the rtable for recordDependencyOnExpr */
648  whenRtable = pstate->p_rtable;
649 
650  qual = nodeToString(whenClause);
651 
652  free_parsestate(pstate);
653  }
654  else if (!whenClause)
655  {
656  whenClause = NULL;
657  whenRtable = NIL;
658  qual = NULL;
659  }
660  else
661  {
662  qual = nodeToString(whenClause);
663  whenRtable = NIL;
664  }
665 
666  /*
667  * Find and validate the trigger function.
668  */
669  if (!OidIsValid(funcoid))
670  funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false);
671  if (!isInternal)
672  {
673  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
674  if (aclresult != ACLCHECK_OK)
675  aclcheck_error(aclresult, OBJECT_FUNCTION,
676  NameListToString(stmt->funcname));
677  }
678  funcrettype = get_func_rettype(funcoid);
679  if (funcrettype != TRIGGEROID)
680  {
681  /*
682  * We allow OPAQUE just so we can load old dump files. When we see a
683  * trigger function declared OPAQUE, change it to TRIGGER.
684  */
685  if (funcrettype == OPAQUEOID)
686  {
687  ereport(WARNING,
688  (errmsg("changing return type of function %s from %s to %s",
689  NameListToString(stmt->funcname),
690  "opaque", "trigger")));
691  SetFunctionReturnType(funcoid, TRIGGEROID);
692  }
693  else
694  ereport(ERROR,
695  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
696  errmsg("function %s must return type %s",
697  NameListToString(stmt->funcname), "trigger")));
698  }
699 
700  /*
701  * If the command is a user-entered CREATE CONSTRAINT TRIGGER command that
702  * references one of the built-in RI_FKey trigger functions, assume it is
703  * from a dump of a pre-7.3 foreign key constraint, and take steps to
704  * convert this legacy representation into a regular foreign key
705  * constraint. Ugly, but necessary for loading old dump files.
706  */
707  if (stmt->isconstraint && !isInternal &&
708  list_length(stmt->args) >= 6 &&
709  (list_length(stmt->args) % 2) == 0 &&
710  RI_FKey_trigger_type(funcoid) != RI_TRIGGER_NONE)
711  {
712  /* Keep lock on target rel until end of xact */
713  heap_close(rel, NoLock);
714 
715  ConvertTriggerToFK(stmt, funcoid);
716 
717  return InvalidObjectAddress;
718  }
719 
720  /*
721  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
722  * corresponding pg_constraint entry.
723  */
724  if (stmt->isconstraint && !OidIsValid(constraintOid))
725  {
726  /* Internal callers should have made their own constraints */
727  Assert(!isInternal);
728  constraintOid = CreateConstraintEntry(stmt->trigname,
729  RelationGetNamespace(rel),
730  CONSTRAINT_TRIGGER,
731  stmt->deferrable,
732  stmt->initdeferred,
733  true,
734  InvalidOid, /* no parent */
735  RelationGetRelid(rel),
736  NULL, /* no conkey */
737  0,
738  0,
739  InvalidOid, /* no domain */
740  InvalidOid, /* no index */
741  InvalidOid, /* no foreign key */
742  NULL,
743  NULL,
744  NULL,
745  NULL,
746  0,
747  ' ',
748  ' ',
749  ' ',
750  NULL, /* no exclusion */
751  NULL, /* no check constraint */
752  NULL,
753  NULL,
754  true, /* islocal */
755  0, /* inhcount */
756  true, /* isnoinherit */
757  isInternal); /* is_internal */
758  }
759 
760  /*
761  * Generate the trigger's OID now, so that we can use it in the name if
762  * needed.
763  */
764  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
765 
766  trigoid = GetNewOid(tgrel);
767 
768  /*
769  * If trigger is internally generated, modify the provided trigger name to
770  * ensure uniqueness by appending the trigger OID. (Callers will usually
771  * supply a simple constant trigger name in these cases.)
772  */
773  if (isInternal)
774  {
775  snprintf(internaltrigname, sizeof(internaltrigname),
776  "%s_%u", stmt->trigname, trigoid);
777  trigname = internaltrigname;
778  }
779  else
780  {
781  /* user-defined trigger; use the specified trigger name as-is */
782  trigname = stmt->trigname;
783  }
784 
785  /*
786  * Scan pg_trigger for existing triggers on relation. We do this only to
787  * give a nice error message if there's already a trigger of the same
788  * name. (The unique index on tgrelid/tgname would complain anyway.) We
789  * can skip this for internally generated triggers, since the name
790  * modification above should be sufficient.
791  *
792  * NOTE that this is cool only because we have ShareRowExclusiveLock on
793  * the relation, so the trigger set won't be changing underneath us.
794  */
795  if (!isInternal)
796  {
797  ScanKeyInit(&key,
798  Anum_pg_trigger_tgrelid,
799  BTEqualStrategyNumber, F_OIDEQ,
800  ObjectIdGetDatum(RelationGetRelid(rel)));
801  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
802  NULL, 1, &key);
803  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
804  {
805  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
806 
807  if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
808  ereport(ERROR,
809  (errcode(ERRCODE_DUPLICATE_OBJECT),
810  errmsg("trigger \"%s\" for relation \"%s\" already exists",
811  trigname, RelationGetRelationName(rel))));
812  }
813  systable_endscan(tgscan);
814  }
815 
816  /*
817  * Build the new pg_trigger tuple.
818  *
819  * When we're creating a trigger in a partition, we mark it as internal,
820  * even though we don't do the isInternal magic in this function. This
821  * makes the triggers in partitions identical to the ones in the
822  * partitioned tables, except that they are marked internal.
823  */
824  memset(nulls, false, sizeof(nulls));
825 
826  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
827  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
828  CStringGetDatum(trigname));
829  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
830  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
831  values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(TRIGGER_FIRES_ON_ORIGIN);
832  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal || in_partition);
833  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
834  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
835  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
836  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
837  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
838 
839  if (stmt->args)
840  {
841  ListCell *le;
842  char *args;
843  int16 nargs = list_length(stmt->args);
844  int len = 0;
845 
846  foreach(le, stmt->args)
847  {
848  char *ar = strVal(lfirst(le));
849 
850  len += strlen(ar) + 4;
851  for (; *ar; ar++)
852  {
853  if (*ar == '\\')
854  len++;
855  }
856  }
857  args = (char *) palloc(len + 1);
858  args[0] = '\0';
859  foreach(le, stmt->args)
860  {
861  char *s = strVal(lfirst(le));
862  char *d = args + strlen(args);
863 
864  while (*s)
865  {
866  if (*s == '\\')
867  *d++ = '\\';
868  *d++ = *s++;
869  }
870  strcpy(d, "\\000");
871  }
872  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
873  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
874  CStringGetDatum(args));
875  }
876  else
877  {
878  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
879  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
880  CStringGetDatum(""));
881  }
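/*
 * To illustrate the encoding just built (the argument values are made-up
 * examples): for EXECUTE PROCEDURE f('a\b', 'xyz'), the string handed to
 * byteain() contains the characters   a\\b\000xyz\000   -- backslashes in
 * each argument are doubled, and each argument is followed by the four
 * characters \000, which byteain() folds back into a single backslash and
 * a NUL terminator inside tgargs.
 */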
882 
883  /* build column number array if it's a column-specific trigger */
884  ncolumns = list_length(stmt->columns);
885  if (ncolumns == 0)
886  columns = NULL;
887  else
888  {
889  ListCell *cell;
890  int i = 0;
891 
892  columns = (int16 *) palloc(ncolumns * sizeof(int16));
893  foreach(cell, stmt->columns)
894  {
895  char *name = strVal(lfirst(cell));
896  int16 attnum;
897  int j;
898 
899  /* Lookup column name. System columns are not allowed */
900  attnum = attnameAttNum(rel, name, false);
901  if (attnum == InvalidAttrNumber)
902  ereport(ERROR,
903  (errcode(ERRCODE_UNDEFINED_COLUMN),
904  errmsg("column \"%s\" of relation \"%s\" does not exist",
905  name, RelationGetRelationName(rel))));
906 
907  /* Check for duplicates */
908  for (j = i - 1; j >= 0; j--)
909  {
910  if (columns[j] == attnum)
911  ereport(ERROR,
912  (errcode(ERRCODE_DUPLICATE_COLUMN),
913  errmsg("column \"%s\" specified more than once",
914  name)));
915  }
916 
917  columns[i++] = attnum;
918  }
919  }
920  tgattr = buildint2vector(columns, ncolumns);
921  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
922 
923  /* set tgqual if trigger has WHEN clause */
924  if (qual)
925  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
926  else
927  nulls[Anum_pg_trigger_tgqual - 1] = true;
928 
929  if (oldtablename)
930  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
931  CStringGetDatum(oldtablename));
932  else
933  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
934  if (newtablename)
935  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
936  CStringGetDatum(newtablename));
937  else
938  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
939 
940  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
941 
942  /* force tuple to have the desired OID */
943  HeapTupleSetOid(tuple, trigoid);
944 
945  /*
946  * Insert tuple into pg_trigger.
947  */
948  CatalogTupleInsert(tgrel, tuple);
949 
950  heap_freetuple(tuple);
951  heap_close(tgrel, RowExclusiveLock);
952 
953  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
954  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
955  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
956  if (oldtablename)
957  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
958  if (newtablename)
959  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
960 
961  /*
962  * Update relation's pg_class entry if necessary; and if not, send an SI
963  * message to make other backends (and this one) rebuild relcache entries.
964  */
965  pgrel = heap_open(RelationRelationId, RowExclusiveLock);
966  tuple = SearchSysCacheCopy1(RELOID,
967  ObjectIdGetDatum(RelationGetRelid(rel)));
968  if (!HeapTupleIsValid(tuple))
969  elog(ERROR, "cache lookup failed for relation %u",
970  RelationGetRelid(rel));
971  if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
972  {
973  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
974 
975  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
976 
977  CacheInvalidateRelcacheByTuple(tuple);
978  }
979  else
980  CacheInvalidateRelcacheByTuple(tuple);
981 
982  heap_freetuple(tuple);
983  heap_close(pgrel, RowExclusiveLock);
984 
985  /*
986  * Record dependencies for trigger. Always place a normal dependency on
987  * the function.
988  */
989  myself.classId = TriggerRelationId;
990  myself.objectId = trigoid;
991  myself.objectSubId = 0;
992 
993  referenced.classId = ProcedureRelationId;
994  referenced.objectId = funcoid;
995  referenced.objectSubId = 0;
996  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
997 
998  if (isInternal && OidIsValid(constraintOid))
999  {
1000  /*
1001  * Internally-generated trigger for a constraint, so make it an
1002  * internal dependency of the constraint. We can skip depending on
1003  * the relation(s), as there'll be an indirect dependency via the
1004  * constraint.
1005  */
1006  referenced.classId = ConstraintRelationId;
1007  referenced.objectId = constraintOid;
1008  referenced.objectSubId = 0;
1009  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1010  }
1011  else
1012  {
1013  /*
1014  * User CREATE TRIGGER, so place dependencies. We make trigger be
1015  * auto-dropped if its relation is dropped or if the FK relation is
1016  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1017  *
1018  * Exception: if this trigger comes from a parent partitioned table,
1019  * then it's not separately drop-able, but goes away if the partition
1020  * does.
1021  */
1022  referenced.classId = RelationRelationId;
1023  referenced.objectId = RelationGetRelid(rel);
1024  referenced.objectSubId = 0;
1025  recordDependencyOn(&myself, &referenced, OidIsValid(parentTriggerOid) ?
1026  DEPENDENCY_INTERNAL_AUTO :
1027  DEPENDENCY_AUTO);
1028 
1029  if (OidIsValid(constrrelid))
1030  {
1031  referenced.classId = RelationRelationId;
1032  referenced.objectId = constrrelid;
1033  referenced.objectSubId = 0;
1034  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1035  }
1036  /* Not possible to have an index dependency in this case */
1037  Assert(!OidIsValid(indexOid));
1038 
1039  /*
1040  * If it's a user-specified constraint trigger, make the constraint
1041  * internally dependent on the trigger instead of vice versa.
1042  */
1043  if (OidIsValid(constraintOid))
1044  {
1045  referenced.classId = ConstraintRelationId;
1046  referenced.objectId = constraintOid;
1047  referenced.objectSubId = 0;
1048  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1049  }
1050 
1051  /* Depends on the parent trigger, if there is one. */
1052  if (OidIsValid(parentTriggerOid))
1053  {
1054  ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1055  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL_AUTO);
1056  }
1057  }
1058 
1059  /* If column-specific trigger, add normal dependencies on columns */
1060  if (columns != NULL)
1061  {
1062  int i;
1063 
1064  referenced.classId = RelationRelationId;
1065  referenced.objectId = RelationGetRelid(rel);
1066  for (i = 0; i < ncolumns; i++)
1067  {
1068  referenced.objectSubId = columns[i];
1069  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1070  }
1071  }
1072 
1073  /*
1074  * If it has a WHEN clause, add dependencies on objects mentioned in the
1075  * expression (eg, functions, as well as any columns used).
1076  */
1077  if (whenRtable != NIL)
1078  recordDependencyOnExpr(&myself, whenClause, whenRtable,
1079  DEPENDENCY_NORMAL);
1080 
1081  /* Post creation hook for new trigger */
1082  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1083  isInternal);
1084 
1085  /*
1086  * Lastly, create the trigger on child relations, if needed.
1087  */
1088  if (partition_recurse)
1089  {
1090  PartitionDesc partdesc = RelationGetPartitionDesc(rel);
1091  List *idxs = NIL;
1092  List *childTbls = NIL;
1093  ListCell *l;
1094  int i;
1095  MemoryContext oldcxt,
1096  perChildCxt;
1097 
1099  "part trig clone",
1101 
1102  /*
1103  * When a trigger is being created associated with an index, we'll
1104  * need to associate the trigger in each child partition with the
1105  * corresponding index on it.
1106  */
1107  if (OidIsValid(indexOid))
1108  {
1109  ListCell *l;
1110  List *idxs = NIL;
1111 
1111 
1112  idxs = find_inheritance_children(indexOid, ShareRowExclusiveLock);
1113  foreach(l, idxs)
1114  childTbls = lappend_oid(childTbls,
1115  IndexGetRelation(lfirst_oid(l),
1116  false));
1117  }
1118 
1119  oldcxt = MemoryContextSwitchTo(perChildCxt);
1120 
1121  /* Iterate to create the trigger on each existing partition */
1122  for (i = 0; i < partdesc->nparts; i++)
1123  {
1124  Oid indexOnChild = InvalidOid;
1125  ListCell *l2;
1126  CreateTrigStmt *childStmt;
1127  Relation childTbl;
1128  Node *qual;
1129  bool found_whole_row;
1130 
1131  childTbl = heap_open(partdesc->oids[i], ShareRowExclusiveLock);
1132 
1133  /* Find which of the child indexes is the one on this partition */
1134  if (OidIsValid(indexOid))
1135  {
1136  forboth(l, idxs, l2, childTbls)
1137  {
1138  if (lfirst_oid(l2) == partdesc->oids[i])
1139  {
1140  indexOnChild = lfirst_oid(l);
1141  break;
1142  }
1143  }
1144  if (!OidIsValid(indexOnChild))
1145  elog(ERROR, "failed to find index matching index \"%s\" in partition \"%s\"",
1146  get_rel_name(indexOid),
1147  get_rel_name(partdesc->oids[i]));
1148  }
1149 
1150  /*
1151  * Initialize our fabricated parse node by copying the original
1152  * one, then resetting fields that we pass separately.
1153  */
1154  childStmt = (CreateTrigStmt *) copyObject(stmt);
1155  childStmt->funcname = NIL;
1156  childStmt->args = NIL;
1157  childStmt->whenClause = NULL;
1158 
1159  /* If there is a WHEN clause, create a modified copy of it */
1160  qual = copyObject(whenClause);
1161  qual = (Node *)
1162  map_partition_varattnos((List *) qual, PRS2_OLD_VARNO,
1163  childTbl, rel,
1164  &found_whole_row);
1165  if (found_whole_row)
1166  elog(ERROR, "unexpected whole-row reference found in trigger WHEN clause");
1167  qual = (Node *)
1168  map_partition_varattnos((List *) qual, PRS2_NEW_VARNO,
1169  childTbl, rel,
1170  &found_whole_row);
1171  if (found_whole_row)
1172  elog(ERROR, "unexpected whole-row reference found in trigger WHEN clause");
1173 
1174  CreateTrigger(childStmt, queryString,
1175  partdesc->oids[i], refRelOid,
1176  InvalidOid, indexOnChild,
1177  funcoid, trigoid, qual,
1178  isInternal, true);
1179 
1180  heap_close(childTbl, NoLock);
1181 
1182  MemoryContextReset(perChildCxt);
1183  }
1184 
1185  MemoryContextSwitchTo(oldcxt);
1186  MemoryContextDelete(perChildCxt);
1187  list_free(idxs);
1188  list_free(childTbls);
1189  }
1190 
1191  /* Keep lock on target rel until end of xact */
1192  heap_close(rel, NoLock);
1193 
1194  return myself;
1195 }
1196 
1197 
1198 /*
1199  * Convert legacy (pre-7.3) CREATE CONSTRAINT TRIGGER commands into
1200  * full-fledged foreign key constraints.
1201  *
1202  * The conversion is complex because a pre-7.3 foreign key involved three
1203  * separate triggers, which were reported separately in dumps. While the
1204  * single trigger on the referencing table adds no new information, we need
1205  * to know the trigger functions of both of the triggers on the referenced
1206  * table to build the constraint declaration. Also, due to lack of proper
1207  * dependency checking pre-7.3, it is possible that the source database had
1208  * an incomplete set of triggers resulting in an only partially enforced
1209  * FK constraint. (This would happen if one of the tables had been dropped
1210  * and re-created, but only if the DB had been affected by a 7.0 pg_dump bug
1211  * that caused loss of tgconstrrelid information.) We choose to translate to
1212  * an FK constraint only when we've seen all three triggers of a set. This is
1213  * implemented by storing unmatched items in a list in TopMemoryContext.
1214  * We match triggers together by comparing the trigger arguments (which
1215  * include constraint name, table and column names, so should be good enough).
1216  */
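/*
 * For reference, the argument list of each such legacy trigger looks
 * roughly like
 *     {"conname", "fktab", "pktab", "UNSPECIFIED", "fkcol1", "pkcol1", ...}
 * that is: constraint name, referencing table, referenced table, match type
 * ("FULL", or anything else meaning MATCH SIMPLE), then alternating
 * referencing/referenced column names.  The parsing below assumes this
 * layout.
 */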
1217 typedef struct
1218 {
1219  List *args; /* list of (T_String) Values or NIL */
1220  Oid funcoids[3]; /* OIDs of trigger functions */
1221  /* The three function OIDs are stored in the order update, delete, child */
1222 } OldTriggerInfo;
1223 
1224 static void
1225 ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid)
1226 {
1227  static List *info_list = NIL;
1228 
1229  static const char *const funcdescr[3] = {
1230  gettext_noop("Found referenced table's UPDATE trigger."),
1231  gettext_noop("Found referenced table's DELETE trigger."),
1232  gettext_noop("Found referencing table's trigger.")
1233  };
1234 
1235  char *constr_name;
1236  char *fk_table_name;
1237  char *pk_table_name;
1238  char fk_matchtype = FKCONSTR_MATCH_SIMPLE;
1239  List *fk_attrs = NIL;
1240  List *pk_attrs = NIL;
1241  StringInfoData buf;
1242  int funcnum;
1243  OldTriggerInfo *info = NULL;
1244  ListCell *l;
1245  int i;
1246 
1247  /* Parse out the trigger arguments */
1248  constr_name = strVal(linitial(stmt->args));
1249  fk_table_name = strVal(lsecond(stmt->args));
1250  pk_table_name = strVal(lthird(stmt->args));
1251  i = 0;
1252  foreach(l, stmt->args)
1253  {
1254  Value *arg = (Value *) lfirst(l);
1255 
1256  i++;
1257  if (i < 4) /* skip constraint and table names */
1258  continue;
1259  if (i == 4) /* handle match type */
1260  {
1261  if (strcmp(strVal(arg), "FULL") == 0)
1262  fk_matchtype = FKCONSTR_MATCH_FULL;
1263  else
1264  fk_matchtype = FKCONSTR_MATCH_SIMPLE;
1265  continue;
1266  }
1267  if (i % 2)
1268  fk_attrs = lappend(fk_attrs, arg);
1269  else
1270  pk_attrs = lappend(pk_attrs, arg);
1271  }
1272 
1273  /* Prepare description of constraint for use in messages */
1274  initStringInfo(&buf);
1275  appendStringInfo(&buf, "FOREIGN KEY %s(",
1276  quote_identifier(fk_table_name));
1277  i = 0;
1278  foreach(l, fk_attrs)
1279  {
1280  Value *arg = (Value *) lfirst(l);
1281 
1282  if (i++ > 0)
1283  appendStringInfoChar(&buf, ',');
1284  appendStringInfoString(&buf, quote_identifier(strVal(arg)));
1285  }
1286  appendStringInfo(&buf, ") REFERENCES %s(",
1287  quote_identifier(pk_table_name));
1288  i = 0;
1289  foreach(l, pk_attrs)
1290  {
1291  Value *arg = (Value *) lfirst(l);
1292 
1293  if (i++ > 0)
1294  appendStringInfoChar(&buf, ',');
1295  appendStringInfoString(&buf, quote_identifier(strVal(arg)));
1296  }
1297  appendStringInfoChar(&buf, ')');
1298 
1299  /* Identify class of trigger --- update, delete, or referencing-table */
1300  switch (funcoid)
1301  {
1302  case F_RI_FKEY_CASCADE_UPD:
1303  case F_RI_FKEY_RESTRICT_UPD:
1304  case F_RI_FKEY_SETNULL_UPD:
1305  case F_RI_FKEY_SETDEFAULT_UPD:
1306  case F_RI_FKEY_NOACTION_UPD:
1307  funcnum = 0;
1308  break;
1309 
1310  case F_RI_FKEY_CASCADE_DEL:
1311  case F_RI_FKEY_RESTRICT_DEL:
1312  case F_RI_FKEY_SETNULL_DEL:
1313  case F_RI_FKEY_SETDEFAULT_DEL:
1314  case F_RI_FKEY_NOACTION_DEL:
1315  funcnum = 1;
1316  break;
1317 
1318  default:
1319  funcnum = 2;
1320  break;
1321  }
1322 
1323  /* See if we have a match to this trigger */
1324  foreach(l, info_list)
1325  {
1326  info = (OldTriggerInfo *) lfirst(l);
1327  if (info->funcoids[funcnum] == InvalidOid &&
1328  equal(info->args, stmt->args))
1329  {
1330  info->funcoids[funcnum] = funcoid;
1331  break;
1332  }
1333  }
1334 
1335  if (l == NULL)
1336  {
1337  /* First trigger of set, so create a new list entry */
1338  MemoryContext oldContext;
1339 
1340  ereport(NOTICE,
1341  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1342  constr_name, buf.data),
1343  errdetail_internal("%s", _(funcdescr[funcnum]))));
1344  oldContext = MemoryContextSwitchTo(TopMemoryContext);
1345  info = (OldTriggerInfo *) palloc0(sizeof(OldTriggerInfo));
1346  info->args = copyObject(stmt->args);
1347  info->funcoids[funcnum] = funcoid;
1348  info_list = lappend(info_list, info);
1349  MemoryContextSwitchTo(oldContext);
1350  }
1351  else if (info->funcoids[0] == InvalidOid ||
1352  info->funcoids[1] == InvalidOid ||
1353  info->funcoids[2] == InvalidOid)
1354  {
1355  /* Second trigger of set */
1356  ereport(NOTICE,
1357  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1358  constr_name, buf.data),
1359  errdetail_internal("%s", _(funcdescr[funcnum]))));
1360  }
1361  else
1362  {
1363  /* OK, we have a set, so make the FK constraint ALTER TABLE cmd */
1364  AlterTableStmt *atstmt = makeNode(AlterTableStmt);
1365  AlterTableCmd *atcmd = makeNode(AlterTableCmd);
1366  Constraint *fkcon = makeNode(Constraint);
1367  PlannedStmt *wrapper = makeNode(PlannedStmt);
1368 
1369  ereport(NOTICE,
1370  (errmsg("converting trigger group into constraint \"%s\" %s",
1371  constr_name, buf.data),
1372  errdetail_internal("%s", _(funcdescr[funcnum]))));
1373  fkcon->contype = CONSTR_FOREIGN;
1374  fkcon->location = -1;
1375  if (funcnum == 2)
1376  {
1377  /* This trigger is on the FK table */
1378  atstmt->relation = stmt->relation;
1379  if (stmt->constrrel)
1380  fkcon->pktable = stmt->constrrel;
1381  else
1382  {
1383  /* Work around ancient pg_dump bug that omitted constrrel */
1384  fkcon->pktable = makeRangeVar(NULL, pk_table_name, -1);
1385  }
1386  }
1387  else
1388  {
1389  /* This trigger is on the PK table */
1390  fkcon->pktable = stmt->relation;
1391  if (stmt->constrrel)
1392  atstmt->relation = stmt->constrrel;
1393  else
1394  {
1395  /* Work around ancient pg_dump bug that omitted constrrel */
1396  atstmt->relation = makeRangeVar(NULL, fk_table_name, -1);
1397  }
1398  }
1399  atstmt->cmds = list_make1(atcmd);
1400  atstmt->relkind = OBJECT_TABLE;
1401  atcmd->subtype = AT_AddConstraint;
1402  atcmd->def = (Node *) fkcon;
1403  if (strcmp(constr_name, "<unnamed>") == 0)
1404  fkcon->conname = NULL;
1405  else
1406  fkcon->conname = constr_name;
1407  fkcon->fk_attrs = fk_attrs;
1408  fkcon->pk_attrs = pk_attrs;
1409  fkcon->fk_matchtype = fk_matchtype;
1410  switch (info->funcoids[0])
1411  {
1412  case F_RI_FKEY_NOACTION_UPD:
1413  fkcon->fk_upd_action = FKCONSTR_ACTION_NOACTION;
1414  break;
1415  case F_RI_FKEY_CASCADE_UPD:
1416  fkcon->fk_upd_action = FKCONSTR_ACTION_CASCADE;
1417  break;
1418  case F_RI_FKEY_RESTRICT_UPD:
1419  fkcon->fk_upd_action = FKCONSTR_ACTION_RESTRICT;
1420  break;
1421  case F_RI_FKEY_SETNULL_UPD:
1422  fkcon->fk_upd_action = FKCONSTR_ACTION_SETNULL;
1423  break;
1424  case F_RI_FKEY_SETDEFAULT_UPD:
1425  fkcon->fk_upd_action = FKCONSTR_ACTION_SETDEFAULT;
1426  break;
1427  default:
1428  /* can't get here because of earlier checks */
1429  elog(ERROR, "confused about RI update function");
1430  }
1431  switch (info->funcoids[1])
1432  {
1433  case F_RI_FKEY_NOACTION_DEL:
1434  fkcon->fk_del_action = FKCONSTR_ACTION_NOACTION;
1435  break;
1436  case F_RI_FKEY_CASCADE_DEL:
1437  fkcon->fk_del_action = FKCONSTR_ACTION_CASCADE;
1438  break;
1439  case F_RI_FKEY_RESTRICT_DEL:
1440  fkcon->fk_del_action = FKCONSTR_ACTION_RESTRICT;
1441  break;
1442  case F_RI_FKEY_SETNULL_DEL:
1443  fkcon->fk_del_action = FKCONSTR_ACTION_SETNULL;
1444  break;
1445  case F_RI_FKEY_SETDEFAULT_DEL:
1446  fkcon->fk_del_action = FKCONSTR_ACTION_SETDEFAULT;
1447  break;
1448  default:
1449  /* can't get here because of earlier checks */
1450  elog(ERROR, "confused about RI delete function");
1451  }
1452  fkcon->deferrable = stmt->deferrable;
1453  fkcon->initdeferred = stmt->initdeferred;
1454  fkcon->skip_validation = false;
1455  fkcon->initially_valid = true;
1456 
1457  /* finally, wrap it in a dummy PlannedStmt */
1458  wrapper->commandType = CMD_UTILITY;
1459  wrapper->canSetTag = false;
1460  wrapper->utilityStmt = (Node *) atstmt;
1461  wrapper->stmt_location = -1;
1462  wrapper->stmt_len = -1;
1463 
1464  /* ... and execute it */
1465  ProcessUtility(wrapper,
1466  "(generated ALTER TABLE ADD FOREIGN KEY command)",
1467  PROCESS_UTILITY_SUBCOMMAND, NULL, NULL,
1468  None_Receiver, NULL);
1469 
1470  /* Remove the matched item from the list */
1471  info_list = list_delete_ptr(info_list, info);
1472  pfree(info);
1473  /* We leak the copied args ... not worth worrying about */
1474  }
1475 }
1476 
1477 /*
1478  * Guts of trigger deletion.
1479  */
1480 void
1481 RemoveTriggerById(Oid trigOid)
1482 {
1483  Relation tgrel;
1484  SysScanDesc tgscan;
1485  ScanKeyData skey[1];
1486  HeapTuple tup;
1487  Oid relid;
1488  Relation rel;
1489 
1490  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1491 
1492  /*
1493  * Find the trigger to delete.
1494  */
1495  ScanKeyInit(&skey[0],
1496  ObjectIdAttributeNumber,
1497  BTEqualStrategyNumber, F_OIDEQ,
1498  ObjectIdGetDatum(trigOid));
1499 
1500  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1501  NULL, 1, skey);
1502 
1503  tup = systable_getnext(tgscan);
1504  if (!HeapTupleIsValid(tup))
1505  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1506 
1507  /*
1508  * Open and exclusive-lock the relation the trigger belongs to.
1509  */
1510  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1511 
1512  rel = heap_open(relid, AccessExclusiveLock);
1513 
1514  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1515  rel->rd_rel->relkind != RELKIND_VIEW &&
1516  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1517  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1518  ereport(ERROR,
1519  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1520  errmsg("\"%s\" is not a table, view, or foreign table",
1521  RelationGetRelationName(rel))));
1522 
1523  if (!allowSystemTableMods && IsSystemRelation(rel))
1524  ereport(ERROR,
1525  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1526  errmsg("permission denied: \"%s\" is a system catalog",
1527  RelationGetRelationName(rel))));
1528 
1529  /*
1530  * Delete the pg_trigger tuple.
1531  */
1532  CatalogTupleDelete(tgrel, &tup->t_self);
1533 
1534  systable_endscan(tgscan);
1535  heap_close(tgrel, RowExclusiveLock);
1536 
1537  /*
1538  * We do not bother to try to determine whether any other triggers remain,
1539  * which would be needed in order to decide whether it's safe to clear the
1540  * relation's relhastriggers. (In any case, there might be a concurrent
1541  * process adding new triggers.) Instead, just force a relcache inval to
1542  * make other backends (and this one too!) rebuild their relcache entries.
1543  * There's no great harm in leaving relhastriggers true even if there are
1544  * no triggers left.
1545  */
1546  CacheInvalidateRelcache(rel);
1547 
1548  /* Keep lock on trigger's rel until end of xact */
1549  heap_close(rel, NoLock);
1550 }
1551 
1552 /*
1553  * get_trigger_oid - Look up a trigger by name to find its OID.
1554  *
1555  * If missing_ok is false, throw an error if trigger not found. If
1556  * true, just return InvalidOid.
1557  */
1558 Oid
1559 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1560 {
1561  Relation tgrel;
1562  ScanKeyData skey[2];
1563  SysScanDesc tgscan;
1564  HeapTuple tup;
1565  Oid oid;
1566 
1567  /*
1568  * Find the trigger, verify permissions, set up object address
1569  */
1570  tgrel = heap_open(TriggerRelationId, AccessShareLock);
1571 
1572  ScanKeyInit(&skey[0],
1573  Anum_pg_trigger_tgrelid,
1574  BTEqualStrategyNumber, F_OIDEQ,
1575  ObjectIdGetDatum(relid));
1576  ScanKeyInit(&skey[1],
1577  Anum_pg_trigger_tgname,
1578  BTEqualStrategyNumber, F_NAMEEQ,
1579  CStringGetDatum(trigname));
1580 
1581  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1582  NULL, 2, skey);
1583 
1584  tup = systable_getnext(tgscan);
1585 
1586  if (!HeapTupleIsValid(tup))
1587  {
1588  if (!missing_ok)
1589  ereport(ERROR,
1590  (errcode(ERRCODE_UNDEFINED_OBJECT),
1591  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1592  trigname, get_rel_name(relid))));
1593  oid = InvalidOid;
1594  }
1595  else
1596  {
1597  oid = HeapTupleGetOid(tup);
1598  }
1599 
1600  systable_endscan(tgscan);
1601  heap_close(tgrel, AccessShareLock);
1602  return oid;
1603 }
1604 
1605 /*
1606  * Perform permissions and integrity checks before acquiring a relation lock.
1607  */
1608 static void
1609 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1610  void *arg)
1611 {
1612  HeapTuple tuple;
1613  Form_pg_class form;
1614 
1615  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1616  if (!HeapTupleIsValid(tuple))
1617  return; /* concurrently dropped */
1618  form = (Form_pg_class) GETSTRUCT(tuple);
1619 
1620  /* only tables and views can have triggers */
1621  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1622  form->relkind != RELKIND_FOREIGN_TABLE &&
1623  form->relkind != RELKIND_PARTITIONED_TABLE)
1624  ereport(ERROR,
1625  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1626  errmsg("\"%s\" is not a table, view, or foreign table",
1627  rv->relname)));
1628 
1629  /* you must own the table to rename one of its triggers */
1630  if (!pg_class_ownercheck(relid, GetUserId()))
1631  aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname);
1632  if (!allowSystemTableMods && IsSystemClass(relid, form))
1633  ereport(ERROR,
1634  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1635  errmsg("permission denied: \"%s\" is a system catalog",
1636  rv->relname)));
1637 
1638  ReleaseSysCache(tuple);
1639 }
1640 
1641 /*
1642  * renametrig - changes the name of a trigger on a relation
1643  *
1644  * trigger name is changed in trigger catalog.
1645  * No record of the previous name is kept.
1646  *
1647  * get proper relrelation from relation catalog (if not arg)
1648  * scan trigger catalog
1649  * for name conflict (within rel)
1650  * for original trigger (if not arg)
1651  * modify tgname in trigger tuple
1652  * update row in catalog
1653  */
1654 ObjectAddress
1655 renametrig(RenameStmt *stmt)
1656 {
1657  Oid tgoid;
1658  Relation targetrel;
1659  Relation tgrel;
1660  HeapTuple tuple;
1661  SysScanDesc tgscan;
1662  ScanKeyData key[2];
1663  Oid relid;
1664  ObjectAddress address;
1665 
1666  /*
1667  * Look up name, check permissions, and acquire lock (which we will NOT
1668  * release until end of transaction).
1669  */
1670  relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1671  0,
1672  RangeVarCallbackForRenameTrigger,
1673  NULL);
1674 
1675  /* Have lock already, so just need to build relcache entry. */
1676  targetrel = relation_open(relid, NoLock);
1677 
1678  /*
1679  * Scan pg_trigger twice for existing triggers on relation. We do this in
1680  * order to ensure a trigger does not exist with newname (The unique index
1681  * on tgrelid/tgname would complain anyway) and to ensure a trigger does
1682  * exist with oldname.
1683  *
1684  * NOTE that this is cool only because we have AccessExclusiveLock on the
1685  * relation, so the trigger set won't be changing underneath us.
1686  */
1687  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1688 
1689  /*
1690  * First pass -- look for name conflict
1691  */
1692  ScanKeyInit(&key[0],
1693  Anum_pg_trigger_tgrelid,
1694  BTEqualStrategyNumber, F_OIDEQ,
1695  ObjectIdGetDatum(relid));
1696  ScanKeyInit(&key[1],
1697  Anum_pg_trigger_tgname,
1698  BTEqualStrategyNumber, F_NAMEEQ,
1699  PointerGetDatum(stmt->newname));
1700  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1701  NULL, 2, key);
1702  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1703  ereport(ERROR,
1704  (errcode(ERRCODE_DUPLICATE_OBJECT),
1705  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1706  stmt->newname, RelationGetRelationName(targetrel))));
1707  systable_endscan(tgscan);
1708 
1709  /*
1710  * Second pass -- look for trigger existing with oldname and update
1711  */
1712  ScanKeyInit(&key[0],
1713  Anum_pg_trigger_tgrelid,
1714  BTEqualStrategyNumber, F_OIDEQ,
1715  ObjectIdGetDatum(relid));
1716  ScanKeyInit(&key[1],
1717  Anum_pg_trigger_tgname,
1718  BTEqualStrategyNumber, F_NAMEEQ,
1719  PointerGetDatum(stmt->subname));
1720  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1721  NULL, 2, key);
1722  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1723  {
1724  tgoid = HeapTupleGetOid(tuple);
1725 
1726  /*
1727  * Update pg_trigger tuple with new tgname.
1728  */
1729  tuple = heap_copytuple(tuple); /* need a modifiable copy */
1730 
1731  namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname,
1732  stmt->newname);
1733 
1734  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1735 
1736  InvokeObjectPostAlterHook(TriggerRelationId,
1737  HeapTupleGetOid(tuple), 0);
1738 
1739  /*
1740  * Invalidate relation's relcache entry so that other backends (and
1741  * this one too!) are sent SI message to make them rebuild relcache
1742  * entries. (Ideally this should happen automatically...)
1743  */
1744  CacheInvalidateRelcache(targetrel);
1745  }
1746  else
1747  {
1748  ereport(ERROR,
1749  (errcode(ERRCODE_UNDEFINED_OBJECT),
1750  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1751  stmt->subname, RelationGetRelationName(targetrel))));
1752  }
1753 
1754  ObjectAddressSet(address, TriggerRelationId, tgoid);
1755 
1756  systable_endscan(tgscan);
1757 
1758  heap_close(tgrel, RowExclusiveLock);
1759 
1760  /*
1761  * Close rel, but keep exclusive lock!
1762  */
1763  relation_close(targetrel, NoLock);
1764 
1765  return address;
1766 }
1767 
1768 
1769 /*
1770  * EnableDisableTrigger()
1771  *
1772  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1773  * to change 'tgenabled' field for the specified trigger(s)
1774  *
1775  * rel: relation to process (caller must hold suitable lock on it)
1776  * tgname: trigger to process, or NULL to scan all triggers
1777  * fires_when: new value for tgenabled field. In addition to generic
1778  * enablement/disablement, this also defines when the trigger
1779  * should be fired in session replication roles.
1780  * skip_system: if true, skip "system" triggers (constraint triggers)
1781  *
1782  * Caller should have checked permissions for the table; here we also
1783  * enforce that superuser privilege is required to alter the state of
1784  * system triggers
1785  */
1786 void
1787 EnableDisableTrigger(Relation rel, const char *tgname,
1788  char fires_when, bool skip_system, LOCKMODE lockmode)
1789 {
1790  Relation tgrel;
1791  int nkeys;
1792  ScanKeyData keys[2];
1793  SysScanDesc tgscan;
1794  HeapTuple tuple;
1795  bool found;
1796  bool changed;
1797 
1798  /* Scan the relevant entries in pg_triggers */
1799  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1800 
1801  ScanKeyInit(&keys[0],
1802  Anum_pg_trigger_tgrelid,
1803  BTEqualStrategyNumber, F_OIDEQ,
1804  ObjectIdGetDatum(RelationGetRelid(rel)));
1805  if (tgname)
1806  {
1807  ScanKeyInit(&keys[1],
1808  Anum_pg_trigger_tgname,
1809  BTEqualStrategyNumber, F_NAMEEQ,
1810  CStringGetDatum(tgname));
1811  nkeys = 2;
1812  }
1813  else
1814  nkeys = 1;
1815 
1816  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1817  NULL, nkeys, keys);
1818 
1819  found = changed = false;
1820 
1821  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1822  {
1823  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1824 
1825  if (oldtrig->tgisinternal)
1826  {
1827  /* system trigger ... ok to process? */
1828  if (skip_system)
1829  continue;
1830  if (!superuser())
1831  ereport(ERROR,
1832  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1833  errmsg("permission denied: \"%s\" is a system trigger",
1834  NameStr(oldtrig->tgname))));
1835  }
1836 
1837  found = true;
1838 
1839  if (oldtrig->tgenabled != fires_when)
1840  {
1841  /* need to change this one ... make a copy to scribble on */
1842  HeapTuple newtup = heap_copytuple(tuple);
1843  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1844 
1845  newtrig->tgenabled = fires_when;
1846 
1847  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1848 
1849  heap_freetuple(newtup);
1850 
1851  /*
1852  * When altering FOR EACH ROW triggers on a partitioned table, do
1853  * the same on the partitions as well.
1854  */
1855  if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
1856  (TRIGGER_FOR_ROW(oldtrig->tgtype)))
1857  {
1858  PartitionDesc partdesc = RelationGetPartitionDesc(rel);
1859  int i;
1860 
1861  for (i = 0; i < partdesc->nparts; i++)
1862  {
1863  Relation part;
1864 
1865  part = relation_open(partdesc->oids[i], lockmode);
1866  EnableDisableTrigger(part, NameStr(oldtrig->tgname),
1867  fires_when, skip_system, lockmode);
1868  heap_close(part, NoLock); /* keep lock till commit */
1869  }
1870  }
1871 
1872  changed = true;
1873  }
1874 
1875  InvokeObjectPostAlterHook(TriggerRelationId,
1876  HeapTupleGetOid(tuple), 0);
1877  }
1878 
1879  systable_endscan(tgscan);
1880 
1881  heap_close(tgrel, RowExclusiveLock);
1882 
1883  if (tgname && !found)
1884  ereport(ERROR,
1885  (errcode(ERRCODE_UNDEFINED_OBJECT),
1886  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1887  tgname, RelationGetRelationName(rel))));
1888 
1889  /*
1890  * If we changed anything, broadcast a SI inval message to force each
1891  * backend (including our own!) to rebuild relation's relcache entry.
1892  * Otherwise they will fail to apply the change promptly.
1893  */
1894  if (changed)
1895  CacheInvalidateRelcache(rel);
1896 }
1897 
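For orientation, the fires_when character that EnableDisableTrigger() writes into tgenabled corresponds to the ALTER TABLE variants roughly as follows; this is a summary of the TRIGGER_FIRES_* / TRIGGER_DISABLED constants (see catalog/pg_trigger.h for the authoritative definitions):

/*
 * ENABLE TRIGGER          -> TRIGGER_FIRES_ON_ORIGIN  ('O')
 * ENABLE REPLICA TRIGGER  -> TRIGGER_FIRES_ON_REPLICA ('R')
 * ENABLE ALWAYS TRIGGER   -> TRIGGER_FIRES_ALWAYS     ('A')
 * DISABLE TRIGGER         -> TRIGGER_DISABLED         ('D')
 */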
1898 
1899 /*
1900  * Build trigger data to attach to the given relcache entry.
1901  *
1902  * Note that trigger data attached to a relcache entry must be stored in
1903  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1904  * But we should be running in a less long-lived working context. To avoid
1905  * leaking cache memory if this routine fails partway through, we build a
1906  * temporary TriggerDesc in working memory and then copy the completed
1907  * structure into cache memory.
1908  */
1909 void
1910 RelationBuildTriggers(Relation relation)
1911 {
1912  TriggerDesc *trigdesc;
1913  int numtrigs;
1914  int maxtrigs;
1915  Trigger *triggers;
1916  Relation tgrel;
1917  ScanKeyData skey;
1918  SysScanDesc tgscan;
1919  HeapTuple htup;
1920  MemoryContext oldContext;
1921  int i;
1922 
1923  /*
1924  * Allocate a working array to hold the triggers (the array is extended if
1925  * necessary)
1926  */
1927  maxtrigs = 16;
1928  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1929  numtrigs = 0;
1930 
1931  /*
1932  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1933  * be reading the triggers in name order, except possibly during
1934  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1935  * ensures that triggers will be fired in name order.
1936  */
1937  ScanKeyInit(&skey,
1938  Anum_pg_trigger_tgrelid,
1939  BTEqualStrategyNumber, F_OIDEQ,
1940  ObjectIdGetDatum(RelationGetRelid(relation)));
1941 
1942  tgrel = heap_open(TriggerRelationId, AccessShareLock);
1943  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1944  NULL, 1, &skey);
1945 
1946  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1947  {
1948  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1949  Trigger *build;
1950  Datum datum;
1951  bool isnull;
1952 
1953  if (numtrigs >= maxtrigs)
1954  {
1955  maxtrigs *= 2;
1956  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1957  }
1958  build = &(triggers[numtrigs]);
1959 
1960  build->tgoid = HeapTupleGetOid(htup);
1961  build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1962  NameGetDatum(&pg_trigger->tgname)));
1963  build->tgfoid = pg_trigger->tgfoid;
1964  build->tgtype = pg_trigger->tgtype;
1965  build->tgenabled = pg_trigger->tgenabled;
1966  build->tgisinternal = pg_trigger->tgisinternal;
1967  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1968  build->tgconstrindid = pg_trigger->tgconstrindid;
1969  build->tgconstraint = pg_trigger->tgconstraint;
1970  build->tgdeferrable = pg_trigger->tgdeferrable;
1971  build->tginitdeferred = pg_trigger->tginitdeferred;
1972  build->tgnargs = pg_trigger->tgnargs;
1973  /* tgattr is first var-width field, so OK to access directly */
1974  build->tgnattr = pg_trigger->tgattr.dim1;
1975  if (build->tgnattr > 0)
1976  {
1977  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1978  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1979  build->tgnattr * sizeof(int16));
1980  }
1981  else
1982  build->tgattr = NULL;
1983  if (build->tgnargs > 0)
1984  {
1985  bytea *val;
1986  char *p;
1987 
1988  val = DatumGetByteaPP(fastgetattr(htup,
1989  Anum_pg_trigger_tgargs,
1990  tgrel->rd_att, &isnull));
1991  if (isnull)
1992  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1993  RelationGetRelationName(relation));
1994  p = (char *) VARDATA_ANY(val);
1995  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1996  for (i = 0; i < build->tgnargs; i++)
1997  {
1998  build->tgargs[i] = pstrdup(p);
1999  p += strlen(p) + 1;
2000  }
2001  }
2002  else
2003  build->tgargs = NULL;
2004 
2005  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
2006  tgrel->rd_att, &isnull);
2007  if (!isnull)
2008  build->tgoldtable =
2009  DatumGetCString(DirectFunctionCall1(nameout, datum));
2010  else
2011  build->tgoldtable = NULL;
2012 
2013  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
2014  tgrel->rd_att, &isnull);
2015  if (!isnull)
2016  build->tgnewtable =
2017  DatumGetCString(DirectFunctionCall1(nameout, datum));
2018  else
2019  build->tgnewtable = NULL;
2020 
2021  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
2022  tgrel->rd_att, &isnull);
2023  if (!isnull)
2024  build->tgqual = TextDatumGetCString(datum);
2025  else
2026  build->tgqual = NULL;
2027 
2028  numtrigs++;
2029  }
2030 
2031  systable_endscan(tgscan);
2032  heap_close(tgrel, AccessShareLock);
2033 
2034  /* There might not be any triggers */
2035  if (numtrigs == 0)
2036  {
2037  pfree(triggers);
2038  return;
2039  }
2040 
2041  /* Build trigdesc */
2042  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
2043  trigdesc->triggers = triggers;
2044  trigdesc->numtriggers = numtrigs;
2045  for (i = 0; i < numtrigs; i++)
2046  SetTriggerFlags(trigdesc, &(triggers[i]));
2047 
2048  /* Copy completed trigdesc into cache storage */
2049  oldContext = MemoryContextSwitchTo(CacheMemoryContext);
2050  relation->trigdesc = CopyTriggerDesc(trigdesc);
2051  MemoryContextSwitchTo(oldContext);
2052 
2053  /* Release working memory */
2054  FreeTriggerDesc(trigdesc);
2055 }
2056 
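Because the scan above uses TriggerRelidNameIndexId, the triggers[] array ends up in name order, which is also firing order. A hedged sketch of how code holding an already-built TriggerDesc could locate a trigger by name (the helper and its callers are illustrative, not part of trigger.c):

static Oid
lookup_trigger_oid(TriggerDesc *trigdesc, const char *name)
{
	int		i;

	for (i = 0; trigdesc != NULL && i < trigdesc->numtriggers; i++)
	{
		if (strcmp(trigdesc->triggers[i].tgname, name) == 0)
			return trigdesc->triggers[i].tgoid;
	}
	return InvalidOid;
}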
2057 /*
2058  * Update the TriggerDesc's hint flags to include the specified trigger
2059  */
2060 static void
2061 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
2062 {
2063  int16 tgtype = trigger->tgtype;
2064 
2065  trigdesc->trig_insert_before_row |=
2066  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2067  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2068  trigdesc->trig_insert_after_row |=
2069  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2070  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2071  trigdesc->trig_insert_instead_row |=
2072  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2073  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
2074  trigdesc->trig_insert_before_statement |=
2075  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2076  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2077  trigdesc->trig_insert_after_statement |=
2078  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2079  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2080  trigdesc->trig_update_before_row |=
2081  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2082  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2083  trigdesc->trig_update_after_row |=
2084  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2085  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2086  trigdesc->trig_update_instead_row |=
2087  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2088  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
2089  trigdesc->trig_update_before_statement |=
2090  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2091  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2092  trigdesc->trig_update_after_statement |=
2093  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2094  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2095  trigdesc->trig_delete_before_row |=
2096  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2097  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2098  trigdesc->trig_delete_after_row |=
2099  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2100  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2101  trigdesc->trig_delete_instead_row |=
2102  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2103  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
2104  trigdesc->trig_delete_before_statement |=
2105  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2106  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2107  trigdesc->trig_delete_after_statement |=
2108  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2109  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2110  /* there are no row-level truncate triggers */
2111  trigdesc->trig_truncate_before_statement |=
2112  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2113  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
2114  trigdesc->trig_truncate_after_statement |=
2115  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2116  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
2117 
2118  trigdesc->trig_insert_new_table |=
2119  (TRIGGER_FOR_INSERT(tgtype) &&
2120  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2121  trigdesc->trig_update_old_table |=
2122  (TRIGGER_FOR_UPDATE(tgtype) &&
2123  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2124  trigdesc->trig_update_new_table |=
2125  (TRIGGER_FOR_UPDATE(tgtype) &&
2126  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2127  trigdesc->trig_delete_old_table |=
2128  (TRIGGER_FOR_DELETE(tgtype) &&
2129  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2130 }
2131 
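The flags set here are cheap summaries that let executor code skip per-row trigger work entirely. A hedged sketch of the consumer-side pattern (variable names are illustrative; it mirrors how ExecBRInsertTriggers is typically reached):

if (resultRelInfo->ri_TrigDesc != NULL &&
	resultRelInfo->ri_TrigDesc->trig_insert_before_row)
{
	slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
	if (slot == NULL)
		return NULL;		/* a BEFORE ROW trigger suppressed the insert */
}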
2132 /*
2133  * Copy a TriggerDesc data structure.
2134  *
2135  * The copy is allocated in the current memory context.
2136  */
2137 TriggerDesc *
2138 CopyTriggerDesc(TriggerDesc *trigdesc)
2139 {
2140  TriggerDesc *newdesc;
2141  Trigger *trigger;
2142  int i;
2143 
2144  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2145  return NULL;
2146 
2147  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2148  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2149 
2150  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2151  memcpy(trigger, trigdesc->triggers,
2152  trigdesc->numtriggers * sizeof(Trigger));
2153  newdesc->triggers = trigger;
2154 
2155  for (i = 0; i < trigdesc->numtriggers; i++)
2156  {
2157  trigger->tgname = pstrdup(trigger->tgname);
2158  if (trigger->tgnattr > 0)
2159  {
2160  int16 *newattr;
2161 
2162  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
2163  memcpy(newattr, trigger->tgattr,
2164  trigger->tgnattr * sizeof(int16));
2165  trigger->tgattr = newattr;
2166  }
2167  if (trigger->tgnargs > 0)
2168  {
2169  char **newargs;
2170  int16 j;
2171 
2172  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2173  for (j = 0; j < trigger->tgnargs; j++)
2174  newargs[j] = pstrdup(trigger->tgargs[j]);
2175  trigger->tgargs = newargs;
2176  }
2177  if (trigger->tgqual)
2178  trigger->tgqual = pstrdup(trigger->tgqual);
2179  if (trigger->tgoldtable)
2180  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2181  if (trigger->tgnewtable)
2182  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
2183  trigger++;
2184  }
2185 
2186  return newdesc;
2187 }
2188 
2189 /*
2190  * Free a TriggerDesc data structure.
2191  */
2192 void
2193 FreeTriggerDesc(TriggerDesc *trigdesc)
2194 {
2195  Trigger *trigger;
2196  int i;
2197 
2198  if (trigdesc == NULL)
2199  return;
2200 
2201  trigger = trigdesc->triggers;
2202  for (i = 0; i < trigdesc->numtriggers; i++)
2203  {
2204  pfree(trigger->tgname);
2205  if (trigger->tgnattr > 0)
2206  pfree(trigger->tgattr);
2207  if (trigger->tgnargs > 0)
2208  {
2209  while (--(trigger->tgnargs) >= 0)
2210  pfree(trigger->tgargs[trigger->tgnargs]);
2211  pfree(trigger->tgargs);
2212  }
2213  if (trigger->tgqual)
2214  pfree(trigger->tgqual);
2215  if (trigger->tgoldtable)
2216  pfree(trigger->tgoldtable);
2217  if (trigger->tgnewtable)
2218  pfree(trigger->tgnewtable);
2219  trigger++;
2220  }
2221  pfree(trigdesc->triggers);
2222  pfree(trigdesc);
2223 }
2224 
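Usage note (a sketch, not quoted from any caller): CopyTriggerDesc() produces a fully independent deep copy in the current memory context, so the copy stays valid after the source descriptor goes away, and FreeTriggerDesc() releases a descriptor together with all of its per-trigger sub-allocations:

/* "rel" is assumed to be an open Relation that has triggers. */
TriggerDesc *local_copy = CopyTriggerDesc(rel->trigdesc);

/* ... inspect local_copy->triggers[] as needed ... */

FreeTriggerDesc(local_copy);	/* frees names, args, quals, and the array */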
2225 /*
2226  * Compare two TriggerDesc structures for logical equality.
2227  */
2228 #ifdef NOT_USED
2229 bool
2230 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
2231 {
2232  int i,
2233  j;
2234 
2235  /*
2236  * We need not examine the hint flags, just the trigger array itself; if
2237  * we have the same triggers with the same types, the flags should match.
2238  *
2239  * As of 7.3 we assume trigger set ordering is significant in the
2240  * comparison; so we just compare corresponding slots of the two sets.
2241  *
2242  * Note: comparing the stringToNode forms of the WHEN clauses means that
2243  * parse column locations will affect the result. This is okay as long as
2244  * this function is only used for detecting exact equality, as for example
2245  * in checking for staleness of a cache entry.
2246  */
2247  if (trigdesc1 != NULL)
2248  {
2249  if (trigdesc2 == NULL)
2250  return false;
2251  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
2252  return false;
2253  for (i = 0; i < trigdesc1->numtriggers; i++)
2254  {
2255  Trigger *trig1 = trigdesc1->triggers + i;
2256  Trigger *trig2 = trigdesc2->triggers + i;
2257 
2258  if (trig1->tgoid != trig2->tgoid)
2259  return false;
2260  if (strcmp(trig1->tgname, trig2->tgname) != 0)
2261  return false;
2262  if (trig1->tgfoid != trig2->tgfoid)
2263  return false;
2264  if (trig1->tgtype != trig2->tgtype)
2265  return false;
2266  if (trig1->tgenabled != trig2->tgenabled)
2267  return false;
2268  if (trig1->tgisinternal != trig2->tgisinternal)
2269  return false;
2270  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
2271  return false;
2272  if (trig1->tgconstrindid != trig2->tgconstrindid)
2273  return false;
2274  if (trig1->tgconstraint != trig2->tgconstraint)
2275  return false;
2276  if (trig1->tgdeferrable != trig2->tgdeferrable)
2277  return false;
2278  if (trig1->tginitdeferred != trig2->tginitdeferred)
2279  return false;
2280  if (trig1->tgnargs != trig2->tgnargs)
2281  return false;
2282  if (trig1->tgnattr != trig2->tgnattr)
2283  return false;
2284  if (trig1->tgnattr > 0 &&
2285  memcmp(trig1->tgattr, trig2->tgattr,
2286  trig1->tgnattr * sizeof(int16)) != 0)
2287  return false;
2288  for (j = 0; j < trig1->tgnargs; j++)
2289  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
2290  return false;
2291  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
2292  /* ok */ ;
2293  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
2294  return false;
2295  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
2296  return false;
2297  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
2298  /* ok */ ;
2299  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
2300  return false;
2301  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
2302  return false;
2303  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
2304  /* ok */ ;
2305  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
2306  return false;
2307  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
2308  return false;
2309  }
2310  }
2311  else if (trigdesc2 != NULL)
2312  return false;
2313  return true;
2314 }
2315 #endif /* NOT_USED */
2316 
2317 /*
2318  * Check if there is a row-level trigger with transition tables that prevents
2319  * a table from becoming an inheritance child or partition. Return the name
2320  * of the first such incompatible trigger, or NULL if there is none.
2321  */
2322 const char *
2323 FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
2324 {
2325  if (trigdesc != NULL)
2326  {
2327  int i;
2328 
2329  for (i = 0; i < trigdesc->numtriggers; ++i)
2330  {
2331  Trigger *trigger = &trigdesc->triggers[i];
2332 
2333  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2334  return trigger->tgname;
2335  }
2336  }
2337 
2338  return NULL;
2339 }
2340 
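A hedged sketch of the intended caller-side use (the surrounding DDL code and error wording are illustrative, not quoted from tablecmds.c): refuse to attach a table as an inheritance child or partition while such a trigger exists:

const char *incompat = FindTriggerIncompatibleWithInheritance(child_rel->trigdesc);

if (incompat != NULL)
	ereport(ERROR,
			(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
			 errmsg("trigger \"%s\" prevents table \"%s\" from becoming an inheritance child",
					incompat, RelationGetRelationName(child_rel))));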
2341 /*
2342  * Call a trigger function.
2343  *
2344  * trigdata: trigger descriptor.
2345  * tgindx: trigger's index in finfo and instr arrays.
2346  * finfo: array of cached trigger function call information.
2347  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2348  * per_tuple_context: memory context to execute the function in.
2349  *
2350  * Returns the tuple (or NULL) as returned by the function.
2351  */
2352 static HeapTuple
2353 ExecCallTriggerFunc(TriggerData *trigdata,
2354  int tgindx,
2355  FmgrInfo *finfo,
2356  Instrumentation *instr,
2357  MemoryContext per_tuple_context)
2358 {
2359  FunctionCallInfoData fcinfo;
2360  PgStat_FunctionCallUsage fcusage;
2361  Datum result;
2362  MemoryContext oldContext;
2363 
2364  /*
2365  * Protect against code paths that may fail to initialize transition table
2366  * info.
2367  */
2368  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2369  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2370  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2371  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2372  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2373  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2374  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2375 
2376  finfo += tgindx;
2377 
2378  /*
2379  * We cache fmgr lookup info, to avoid making the lookup again on each
2380  * call.
2381  */
2382  if (finfo->fn_oid == InvalidOid)
2383  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2384 
2385  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2386 
2387  /*
2388  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2389  */
2390  if (instr)
2391  InstrStartNode(instr + tgindx);
2392 
2393  /*
2394  * Do the function evaluation in the per-tuple memory context, so that
2395  * leaked memory will be reclaimed once per tuple. Note in particular that
2396  * any new tuple created by the trigger function will live till the end of
2397  * the tuple cycle.
2398  */
2399  oldContext = MemoryContextSwitchTo(per_tuple_context);
2400 
2401  /*
2402  * Call the function, passing no arguments but setting a context.
2403  */
2404  InitFunctionCallInfoData(fcinfo, finfo, 0,
2405  InvalidOid, (Node *) trigdata, NULL);
2406 
2407  pgstat_init_function_usage(&fcinfo, &fcusage);
2408 
2409  MyTriggerDepth++;
2410  PG_TRY();
2411  {
2412  result = FunctionCallInvoke(&fcinfo);
2413  }
2414  PG_CATCH();
2415  {
2416  MyTriggerDepth--;
2417  PG_RE_THROW();
2418  }
2419  PG_END_TRY();
2420  MyTriggerDepth--;
2421 
2422  pgstat_end_function_usage(&fcusage, true);
2423 
2424  MemoryContextSwitchTo(oldContext);
2425 
2426  /*
2427  * Trigger protocol allows function to return a null pointer, but NOT to
2428  * set the isnull result flag.
2429  */
2430  if (fcinfo.isnull)
2431  ereport(ERROR,
2432  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2433  errmsg("trigger function %u returned null value",
2434  fcinfo.flinfo->fn_oid)));
2435 
2436  /*
2437  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2438  * one "tuple returned" (really the number of firings).
2439  */
2440  if (instr)
2441  InstrStopNode(instr + tgindx, 1);
2442 
2443  return (HeapTuple) DatumGetPointer(result);
2444 }
2445 
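For context, a minimal sketch of a user-written C trigger function that satisfies the protocol enforced above: it is invoked with the TriggerData passed as fcinfo->context, must return a tuple pointer (possibly NULL) rather than setting the isnull flag, and statement-level firings return NULL. The function name is illustrative:

PG_FUNCTION_INFO_V1(example_noop_trigger);

Datum
example_noop_trigger(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "example_noop_trigger: not called by trigger manager");

	/* For row-level UPDATE firings, the proposed new row is tg_newtuple */
	if (TRIGGER_FIRED_FOR_ROW(trigdata->tg_event) &&
		TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		return PointerGetDatum(trigdata->tg_newtuple);

	/* Other row-level firings pass the row at hand as tg_trigtuple */
	if (TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
		return PointerGetDatum(trigdata->tg_trigtuple);

	/* Statement-level firings have no tuple to return */
	return PointerGetDatum(NULL);
}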
2446 void
2447 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2448 {
2449  TriggerDesc *trigdesc;
2450  int i;
2451  TriggerData LocTriggerData;
2452 
2453  trigdesc = relinfo->ri_TrigDesc;
2454 
2455  if (trigdesc == NULL)
2456  return;
2457  if (!trigdesc->trig_insert_before_statement)
2458  return;
2459 
2460  /* no-op if we already fired BS triggers in this context */
2462  CMD_INSERT))
2463  return;
2464 
2465  LocTriggerData.type = T_TriggerData;
2466  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2467  TRIGGER_EVENT_BEFORE;
2468  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2469  LocTriggerData.tg_trigtuple = NULL;
2470  LocTriggerData.tg_newtuple = NULL;
2471  LocTriggerData.tg_oldtable = NULL;
2472  LocTriggerData.tg_newtable = NULL;
2473  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2474  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2475  for (i = 0; i < trigdesc->numtriggers; i++)
2476  {
2477  Trigger *trigger = &trigdesc->triggers[i];
2478  HeapTuple newtuple;
2479 
2480  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2481  TRIGGER_TYPE_STATEMENT,
2482  TRIGGER_TYPE_BEFORE,
2483  TRIGGER_TYPE_INSERT))
2484  continue;
2485  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2486  NULL, NULL, NULL))
2487  continue;
2488 
2489  LocTriggerData.tg_trigger = trigger;
2490  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2491  i,
2492  relinfo->ri_TrigFunctions,
2493  relinfo->ri_TrigInstrument,
2494  GetPerTupleMemoryContext(estate));
2495 
2496  if (newtuple)
2497  ereport(ERROR,
2498  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2499  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2500  }
2501 }
2502 
2503 void
2504 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2505  TransitionCaptureState *transition_capture)
2506 {
2507  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2508 
2509  if (trigdesc && trigdesc->trig_insert_after_statement)
2510  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2511  false, NULL, NULL, NIL, NULL, transition_capture);
2512 }
2513 
2514 TupleTableSlot *
2515 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2516  TupleTableSlot *slot)
2517 {
2518  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2519  HeapTuple slottuple = ExecMaterializeSlot(slot);
2520  HeapTuple newtuple = slottuple;
2521  HeapTuple oldtuple;
2522  TriggerData LocTriggerData;
2523  int i;
2524 
2525  LocTriggerData.type = T_TriggerData;
2526  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2527  TRIGGER_EVENT_ROW |
2528  TRIGGER_EVENT_BEFORE;
2529  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2530  LocTriggerData.tg_newtuple = NULL;
2531  LocTriggerData.tg_oldtable = NULL;
2532  LocTriggerData.tg_newtable = NULL;
2533  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2534  for (i = 0; i < trigdesc->numtriggers; i++)
2535  {
2536  Trigger *trigger = &trigdesc->triggers[i];
2537 
2538  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2539  TRIGGER_TYPE_ROW,
2540  TRIGGER_TYPE_BEFORE,
2541  TRIGGER_TYPE_INSERT))
2542  continue;
2543  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2544  NULL, NULL, newtuple))
2545  continue;
2546 
2547  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2548  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2549  LocTriggerData.tg_trigger = trigger;
2550  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2551  i,
2552  relinfo->ri_TrigFunctions,
2553  relinfo->ri_TrigInstrument,
2554  GetPerTupleMemoryContext(estate));
2555  if (oldtuple != newtuple && oldtuple != slottuple)
2556  heap_freetuple(oldtuple);
2557  if (newtuple == NULL)
2558  return NULL; /* "do nothing" */
2559  }
2560 
2561  if (newtuple != slottuple)
2562  {
2563  /*
2564  * Return the modified tuple using the es_trig_tuple_slot. We assume
2565  * the tuple was allocated in per-tuple memory context, and therefore
2566  * will go away by itself. The tuple table slot should not try to
2567  * clear it.
2568  */
2569  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2570  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2571 
2572  if (newslot->tts_tupleDescriptor != tupdesc)
2573  ExecSetSlotDescriptor(newslot, tupdesc);
2574  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2575  slot = newslot;
2576  }
2577  return slot;
2578 }
2579 
2580 void
2581 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2582  HeapTuple trigtuple, List *recheckIndexes,
2583  TransitionCaptureState *transition_capture)
2584 {
2585  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2586 
2587  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2588  (transition_capture && transition_capture->tcs_insert_new_table))
2589  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2590  true, NULL, trigtuple,
2591  recheckIndexes, NULL,
2592  transition_capture);
2593 }
2594 
2595 TupleTableSlot *
2596 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2597  TupleTableSlot *slot)
2598 {
2599  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2600  HeapTuple slottuple = ExecMaterializeSlot(slot);
2601  HeapTuple newtuple = slottuple;
2602  HeapTuple oldtuple;
2603  TriggerData LocTriggerData;
2604  int i;
2605 
2606  LocTriggerData.type = T_TriggerData;
2607  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2608  TRIGGER_EVENT_ROW |
2609  TRIGGER_EVENT_INSTEAD;
2610  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2611  LocTriggerData.tg_newtuple = NULL;
2612  LocTriggerData.tg_oldtable = NULL;
2613  LocTriggerData.tg_newtable = NULL;
2614  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2615  for (i = 0; i < trigdesc->numtriggers; i++)
2616  {
2617  Trigger *trigger = &trigdesc->triggers[i];
2618 
2619  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2620  TRIGGER_TYPE_ROW,
2621  TRIGGER_TYPE_INSTEAD,
2622  TRIGGER_TYPE_INSERT))
2623  continue;
2624  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2625  NULL, NULL, newtuple))
2626  continue;
2627 
2628  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2629  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2630  LocTriggerData.tg_trigger = trigger;
2631  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2632  i,
2633  relinfo->ri_TrigFunctions,
2634  relinfo->ri_TrigInstrument,
2635  GetPerTupleMemoryContext(estate));
2636  if (oldtuple != newtuple && oldtuple != slottuple)
2637  heap_freetuple(oldtuple);
2638  if (newtuple == NULL)
2639  return NULL; /* "do nothing" */
2640  }
2641 
2642  if (newtuple != slottuple)
2643  {
2644  /*
2645  * Return the modified tuple using the es_trig_tuple_slot. We assume
2646  * the tuple was allocated in per-tuple memory context, and therefore
2647  * will go away by itself. The tuple table slot should not try to
2648  * clear it.
2649  */
2650  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2651  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2652 
2653  if (newslot->tts_tupleDescriptor != tupdesc)
2654  ExecSetSlotDescriptor(newslot, tupdesc);
2655  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2656  slot = newslot;
2657  }
2658  return slot;
2659 }
2660 
2661 void
2662 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2663 {
2664  TriggerDesc *trigdesc;
2665  int i;
2666  TriggerData LocTriggerData;
2667 
2668  trigdesc = relinfo->ri_TrigDesc;
2669 
2670  if (trigdesc == NULL)
2671  return;
2672  if (!trigdesc->trig_delete_before_statement)
2673  return;
2674 
2675  /* no-op if we already fired BS triggers in this context */
2677  CMD_DELETE))
2678  return;
2679 
2680  LocTriggerData.type = T_TriggerData;
2681  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2682  TRIGGER_EVENT_BEFORE;
2683  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2684  LocTriggerData.tg_trigtuple = NULL;
2685  LocTriggerData.tg_newtuple = NULL;
2686  LocTriggerData.tg_oldtable = NULL;
2687  LocTriggerData.tg_newtable = NULL;
2688  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2689  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2690  for (i = 0; i < trigdesc->numtriggers; i++)
2691  {
2692  Trigger *trigger = &trigdesc->triggers[i];
2693  HeapTuple newtuple;
2694 
2695  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2696  TRIGGER_TYPE_STATEMENT,
2697  TRIGGER_TYPE_BEFORE,
2698  TRIGGER_TYPE_DELETE))
2699  continue;
2700  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2701  NULL, NULL, NULL))
2702  continue;
2703 
2704  LocTriggerData.tg_trigger = trigger;
2705  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2706  i,
2707  relinfo->ri_TrigFunctions,
2708  relinfo->ri_TrigInstrument,
2709  GetPerTupleMemoryContext(estate));
2710 
2711  if (newtuple)
2712  ereport(ERROR,
2713  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2714  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2715  }
2716 }
2717 
2718 void
2719 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2720  TransitionCaptureState *transition_capture)
2721 {
2722  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2723 
2724  if (trigdesc && trigdesc->trig_delete_after_statement)
2725  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2726  false, NULL, NULL, NIL, NULL, transition_capture);
2727 }
2728 
2729 bool
2730 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2731  ResultRelInfo *relinfo,
2732  ItemPointer tupleid,
2733  HeapTuple fdw_trigtuple)
2734 {
2735  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2736  bool result = true;
2737  TriggerData LocTriggerData;
2738  HeapTuple trigtuple;
2739  HeapTuple newtuple;
2740  TupleTableSlot *newSlot;
2741  int i;
2742 
2743  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2744  if (fdw_trigtuple == NULL)
2745  {
2746  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2747  LockTupleExclusive, &newSlot);
2748  if (trigtuple == NULL)
2749  return false;
2750  }
2751  else
2752  trigtuple = fdw_trigtuple;
2753 
2754  LocTriggerData.type = T_TriggerData;
2755  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2756  TRIGGER_EVENT_ROW |
2757  TRIGGER_EVENT_BEFORE;
2758  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2759  LocTriggerData.tg_newtuple = NULL;
2760  LocTriggerData.tg_oldtable = NULL;
2761  LocTriggerData.tg_newtable = NULL;
2762  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2763  for (i = 0; i < trigdesc->numtriggers; i++)
2764  {
2765  Trigger *trigger = &trigdesc->triggers[i];
2766 
2767  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2768  TRIGGER_TYPE_ROW,
2769  TRIGGER_TYPE_BEFORE,
2770  TRIGGER_TYPE_DELETE))
2771  continue;
2772  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2773  NULL, trigtuple, NULL))
2774  continue;
2775 
2776  LocTriggerData.tg_trigtuple = trigtuple;
2777  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2778  LocTriggerData.tg_trigger = trigger;
2779  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2780  i,
2781  relinfo->ri_TrigFunctions,
2782  relinfo->ri_TrigInstrument,
2783  GetPerTupleMemoryContext(estate));
2784  if (newtuple == NULL)
2785  {
2786  result = false; /* tell caller to suppress delete */
2787  break;
2788  }
2789  if (newtuple != trigtuple)
2790  heap_freetuple(newtuple);
2791  }
2792  if (trigtuple != fdw_trigtuple)
2793  heap_freetuple(trigtuple);
2794 
2795  return result;
2796 }
2797 
2798 void
2799 ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2800  ItemPointer tupleid,
2801  HeapTuple fdw_trigtuple,
2802  TransitionCaptureState *transition_capture)
2803 {
2804  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2805 
2806  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2807  (transition_capture && transition_capture->tcs_delete_old_table))
2808  {
2809  HeapTuple trigtuple;
2810 
2811  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2812  if (fdw_trigtuple == NULL)
2813  trigtuple = GetTupleForTrigger(estate,
2814  NULL,
2815  relinfo,
2816  tupleid,
2817  LockTupleExclusive,
2818  NULL);
2819  else
2820  trigtuple = fdw_trigtuple;
2821 
2822  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2823  true, trigtuple, NULL, NIL, NULL,
2824  transition_capture);
2825  if (trigtuple != fdw_trigtuple)
2826  heap_freetuple(trigtuple);
2827  }
2828 }
2829 
2830 bool
2831 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2832  HeapTuple trigtuple)
2833 {
2834  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2835  TriggerData LocTriggerData;
2836  HeapTuple rettuple;
2837  int i;
2838 
2839  LocTriggerData.type = T_TriggerData;
2840  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2841  TRIGGER_EVENT_ROW |
2842  TRIGGER_EVENT_INSTEAD;
2843  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2844  LocTriggerData.tg_newtuple = NULL;
2845  LocTriggerData.tg_oldtable = NULL;
2846  LocTriggerData.tg_newtable = NULL;
2847  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2848  for (i = 0; i < trigdesc->numtriggers; i++)
2849  {
2850  Trigger *trigger = &trigdesc->triggers[i];
2851 
2852  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2853  TRIGGER_TYPE_ROW,
2854  TRIGGER_TYPE_INSTEAD,
2855  TRIGGER_TYPE_DELETE))
2856  continue;
2857  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2858  NULL, trigtuple, NULL))
2859  continue;
2860 
2861  LocTriggerData.tg_trigtuple = trigtuple;
2862  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2863  LocTriggerData.tg_trigger = trigger;
2864  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2865  i,
2866  relinfo->ri_TrigFunctions,
2867  relinfo->ri_TrigInstrument,
2868  GetPerTupleMemoryContext(estate));
2869  if (rettuple == NULL)
2870  return false; /* Delete was suppressed */
2871  if (rettuple != trigtuple)
2872  heap_freetuple(rettuple);
2873  }
2874  return true;
2875 }
2876 
2877 void
2878 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2879 {
2880  TriggerDesc *trigdesc;
2881  int i;
2882  TriggerData LocTriggerData;
2883  Bitmapset *updatedCols;
2884 
2885  trigdesc = relinfo->ri_TrigDesc;
2886 
2887  if (trigdesc == NULL)
2888  return;
2889  if (!trigdesc->trig_update_before_statement)
2890  return;
2891 
2892  /* no-op if we already fired BS triggers in this context */
2894  CMD_UPDATE))
2895  return;
2896 
2897  updatedCols = GetUpdatedColumns(relinfo, estate);
2898 
2899  LocTriggerData.type = T_TriggerData;
2900  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2901  TRIGGER_EVENT_BEFORE;
2902  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2903  LocTriggerData.tg_trigtuple = NULL;
2904  LocTriggerData.tg_newtuple = NULL;
2905  LocTriggerData.tg_oldtable = NULL;
2906  LocTriggerData.tg_newtable = NULL;
2907  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2908  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2909  for (i = 0; i < trigdesc->numtriggers; i++)
2910  {
2911  Trigger *trigger = &trigdesc->triggers[i];
2912  HeapTuple newtuple;
2913 
2914  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2915  TRIGGER_TYPE_STATEMENT,
2916  TRIGGER_TYPE_BEFORE,
2917  TRIGGER_TYPE_UPDATE))
2918  continue;
2919  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2920  updatedCols, NULL, NULL))
2921  continue;
2922 
2923  LocTriggerData.tg_trigger = trigger;
2924  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2925  i,
2926  relinfo->ri_TrigFunctions,
2927  relinfo->ri_TrigInstrument,
2928  GetPerTupleMemoryContext(estate));
2929 
2930  if (newtuple)
2931  ereport(ERROR,
2932  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2933  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2934  }
2935 }
2936 
2937 void
2938 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2939  TransitionCaptureState *transition_capture)
2940 {
2941  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2942 
2943  if (trigdesc && trigdesc->trig_update_after_statement)
2944  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2945  false, NULL, NULL, NIL,
2946  GetUpdatedColumns(relinfo, estate),
2947  transition_capture);
2948 }
2949 
2950 TupleTableSlot *
2951 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2952  ResultRelInfo *relinfo,
2953  ItemPointer tupleid,
2954  HeapTuple fdw_trigtuple,
2955  TupleTableSlot *slot)
2956 {
2957  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2958  HeapTuple slottuple = ExecMaterializeSlot(slot);
2959  HeapTuple newtuple = slottuple;
2960  TriggerData LocTriggerData;
2961  HeapTuple trigtuple;
2962  HeapTuple oldtuple;
2963  TupleTableSlot *newSlot;
2964  int i;
2965  Bitmapset *updatedCols;
2966  LockTupleMode lockmode;
2967 
2968  /* Determine lock mode to use */
2969  lockmode = ExecUpdateLockMode(estate, relinfo);
2970 
2971  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2972  if (fdw_trigtuple == NULL)
2973  {
2974  /* get a copy of the on-disk tuple we are planning to update */
2975  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2976  lockmode, &newSlot);
2977  if (trigtuple == NULL)
2978  return NULL; /* cancel the update action */
2979  }
2980  else
2981  {
2982  trigtuple = fdw_trigtuple;
2983  newSlot = NULL;
2984  }
2985 
2986  /*
2987  * In READ COMMITTED isolation level it's possible that target tuple was
2988  * changed due to concurrent update. In that case we have a raw subplan
2989  * output tuple in newSlot, and need to run it through the junk filter to
2990  * produce an insertable tuple.
2991  *
2992  * Caution: more than likely, the passed-in slot is the same as the
2993  * junkfilter's output slot, so we are clobbering the original value of
2994  * slottuple by doing the filtering. This is OK since neither we nor our
2995  * caller have any more interest in the prior contents of that slot.
2996  */
2997  if (newSlot != NULL)
2998  {
2999  slot = ExecFilterJunk(relinfo->ri_junkFilter, newSlot);
3000  slottuple = ExecMaterializeSlot(slot);
3001  newtuple = slottuple;
3002  }
3003 
3004 
3005  LocTriggerData.type = T_TriggerData;
3006  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3007  TRIGGER_EVENT_ROW |
3008  TRIGGER_EVENT_BEFORE;
3009  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3010  LocTriggerData.tg_oldtable = NULL;
3011  LocTriggerData.tg_newtable = NULL;
3012  updatedCols = GetUpdatedColumns(relinfo, estate);
3013  for (i = 0; i < trigdesc->numtriggers; i++)
3014  {
3015  Trigger *trigger = &trigdesc->triggers[i];
3016 
3017  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3018  TRIGGER_TYPE_ROW,
3019  TRIGGER_TYPE_BEFORE,
3020  TRIGGER_TYPE_UPDATE))
3021  continue;
3022  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3023  updatedCols, trigtuple, newtuple))
3024  continue;
3025 
3026  LocTriggerData.tg_trigtuple = trigtuple;
3027  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3028  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3029  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3030  LocTriggerData.tg_trigger = trigger;
3031  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3032  i,
3033  relinfo->ri_TrigFunctions,
3034  relinfo->ri_TrigInstrument,
3035  GetPerTupleMemoryContext(estate));
3036  if (oldtuple != newtuple && oldtuple != slottuple)
3037  heap_freetuple(oldtuple);
3038  if (newtuple == NULL)
3039  {
3040  if (trigtuple != fdw_trigtuple)
3041  heap_freetuple(trigtuple);
3042  return NULL; /* "do nothing" */
3043  }
3044  }
3045  if (trigtuple != fdw_trigtuple && trigtuple != newtuple)
3046  heap_freetuple(trigtuple);
3047 
3048  if (newtuple != slottuple)
3049  {
3050  /*
3051  * Return the modified tuple using the es_trig_tuple_slot. We assume
3052  * the tuple was allocated in per-tuple memory context, and therefore
3053  * will go away by itself. The tuple table slot should not try to
3054  * clear it.
3055  */
3056  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
3057  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
3058 
3059  if (newslot->tts_tupleDescriptor != tupdesc)
3060  ExecSetSlotDescriptor(newslot, tupdesc);
3061  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
3062  slot = newslot;
3063  }
3064  return slot;
3065 }
3066 
3067 void
3068 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
3069  ItemPointer tupleid,
3070  HeapTuple fdw_trigtuple,
3071  HeapTuple newtuple,
3072  List *recheckIndexes,
3073  TransitionCaptureState *transition_capture)
3074 {
3075  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3076 
3077  if ((trigdesc && trigdesc->trig_update_after_row) ||
3078  (transition_capture &&
3079  (transition_capture->tcs_update_old_table ||
3080  transition_capture->tcs_update_new_table)))
3081  {
3082  HeapTuple trigtuple;
3083 
3084  /*
3085  * Note: if the UPDATE is converted into a DELETE+INSERT as part of
3086  * update-partition-key operation, then this function is also called
3087  * separately for DELETE and INSERT to capture transition table rows.
3088  * In such case, either old tuple or new tuple can be NULL.
3089  */
3090  if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
3091  trigtuple = GetTupleForTrigger(estate,
3092  NULL,
3093  relinfo,
3094  tupleid,
3095  LockTupleExclusive,
3096  NULL);
3097  else
3098  trigtuple = fdw_trigtuple;
3099 
3100  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
3101  true, trigtuple, newtuple, recheckIndexes,
3102  GetUpdatedColumns(relinfo, estate),
3103  transition_capture);
3104  if (trigtuple != fdw_trigtuple)
3105  heap_freetuple(trigtuple);
3106  }
3107 }
3108 
3109 TupleTableSlot *
3110 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
3111  HeapTuple trigtuple, TupleTableSlot *slot)
3112 {
3113  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3114  HeapTuple slottuple = ExecMaterializeSlot(slot);
3115  HeapTuple newtuple = slottuple;
3116  TriggerData LocTriggerData;
3117  HeapTuple oldtuple;
3118  int i;
3119 
3120  LocTriggerData.type = T_TriggerData;
3121  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3122  TRIGGER_EVENT_ROW |
3123  TRIGGER_EVENT_INSTEAD;
3124  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3125  LocTriggerData.tg_oldtable = NULL;
3126  LocTriggerData.tg_newtable = NULL;
3127  for (i = 0; i < trigdesc->numtriggers; i++)
3128  {
3129  Trigger *trigger = &trigdesc->triggers[i];
3130 
3131  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3132  TRIGGER_TYPE_ROW,
3133  TRIGGER_TYPE_INSTEAD,
3134  TRIGGER_TYPE_UPDATE))
3135  continue;
3136  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3137  NULL, trigtuple, newtuple))
3138  continue;
3139 
3140  LocTriggerData.tg_trigtuple = trigtuple;
3141  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3142  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3143  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3144  LocTriggerData.tg_trigger = trigger;
3145  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3146  i,
3147  relinfo->ri_TrigFunctions,
3148  relinfo->ri_TrigInstrument,
3149  GetPerTupleMemoryContext(estate));
3150  if (oldtuple != newtuple && oldtuple != slottuple)
3151  heap_freetuple(oldtuple);
3152  if (newtuple == NULL)
3153  return NULL; /* "do nothing" */
3154  }
3155 
3156  if (newtuple != slottuple)
3157  {
3158  /*
3159  * Return the modified tuple using the es_trig_tuple_slot. We assume
3160  * the tuple was allocated in per-tuple memory context, and therefore
3161  * will go away by itself. The tuple table slot should not try to
3162  * clear it.
3163  */
3164  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
3165  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
3166 
3167  if (newslot->tts_tupleDescriptor != tupdesc)
3168  ExecSetSlotDescriptor(newslot, tupdesc);
3169  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
3170  slot = newslot;
3171  }
3172  return slot;
3173 }
3174 
3175 void
3176 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3177 {
3178  TriggerDesc *trigdesc;
3179  int i;
3180  TriggerData LocTriggerData;
3181 
3182  trigdesc = relinfo->ri_TrigDesc;
3183 
3184  if (trigdesc == NULL)
3185  return;
3186  if (!trigdesc->trig_truncate_before_statement)
3187  return;
3188 
3189  LocTriggerData.type = T_TriggerData;
3190  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3191  TRIGGER_EVENT_BEFORE;
3192  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3193  LocTriggerData.tg_trigtuple = NULL;
3194  LocTriggerData.tg_newtuple = NULL;
3195  LocTriggerData.tg_oldtable = NULL;
3196  LocTriggerData.tg_newtable = NULL;
3197  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3198  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3199  for (i = 0; i < trigdesc->numtriggers; i++)
3200  {
3201  Trigger *trigger = &trigdesc->triggers[i];
3202  HeapTuple newtuple;
3203 
3204  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3205  TRIGGER_TYPE_STATEMENT,
3206  TRIGGER_TYPE_BEFORE,
3207  TRIGGER_TYPE_TRUNCATE))
3208  continue;
3209  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3210  NULL, NULL, NULL))
3211  continue;
3212 
3213  LocTriggerData.tg_trigger = trigger;
3214  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3215  i,
3216  relinfo->ri_TrigFunctions,
3217  relinfo->ri_TrigInstrument,
3218  GetPerTupleMemoryContext(estate));
3219 
3220  if (newtuple)
3221  ereport(ERROR,
3222  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3223  errmsg("BEFORE STATEMENT trigger cannot return a value")));
3224  }
3225 }
3226 
3227 void
3228 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3229 {
3230  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3231 
3232  if (trigdesc && trigdesc->trig_truncate_after_statement)
3233  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_TRUNCATE,
3234  false, NULL, NULL, NIL, NULL, NULL);
3235 }
3236 
3237 
3238 static HeapTuple
3239 GetTupleForTrigger(EState *estate,
3240  EPQState *epqstate,
3241  ResultRelInfo *relinfo,
3242  ItemPointer tid,
3243  LockTupleMode lockmode,
3244  TupleTableSlot **newSlot)
3245 {
3246  Relation relation = relinfo->ri_RelationDesc;
3247  HeapTupleData tuple;
3248  HeapTuple result;
3249  Buffer buffer;
3250 
3251  if (newSlot != NULL)
3252  {
3253  HTSU_Result test;
3254  HeapUpdateFailureData hufd;
3255 
3256  *newSlot = NULL;
3257 
3258  /* caller must pass an epqstate if EvalPlanQual is possible */
3259  Assert(epqstate != NULL);
3260 
3261  /*
3262  * lock tuple for update
3263  */
3264 ltrmark:;
3265  tuple.t_self = *tid;
3266  test = heap_lock_tuple(relation, &tuple,
3267  estate->es_output_cid,
3268  lockmode, LockWaitBlock,
3269  false, &buffer, &hufd);
3270  switch (test)
3271  {
3272  case HeapTupleSelfUpdated:
3273 
3274  /*
3275  * The target tuple was already updated or deleted by the
3276  * current command, or by a later command in the current
3277  * transaction. We ignore the tuple in the former case, and
3278  * throw error in the latter case, for the same reasons
3279  * enumerated in ExecUpdate and ExecDelete in
3280  * nodeModifyTable.c.
3281  */
3282  if (hufd.cmax != estate->es_output_cid)
3283  ereport(ERROR,
3284  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3285  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3286  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3287 
3288  /* treat it as deleted; do not process */
3289  ReleaseBuffer(buffer);
3290  return NULL;
3291 
3292  case HeapTupleMayBeUpdated:
3293  break;
3294 
3295  case HeapTupleUpdated:
3296  ReleaseBuffer(buffer);
3297  if (IsolationUsesXactSnapshot())
3298  ereport(ERROR,
3299  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3300  errmsg("could not serialize access due to concurrent update")));
3301  if (ItemPointerIndicatesMovedPartitions(&hufd.ctid))
3302  ereport(ERROR,
3303  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3304  errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
3305 
3306  if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
3307  {
3308  /* it was updated, so look at the updated version */
3309  TupleTableSlot *epqslot;
3310 
3311  epqslot = EvalPlanQual(estate,
3312  epqstate,
3313  relation,
3314  relinfo->ri_RangeTableIndex,
3315  lockmode,
3316  &hufd.ctid,
3317  hufd.xmax);
3318  if (!TupIsNull(epqslot))
3319  {
3320  *tid = hufd.ctid;
3321  *newSlot = epqslot;
3322 
3323  /*
3324  * EvalPlanQual already locked the tuple, but we
3325  * re-call heap_lock_tuple anyway as an easy way of
3326  * re-fetching the correct tuple. Speed is hardly a
3327  * criterion in this path anyhow.
3328  */
3329  goto ltrmark;
3330  }
3331  }
3332 
3333  /*
3334  * if tuple was deleted or PlanQual failed for updated tuple -
3335  * we must not process this tuple!
3336  */
3337  return NULL;
3338 
3339  case HeapTupleInvisible:
3340  elog(ERROR, "attempted to lock invisible tuple");
3341  break;
3342 
3343  default:
3344  ReleaseBuffer(buffer);
3345  elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
3346  return NULL; /* keep compiler quiet */
3347  }
3348  }
3349  else
3350  {
3351  Page page;
3352  ItemId lp;
3353 
3354  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3355 
3356  /*
3357  * Although we already know this tuple is valid, we must lock the
3358  * buffer to ensure that no one has a buffer cleanup lock; otherwise
3359  * they might move the tuple while we try to copy it. But we can
3360  * release the lock before actually doing the heap_copytuple call,
3361  * since holding pin is sufficient to prevent anyone from getting a
3362  * cleanup lock they don't already hold.
3363  */
3364  LockBuffer(buffer, BUFFER_LOCK_SHARE);
3365 
3366  page = BufferGetPage(buffer);
3367  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3368 
3369  Assert(ItemIdIsNormal(lp));
3370 
3371  tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3372  tuple.t_len = ItemIdGetLength(lp);
3373  tuple.t_self = *tid;
3374  tuple.t_tableOid = RelationGetRelid(relation);
3375 
3376  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3377  }
3378 
3379  result = heap_copytuple(&tuple);
3380  ReleaseBuffer(buffer);
3381 
3382  return result;
3383 }
3384 
3385 /*
3386  * Is trigger enabled to fire?
3387  */
3388 static bool
3389 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3390  Trigger *trigger, TriggerEvent event,
3391  Bitmapset *modifiedCols,
3392  HeapTuple oldtup, HeapTuple newtup)
3393 {
3394  /* Check replication-role-dependent enable state */
3395  if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3396  {
3397  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3398  trigger->tgenabled == TRIGGER_DISABLED)
3399  return false;
3400  }
3401  else /* ORIGIN or LOCAL role */
3402  {
3403  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3404  trigger->tgenabled == TRIGGER_DISABLED)
3405  return false;
3406  }
3407 
3408  /*
3409  * Check for column-specific trigger (only possible for UPDATE, and in
3410  * fact we *must* ignore tgattr for other event types)
3411  */
3412  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3413  {
3414  int i;
3415  bool modified;
3416 
3417  modified = false;
3418  for (i = 0; i < trigger->tgnattr; i++)
3419  {
3420  if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3421  modifiedCols))
3422  {
3423  modified = true;
3424  break;
3425  }
3426  }
3427  if (!modified)
3428  return false;
3429  }
3430 
3431  /* Check for WHEN clause */
3432  if (trigger->tgqual)
3433  {
3434  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
3435  ExprState **predicate;
3436  ExprContext *econtext;
3437  TupleTableSlot *oldslot = NULL;
3438  TupleTableSlot *newslot = NULL;
3439  MemoryContext oldContext;
3440  int i;
3441 
3442  Assert(estate != NULL);
3443 
3444  /*
3445  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3446  * matching element of relinfo->ri_TrigWhenExprs[]
3447  */
3448  i = trigger - relinfo->ri_TrigDesc->triggers;
3449  predicate = &relinfo->ri_TrigWhenExprs[i];
3450 
3451  /*
3452  * If first time through for this WHEN expression, build expression
3453  * nodetrees for it. Keep them in the per-query memory context so
3454  * they'll survive throughout the query.
3455  */
3456  if (*predicate == NULL)
3457  {
3458  Node *tgqual;
3459 
3460  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3461  tgqual = stringToNode(trigger->tgqual);
3462  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3463  ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3464  ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3465  /* ExecPrepareQual wants implicit-AND form */
3466  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3467  *predicate = ExecPrepareQual((List *) tgqual, estate);
3468  MemoryContextSwitchTo(oldContext);
3469  }
3470 
3471  /*
3472  * We will use the EState's per-tuple context for evaluating WHEN
3473  * expressions (creating it if it's not already there).
3474  */
3475  econtext = GetPerTupleExprContext(estate);
3476 
3477  /*
3478  * Put OLD and NEW tuples into tupleslots for expression evaluation.
3479  * These slots can be shared across the whole estate, but be careful
3480  * that they have the current resultrel's tupdesc.
3481  */
3482  if (HeapTupleIsValid(oldtup))
3483  {
3484  if (estate->es_trig_oldtup_slot == NULL)
3485  {
3486  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3487  estate->es_trig_oldtup_slot =
3488  ExecInitExtraTupleSlot(estate, NULL);
3489  MemoryContextSwitchTo(oldContext);
3490  }
3491  oldslot = estate->es_trig_oldtup_slot;
3492  if (oldslot->tts_tupleDescriptor != tupdesc)
3493  ExecSetSlotDescriptor(oldslot, tupdesc);
3494  ExecStoreTuple(oldtup, oldslot, InvalidBuffer, false);
3495  }
3496  if (HeapTupleIsValid(newtup))
3497  {
3498  if (estate->es_trig_newtup_slot == NULL)
3499  {
3500  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3501  estate->es_trig_newtup_slot =
3502  ExecInitExtraTupleSlot(estate, NULL);
3503  MemoryContextSwitchTo(oldContext);
3504  }
3505  newslot = estate->es_trig_newtup_slot;
3506  if (newslot->tts_tupleDescriptor != tupdesc)
3507  ExecSetSlotDescriptor(newslot, tupdesc);
3508  ExecStoreTuple(newtup, newslot, InvalidBuffer, false);
3509  }
3510 
3511  /*
3512  * Finally evaluate the expression, making the old and/or new tuples
3513  * available as INNER_VAR/OUTER_VAR respectively.
3514  */
3515  econtext->ecxt_innertuple = oldslot;
3516  econtext->ecxt_outertuple = newslot;
3517  if (!ExecQual(*predicate, econtext))
3518  return false;
3519  }
3520 
3521  return true;
3522 }
3523 
3524 
3525 /* ----------
3526  * After-trigger stuff
3527  *
3528  * The AfterTriggersData struct holds data about pending AFTER trigger events
3529  * during the current transaction tree. (BEFORE triggers are fired
3530  * immediately so we don't need any persistent state about them.) The struct
3531  * and most of its subsidiary data are kept in TopTransactionContext; however
3532  * some data that can be discarded sooner appears in the CurTransactionContext
3533  * of the relevant subtransaction. Also, the individual event records are
3534  * kept in a separate sub-context of TopTransactionContext. This is done
3535  * mainly so that it's easy to tell from a memory context dump how much space
3536  * is being eaten by trigger events.
3537  *
3538  * Because the list of pending events can grow large, we go to some
3539  * considerable effort to minimize per-event memory consumption. The event
3540  * records are grouped into chunks and common data for similar events in the
3541  * same chunk is only stored once.
3542  *
3543  * XXX We need to be able to save the per-event data in a file if it grows too
3544  * large.
3545  * ----------
3546  */
3547 
3548 /* Per-trigger SET CONSTRAINT status */
3549 typedef struct SetConstraintTriggerData
3550 {
3551  Oid sct_tgoid;
3552  bool sct_tgisdeferred;
3553 } SetConstraintTriggerData;
3554 
3555 typedef SetConstraintTriggerData *SetConstraintTrigger;
3556 
3557 /*
3558  * SET CONSTRAINT intra-transaction status.
3559  *
3560  * We make this a single palloc'd object so it can be copied and freed easily.
3561  *
3562  * all_isset and all_isdeferred are used to keep track
3563  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3564  *
3565  * trigstates[] stores per-trigger tgisdeferred settings.
3566  */
3567 typedef struct SetConstraintStateData
3568 {
3569  bool all_isset;
3570  bool all_isdeferred;
3571  int numstates; /* number of trigstates[] entries in use */
3572  int numalloc; /* allocated size of trigstates[] */
3573  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3574 } SetConstraintStateData;
3575 
3576 typedef SetConstraintStateData *SetConstraintState;
3577 
3578 
3579 /*
3580  * Per-trigger-event data
3581  *
3582  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3583  * status bits and up to two tuple CTIDs. Each event record also has an
3584  * associated AfterTriggerSharedData that is shared across all instances of
3585  * similar events within a "chunk".
3586  *
3587  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3588  * fields. Updates of regular tables use two; inserts and deletes of regular
3589  * tables use one; foreign tables always use zero and save the tuple(s) to a
3590  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3591  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3592  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3593  * tuple(s). This permits storing tuples once regardless of the number of
3594  * row-level triggers on a foreign table.
3595  *
3596  * Note that we need triggers on foreign tables to be fired in exactly the
3597  * order they were queued, so that the tuples come out of the tuplestore in
3598  * the right order. To ensure that, we forbid deferrable (constraint)
3599  * triggers on foreign tables. This also ensures that such triggers do not
3600  * get deferred into outer trigger query levels, meaning that it's okay to
3601  * destroy the tuplestore at the end of the query level.
3602  *
3603  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3604  * require no ctid field. We lack the flag bit space to neatly represent that
3605  * distinct case, and it seems unlikely to be worth much trouble.
3606  *
3607  * Note: ats_firing_id is initially zero and is set to something else when
3608  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3609  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3610  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3611  * because all instances of the same type of event in a given event list will
3612  * be fired at the same time, if they were queued between the same firing
3613  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3614  * a new event to an existing AfterTriggerSharedData record.
3615  */
3616 typedef uint32 TriggerFlags;
3617 
3618 #define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order bits */
3619 #define AFTER_TRIGGER_DONE 0x10000000
3620 #define AFTER_TRIGGER_IN_PROGRESS 0x20000000
3621 /* bits describing the size and tuple sources of this event */
3622 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3623 #define AFTER_TRIGGER_FDW_FETCH 0x80000000
3624 #define AFTER_TRIGGER_1CTID 0x40000000
3625 #define AFTER_TRIGGER_2CTID 0xC0000000
3626 #define AFTER_TRIGGER_TUP_BITS 0xC0000000
3627 
3628 typedef struct AfterTriggerSharedData *AfterTriggerShared;
3629 
3630 typedef struct AfterTriggerSharedData
3631 {
3632  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3633  Oid ats_tgoid; /* the trigger's ID */
3634  Oid ats_relid; /* the relation it's on */
3635  CommandId ats_firing_id; /* ID for firing cycle */
3636  struct AfterTriggersTableData *ats_table; /* transition table access */
3637 } AfterTriggerSharedData;
3638 
3639 typedef struct AfterTriggerEventData *AfterTriggerEvent;
3640 
3641 typedef struct AfterTriggerEventData
3642 {
3643  TriggerFlags ate_flags; /* status bits and offset to shared data */
3644  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3645  ItemPointerData ate_ctid2; /* new updated tuple */
3646 } AfterTriggerEventData;
3647 
3648 /* AfterTriggerEventData, minus ate_ctid2 */
3649 typedef struct AfterTriggerEventDataOneCtid
3650 {
3651  TriggerFlags ate_flags; /* status bits and offset to shared data */
3652  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3653 } AfterTriggerEventDataOneCtid;
3654 
3655 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
3656 typedef struct AfterTriggerEventDataZeroCtids
3657 {
3658  TriggerFlags ate_flags; /* status bits and offset to shared data */
3659 } AfterTriggerEventDataZeroCtids;
3660 
3661 #define SizeofTriggerEvent(evt) \
3662  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3663  sizeof(AfterTriggerEventData) : \
3664  ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3665  sizeof(AfterTriggerEventDataOneCtid) : \
3666  sizeof(AfterTriggerEventDataZeroCtids))
3667 
3668 #define GetTriggerSharedData(evt) \
3669  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
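
The following is a minimal standalone sketch (not part of trigger.c) of the ate_flags encoding that SizeofTriggerEvent() and GetTriggerSharedData() rely on: the low-order AFTER_TRIGGER_OFFSET bits hold the byte distance from an event record to its shared record at the end of the chunk, while the high-order bits carry the DONE/IN_PROGRESS status and tuple-count flags. All names prefixed with "sketch_" are illustrative, not PostgreSQL APIs.

#include <assert.h>
#include <stdint.h>

#define SKETCH_OFFSET_MASK 0x0FFFFFFF	/* mirrors AFTER_TRIGGER_OFFSET */

/* Combine status bits with the offset to the shared record. */
static inline uint32_t
sketch_pack_flags(uint32_t statusbits, uint32_t offset_to_shared)
{
	assert((offset_to_shared & ~SKETCH_OFFSET_MASK) == 0);
	return (statusbits & ~SKETCH_OFFSET_MASK) | offset_to_shared;
}

/* Recover the offset, as GetTriggerSharedData() does with ate_flags. */
static inline uint32_t
sketch_shared_offset(uint32_t flags)
{
	return flags & SKETCH_OFFSET_MASK;
}
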
3670 
3671 /*
3672  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3673  * larger chunks (a slightly more sophisticated version of an expansible
3674  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3675  * AfterTriggerEventData records; the space between endfree and endptr is
3676  * occupied by AfterTriggerSharedData records.
3677  */
3678 typedef struct AfterTriggerEventChunk
3679 {
3680  struct AfterTriggerEventChunk *next; /* list link */
3681  char *freeptr; /* start of free space in chunk */
3682  char *endfree; /* end of free space in chunk */
3683  char *endptr; /* end of chunk */
3684  /* event data follows here */
3685 } AfterTriggerEventChunk;
3686 
3687 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3688 
3689 /* A list of events */
3690 typedef struct AfterTriggerEventList
3691 {
3692  AfterTriggerEventChunk *head;
3693  AfterTriggerEventChunk *tail;
3694  char *tailfree; /* freeptr of tail chunk */
3695 } AfterTriggerEventList;
3696 
3697 /* Macros to help in iterating over a list of events */
3698 #define for_each_chunk(cptr, evtlist) \
3699  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3700 #define for_each_event(eptr, cptr) \
3701  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3702  (char *) eptr < (cptr)->freeptr; \
3703  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3704 /* Use this if no special per-chunk processing is needed */
3705 #define for_each_event_chunk(eptr, cptr, evtlist) \
3706  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3707 
3708 /* Macros for iterating from a start point that might not be list start */
3709 #define for_each_chunk_from(cptr) \
3710  for (; cptr != NULL; cptr = cptr->next)
3711 #define for_each_event_from(eptr, cptr) \
3712  for (; \
3713  (char *) eptr < (cptr)->freeptr; \
3714  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3715 
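
As a standalone illustration (not part of trigger.c) of what the for_each_chunk/for_each_event macros above expand to, the sketch below walks a linked list of chunks and steps through variable-length records packed between each chunk's header and its freeptr; every record knows its own size, playing the role of SizeofTriggerEvent(). All types and names here are hypothetical.

#include <stddef.h>
#include <stdio.h>

typedef struct SketchEvent
{
	size_t		size;		/* stand-in for SizeofTriggerEvent(evt) */
	int			payload;
} SketchEvent;

typedef struct SketchChunk
{
	struct SketchChunk *next;	/* list link, like AfterTriggerEventChunk */
	char	   *freeptr;		/* end of the used part of data[] */
	char		data[256];		/* packed variable-length records */
} SketchChunk;

static void
sketch_walk(SketchChunk *head)
{
	for (SketchChunk *cptr = head; cptr != NULL; cptr = cptr->next)
	{
		char	   *p = cptr->data;

		while (p < cptr->freeptr)
		{
			SketchEvent *eptr = (SketchEvent *) p;

			printf("event payload %d\n", eptr->payload);
			p += eptr->size;	/* advance by this record's own size */
		}
	}
}
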
3716 
3717 /*
3718  * All per-transaction data for the AFTER TRIGGERS module.
3719  *
3720  * AfterTriggersData has the following fields:
3721  *
3722  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3723  * We mark firable events with the current firing cycle's ID so that we can
3724  * tell which ones to work on. This ensures sane behavior if a trigger
3725  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3726  * only fire those events that weren't already scheduled for firing.
3727  *
3728  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3729  * This is saved and restored across failed subtransactions.
3730  *
3731  * events is the current list of deferred events. This is global across
3732  * all subtransactions of the current transaction. In a subtransaction
3733  * abort, we know that the events added by the subtransaction are at the
3734  * end of the list, so it is relatively easy to discard them. The event
3735  * list chunks themselves are stored in event_cxt.
3736  *
3737  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3738  * (-1 when the stack is empty).
3739  *
3740  * query_stack[query_depth] is the per-query-level data, including these fields:
3741  *
3742  * events is a list of AFTER trigger events queued by the current query.
3743  * None of these are valid until the matching AfterTriggerEndQuery call
3744  * occurs. At that point we fire immediate-mode triggers, and append any
3745  * deferred events to the main events list.
3746  *
3747  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3748  * needed by events queued by the current query. (Note: we use just one
3749  * tuplestore even though more than one foreign table might be involved.
3750  * This is okay because tuplestores don't really care what's in the tuples
3751  * they store; but it's possible that someday it'd break.)
3752  *
3753  * tables is a List of AfterTriggersTableData structs for target tables
3754  * of the current query (see below).
3755  *
3756  * maxquerydepth is just the allocated length of query_stack.
3757  *
3758  * trans_stack holds per-subtransaction data, including these fields:
3759  *
3760  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3761  * state data. Each subtransaction level that modifies that state first
3762  * saves a copy, which we use to restore the state if we abort.
3763  *
3764  * events is a copy of the events head/tail pointers,
3765  * which we use to restore those values during subtransaction abort.
3766  *
3767  * query_depth is the subtransaction-start-time value of query_depth,
3768  * which we similarly use to clean up at subtransaction abort.
3769  *
3770  * firing_counter is the subtransaction-start-time value of firing_counter.
3771  * We use this to recognize which deferred triggers were fired (or marked
3772  * for firing) within an aborted subtransaction.
3773  *
3774  * We use GetCurrentTransactionNestLevel() to determine the correct array
3775  * index in trans_stack. maxtransdepth is the number of allocated entries in
3776  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3777  * in cases where errors during subxact abort cause multiple invocations
3778  * of AfterTriggerEndSubXact() at the same nesting depth.)
3779  *
3780  * We create an AfterTriggersTableData struct for each target table of the
3781  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3782  * either transition tables or statement-level triggers. This is used to
3783  * hold the relevant transition tables, as well as info tracking whether
3784  * we already queued the statement triggers. (We use that info to prevent
3785  * firing the same statement triggers more than once per statement, or really
3786  * once per transition table set.) These structs, along with the transition
3787  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3788  * That's sufficient lifespan because we don't allow transition tables to be
3789  * used by deferrable triggers, so they only need to survive until
3790  * AfterTriggerEndQuery.
3791  */
3792 typedef struct AfterTriggersQueryData AfterTriggersQueryData;
3793 typedef struct AfterTriggersTransData AfterTriggersTransData;
3794 typedef struct AfterTriggersTableData AfterTriggersTableData;
3795 
3796 typedef struct AfterTriggersData
3797 {
3798  CommandId firing_counter; /* next firing ID to assign */
3799  SetConstraintState state; /* the active S C state */
3800  AfterTriggerEventList events; /* deferred-event list */
3801  MemoryContext event_cxt; /* memory context for events, if any */
3802 
3803  /* per-query-level data: */
3804  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3805  int query_depth; /* current index in above array */
3806  int maxquerydepth; /* allocated len of above array */
3807 
3808  /* per-subtransaction-level data: */
3809  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3810  int maxtransdepth; /* allocated len of above array */
3811 } AfterTriggersData;
3812 
3813 struct AfterTriggersQueryData
3814 {
3815  AfterTriggerEventList events; /* events pending from this query */
3816  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3817  List *tables; /* list of AfterTriggersTableData, see below */
3818 };
3819 
3820 struct AfterTriggersTransData
3821 {
3822  /* these fields are just for resetting at subtrans abort: */
3823  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3824  AfterTriggerEventList events; /* saved list pointer */
3825  int query_depth; /* saved query_depth */
3826  CommandId firing_counter; /* saved firing_counter */
3827 };
3828 
3829 struct AfterTriggersTableData
3830 {
3831  /* relid + cmdType form the lookup key for these structs: */
3832  Oid relid; /* target table's OID */
3833  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3834  bool closed; /* true when no longer OK to add tuples */
3835  bool before_trig_done; /* did we already queue BS triggers? */
3836  bool after_trig_done; /* did we already queue AS triggers? */
3837  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3838  Tuplestorestate *old_tuplestore; /* "old" transition table, if any */
3839  Tuplestorestate *new_tuplestore; /* "new" transition table, if any */
3840 };
3841 
3842 static AfterTriggersData afterTriggers;
3843 
3844 static void AfterTriggerExecute(AfterTriggerEvent event,
3845  Relation rel, TriggerDesc *trigdesc,
3846  FmgrInfo *finfo,
3847  Instrumentation *instr,
3848  MemoryContext per_tuple_context,
3849  TupleTableSlot *trig_tuple_slot1,
3850  TupleTableSlot *trig_tuple_slot2);
3851 static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid,
3852  CmdType cmdType);
3853 static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs);
3854 static SetConstraintState SetConstraintStateCreate(int numalloc);
3855 static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
3856 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3857  Oid tgoid, bool tgisdeferred);
3858 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3859 
3860 
3861 /*
3862  * Get the FDW tuplestore for the current trigger query level, creating it
3863  * if necessary.
3864  */
3865 static Tuplestorestate *
3866 GetCurrentFDWTuplestore(void)
3867 {
3868  Tuplestorestate *ret;
3869 
3870  ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
3871  if (ret == NULL)
3872  {
3873  MemoryContext oldcxt;
3874  ResourceOwner saveResourceOwner;
3875 
3876  /*
3877  * Make the tuplestore valid until end of subtransaction. We really
3878  * only need it until AfterTriggerEndQuery().
3879  */
3880  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
3881  saveResourceOwner = CurrentResourceOwner;
3882  CurrentResourceOwner = CurTransactionResourceOwner;
3883 
3884  ret = tuplestore_begin_heap(false, false, work_mem);
3885 
3886  CurrentResourceOwner = saveResourceOwner;
3887  MemoryContextSwitchTo(oldcxt);
3888 
3889  afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
3890  }
3891 
3892  return ret;
3893 }
3894 
3895 /* ----------
3896  * afterTriggerCheckState()
3897  *
3898  * Returns true if the trigger event is actually in state DEFERRED.
3899  * ----------
3900  */
3901 static bool
3902 afterTriggerCheckState(AfterTriggerShared evtshared)
3903 {
3904  Oid tgoid = evtshared->ats_tgoid;
3905  SetConstraintState state = afterTriggers.state;
3906  int i;
3907 
3908  /*
3909  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3910  * constraints declared NOT DEFERRABLE), the state is always false.
3911  */
3912  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3913  return false;
3914 
3915  /*
3916  * If constraint state exists, SET CONSTRAINTS might have been executed
3917  * either for this trigger or for all triggers.
3918  */
3919  if (state != NULL)
3920  {
3921  /* Check for SET CONSTRAINTS for this specific trigger. */
3922  for (i = 0; i < state->numstates; i++)
3923  {
3924  if (state->trigstates[i].sct_tgoid == tgoid)
3925  return state->trigstates[i].sct_tgisdeferred;
3926  }
3927 
3928  /* Check for SET CONSTRAINTS ALL. */
3929  if (state->all_isset)
3930  return state->all_isdeferred;
3931  }
3932 
3933  /*
3934  * Otherwise return the default state for the trigger.
3935  */
3936  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3937 }
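
A standalone sketch (not part of trigger.c) of the precedence afterTriggerCheckState() just applied: an explicit per-trigger SET CONSTRAINTS entry wins over a SET CONSTRAINTS ALL setting, which in turn wins over the trigger's own INITIALLY DEFERRED/IMMEDIATE default. The simplified types and names below are hypothetical.

#include <stdbool.h>

typedef struct SketchTrigState
{
	unsigned	tgoid;
	bool		tgisdeferred;
} SketchTrigState;

typedef struct SketchSetConstraintState
{
	bool		all_isset;
	bool		all_isdeferred;
	int			numstates;
	SketchTrigState trigstates[8];
} SketchSetConstraintState;

static bool
sketch_is_deferred(const SketchSetConstraintState *state,
				   unsigned tgoid, bool initdeferred)
{
	if (state != NULL)
	{
		/* a per-trigger SET CONSTRAINTS entry takes precedence */
		for (int i = 0; i < state->numstates; i++)
			if (state->trigstates[i].tgoid == tgoid)
				return state->trigstates[i].tgisdeferred;
		/* then SET CONSTRAINTS ALL, if one was executed */
		if (state->all_isset)
			return state->all_isdeferred;
	}
	/* otherwise the trigger's declared default applies */
	return initdeferred;
}
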
3938 
3939 
3940 /* ----------
3941  * afterTriggerAddEvent()
3942  *
3943  * Add a new trigger event to the specified queue.
3944  * The passed-in event data is copied.
3945  * ----------
3946  */
3947 static void
3948 afterTriggerAddEvent(AfterTriggerEventList *events,
3949  AfterTriggerEvent event, AfterTriggerShared evtshared)
3950 {
3951  Size eventsize = SizeofTriggerEvent(event);
3952  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3953  AfterTriggerEventChunk *chunk;
3954  AfterTriggerShared newshared;
3955  AfterTriggerEvent newevent;
3956 
3957  /*
3958  * If empty list or not enough room in the tail chunk, make a new chunk.
3959  * We assume here that a new shared record will always be needed.
3960  */
3961  chunk = events->tail;
3962  if (chunk == NULL ||
3963  chunk->endfree - chunk->freeptr < needed)
3964  {
3965  Size chunksize;
3966 
3967  /* Create event context if we didn't already */
3968  if (afterTriggers.event_cxt == NULL)
3969  afterTriggers.event_cxt =
3970  AllocSetContextCreate(TopTransactionContext,
3971  "AfterTriggerEvents",
3972  ALLOCSET_DEFAULT_SIZES);
3973 
3974  /*
3975  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3976  * These numbers are fairly arbitrary, though there is a hard limit at
3977  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3978  * shared records using the available space in ate_flags. Another
3979  * constraint is that if the chunk size gets too huge, the search loop
3980  * below would get slow given a (not too common) usage pattern with
3981  * many distinct event types in a chunk. Therefore, we double the
3982  * preceding chunk size only if there weren't too many shared records
3983  * in the preceding chunk; otherwise we halve it. This gives us some
3984  * ability to adapt to the actual usage pattern of the current query
3985  * while still having large chunk sizes in typical usage. All chunk
3986  * sizes used should be MAXALIGN multiples, to ensure that the shared
3987  * records will be aligned safely.
3988  */
3989 #define MIN_CHUNK_SIZE 1024
3990 #define MAX_CHUNK_SIZE (1024*1024)
3991 
3992 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3993 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3994 #endif
3995 
3996  if (chunk == NULL)
3997  chunksize = MIN_CHUNK_SIZE;
3998  else
3999  {
4000  /* preceding chunk size... */
4001  chunksize = chunk->endptr - (char *) chunk;
4002  /* check number of shared records in preceding chunk */
4003  if ((chunk->endptr - chunk->endfree) <=
4004  (100 * sizeof(AfterTriggerSharedData)))
4005  chunksize *= 2; /* okay, double it */
4006  else
4007  chunksize /= 2; /* too many shared records */
4008  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
4009  }
4010  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
4011  chunk->next = NULL;
4012  chunk->freeptr = CHUNK_DATA_START(chunk);
4013  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
4014  Assert(chunk->endfree - chunk->freeptr >= needed);
4015 
4016  if (events->head == NULL)
4017  events->head = chunk;
4018  else
4019  events->tail->next = chunk;
4020  events->tail = chunk;
4021  /* events->tailfree is now out of sync, but we'll fix it below */
4022  }
4023 
4024  /*
4025  * Try to locate a matching shared-data record already in the chunk. If
4026  * none, make a new one.
4027  */
4028  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
4029  (char *) newshared >= chunk->endfree;
4030  newshared--)
4031  {
4032  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
4033  newshared->ats_relid == evtshared->ats_relid &&
4034  newshared->ats_event == evtshared->ats_event &&
4035  newshared->ats_table == evtshared->ats_table &&
4036  newshared->ats_firing_id == 0)
4037  break;
4038  }
4039  if ((char *) newshared < chunk->endfree)
4040  {
4041  *newshared = *evtshared;
4042  newshared->ats_firing_id = 0; /* just to be sure */
4043  chunk->endfree = (char *) newshared;
4044  }
4045 
4046  /* Insert the data */
4047  newevent = (AfterTriggerEvent) chunk->freeptr;
4048  memcpy(newevent, event, eventsize);
4049  /* ... and link the new event to its shared record */
4050  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
4051  newevent->ate_flags |= (char *) newshared - (char *) newevent;
4052 
4053  chunk->freeptr += eventsize;
4054  events->tailfree = chunk->freeptr;
4055 }
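
To make the chunk-sizing comment above concrete, here is a standalone sketch (not part of trigger.c) of the policy afterTriggerAddEvent() uses when it must allocate a new chunk: start at 1KB, double when the previous chunk contained few shared records, halve when it contained many, and never exceed 1MB. The helper name and parameters are hypothetical.

#include <stddef.h>

#define SKETCH_MIN_CHUNK	1024			/* mirrors MIN_CHUNK_SIZE */
#define SKETCH_MAX_CHUNK	(1024 * 1024)	/* mirrors MAX_CHUNK_SIZE */

static size_t
sketch_next_chunk_size(size_t prev_chunk_size,
					   size_t prev_shared_bytes,
					   size_t shared_record_size)
{
	size_t		next;

	if (prev_chunk_size == 0)
		return SKETCH_MIN_CHUNK;		/* first chunk in the list */

	if (prev_shared_bytes <= 100 * shared_record_size)
		next = prev_chunk_size * 2;		/* few distinct event types: grow */
	else
		next = prev_chunk_size / 2;		/* many shared records: shrink */

	return next > SKETCH_MAX_CHUNK ? SKETCH_MAX_CHUNK : next;
}
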
4056 
4057 /* ----------
4058  * afterTriggerFreeEventList()
4059  *
4060  * Free all the event storage in the given list.
4061  * ----------
4062  */
4063 static void
4064 afterTriggerFreeEventList(AfterTriggerEventList *events)
4065 {
4066  AfterTriggerEventChunk *chunk;
4067 
4068  while ((chunk = events->head) != NULL)
4069  {
4070  events->head = chunk->next;
4071  pfree(chunk);
4072  }
4073  events->tail = NULL;
4074  events->tailfree = NULL;
4075 }
4076 
4077 /* ----------
4078  * afterTriggerRestoreEventList()
4079  *
4080  * Restore an event list to its prior length, removing all the events
4081  * added since it had the value old_events.
4082  * ----------
4083  */
4084 static void
4085 afterTriggerRestoreEventList(AfterTriggerEventList *events,
4086  const AfterTriggerEventList *old_events)
4087 {
4088  AfterTriggerEventChunk *chunk;
4089  AfterTriggerEventChunk *next_chunk;
4090 
4091  if (old_events->tail == NULL)
4092  {
4093  /* restoring to a completely empty state, so free everything */
4094  afterTriggerFreeEventList(events);
4095  }
4096  else
4097  {
4098  *events = *old_events;
4099  /* free any chunks after the last one we want to keep */
4100  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
4101  {
4102  next_chunk = chunk->next;
4103  pfree(chunk);
4104  }
4105  /* and clean up the tail chunk to be the right length */
4106  events->tail->next = NULL;
4107  events->tail->freeptr = events->tailfree;
4108 
4109  /*
4110  * We don't make any effort to remove now-unused shared data records.
4111  * They might still be useful, anyway.
4112  */
4113  }
4114 }
4115 
4116 /* ----------
4117  * afterTriggerDeleteHeadEventChunk()
4118  *
4119  * Remove the first chunk of events from the query level's event list.
4120  * Keep any event list pointers elsewhere in the query level's data
4121  * structures in sync.
4122  * ----------
4123  */
4124 static void
4125 afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
4126 {
4127  AfterTriggerEventChunk *target = qs->events.head;
4128  ListCell *lc;
4129 
4130  Assert(target && target->next);
4131 
4132  /*
4133  * First, update any pointers in the per-table data, so that they won't be
4134  * dangling. Resetting obsoleted pointers to NULL will make
4135  * cancel_prior_stmt_triggers start from the list head, which is fine.
4136  */
4137  foreach(lc, qs->tables)
4138  {
4139  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4140 
4141  if (table->after_trig_done &&
4142  table->after_trig_events.tail == target)
4143  {
4144  table->after_trig_events.head = NULL;
4145  table->after_trig_events.tail = NULL;
4146  table->after_trig_events.tailfree = NULL;
4147  }
4148  }
4149 
4150  /* Now we can flush the head chunk */
4151  qs->events.head = target->next;
4152  pfree(target);
4153 }
4154 
4155 
4156 /* ----------
4157  * AfterTriggerExecute()
4158  *
4159  * Fetch the required tuples back from the heap and fire one
4160  * single trigger function.
4161  *
4162  * Frequently, this will be fired many times in a row for triggers of
4163  * a single relation. Therefore, we cache the open relation and provide
4164  * fmgr lookup cache space at the caller level. (For triggers fired at
4165  * the end of a query, we can even piggyback on the executor's state.)
4166  *
4167  * event: event currently being fired.
4168  * rel: open relation for event.
4169  * trigdesc: working copy of rel's trigger info.
4170  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4171  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4172  * or NULL if no instrumentation is wanted.
4173  * per_tuple_context: memory context to call trigger function in.
4174  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4175  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
4176  * ----------
4177  */
4178 static void
4179 AfterTriggerExecute(AfterTriggerEvent event,
4180  Relation rel, TriggerDesc *trigdesc,
4181  FmgrInfo *finfo, Instrumentation *instr,
4182  MemoryContext per_tuple_context,
4183  TupleTableSlot *trig_tuple_slot1,
4184  TupleTableSlot *trig_tuple_slot2)
4185 {
4186  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4187  Oid tgoid = evtshared->ats_tgoid;
4188  TriggerData LocTriggerData;
4189  HeapTupleData tuple1;
4190  HeapTupleData tuple2;
4191  HeapTuple rettuple;
4192  Buffer buffer1 = InvalidBuffer;
4193  Buffer buffer2 = InvalidBuffer;
4194  int tgindx;
4195 
4196  /*
4197  * Locate trigger in trigdesc.
4198  */
4199  LocTriggerData.tg_trigger = NULL;
4200  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4201  {
4202  if (trigdesc->triggers[tgindx].tgoid == tgoid)
4203  {
4204  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4205  break;
4206  }
4207  }
4208  if (LocTriggerData.tg_trigger == NULL)
4209  elog(ERROR, "could not find trigger %u", tgoid);
4210 
4211  /*
4212  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4213  * to include time spent re-fetching tuples in the trigger cost.
4214  */
4215  if (instr)
4216  InstrStartNode(instr + tgindx);
4217 
4218  /*
4219  * Fetch the required tuple(s).
4220  */
4221  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4222  {
4223  case AFTER_TRIGGER_FDW_FETCH:
4224  {
4225  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4226 
4227  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4228  trig_tuple_slot1))
4229  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4230 
4231  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4232  TRIGGER_EVENT_UPDATE &&
4233  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4234  trig_tuple_slot2))
4235  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4236  }
4237  /* fall through */
4238  case AFTER_TRIGGER_FDW_REUSE:
4239 
4240  /*
4241  * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple()
4242  * ensures that tg_trigtuple does not reference tuplestore memory.
4243  * (It is formally possible for the trigger function to queue
4244  * trigger events that add to the same tuplestore, which can push
4245  * other tuples out of memory.) The distinction is academic,
4246  * because we start with a minimal tuple that ExecFetchSlotTuple()
4247  * must materialize anyway.
4248  */
4249  LocTriggerData.tg_trigtuple =
4250  ExecMaterializeSlot(trig_tuple_slot1);
4251  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
4252 
4253  LocTriggerData.tg_newtuple =
4254  ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4255  TRIGGER_EVENT_UPDATE) ?
4256  ExecMaterializeSlot(trig_tuple_slot2) : NULL;
4257  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
4258 
4259  break;
4260 
4261  default:
4262  if (ItemPointerIsValid(&(event->ate_ctid1)))
4263  {
4264  ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
4265  if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer1, false, NULL))
4266  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4267  LocTriggerData.tg_trigtuple = &tuple1;
4268  LocTriggerData.tg_trigtuplebuf = buffer1;
4269  }
4270  else
4271  {
4272  LocTriggerData.tg_trigtuple = NULL;
4273  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
4274  }
4275 
4276  /* don't touch ctid2 if not there */
4277  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
4278  AFTER_TRIGGER_2CTID &&
4279  ItemPointerIsValid(&(event->ate_ctid2)))
4280  {
4281  ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
4282  if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer2, false, NULL))
4283  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4284  LocTriggerData.tg_newtuple = &tuple2;
4285  LocTriggerData.tg_newtuplebuf = buffer2;
4286  }
4287  else
4288  {
4289  LocTriggerData.tg_newtuple = NULL;
4290  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
4291  }
4292  }
4293 
4294  /*
4295  * Set up the tuplestore information to let the trigger have access to
4296  * transition tables. When we first make a transition table available to
4297  * a trigger, mark it "closed" so that it cannot change anymore. If any
4298  * additional events of the same type get queued in the current trigger
4299  * query level, they'll go into new transition tables.
4300  */
4301  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4302  if (evtshared->ats_table)
4303  {
4304  if (LocTriggerData.tg_trigger->tgoldtable)
4305  {
4306  LocTriggerData.tg_oldtable = evtshared->ats_table->old_tuplestore;
4307  evtshared->ats_table->closed = true;
4308  }
4309 
4310  if (LocTriggerData.tg_trigger->tgnewtable)
4311  {
4312  LocTriggerData.tg_newtable = evtshared->ats_table->new_tuplestore;
4313  evtshared->ats_table->closed = true;
4314  }
4315  }
4316 
4317  /*
4318  * Setup the remaining trigger information
4319  */
4320  LocTriggerData.type = T_TriggerData;
4321  LocTriggerData.tg_event =
4322  evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
4323  LocTriggerData.tg_relation = rel;
4324 
4325  MemoryContextReset(per_tuple_context);
4326 
4327  /*
4328  * Call the trigger and throw away any possibly returned updated tuple.
4329  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4330  */
4331  rettuple = ExecCallTriggerFunc(&LocTriggerData,
4332  tgindx,
4333  finfo,
4334  NULL,
4335  per_tuple_context);
4336  if (rettuple != NULL &&
4337  rettuple != LocTriggerData.tg_trigtuple &&
4338  rettuple != LocTriggerData.tg_newtuple)
4339  heap_freetuple(rettuple);
4340 
4341  /*
4342  * Release buffers
4343  */
4344  if (buffer1 != InvalidBuffer)
4345  ReleaseBuffer(buffer1);
4346  if (buffer2 != InvalidBuffer)
4347  ReleaseBuffer(buffer2);
4348 
4349  /*
4350  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4351  * one "tuple returned" (really the number of firings).
4352  */
4353  if (instr)
4354  InstrStopNode(instr + tgindx, 1);
4355 }
4356 
4357 
4358 /*
4359  * afterTriggerMarkEvents()
4360  *
4361  * Scan the given event list for not yet invoked events. Mark the ones
4362  * that can be invoked now with the current firing ID.
4363  *
4364  * If move_list isn't NULL, events that are not to be invoked now are
4365  * transferred to move_list.
4366  *
4367  * When immediate_only is true, do not invoke currently-deferred triggers.
4368  * (This will be false only at main transaction exit.)
4369  *
4370  * Returns true if any invokable events were found.
4371  */
4372 static bool
4373 afterTriggerMarkEvents(AfterTriggerEventList *events,
4374  AfterTriggerEventList *move_list,
4375  bool immediate_only)
4376 {
4377  bool found = false;
4378  AfterTriggerEvent event;
4379  AfterTriggerEventChunk *chunk;
4380 
4381  for_each_event_chunk(event, chunk, *events)
4382  {
4383  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4384  bool defer_it = false;
4385 
4386  if (!(event->ate_flags &
4387  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
4388  {
4389  /*
4390  * This trigger hasn't been called or scheduled yet. Check if we
4391  * should call it now.
4392  */
4393  if (immediate_only && afterTriggerCheckState(evtshared))
4394  {
4395  defer_it = true;
4396  }
4397  else
4398  {
4399  /*
4400  * Mark it as to be fired in this firing cycle.
4401  */
4402  evtshared->ats_firing_id = afterTriggers.firing_counter;
4403  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4404  found = true;
4405  }
4406  }
4407 
4408  /*
4409  * If it's deferred, move it to move_list, if requested.
4410  */
4411  if (defer_it && move_list != NULL)
4412  {
4413  /* add it to move_list */
4414  afterTriggerAddEvent(move_list, event, evtshared);
4415  /* mark original copy "done" so we don't do it again */
4416  event->ate_flags |= AFTER_TRIGGER_DONE;
4417  }
4418  }
4419 
4420  return found;
4421 }
4422 
4423 /*
4424  * afterTriggerInvokeEvents()
4425  *
4426  * Scan the given event list for events that are marked as to be fired
4427  * in the current firing cycle, and fire them.
4428  *
4429  * If estate isn't NULL, we use its result relation info to avoid repeated
4430  * openings and closing of trigger target relations. If it is NULL, we
4431  * make one locally to cache the info in case there are multiple trigger
4432  * events per rel.
4433  *
4434  * When delete_ok is true, it's safe to delete fully-processed events.
4435  * (We are not very tense about that: we simply reset a chunk to be empty
4436  * if all its events got fired. The objective here is just to avoid useless
4437  * rescanning of events when a trigger queues new events during transaction
4438  * end, so it's not necessary to worry much about the case where only
4439  * some events are fired.)
4440  *
4441  * Returns true if no unfired events remain in the list (this allows us
4442  * to avoid repeating afterTriggerMarkEvents).
4443  */
4444 static bool
4445 afterTriggerInvokeEvents(AfterTriggerEventList *events,
4446  CommandId firing_id,
4447  EState *estate,
4448  bool delete_ok)
4449 {
4450  bool all_fired = true;
4451  AfterTriggerEventChunk *chunk;
4452  MemoryContext per_tuple_context;
4453  bool local_estate = false;
4454  Relation rel = NULL;
4455  TriggerDesc *trigdesc = NULL;
4456  FmgrInfo *finfo = NULL;
4457  Instrumentation *instr = NULL;
4458  TupleTableSlot *slot1 = NULL,
4459  *slot2 = NULL;
4460 
4461  /* Make a local EState if need be */
4462  if (estate == NULL)
4463  {
4464  estate = CreateExecutorState();
4465  local_estate = true;
4466  }
4467 
4468  /* Make a per-tuple memory context for trigger function calls */
4469  per_tuple_context =
4470  AllocSetContextCreate(CurrentMemoryContext,
4471  "AfterTriggerTupleContext",
4472  ALLOCSET_DEFAULT_SIZES);
4473 
4474  for_each_chunk(chunk, *events)
4475  {
4476  AfterTriggerEvent event;
4477  bool all_fired_in_chunk = true;
4478 
4479  for_each_event(event, chunk)
4480  {
4481  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4482 
4483  /*
4484  * Is it one for me to fire?
4485  */
4486  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4487  evtshared->ats_firing_id == firing_id)
4488  {
4489  /*
4490  * So let's fire it... but first, find the correct relation if
4491  * this is not the same relation as before.
4492  */
4493  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4494  {
4495  ResultRelInfo *rInfo;
4496 
4497  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
4498  rel = rInfo->ri_RelationDesc;
4499  trigdesc = rInfo->ri_TrigDesc;
4500  finfo = rInfo->ri_TrigFunctions;
4501  instr = rInfo->ri_TrigInstrument;
4502  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4503  {
4504  if (slot1 != NULL)
4505  {
4506  ExecDropSingleTupleTableSlot(slot1);
4507  ExecDropSingleTupleTableSlot(slot2);
4508  }
4509  slot1 = MakeSingleTupleTableSlot(rel->rd_att);
4510  slot2 = MakeSingleTupleTableSlot(rel->rd_att);
4511  }
4512  if (trigdesc == NULL) /* should not happen */
4513  elog(ERROR, "relation %u has no triggers",
4514  evtshared->ats_relid);
4515  }
4516 
4517  /*
4518  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4519  * still set, so recursive examinations of the event list
4520  * won't try to re-fire it.
4521  */
4522  AfterTriggerExecute(event, rel, trigdesc, finfo, instr,
4523  per_tuple_context, slot1, slot2);
4524 
4525  /*
4526  * Mark the event as done.
4527  */
4528  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4529  event->ate_flags |= AFTER_TRIGGER_DONE;
4530  }
4531  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4532  {
4533  /* something remains to be done */
4534  all_fired = all_fired_in_chunk = false;
4535  }
4536  }
4537 
4538  /* Clear the chunk if delete_ok and nothing left of interest */
4539  if (delete_ok && all_fired_in_chunk)
4540  {
4541  chunk->freeptr = CHUNK_DATA_START(chunk);
4542  chunk->endfree = chunk->endptr;
4543 
4544  /*
4545  * If it's last chunk, must sync event list's tailfree too. Note
4546  * that delete_ok must NOT be passed as true if there could be
4547  * additional AfterTriggerEventList values pointing at this event
4548  * list, since we'd fail to fix their copies of tailfree.
4549  */
4550  if (chunk == events->tail)
4551  events->tailfree = chunk->freeptr;
4552  }
4553  }
4554  if (slot1 != NULL)
4555  {
4556  ExecDropSingleTupleTableSlot(slot1);
4557  ExecDropSingleTupleTableSlot(slot2);
4558  }
4559 
4560  /* Release working resources */
4561  MemoryContextDelete(per_tuple_context);
4562 
4563  if (local_estate)
4564  {
4565  ExecCleanUpTriggerState(estate);
4566  FreeExecutorState(estate);
4567  }
4568 
4569  return all_fired;
4570 }
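
The pair of functions above implements a two-phase protocol: afterTriggerMarkEvents() stamps every not-yet-handled event with the current firing ID, and afterTriggerInvokeEvents() then fires exactly the events carrying that ID, so a recursive SET CONSTRAINTS issued from inside a trigger (which allocates a new firing ID) cannot re-fire events already claimed by an outer cycle. The standalone sketch below (not part of trigger.c, all names hypothetical) shows the protocol on a flat array instead of a chunked list.

#include <stdbool.h>
#include <stdio.h>

typedef struct SketchDeferredEvent
{
	bool		done;
	bool		in_progress;
	unsigned	firing_id;
} SketchDeferredEvent;

/* Phase 1: claim unhandled events for this firing cycle. */
static bool
sketch_mark(SketchDeferredEvent *evs, int n, unsigned firing_id)
{
	bool		found = false;

	for (int i = 0; i < n; i++)
	{
		if (!evs[i].done && !evs[i].in_progress)
		{
			evs[i].in_progress = true;
			evs[i].firing_id = firing_id;
			found = true;
		}
	}
	return found;
}

/* Phase 2: fire only the events claimed by this cycle. */
static void
sketch_invoke(SketchDeferredEvent *evs, int n, unsigned firing_id)
{
	for (int i = 0; i < n; i++)
	{
		if (evs[i].in_progress && evs[i].firing_id == firing_id)
		{
			printf("fire event %d\n", i);	/* stand-in for AfterTriggerExecute */
			evs[i].in_progress = false;
			evs[i].done = true;
		}
	}
}
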
4571 
4572 
4573 /*
4574  * GetAfterTriggersTableData
4575  *
4576  * Find or create an AfterTriggersTableData struct for the specified
4577  * trigger event (relation + operation type). Ignore existing structs
4578  * marked "closed"; we don't want to put any additional tuples into them,
4579  * nor change their stmt-triggers-fired state.
4580  *
4581  * Note: the AfterTriggersTableData list is allocated in the current
4582  * (sub)transaction's CurTransactionContext. This is OK because
4583  * we don't need it to live past AfterTriggerEndQuery.
4584  */
4585 static AfterTriggersTableData *
4586 GetAfterTriggersTableData(Oid relid, CmdType cmdType)
4587 {
4588  AfterTriggersTableData *table;
4589  AfterTriggersQueryData *qs;
4590  MemoryContext oldcxt;
4591  ListCell *lc;
4592 
4593  /* Caller should have ensured query_depth is OK. */
4594  Assert(afterTriggers.query_depth >= 0 &&
4595  afterTriggers.query_depth < afterTriggers.maxquerydepth);
4596  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4597 
4598  foreach(lc, qs->tables)
4599  {
4600  table = (AfterTriggersTableData *) lfirst(lc);
4601  if (table->relid == relid && table->cmdType == cmdType &&
4602  !table->closed)
4603  return table;
4604  }
4605 
4606  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4607 
4608  table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
4609  table->relid = relid;
4610  table->cmdType = cmdType;
4611  qs->tables = lappend(qs->tables, table);
4612 
4613  MemoryContextSwitchTo(oldcxt);
4614 
4615  return table;
4616 }
4617 
4618 
4619 /*
4620  * MakeTransitionCaptureState
4621  *
4622  * Make a TransitionCaptureState object for the given TriggerDesc, target
4623  * relation, and operation type. The TCS object holds all the state needed
4624  * to decide whether to capture tuples in transition tables.
4625  *
4626  * If there are no triggers in 'trigdesc' that request relevant transition
4627  * tables, then return NULL.
4628  *
4629  * The resulting object can be passed to the ExecAR* functions. The caller
4630  * should set tcs_map or tcs_original_insert_tuple as appropriate when dealing
4631  * with child tables.
4632  *
4633  * Note that we copy the flags from a parent table into this struct (rather
4634  * than subsequently using the relation's TriggerDesc directly) so that we can
4635  * use it to control collection of transition tuples from child tables.
4636  *
4637  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4638  * on the same table during one query should share one transition table.
4639  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4640  * looked up using the table OID + CmdType, and are merely referenced by
4641  * the TransitionCaptureState objects we hand out to callers.
4642  */
4643 TransitionCaptureState *
4644 MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
4645 {
4646  TransitionCaptureState *state;
4647  bool need_old,
4648  need_new;
4649  AfterTriggersTableData *table;
4650  MemoryContext oldcxt;
4651  ResourceOwner saveResourceOwner;
4652 
4653  if (trigdesc == NULL)
4654  return NULL;
4655 
4656  /* Detect which table(s) we need. */
4657  switch (cmdType)
4658  {
4659  case CMD_INSERT:
4660  need_old = false;
4661  need_new = trigdesc->trig_insert_new_table;
4662  break;
4663  case CMD_UPDATE:
4664  need_old = trigdesc->trig_update_old_table;
4665  need_new = trigdesc->trig_update_new_table;
4666  break;
4667  case CMD_DELETE:
4668  need_old = trigdesc->trig_delete_old_table;
4669  need_new = false;
4670  break;
4671  default:
4672  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4673  need_old = need_new = false; /* keep compiler quiet */
4674  break;
4675  }
4676  if (!need_old && !need_new)
4677  return NULL;
4678 
4679  /* Check state, like AfterTriggerSaveEvent. */
4680  if (afterTriggers.query_depth < 0)
4681  elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4682 
4683  /* Be sure we have enough space to record events at this query depth. */
4684  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4685  AfterTriggerEnlargeQueryState();
4686 
4687  /*
4688  * Find or create an AfterTriggersTableData struct to hold the
4689  * tuplestore(s). If there's a matching struct but it's marked closed,
4690  * ignore it; we need a newer one.
4691  *
4692  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4693  * allocated in the current (sub)transaction's CurTransactionContext, and
4694  * the tuplestores are managed by the (sub)transaction's resource owner.
4695  * This is sufficient lifespan because we do not allow triggers using
4696  * transition tables to be deferrable; they will be fired during
4697  * AfterTriggerEndQuery, after which it's okay to delete the data.
4698  */
4699  table = GetAfterTriggersTableData(relid, cmdType);
4700 
4701  /* Now create required tuplestore(s), if we don't have them already. */
4702  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4703  saveResourceOwner = CurrentResourceOwner;
4704  CurrentResourceOwner = CurTransactionResourceOwner;
4705 
4706  if (need_old && table->old_tuplestore == NULL)
4707  table->old_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4708  if (need_new && table->new_tuplestore == NULL)
4709  table->new_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4710 
4711  CurrentResourceOwner = saveResourceOwner;
4712  MemoryContextSwitchTo(oldcxt);
4713 
4714  /* Now build the TransitionCaptureState struct, in caller's context */
4715  state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState));
4716  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4717  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4718  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4719  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4720  state->tcs_private = table;
4721 
4722  return state;
4723 }
4724 
4725 
4726 /* ----------
4727  * AfterTriggerBeginXact()
4728  *
4729  * Called at transaction start (either BEGIN or implicit for single
4730  * statement outside of transaction block).
4731  * ----------
4732  */
4733 void
4734 AfterTriggerBeginXact(void)
4735 {
4736  /*
4737  * Initialize after-trigger state structure to empty
4738  */
4739  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4740  afterTriggers.query_depth = -1;
4741 
4742  /*
4743  * Verify that there is no leftover state remaining. If these assertions
4744  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4745  * up properly.
4746  */
4747  Assert(afterTriggers.state == NULL);
4748  Assert(afterTriggers.query_stack == NULL);
4749  Assert(afterTriggers.maxquerydepth == 0);
4750  Assert(afterTriggers.event_cxt == NULL);
4751  Assert(afterTriggers.events.head == NULL);
4752  Assert(afterTriggers.trans_stack == NULL);
4753  Assert(afterTriggers.maxtransdepth == 0);
4754 }
4755 
4756 
4757 /* ----------
4758  * AfterTriggerBeginQuery()
4759  *
4760  * Called just before we start processing a single query within a
4761  * transaction (or subtransaction). Most of the real work gets deferred
4762  * until somebody actually tries to queue a trigger event.
4763  * ----------
4764  */
4765 void
4766 AfterTriggerBeginQuery(void)
4767 {
4768  /* Increase the query stack depth */
4769  afterTriggers.query_depth++;
4770 }
4771 
4772 
4773 /* ----------
4774  * AfterTriggerEndQuery()
4775  *
4776  * Called after one query has been completely processed. At this time
4777  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4778  * transfer deferred trigger events to the global deferred-trigger list.
4779  *
4780  * Note that this must be called BEFORE closing down the executor
4781  * with ExecutorEnd, because we make use of the EState's info about
4782  * target relations. Normally it is called from ExecutorFinish.
4783  * ----------
4784  */
4785 void
4786 AfterTriggerEndQuery(EState *estate)
4787 {
4788  AfterTriggersQueryData *qs;
4789 
4790  /* Must be inside a query, too */
4791  Assert(afterTriggers.query_depth >= 0);
4792 
4793  /*
4794  * If we never even got as far as initializing the event stack, there
4795  * certainly won't be any events, so exit quickly.
4796  */
4797  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4798  {
4799  afterTriggers.query_depth--;
4800  return;
4801  }
4802 
4803  /*
4804  * Process all immediate-mode triggers queued by the query, and move the
4805  * deferred ones to the main list of deferred events.
4806  *
4807  * Notice that we decide which ones will be fired, and put the deferred
4808  * ones on the main list, before anything is actually fired. This ensures
4809  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4810  * IMMEDIATE: all events we have decided to defer will be available for it
4811  * to fire.
4812  *
4813  * We loop in case a trigger queues more events at the same query level.
4814  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4815  * will instead fire any triggers in a dedicated query level. Foreign key
4816  * enforcement triggers do add to the current query level, thanks to their
4817  * passing fire_triggers = false to SPI_execute_snapshot(). Other
4818  * C-language triggers might do likewise.
4819  *
4820  * If we find no firable events, we don't have to increment
4821  * firing_counter.
4822  */
4823  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4824 
4825  for (;;)
4826  {
4827  if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
4828  {
4829  CommandId firing_id = afterTriggers.firing_counter++;
4830  AfterTriggerEventChunk *oldtail = qs->events.tail;
4831 
4832  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
4833  break; /* all fired */
4834 
4835  /*
4836  * Firing a trigger could result in query_stack being repalloc'd,
4837  * so we must recalculate qs after each afterTriggerInvokeEvents
4838  * call. Furthermore, it's unsafe to pass delete_ok = true here,
4839  * because that could cause afterTriggerInvokeEvents to try to
4840  * access qs->events after the stack has been repalloc'd.
4841  */
4842  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4843 
4844  /*
4845  * We'll need to scan the events list again. To reduce the cost
4846  * of doing so, get rid of completely-fired chunks. We know that
4847  * all events were marked IN_PROGRESS or DONE at the conclusion of
4848  * afterTriggerMarkEvents, so any still-interesting events must
4849  * have been added after that, and so must be in the chunk that
4850  * was then the tail chunk, or in later chunks. So, zap all
4851  * chunks before oldtail. This is approximately the same set of
4852  * events we would have gotten rid of by passing delete_ok = true.
4853  */
4854  Assert(oldtail != NULL);
4855  while (qs->events.head != oldtail)
4856  afterTriggerDeleteHeadEventChunk(qs);
4857  }
4858  else
4859  break;
4860  }
4861 
4862  /* Release query-level-local storage, including tuplestores if any */
4863  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
4864 
4865  afterTriggers.query_depth--;
4866 }
4867 
4868 
4869 /*
4870  * AfterTriggerFreeQuery
4871  * Release subsidiary storage for a trigger query level.
4872  * This includes closing down tuplestores.
4873  * Note: it's important for this to be safe if interrupted by an error
4874  * and then called again for the same query level.
4875  */
4876 static void
4877 AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
4878 {
4879  Tuplestorestate *ts;
4880  List *tables;
4881  ListCell *lc;
4882 
4883  /* Drop the trigger events */
4884  afterTriggerFreeEventList(&qs->events);
4885 
4886  /* Drop FDW tuplestore if any */
4887  ts = qs->fdw_tuplestore;
4888  qs->fdw_tuplestore = NULL;
4889  if (ts)
4890  tuplestore_end(ts);
4891 
4892  /* Release per-table subsidiary storage */
4893  tables = qs->tables;
4894  foreach(lc, tables)
4895  {
4896  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4897 
4898  ts = table->old_tuplestore;
4899  table->old_tuplestore = NULL;
4900  if (ts)
4901  tuplestore_end(ts);
4902  ts = table->new_tuplestore;
4903  table->new_tuplestore = NULL;
4904  if (ts)
4905  tuplestore_end(ts);
4906  }
4907 
4908  /*
4909  * Now free the AfterTriggersTableData structs and list cells. Reset list
4910  * pointer first; if list_free_deep somehow gets an error, better to leak
4911  * that storage than have an infinite loop.
4912  */
4913  qs->tables = NIL;
4914  list_free_deep(tables);
4915 }
4916 
4917 
4918 /* ----------
4919  * AfterTriggerFireDeferred()
4920  *
4921  * Called just before the current transaction is committed. At this
4922  * time we invoke all pending DEFERRED triggers.
4923  *
4924  * It is possible for other modules to queue additional deferred triggers
4925  * during pre-commit processing; therefore xact.c may have to call this
4926  * multiple times.
4927  * ----------
4928  */
4929 void
4930 AfterTriggerFireDeferred(void)
4931 {
4932  AfterTriggerEventList *events;
4933  bool snap_pushed = false;
4934 
4935  /* Must not be inside a query */
4936  Assert(afterTriggers.query_depth == -1);
4937 
4938  /*
4939  * If there are any triggers to fire, make sure we have set a snapshot for
4940  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
4941  * can't assume ActiveSnapshot is valid on entry.)
4942  */
4943  events = &afterTriggers.events;
4944  if (events->head != NULL)
4945  {
4946  PushActiveSnapshot(GetTransactionSnapshot());
4947  snap_pushed = true;
4948  }
4949 
4950  /*
4951  * Run all the remaining triggers. Loop until they are all gone, in case
4952  * some trigger queues more for us to do.
4953  */
4954  while (afterTriggerMarkEvents(events, NULL, false))
4955  {
4956  CommandId firing_id = afterTriggers.firing_counter++;
4957 
4958  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4959  break; /* all fired */
4960  }
4961 
4962  /*
4963  * We don't bother freeing the event list, since it will go away anyway
4964  * (and more efficiently than via pfree) in AfterTriggerEndXact.
4965  */
4966 
4967  if (snap_pushed)
4968  PopActiveSnapshot();
4969 }
4970 
4971 
4972 /* ----------
4973  * AfterTriggerEndXact()
4974  *
4975  * The current transaction is finishing.
4976  *
4977  * Any unfired triggers are canceled so we simply throw
4978  * away anything we know.
4979  *
4980  * Note: it is possible for this to be called repeatedly in case of
4981  * error during transaction abort; therefore, do not complain if
4982  * already closed down.
4983  * ----------
4984  */
4985 void
4986 AfterTriggerEndXact(bool isCommit)
4987 {
4988  /*
4989  * Forget the pending-events list.
4990  *
4991  * Since all the info is in TopTransactionContext or children thereof, we
4992  * don't really need to do anything to reclaim memory. However, the
4993  * pending-events list could be large, and so it's useful to discard it as
4994  * soon as possible --- especially if we are aborting because we ran out
4995  * of memory for the list!
4996  */
4997  if (afterTriggers.event_cxt)
4998  {
4999  MemoryContextDelete(afterTriggers.event_cxt);
5000  afterTriggers.event_cxt = NULL;
5001  afterTriggers.events.head = NULL;
5002  afterTriggers.events.tail = NULL;
5003  afterTriggers.events.tailfree = NULL;
5004  }
5005 
5006  /*
5007  * Forget any subtransaction state as well. Since this can't be very
5008  * large, we let the eventual reset of TopTransactionContext free the
5009  * memory instead of doing it here.
5010  */
5011  afterTriggers.trans_stack = NULL;
5012  afterTriggers.maxtransdepth = 0;
5013 
5014 
5015  /*
5016  * Forget the query stack and constraint-related state information. As
5017  * with the subtransaction state information, we don't bother freeing the
5018  * memory here.
5019  */
5020  afterTriggers.query_stack = NULL;
5021  afterTriggers.maxquerydepth = 0;
5022  afterTriggers.state = NULL;
5023 
5024  /* No more afterTriggers manipulation until next transaction starts. */
5025  afterTriggers.query_depth = -1;
5026 }
5027 
5028 /*
5029  * AfterTriggerBeginSubXact()
5030  *
5031  * Start a subtransaction.
5032  */
5033 void
5034 AfterTriggerBeginSubXact(void)
5035 {
5036  int my_level = GetCurrentTransactionNestLevel();
5037 
5038  /*
5039  * Allocate more space in the trans_stack if needed. (Note: because the
5040  * minimum nest level of a subtransaction is 2, we waste the first couple
5041  * entries of the array; not worth the notational effort to avoid it.)
5042  */
5043  while (my_level >= afterTriggers.maxtransdepth)
5044  {
5045  if (afterTriggers.maxtransdepth == 0)
5046  {
5047  /* Arbitrarily initialize for max of 8 subtransaction levels */
5048  afterTriggers.trans_stack = (AfterTriggersTransData *)
5049  MemoryContextAlloc(TopTransactionContext,
5050  8 * sizeof(AfterTriggersTransData));
5051  afterTriggers.maxtransdepth = 8;
5052  }
5053  else
5054  {
5055  /* repalloc will keep the stack in the same context */
5056  int new_alloc = afterTriggers.maxtransdepth * 2;
5057 
5058  afterTriggers.trans_stack = (AfterTriggersTransData *)
5059  repalloc(afterTriggers.trans_stack,
5060  new_alloc * sizeof(AfterTriggersTransData));
5061  afterTriggers.maxtransdepth = new_alloc;
5062  }
5063  }
5064 
5065  /*
5066  * Push the current information into the stack. The SET CONSTRAINTS state
5067  * is not saved until/unless changed. Likewise, we don't make a
5068  * per-subtransaction event context until needed.
5069  */
5070  afterTriggers.trans_stack[my_level].state = NULL;
5071  afterTriggers.trans_stack[my_level].events = afterTriggers.events;
5072  afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
5073  afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
5074 }
5075 
5076 /*
5077  * AfterTriggerEndSubXact()
5078  *
5079  * The current subtransaction is ending.
5080  */
5081 void
5082 AfterTriggerEndSubXact(bool isCommit)
5083 {
5084  int my_level = GetCurrentTransactionNestLevel();
5085  SetConstraintState state;
5086  AfterTriggerEvent event;
5087  AfterTriggerEventChunk *chunk;
5088  CommandId subxact_firing_id;
5089 
5090  /*
5091  * Pop the prior state if needed.
5092  */
5093  if (isCommit)
5094  {
5095  Assert(my_level < afterTriggers.maxtransdepth);
5096  /* If we saved a prior state, we don't need it anymore */
5097  state = afterTriggers.trans_stack[my_level].state;
5098  if (state != NULL)
5099  pfree(state);
5100  /* this avoids double pfree if error later: */
5101  afterTriggers.trans_stack[my_level].state = NULL;
5102  Assert(afterTriggers.query_depth ==
5103  afterTriggers.trans_stack[my_level].query_depth);
5104  }
5105  else
5106  {
5107  /*
5108  * Aborting. It is possible subxact start failed before calling
5109  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
5110  * trans_stack levels that aren't there.
5111  */
5112  if (my_level >= afterTriggers.maxtransdepth)
5113  return;
5114 
5115  /*
5116  * Release query-level storage for queries being aborted, and restore
5117  * query_depth to its pre-subxact value. This assumes that a
5118  * subtransaction will not add events to query levels started in a
5119  * subtransaction will not add events to query levels started in an
5120  */
5121  while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
5122  {
5123  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
5124  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
5125  afterTriggers.query_depth--;
5126  }
5127  Assert(afterTriggers.query_depth ==
5128  afterTriggers.trans_stack[my_level].query_depth);
5129 
5130  /*
5131  * Restore the global deferred-event list to its former length,
5132  * discarding any events queued by the subxact.
5133  */
5134  afterTriggerRestoreEventList(&afterTriggers.events,
5135  &afterTriggers.trans_stack[my_level].events);
5136 
5137  /*
5138  * Restore the trigger state. If the saved state is NULL, then this
5139  * subxact didn't save it, so it doesn't need restoring.
5140  */
5141  state = afterTriggers.trans_stack[my_level].state;
5142  if (state != NULL)
5143  {
5144  pfree(afterTriggers.state);
5145  afterTriggers.state = state;
5146  }
5147  /* this avoids double pfree if error later: */
5148  afterTriggers.trans_stack[my_level].state = NULL;
5149 
5150  /*
5151  * Scan for any remaining deferred events that were marked DONE or IN
5152  * PROGRESS by this subxact or a child, and un-mark them. We can
5153  * recognize such events because they have a firing ID greater than or
5154  * equal to the firing_counter value we saved at subtransaction start.
5155  * (This essentially assumes that the current subxact includes all
5156  * subxacts started after it.)
5157  */
5158  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
5159  for_each_event_chunk(event, chunk, afterTriggers.events)
5160  {
5161  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5162 
5163  if (event->ate_flags &
5164  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
5165  {
5166  if (evtshared->ats_firing_id >= subxact_firing_id)
5167  event->ate_flags &=
5168  ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
5169  }
5170  }
5171  }
5172 }
5173 
5174 /* ----------
5175  * AfterTriggerEnlargeQueryState()
5176  *
5177  * Prepare the necessary state so that we can record AFTER trigger events
5178  * queued by a query. It is allowed to have nested queries within a
5179  * (sub)transaction, so we need to have separate state for each query
5180  * nesting level.
5181  * ----------
5182  */
5183 static void
5184 AfterTriggerEnlargeQueryState(void)
5185 {
5186  int init_depth = afterTriggers.maxquerydepth;
5187 
5188  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
5189 
5190  if (afterTriggers.maxquerydepth == 0)
5191  {
5192  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
5193 
5194  afterTriggers.query_stack = (AfterTriggersQueryData *)
5195  MemoryContextAlloc(TopTransactionContext,
5196  new_alloc * sizeof(AfterTriggersQueryData));
5197  afterTriggers.maxquerydepth = new_alloc;
5198  }
5199  else
5200  {
5201  /* repalloc will keep the stack in the same context */
5202  int old_alloc = afterTriggers.maxquerydepth;
5203  int new_alloc = Max(afterTriggers.query_depth + 1,
5204  old_alloc * 2);
5205 
5206  afterTriggers.query_stack = (AfterTriggersQueryData *)
5207  repalloc(afterTriggers.query_stack,
5208  new_alloc * sizeof(AfterTriggersQueryData));
5209  afterTriggers.maxquerydepth = new_alloc;
5210  }
5211 
5212  /* Initialize new array entries to empty */
5213  while (init_depth < afterTriggers.maxquerydepth)
5214  {
5215  AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth];
5216 
5217  qs->events.head = NULL;
5218  qs->events.tail = NULL;
5219  qs->events.tailfree = NULL;
5220  qs->fdw_tuplestore = NULL;
5221  qs->tables = NIL;
5222 
5223  ++init_depth;
5224  }
5225 }
5226 
5227 /*
5228  * Create an empty SetConstraintState with room for numalloc trigstates
5229  */
5230 static SetConstraintState
5231 SetConstraintStateCreate(int numalloc)
5232 {
5233  SetConstraintState state;
5234 
5235  /* Behave sanely with numalloc == 0 */
5236  if (numalloc <= 0)
5237  numalloc = 1;
5238 
5239  /*
5240  * We assume that zeroing will correctly initialize the state values.
5241  */
5242  state = (SetConstraintState)
5243  MemoryContextAllocZero(TopTransactionContext,
5244  offsetof(SetConstraintStateData, trigstates) +
5245  numalloc * sizeof(SetConstraintTriggerData));
5246 
5247  state->numalloc = numalloc;
5248 
5249  return state;
5250 }
5251 
5252 /*
5253  * Copy a SetConstraintState
5254  */
5255 static SetConstraintState
5256 SetConstraintStateCopy(SetConstraintState origstate)
5257 {
5258  SetConstraintState state;
5259 
5260  state = SetConstraintStateCreate(origstate->numstates);
5261 
5262  state->all_isset = origstate->all_isset;
5263  state->all_isdeferred = origstate->all_isdeferred;
5264  state->numstates = origstate->numstates;
5265  memcpy(state->trigstates, origstate->trigstates,
5266  origstate->numstates * sizeof(SetConstraintTriggerData));
5267 
5268  return state;
5269 }
5270 
5271 /*
5272  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
5273  * pointer to the state object (it will change if we have to repalloc).
5274  */
5275 static SetConstraintState
5276 SetConstraintStateAddItem(SetConstraintState state,
5277  Oid tgoid, bool tgisdeferred)
5278 {
5279  if (state->numstates >= state->numalloc)
5280  {
5281  int newalloc = state->numalloc * 2;
5282 
5283  newalloc = Max(newalloc, 8); /* in case original has size 0 */
5284  state = (SetConstraintState)
5285  repalloc(state,
5286  offsetof(SetConstraintStateData, trigstates) +
5287  newalloc * sizeof(SetConstraintTriggerData));
5288  state->numalloc = newalloc;
5289  Assert(state->numstates < state->numalloc);
5290  }
5291 
5292  state->trigstates[state->numstates].sct_tgoid = tgoid;
5293  state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
5294  state->numstates++;
5295 
5296  return state;
5297 }
5298 
5299 /* ----------
5300  * AfterTriggerSetState()
5301  *
5302  * Execute the SET CONSTRAINTS ... utility command.
5303  * ----------
5304  */
5305 void
5306 AfterTriggerSetState(ConstraintsSetStmt *stmt)
5307 {
5308  int my_level = GetCurrentTransactionNestLevel();
5309 
5310  /* If we haven't already done so, initialize our state. */
5311  if (afterTriggers.state == NULL)
5312  afterTriggers.state = SetConstraintStateCreate(8);
5313 
5314  /*
5315  * If in a subtransaction, and we didn't save the current state already,
5316  * save it so it can be restored if the subtransaction aborts.
5317  */
5318  if (my_level > 1 &&
5319  afterTriggers.trans_stack[my_level].state == NULL)
5320  {
5321  afterTriggers.trans_stack[my_level].state =
5322  SetConstraintStateCopy(afterTriggers.state);
5323  }
5324 
5325  /*
5326  * Handle SET CONSTRAINTS ALL ...
5327  */
5328  if (stmt->constraints == NIL)
5329  {
5330  /*
5331  * Forget any previous SET CONSTRAINTS commands in this transaction.
5332  */
5333  afterTriggers.state->numstates = 0;
5334 
5335  /*
5336  * Set the per-transaction ALL state to known.
5337  */
5338  afterTriggers.state->all_isset = true;
5339  afterTriggers.state->all_isdeferred = stmt->deferred;
5340  }
5341  else
5342  {
5343  Relation conrel;
5344  Relation tgrel;
5345  List *conoidlist = NIL;
5346  List *tgoidlist = NIL;
5347  ListCell *lc;
5348 
5349  /*
5350  * Handle SET CONSTRAINTS constraint-name [, ...]
5351  *
5352  * First, identify all the named constraints and make a list of their
5353  * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
5354  * the same name within a schema, the specifications are not
5355  * necessarily unique. Our strategy is to target all matching
5356  * constraints within the first search-path schema that has any
5357  * matches, but disregard matches in schemas beyond the first match.
5358  * (This is a bit odd but it's the historical behavior.)
5359  *
5360  * A constraint in a partitioned table may have corresponding
5361  * constraints in the partitions. Grab those too.
5362  */
5363  conrel = heap_open(ConstraintRelationId, AccessShareLock);
5364 
5365  foreach(lc, stmt->constraints)
5366  {
5367  RangeVar *constraint = lfirst(lc);
5368  bool found;
5369  List *namespacelist;
5370  ListCell *nslc;
5371 
5372  if (constraint->catalogname)
5373  {
5374  if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
5375  ereport(ERROR,
5376  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5377  errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
5378  constraint->catalogname, constraint->schemaname,
5379  constraint->relname)));
5380  }
5381 
5382  /*
5383  * If we're given the schema name with the constraint, look only
5384  * in that schema. If given a bare constraint name, use the
5385  * search path to find the first matching constraint.
5386  */
5387  if (constraint->schemaname)
5388  {
5389  Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
5390  false);
5391 
5392  namespacelist = list_make1_oid(namespaceId);
5393  }
5394  else
5395  {
5396  namespacelist = fetch_search_path(true);
5397  }
5398 
5399  found = false;
5400  foreach(nslc, namespacelist)
5401  {
5402  Oid namespaceId = lfirst_oid(nslc);
5403  SysScanDesc conscan;
5404  ScanKeyData skey[2];
5405  HeapTuple tup;
5406 
5407  ScanKeyInit(&skey[0],
5408  Anum_pg_constraint_conname,
5409  BTEqualStrategyNumber, F_NAMEEQ,
5410  CStringGetDatum(constraint->relname));
5411  ScanKeyInit(&skey[1],
5412  Anum_pg_constraint_connamespace,
5413  BTEqualStrategyNumber, F_OIDEQ,
5414  ObjectIdGetDatum(namespaceId));
5415 
5416  conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
5417  true, NULL, 2, skey);
5418 
5419  while (HeapTupleIsValid(tup = systable_getnext(conscan)))
5420  {
5421  Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
5422 
5423  if (con->condeferrable)
5424  conoidlist = lappend_oid(conoidlist,
5425  HeapTupleGetOid(tup));
5426  else if (stmt->deferred)
5427  ereport(ERROR,
5428  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
5429  errmsg("constraint \"%s\" is not deferrable",
5430  constraint->relname)));
5431  found = true;
5432  }
5433 
5434  systable_endscan(conscan);
5435 
5436  /*
5437  * Once we've found a matching constraint we do not search
5438  * later parts of the search path.
5439  */
5440  if (found)
5441  break;
5442  }
5443 
5444  list_free(namespacelist);
5445 
5446  /*
5447  * Not found?
5448  */
5449  if (!found)
5450  ereport(ERROR,
5451  (errcode(ERRCODE_UNDEFINED_OBJECT),
5452  errmsg("constraint \"%s\" does not exist",
5453  constraint->relname)));
5454  }
5455 
5456  /*
5457  * Scan for any possible descendants of the constraints. We append
5458  * whatever we find to the same list that we're scanning; this has the
5459  * effect that we create new scans for those, too, so if there are
5460  * further descendants, we'll also catch them.
5461  */
5462  foreach(lc, conoidlist)
5463  {
5464  Oid parent = lfirst_oid(lc);
5465  ScanKeyData key;
5466  SysScanDesc scan;
5467  HeapTuple tuple;
5468 
5469  ScanKeyInit(&key,
5470  Anum_pg_constraint_conparentid,
5471  BTEqualStrategyNumber, F_OIDEQ,
5472  ObjectIdGetDatum(parent));
5473 
5474  scan = systable_beginscan(conrel, ConstraintParentIndexId, true, NULL, 1, &key);
5475 
5476  while (HeapTupleIsValid(tuple = systable_getnext(scan)))
5477  conoidlist = lappend_oid(conoidlist, HeapTupleGetOid(tuple));
5478 
5479  systable_endscan(scan);
5480  }
5481 
5482  heap_close(conrel, AccessShareLock);
5483 
5484  /*
5485  * Now, locate the trigger(s) implementing each of these constraints,
5486  * and make a list of their OIDs.
5487  */
5488  tgrel = heap_open(TriggerRelationId, AccessShareLock);
5489 
5490  foreach(lc, conoidlist)
5491  {
5492  Oid conoid = lfirst_oid(lc);
5493  bool found;
5494  ScanKeyData skey;
5495  SysScanDesc tgscan;
5496  HeapTuple htup;
5497 
5498  found = false;
5499 
5500  ScanKeyInit(&skey,
5501  Anum_pg_trigger_tgconstraint,
5502  BTEqualStrategyNumber, F_OIDEQ,
5503  ObjectIdGetDatum(conoid));
5504 
5505  tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
5506  NULL, 1, &skey);
5507 
5508  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
5509  {
5510  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
5511 
5512  /*
5513  * Silently skip triggers that are marked as non-deferrable in
5514  * pg_trigger. This is not an error condition, since a
5515  * deferrable RI constraint may have some non-deferrable
5516  * actions.
5517  */
5518  if (pg_trigger->tgdeferrable)
5519  tgoidlist = lappend_oid(tgoidlist,
5520  HeapTupleGetOid(htup));
5521 
5522  found = true;
5523  }
5524 
5525  systable_endscan(tgscan);
5526 
5527  /* Safety check: a deferrable constraint should have triggers */
5528  if (!found)
5529  elog(ERROR, "no triggers found for constraint with OID %u",
5530  conoid);
5531  }
5532 
5533  heap_close(tgrel, AccessShareLock);
5534 
5535  /*
5536  * Now we can set the trigger states of individual triggers for this
5537  * xact.
5538  */
5539  foreach(lc, tgoidlist)
5540  {
5541  Oid tgoid = lfirst_oid(lc);
5542  SetConstraintState state = afterTriggers.state;
5543  bool found = false;
5544  int i;
5545 
5546  for (i = 0; i < state->numstates; i++)
5547  {
5548  if (state->trigstates[i].sct_tgoid == tgoid)
5549  {
5550  state->trigstates[i].sct_tgisdeferred = stmt->deferred;
5551  found = true;
5552  break;
5553  }
5554  }
5555  if (!found)
5556  {
5557  afterTriggers.state =
5558  SetConstraintStateAddItem(state, tgoid, stmt->deferred);
5559  }
5560  }
5561  }
5562 
5563  /*
5564  * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
5565  * checks against that constraint must be made when the SET CONSTRAINTS
5566  * command is executed -- i.e. the effects of the SET CONSTRAINTS command
5567  * apply retroactively. We've updated the constraints state, so scan the
5568  * list of previously deferred events to fire any that have now become
5569  * immediate.
5570  *
5571  * Obviously, if this was SET ... DEFERRED then it can't have converted
5572  * any unfired events to immediate, so we need do nothing in that case.
5573  */
5574  if (!stmt->deferred)
5575  {
5576  AfterTriggerEventList *events = &afterTriggers.events;
5577  bool snapshot_set = false;
5578 
5579  while (afterTriggerMarkEvents(events, NULL, true))
5580  {
5581  CommandId firing_id = afterTriggers.firing_counter++;
5582 
5583  /*
5584  * Make sure a snapshot has been established in case trigger
5585  * functions need one. Note that we avoid setting a snapshot if
5586  * we don't find at least one trigger that has to be fired now.
5587  * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
5588  * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
5589  * at the start of a transaction it's not possible for any trigger
5590  * events to be queued yet.)
5591  */
5592  if (!snapshot_set)
5593  {
5594  PushActiveSnapshot(GetTransactionSnapshot());
5595  snapshot_set = true;
5596  }
5597 
5598  /*
5599  * We can delete fired events if we are at top transaction level,
5600  * but we'd better not if inside a subtransaction, since the
5601  * subtransaction could later get rolled back.
5602  */
5603  if (afterTriggerInvokeEvents(events, firing_id, NULL,
5604  !IsSubTransaction()))
5605  break; /* all fired */
5606  }
5607 
5608  if (snapshot_set)
5609  PopActiveSnapshot();
5610 }
5611 }
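
For context, here is a simplified sketch of how the state built above might be consulted when deciding whether a queued constraint trigger is currently deferred (illustrative only, not the actual consumer in this file): per-trigger entries take precedence, then any SET CONSTRAINTS ALL setting, and otherwise the trigger's catalog default would apply.

 static bool
 demo_trigger_is_deferred(SetConstraintState state, Oid tgoid,
                          bool deferred_by_default)
 {
     int     i;

     if (state != NULL)
     {
         /* an explicit SET CONSTRAINTS name ... entry wins */
         for (i = 0; i < state->numstates; i++)
         {
             if (state->trigstates[i].sct_tgoid == tgoid)
                 return state->trigstates[i].sct_tgisdeferred;
         }
         /* otherwise a SET CONSTRAINTS ALL ... entry applies */
         if (state->all_isset)
             return state->all_isdeferred;
     }
     /* fall back to the trigger's DEFERRABLE INITIALLY ... declaration */
     return deferred_by_default;
 }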
5612 
5613 /* ----------
5614  * AfterTriggerPendingOnRel()
5615  * Test to see if there are any pending after-trigger events for rel.
5616  *
5617  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5618  * it is unsafe to perform major surgery on a relation. Note that only
5619  * local pending events are examined. We assume that having exclusive lock
5620  * on a rel guarantees there are no unserviced events in other backends ---
5621  * but having a lock does not prevent there being such events in our own.
5622  *
5623  * In some scenarios it'd be reasonable to remove pending events (more
5624  * specifically, mark them DONE by the current subxact) but without a lot
5625  * of knowledge of the trigger semantics we can't do this in general.
5626  * ----------
5627  */
5628 bool
5629 AfterTriggerPendingOnRel(Oid relid)
5630 {
5631  AfterTriggerEvent event;
5632  AfterTriggerEventChunk *chunk;
5633  int depth;
5634 
5635  /* Scan queued events */
5636  for_each_event_chunk(event, chunk, afterTriggers.events)
5637  {
5638  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5639 
5640  /*
5641  * We can ignore completed events. (Even if a DONE flag is rolled
5642  * back by subxact abort, it's OK because the effects of the TRUNCATE
5643  * or whatever must get rolled back too.)
5644  */
5645  if (event->ate_flags & AFTER_TRIGGER_DONE)
5646  continue;
5647 
5648  if (evtshared->ats_relid == relid)
5649  return true;
5650  }
5651 
5652  /*
5653  * Also scan events queued by incomplete queries. This could only matter
5654  * if TRUNCATE/etc is executed by a function or trigger within an updating
5655  * query on the same relation, which is pretty perverse, but let's check.
5656  */
5657  for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
5658  {
5659  for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events)
5660  {
5661  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5662 
5663  if (event->ate_flags & AFTER_TRIGGER_DONE)
5664  continue;
5665 
5666  if (evtshared->ats_relid == relid)
5667  return true;
5668  }
5669  }
5670 
5671  return false;
5672 }
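
A hypothetical caller (the function name and error wording below are invented for illustration) would typically use this test to refuse destructive operations on a relation while unfired trigger events for it remain queued in the current backend.

 static void
 demo_refuse_if_pending_triggers(Relation rel)
 {
     if (AfterTriggerPendingOnRel(RelationGetRelid(rel)))
         ereport(ERROR,
                 (errcode(ERRCODE_OBJECT_IN_USE),
                  errmsg("cannot perform this operation on \"%s\" because it has pending trigger events",
                         RelationGetRelationName(rel))));
 }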
5673 
5674 
5675 /* ----------
5676  * AfterTriggerSaveEvent()
5677  *
5678  * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5679  * be fired for an event.
5680  *
5681  * NOTE: this is called whenever there are any triggers associated with
5682  * the event (even if they are disabled). This function decides which
5683  * triggers actually need to be queued. It is also called after each row,
5684  * even if there are no triggers for that event, if there are any AFTER
5685  * STATEMENT triggers for the statement which use transition tables, so that
5686  * the transition tuplestores can be built. Furthermore, if the transition
5687  * capture is happening for UPDATEd rows being moved to another partition
5688  * because the partition key changed, then this function is called once when
5689  * the row is deleted (to capture the OLD row), and once when the row is
5690  * inserted into another partition (to capture the NEW row). This is done
5691  * separately because the DELETE and INSERT happen on different tables.
5692  *
5693  * Transition tuplestores are built now, rather than when events are pulled
5694  * off of the queue because AFTER ROW triggers are allowed to select from the
5695  * transition tables for the statement.
5696  * ----------
5697  */
5698 static void
5699 AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
5700  int event, bool row_trigger,
5701  HeapTuple oldtup, HeapTuple newtup,
5702  List *recheckIndexes, Bitmapset *modifiedCols,
5703  TransitionCaptureState *transition_capture)
5704 {
5705  Relation rel = relinfo->ri_RelationDesc;
5706  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5707  AfterTriggerEventData new_event;
5708  AfterTriggerSharedData new_shared;
5709  char relkind = rel->rd_rel->relkind;
5710  int tgtype_event;
5711  int tgtype_level;
5712  int i;
5713  Tuplestorestate *fdw_tuplestore = NULL;
5714 
5715  /*
5716  * Check state. We use a normal test not Assert because it is possible to
5717  * reach here in the wrong state given misconfigured RI triggers, in
5718  * particular deferring a cascade action trigger.