PostgreSQL Source Code  git master
trigger.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/index.h"
26 #include "catalog/indexing.h"
27 #include "catalog/objectaccess.h"
28 #include "catalog/partition.h"
29 #include "catalog/pg_constraint.h"
30 #include "catalog/pg_inherits.h"
31 #include "catalog/pg_proc.h"
32 #include "catalog/pg_trigger.h"
33 #include "catalog/pg_type.h"
34 #include "commands/dbcommands.h"
35 #include "commands/defrem.h"
36 #include "commands/trigger.h"
37 #include "executor/executor.h"
38 #include "executor/execPartition.h"
39 #include "miscadmin.h"
40 #include "nodes/bitmapset.h"
41 #include "nodes/makefuncs.h"
42 #include "optimizer/optimizer.h"
43 #include "parser/parse_clause.h"
44 #include "parser/parse_collate.h"
45 #include "parser/parse_func.h"
46 #include "parser/parse_relation.h"
47 #include "parser/parsetree.h"
48 #include "partitioning/partdesc.h"
49 #include "pgstat.h"
50 #include "rewrite/rewriteManip.h"
51 #include "storage/bufmgr.h"
52 #include "storage/lmgr.h"
53 #include "tcop/utility.h"
54 #include "utils/acl.h"
55 #include "utils/builtins.h"
56 #include "utils/bytea.h"
57 #include "utils/fmgroids.h"
58 #include "utils/guc_hooks.h"
59 #include "utils/inval.h"
60 #include "utils/lsyscache.h"
61 #include "utils/memutils.h"
62 #include "utils/plancache.h"
63 #include "utils/rel.h"
64 #include "utils/snapmgr.h"
65 #include "utils/syscache.h"
66 #include "utils/tuplestore.h"
67 
68 
69 /* GUC variables */
71 
72 /* How many levels deep into trigger execution are we? */
73 static int MyTriggerDepth = 0;
74 
75 /* Local function prototypes */
76 static void renametrig_internal(Relation tgrel, Relation targetrel,
77  HeapTuple trigtup, const char *newname,
78  const char *expected_name);
79 static void renametrig_partition(Relation tgrel, Oid partitionId,
80  Oid parentTriggerOid, const char *newname,
81  const char *expected_name);
82 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
83 static bool GetTupleForTrigger(EState *estate,
84  EPQState *epqstate,
85  ResultRelInfo *relinfo,
86  ItemPointer tid,
87  LockTupleMode lockmode,
88  TupleTableSlot *oldslot,
89  TupleTableSlot **epqslot,
90  TM_Result *tmresultp,
91  TM_FailureData *tmfdp);
92 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
93  Trigger *trigger, TriggerEvent event,
94  Bitmapset *modifiedCols,
95  TupleTableSlot *oldslot, TupleTableSlot *newslot);
97  int tgindx,
98  FmgrInfo *finfo,
99  Instrumentation *instr,
100  MemoryContext per_tuple_context);
101 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
102  ResultRelInfo *src_partinfo,
103  ResultRelInfo *dst_partinfo,
104  int event, bool row_trigger,
105  TupleTableSlot *oldslot, TupleTableSlot *newslot,
106  List *recheckIndexes, Bitmapset *modifiedCols,
107  TransitionCaptureState *transition_capture,
108  bool is_crosspart_update);
109 static void AfterTriggerEnlargeQueryState(void);
110 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
111 
112 
113 /*
114  * Create a trigger. Returns the address of the created trigger.
115  *
116  * queryString is the source text of the CREATE TRIGGER command.
117  * This must be supplied if a whenClause is specified, else it can be NULL.
118  *
119  * relOid, if nonzero, is the relation on which the trigger should be
120  * created. If zero, the name provided in the statement will be looked up.
121  *
122  * refRelOid, if nonzero, is the relation to which the constraint trigger
123  * refers. If zero, the constraint relation name provided in the statement
124  * will be looked up as needed.
125  *
126  * constraintOid, if nonzero, says that this trigger is being created
127  * internally to implement that constraint. A suitable pg_depend entry will
128  * be made to link the trigger to that constraint. constraintOid is zero when
129  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
130  * TRIGGER, we build a pg_constraint entry internally.)
131  *
132  * indexOid, if nonzero, is the OID of an index associated with the constraint.
133  * We do nothing with this except store it into pg_trigger.tgconstrindid;
134  * but when creating a trigger for a deferrable unique constraint on a
135  * partitioned table, its children are looked up. Note we don't cope with
136  * invalid indexes in that case.
137  *
138  * funcoid, if nonzero, is the OID of the function to invoke. When this is
139  * given, stmt->funcname is ignored.
140  *
141  * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
142  * if that trigger is dropped, this one should be too. There are two cases
143  * when a nonzero value is passed for this: 1) when this function recurses to
144  * create the trigger on partitions, 2) when creating child foreign key
145  * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers().
146  *
147  * If whenClause is passed, it is an already-transformed expression for
148  * WHEN. In this case, we ignore any that may come in stmt->whenClause.
149  *
150  * If isInternal is true then this is an internally-generated trigger.
151  * This argument sets the tgisinternal field of the pg_trigger entry, and
152  * if true causes us to modify the given trigger name to ensure uniqueness.
153  *
154  * When isInternal is not true we require ACL_TRIGGER permissions on the
155  * relation, as well as ACL_EXECUTE on the trigger function. For internal
156  * triggers the caller must apply any required permission checks.
157  *
158  * When called on partitioned tables, this function recurses to create the
159  * trigger on all the partitions, except if isInternal is true, in which
160  * case caller is expected to execute recursion on its own. in_partition
161  * indicates such a recursive call; outside callers should pass "false"
162  * (but see CloneRowTriggersToPartition).
163  */
165 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
166  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
167  Oid funcoid, Oid parentTriggerOid, Node *whenClause,
168  bool isInternal, bool in_partition)
169 {
170  return
171  CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
172  constraintOid, indexOid, funcoid,
173  parentTriggerOid, whenClause, isInternal,
174  in_partition, TRIGGER_FIRES_ON_ORIGIN);
175 }
176 
177 /*
178  * Like the above; additionally the firing condition
179  * (always/origin/replica/disabled) can be specified.
180  */
182 CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
183  Oid relOid, Oid refRelOid, Oid constraintOid,
184  Oid indexOid, Oid funcoid, Oid parentTriggerOid,
185  Node *whenClause, bool isInternal, bool in_partition,
186  char trigger_fires_when)
187 {
188  int16 tgtype;
189  int ncolumns;
190  int16 *columns;
191  int2vector *tgattr;
192  List *whenRtable;
193  char *qual;
194  Datum values[Natts_pg_trigger];
195  bool nulls[Natts_pg_trigger];
196  Relation rel;
197  AclResult aclresult;
198  Relation tgrel;
199  Relation pgrel;
200  HeapTuple tuple = NULL;
201  Oid funcrettype;
202  Oid trigoid = InvalidOid;
203  char internaltrigname[NAMEDATALEN];
204  char *trigname;
205  Oid constrrelid = InvalidOid;
206  ObjectAddress myself,
207  referenced;
208  char *oldtablename = NULL;
209  char *newtablename = NULL;
210  bool partition_recurse;
211  bool trigger_exists = false;
212  Oid existing_constraint_oid = InvalidOid;
213  bool existing_isInternal = false;
214  bool existing_isClone = false;
215 
216  if (OidIsValid(relOid))
217  rel = table_open(relOid, ShareRowExclusiveLock);
218  else
219  rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
220 
221  /*
222  * Triggers must be on tables or views, and there are additional
223  * relation-type-specific restrictions.
224  */
225  if (rel->rd_rel->relkind == RELKIND_RELATION)
226  {
227  /* Tables can't have INSTEAD OF triggers */
228  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
229  stmt->timing != TRIGGER_TYPE_AFTER)
230  ereport(ERROR,
231  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
232  errmsg("\"%s\" is a table",
234  errdetail("Tables cannot have INSTEAD OF triggers.")));
235  }
236  else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
237  {
238  /* Partitioned tables can't have INSTEAD OF triggers */
239  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
240  stmt->timing != TRIGGER_TYPE_AFTER)
241  ereport(ERROR,
242  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
243  errmsg("\"%s\" is a table",
245  errdetail("Tables cannot have INSTEAD OF triggers.")));
246 
247  /*
248  * FOR EACH ROW triggers have further restrictions
249  */
250  if (stmt->row)
251  {
252  /*
253  * Disallow use of transition tables.
254  *
255  * Note that we have another restriction about transition tables
256  * in partitions; search for 'has_superclass' below for an
257  * explanation. The check here is just to protect from the fact
258  * that if we allowed it here, the creation would succeed for a
259  * partitioned table with no partitions, but would be blocked by
260  * the other restriction when the first partition was created,
261  * which is very unfriendly behavior.
262  */
263  if (stmt->transitionRels != NIL)
264  ereport(ERROR,
265  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
266  errmsg("\"%s\" is a partitioned table",
268  errdetail("ROW triggers with transition tables are not supported on partitioned tables.")));
269  }
270  }
271  else if (rel->rd_rel->relkind == RELKIND_VIEW)
272  {
273  /*
274  * Views can have INSTEAD OF triggers (which we check below are
275  * row-level), or statement-level BEFORE/AFTER triggers.
276  */
277  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
278  ereport(ERROR,
279  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
280  errmsg("\"%s\" is a view",
282  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
283  /* Disallow TRUNCATE triggers on VIEWs */
284  if (TRIGGER_FOR_TRUNCATE(stmt->events))
285  ereport(ERROR,
286  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
287  errmsg("\"%s\" is a view",
289  errdetail("Views cannot have TRUNCATE triggers.")));
290  }
291  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
292  {
293  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
294  stmt->timing != TRIGGER_TYPE_AFTER)
295  ereport(ERROR,
296  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
297  errmsg("\"%s\" is a foreign table",
299  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
300 
301  /*
302  * We disallow constraint triggers to protect the assumption that
303  * triggers on FKs can't be deferred. See notes with AfterTriggers
304  * data structures, below.
305  */
306  if (stmt->isconstraint)
307  ereport(ERROR,
308  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
309  errmsg("\"%s\" is a foreign table",
311  errdetail("Foreign tables cannot have constraint triggers.")));
312  }
313  else
314  ereport(ERROR,
315  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
316  errmsg("relation \"%s\" cannot have triggers",
318  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
319 
321  ereport(ERROR,
322  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
323  errmsg("permission denied: \"%s\" is a system catalog",
324  RelationGetRelationName(rel))));
325 
326  if (stmt->isconstraint)
327  {
328  /*
329  * We must take a lock on the target relation to protect against
330  * concurrent drop. It's not clear that AccessShareLock is strong
331  * enough, but we certainly need at least that much... otherwise, we
332  * might end up creating a pg_constraint entry referencing a
333  * nonexistent table.
334  */
335  if (OidIsValid(refRelOid))
336  {
337  LockRelationOid(refRelOid, AccessShareLock);
338  constrrelid = refRelOid;
339  }
340  else if (stmt->constrrel != NULL)
341  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
342  false);
343  }
344 
345  /* permission checks */
346  if (!isInternal)
347  {
348  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
349  ACL_TRIGGER);
350  if (aclresult != ACLCHECK_OK)
351  aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
353 
354  if (OidIsValid(constrrelid))
355  {
356  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
357  ACL_TRIGGER);
358  if (aclresult != ACLCHECK_OK)
359  aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
360  get_rel_name(constrrelid));
361  }
362  }
363 
364  /*
365  * When called on a partitioned table to create a FOR EACH ROW trigger
366  * that's not internal, we create one trigger for each partition, too.
367  *
368  * For that, we'd better hold lock on all of them ahead of time.
369  */
370  partition_recurse = !isInternal && stmt->row &&
371  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
372  if (partition_recurse)
374  ShareRowExclusiveLock, NULL));
375 
376  /* Compute tgtype */
377  TRIGGER_CLEAR_TYPE(tgtype);
378  if (stmt->row)
379  TRIGGER_SETT_ROW(tgtype);
380  tgtype |= stmt->timing;
381  tgtype |= stmt->events;
382 
383  /* Disallow ROW-level TRUNCATE triggers */
384  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
385  ereport(ERROR,
386  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
387  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
388 
389  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
390  if (TRIGGER_FOR_INSTEAD(tgtype))
391  {
392  if (!TRIGGER_FOR_ROW(tgtype))
393  ereport(ERROR,
394  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
395  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
396  if (stmt->whenClause)
397  ereport(ERROR,
398  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
399  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
400  if (stmt->columns != NIL)
401  ereport(ERROR,
402  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
403  errmsg("INSTEAD OF triggers cannot have column lists")));
404  }
405 
406  /*
407  * We don't yet support naming ROW transition variables, but the parser
408  * recognizes the syntax so we can give a nicer message here.
409  *
410  * Per standard, REFERENCING TABLE names are only allowed on AFTER
411  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
412  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
413  * only allowed once. Per standard, OLD may not be specified when
414  * creating a trigger only for INSERT, and NEW may not be specified when
415  * creating a trigger only for DELETE.
416  *
417  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
418  * reference both ROW and TABLE transition data.
419  */
420  if (stmt->transitionRels != NIL)
421  {
422  List *varList = stmt->transitionRels;
423  ListCell *lc;
424 
425  foreach(lc, varList)
426  {
428 
429  if (!(tt->isTable))
430  ereport(ERROR,
431  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
432  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
433  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
434 
435  /*
436  * Because of the above test, we omit further ROW-related testing
437  * below. If we later allow naming OLD and NEW ROW variables,
438  * adjustments will be needed below.
439  */
440 
441  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
442  ereport(ERROR,
443  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
444  errmsg("\"%s\" is a foreign table",
446  errdetail("Triggers on foreign tables cannot have transition tables.")));
447 
448  if (rel->rd_rel->relkind == RELKIND_VIEW)
449  ereport(ERROR,
450  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
451  errmsg("\"%s\" is a view",
453  errdetail("Triggers on views cannot have transition tables.")));
454 
455  /*
456  * We currently don't allow row-level triggers with transition
457  * tables on partition or inheritance children. Such triggers
458  * would somehow need to see tuples converted to the format of the
459  * table they're attached to, and it's not clear which subset of
460  * tuples each child should see. See also the prohibitions in
461  * ATExecAttachPartition() and ATExecAddInherit().
462  */
463  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
464  {
465  /* Use appropriate error message. */
466  if (rel->rd_rel->relispartition)
467  ereport(ERROR,
468  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
469  errmsg("ROW triggers with transition tables are not supported on partitions")));
470  else
471  ereport(ERROR,
472  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
473  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
474  }
475 
476  if (stmt->timing != TRIGGER_TYPE_AFTER)
477  ereport(ERROR,
478  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
479  errmsg("transition table name can only be specified for an AFTER trigger")));
480 
481  if (TRIGGER_FOR_TRUNCATE(tgtype))
482  ereport(ERROR,
483  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
484  errmsg("TRUNCATE triggers with transition tables are not supported")));
485 
486  /*
487  * We currently don't allow multi-event triggers ("INSERT OR
488  * UPDATE") with transition tables, because it's not clear how to
489  * handle INSERT ... ON CONFLICT statements which can fire both
490  * INSERT and UPDATE triggers. We show the inserted tuples to
491  * INSERT triggers and the updated tuples to UPDATE triggers, but
492  * it's not yet clear what INSERT OR UPDATE trigger should see.
493  * This restriction could be lifted if we can decide on the right
494  * semantics in a later release.
495  */
496  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
497  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
498  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
499  ereport(ERROR,
500  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
501  errmsg("transition tables cannot be specified for triggers with more than one event")));
502 
503  /*
504  * We currently don't allow column-specific triggers with
505  * transition tables. Per spec, that seems to require
506  * accumulating separate transition tables for each combination of
507  * columns, which is a lot of work for a rather marginal feature.
508  */
509  if (stmt->columns != NIL)
510  ereport(ERROR,
511  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
512  errmsg("transition tables cannot be specified for triggers with column lists")));
513 
514  /*
515  * We disallow constraint triggers with transition tables, to
516  * protect the assumption that such triggers can't be deferred.
517  * See notes with AfterTriggers data structures, below.
518  *
519  * Currently this is enforced by the grammar, so just Assert here.
520  */
521  Assert(!stmt->isconstraint);
522 
523  if (tt->isNew)
524  {
525  if (!(TRIGGER_FOR_INSERT(tgtype) ||
526  TRIGGER_FOR_UPDATE(tgtype)))
527  ereport(ERROR,
528  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
529  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
530 
531  if (newtablename != NULL)
532  ereport(ERROR,
533  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
534  errmsg("NEW TABLE cannot be specified multiple times")));
535 
536  newtablename = tt->name;
537  }
538  else
539  {
540  if (!(TRIGGER_FOR_DELETE(tgtype) ||
541  TRIGGER_FOR_UPDATE(tgtype)))
542  ereport(ERROR,
543  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
544  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
545 
546  if (oldtablename != NULL)
547  ereport(ERROR,
548  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
549  errmsg("OLD TABLE cannot be specified multiple times")));
550 
551  oldtablename = tt->name;
552  }
553  }
554 
555  if (newtablename != NULL && oldtablename != NULL &&
556  strcmp(newtablename, oldtablename) == 0)
557  ereport(ERROR,
558  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
559  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
560  }
561 
562  /*
563  * Parse the WHEN clause, if any and we weren't passed an already
564  * transformed one.
565  *
566  * Note that as a side effect, we fill whenRtable when parsing. If we got
567  * an already parsed clause, this does not occur, which is what we want --
568  * no point in adding redundant dependencies below.
569  */
570  if (!whenClause && stmt->whenClause)
571  {
572  ParseState *pstate;
573  ParseNamespaceItem *nsitem;
574  List *varList;
575  ListCell *lc;
576 
577  /* Set up a pstate to parse with */
578  pstate = make_parsestate(NULL);
579  pstate->p_sourcetext = queryString;
580 
581  /*
582  * Set up nsitems for OLD and NEW references.
583  *
584  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
585  */
586  nsitem = addRangeTableEntryForRelation(pstate, rel,
588  makeAlias("old", NIL),
589  false, false);
590  addNSItemToQuery(pstate, nsitem, false, true, true);
591  nsitem = addRangeTableEntryForRelation(pstate, rel,
593  makeAlias("new", NIL),
594  false, false);
595  addNSItemToQuery(pstate, nsitem, false, true, true);
596 
597  /* Transform expression. Copy to be sure we don't modify original */
598  whenClause = transformWhereClause(pstate,
599  copyObject(stmt->whenClause),
601  "WHEN");
602  /* we have to fix its collations too */
603  assign_expr_collations(pstate, whenClause);
604 
605  /*
606  * Check for disallowed references to OLD/NEW.
607  *
608  * NB: pull_var_clause is okay here only because we don't allow
609  * subselects in WHEN clauses; it would fail to examine the contents
610  * of subselects.
611  */
612  varList = pull_var_clause(whenClause, 0);
613  foreach(lc, varList)
614  {
615  Var *var = (Var *) lfirst(lc);
616 
617  switch (var->varno)
618  {
619  case PRS2_OLD_VARNO:
620  if (!TRIGGER_FOR_ROW(tgtype))
621  ereport(ERROR,
622  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
623  errmsg("statement trigger's WHEN condition cannot reference column values"),
624  parser_errposition(pstate, var->location)));
625  if (TRIGGER_FOR_INSERT(tgtype))
626  ereport(ERROR,
627  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
628  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
629  parser_errposition(pstate, var->location)));
630  /* system columns are okay here */
631  break;
632  case PRS2_NEW_VARNO:
633  if (!TRIGGER_FOR_ROW(tgtype))
634  ereport(ERROR,
635  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
636  errmsg("statement trigger's WHEN condition cannot reference column values"),
637  parser_errposition(pstate, var->location)));
638  if (TRIGGER_FOR_DELETE(tgtype))
639  ereport(ERROR,
640  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
641  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
642  parser_errposition(pstate, var->location)));
643  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
644  ereport(ERROR,
645  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
646  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
647  parser_errposition(pstate, var->location)));
648  if (TRIGGER_FOR_BEFORE(tgtype) &&
649  var->varattno == 0 &&
650  RelationGetDescr(rel)->constr &&
651  RelationGetDescr(rel)->constr->has_generated_stored)
652  ereport(ERROR,
653  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
654  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
655  errdetail("A whole-row reference is used and the table contains generated columns."),
656  parser_errposition(pstate, var->location)));
657  if (TRIGGER_FOR_BEFORE(tgtype) &&
658  var->varattno > 0 &&
659  TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
660  ereport(ERROR,
661  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
662  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
663  errdetail("Column \"%s\" is a generated column.",
664  NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
665  parser_errposition(pstate, var->location)));
666  break;
667  default:
668  /* can't happen without add_missing_from, so just elog */
669  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
670  break;
671  }
672  }
673 
674  /* we'll need the rtable for recordDependencyOnExpr */
675  whenRtable = pstate->p_rtable;
676 
677  qual = nodeToString(whenClause);
678 
679  free_parsestate(pstate);
680  }
681  else if (!whenClause)
682  {
683  whenClause = NULL;
684  whenRtable = NIL;
685  qual = NULL;
686  }
687  else
688  {
689  qual = nodeToString(whenClause);
690  whenRtable = NIL;
691  }
692 
693  /*
694  * Find and validate the trigger function.
695  */
696  if (!OidIsValid(funcoid))
697  funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
698  if (!isInternal)
699  {
700  aclresult = object_aclcheck(ProcedureRelationId, funcoid, GetUserId(), ACL_EXECUTE);
701  if (aclresult != ACLCHECK_OK)
702  aclcheck_error(aclresult, OBJECT_FUNCTION,
703  NameListToString(stmt->funcname));
704  }
705  funcrettype = get_func_rettype(funcoid);
706  if (funcrettype != TRIGGEROID)
707  ereport(ERROR,
708  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
709  errmsg("function %s must return type %s",
710  NameListToString(stmt->funcname), "trigger")));
711 
712  /*
713  * Scan pg_trigger to see if there is already a trigger of the same name.
714  * Skip this for internally generated triggers, since we'll modify the
715  * name to be unique below.
716  *
717  * NOTE that this is cool only because we have ShareRowExclusiveLock on
718  * the relation, so the trigger set won't be changing underneath us.
719  */
720  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
721  if (!isInternal)
722  {
723  ScanKeyData skeys[2];
724  SysScanDesc tgscan;
725 
726  ScanKeyInit(&skeys[0],
727  Anum_pg_trigger_tgrelid,
728  BTEqualStrategyNumber, F_OIDEQ,
730 
731  ScanKeyInit(&skeys[1],
732  Anum_pg_trigger_tgname,
733  BTEqualStrategyNumber, F_NAMEEQ,
734  CStringGetDatum(stmt->trigname));
735 
736  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
737  NULL, 2, skeys);
738 
739  /* There should be at most one matching tuple */
740  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
741  {
742  Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
743 
744  trigoid = oldtrigger->oid;
745  existing_constraint_oid = oldtrigger->tgconstraint;
746  existing_isInternal = oldtrigger->tgisinternal;
747  existing_isClone = OidIsValid(oldtrigger->tgparentid);
748  trigger_exists = true;
749  /* copy the tuple to use in CatalogTupleUpdate() */
750  tuple = heap_copytuple(tuple);
751  }
752  systable_endscan(tgscan);
753  }
754 
755  if (!trigger_exists)
756  {
757  /* Generate the OID for the new trigger. */
758  trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
759  Anum_pg_trigger_oid);
760  }
761  else
762  {
763  /*
764  * If OR REPLACE was specified, we'll replace the old trigger;
765  * otherwise complain about the duplicate name.
766  */
767  if (!stmt->replace)
768  ereport(ERROR,
770  errmsg("trigger \"%s\" for relation \"%s\" already exists",
771  stmt->trigname, RelationGetRelationName(rel))));
772 
773  /*
774  * An internal trigger or a child trigger (isClone) cannot be replaced
775  * by a user-defined trigger. However, skip this test when
776  * in_partition, because then we're recursing from a partitioned table
777  * and the check was made at the parent level.
778  */
779  if ((existing_isInternal || existing_isClone) &&
780  !isInternal && !in_partition)
781  ereport(ERROR,
783  errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger",
784  stmt->trigname, RelationGetRelationName(rel))));
785 
786  /*
787  * It is not allowed to replace with a constraint trigger; gram.y
788  * should have enforced this already.
789  */
790  Assert(!stmt->isconstraint);
791 
792  /*
793  * It is not allowed to replace an existing constraint trigger,
794  * either. (The reason for these restrictions is partly that it seems
795  * difficult to deal with pending trigger events in such cases, and
796  * partly that the command might imply changing the constraint's
797  * properties as well, which doesn't seem nice.)
798  */
799  if (OidIsValid(existing_constraint_oid))
800  ereport(ERROR,
802  errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
803  stmt->trigname, RelationGetRelationName(rel))));
804  }
805 
806  /*
807  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
808  * corresponding pg_constraint entry.
809  */
810  if (stmt->isconstraint && !OidIsValid(constraintOid))
811  {
812  /* Internal callers should have made their own constraints */
813  Assert(!isInternal);
814  constraintOid = CreateConstraintEntry(stmt->trigname,
816  CONSTRAINT_TRIGGER,
817  stmt->deferrable,
818  stmt->initdeferred,
819  true,
820  InvalidOid, /* no parent */
821  RelationGetRelid(rel),
822  NULL, /* no conkey */
823  0,
824  0,
825  InvalidOid, /* no domain */
826  InvalidOid, /* no index */
827  InvalidOid, /* no foreign key */
828  NULL,
829  NULL,
830  NULL,
831  NULL,
832  0,
833  ' ',
834  ' ',
835  NULL,
836  0,
837  ' ',
838  NULL, /* no exclusion */
839  NULL, /* no check constraint */
840  NULL,
841  true, /* islocal */
842  0, /* inhcount */
843  true, /* noinherit */
844  isInternal); /* is_internal */
845  }
846 
847  /*
848  * If trigger is internally generated, modify the provided trigger name to
849  * ensure uniqueness by appending the trigger OID. (Callers will usually
850  * supply a simple constant trigger name in these cases.)
851  */
852  if (isInternal)
853  {
854  snprintf(internaltrigname, sizeof(internaltrigname),
855  "%s_%u", stmt->trigname, trigoid);
856  trigname = internaltrigname;
857  }
858  else
859  {
860  /* user-defined trigger; use the specified trigger name as-is */
861  trigname = stmt->trigname;
862  }
863 
864  /*
865  * Build the new pg_trigger tuple.
866  */
867  memset(nulls, false, sizeof(nulls));
868 
869  values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
870  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
871  values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
872  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
873  CStringGetDatum(trigname));
874  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
875  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
876  values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
877  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
878  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
879  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
880  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
881  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
882  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
883 
884  if (stmt->args)
885  {
886  ListCell *le;
887  char *args;
888  int16 nargs = list_length(stmt->args);
889  int len = 0;
890 
891  foreach(le, stmt->args)
892  {
893  char *ar = strVal(lfirst(le));
894 
895  len += strlen(ar) + 4;
896  for (; *ar; ar++)
897  {
898  if (*ar == '\\')
899  len++;
900  }
901  }
902  args = (char *) palloc(len + 1);
903  args[0] = '\0';
904  foreach(le, stmt->args)
905  {
906  char *s = strVal(lfirst(le));
907  char *d = args + strlen(args);
908 
909  while (*s)
910  {
911  if (*s == '\\')
912  *d++ = '\\';
913  *d++ = *s++;
914  }
915  strcpy(d, "\\000");
916  }
917  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
918  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
920  }
921  else
922  {
923  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
924  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
925  CStringGetDatum(""));
926  }
927 
928  /* build column number array if it's a column-specific trigger */
929  ncolumns = list_length(stmt->columns);
930  if (ncolumns == 0)
931  columns = NULL;
932  else
933  {
934  ListCell *cell;
935  int i = 0;
936 
937  columns = (int16 *) palloc(ncolumns * sizeof(int16));
938  foreach(cell, stmt->columns)
939  {
940  char *name = strVal(lfirst(cell));
941  int16 attnum;
942  int j;
943 
944  /* Lookup column name. System columns are not allowed */
945  attnum = attnameAttNum(rel, name, false);
946  if (attnum == InvalidAttrNumber)
947  ereport(ERROR,
948  (errcode(ERRCODE_UNDEFINED_COLUMN),
949  errmsg("column \"%s\" of relation \"%s\" does not exist",
950  name, RelationGetRelationName(rel))));
951 
952  /* Check for duplicates */
953  for (j = i - 1; j >= 0; j--)
954  {
955  if (columns[j] == attnum)
956  ereport(ERROR,
957  (errcode(ERRCODE_DUPLICATE_COLUMN),
958  errmsg("column \"%s\" specified more than once",
959  name)));
960  }
961 
962  columns[i++] = attnum;
963  }
964  }
965  tgattr = buildint2vector(columns, ncolumns);
966  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
967 
968  /* set tgqual if trigger has WHEN clause */
969  if (qual)
970  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
971  else
972  nulls[Anum_pg_trigger_tgqual - 1] = true;
973 
974  if (oldtablename)
975  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
976  CStringGetDatum(oldtablename));
977  else
978  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
979  if (newtablename)
980  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
981  CStringGetDatum(newtablename));
982  else
983  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
984 
985  /*
986  * Insert or replace tuple in pg_trigger.
987  */
988  if (!trigger_exists)
989  {
990  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
991  CatalogTupleInsert(tgrel, tuple);
992  }
993  else
994  {
995  HeapTuple newtup;
996 
997  newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
998  CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
999  heap_freetuple(newtup);
1000  }
1001 
1002  heap_freetuple(tuple); /* free either original or new tuple */
1003  table_close(tgrel, RowExclusiveLock);
1004 
1005  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
1006  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
1007  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
1008  if (oldtablename)
1009  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1010  if (newtablename)
1011  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1012 
1013  /*
1014  * Update relation's pg_class entry; if necessary; and if not, send an SI
1015  * message to make other backends (and this one) rebuild relcache entries.
1016  */
1017  pgrel = table_open(RelationRelationId, RowExclusiveLock);
1018  tuple = SearchSysCacheCopy1(RELOID,
1020  if (!HeapTupleIsValid(tuple))
1021  elog(ERROR, "cache lookup failed for relation %u",
1022  RelationGetRelid(rel));
1023  if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1024  {
1025  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1026 
1027  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1028 
1030  }
1031  else
1033 
1034  heap_freetuple(tuple);
1035  table_close(pgrel, RowExclusiveLock);
1036 
1037  /*
1038  * If we're replacing a trigger, flush all the old dependencies before
1039  * recording new ones.
1040  */
1041  if (trigger_exists)
1042  deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1043 
1044  /*
1045  * Record dependencies for trigger. Always place a normal dependency on
1046  * the function.
1047  */
1048  myself.classId = TriggerRelationId;
1049  myself.objectId = trigoid;
1050  myself.objectSubId = 0;
1051 
1052  referenced.classId = ProcedureRelationId;
1053  referenced.objectId = funcoid;
1054  referenced.objectSubId = 0;
1055  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1056 
1057  if (isInternal && OidIsValid(constraintOid))
1058  {
1059  /*
1060  * Internally-generated trigger for a constraint, so make it an
1061  * internal dependency of the constraint. We can skip depending on
1062  * the relation(s), as there'll be an indirect dependency via the
1063  * constraint.
1064  */
1065  referenced.classId = ConstraintRelationId;
1066  referenced.objectId = constraintOid;
1067  referenced.objectSubId = 0;
1068  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1069  }
1070  else
1071  {
1072  /*
1073  * User CREATE TRIGGER, so place dependencies. We make trigger be
1074  * auto-dropped if its relation is dropped or if the FK relation is
1075  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1076  */
1077  referenced.classId = RelationRelationId;
1078  referenced.objectId = RelationGetRelid(rel);
1079  referenced.objectSubId = 0;
1080  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1081 
1082  if (OidIsValid(constrrelid))
1083  {
1084  referenced.classId = RelationRelationId;
1085  referenced.objectId = constrrelid;
1086  referenced.objectSubId = 0;
1087  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1088  }
1089  /* Not possible to have an index dependency in this case */
1090  Assert(!OidIsValid(indexOid));
1091 
1092  /*
1093  * If it's a user-specified constraint trigger, make the constraint
1094  * internally dependent on the trigger instead of vice versa.
1095  */
1096  if (OidIsValid(constraintOid))
1097  {
1098  referenced.classId = ConstraintRelationId;
1099  referenced.objectId = constraintOid;
1100  referenced.objectSubId = 0;
1101  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1102  }
1103 
1104  /*
1105  * If it's a partition trigger, create the partition dependencies.
1106  */
1107  if (OidIsValid(parentTriggerOid))
1108  {
1109  ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1110  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1111  ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1112  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1113  }
1114  }
1115 
1116  /* If column-specific trigger, add normal dependencies on columns */
1117  if (columns != NULL)
1118  {
1119  int i;
1120 
1121  referenced.classId = RelationRelationId;
1122  referenced.objectId = RelationGetRelid(rel);
1123  for (i = 0; i < ncolumns; i++)
1124  {
1125  referenced.objectSubId = columns[i];
1126  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1127  }
1128  }
1129 
1130  /*
1131  * If it has a WHEN clause, add dependencies on objects mentioned in the
1132  * expression (eg, functions, as well as any columns used).
1133  */
1134  if (whenRtable != NIL)
1135  recordDependencyOnExpr(&myself, whenClause, whenRtable,
1137 
1138  /* Post creation hook for new trigger */
1139  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1140  isInternal);
1141 
1142  /*
1143  * Lastly, create the trigger on child relations, if needed.
1144  */
1145  if (partition_recurse)
1146  {
1147  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1148  int i;
1149  MemoryContext oldcxt,
1150  perChildCxt;
1151 
1153  "part trig clone",
1155 
1156  /*
1157  * We don't currently expect to be called with a valid indexOid. If
1158  * that ever changes then we'll need to write code here to find the
1159  * corresponding child index.
1160  */
1161  Assert(!OidIsValid(indexOid));
1162 
1163  oldcxt = MemoryContextSwitchTo(perChildCxt);
1164 
1165  /* Iterate to create the trigger on each existing partition */
1166  for (i = 0; i < partdesc->nparts; i++)
1167  {
1168  CreateTrigStmt *childStmt;
1169  Relation childTbl;
1170  Node *qual;
1171 
1172  childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1173 
1174  /*
1175  * Initialize our fabricated parse node by copying the original
1176  * one, then resetting fields that we pass separately.
1177  */
1178  childStmt = (CreateTrigStmt *) copyObject(stmt);
1179  childStmt->funcname = NIL;
1180  childStmt->whenClause = NULL;
1181 
1182  /* If there is a WHEN clause, create a modified copy of it */
1183  qual = copyObject(whenClause);
1184  qual = (Node *)
1186  childTbl, rel);
1187  qual = (Node *)
1189  childTbl, rel);
1190 
1191  CreateTriggerFiringOn(childStmt, queryString,
1192  partdesc->oids[i], refRelOid,
1194  funcoid, trigoid, qual,
1195  isInternal, true, trigger_fires_when);
1196 
1197  table_close(childTbl, NoLock);
1198 
1199  MemoryContextReset(perChildCxt);
1200  }
1201 
1202  MemoryContextSwitchTo(oldcxt);
1203  MemoryContextDelete(perChildCxt);
1204  }
1205 
1206  /* Keep lock on target rel until end of xact */
1207  table_close(rel, NoLock);
1208 
1209  return myself;
1210 }
1211 
1212 /*
1213  * TriggerSetParentTrigger
1214  * Set a partition's trigger as child of its parent trigger,
1215  * or remove the linkage if parentTrigId is InvalidOid.
1216  *
1217  * This updates the constraint's pg_trigger row to show it as inherited, and
1218  * adds PARTITION dependencies to prevent the trigger from being deleted
1219  * on its own. Alternatively, reverse that.
1220  */
1221 void
1223  Oid childTrigId,
1224  Oid parentTrigId,
1225  Oid childTableId)
1226 {
1227  SysScanDesc tgscan;
1228  ScanKeyData skey[1];
1229  Form_pg_trigger trigForm;
1230  HeapTuple tuple,
1231  newtup;
1232  ObjectAddress depender;
1233  ObjectAddress referenced;
1234 
1235  /*
1236  * Find the trigger to delete.
1237  */
1238  ScanKeyInit(&skey[0],
1239  Anum_pg_trigger_oid,
1240  BTEqualStrategyNumber, F_OIDEQ,
1241  ObjectIdGetDatum(childTrigId));
1242 
1243  tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true,
1244  NULL, 1, skey);
1245 
1246  tuple = systable_getnext(tgscan);
1247  if (!HeapTupleIsValid(tuple))
1248  elog(ERROR, "could not find tuple for trigger %u", childTrigId);
1249  newtup = heap_copytuple(tuple);
1250  trigForm = (Form_pg_trigger) GETSTRUCT(newtup);
1251  if (OidIsValid(parentTrigId))
1252  {
1253  /* don't allow setting parent for a constraint that already has one */
1254  if (OidIsValid(trigForm->tgparentid))
1255  elog(ERROR, "trigger %u already has a parent trigger",
1256  childTrigId);
1257 
1258  trigForm->tgparentid = parentTrigId;
1259 
1260  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1261 
1262  ObjectAddressSet(depender, TriggerRelationId, childTrigId);
1263 
1264  ObjectAddressSet(referenced, TriggerRelationId, parentTrigId);
1265  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI);
1266 
1267  ObjectAddressSet(referenced, RelationRelationId, childTableId);
1268  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC);
1269  }
1270  else
1271  {
1272  trigForm->tgparentid = InvalidOid;
1273 
1274  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1275 
1276  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1277  TriggerRelationId,
1279  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1280  RelationRelationId,
1282  }
1283 
1284  heap_freetuple(newtup);
1285  systable_endscan(tgscan);
1286 }
1287 
1288 
1289 /*
1290  * Guts of trigger deletion.
1291  */
1292 void
1294 {
1295  Relation tgrel;
1296  SysScanDesc tgscan;
1297  ScanKeyData skey[1];
1298  HeapTuple tup;
1299  Oid relid;
1300  Relation rel;
1301 
1302  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1303 
1304  /*
1305  * Find the trigger to delete.
1306  */
1307  ScanKeyInit(&skey[0],
1308  Anum_pg_trigger_oid,
1309  BTEqualStrategyNumber, F_OIDEQ,
1310  ObjectIdGetDatum(trigOid));
1311 
1312  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1313  NULL, 1, skey);
1314 
1315  tup = systable_getnext(tgscan);
1316  if (!HeapTupleIsValid(tup))
1317  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1318 
1319  /*
1320  * Open and exclusive-lock the relation the trigger belongs to.
1321  */
1322  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1323 
1324  rel = table_open(relid, AccessExclusiveLock);
1325 
1326  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1327  rel->rd_rel->relkind != RELKIND_VIEW &&
1328  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1329  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1330  ereport(ERROR,
1331  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1332  errmsg("relation \"%s\" cannot have triggers",
1334  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
1335 
1337  ereport(ERROR,
1338  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1339  errmsg("permission denied: \"%s\" is a system catalog",
1340  RelationGetRelationName(rel))));
1341 
1342  /*
1343  * Delete the pg_trigger tuple.
1344  */
1345  CatalogTupleDelete(tgrel, &tup->t_self);
1346 
1347  systable_endscan(tgscan);
1348  table_close(tgrel, RowExclusiveLock);
1349 
1350  /*
1351  * We do not bother to try to determine whether any other triggers remain,
1352  * which would be needed in order to decide whether it's safe to clear the
1353  * relation's relhastriggers. (In any case, there might be a concurrent
1354  * process adding new triggers.) Instead, just force a relcache inval to
1355  * make other backends (and this one too!) rebuild their relcache entries.
1356  * There's no great harm in leaving relhastriggers true even if there are
1357  * no triggers left.
1358  */
1360 
1361  /* Keep lock on trigger's rel until end of xact */
1362  table_close(rel, NoLock);
1363 }
1364 
1365 /*
1366  * get_trigger_oid - Look up a trigger by name to find its OID.
1367  *
1368  * If missing_ok is false, throw an error if trigger not found. If
1369  * true, just return InvalidOid.
1370  */
1371 Oid
1372 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1373 {
1374  Relation tgrel;
1375  ScanKeyData skey[2];
1376  SysScanDesc tgscan;
1377  HeapTuple tup;
1378  Oid oid;
1379 
1380  /*
1381  * Find the trigger, verify permissions, set up object address
1382  */
1383  tgrel = table_open(TriggerRelationId, AccessShareLock);
1384 
1385  ScanKeyInit(&skey[0],
1386  Anum_pg_trigger_tgrelid,
1387  BTEqualStrategyNumber, F_OIDEQ,
1388  ObjectIdGetDatum(relid));
1389  ScanKeyInit(&skey[1],
1390  Anum_pg_trigger_tgname,
1391  BTEqualStrategyNumber, F_NAMEEQ,
1392  CStringGetDatum(trigname));
1393 
1394  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1395  NULL, 2, skey);
1396 
1397  tup = systable_getnext(tgscan);
1398 
1399  if (!HeapTupleIsValid(tup))
1400  {
1401  if (!missing_ok)
1402  ereport(ERROR,
1403  (errcode(ERRCODE_UNDEFINED_OBJECT),
1404  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1405  trigname, get_rel_name(relid))));
1406  oid = InvalidOid;
1407  }
1408  else
1409  {
1410  oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1411  }
1412 
1413  systable_endscan(tgscan);
1414  table_close(tgrel, AccessShareLock);
1415  return oid;
1416 }
1417 
1418 /*
1419  * Perform permissions and integrity checks before acquiring a relation lock.
1420  */
1421 static void
1423  void *arg)
1424 {
1425  HeapTuple tuple;
1426  Form_pg_class form;
1427 
1428  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1429  if (!HeapTupleIsValid(tuple))
1430  return; /* concurrently dropped */
1431  form = (Form_pg_class) GETSTRUCT(tuple);
1432 
1433  /* only tables and views can have triggers */
1434  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1435  form->relkind != RELKIND_FOREIGN_TABLE &&
1436  form->relkind != RELKIND_PARTITIONED_TABLE)
1437  ereport(ERROR,
1438  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1439  errmsg("relation \"%s\" cannot have triggers",
1440  rv->relname),
1441  errdetail_relkind_not_supported(form->relkind)));
1442 
1443  /* you must own the table to rename one of its triggers */
1444  if (!object_ownercheck(RelationRelationId, relid, GetUserId()))
1446  if (!allowSystemTableMods && IsSystemClass(relid, form))
1447  ereport(ERROR,
1448  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1449  errmsg("permission denied: \"%s\" is a system catalog",
1450  rv->relname)));
1451 
1452  ReleaseSysCache(tuple);
1453 }
1454 
1455 /*
1456  * renametrig - changes the name of a trigger on a relation
1457  *
1458  * trigger name is changed in trigger catalog.
1459  * No record of the previous name is kept.
1460  *
1461  * get proper relrelation from relation catalog (if not arg)
1462  * scan trigger catalog
1463  * for name conflict (within rel)
1464  * for original trigger (if not arg)
1465  * modify tgname in trigger tuple
1466  * update row in catalog
1467  */
1470 {
1471  Oid tgoid;
1472  Relation targetrel;
1473  Relation tgrel;
1474  HeapTuple tuple;
1475  SysScanDesc tgscan;
1476  ScanKeyData key[2];
1477  Oid relid;
1478  ObjectAddress address;
1479 
1480  /*
1481  * Look up name, check permissions, and acquire lock (which we will NOT
1482  * release until end of transaction).
1483  */
1485  0,
1487  NULL);
1488 
1489  /* Have lock already, so just need to build relcache entry. */
1490  targetrel = relation_open(relid, NoLock);
1491 
1492  /*
1493  * On partitioned tables, this operation recurses to partitions. Lock all
1494  * tables upfront.
1495  */
1496  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1497  (void) find_all_inheritors(relid, AccessExclusiveLock, NULL);
1498 
1499  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1500 
1501  /*
1502  * Search for the trigger to modify.
1503  */
1504  ScanKeyInit(&key[0],
1505  Anum_pg_trigger_tgrelid,
1506  BTEqualStrategyNumber, F_OIDEQ,
1507  ObjectIdGetDatum(relid));
1508  ScanKeyInit(&key[1],
1509  Anum_pg_trigger_tgname,
1510  BTEqualStrategyNumber, F_NAMEEQ,
1511  PointerGetDatum(stmt->subname));
1512  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1513  NULL, 2, key);
1514  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1515  {
1516  Form_pg_trigger trigform;
1517 
1518  trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1519  tgoid = trigform->oid;
1520 
1521  /*
1522  * If the trigger descends from a trigger on a parent partitioned
1523  * table, reject the rename. We don't allow a trigger in a partition
1524  * to differ in name from that of its parent: that would lead to an
1525  * inconsistency that pg_dump would not reproduce.
1526  */
1527  if (OidIsValid(trigform->tgparentid))
1528  ereport(ERROR,
1529  errmsg("cannot rename trigger \"%s\" on table \"%s\"",
1530  stmt->subname, RelationGetRelationName(targetrel)),
1531  errhint("Rename the trigger on the partitioned table \"%s\" instead.",
1532  get_rel_name(get_partition_parent(relid, false))));
1533 
1534 
1535  /* Rename the trigger on this relation ... */
1536  renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
1537  stmt->subname);
1538 
1539  /* ... and if it is partitioned, recurse to its partitions */
1540  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1541  {
1542  PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);
1543 
1544  for (int i = 0; i < partdesc->nparts; i++)
1545  {
1546  Oid partitionId = partdesc->oids[i];
1547 
1548  renametrig_partition(tgrel, partitionId, trigform->oid,
1549  stmt->newname, stmt->subname);
1550  }
1551  }
1552  }
1553  else
1554  {
1555  ereport(ERROR,
1556  (errcode(ERRCODE_UNDEFINED_OBJECT),
1557  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1558  stmt->subname, RelationGetRelationName(targetrel))));
1559  }
1560 
1561  ObjectAddressSet(address, TriggerRelationId, tgoid);
1562 
1563  systable_endscan(tgscan);
1564 
1565  table_close(tgrel, RowExclusiveLock);
1566 
1567  /*
1568  * Close rel, but keep exclusive lock!
1569  */
1570  relation_close(targetrel, NoLock);
1571 
1572  return address;
1573 }
1574 
1575 /*
1576  * Subroutine for renametrig -- perform the actual work of renaming one
1577  * trigger on one table.
1578  *
1579  * If the trigger has a name different from the expected one, raise a
1580  * NOTICE about it.
1581  */
1582 static void
1584  const char *newname, const char *expected_name)
1585 {
1586  HeapTuple tuple;
1587  Form_pg_trigger tgform;
1588  ScanKeyData key[2];
1589  SysScanDesc tgscan;
1590 
1591  /* If the trigger already has the new name, nothing to do. */
1592  tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
1593  if (strcmp(NameStr(tgform->tgname), newname) == 0)
1594  return;
1595 
1596  /*
1597  * Before actually trying the rename, search for triggers with the same
1598  * name. The update would fail with an ugly message in that case, and it
1599  * is better to throw a nicer error.
1600  */
1601  ScanKeyInit(&key[0],
1602  Anum_pg_trigger_tgrelid,
1603  BTEqualStrategyNumber, F_OIDEQ,
1604  ObjectIdGetDatum(RelationGetRelid(targetrel)));
1605  ScanKeyInit(&key[1],
1606  Anum_pg_trigger_tgname,
1607  BTEqualStrategyNumber, F_NAMEEQ,
1608  PointerGetDatum(newname));
1609  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1610  NULL, 2, key);
1611  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1612  ereport(ERROR,
1614  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1615  newname, RelationGetRelationName(targetrel))));
1616  systable_endscan(tgscan);
1617 
1618  /*
1619  * The target name is free; update the existing pg_trigger tuple with it.
1620  */
1621  tuple = heap_copytuple(trigtup); /* need a modifiable copy */
1622  tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1623 
1624  /*
1625  * If the trigger has a name different from what we expected, let the user
1626  * know. (We can proceed anyway, since we must have reached here following
1627  * a tgparentid link.)
1628  */
1629  if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
1630  ereport(NOTICE,
1631  errmsg("renamed trigger \"%s\" on relation \"%s\"",
1632  NameStr(tgform->tgname),
1633  RelationGetRelationName(targetrel)));
1634 
1635  namestrcpy(&tgform->tgname, newname);
1636 
1637  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1638 
1639  InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);
1640 
1641  /*
1642  * Invalidate relation's relcache entry so that other backends (and this
1643  * one too!) are sent SI message to make them rebuild relcache entries.
1644  * (Ideally this should happen automatically...)
1645  */
1646  CacheInvalidateRelcache(targetrel);
1647 }
1648 
1649 /*
1650  * Subroutine for renametrig -- Helper for recursing to partitions when
1651  * renaming triggers on a partitioned table.
1652  */
1653 static void
1654 renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
1655  const char *newname, const char *expected_name)
1656 {
1657  SysScanDesc tgscan;
1658  ScanKeyData key;
1659  HeapTuple tuple;
1660 
1661  /*
1662  * Given a relation and the OID of a trigger on parent relation, find the
1663  * corresponding trigger in the child and rename that trigger to the given
1664  * name.
1665  */
1666  ScanKeyInit(&key,
1667  Anum_pg_trigger_tgrelid,
1668  BTEqualStrategyNumber, F_OIDEQ,
1669  ObjectIdGetDatum(partitionId));
1670  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1671  NULL, 1, &key);
1672  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1673  {
1674  Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1675  Relation partitionRel;
1676 
1677  if (tgform->tgparentid != parentTriggerOid)
1678  continue; /* not our trigger */
1679 
1680  partitionRel = table_open(partitionId, NoLock);
1681 
1682  /* Rename the trigger on this partition */
1683  renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);
1684 
1685  /* And if this relation is partitioned, recurse to its partitions */
1686  if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1687  {
1688  PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
1689  true);
1690 
1691  for (int i = 0; i < partdesc->nparts; i++)
1692  {
1693  Oid partoid = partdesc->oids[i];
1694 
1695  renametrig_partition(tgrel, partoid, tgform->oid, newname,
1696  NameStr(tgform->tgname));
1697  }
1698  }
1699  table_close(partitionRel, NoLock);
1700 
1701  /* There should be at most one matching tuple */
1702  break;
1703  }
1704  systable_endscan(tgscan);
1705 }
1706 
1707 /*
1708  * EnableDisableTrigger()
1709  *
1710  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1711  * to change 'tgenabled' field for the specified trigger(s)
1712  *
1713  * rel: relation to process (caller must hold suitable lock on it)
1714  * tgname: name of trigger to process, or NULL to scan all triggers
1715  * tgparent: if not zero, process only triggers with this tgparentid
1716  * fires_when: new value for tgenabled field. In addition to generic
1717  * enablement/disablement, this also defines when the trigger
1718  * should be fired in session replication roles.
1719  * skip_system: if true, skip "system" triggers (constraint triggers)
1720  * recurse: if true, recurse to partitions
1721  *
1722  * Caller should have checked permissions for the table; here we also
1723  * enforce that superuser privilege is required to alter the state of
1724  * system triggers
1725  */
1726 void
1727 EnableDisableTrigger(Relation rel, const char *tgname, Oid tgparent,
1728  char fires_when, bool skip_system, bool recurse,
1729  LOCKMODE lockmode)
1730 {
1731  Relation tgrel;
1732  int nkeys;
1733  ScanKeyData keys[2];
1734  SysScanDesc tgscan;
1735  HeapTuple tuple;
1736  bool found;
1737  bool changed;
1738 
1739  /* Scan the relevant entries in pg_triggers */
1740  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1741 
1742  ScanKeyInit(&keys[0],
1743  Anum_pg_trigger_tgrelid,
1744  BTEqualStrategyNumber, F_OIDEQ,
1746  if (tgname)
1747  {
1748  ScanKeyInit(&keys[1],
1749  Anum_pg_trigger_tgname,
1750  BTEqualStrategyNumber, F_NAMEEQ,
1751  CStringGetDatum(tgname));
1752  nkeys = 2;
1753  }
1754  else
1755  nkeys = 1;
1756 
1757  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1758  NULL, nkeys, keys);
1759 
1760  found = changed = false;
1761 
1762  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1763  {
1764  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1765 
1766  if (OidIsValid(tgparent) && tgparent != oldtrig->tgparentid)
1767  continue;
1768 
1769  if (oldtrig->tgisinternal)
1770  {
1771  /* system trigger ... ok to process? */
1772  if (skip_system)
1773  continue;
1774  if (!superuser())
1775  ereport(ERROR,
1776  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1777  errmsg("permission denied: \"%s\" is a system trigger",
1778  NameStr(oldtrig->tgname))));
1779  }
1780 
1781  found = true;
1782 
1783  if (oldtrig->tgenabled != fires_when)
1784  {
1785  /* need to change this one ... make a copy to scribble on */
1786  HeapTuple newtup = heap_copytuple(tuple);
1787  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1788 
1789  newtrig->tgenabled = fires_when;
1790 
1791  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1792 
1793  heap_freetuple(newtup);
1794 
1795  changed = true;
1796  }
1797 
1798  /*
1799  * When altering FOR EACH ROW triggers on a partitioned table, do the
1800  * same on the partitions as well, unless ONLY is specified.
1801  *
1802  * Note that we recurse even if we didn't change the trigger above,
1803  * because the partitions' copy of the trigger may have a different
1804  * value of tgenabled than the parent's trigger and thus might need to
1805  * be changed.
1806  */
1807  if (recurse &&
1808  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
1809  (TRIGGER_FOR_ROW(oldtrig->tgtype)))
1810  {
1811  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1812  int i;
1813 
1814  for (i = 0; i < partdesc->nparts; i++)
1815  {
1816  Relation part;
1817 
1818  part = relation_open(partdesc->oids[i], lockmode);
1819  /* Match on child triggers' tgparentid, not their name */
1820  EnableDisableTrigger(part, NULL, oldtrig->oid,
1821  fires_when, skip_system, recurse,
1822  lockmode);
1823  table_close(part, NoLock); /* keep lock till commit */
1824  }
1825  }
1826 
1827  InvokeObjectPostAlterHook(TriggerRelationId,
1828  oldtrig->oid, 0);
1829  }
1830 
1831  systable_endscan(tgscan);
1832 
1833  table_close(tgrel, RowExclusiveLock);
1834 
1835  if (tgname && !found)
1836  ereport(ERROR,
1837  (errcode(ERRCODE_UNDEFINED_OBJECT),
1838  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1839  tgname, RelationGetRelationName(rel))));
1840 
1841  /*
1842  * If we changed anything, broadcast a SI inval message to force each
1843  * backend (including our own!) to rebuild relation's relcache entry.
1844  * Otherwise they will fail to apply the change promptly.
1845  */
1846  if (changed)
1848 }
1849 
1850 
1851 /*
1852  * Build trigger data to attach to the given relcache entry.
1853  *
1854  * Note that trigger data attached to a relcache entry must be stored in
1855  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1856  * But we should be running in a less long-lived working context. To avoid
1857  * leaking cache memory if this routine fails partway through, we build a
1858  * temporary TriggerDesc in working memory and then copy the completed
1859  * structure into cache memory.
1860  */
1861 void
1863 {
1864  TriggerDesc *trigdesc;
1865  int numtrigs;
1866  int maxtrigs;
1867  Trigger *triggers;
1868  Relation tgrel;
1869  ScanKeyData skey;
1870  SysScanDesc tgscan;
1871  HeapTuple htup;
1872  MemoryContext oldContext;
1873  int i;
1874 
1875  /*
1876  * Allocate a working array to hold the triggers (the array is extended if
1877  * necessary)
1878  */
1879  maxtrigs = 16;
1880  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1881  numtrigs = 0;
1882 
1883  /*
1884  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1885  * be reading the triggers in name order, except possibly during
1886  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1887  * ensures that triggers will be fired in name order.
1888  */
1889  ScanKeyInit(&skey,
1890  Anum_pg_trigger_tgrelid,
1891  BTEqualStrategyNumber, F_OIDEQ,
1892  ObjectIdGetDatum(RelationGetRelid(relation)));
1893 
1894  tgrel = table_open(TriggerRelationId, AccessShareLock);
1895  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1896  NULL, 1, &skey);
1897 
1898  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1899  {
1900  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1901  Trigger *build;
1902  Datum datum;
1903  bool isnull;
1904 
1905  if (numtrigs >= maxtrigs)
1906  {
1907  maxtrigs *= 2;
1908  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1909  }
1910  build = &(triggers[numtrigs]);
1911 
1912  build->tgoid = pg_trigger->oid;
1914  NameGetDatum(&pg_trigger->tgname)));
1915  build->tgfoid = pg_trigger->tgfoid;
1916  build->tgtype = pg_trigger->tgtype;
1917  build->tgenabled = pg_trigger->tgenabled;
1918  build->tgisinternal = pg_trigger->tgisinternal;
1919  build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1920  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1921  build->tgconstrindid = pg_trigger->tgconstrindid;
1922  build->tgconstraint = pg_trigger->tgconstraint;
1923  build->tgdeferrable = pg_trigger->tgdeferrable;
1924  build->tginitdeferred = pg_trigger->tginitdeferred;
1925  build->tgnargs = pg_trigger->tgnargs;
1926  /* tgattr is first var-width field, so OK to access directly */
1927  build->tgnattr = pg_trigger->tgattr.dim1;
1928  if (build->tgnattr > 0)
1929  {
1930  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1931  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1932  build->tgnattr * sizeof(int16));
1933  }
1934  else
1935  build->tgattr = NULL;
1936  if (build->tgnargs > 0)
1937  {
1938  bytea *val;
1939  char *p;
1940 
1942  Anum_pg_trigger_tgargs,
1943  tgrel->rd_att, &isnull));
1944  if (isnull)
1945  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1946  RelationGetRelationName(relation));
1947  p = (char *) VARDATA_ANY(val);
1948  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1949  for (i = 0; i < build->tgnargs; i++)
1950  {
1951  build->tgargs[i] = pstrdup(p);
1952  p += strlen(p) + 1;
1953  }
1954  }
1955  else
1956  build->tgargs = NULL;
1957 
1958  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1959  tgrel->rd_att, &isnull);
1960  if (!isnull)
1961  build->tgoldtable =
1963  else
1964  build->tgoldtable = NULL;
1965 
1966  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1967  tgrel->rd_att, &isnull);
1968  if (!isnull)
1969  build->tgnewtable =
1971  else
1972  build->tgnewtable = NULL;
1973 
1974  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1975  tgrel->rd_att, &isnull);
1976  if (!isnull)
1977  build->tgqual = TextDatumGetCString(datum);
1978  else
1979  build->tgqual = NULL;
1980 
1981  numtrigs++;
1982  }
1983 
1984  systable_endscan(tgscan);
1985  table_close(tgrel, AccessShareLock);
1986 
1987  /* There might not be any triggers */
1988  if (numtrigs == 0)
1989  {
1990  pfree(triggers);
1991  return;
1992  }
1993 
1994  /* Build trigdesc */
1995  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1996  trigdesc->triggers = triggers;
1997  trigdesc->numtriggers = numtrigs;
1998  for (i = 0; i < numtrigs; i++)
1999  SetTriggerFlags(trigdesc, &(triggers[i]));
2000 
2001  /* Copy completed trigdesc into cache storage */
2003  relation->trigdesc = CopyTriggerDesc(trigdesc);
2004  MemoryContextSwitchTo(oldContext);
2005 
2006  /* Release working memory */
2007  FreeTriggerDesc(trigdesc);
2008 }
2009 
2010 /*
2011  * Update the TriggerDesc's hint flags to include the specified trigger
2012  */
2013 static void
/*
 * NOTE(review): the function-name/signature line is absent from this excerpt;
 * presumably SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger) -- confirm
 * against upstream.  Folds one trigger's type bits into the descriptor's
 * summary hint flags.
 */
2015 {
2016  int16 tgtype = pg_trigger->tgtype;
2017 
/*
 * Each trig_* flag is accumulated with |= so the descriptor ends up holding
 * the union over all triggers; TRIGGER_TYPE_MATCHES tests the level
 * (ROW/STATEMENT), timing (BEFORE/AFTER/INSTEAD) and event bits of tgtype.
 */
2018  trigdesc->trig_insert_before_row |=
2019  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2020  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2021  trigdesc->trig_insert_after_row |=
2022  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2023  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2024  trigdesc->trig_insert_instead_row |=
2025  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2026  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
2027  trigdesc->trig_insert_before_statement |=
2028  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2029  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2030  trigdesc->trig_insert_after_statement |=
2031  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2032  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2033  trigdesc->trig_update_before_row |=
2034  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2035  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2036  trigdesc->trig_update_after_row |=
2037  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2038  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2039  trigdesc->trig_update_instead_row |=
2040  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2041  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
2042  trigdesc->trig_update_before_statement |=
2043  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2044  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2045  trigdesc->trig_update_after_statement |=
2046  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2047  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2048  trigdesc->trig_delete_before_row |=
2049  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2050  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2051  trigdesc->trig_delete_after_row |=
2052  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2053  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2054  trigdesc->trig_delete_instead_row |=
2055  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2056  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
2057  trigdesc->trig_delete_before_statement |=
2058  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2059  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2060  trigdesc->trig_delete_after_statement |=
2061  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2062  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2063  /* there are no row-level truncate triggers */
2064  trigdesc->trig_truncate_before_statement |=
2065  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2066  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
2067  trigdesc->trig_truncate_after_statement |=
2068  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2069  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
2070 
/*
 * Transition-table flags: set when the trigger both fires for the event and
 * declares a matching OLD/NEW transition table (tgoldtable/tgnewtable).
 */
2071  trigdesc->trig_insert_new_table |=
2072  (TRIGGER_FOR_INSERT(tgtype) &&
2073  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2074  trigdesc->trig_update_old_table |=
2075  (TRIGGER_FOR_UPDATE(tgtype) &&
2076  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2077  trigdesc->trig_update_new_table |=
2078  (TRIGGER_FOR_UPDATE(tgtype) &&
2079  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2080  trigdesc->trig_delete_old_table |=
2081  (TRIGGER_FOR_DELETE(tgtype) &&
2082  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2083 }
2084 
2085 /*
2086  * Copy a TriggerDesc data structure.
2087  *
2088  * The copy is allocated in the current memory context.
2089  */
2090 TriggerDesc *
/*
 * NOTE(review): the name/parameter line is missing from this excerpt;
 * presumably CopyTriggerDesc(TriggerDesc *trigdesc) -- confirm upstream.
 * Deep-copies a TriggerDesc into the current memory context; returns NULL
 * for a NULL or empty input descriptor.
 */
2092 {
2093  TriggerDesc *newdesc;
2094  Trigger *trigger;
2095  int i;
2096 
2097  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2098  return NULL;
2099 
/* Shallow-copy the descriptor and the trigger array first ... */
2100  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2101  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2102 
2103  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2104  memcpy(trigger, trigdesc->triggers,
2105  trigdesc->numtriggers * sizeof(Trigger));
2106  newdesc->triggers = trigger;
2107 
/*
 * ... then replace each pointer field in the copied Trigger entries with a
 * freshly allocated duplicate, so the copy shares no storage with the
 * original.  Note the loop advances via trigger++ rather than indexing.
 */
2108  for (i = 0; i < trigdesc->numtriggers; i++)
2109  {
2110  trigger->tgname = pstrdup(trigger->tgname);
2111  if (trigger->tgnattr > 0)
2112  {
2113  int16 *newattr;
2114 
2115  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
2116  memcpy(newattr, trigger->tgattr,
2117  trigger->tgnattr * sizeof(int16));
2118  trigger->tgattr = newattr;
2119  }
2120  if (trigger->tgnargs > 0)
2121  {
2122  char **newargs;
2123  int16 j;
2124 
2125  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2126  for (j = 0; j < trigger->tgnargs; j++)
2127  newargs[j] = pstrdup(trigger->tgargs[j]);
2128  trigger->tgargs = newargs;
2129  }
/* Optional strings are duplicated only when present (non-NULL). */
2130  if (trigger->tgqual)
2131  trigger->tgqual = pstrdup(trigger->tgqual);
2132  if (trigger->tgoldtable)
2133  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2134  if (trigger->tgnewtable)
2135  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
2136  trigger++;
2137  }
2138 
2139  return newdesc;
2140 }
2141 
2142 /*
2143  * Free a TriggerDesc data structure.
2144  */
2145 void
/*
 * NOTE(review): the name/parameter line is missing from this excerpt;
 * presumably FreeTriggerDesc(TriggerDesc *trigdesc) -- confirm upstream.
 * Releases all storage owned by a TriggerDesc; safe to call with NULL.
 */
2147 {
2148  Trigger *trigger;
2149  int i;
2150 
2151  if (trigdesc == NULL)
2152  return;
2153 
2154  trigger = trigdesc->triggers;
2155  for (i = 0; i < trigdesc->numtriggers; i++)
2156  {
2157  pfree(trigger->tgname);
2158  if (trigger->tgnattr > 0)
2159  pfree(trigger->tgattr);
2160  if (trigger->tgnargs > 0)
2161  {
/*
 * Destructively counts tgnargs down to -1 while freeing each argument
 * string; harmless since the whole struct is being freed.
 */
2162  while (--(trigger->tgnargs) >= 0)
2163  pfree(trigger->tgargs[trigger->tgnargs]);
2164  pfree(trigger->tgargs);
2165  }
2166  if (trigger->tgqual)
2167  pfree(trigger->tgqual);
2168  if (trigger->tgoldtable)
2169  pfree(trigger->tgoldtable);
2170  if (trigger->tgnewtable)
2171  pfree(trigger->tgnewtable);
2172  trigger++;
2173  }
2174  pfree(trigdesc->triggers);
2175  pfree(trigdesc);
2176 }
2177 
2178 /*
2179  * Compare two TriggerDesc structures for logical equality.
2180  */
#ifdef NOT_USED
/*
 * equalTriggerDescs
 *		Compare two TriggerDesc structures for logical equality.
 *
 * The hint flags are not examined: they are derived from the trigger array,
 * so identical arrays imply identical flags.  Trigger ordering is treated as
 * significant, so corresponding slots of the two arrays are compared.
 *
 * Note: since stringToNode forms of the WHEN clauses are compared as text,
 * parse column locations affect the result; that is fine for exact-equality
 * uses such as cache-staleness checks.
 */
bool
equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
{
	int			tnum;

	/* Two NULLs are trivially equal; exactly one NULL is not. */
	if (trigdesc1 == NULL || trigdesc2 == NULL)
		return trigdesc1 == trigdesc2;

	if (trigdesc1->numtriggers != trigdesc2->numtriggers)
		return false;

	for (tnum = 0; tnum < trigdesc1->numtriggers; tnum++)
	{
		Trigger    *a = &trigdesc1->triggers[tnum];
		Trigger    *b = &trigdesc2->triggers[tnum];
		int			argno;

		/* Scalar and fixed-width fields first. */
		if (a->tgoid != b->tgoid ||
			strcmp(a->tgname, b->tgname) != 0 ||
			a->tgfoid != b->tgfoid ||
			a->tgtype != b->tgtype ||
			a->tgenabled != b->tgenabled ||
			a->tgisinternal != b->tgisinternal ||
			a->tgisclone != b->tgisclone ||
			a->tgconstrrelid != b->tgconstrrelid ||
			a->tgconstrindid != b->tgconstrindid ||
			a->tgconstraint != b->tgconstraint ||
			a->tgdeferrable != b->tgdeferrable ||
			a->tginitdeferred != b->tginitdeferred ||
			a->tgnargs != b->tgnargs ||
			a->tgnattr != b->tgnattr)
			return false;

		/* Column-number array (lengths already known equal). */
		if (a->tgnattr > 0 &&
			memcmp(a->tgattr, b->tgattr, a->tgnattr * sizeof(int16)) != 0)
			return false;

		/* Argument strings, pairwise. */
		for (argno = 0; argno < a->tgnargs; argno++)
		{
			if (strcmp(a->tgargs[argno], b->tgargs[argno]) != 0)
				return false;
		}

		/* Optional strings: must match in NULL-ness, then in content. */
		if ((a->tgqual == NULL) != (b->tgqual == NULL))
			return false;
		if (a->tgqual != NULL && strcmp(a->tgqual, b->tgqual) != 0)
			return false;
		if ((a->tgoldtable == NULL) != (b->tgoldtable == NULL))
			return false;
		if (a->tgoldtable != NULL && strcmp(a->tgoldtable, b->tgoldtable) != 0)
			return false;
		if ((a->tgnewtable == NULL) != (b->tgnewtable == NULL))
			return false;
		if (a->tgnewtable != NULL && strcmp(a->tgnewtable, b->tgnewtable) != 0)
			return false;
	}

	return true;
}
#endif							/* NOT_USED */
2271 
2272 /*
2273  * Check if there is a row-level trigger with transition tables that prevents
2274  * a table from becoming an inheritance child or partition. Return the name
2275  * of the first such incompatible trigger, or NULL if there is none.
2276  */
2277 const char *
/*
 * NOTE(review): the name/parameter line is missing from this excerpt;
 * presumably FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
 * -- confirm upstream.  Returns the name of the first trigger that declares
 * a transition table (OLD or NEW), or NULL if none does.
 */
2279 {
2280  if (trigdesc != NULL)
2281  {
2282  int i;
2283 
2284  for (i = 0; i < trigdesc->numtriggers; ++i)
2285  {
2286  Trigger *trigger = &trigdesc->triggers[i];
2287 
/* A non-NULL tgoldtable/tgnewtable marks a transition-table trigger. */
2288  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2289  return trigger->tgname;
2290  }
2291  }
2292 
2293  return NULL;
2294 }
2295 
2296 /*
2297  * Call a trigger function.
2298  *
2299  * trigdata: trigger descriptor.
2300  * tgindx: trigger's index in finfo and instr arrays.
2301  * finfo: array of cached trigger function call information.
2302  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2303  * per_tuple_context: memory context to execute the function in.
2304  *
2305  * Returns the tuple (or NULL) as returned by the function.
2306  */
2307 static HeapTuple
/*
 * NOTE(review): the name line is missing from this excerpt; presumably
 * ExecCallTriggerFunc(TriggerData *trigdata, ...) -- confirm upstream.
 * Invokes one trigger function and returns its HeapTuple result (or NULL).
 */
2309  int tgindx,
2310  FmgrInfo *finfo,
2311  Instrumentation *instr,
2312  MemoryContext per_tuple_context)
2313 {
2314  LOCAL_FCINFO(fcinfo, 0);
2315  PgStat_FunctionCallUsage fcusage;
2316  Datum result;
2317  MemoryContext oldContext;
2318 
2319  /*
2320  * Protect against code paths that may fail to initialize transition table
2321  * info.
2322  */
2323  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2324  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2325  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2326  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2327  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2328  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2329  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2330 
/* Index into the caller's per-trigger FmgrInfo cache array. */
2331  finfo += tgindx;
2332 
2333  /*
2334  * We cache fmgr lookup info, to avoid making the lookup again on each
2335  * call.
2336  */
2337  if (finfo->fn_oid == InvalidOid)
2338  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2339 
2340  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2341 
2342  /*
2343  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2344  */
2345  if (instr)
2346  InstrStartNode(instr + tgindx);
2347 
2348  /*
2349  * Do the function evaluation in the per-tuple memory context, so that
2350  * leaked memory will be reclaimed once per tuple. Note in particular that
2351  * any new tuple created by the trigger function will live till the end of
2352  * the tuple cycle.
2353  */
2354  oldContext = MemoryContextSwitchTo(per_tuple_context);
2355 
2356  /*
2357  * Call the function, passing no arguments but setting a context.
2358  */
2359  InitFunctionCallInfoData(*fcinfo, finfo, 0,
2360  InvalidOid, (Node *) trigdata, NULL);
2361 
2362  pgstat_init_function_usage(fcinfo, &fcusage);
2363 
/*
 * Bracket the call with MyTriggerDepth++/-- inside PG_TRY/PG_FINALLY so the
 * depth counter is restored even if the trigger function errors out.
 */
2364  MyTriggerDepth++;
2365  PG_TRY();
2366  {
2367  result = FunctionCallInvoke(fcinfo);
2368  }
2369  PG_FINALLY();
2370  {
2371  MyTriggerDepth--;
2372  }
2373  PG_END_TRY();
2374 
2375  pgstat_end_function_usage(&fcusage, true);
2376 
2377  MemoryContextSwitchTo(oldContext);
2378 
2379  /*
2380  * Trigger protocol allows function to return a null pointer, but NOT to
2381  * set the isnull result flag.
2382  */
2383  if (fcinfo->isnull)
2384  ereport(ERROR,
2385  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2386  errmsg("trigger function %u returned null value",
2387  fcinfo->flinfo->fn_oid)));
2388 
2389  /*
2390  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2391  * one "tuple returned" (really the number of firings).
2392  */
2393  if (instr)
2394  InstrStopNode(instr + tgindx, 1);
2395 
2396  return (HeapTuple) DatumGetPointer(result);
2397 }
2398 
2399 void
/*
 * Fire BEFORE STATEMENT INSERT triggers for the given result relation.
 * NOTE(review): the name/parameter line is missing from this excerpt;
 * presumably ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
 * -- confirm upstream.  A statement-level line around the "no-op" comment
 * also appears to be missing here.
 */
2401 {
2402  TriggerDesc *trigdesc;
2403  int i;
2404  TriggerData LocTriggerData = {0};
2405 
2406  trigdesc = relinfo->ri_TrigDesc;
2407 
2408  if (trigdesc == NULL)
2409  return;
2410  if (!trigdesc->trig_insert_before_statement)
2411  return;
2412 
2413  /* no-op if we already fired BS triggers in this context */
2415  CMD_INSERT))
2416  return;
2417 
2418  LocTriggerData.type = T_TriggerData;
2419  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2421  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2422  for (i = 0; i < trigdesc->numtriggers; i++)
2423  {
2424  Trigger *trigger = &trigdesc->triggers[i];
2425  HeapTuple newtuple;
2426 
/* Skip triggers that are not STATEMENT/BEFORE/INSERT or are disabled. */
2427  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2428  TRIGGER_TYPE_STATEMENT,
2429  TRIGGER_TYPE_BEFORE,
2430  TRIGGER_TYPE_INSERT))
2431  continue;
2432  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2433  NULL, NULL, NULL))
2434  continue;
2435 
2436  LocTriggerData.tg_trigger = trigger;
2437  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2438  i,
2439  relinfo->ri_TrigFunctions,
2440  relinfo->ri_TrigInstrument,
2441  GetPerTupleMemoryContext(estate));
2442 
/* Statement-level triggers must not return a tuple. */
2443  if (newtuple)
2444  ereport(ERROR,
2445  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2446  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2447  }
2448 }
2449 
2450 void
/*
 * Queue AFTER STATEMENT INSERT trigger events for later execution.
 * NOTE(review): the name line is missing from this excerpt; presumably
 * ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo, ...) --
 * confirm upstream.
 */
2452  TransitionCaptureState *transition_capture)
2453 {
2454  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2455 
2456  if (trigdesc && trigdesc->trig_insert_after_statement)
2457  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2459  false, NULL, NULL, NIL, NULL, transition_capture,
2460  false);
2461 }
2462 
2463 bool
/*
 * Fire BEFORE ROW INSERT triggers.  Returns false if a trigger suppressed
 * the insert ("do nothing"); otherwise true, with slot possibly replaced by
 * a trigger-modified tuple.  NOTE(review): the name/parameter line is
 * missing from this excerpt; presumably ExecBRInsertTriggers(EState *estate,
 * ResultRelInfo *relinfo, TupleTableSlot *slot) -- confirm upstream.
 */
2465  TupleTableSlot *slot)
2466 {
2467  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2468  HeapTuple newtuple = NULL;
2469  bool should_free;
2470  TriggerData LocTriggerData = {0};
2471  int i;
2472 
2473  LocTriggerData.type = T_TriggerData;
2474  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2477  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2478  for (i = 0; i < trigdesc->numtriggers; i++)
2479  {
2480  Trigger *trigger = &trigdesc->triggers[i];
2481  HeapTuple oldtuple;
2482 
2483  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2484  TRIGGER_TYPE_ROW,
2485  TRIGGER_TYPE_BEFORE,
2486  TRIGGER_TYPE_INSERT))
2487  continue;
2488  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2489  NULL, NULL, slot))
2490  continue;
2491 
/* Materialize the slot's tuple lazily, only once a trigger will run. */
2492  if (!newtuple)
2493  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2494 
2495  LocTriggerData.tg_trigslot = slot;
2496  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2497  LocTriggerData.tg_trigger = trigger;
2498  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2499  i,
2500  relinfo->ri_TrigFunctions,
2501  relinfo->ri_TrigInstrument,
2502  GetPerTupleMemoryContext(estate));
2503  if (newtuple == NULL)
2504  {
2505  if (should_free)
2506  heap_freetuple(oldtuple);
2507  return false; /* "do nothing" */
2508  }
2509  else if (newtuple != oldtuple)
2510  {
2511  ExecForceStoreHeapTuple(newtuple, slot, false);
2512 
2513  /*
2514  * After a tuple in a partition goes through a trigger, the user
2515  * could have changed the partition key enough that the tuple no
2516  * longer fits the partition. Verify that.
2517  */
2518  if (trigger->tgisclone &&
2519  !ExecPartitionCheck(relinfo, slot, estate, false))
2520  ereport(ERROR,
2521  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2522  errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2523  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2524  trigger->tgname,
2527 
2528  if (should_free)
2529  heap_freetuple(oldtuple)
2530 
2531  /* signal tuple should be re-fetched if used */
2532  newtuple = NULL;
2533  }
2534  }
2535 
2536  return true;
2537 }
2538 
2539 void
/*
 * Queue AFTER ROW INSERT trigger events (and transition-table captures).
 * NOTE(review): the name line is missing from this excerpt; presumably
 * ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo, ...) --
 * confirm upstream.
 */
2541  TupleTableSlot *slot, List *recheckIndexes,
2542  TransitionCaptureState *transition_capture)
2543 {
2544  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2545 
/* Fire if there are AFTER ROW triggers, or a NEW transition table to fill. */
2546  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2547  (transition_capture && transition_capture->tcs_insert_new_table))
2548  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2550  true, NULL, slot,
2551  recheckIndexes, NULL,
2552  transition_capture,
2553  false);
2554 }
2555 
2556 bool
/*
 * Fire INSTEAD OF ROW INSERT triggers (used for views).  Returns false if a
 * trigger suppressed the insert; otherwise true, with slot possibly holding
 * a trigger-modified tuple.  NOTE(review): the name line is missing from
 * this excerpt; presumably ExecIRInsertTriggers(EState *estate,
 * ResultRelInfo *relinfo, TupleTableSlot *slot) -- confirm upstream.
 */
2558  TupleTableSlot *slot)
2559 {
2560  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2561  HeapTuple newtuple = NULL;
2562  bool should_free;
2563  TriggerData LocTriggerData = {0};
2564  int i;
2565 
2566  LocTriggerData.type = T_TriggerData;
2567  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2570  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2571  for (i = 0; i < trigdesc->numtriggers; i++)
2572  {
2573  Trigger *trigger = &trigdesc->triggers[i];
2574  HeapTuple oldtuple;
2575 
2576  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2577  TRIGGER_TYPE_ROW,
2578  TRIGGER_TYPE_INSTEAD,
2579  TRIGGER_TYPE_INSERT))
2580  continue;
2581  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2582  NULL, NULL, slot))
2583  continue;
2584 
/* Materialize the slot's tuple lazily, only once a trigger will run. */
2585  if (!newtuple)
2586  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2587 
2588  LocTriggerData.tg_trigslot = slot;
2589  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2590  LocTriggerData.tg_trigger = trigger;
2591  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2592  i,
2593  relinfo->ri_TrigFunctions,
2594  relinfo->ri_TrigInstrument,
2595  GetPerTupleMemoryContext(estate));
2596  if (newtuple == NULL)
2597  {
2598  if (should_free)
2599  heap_freetuple(oldtuple);
2600  return false; /* "do nothing" */
2601  }
2602  else if (newtuple != oldtuple)
2603  {
2604  ExecForceStoreHeapTuple(newtuple, slot, false);
2605 
2606  if (should_free)
2607  heap_freetuple(oldtuple);
2608 
2609  /* signal tuple should be re-fetched if used */
2610  newtuple = NULL;
2611  }
2612  }
2613 
2614  return true;
2615 }
2616 
2617 void
/*
 * Fire BEFORE STATEMENT DELETE triggers for the given result relation.
 * NOTE(review): the name/parameter line is missing from this excerpt;
 * presumably ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
 * -- confirm upstream.
 */
2619 {
2620  TriggerDesc *trigdesc;
2621  int i;
2622  TriggerData LocTriggerData = {0};
2623 
2624  trigdesc = relinfo->ri_TrigDesc;
2625 
2626  if (trigdesc == NULL)
2627  return;
2628  if (!trigdesc->trig_delete_before_statement)
2629  return;
2630 
2631  /* no-op if we already fired BS triggers in this context */
2633  CMD_DELETE))
2634  return;
2635 
2636  LocTriggerData.type = T_TriggerData;
2637  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2639  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2640  for (i = 0; i < trigdesc->numtriggers; i++)
2641  {
2642  Trigger *trigger = &trigdesc->triggers[i];
2643  HeapTuple newtuple;
2644 
/* Skip triggers that are not STATEMENT/BEFORE/DELETE or are disabled. */
2645  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2646  TRIGGER_TYPE_STATEMENT,
2647  TRIGGER_TYPE_BEFORE,
2648  TRIGGER_TYPE_DELETE))
2649  continue;
2650  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2651  NULL, NULL, NULL))
2652  continue;
2653 
2654  LocTriggerData.tg_trigger = trigger;
2655  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2656  i,
2657  relinfo->ri_TrigFunctions,
2658  relinfo->ri_TrigInstrument,
2659  GetPerTupleMemoryContext(estate));
2660 
/* Statement-level triggers must not return a tuple. */
2661  if (newtuple)
2662  ereport(ERROR,
2663  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2664  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2665  }
2666 }
2667 
2668 void
/*
 * Queue AFTER STATEMENT DELETE trigger events for later execution.
 * NOTE(review): the name line is missing from this excerpt; presumably
 * ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo, ...) --
 * confirm upstream.
 */
2670  TransitionCaptureState *transition_capture)
2671 {
2672  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2673 
2674  if (trigdesc && trigdesc->trig_delete_after_statement)
2675  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2677  false, NULL, NULL, NIL, NULL, transition_capture,
2678  false);
2679 }
2680 
2681 /*
2682  * Execute BEFORE ROW DELETE triggers.
2683  *
2684  * True indicates caller can proceed with the delete. False indicates caller
2685  * need to suppress the delete and additionally if requested, we need to pass
2686  * back the concurrently updated tuple if any.
2687  */
2688 bool
/*
 * NOTE(review): the name line is missing from this excerpt; presumably
 * ExecBRDeleteTriggers(EState *estate, EPQState *epqstate, ...) -- confirm
 * upstream.  See the header comment above: returns true if the caller may
 * proceed with the delete; false to suppress it (optionally passing back a
 * concurrently-updated tuple in *epqslot).
 */
2690  ResultRelInfo *relinfo,
2691  ItemPointer tupleid,
2692  HeapTuple fdw_trigtuple,
2693  TupleTableSlot **epqslot,
2694  TM_Result *tmresult,
2695  TM_FailureData *tmfd)
2696 {
2697  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2698  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2699  bool result = true;
2700  TriggerData LocTriggerData = {0};
2701  HeapTuple trigtuple;
2702  bool should_free = false;
2703  int i;
2704 
/* Exactly one of fdw_trigtuple / tupleid must be supplied. */
2705  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2706  if (fdw_trigtuple == NULL)
2707  {
2708  TupleTableSlot *epqslot_candidate = NULL;
2709 
2710  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2711  LockTupleExclusive, slot, &epqslot_candidate,
2712  tmresult, tmfd))
2713  return false;
2714 
2715  /*
2716  * If the tuple was concurrently updated and the caller of this
2717  * function requested for the updated tuple, skip the trigger
2718  * execution.
2719  */
2720  if (epqslot_candidate != NULL && epqslot != NULL)
2721  {
2722  *epqslot = epqslot_candidate;
2723  return false;
2724  }
2725 
2726  trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2727  }
2728  else
2729  {
/* FDW-supplied tuple: store it in the trigger's OLD slot directly. */
2730  trigtuple = fdw_trigtuple;
2731  ExecForceStoreHeapTuple(trigtuple, slot, false);
2732  }
2733 
2734  LocTriggerData.type = T_TriggerData;
2735  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2738  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2739  for (i = 0; i < trigdesc->numtriggers; i++)
2740  {
2741  HeapTuple newtuple;
2742  Trigger *trigger = &trigdesc->triggers[i];
2743 
2744  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2745  TRIGGER_TYPE_ROW,
2746  TRIGGER_TYPE_BEFORE,
2747  TRIGGER_TYPE_DELETE))
2748  continue;
2749  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2750  NULL, slot, NULL))
2751  continue;
2752 
2753  LocTriggerData.tg_trigslot = slot;
2754  LocTriggerData.tg_trigtuple = trigtuple;
2755  LocTriggerData.tg_trigger = trigger;
2756  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2757  i,
2758  relinfo->ri_TrigFunctions,
2759  relinfo->ri_TrigInstrument,
2760  GetPerTupleMemoryContext(estate));
2761  if (newtuple == NULL)
2762  {
2763  result = false; /* tell caller to suppress delete */
2764  break;
2765  }
/* For DELETE, a returned replacement tuple is meaningless; discard it. */
2766  if (newtuple != trigtuple)
2767  heap_freetuple(newtuple);
2768  }
2769  if (should_free)
2770  heap_freetuple(trigtuple);
2771 
2772  return result;
2773 }
2774 
2775 /*
2776  * Note: is_crosspart_update must be true if the DELETE is being performed
2777  * as part of a cross-partition update.
2778  */
2779 void
/*
 * Queue AFTER ROW DELETE trigger events (and OLD transition-table captures).
 * NOTE(review): the name line is missing from this excerpt; presumably
 * ExecARDeleteTriggers(EState *estate, ...) -- confirm upstream.
 */
2781  ResultRelInfo *relinfo,
2782  ItemPointer tupleid,
2783  HeapTuple fdw_trigtuple,
2784  TransitionCaptureState *transition_capture,
2785  bool is_crosspart_update)
2786 {
2787  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2788 
2789  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2790  (transition_capture && transition_capture->tcs_delete_old_table))
2791  {
2792  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2793 
/* Exactly one of fdw_trigtuple / tupleid must be supplied. */
2794  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2795  if (fdw_trigtuple == NULL)
2796  GetTupleForTrigger(estate,
2797  NULL,
2798  relinfo,
2799  tupleid,
2801  slot,
2802  NULL,
2803  NULL,
2804  NULL)
2805  else
2806  ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2807 
2808  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2810  true, slot, NULL, NIL, NULL,
2811  transition_capture,
2812  is_crosspart_update);
2813  }
2814 }
2815 
2816 bool
/*
 * Fire INSTEAD OF ROW DELETE triggers (used for views).  Returns false if
 * a trigger suppressed the delete, true otherwise.  NOTE(review): the name
 * line is missing from this excerpt; presumably ExecIRDeleteTriggers(EState
 * *estate, ResultRelInfo *relinfo, HeapTuple trigtuple) -- confirm upstream.
 */
2818  HeapTuple trigtuple)
2819 {
2820  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2821  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2822  TriggerData LocTriggerData = {0};
2823  int i;
2824 
2825  LocTriggerData.type = T_TriggerData;
2826  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2829  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2830 
2831  ExecForceStoreHeapTuple(trigtuple, slot, false);
2832 
2833  for (i = 0; i < trigdesc->numtriggers; i++)
2834  {
2835  HeapTuple rettuple;
2836  Trigger *trigger = &trigdesc->triggers[i];
2837 
2838  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2839  TRIGGER_TYPE_ROW,
2840  TRIGGER_TYPE_INSTEAD,
2841  TRIGGER_TYPE_DELETE))
2842  continue;
2843  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2844  NULL, slot, NULL))
2845  continue;
2846 
2847  LocTriggerData.tg_trigslot = slot;
2848  LocTriggerData.tg_trigtuple = trigtuple;
2849  LocTriggerData.tg_trigger = trigger;
2850  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2851  i,
2852  relinfo->ri_TrigFunctions,
2853  relinfo->ri_TrigInstrument,
2854  GetPerTupleMemoryContext(estate));
2855  if (rettuple == NULL)
2856  return false; /* Delete was suppressed */
/* Any replacement tuple is irrelevant for DELETE; free and continue. */
2857  if (rettuple != trigtuple)
2858  heap_freetuple(rettuple);
2859  }
2860  return true;
2861 }
2862 
2863 void
/*
 * Fire BEFORE STATEMENT UPDATE triggers for the given result relation.
 * NOTE(review): the name/parameter line is missing from this excerpt;
 * presumably ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
 * -- confirm upstream.
 */
2865 {
2866  TriggerDesc *trigdesc;
2867  int i;
2868  TriggerData LocTriggerData = {0};
2869  Bitmapset *updatedCols;
2870 
2871  trigdesc = relinfo->ri_TrigDesc;
2872 
2873  if (trigdesc == NULL)
2874  return;
2875  if (!trigdesc->trig_update_before_statement)
2876  return;
2877 
2878  /* no-op if we already fired BS triggers in this context */
2880  CMD_UPDATE))
2881  return;
2882 
2883  /* statement-level triggers operate on the parent table */
2884  Assert(relinfo->ri_RootResultRelInfo == NULL);
2885 
/* Updated-column set lets UPDATE OF col triggers be filtered. */
2886  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2887 
2888  LocTriggerData.type = T_TriggerData;
2889  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2891  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2892  LocTriggerData.tg_updatedcols = updatedCols;
2893  for (i = 0; i < trigdesc->numtriggers; i++)
2894  {
2895  Trigger *trigger = &trigdesc->triggers[i];
2896  HeapTuple newtuple;
2897 
2898  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2899  TRIGGER_TYPE_STATEMENT,
2900  TRIGGER_TYPE_BEFORE,
2901  TRIGGER_TYPE_UPDATE))
2902  continue;
2903  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2904  updatedCols, NULL, NULL))
2905  continue;
2906 
2907  LocTriggerData.tg_trigger = trigger;
2908  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2909  i,
2910  relinfo->ri_TrigFunctions,
2911  relinfo->ri_TrigInstrument,
2912  GetPerTupleMemoryContext(estate));
2913 
/* Statement-level triggers must not return a tuple. */
2914  if (newtuple)
2915  ereport(ERROR,
2916  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2917  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2918  }
2919 }
2920 
2921 void
/*
 * Queue AFTER STATEMENT UPDATE trigger events for later execution.
 * NOTE(review): the name line is missing from this excerpt; presumably
 * ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo, ...) --
 * confirm upstream.
 */
2923  TransitionCaptureState *transition_capture)
2924 {
2925  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2926 
2927  /* statement-level triggers operate on the parent table */
2928  Assert(relinfo->ri_RootResultRelInfo == NULL);
2929 
2930  if (trigdesc && trigdesc->trig_update_after_statement)
2931  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2933  false, NULL, NULL, NIL,
2934  ExecGetAllUpdatedCols(relinfo, estate),
2935  transition_capture,
2936  false);
2937 }
2938 
2939 bool
2941  ResultRelInfo *relinfo,
2942  ItemPointer tupleid,
2943  HeapTuple fdw_trigtuple,
2944  TupleTableSlot *newslot,
2945  TM_Result *tmresult,
2946  TM_FailureData *tmfd)
2947 {
2948  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2949  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2950  HeapTuple newtuple = NULL;
2951  HeapTuple trigtuple;
2952  bool should_free_trig = false;
2953  bool should_free_new = false;
2954  TriggerData LocTriggerData = {0};
2955  int i;
2956  Bitmapset *updatedCols;
2957  LockTupleMode lockmode;
2958 
2959  /* Determine lock mode to use */
2960  lockmode = ExecUpdateLockMode(estate, relinfo);
2961 
2962  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2963  if (fdw_trigtuple == NULL)
2964  {
2965  TupleTableSlot *epqslot_candidate = NULL;
2966 
2967  /* get a copy of the on-disk tuple we are planning to update */
2968  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2969  lockmode, oldslot, &epqslot_candidate,
2970  tmresult, tmfd))
2971  return false; /* cancel the update action */
2972 
2973  /*
2974  * In READ COMMITTED isolation level it's possible that target tuple
2975  * was changed due to concurrent update. In that case we have a raw
2976  * subplan output tuple in epqslot_candidate, and need to form a new
2977  * insertable tuple using ExecGetUpdateNewTuple to replace the one we
2978  * received in newslot. Neither we nor our callers have any further
2979  * interest in the passed-in tuple, so it's okay to overwrite newslot
2980  * with the newer data.
2981  *
2982  * (Typically, newslot was also generated by ExecGetUpdateNewTuple, so
2983  * that epqslot_clean will be that same slot and the copy step below
2984  * is not needed.)
2985  */
2986  if (epqslot_candidate != NULL)
2987  {
2988  TupleTableSlot *epqslot_clean;
2989 
2990  epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
2991  oldslot);
2992 
2993  if (newslot != epqslot_clean)
2994  ExecCopySlot(newslot, epqslot_clean);
2995  }
2996 
2997  trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
2998  }
2999  else
3000  {
3001  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3002  trigtuple = fdw_trigtuple;
3003  }
3004 
3005  LocTriggerData.type = T_TriggerData;
3006  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3009  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3010  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
3011  LocTriggerData.tg_updatedcols = updatedCols;
3012  for (i = 0; i < trigdesc->numtriggers; i++)
3013  {
3014  Trigger *trigger = &trigdesc->triggers[i];
3015  HeapTuple oldtuple;
3016 
3017  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3018  TRIGGER_TYPE_ROW,
3019  TRIGGER_TYPE_BEFORE,
3020  TRIGGER_TYPE_UPDATE))
3021  continue;
3022  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3023  updatedCols, oldslot, newslot))
3024  continue;
3025 
3026  if (!newtuple)
3027  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
3028 
3029  LocTriggerData.tg_trigslot = oldslot;
3030  LocTriggerData.tg_trigtuple = trigtuple;
3031  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3032  LocTriggerData.tg_newslot = newslot;
3033  LocTriggerData.tg_trigger = trigger;
3034  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3035  i,
3036  relinfo->ri_TrigFunctions,
3037  relinfo->ri_TrigInstrument,
3038  GetPerTupleMemoryContext(estate));
3039 
3040  if (newtuple == NULL)
3041  {
3042  if (should_free_trig)
3043  heap_freetuple(trigtuple);
3044  if (should_free_new)
3045  heap_freetuple(oldtuple);
3046  return false; /* "do nothing" */
3047  }
3048  else if (newtuple != oldtuple)
3049  {
3050  ExecForceStoreHeapTuple(newtuple, newslot, false);
3051 
3052  /*
3053  * If the tuple returned by the trigger / being stored, is the old
3054  * row version, and the heap tuple passed to the trigger was
3055  * allocated locally, materialize the slot. Otherwise we might
3056  * free it while still referenced by the slot.
3057  */
3058  if (should_free_trig && newtuple == trigtuple)
3059  ExecMaterializeSlot(newslot);
3060 
3061  if (should_free_new)
3062  heap_freetuple(oldtuple);
3063 
3064  /* signal tuple should be re-fetched if used */
3065  newtuple = NULL;
3066  }
3067  }
3068  if (should_free_trig)
3069  heap_freetuple(trigtuple);
3070 
3071  return true;
3072 }
3073 
3074 /*
3075  * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
3076  * and destination partitions, respectively, of a cross-partition update of
3077  * the root partitioned table mentioned in the query, given by 'relinfo'.
3078  * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
3079  * partition, and 'newslot' contains the "new" tuple in the destination
3080  * partition. This interface makes it possible to support the requirements of
3081  * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
3082  * that case.
3083  */
3084 void
/*
 * NOTE(review): the line carrying the function name and first parameters
 * (original line 3085) was lost in extraction; from the parameter list and
 * body this is presumably ExecARUpdateTriggers(EState *estate,
 * ResultRelInfo *relinfo, ...) -- confirm against the original file.
 *
 * Queue AFTER ROW UPDATE trigger events (and capture transition-table rows)
 * for a just-completed update.  For a cross-partition update,
 * src_partinfo/dst_partinfo identify the source and destination partitions
 * and tupleid names the old tuple in the source partition.
 */
3086  ResultRelInfo *src_partinfo,
3087  ResultRelInfo *dst_partinfo,
3088  ItemPointer tupleid,
3089  HeapTuple fdw_trigtuple,
3090  TupleTableSlot *newslot,
3091  List *recheckIndexes,
3092  TransitionCaptureState *transition_capture,
3093  bool is_crosspart_update)
3094 {
3095  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3096 
/*
 * Do nothing unless there are AFTER ROW UPDATE triggers, or a transition
 * table (old or new) that must capture this row.
 */
3097  if ((trigdesc && trigdesc->trig_update_after_row) ||
3098  (transition_capture &&
3099  (transition_capture->tcs_update_old_table ||
3100  transition_capture->tcs_update_new_table)))
3101  {
3102  /*
3103  * Note: if the UPDATE is converted into a DELETE+INSERT as part of
3104  * update-partition-key operation, then this function is also called
3105  * separately for DELETE and INSERT to capture transition table rows.
3106  * In such case, either old tuple or new tuple can be NULL.
3107  */
3108  TupleTableSlot *oldslot;
3109  ResultRelInfo *tupsrc;
3110 
/* Cross-partition callers must supply both partition infos. */
3111  Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
3112  !is_crosspart_update);
3113 
/*
 * The old tuple lives in the source partition for a cross-partition
 * update, otherwise in the target relation itself.
 */
3114  tupsrc = src_partinfo ? src_partinfo : relinfo;
3115  oldslot = ExecGetTriggerOldSlot(estate, tupsrc);
3116 
/*
 * Fetch the old row into oldslot: by ctid for regular tables, from the
 * FDW-supplied tuple for foreign tables, else leave the slot empty.
 * NOTE(review): the argument on dropped line 3122 is presumably the lock
 * mode -- confirm against the original file.
 */
3117  if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
3118  GetTupleForTrigger(estate,
3119  NULL,
3120  tupsrc,
3121  tupleid,
3123  oldslot,
3124  NULL,
3125  NULL,
3126  NULL);
3127  else if (fdw_trigtuple != NULL)
3128  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3129  else
3130  ExecClearTuple(oldslot);
3131 
/*
 * Queue the AFTER trigger event.  NOTE(review): dropped line 3134 is
 * presumably the event-type argument (TRIGGER_EVENT_UPDATE) -- confirm.
 */
3132  AfterTriggerSaveEvent(estate, relinfo,
3133  src_partinfo, dst_partinfo,
3135  true,
3136  oldslot, newslot, recheckIndexes,
3137  ExecGetAllUpdatedCols(relinfo, estate),
3138  transition_capture,
3139  is_crosspart_update);
3140  }
3141 }
3142 
3143 bool
/*
 * NOTE(review): the function-name line (original line 3144) was lost in
 * extraction; from the INSTEAD trigger-type match below this is presumably
 * ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo, ...) -- the
 * INSTEAD OF UPDATE row-trigger entry point, typically used for views.
 *
 * Returns false if a trigger returned NULL ("do nothing"); true otherwise,
 * in which case newslot holds the (possibly trigger-modified) new tuple.
 */
3145  HeapTuple trigtuple, TupleTableSlot *newslot)
3146 {
3147  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3148  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
3149  HeapTuple newtuple = NULL;
3150  bool should_free;
3151  TriggerData LocTriggerData = {0};
3152  int i;
3153 
/*
 * NOTE(review): dropped lines 3156-3157 presumably OR in TRIGGER_EVENT_ROW
 * and TRIGGER_EVENT_INSTEAD -- confirm against the original file.
 */
3154  LocTriggerData.type = T_TriggerData;
3155  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3158  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3159 
/* Make the old tuple available to triggers via oldslot. */
3160  ExecForceStoreHeapTuple(trigtuple, oldslot, false);
3161 
3162  for (i = 0; i < trigdesc->numtriggers; i++)
3163  {
3164  Trigger *trigger = &trigdesc->triggers[i];
3165  HeapTuple oldtuple;
3166 
/* Only INSTEAD OF UPDATE row triggers apply here. */
3167  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3168  TRIGGER_TYPE_ROW,
3169  TRIGGER_TYPE_INSTEAD,
3170  TRIGGER_TYPE_UPDATE))
3171  continue;
3172  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3173  NULL, oldslot, newslot))
3174  continue;
3175 
/* Lazily materialize the new tuple the first time a trigger fires. */
3176  if (!newtuple)
3177  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
3178 
3179  LocTriggerData.tg_trigslot = oldslot;
3180  LocTriggerData.tg_trigtuple = trigtuple;
3181  LocTriggerData.tg_newslot = newslot;
3182  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3183 
3184  LocTriggerData.tg_trigger = trigger;
3185  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3186  i,
3187  relinfo->ri_TrigFunctions,
3188  relinfo->ri_TrigInstrument,
3189  GetPerTupleMemoryContext(estate));
3190  if (newtuple == NULL)
3191  {
3192  return false; /* "do nothing" */
3193  }
3194  else if (newtuple != oldtuple)
3195  {
/* Trigger replaced the tuple: store it and free the superseded copy. */
3196  ExecForceStoreHeapTuple(newtuple, newslot, false);
3197 
3198  if (should_free)
3199  heap_freetuple(oldtuple);
3200 
3201  /* signal tuple should be re-fetched if used */
3202  newtuple = NULL;
3203  }
3204  }
3205 
3206  return true;
3207 }
3208 
3209 void
/*
 * NOTE(review): the function-name line (original line 3210) was lost in
 * extraction; from the body this is presumably
 * ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo) -- the
 * BEFORE STATEMENT TRUNCATE trigger entry point.  Confirm against the
 * original file.
 */
3211 {
3212  TriggerDesc *trigdesc;
3213  int i;
3214  TriggerData LocTriggerData = {0};
3215 
3216  trigdesc = relinfo->ri_TrigDesc;
3217 
/* Nothing to do without a BEFORE STATEMENT TRUNCATE trigger. */
3218  if (trigdesc == NULL)
3219  return;
3220  if (!trigdesc->trig_truncate_before_statement)
3221  return;
3222 
/*
 * NOTE(review): dropped line 3225 presumably ORs in TRIGGER_EVENT_BEFORE
 * -- confirm against the original file.
 */
3223  LocTriggerData.type = T_TriggerData;
3224  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3226  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3227 
3228  for (i = 0; i < trigdesc->numtriggers; i++)
3229  {
3230  Trigger *trigger = &trigdesc->triggers[i];
3231  HeapTuple newtuple;
3232 
3233  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3234  TRIGGER_TYPE_STATEMENT,
3235  TRIGGER_TYPE_BEFORE,
3236  TRIGGER_TYPE_TRUNCATE))
3237  continue;
3238  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3239  NULL, NULL, NULL))
3240  continue;
3241 
3242  LocTriggerData.tg_trigger = trigger;
3243  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3244  i,
3245  relinfo->ri_TrigFunctions,
3246  relinfo->ri_TrigInstrument,
3247  GetPerTupleMemoryContext(estate));
3248 
/* Statement-level triggers have no row to return; reject any value. */
3249  if (newtuple)
3250  ereport(ERROR,
3251  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3252  errmsg("BEFORE STATEMENT trigger cannot return a value")));
3253  }
3254 }
3255 
3256 void
/*
 * NOTE(review): the function-name line (original line 3257) was lost in
 * extraction; presumably ExecASTruncateTriggers(EState *estate,
 * ResultRelInfo *relinfo) -- queues AFTER STATEMENT TRUNCATE trigger
 * events.  Dropped line 3264 is presumably the TRIGGER_EVENT_TRUNCATE
 * event argument -- confirm against the original file.
 */
3258 {
3259  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3260 
3261  if (trigdesc && trigdesc->trig_truncate_after_statement)
3262  AfterTriggerSaveEvent(estate, relinfo,
3263  NULL, NULL,
3265  false, NULL, NULL, NIL, NULL, NULL,
3266  false);
3267 }
3268 
3269 
3270 /*
3271  * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3272  */
3273 static bool
/*
 * NOTE(review): the function-name line (original line 3274) was lost in
 * extraction; from the forward references above this is
 * GetTupleForTrigger(EState *estate, ...).
 *
 * Fetch the tuple named by 'tid' into 'oldslot'.  When 'epqslot' is
 * non-NULL the tuple is also locked (table_tuple_lock) and, on a
 * READ COMMITTED concurrent update, rechecked via EvalPlanQual; the
 * resulting replacement tuple, if any, is returned in *epqslot.
 * Returns false if the tuple should not be processed (deleted, EPQ
 * failed, or self-modified by a later command).
 */
3275  EPQState *epqstate,
3276  ResultRelInfo *relinfo,
3277  ItemPointer tid,
3278  LockTupleMode lockmode,
3279  TupleTableSlot *oldslot,
3280  TupleTableSlot **epqslot,
3281  TM_Result *tmresultp,
3282  TM_FailureData *tmfdp)
3283 {
3284  Relation relation = relinfo->ri_RelationDesc;
3285 
3286  if (epqslot != NULL)
3287  {
3288  TM_Result test;
3289  TM_FailureData tmfd;
3290  int lockflags = 0;
3291 
3292  *epqslot = NULL;
3293 
3294  /* caller must pass an epqstate if EvalPlanQual is possible */
3295  Assert(epqstate != NULL);
3296 
3297  /*
3298  * lock tuple for update
3299  */
/*
 * NOTE(review): dropped line 3300 is presumably the
 * "if (!IsolationUsesXactSnapshot())" guard that enables following the
 * update chain only outside serializable isolation -- confirm.
 */
3301  lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
3302  test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
3303  estate->es_output_cid,
3304  lockmode, LockWaitBlock,
3305  lockflags,
3306  &tmfd);
3307 
3308  /* Let the caller know about the status of this operation */
3309  if (tmresultp)
3310  *tmresultp = test;
3311  if (tmfdp)
3312  *tmfdp = tmfd;
3313 
3314  switch (test)
3315  {
3316  case TM_SelfModified:
3317 
3318  /*
3319  * The target tuple was already updated or deleted by the
3320  * current command, or by a later command in the current
3321  * transaction. We ignore the tuple in the former case, and
3322  * throw error in the latter case, for the same reasons
3323  * enumerated in ExecUpdate and ExecDelete in
3324  * nodeModifyTable.c.
3325  */
3326  if (tmfd.cmax != estate->es_output_cid)
3327  ereport(ERROR,
3328  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3329  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3330  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3331 
3332  /* treat it as deleted; do not process */
3333  return false;
3334 
3335  case TM_Ok:
3336  if (tmfd.traversed)
3337  {
3338  /*
3339  * Recheck the tuple using EPQ. For MERGE, we leave this
3340  * to the caller (it must do additional rechecking, and
3341  * might end up executing a different action entirely).
3342  */
3343  if (estate->es_plannedstmt->commandType == CMD_MERGE)
3344  {
3345  if (tmresultp)
3346  *tmresultp = TM_Updated;
3347  return false;
3348  }
3349 
3350  *epqslot = EvalPlanQual(epqstate,
3351  relation,
3352  relinfo->ri_RangeTableIndex,
3353  oldslot);
3354 
3355  /*
3356  * If PlanQual failed for updated tuple - we must not
3357  * process this tuple!
3358  */
3359  if (TupIsNull(*epqslot))
3360  {
3361  *epqslot = NULL;
3362  return false;
3363  }
3364  }
3365  break;
3366 
/*
 * NOTE(review): dropped lines 3368 and 3370 are presumably the
 * serializable-isolation guard and the
 * errcode(ERRCODE_T_R_SERIALIZATION_FAILURE) for this ereport -- confirm.
 */
3367  case TM_Updated:
3369  ereport(ERROR,
3371  errmsg("could not serialize access due to concurrent update")));
3372  elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3373  break;
3374 
/*
 * NOTE(review): dropped lines 3376 and 3378 are presumably the analogous
 * guard and errcode for the concurrent-delete case -- confirm.
 */
3375  case TM_Deleted:
3377  ereport(ERROR,
3379  errmsg("could not serialize access due to concurrent delete")));
3380  /* tuple was deleted */
3381  return false;
3382 
3383  case TM_Invisible:
3384  elog(ERROR, "attempted to lock invisible tuple");
3385  break;
3386 
3387  default:
3388  elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3389  return false; /* keep compiler quiet */
3390  }
3391  }
3392  else
3393  {
3394  /*
3395  * We expect the tuple to be present, thus very simple error handling
3396  * suffices.
3397  */
3398  if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3399  oldslot))
3400  elog(ERROR, "failed to fetch tuple for trigger");
3401  }
3402 
3403  return true;
3404 }
3405 
3406 /*
3407  * Is trigger enabled to fire?
3408  */
3409 static bool
/*
 * NOTE(review): the function-name line (original line 3410) was lost in
 * extraction; this is TriggerEnabled(EState *estate,
 * ResultRelInfo *relinfo, ...), per the callers above.
 *
 * Decide whether a trigger should fire for the given event: checks the
 * replication-role enable state, any column list (UPDATE only), and any
 * WHEN clause (evaluated with OLD/NEW as INNER_VAR/OUTER_VAR).
 */
3411  Trigger *trigger, TriggerEvent event,
3412  Bitmapset *modifiedCols,
3413  TupleTableSlot *oldslot, TupleTableSlot *newslot)
3414 {
3415  /* Check replication-role-dependent enable state */
/*
 * NOTE(review): dropped line 3416 is presumably the
 * "if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)"
 * condition for this branch -- confirm against the original file.
 */
3417  {
3418  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3419  trigger->tgenabled == TRIGGER_DISABLED)
3420  return false;
3421  }
3422  else /* ORIGIN or LOCAL role */
3423  {
3424  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3425  trigger->tgenabled == TRIGGER_DISABLED)
3426  return false;
3427  }
3428 
3429  /*
3430  * Check for column-specific trigger (only possible for UPDATE, and in
3431  * fact we *must* ignore tgattr for other event types)
3432  */
3433  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3434  {
3435  int i;
3436  bool modified;
3437 
/*
 * NOTE(review): dropped line 3441 is presumably the bms_is_member() test
 * of trigger->tgattr[i] against modifiedCols -- confirm.
 */
3438  modified = false;
3439  for (i = 0; i < trigger->tgnattr; i++)
3440  {
3442  modifiedCols))
3443  {
3444  modified = true;
3445  break;
3446  }
3447  }
/* Skip the trigger if none of its listed columns were updated. */
3448  if (!modified)
3449  return false;
3450  }
3451 
3452  /* Check for WHEN clause */
3453  if (trigger->tgqual)
3454  {
3455  ExprState **predicate;
3456  ExprContext *econtext;
3457  MemoryContext oldContext;
3458  int i;
3459 
3460  Assert(estate != NULL);
3461 
3462  /*
3463  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3464  * matching element of relinfo->ri_TrigWhenExprs[]
3465  */
3466  i = trigger - relinfo->ri_TrigDesc->triggers;
3467  predicate = &relinfo->ri_TrigWhenExprs[i];
3468 
3469  /*
3470  * If first time through for this WHEN expression, build expression
3471  * node trees for it. Keep them in the per-query memory context so
3472  * they'll survive throughout the query.
3473  */
3474  if (*predicate == NULL)
3475  {
3476  Node *tgqual;
3477 
/*
 * NOTE(review): dropped lines 3481-3482 are presumably the
 * ChangeVarNodes() calls remapping OLD/NEW var numbers to
 * INNER_VAR/OUTER_VAR -- confirm against the original file.
 */
3478  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3479  tgqual = stringToNode(trigger->tgqual);
3480  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3483  /* ExecPrepareQual wants implicit-AND form */
3484  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3485  *predicate = ExecPrepareQual((List *) tgqual, estate);
3486  MemoryContextSwitchTo(oldContext);
3487  }
3488 
3489  /*
3490  * We will use the EState's per-tuple context for evaluating WHEN
3491  * expressions (creating it if it's not already there).
3492  */
3493  econtext = GetPerTupleExprContext(estate);
3494 
3495  /*
3496  * Finally evaluate the expression, making the old and/or new tuples
3497  * available as INNER_VAR/OUTER_VAR respectively.
3498  */
3499  econtext->ecxt_innertuple = oldslot;
3500  econtext->ecxt_outertuple = newslot;
3501  if (!ExecQual(*predicate, econtext))
3502  return false;
3503  }
3504 
3505  return true;
3506 }
3507 
3508 
3509 /* ----------
3510  * After-trigger stuff
3511  *
3512  * The AfterTriggersData struct holds data about pending AFTER trigger events
3513  * during the current transaction tree. (BEFORE triggers are fired
3514  * immediately so we don't need any persistent state about them.) The struct
3515  * and most of its subsidiary data are kept in TopTransactionContext; however
3516  * some data that can be discarded sooner appears in the CurTransactionContext
3517  * of the relevant subtransaction. Also, the individual event records are
3518  * kept in a separate sub-context of TopTransactionContext. This is done
3519  * mainly so that it's easy to tell from a memory context dump how much space
3520  * is being eaten by trigger events.
3521  *
3522  * Because the list of pending events can grow large, we go to some
3523  * considerable effort to minimize per-event memory consumption. The event
3524  * records are grouped into chunks and common data for similar events in the
3525  * same chunk is only stored once.
3526  *
3527  * XXX We need to be able to save the per-event data in a file if it grows too
3528  * large.
3529  * ----------
3530  */
3531 
3532 /* Per-trigger SET CONSTRAINT status */
3534 {
3538 
3540 
3541 /*
3542  * SET CONSTRAINT intra-transaction status.
3543  *
3544  * We make this a single palloc'd object so it can be copied and freed easily.
3545  *
3546  * all_isset and all_isdeferred are used to keep track
3547  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3548  *
3549  * trigstates[] stores per-trigger tgisdeferred settings.
3550  */
3552 {
3555  int numstates; /* number of trigstates[] entries in use */
3556  int numalloc; /* allocated size of trigstates[] */
3559 
3561 
3562 
3563 /*
3564  * Per-trigger-event data
3565  *
3566  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3567  * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions.
3568  * Each event record also has an associated AfterTriggerSharedData that is
3569  * shared across all instances of similar events within a "chunk".
3570  *
3571  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3572  * fields. Updates of regular tables use two; inserts and deletes of regular
3573  * tables use one; foreign tables always use zero and save the tuple(s) to a
3574  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3575  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3576  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3577  * tuple(s). This permits storing tuples once regardless of the number of
3578  * row-level triggers on a foreign table.
3579  *
3580  * When updates on partitioned tables cause rows to move between partitions,
3581  * the OIDs of both partitions are stored too, so that the tuples can be
3582  * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross-
3583  * partition update").
3584  *
3585  * Note that we need triggers on foreign tables to be fired in exactly the
3586  * order they were queued, so that the tuples come out of the tuplestore in
3587  * the right order. To ensure that, we forbid deferrable (constraint)
3588  * triggers on foreign tables. This also ensures that such triggers do not
3589  * get deferred into outer trigger query levels, meaning that it's okay to
3590  * destroy the tuplestore at the end of the query level.
3591  *
3592  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3593  * require no ctid field. We lack the flag bit space to neatly represent that
3594  * distinct case, and it seems unlikely to be worth much trouble.
3595  *
3596  * Note: ats_firing_id is initially zero and is set to something else when
3597  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3598  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3599  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3600  * because all instances of the same type of event in a given event list will
3601  * be fired at the same time, if they were queued between the same firing
3602  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3603  * a new event to an existing AfterTriggerSharedData record.
3604  */
3606 
3607 #define AFTER_TRIGGER_OFFSET 0x07FFFFFF /* must be low-order bits */
3608 #define AFTER_TRIGGER_DONE 0x80000000
3609 #define AFTER_TRIGGER_IN_PROGRESS 0x40000000
3610 /* bits describing the size and tuple sources of this event */
3611 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3612 #define AFTER_TRIGGER_FDW_FETCH 0x20000000
3613 #define AFTER_TRIGGER_1CTID 0x10000000
3614 #define AFTER_TRIGGER_2CTID 0x30000000
3615 #define AFTER_TRIGGER_CP_UPDATE 0x08000000
3616 #define AFTER_TRIGGER_TUP_BITS 0x38000000
3618 
3620 {
3621  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3622  Oid ats_tgoid; /* the trigger's ID */
3623  Oid ats_relid; /* the relation it's on */
3624  CommandId ats_firing_id; /* ID for firing cycle */
3625  struct AfterTriggersTableData *ats_table; /* transition table access */
3626  Bitmapset *ats_modifiedcols; /* modified columns */
3628 
3630 
3632 {
3633  TriggerFlags ate_flags; /* status bits and offset to shared data */
3634  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3635  ItemPointerData ate_ctid2; /* new updated tuple */
3636 
3637  /*
3638  * During a cross-partition update of a partitioned table, we also store
3639  * the OIDs of source and destination partitions that are needed to fetch
3640  * the old (ctid1) and the new tuple (ctid2) from, respectively.
3641  */
3645 
3646 /* AfterTriggerEventData, minus ate_src_part, ate_dst_part */
3648 {
3653 
3654 /* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */
3656 {
3657  TriggerFlags ate_flags; /* status bits and offset to shared data */
3658  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3660 
3661 /* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */
3663 {
3664  TriggerFlags ate_flags; /* status bits and offset to shared data */
3666 
3667 #define SizeofTriggerEvent(evt) \
3668  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \
3669  sizeof(AfterTriggerEventData) : \
3670  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3671  sizeof(AfterTriggerEventDataNoOids) : \
3672  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3673  sizeof(AfterTriggerEventDataOneCtid) : \
3674  sizeof(AfterTriggerEventDataZeroCtids))))
3675 
3676 #define GetTriggerSharedData(evt) \
3677  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3678 
3679 /*
3680  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3681  * larger chunks (a slightly more sophisticated version of an expansible
3682  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3683  * AfterTriggerEventData records; the space between endfree and endptr is
3684  * occupied by AfterTriggerSharedData records.
3685  */
3687 {
3688  struct AfterTriggerEventChunk *next; /* list link */
3689  char *freeptr; /* start of free space in chunk */
3690  char *endfree; /* end of free space in chunk */
3691  char *endptr; /* end of chunk */
3692  /* event data follows here */
3694 
3695 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3696 
3697 /* A list of events */
3699 {
3702  char *tailfree; /* freeptr of tail chunk */
3704 
3705 /* Macros to help in iterating over a list of events */
3706 #define for_each_chunk(cptr, evtlist) \
3707  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3708 #define for_each_event(eptr, cptr) \
3709  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3710  (char *) eptr < (cptr)->freeptr; \
3711  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3712 /* Use this if no special per-chunk processing is needed */
3713 #define for_each_event_chunk(eptr, cptr, evtlist) \
3714  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3715 
3716 /* Macros for iterating from a start point that might not be list start */
3717 #define for_each_chunk_from(cptr) \
3718  for (; cptr != NULL; cptr = cptr->next)
3719 #define for_each_event_from(eptr, cptr) \
3720  for (; \
3721  (char *) eptr < (cptr)->freeptr; \
3722  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3723 
3724 
3725 /*
3726  * All per-transaction data for the AFTER TRIGGERS module.
3727  *
3728  * AfterTriggersData has the following fields:
3729  *
3730  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3731  * We mark firable events with the current firing cycle's ID so that we can
3732  * tell which ones to work on. This ensures sane behavior if a trigger
3733  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3734  * only fire those events that weren't already scheduled for firing.
3735  *
3736  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3737  * This is saved and restored across failed subtransactions.
3738  *
3739  * events is the current list of deferred events. This is global across
3740  * all subtransactions of the current transaction. In a subtransaction
3741  * abort, we know that the events added by the subtransaction are at the
3742  * end of the list, so it is relatively easy to discard them. The event
3743  * list chunks themselves are stored in event_cxt.
3744  *
3745  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3746  * (-1 when the stack is empty).
3747  *
3748  * query_stack[query_depth] is the per-query-level data, including these fields:
3749  *
3750  * events is a list of AFTER trigger events queued by the current query.
3751  * None of these are valid until the matching AfterTriggerEndQuery call
3752  * occurs. At that point we fire immediate-mode triggers, and append any
3753  * deferred events to the main events list.
3754  *
3755  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3756  * needed by events queued by the current query. (Note: we use just one
3757  * tuplestore even though more than one foreign table might be involved.
3758  * This is okay because tuplestores don't really care what's in the tuples
3759  * they store; but it's possible that someday it'd break.)
3760  *
3761  * tables is a List of AfterTriggersTableData structs for target tables
3762  * of the current query (see below).
3763  *
3764  * maxquerydepth is just the allocated length of query_stack.
3765  *
3766  * trans_stack holds per-subtransaction data, including these fields:
3767  *
3768  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3769  * state data. Each subtransaction level that modifies that state first
3770  * saves a copy, which we use to restore the state if we abort.
3771  *
3772  * events is a copy of the events head/tail pointers,
3773  * which we use to restore those values during subtransaction abort.
3774  *
3775  * query_depth is the subtransaction-start-time value of query_depth,
3776  * which we similarly use to clean up at subtransaction abort.
3777  *
3778  * firing_counter is the subtransaction-start-time value of firing_counter.
3779  * We use this to recognize which deferred triggers were fired (or marked
3780  * for firing) within an aborted subtransaction.
3781  *
3782  * We use GetCurrentTransactionNestLevel() to determine the correct array
3783  * index in trans_stack. maxtransdepth is the number of allocated entries in
3784  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3785  * in cases where errors during subxact abort cause multiple invocations
3786  * of AfterTriggerEndSubXact() at the same nesting depth.)
3787  *
3788  * We create an AfterTriggersTableData struct for each target table of the
3789  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3790  * either transition tables or statement-level triggers. This is used to
3791  * hold the relevant transition tables, as well as info tracking whether
3792  * we already queued the statement triggers. (We use that info to prevent
3793  * firing the same statement triggers more than once per statement, or really
3794  * once per transition table set.) These structs, along with the transition
3795  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3796  * That's sufficient lifespan because we don't allow transition tables to be
3797  * used by deferrable triggers, so they only need to survive until
3798  * AfterTriggerEndQuery.
3799  */
3803 
3804 typedef struct AfterTriggersData
3805 {
3806  CommandId firing_counter; /* next firing ID to assign */
3807  SetConstraintState state; /* the active S C state */
3808  AfterTriggerEventList events; /* deferred-event list */
3809  MemoryContext event_cxt; /* memory context for events, if any */
3810 
3811  /* per-query-level data: */
3812  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3813  int query_depth; /* current index in above array */
3814  int maxquerydepth; /* allocated len of above array */
3815 
3816  /* per-subtransaction-level data: */
3817  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3818  int maxtransdepth; /* allocated len of above array */
3820 
3822 {
3823  AfterTriggerEventList events; /* events pending from this query */
3824  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3825  List *tables; /* list of AfterTriggersTableData, see below */
3826 };
3827 
3829 {
3830  /* these fields are just for resetting at subtrans abort: */
3831  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3832  AfterTriggerEventList events; /* saved list pointer */
3833  int query_depth; /* saved query_depth */
3834  CommandId firing_counter; /* saved firing_counter */
3835 };
3836 
3838 {
3839  /* relid + cmdType form the lookup key for these structs: */
3840  Oid relid; /* target table's OID */
3841  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3842  bool closed; /* true when no longer OK to add tuples */
3843  bool before_trig_done; /* did we already queue BS triggers? */
3844  bool after_trig_done; /* did we already queue AS triggers? */
3845  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3846 
3847  /*
3848  * We maintain separate transition tables for UPDATE/INSERT/DELETE since
3849  * MERGE can run all three actions in a single statement. Note that UPDATE
3850  * needs both old and new transition tables whereas INSERT needs only new,
3851  * and DELETE needs only old.
3852  */
3853 
3854  /* "old" transition table for UPDATE, if any */
3856  /* "new" transition table for UPDATE, if any */
3858  /* "old" transition table for DELETE, if any */
3860  /* "new" transition table for INSERT, if any */
3862 
3863  TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3864 };
3865 
3867 
3868 static void AfterTriggerExecute(EState *estate,
3869  AfterTriggerEvent event,
3870  ResultRelInfo *relInfo,
3871  ResultRelInfo *src_relInfo,
3872  ResultRelInfo *dst_relInfo,
3873  TriggerDesc *trigdesc,
3874  FmgrInfo *finfo,
3875  Instrumentation *instr,
3876  MemoryContext per_tuple_context,
3877  TupleTableSlot *trig_tuple_slot1,
3878  TupleTableSlot *trig_tuple_slot2);
3880  CmdType cmdType);
3882  TupleDesc tupdesc);
3884  TupleTableSlot *oldslot,
3885  TupleTableSlot *newslot,
3886  TransitionCaptureState *transition_capture);
3887 static void TransitionTableAddTuple(EState *estate,
3888  TransitionCaptureState *transition_capture,
3889  ResultRelInfo *relinfo,
3890  TupleTableSlot *slot,
3891  TupleTableSlot *original_insert_tuple,
3892  Tuplestorestate *tuplestore);
3894 static SetConstraintState SetConstraintStateCreate(int numalloc);
3897  Oid tgoid, bool tgisdeferred);
3898 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3899 
3900 
3901 /*
3902  * Get the FDW tuplestore for the current trigger query level, creating it
3903  * if necessary.
3904  */
3905 static Tuplestorestate *
/*
 * NOTE(review): the function-name line (original line 3906) was lost in
 * extraction; per the header comment above this is
 * GetCurrentFDWTuplestore(void): return the FDW tuplestore for the current
 * trigger query level, creating it on first use.
 */
3907 {
3908  Tuplestorestate *ret;
3909 
/*
 * NOTE(review): dropped line 3910 presumably reads the tuplestore pointer
 * from afterTriggers.query_stack[afterTriggers.query_depth] -- confirm.
 */
3911  if (ret == NULL)
3912  {
3913  MemoryContext oldcxt;
3914  ResourceOwner saveResourceOwner;
3915 
3916  /*
3917  * Make the tuplestore valid until end of subtransaction. We really
3918  * only need it until AfterTriggerEndQuery().
3919  */
/*
 * NOTE(review): dropped lines 3920 and 3922 presumably switch to
 * CurTransactionContext and CurTransactionResourceOwner so the tuplestore
 * has subtransaction lifespan -- confirm against the original file.
 */
3921  saveResourceOwner = CurrentResourceOwner;
3923 
3924  ret = tuplestore_begin_heap(false, false, work_mem);
3925 
/* Restore the caller's resource owner and memory context. */
3926  CurrentResourceOwner = saveResourceOwner;
3927  MemoryContextSwitchTo(oldcxt);
3928 
/*
 * NOTE(review): dropped line 3929 presumably stores 'ret' back into the
 * query_stack entry -- confirm.
 */
3930  }
3931 
3932  return ret;
3933 }
3934 
3935 /* ----------
3936  * afterTriggerCheckState()
3937  *
3938  * Returns true if the trigger event is actually in state DEFERRED.
3939  * ----------
3940  */
3941 static bool
/*
 * NOTE(review): the function-name line (original line 3942) was lost in
 * extraction; per the header comment this is
 * afterTriggerCheckState(AfterTriggerShared evtshared): report whether the
 * trigger event is currently in DEFERRED state.
 */
3943 {
3944  Oid tgoid = evtshared->ats_tgoid;
/*
 * NOTE(review): dropped line 3945 presumably declares
 * "SetConstraintState state = afterTriggers.state;", which the checks
 * below rely on -- confirm against the original file.
 */
3946  int i;
3947 
3948  /*
3949  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3950  * constraints declared NOT DEFERRABLE), the state is always false.
3951  */
3952  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3953  return false;
3954 
3955  /*
3956  * If constraint state exists, SET CONSTRAINTS might have been executed
3957  * either for this trigger or for all triggers.
3958  */
3959  if (state != NULL)
3960  {
3961  /* Check for SET CONSTRAINTS for this specific trigger. */
3962  for (i = 0; i < state->numstates; i++)
3963  {
3964  if (state->trigstates[i].sct_tgoid == tgoid)
3965  return state->trigstates[i].sct_tgisdeferred;
3966  }
3967 
3968  /* Check for SET CONSTRAINTS ALL. */
3969  if (state->all_isset)
3970  return state->all_isdeferred;
3971  }
3972 
3973  /*
3974  * Otherwise return the default state for the trigger.
3975  */
3976  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3977 }
3978 
3979 /* ----------
3980  * afterTriggerCopyBitmap()
3981  *
3982  * Copy bitmap into AfterTriggerEvents memory context, which is where the after
3983  * trigger events are kept.  Returns NULL for a NULL input.
3984  *
3985  * NOTE(review): extraction dropped the signature line (presumably
3986  * "afterTriggerCopyBitmap(Bitmapset *src)"), the body of the
3987  * AllocSetContextCreate() call, and the MemoryContextSwitchTo() into
3988  * afterTriggers.event_cxt that pairs with the switch-back below.
3989  * ----------
3990  */
3986 static Bitmapset *
3988 {
3989  Bitmapset *dst;
3990  MemoryContext oldcxt;
3991 
3992  if (src == NULL)
3993  return NULL;
3994 
3995  /* Create event context if we didn't already */
3996  if (afterTriggers.event_cxt == NULL)
/* NOTE(review): missing lines here are the AllocSetContextCreate() call
 * creating afterTriggers.event_cxt; only one argument line survived. */
3999  "AfterTriggerEvents",
4001 
/* NOTE(review): missing line presumably does
 * oldcxt = MemoryContextSwitchTo(afterTriggers.event_cxt); */
4003 
4004  dst = bms_copy(src);
4005 
4006  MemoryContextSwitchTo(oldcxt);
4007 
4008  return dst;
4009 }
4010 
4011 /* ----------
4012  * afterTriggerAddEvent()
4013  *
4014  * Add a new trigger event to the specified queue.
4015  * The passed-in event data is copied.
4016  *
4017  * NOTE(review): extraction dropped the first signature line (presumably
4018  * "afterTriggerAddEvent(AfterTriggerEventList *events,") and the body of
4019  * the AllocSetContextCreate() call below.
4020  * ----------
4021  */
4018 static void
4020  AfterTriggerEvent event, AfterTriggerShared evtshared)
4021 {
4022  Size eventsize = SizeofTriggerEvent(event);
4023  Size needed = eventsize + sizeof(AfterTriggerSharedData);
4024  AfterTriggerEventChunk *chunk;
4025  AfterTriggerShared newshared;
4026  AfterTriggerEvent newevent;
4027 
4028  /*
4029  * If empty list or not enough room in the tail chunk, make a new chunk.
4030  * We assume here that a new shared record will always be needed.
4031  */
4032  chunk = events->tail;
4033  if (chunk == NULL ||
4034  chunk->endfree - chunk->freeptr < needed)
4035  {
4036  Size chunksize;
4037 
4038  /* Create event context if we didn't already */
4039  if (afterTriggers.event_cxt == NULL)
/* NOTE(review): missing lines are the AllocSetContextCreate() call
 * creating afterTriggers.event_cxt; only one argument line survived. */
4042  "AfterTriggerEvents",
4044 
4045  /*
4046  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
4047  * These numbers are fairly arbitrary, though there is a hard limit at
4048  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
4049  * shared records using the available space in ate_flags. Another
4050  * constraint is that if the chunk size gets too huge, the search loop
4051  * below would get slow given a (not too common) usage pattern with
4052  * many distinct event types in a chunk. Therefore, we double the
4053  * preceding chunk size only if there weren't too many shared records
4054  * in the preceding chunk; otherwise we halve it. This gives us some
4055  * ability to adapt to the actual usage pattern of the current query
4056  * while still having large chunk sizes in typical usage. All chunk
4057  * sizes used should be MAXALIGN multiples, to ensure that the shared
4058  * records will be aligned safely.
4059  */
4060 #define MIN_CHUNK_SIZE 1024
4061 #define MAX_CHUNK_SIZE (1024*1024)
4062 
4063 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
4064 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
4065 #endif
4066 
4067  if (chunk == NULL)
4068  chunksize = MIN_CHUNK_SIZE;
4069  else
4070  {
4071  /* preceding chunk size... */
4072  chunksize = chunk->endptr - (char *) chunk;
4073  /* check number of shared records in preceding chunk */
4074  if ((chunk->endptr - chunk->endfree) <=
4075  (100 * sizeof(AfterTriggerSharedData)))
4076  chunksize *= 2; /* okay, double it */
4077  else
4078  chunksize /= 2; /* too many shared records */
4079  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
4080  }
4081  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
4082  chunk->next = NULL;
4083  chunk->freeptr = CHUNK_DATA_START(chunk);
4084  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
4085  Assert(chunk->endfree - chunk->freeptr >= needed);
4086 
4087  if (events->head == NULL)
4088  events->head = chunk;
4089  else
4090  events->tail->next = chunk;
4091  events->tail = chunk;
4092  /* events->tailfree is now out of sync, but we'll fix it below */
4093  }
4094 
4095  /*
4096  * Try to locate a matching shared-data record already in the chunk. If
4097  * none, make a new one.
4098  */
/* Shared records grow downward from chunk->endptr; scan them newest-first. */
4099  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
4100  (char *) newshared >= chunk->endfree;
4101  newshared--)
4102  {
4103  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
4104  newshared->ats_relid == evtshared->ats_relid &&
4105  newshared->ats_event == evtshared->ats_event &&
4106  newshared->ats_table == evtshared->ats_table &&
4107  newshared->ats_firing_id == 0)
4108  break;
4109  }
/* Loop fell through without a match: claim space for a new shared record
 * just below the current endfree boundary. */
4110  if ((char *) newshared < chunk->endfree)
4111  {
4112  *newshared = *evtshared;
4113  newshared->ats_firing_id = 0; /* just to be sure */
4114  chunk->endfree = (char *) newshared;
4115  }
4116 
4117  /* Insert the data */
4118  newevent = (AfterTriggerEvent) chunk->freeptr;
4119  memcpy(newevent, event, eventsize);
4120  /* ... and link the new event to its shared record */
4121  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
4122  newevent->ate_flags |= (char *) newshared - (char *) newevent;
4123 
4124  chunk->freeptr += eventsize;
4125  events->tailfree = chunk->freeptr;
4126 }
4127 
4128 /* ----------
4129  * afterTriggerFreeEventList()
4130  *
4131  * Free all the event storage in the given list.
4132  *
4133  * NOTE(review): extraction dropped the signature line (presumably
4134  * "afterTriggerFreeEventList(AfterTriggerEventList *events)").
4135  * ----------
4136  */
4134 static void
4136 {
4137  AfterTriggerEventChunk *chunk;
4138 
/* Pop and free chunks from the head until the list is empty. */
4139  while ((chunk = events->head) != NULL)
4140  {
4141  events->head = chunk->next;
4142  pfree(chunk);
4143  }
4144  events->tail = NULL;
4145  events->tailfree = NULL;
4146 }
4147 
4148 /* ----------
4149  * afterTriggerRestoreEventList()
4150  *
4151  * Restore an event list to its prior length, removing all the events
4152  * added since it had the value old_events.
4153  *
4154  * NOTE(review): extraction dropped the first signature line (presumably
4155  * "afterTriggerRestoreEventList(AfterTriggerEventList *events,").
4156  * ----------
4157  */
4155 static void
4157  const AfterTriggerEventList *old_events)
4158 {
4159  AfterTriggerEventChunk *chunk;
4160  AfterTriggerEventChunk *next_chunk;
4161 
4162  if (old_events->tail == NULL)
4163  {
4164  /* restoring to a completely empty state, so free everything */
4165  afterTriggerFreeEventList(events);
4166  }
4167  else
4168  {
4169  *events = *old_events;
4170  /* free any chunks after the last one we want to keep */
4171  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
4172  {
4173  next_chunk = chunk->next;
4174  pfree(chunk);
4175  }
4176  /* and clean up the tail chunk to be the right length */
4177  events->tail->next = NULL;
4178  events->tail->freeptr = events->tailfree;
4179 
4180  /*
4181  * We don't make any effort to remove now-unused shared data records.
4182  * They might still be useful, anyway.
4183  */
4184  }
4185 }
4186 
4187 /* ----------
4188  * afterTriggerDeleteHeadEventChunk()
4189  *
4190  * Remove the first chunk of events from the query level's event list.
4191  * Keep any event list pointers elsewhere in the query level's data
4192  * structures in sync.
4193  *
4194  * NOTE(review): extraction dropped the signature line (presumably
4195  * "afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)") and the
4196  * declaration of "table" inside the foreach loop.
4197  * ----------
4198  */
4195 static void
4197 {
4198  AfterTriggerEventChunk *target = qs->events.head;
4199  ListCell *lc;
4200 
/* Caller must guarantee there's a head chunk AND a successor to take over. */
4201  Assert(target && target->next);
4202 
4203  /*
4204  * First, update any pointers in the per-table data, so that they won't be
4205  * dangling. Resetting obsoleted pointers to NULL will make
4206  * cancel_prior_stmt_triggers start from the list head, which is fine.
4207  */
4208  foreach(lc, qs->tables)
4209  {
/* NOTE(review): missing line presumably declares
 * AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc); */
4211 
4212  if (table->after_trig_done &&
4213  table->after_trig_events.tail == target)
4214  {
4215  table->after_trig_events.head = NULL;
4216  table->after_trig_events.tail = NULL;
4217  table->after_trig_events.tailfree = NULL;
4218  }
4219  }
4220 
4221  /* Now we can flush the head chunk */
4222  qs->events.head = target->next;
4223  pfree(target);
4224 }
4225 
4226 
4227 /* ----------
4228  * AfterTriggerExecute()
4229  *
4230  * Fetch the required tuples back from the heap and fire one
4231  * single trigger function.
4232  *
4233  * Frequently, this will be fired many times in a row for triggers of
4234  * a single relation. Therefore, we cache the open relation and provide
4235  * fmgr lookup cache space at the caller level. (For triggers fired at
4236  * the end of a query, we can even piggyback on the executor's state.)
4237  *
4238  * When fired for a cross-partition update of a partitioned table, the old
4239  * tuple is fetched using 'src_relInfo' (the source leaf partition) and
4240  * the new tuple using 'dst_relInfo' (the destination leaf partition), though
4241  * both are converted into the root partitioned table's format before passing
4242  * to the trigger function.
4243  *
4244  * event: event currently being fired.
4245  * relInfo: result relation for event.
4246  * src_relInfo: source partition of a cross-partition update
4247  * dst_relInfo: its destination partition
4248  * trigdesc: working copy of rel's trigger info.
4249  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4250  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4251  * or NULL if no instrumentation is wanted.
4252  * per_tuple_context: memory context to call trigger function in.
4253  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4254  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
4255  *
4256  * NOTE(review): extraction dropped the first signature line (presumably
4257  * "AfterTriggerExecute(EState *estate,"), the case labels of the switch
4258  * below, and the execute_attr_map_slot()-style conversion calls; see the
4259  * inline notes.  Restore from upstream before compiling.
4260  * ----------
4261  */
4257 static void
4259  AfterTriggerEvent event,
4260  ResultRelInfo *relInfo,
4261  ResultRelInfo *src_relInfo,
4262  ResultRelInfo *dst_relInfo,
4263  TriggerDesc *trigdesc,
4264  FmgrInfo *finfo, Instrumentation *instr,
4265  MemoryContext per_tuple_context,
4266  TupleTableSlot *trig_tuple_slot1,
4267  TupleTableSlot *trig_tuple_slot2)
4268 {
4269  Relation rel = relInfo->ri_RelationDesc;
4270  Relation src_rel = src_relInfo->ri_RelationDesc;
4271  Relation dst_rel = dst_relInfo->ri_RelationDesc;
4272  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4273  Oid tgoid = evtshared->ats_tgoid;
4274  TriggerData LocTriggerData = {0};
4275  HeapTuple rettuple;
4276  int tgindx;
4277  bool should_free_trig = false;
4278  bool should_free_new = false;
4279 
4280  /*
4281  * Locate trigger in trigdesc.
4282  */
4283  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4284  {
4285  if (trigdesc->triggers[tgindx].tgoid == tgoid)
4286  {
4287  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4288  break;
4289  }
4290  }
4291  if (LocTriggerData.tg_trigger == NULL)
4292  elog(ERROR, "could not find trigger %u", tgoid);
4293 
4294  /*
4295  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4296  * to include time spent re-fetching tuples in the trigger cost.
4297  */
4298  if (instr)
4299  InstrStartNode(instr + tgindx);
4300 
4301  /*
4302  * Fetch the required tuple(s).
4303  */
4304  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4305  {
/* NOTE(review): missing case label here -- presumably the FDW-fetch case
 * (foreign-table triggers read their tuples from the FDW tuplestore). */
4307  {
4308  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4309 
4310  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4311  trig_tuple_slot1))
4312  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4313 
4314  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
/* NOTE(review): missing line presumably completes this comparison with
 * TRIGGER_EVENT_UPDATE &&. */
4316  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4317  trig_tuple_slot2))
4318  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4319  }
4320  /* fall through */
/* NOTE(review): missing case label here -- presumably the FDW-reuse case. */
4322 
4323  /*
4324  * Store tuple in the slot so that tg_trigtuple does not reference
4325  * tuplestore memory. (It is formally possible for the trigger
4326  * function to queue trigger events that add to the same
4327  * tuplestore, which can push other tuples out of memory.) The
4328  * distinction is academic, because we start with a minimal tuple
4329  * that is stored as a heap tuple, constructed in different memory
4330  * context, in the slot anyway.
4331  */
4332  LocTriggerData.tg_trigslot = trig_tuple_slot1;
4333  LocTriggerData.tg_trigtuple =
4334  ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
4335 
4336  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
/* NOTE(review): missing line presumably completes this comparison with
 * TRIGGER_EVENT_UPDATE). */
4338  {
4339  LocTriggerData.tg_newslot = trig_tuple_slot2;
4340  LocTriggerData.tg_newtuple =
4341  ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
4342  }
4343  else
4344  {
4345  LocTriggerData.tg_newtuple = NULL;
4346  }
4347  break;
4348 
4349  default:
/* Normal (heap) case: re-fetch the tuples by TID using SnapshotAny. */
4350  if (ItemPointerIsValid(&(event->ate_ctid1)))
4351  {
4352  TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate,
4353  src_relInfo);
4354 
4355  if (!table_tuple_fetch_row_version(src_rel,
4356  &(event->ate_ctid1),
4357  SnapshotAny,
4358  src_slot))
4359  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4360 
4361  /*
4362  * Store the tuple fetched from the source partition into the
4363  * target (root partitioned) table slot, converting if needed.
4364  */
4365  if (src_relInfo != relInfo)
4366  {
4367  TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo);
4368 
4369  LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
4370  if (map)
4371  {
/* NOTE(review): missing line presumably calls
 * execute_attr_map_slot(map->attrMap, ...) to convert the tuple. */
4373  src_slot,
4374  LocTriggerData.tg_trigslot);
4375  }
4376  else
4377  ExecCopySlot(LocTriggerData.tg_trigslot, src_slot);
4378  }
4379  else
4380  LocTriggerData.tg_trigslot = src_slot;
4381  LocTriggerData.tg_trigtuple =
4382  ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
4383  }
4384  else
4385  {
4386  LocTriggerData.tg_trigtuple = NULL;
4387  }
4388 
4389  /* don't touch ctid2 if not there */
/* NOTE(review): missing line presumably begins this condition with a test
 * of AFTER_TRIGGER_2CTID against the tuple-bits, OR'ed with the
 * cross-partition-update flag test on the next line. */
4391  (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) &&
4392  ItemPointerIsValid(&(event->ate_ctid2)))
4393  {
4394  TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate,
4395  dst_relInfo);
4396 
4397  if (!table_tuple_fetch_row_version(dst_rel,
4398  &(event->ate_ctid2),
4399  SnapshotAny,
4400  dst_slot))
4401  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4402 
4403  /*
4404  * Store the tuple fetched from the destination partition into
4405  * the target (root partitioned) table slot, converting if
4406  * needed.
4407  */
4408  if (dst_relInfo != relInfo)
4409  {
4410  TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo);
4411 
4412  LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
4413  if (map)
4414  {
/* NOTE(review): missing line presumably calls
 * execute_attr_map_slot(map->attrMap, ...) to convert the tuple. */
4416  dst_slot,
4417  LocTriggerData.tg_newslot);
4418  }
4419  else
4420  ExecCopySlot(LocTriggerData.tg_newslot, dst_slot);
4421  }
4422  else
4423  LocTriggerData.tg_newslot = dst_slot;
4424  LocTriggerData.tg_newtuple =
4425  ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
4426  }
4427  else
4428  {
4429  LocTriggerData.tg_newtuple = NULL;
4430  }
4431  }
4432 
4433  /*
4434  * Set up the tuplestore information to let the trigger have access to
4435  * transition tables. When we first make a transition table available to
4436  * a trigger, mark it "closed" so that it cannot change anymore. If any
4437  * additional events of the same type get queued in the current trigger
4438  * query level, they'll go into new transition tables.
4439  */
4440  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4441  if (evtshared->ats_table)
4442  {
4443  if (LocTriggerData.tg_trigger->tgoldtable)
4444  {
4445  if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event))
4446  LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore;
4447  else
4448  LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore;
4449  evtshared->ats_table->closed = true;
4450  }
4451 
4452  if (LocTriggerData.tg_trigger->tgnewtable)
4453  {
4454  if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event))
4455  LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore;
4456  else
4457  LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore;
4458  evtshared->ats_table->closed = true;
4459  }
4460  }
4461 
4462  /*
4463  * Setup the remaining trigger information
4464  */
4465  LocTriggerData.type = T_TriggerData;
4466  LocTriggerData.tg_event =
/* NOTE(review): missing line presumably masks ats_event with
 * TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW. */
4468  LocTriggerData.tg_relation = rel;
4469  if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4470  LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4471 
4472  MemoryContextReset(per_tuple_context);
4473 
4474  /*
4475  * Call the trigger and throw away any possibly returned updated tuple.
4476  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4477  */
4478  rettuple = ExecCallTriggerFunc(&LocTriggerData,
4479  tgindx,
4480  finfo,
4481  NULL,
4482  per_tuple_context);
4483  if (rettuple != NULL &&
4484  rettuple != LocTriggerData.tg_trigtuple &&
4485  rettuple != LocTriggerData.tg_newtuple)
4486  heap_freetuple(rettuple);
4487 
4488  /*
4489  * Release resources
4490  */
4491  if (should_free_trig)
4492  heap_freetuple(LocTriggerData.tg_trigtuple);
4493  if (should_free_new)
4494  heap_freetuple(LocTriggerData.tg_newtuple);
4495 
4496  /* don't clear slots' contents if foreign table */
4497  if (trig_tuple_slot1 == NULL)
4498  {
4499  if (LocTriggerData.tg_trigslot)
4500  ExecClearTuple(LocTriggerData.tg_trigslot);
4501  if (LocTriggerData.tg_newslot)
4502  ExecClearTuple(LocTriggerData.tg_newslot);
4503  }
4504 
4505  /*
4506  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4507  * one "tuple returned" (really the number of firings).
4508  */
4509  if (instr)
4510  InstrStopNode(instr + tgindx, 1);
4511 }
4512 
4513 
4514 /*
4515  * afterTriggerMarkEvents()
4516  *
4517  * Scan the given event list for not yet invoked events. Mark the ones
4518  * that can be invoked now with the current firing ID.
4519  *
4520  * If move_list isn't NULL, events that are not to be invoked now are
4521  * transferred to move_list.
4522  *
4523  * When immediate_only is true, do not invoke currently-deferred triggers.
4524  * (This will be false only at main transaction exit.)
4525  *
4526  * Returns true if any invokable events were found.
4527  *
4528  * NOTE(review): extraction dropped the first signature line (presumably
4529  * "afterTriggerMarkEvents(AfterTriggerEventList *events,"), the closing
4530  * half of the DONE/IN_PROGRESS flag test, and the line assigning the
4531  * current firing counter to evtshared->ats_firing_id.
4532  */
4528 static bool
4530  AfterTriggerEventList *move_list,
4531  bool immediate_only)
4532 {
4533  bool found = false;
4534  bool deferred_found = false;
4535  AfterTriggerEvent event;
4536  AfterTriggerEventChunk *chunk;
4537 
4538  for_each_event_chunk(event, chunk, *events)
4539  {
4540  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4541  bool defer_it = false;
4542 
4543  if (!(event->ate_flags &
/* NOTE(review): missing line presumably completes the mask with
 * (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))). */
4545  {
4546  /*
4547  * This trigger hasn't been called or scheduled yet. Check if we
4548  * should call it now.
4549  */
4550  if (immediate_only && afterTriggerCheckState(evtshared))
4551  {
4552  defer_it = true;
4553  }
4554  else
4555  {
4556  /*
4557  * Mark it as to be fired in this firing cycle.
4558  */
/* NOTE(review): missing line presumably sets
 * evtshared->ats_firing_id = afterTriggers.firing_counter; */
4560  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4561  found = true;
4562  }
4563  }
4564 
4565  /*
4566  * If it's deferred, move it to move_list, if requested.
4567  */
4568  if (defer_it && move_list != NULL)
4569  {
4570  deferred_found = true;
4571  /* add it to move_list */
4572  afterTriggerAddEvent(move_list, event, evtshared);
4573  /* mark original copy "done" so we don't do it again */
4574  event->ate_flags |= AFTER_TRIGGER_DONE;
4575  }
4576  }
4577 
4578  /*
4579  * We could allow deferred triggers if, before the end of the
4580  * security-restricted operation, we were to verify that a SET CONSTRAINTS
4581  * ... IMMEDIATE has fired all such triggers. For now, don't bother.
4582  */
4583  if (deferred_found && InSecurityRestrictedOperation())
4584  ereport(ERROR,
4585  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
4586  errmsg("cannot fire deferred trigger within security-restricted operation")));
4587 
4588  return found;
4589 }
4590 
4591 /*
4592  * afterTriggerInvokeEvents()
4593  *
4594  * Scan the given event list for events that are marked as to be fired
4595  * in the current firing cycle, and fire them.
4596  *
4597  * If estate isn't NULL, we use its result relation info to avoid repeated
4598  * openings and closing of trigger target relations. If it is NULL, we
4599  * make one locally to cache the info in case there are multiple trigger
4600  * events per rel.
4601  *
4602  * When delete_ok is true, it's safe to delete fully-processed events.
4603  * (We are not very tense about that: we simply reset a chunk to be empty
4604  * if all its events got fired. The objective here is just to avoid useless
4605  * rescanning of events when a trigger queues new events during transaction
4606  * end, so it's not necessary to worry much about the case where only
4607  * some events are fired.)
4608  *
4609  * Returns true if no unfired events remain in the list (this allows us
4610  * to avoid repeating afterTriggerMarkEvents).
4611  *
4612  * NOTE(review): extraction dropped the first signature line (presumably
4613  * "afterTriggerInvokeEvents(AfterTriggerEventList *events,"), the
4614  * AllocSetContextCreate() body, the slot create/drop calls, and a case
4615  * comparison; see inline notes.
4616  */
4612 static bool
4614  CommandId firing_id,
4615  EState *estate,
4616  bool delete_ok)
4617 {
4618  bool all_fired = true;
4619  AfterTriggerEventChunk *chunk;
4620  MemoryContext per_tuple_context;
4621  bool local_estate = false;
4622  ResultRelInfo *rInfo = NULL;
4623  Relation rel = NULL;
4624  TriggerDesc *trigdesc = NULL;
4625  FmgrInfo *finfo = NULL;
4626  Instrumentation *instr = NULL;
4627  TupleTableSlot *slot1 = NULL,
4628  *slot2 = NULL;
4629 
4630  /* Make a local EState if need be */
4631  if (estate == NULL)
4632  {
4633  estate = CreateExecutorState();
4634  local_estate = true;
4635  }
4636 
4637  /* Make a per-tuple memory context for trigger function calls */
4638  per_tuple_context =
/* NOTE(review): missing lines are the AllocSetContextCreate() call
 * creating this context; only the name argument line survived. */
4640  "AfterTriggerTupleContext",
4642 
4643  for_each_chunk(chunk, *events)
4644  {
4645  AfterTriggerEvent event;
4646  bool all_fired_in_chunk = true;
4647 
4648  for_each_event(event, chunk)
4649  {
4650  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4651 
4652  /*
4653  * Is it one for me to fire?
4654  */
4655  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4656  evtshared->ats_firing_id == firing_id)
4657  {
4658  ResultRelInfo *src_rInfo,
4659  *dst_rInfo;
4660 
4661  /*
4662  * So let's fire it... but first, find the correct relation if
4663  * this is not the same relation as before.
4664  */
4665  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4666  {
4667  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid,
4668  NULL);
4669  rel = rInfo->ri_RelationDesc;
4670  /* Catch calls with insufficient relcache refcounting */
/* NOTE(review): missing line here presumably asserts the relation is
 * properly reference-counted (per the preceding comment). */
4672  trigdesc = rInfo->ri_TrigDesc;
4673  finfo = rInfo->ri_TrigFunctions;
4674  instr = rInfo->ri_TrigInstrument;
4675  if (slot1 != NULL)
4676  {
/* NOTE(review): missing lines presumably drop slot1 and slot2 via
 * ExecDropSingleTupleTableSlot before NULLing them. */
4679  slot1 = slot2 = NULL;
4680  }
/* Foreign tables fetch their trigger tuples from the FDW tuplestore,
 * so dedicated scratch slots are created here for them. */
4681  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4682  {
4683  slot1 = MakeSingleTupleTableSlot(rel->rd_att,
/* NOTE(review): missing lines presumably pass the slot-ops argument
 * (e.g. &TTSOpsMinimalTuple) to these two calls. */
4685  slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4687  }
4688  if (trigdesc == NULL) /* should not happen */
4689  elog(ERROR, "relation %u has no triggers",
4690  evtshared->ats_relid);
4691  }
4692 
4693  /*
4694  * Look up source and destination partition result rels of a
4695  * cross-partition update event.
4696  */
4697  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
/* NOTE(review): missing line presumably completes the comparison with
 * AFTER_TRIGGER_CP_UPDATE). */
4699  {
4700  Assert(OidIsValid(event->ate_src_part) &&
4701  OidIsValid(event->ate_dst_part));
4702  src_rInfo = ExecGetTriggerResultRel(estate,
4703  event->ate_src_part,
4704  rInfo);
4705  dst_rInfo = ExecGetTriggerResultRel(estate,
4706  event->ate_dst_part,
4707  rInfo);
4708  }
4709  else
4710  src_rInfo = dst_rInfo = rInfo;
4711 
4712  /*
4713  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4714  * still set, so recursive examinations of the event list
4715  * won't try to re-fire it.
4716  */
4717  AfterTriggerExecute(estate, event, rInfo,
4718  src_rInfo, dst_rInfo,
4719  trigdesc, finfo, instr,
4720  per_tuple_context, slot1, slot2);
4721 
4722  /*
4723  * Mark the event as done.
4724  */
4725  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4726  event->ate_flags |= AFTER_TRIGGER_DONE;
4727  }
4728  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4729  {
4730  /* something remains to be done */
4731  all_fired = all_fired_in_chunk = false;
4732  }
4733  }
4734 
4735  /* Clear the chunk if delete_ok and nothing left of interest */
4736  if (delete_ok && all_fired_in_chunk)
4737  {
4738  chunk->freeptr = CHUNK_DATA_START(chunk);
4739  chunk->endfree = chunk->endptr;
4740 
4741  /*
4742  * If it's last chunk, must sync event list's tailfree too. Note
4743  * that delete_ok must NOT be passed as true if there could be
4744  * additional AfterTriggerEventList values pointing at this event
4745  * list, since we'd fail to fix their copies of tailfree.
4746  */
4747  if (chunk == events->tail)
4748  events->tailfree = chunk->freeptr;
4749  }
4750  }
4751  if (slot1 != NULL)
4752  {
/* NOTE(review): missing lines presumably drop slot1 and slot2 via
 * ExecDropSingleTupleTableSlot. */
4755  }
4756 
4757  /* Release working resources */
4758  MemoryContextDelete(per_tuple_context);
4759 
4760  if (local_estate)
4761  {
4762  ExecCloseResultRelations(estate);
4763  ExecResetTupleTable(estate->es_tupleTable, false);
4764  FreeExecutorState(estate);
4765  }
4766 
4767  return all_fired;
4768 }
4769 
4770 
4771 /*
4772  * GetAfterTriggersTableData
4773  *
4774  * Find or create an AfterTriggersTableData struct for the specified
4775  * trigger event (relation + operation type). Ignore existing structs
4776  * marked "closed"; we don't want to put any additional tuples into them,
4777  * nor change their stmt-triggers-fired state.
4778  *
4779  * Note: the AfterTriggersTableData list is allocated in the current
4780  * (sub)transaction's CurTransactionContext. This is OK because
4781  * we don't need it to live past AfterTriggerEndQuery.
4782  *
4783  * NOTE(review): extraction dropped the signature line (presumably
4784  * "GetAfterTriggersTableData(Oid relid, CmdType cmdType)"), the
4785  * declaration/initialization of "qs", the query_depth assertions, the
4786  * context switch, and the palloc0 of the new struct.
4787  */
4783 static AfterTriggersTableData *
4785 {
4786  AfterTriggersTableData *table;
/* NOTE(review): missing line presumably declares "qs" (the current
 * query level's AfterTriggersQueryData). */
4788  MemoryContext oldcxt;
4789  ListCell *lc;
4790 
4791  /* Caller should have ensured query_depth is OK. */
/* NOTE(review): missing lines presumably assert query_depth bounds and
 * initialize qs from afterTriggers.query_stack. */
4795 
4796  foreach(lc, qs->tables)
4797  {
4798  table = (AfterTriggersTableData *) lfirst(lc);
4799  if (table->relid == relid && table->cmdType == cmdType &&
4800  !table->closed)
4801  return table;
4802  }
4803 
/* NOTE(review): missing line presumably switches into
 * CurTransactionContext (paired with the switch-back below). */
4805 
/* NOTE(review): missing line presumably palloc0's the new
 * AfterTriggersTableData struct. */
4807  table->relid = relid;
4808  table->cmdType = cmdType;
4809  qs->tables = lappend(qs->tables, table);
4810 
4811  MemoryContextSwitchTo(oldcxt);
4812 
4813  return table;
4814 }
4815 
4816 /*
4817  * Returns a TupleTableSlot suitable for holding the tuples to be put
4818  * into AfterTriggersTableData's transition table tuplestores.
4819  *
4820  * NOTE(review): extraction dropped the signature first line (presumably
4821  * "GetAfterTriggersStoreSlot(AfterTriggersTableData *table,") and the
4822  * MemoryContextSwitchTo(CurTransactionContext) paired with the
4823  * switch-back below.
4824  */
4820 static TupleTableSlot *
4822  TupleDesc tupdesc)
4823 {
4824  /* Create it if not already done. */
4825  if (!table->storeslot)
4826  {
4827  MemoryContext oldcxt;
4828 
4829  /*
4830  * We need this slot only until AfterTriggerEndQuery, but making it
4831  * last till end-of-subxact is good enough. It'll be freed by
4832  * AfterTriggerFreeQuery(). However, the passed-in tupdesc might have
4833  * a different lifespan, so we'd better make a copy of that.
4834  */
/* NOTE(review): missing line presumably does
 * oldcxt = MemoryContextSwitchTo(CurTransactionContext); */
4836  tupdesc = CreateTupleDescCopy(tupdesc);
4837  table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
4838  MemoryContextSwitchTo(oldcxt);
4839  }
4840 
4841  return table->storeslot;
4842 }
4843 
4844 /*
4845  * MakeTransitionCaptureState
4846  *
4847  * Make a TransitionCaptureState object for the given TriggerDesc, target
4848  * relation, and operation type. The TCS object holds all the state needed
4849  * to decide whether to capture tuples in transition tables.
4850  *
4851  * If there are no triggers in 'trigdesc' that request relevant transition
4852  * tables, then return NULL.
4853  *
4854  * The resulting object can be passed to the ExecAR* functions. When
4855  * dealing with child tables, the caller can set tcs_original_insert_tuple
4856  * to avoid having to reconstruct the original tuple in the root table's
4857  * format.
4858  *
4859  * Note that we copy the flags from a parent table into this struct (rather
4860  * than subsequently using the relation's TriggerDesc directly) so that we can
4861  * use it to control collection of transition tuples from child tables.
4862  *
4863  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4864  * on the same table during one query should share one transition table.
4865  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4866  * looked up using the table OID + CmdType, and are merely referenced by
4867  * the TransitionCaptureState objects we hand out to callers.
4868  *
4869  * NOTE(review): extraction dropped the return-type/signature lines
4870  * (presumably "TransitionCaptureState * MakeTransitionCaptureState(
4871  * TriggerDesc *trigdesc, Oid relid, CmdType cmdType)"), the declaration
4872  * of "state", the query-state enlargement call, the context/owner
4873  * switches, and the palloc0 of the result struct; see inline notes.
4874  */
4871 {
/* NOTE(review): missing line presumably declares
 * TransitionCaptureState *state; */
4873  bool need_old_upd,
4874  need_new_upd,
4875  need_old_del,
4876  need_new_ins;
4877  AfterTriggersTableData *table;
4878  MemoryContext oldcxt;
4879  ResourceOwner saveResourceOwner;
4880 
4881  if (trigdesc == NULL)
4882  return NULL;
4883 
4884  /* Detect which table(s) we need. */
4885  switch (cmdType)
4886  {
4887  case CMD_INSERT:
4888  need_old_upd = need_old_del = need_new_upd = false;
4889  need_new_ins = trigdesc->trig_insert_new_table;
4890  break;
4891  case CMD_UPDATE:
4892  need_old_upd = trigdesc->trig_update_old_table;
4893  need_new_upd = trigdesc->trig_update_new_table;
4894  need_old_del = need_new_ins = false;
4895  break;
4896  case CMD_DELETE:
4897  need_old_del = trigdesc->trig_delete_old_table;
4898  need_old_upd = need_new_upd = need_new_ins = false;
4899  break;
4900  case CMD_MERGE:
/* MERGE can perform any of the three actions, so it may need all four. */
4901  need_old_upd = trigdesc->trig_update_old_table;
4902  need_new_upd = trigdesc->trig_update_new_table;
4903  need_old_del = trigdesc->trig_delete_old_table;
4904  need_new_ins = trigdesc->trig_insert_new_table;
4905  break;
4906  default:
4907  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4908  /* keep compiler quiet */
4909  need_old_upd = need_new_upd = need_old_del = need_new_ins = false;
4910  break;
4911  }
4912  if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del)
4913  return NULL;
4914 
4915  /* Check state, like AfterTriggerSaveEvent. */
4916  if (afterTriggers.query_depth < 0)
4917  elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4918 
4919  /* Be sure we have enough space to record events at this query depth. */
/* NOTE(review): missing lines presumably enlarge the query-level state
 * stack if query_depth exceeds its current allocation. */
4922 
4923  /*
4924  * Find or create an AfterTriggersTableData struct to hold the
4925  * tuplestore(s). If there's a matching struct but it's marked closed,
4926  * ignore it; we need a newer one.
4927  *
4928  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4929  * allocated in the current (sub)transaction's CurTransactionContext, and
4930  * the tuplestores are managed by the (sub)transaction's resource owner.
4931  * This is sufficient lifespan because we do not allow triggers using
4932  * transition tables to be deferrable; they will be fired during
4933  * AfterTriggerEndQuery, after which it's okay to delete the data.
4934  */
4935  table = GetAfterTriggersTableData(relid, cmdType);
4936 
4937  /* Now create required tuplestore(s), if we don't have them already. */
/* NOTE(review): missing lines presumably switch into
 * CurTransactionContext and set CurrentResourceOwner to the
 * subtransaction's owner (restored below). */
4939  saveResourceOwner = CurrentResourceOwner;
4941 
4942  if (need_old_upd && table->old_upd_tuplestore == NULL)
4943  table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4944  if (need_new_upd && table->new_upd_tuplestore == NULL)
4945  table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4946  if (need_old_del && table->old_del_tuplestore == NULL)
4947  table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4948  if (need_new_ins && table->new_ins_tuplestore == NULL)
4949  table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4950 
4951  CurrentResourceOwner = saveResourceOwner;
4952  MemoryContextSwitchTo(oldcxt);
4953 
4954  /* Now build the TransitionCaptureState struct, in caller's context */
/* NOTE(review): missing line presumably palloc0's the
 * TransitionCaptureState into "state". */
4956  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4957  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4958  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4959  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4960  state->tcs_private = table;
4961 
4962  return state;
4963 }
4964 
4965 
4966 /* ----------
4967  * AfterTriggerBeginXact()
4968  *
4969  * Called at transaction start (either BEGIN or implicit for single
4970  * statement outside of transaction block).
4971  *
4972  * NOTE(review): extraction dropped the signature line (presumably
4973  * "AfterTriggerBeginXact(void)") plus the state-reset line and two of
4974  * the leftover-state assertions below.
4975  * ----------
4976  */
4973 void
4975 {
4976  /*
4977  * Initialize after-trigger state structure to empty
4978  */
4979  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
/* NOTE(review): missing line presumably resets query_depth (to -1). */
4981 
4982  /*
4983  * Verify that there is no leftover state remaining. If these assertions
4984  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4985  * up properly.
4986  */
4987  Assert(afterTriggers.state == NULL);
4988  Assert(afterTriggers.query_stack == NULL);
/* NOTE(review): missing assertion line here (presumably on maxquerydepth). */
4990  Assert(afterTriggers.event_cxt == NULL);
4991  Assert(afterTriggers.events.head == NULL);
4992  Assert(afterTriggers.trans_stack == NULL);
/* NOTE(review): missing assertion line here (presumably on maxtransdepth). */
4994 }
4995 
4996 
4997 /* ----------
4998  * AfterTriggerBeginQuery()
4999  *
5000  * Called just before we start processing a single query within a
5001  * transaction (or subtransaction). Most of the real work gets deferred
5002  * until somebody actually tries to queue a trigger event.
5003  * ----------
5004  */
5005 void
5007 {
5008  /* Increase the query stack depth */
5010 }
5011 
5012 
5013 /* ----------
5014  * AfterTriggerEndQuery()
5015  *
5016  * Called after one query has been completely processed. At this time
5017  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
5018  * transfer deferred trigger events to the global deferred-trigger list.
5019  *
5020  * Note that this must be called BEFORE closing down the executor
5021  * with ExecutorEnd, because we make use of the EState's info about
5022  * target relations. Normally it is called from ExecutorFinish.
5023  * ----------
5024  */
5025 void
5027 {
5029 
5030  /* Must be inside a query, too */
5032 
5033  /*
5034  * If we never even got as far as initializing the event stack, there
5035  * certainly won't be any events, so exit quickly.
5036  */
5038  {
5040  return;
5041  }
5042 
5043  /*
5044  * Process all immediate-mode triggers queued by the query, and move the
5045  * deferred ones to the main list of deferred events.
5046  *
5047  * Notice that we decide which ones will be fired, and put the deferred
5048  * ones on the main list, before anything is actually fired. This ensures
5049  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
5050  * IMMEDIATE: all events we have decided to defer will be available for it
5051  * to fire.
5052  *
5053  * We loop in case a trigger queues more events at the same query level.
5054  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
5055  * will instead fire any triggers in a dedicated query level. Foreign key
5056  * enforcement triggers do add to the current query level, thanks to their
5057  * passing fire_triggers = false to SPI_execute_snapshot(). Other
5058  * C-language triggers might do likewise.
5059  *
5060  * If we find no firable events, we don't have to increment
5061  * firing_counter.
5062  */
5064 
5065  for (;;)
5066  {
5068  {
5069  CommandId firing_id = afterTriggers.firing_counter++;
5070  AfterTriggerEventChunk *oldtail = qs->events.tail;
5071 
5072  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
5073  break; /* all fired */
5074 
5075  /*
5076  * Firing a trigger could result in query_stack being repalloc'd,
5077  * so we must recalculate qs after each afterTriggerInvokeEvents
5078  * call. Furthermore, it's unsafe to pass delete_ok = true here,
5079  * because that could cause afterTriggerInvokeEvents to try to
5080  * access qs->events after the stack has been repalloc'd.
5081  */
5083 
5084  /*
5085  * We'll need to scan the events list again. To reduce the cost
5086  * of doing so, get rid of completely-fired chunks. We know that
5087  * all events were marked IN_PROGRESS or DONE at the conclusion of
5088  * afterTriggerMarkEvents, so any still-interesting events must
5089  * have been added after that, and so must be in the chunk that
5090  * was then the tail chunk, or in later chunks. So, zap all
5091  * chunks before oldtail. This is approximately the same set of
5092  * events we would have gotten rid of by passing delete_ok = true.
5093  */
5094  Assert(oldtail != NULL);
5095  while (qs->events.head != oldtail)
5097  }
5098  else
5099  break;
5100  }
5101 
5102  /* Release query-level-local storage, including tuplestores if any */
5104 
5106 }
5107 
5108 
5109 /*
5110  * AfterTriggerFreeQuery
5111  * Release subsidiary storage for a trigger query level.
5112  * This includes closing down tuplestores.
5113  * Note: it's important for this to be safe if interrupted by an error
5114  * and then called again for the same query level.
5115  */
5116 static void
5118 {
5119  Tuplestorestate *ts;
5120  List *tables;
5121  ListCell *lc;
5122 
5123  /* Drop the trigger events */
5125 
5126  /* Drop FDW tuplestore if any */
5127  ts = qs->fdw_tuplestore;
5128  qs->fdw_tuplestore = NULL;
5129  if (ts)
5130  tuplestore_end(ts);
5131 
5132  /* Release per-table subsidiary storage */
5133  tables = qs->tables;
5134  foreach(lc, tables)
5135  {
5137 
5138  ts = table->old_upd_tuplestore;
5139  table->old_upd_tuplestore = NULL;
5140  if (ts)
5141  tuplestore_end(ts);
5142  ts = table->new_upd_tuplestore;
5143  table->new_upd_tuplestore = NULL;
5144  if (ts)
5145  tuplestore_end(ts);
5146  ts = table->old_del_tuplestore;
5147  table->old_del_tuplestore = NULL;
5148  if (ts)
5149  tuplestore_end(ts);
5150  ts = table->new_ins_tuplestore;
5151  table->new_ins_tuplestore = NULL;
5152  if (ts)
5153  tuplestore_end(ts);
5154  if (table->storeslot)
5155  {
5156  TupleTableSlot *slot = table->storeslot;
5157 
5158  table->storeslot = NULL;
5160  }
5161  }
5162 
5163  /*
5164  * Now free the AfterTriggersTableData structs and list cells. Reset list
5165  * pointer first; if list_free_deep somehow gets an error, better to leak
5166  * that storage than have an infinite loop.
5167  */
5168  qs->tables = NIL;
5169  list_free_deep(tables);
5170 }
5171 
5172 
5173 /* ----------
5174  * AfterTriggerFireDeferred()
5175  *
5176  * Called just before the current transaction is committed. At this
5177  * time we invoke all pending DEFERRED triggers.
5178  *
5179  * It is possible for other modules to queue additional deferred triggers
5180  * during pre-commit processing; therefore xact.c may have to call this
5181  * multiple times.
5182  * ----------
5183  */
5184 void
5186 {
5187  AfterTriggerEventList *events;
5188  bool snap_pushed = false;
5189 
5190  /* Must not be inside a query */
5192 
5193  /*
5194  * If there are any triggers to fire, make sure we have set a snapshot for
5195  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
5196  * can't assume ActiveSnapshot is valid on entry.)
5197  */
5198  events = &afterTriggers.events;
5199  if (events->head != NULL)
5200  {
5202  snap_pushed = true;
5203  }
5204 
5205  /*
5206  * Run all the remaining triggers. Loop until they are all gone, in case
5207  * some trigger queues more for us to do.
5208  */
5209  while (afterTriggerMarkEvents(events, NULL, false))
5210  {
5211  CommandId firing_id = afterTriggers.firing_counter++;
5212 
5213  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
5214  break; /* all fired */
5215  }
5216 
5217  /*
5218  * We don't bother freeing the event list, since it will go away anyway
5219  * (and more efficiently than via pfree) in AfterTriggerEndXact.
5220  */
5221 
5222  if (snap_pushed)
5224 }
5225 
5226 
5227 /* ----------
5228  * AfterTriggerEndXact()
5229  *
5230  * The current transaction is finishing.
5231  *
5232  * Any unfired triggers are canceled so we simply throw
5233  * away anything we know.
5234  *
5235  * Note: it is possible for this to be called repeatedly in case of
5236  * error during transaction abort; therefore, do not complain if
5237  * already closed down.
5238  * ----------
5239  */
5240 void
5241 AfterTriggerEndXact(bool isCommit)
5242 {
5243  /*
5244  * Forget the pending-events list.
5245  *
5246  * Since all the info is in TopTransactionContext or children thereof, we
5247  * don't really need to do anything to reclaim memory. However, the
5248  * pending-events list could be large, and so it's useful to discard it as
5249  * soon as possible --- especially if we are aborting because we ran out
5250  * of memory for the list!
5251  */
5253  {
5255  afterTriggers.event_cxt = NULL;
5256  afterTriggers.events.head = NULL;
5257  afterTriggers.events.tail = NULL;
5258  afterTriggers.events.tailfree = NULL;
5259  }
5260 
5261  /*
5262  * Forget any subtransaction state as well. Since this can't be very
5263  * large, we let the eventual reset of TopTransactionContext free the
5264  * memory instead of doing it here.
5265  */
5266  afterTriggers.trans_stack = NULL;
5268 
5269 
5270  /*
5271  * Forget the query stack and constraint-related state information. As
5272  * with the subtransaction state information, we don't bother freeing the
5273  * memory here.
5274  */
5275  afterTriggers.query_stack = NULL;
5277  afterTriggers.state = NULL;
5278 
5279  /* No more afterTriggers manipulation until next transaction starts. */
5281 }
5282 
5283 /*
5284  * AfterTriggerBeginSubXact()
5285  *
5286  * Start a subtransaction.
5287  */
5288 void
5290 {
5291  int my_level = GetCurrentTransactionNestLevel();
5292 
5293  /*
5294  * Allocate more space in the trans_stack if needed. (Note: because the
5295  * minimum nest level of a subtransaction is 2, we waste the first couple
5296  * entries of the array; not worth the notational effort to avoid it.)
5297  */
5298  while (my_level >= afterTriggers.maxtransdepth)
5299  {
5300  if (afterTriggers.maxtransdepth == 0)
5301  {
5302  /* Arbitrarily initialize for max of 8 subtransaction levels */
5305  8 * sizeof(AfterTriggersTransData));
5307  }
5308  else
5309  {
5310  /* repalloc will keep the stack in the same context */
5311  int new_alloc = afterTriggers.maxtransdepth * 2;
5312 
5315  new_alloc * sizeof(AfterTriggersTransData));
5316  afterTriggers.maxtransdepth = new_alloc;
5317  }
5318  }
5319 
5320  /*
5321  * Push the current information into the stack. The SET CONSTRAINTS state
5322  * is not saved until/unless changed. Likewise, we don't make a
5323  * per-subtransaction event context until needed.
5324  */
5325  afterTriggers.trans_stack[my_level].state = NULL;
5329 }
5330 
5331 /*
5332  * AfterTriggerEndSubXact()
5333  *
5334  * The current subtransaction is ending.
5335  */
5336 void
5338 {
5339  int my_level = GetCurrentTransactionNestLevel();
5341  AfterTriggerEvent event;
5342  AfterTriggerEventChunk *chunk;
5343  CommandId subxact_firing_id;
5344 
5345  /*
5346  * Pop the prior state if needed.
5347  */
5348  if (isCommit)
5349  {
5350  Assert(my_level < afterTriggers.maxtransdepth);
5351  /* If we saved a prior state, we don't need it anymore */
5352  state = afterTriggers.trans_stack[my_level].state;
5353  if (state != NULL)
5354  pfree(state);
5355  /* this avoids double pfree if error later: */
5356  afterTriggers.trans_stack[my_level].state = NULL;
5359  }
5360  else
5361  {
5362  /*
5363  * Aborting. It is possible subxact start failed before calling
5364  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
5365  * trans_stack levels that aren't there.
5366  */
5367  if (my_level >= afterTriggers.maxtransdepth)
5368  return;
5369 
5370  /*
5371  * Release query-level storage for queries being aborted, and restore
5372  * query_depth to its pre-subxact value. This assumes that a
5373  * subtransaction will not add events to query levels started in a
5374  * earlier transaction state.
5375  */
5377  {
5381  }
5384 
5385  /*
5386  * Restore the global deferred-event list to its former length,
5387  * discarding any events queued by the subxact.
5388  */
5390  &afterTriggers.trans_stack[my_level].events);
5391 
5392  /*
5393  * Restore the trigger state. If the saved state is NULL, then this
5394  * subxact didn't save it, so it doesn't need restoring.
5395  */
5396  state = afterTriggers.trans_stack[my_level].state;
5397  if (state != NULL)
5398  {
5401  }
5402  /* this avoids double pfree if error later: */
5403  afterTriggers.trans_stack[my_level].state = NULL;
5404 
5405  /*
5406  * Scan for any remaining deferred events that were marked DONE or IN
5407  * PROGRESS by this subxact or a child, and un-mark them. We can
5408  * recognize such events because they have a firing ID greater than or
5409  * equal to the firing_counter value we saved at subtransaction start.
5410  * (This essentially assumes that the current subxact includes all
5411  * subxacts started after it.)
5412  */
5413  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
5415  {
5416  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5417 
5418  if (event->ate_flags &
5420  {
5421  if (evtshared->ats_firing_id >= subxact_firing_id)
5422  event->ate_flags &=
5424  }
5425  }
5426  }
5427 }
5428 
5429 /*
5430  * Get the transition table for the given event and depending on whether we are
5431  * processing the old or the new tuple.
5432  */
5433 static Tuplestorestate *
5435  TupleTableSlot *oldslot,
5436  TupleTableSlot *newslot,
5437  TransitionCaptureState *transition_capture)
5438 {
5439  Tuplestorestate *tuplestore = NULL;
5440  bool delete_old_table = transition_capture->tcs_delete_old_table;
5441  bool update_old_table = transition_capture->tcs_update_old_table;
5442  bool update_new_table = transition_capture->tcs_update_new_table;
5443  bool insert_new_table = transition_capture->tcs_insert_new_table;
5444 
5445  /*
5446  * For INSERT events NEW should be non-NULL, for DELETE events OLD should
5447  * be non-NULL, whereas for UPDATE events normally both OLD and NEW are
5448  * non-NULL. But for UPDATE events fired for capturing transition tuples
5449  * during UPDATE partition-key row movement, OLD is NULL when the event is
5450  * for a row being inserted, whereas NEW is NULL when the event is for a
5451  * row being deleted.
5452  */
5453  Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5454  TupIsNull(oldslot)));
5455  Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5456  TupIsNull(newslot)));
5457 
5458  if (!TupIsNull(oldslot))
5459  {
5460  Assert(TupIsNull(newslot));
5461  if (event == TRIGGER_EVENT_DELETE && delete_old_table)
5462  tuplestore = transition_capture->tcs_private->old_del_tuplestore;
5463  else if (event == TRIGGER_EVENT_UPDATE && update_old_table)
5464  tuplestore = transition_capture->tcs_private->old_upd_tuplestore;
5465  }
5466  else if (!TupIsNull(newslot))
5467  {
5468  Assert(TupIsNull(oldslot));
5469  if (event == TRIGGER_EVENT_INSERT && insert_new_table)
5470  tuplestore = transition_capture->tcs_private->new_ins_tuplestore;
5471  else if (event == TRIGGER_EVENT_UPDATE && update_new_table)
5472  tuplestore = transition_capture->tcs_private->new_upd_tuplestore;
5473  }
5474 
5475  return tuplestore;
5476 }
5477 
5478 /*
5479  * Add the given heap tuple to the given tuplestore, applying the conversion
5480  * map if necessary.
5481  *
5482  * If original_insert_tuple is given, we can add that tuple without conversion.
5483  */
5484 static void
5486  TransitionCaptureState *transition_capture,
5487  ResultRelInfo *relinfo,
5488  TupleTableSlot *slot,
5489  TupleTableSlot *original_insert_tuple,
5490  Tuplestorestate *tuplestore)
5491 {
5492  TupleConversionMap *map;
5493 
5494  /*
5495  * Nothing needs to be done if we don't have a tuplestore.
5496  */
5497  if (tuplestore == NULL)
5498  return;
5499 
5500  if (original_insert_tuple)
5501  tuplestore_puttupleslot(tuplestore, original_insert_tuple);
5502  else if ((map = ExecGetChildToRootMap(relinfo)) != NULL)
5503  {
5504  AfterTriggersTableData *table = transition_capture->tcs_private;
5505  TupleTableSlot *storeslot;
5506 
5507  storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5508  execute_attr_map_slot(map->attrMap, slot, storeslot);
5509  tuplestore_puttupleslot(tuplestore, storeslot);
5510  }
5511  else
5512  tuplestore_puttupleslot(tuplestore, slot);
5513 }
5514 
5515 /* ----------
5516  * AfterTriggerEnlargeQueryState()
5517  *
5518  * Prepare the necessary state so that we can record AFTER trigger events
5519  * queued by a query. It is allowed to have nested queries within a
5520  * (sub)transaction, so we need to have separate state for each query
5521  * nesting level.
5522  * ----------
5523  */
5524 static void
5526 {
5527  int init_depth = afterTriggers.maxquerydepth;
5528 
5530 
5531  if (afterTriggers.maxquerydepth == 0)
5532  {
5533  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
5534 
5537  new_alloc * sizeof(AfterTriggersQueryData));
5538  afterTriggers.maxquerydepth = new_alloc;
5539  }
5540  else
5541  {
5542  /* repalloc will keep the stack in the same context */
5543  int old_alloc = afterTriggers.maxquerydepth;
5544  int new_alloc = Max(afterTriggers.query_depth + 1,
5545  old_alloc * 2);
5546 
5549  new_alloc * sizeof(AfterTriggersQueryData));
5550  afterTriggers.maxquerydepth = new_alloc;
5551  }
5552 
5553  /* Initialize new array entries to empty */
5554  while (init_depth < afterTriggers.maxquerydepth)
5555  {
5557 
5558  qs->events.head = NULL;
5559  qs->events.tail = NULL;
5560  qs->events.tailfree = NULL;
5561  qs->fdw_tuplestore = NULL;
5562  qs->tables = NIL;
5563 
5564  ++init_depth;
5565  }
5566 }
5567 
5568 /*
5569  * Create an empty SetConstraintState with room for numalloc trigstates
5570  */
5571 static SetConstraintState
5573 {
5575 
5576  /* Behave sanely with numalloc == 0 */
5577  if (numalloc <= 0)
5578  numalloc = 1;
5579 
5580  /*
5581  * We assume that zeroing will correctly initialize the state values.
5582  */