PostgreSQL Source Code  git master
trigger.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/index.h"
26 #include "catalog/indexing.h"
27 #include "catalog/objectaccess.h"
28 #include "catalog/partition.h"
29 #include "catalog/pg_constraint.h"
30 #include "catalog/pg_inherits.h"
31 #include "catalog/pg_proc.h"
32 #include "catalog/pg_trigger.h"
33 #include "catalog/pg_type.h"
34 #include "commands/dbcommands.h"
35 #include "commands/defrem.h"
36 #include "commands/trigger.h"
37 #include "executor/executor.h"
38 #include "executor/execPartition.h"
39 #include "miscadmin.h"
40 #include "nodes/bitmapset.h"
41 #include "nodes/makefuncs.h"
42 #include "optimizer/optimizer.h"
43 #include "parser/parse_clause.h"
44 #include "parser/parse_collate.h"
45 #include "parser/parse_func.h"
46 #include "parser/parse_relation.h"
47 #include "parser/parsetree.h"
48 #include "partitioning/partdesc.h"
49 #include "pgstat.h"
50 #include "rewrite/rewriteManip.h"
51 #include "storage/bufmgr.h"
52 #include "storage/lmgr.h"
53 #include "tcop/utility.h"
54 #include "utils/acl.h"
55 #include "utils/builtins.h"
56 #include "utils/bytea.h"
57 #include "utils/fmgroids.h"
58 #include "utils/guc_hooks.h"
59 #include "utils/inval.h"
60 #include "utils/lsyscache.h"
61 #include "utils/memutils.h"
62 #include "utils/plancache.h"
63 #include "utils/rel.h"
64 #include "utils/snapmgr.h"
65 #include "utils/syscache.h"
66 #include "utils/tuplestore.h"
67 
68 
69 /* GUC variables */
71 
72 /* How many levels deep into trigger execution are we? */
73 static int MyTriggerDepth = 0;
74 
75 /* Local function prototypes */
76 static void renametrig_internal(Relation tgrel, Relation targetrel,
77  HeapTuple trigtup, const char *newname,
78  const char *expected_name);
79 static void renametrig_partition(Relation tgrel, Oid partitionId,
80  Oid parentTriggerOid, const char *newname,
81  const char *expected_name);
82 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
83 static bool GetTupleForTrigger(EState *estate,
84  EPQState *epqstate,
85  ResultRelInfo *relinfo,
86  ItemPointer tid,
87  LockTupleMode lockmode,
88  TupleTableSlot *oldslot,
89  TupleTableSlot **epqslot,
90  TM_Result *tmresultp,
91  TM_FailureData *tmfdp);
92 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
93  Trigger *trigger, TriggerEvent event,
94  Bitmapset *modifiedCols,
95  TupleTableSlot *oldslot, TupleTableSlot *newslot);
97  int tgindx,
98  FmgrInfo *finfo,
99  Instrumentation *instr,
100  MemoryContext per_tuple_context);
101 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
102  ResultRelInfo *src_partinfo,
103  ResultRelInfo *dst_partinfo,
104  int event, bool row_trigger,
105  TupleTableSlot *oldslot, TupleTableSlot *newslot,
106  List *recheckIndexes, Bitmapset *modifiedCols,
107  TransitionCaptureState *transition_capture,
108  bool is_crosspart_update);
109 static void AfterTriggerEnlargeQueryState(void);
110 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
111 
112 
113 /*
114  * Create a trigger. Returns the address of the created trigger.
115  *
116  * queryString is the source text of the CREATE TRIGGER command.
117  * This must be supplied if a whenClause is specified, else it can be NULL.
118  *
119  * relOid, if nonzero, is the relation on which the trigger should be
120  * created. If zero, the name provided in the statement will be looked up.
121  *
122  * refRelOid, if nonzero, is the relation to which the constraint trigger
123  * refers. If zero, the constraint relation name provided in the statement
124  * will be looked up as needed.
125  *
126  * constraintOid, if nonzero, says that this trigger is being created
127  * internally to implement that constraint. A suitable pg_depend entry will
128  * be made to link the trigger to that constraint. constraintOid is zero when
129  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
130  * TRIGGER, we build a pg_constraint entry internally.)
131  *
132  * indexOid, if nonzero, is the OID of an index associated with the constraint.
133  * We do nothing with this except store it into pg_trigger.tgconstrindid;
134  * but when creating a trigger for a deferrable unique constraint on a
135  * partitioned table, its children are looked up. Note we don't cope with
136  * invalid indexes in that case.
137  *
138  * funcoid, if nonzero, is the OID of the function to invoke. When this is
139  * given, stmt->funcname is ignored.
140  *
141  * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
142  * if that trigger is dropped, this one should be too. There are two cases
143  * when a nonzero value is passed for this: 1) when this function recurses to
144  * create the trigger on partitions, 2) when creating child foreign key
145  * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers().
146  *
147  * If whenClause is passed, it is an already-transformed expression for
148  * WHEN. In this case, we ignore any that may come in stmt->whenClause.
149  *
150  * If isInternal is true then this is an internally-generated trigger.
151  * This argument sets the tgisinternal field of the pg_trigger entry, and
152  * if true causes us to modify the given trigger name to ensure uniqueness.
153  *
154  * When isInternal is not true we require ACL_TRIGGER permissions on the
155  * relation, as well as ACL_EXECUTE on the trigger function. For internal
156  * triggers the caller must apply any required permission checks.
157  *
158  * When called on partitioned tables, this function recurses to create the
159  * trigger on all the partitions, except if isInternal is true, in which
160  * case caller is expected to execute recursion on its own. in_partition
161  * indicates such a recursive call; outside callers should pass "false"
162  * (but see CloneRowTriggersToPartition).
163  */
165 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
166  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
167  Oid funcoid, Oid parentTriggerOid, Node *whenClause,
168  bool isInternal, bool in_partition)
169 {
170  return
171  CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
172  constraintOid, indexOid, funcoid,
173  parentTriggerOid, whenClause, isInternal,
174  in_partition, TRIGGER_FIRES_ON_ORIGIN);
175 }
176 
177 /*
178  * Like the above; additionally the firing condition
179  * (always/origin/replica/disabled) can be specified.
180  */
182 CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
183  Oid relOid, Oid refRelOid, Oid constraintOid,
184  Oid indexOid, Oid funcoid, Oid parentTriggerOid,
185  Node *whenClause, bool isInternal, bool in_partition,
186  char trigger_fires_when)
187 {
188  int16 tgtype;
189  int ncolumns;
190  int16 *columns;
191  int2vector *tgattr;
192  List *whenRtable;
193  char *qual;
194  Datum values[Natts_pg_trigger];
195  bool nulls[Natts_pg_trigger];
196  Relation rel;
197  AclResult aclresult;
198  Relation tgrel;
199  Relation pgrel;
200  HeapTuple tuple = NULL;
201  Oid funcrettype;
202  Oid trigoid = InvalidOid;
203  char internaltrigname[NAMEDATALEN];
204  char *trigname;
205  Oid constrrelid = InvalidOid;
206  ObjectAddress myself,
207  referenced;
208  char *oldtablename = NULL;
209  char *newtablename = NULL;
210  bool partition_recurse;
211  bool trigger_exists = false;
212  Oid existing_constraint_oid = InvalidOid;
213  bool existing_isInternal = false;
214  bool existing_isClone = false;
215 
216  if (OidIsValid(relOid))
217  rel = table_open(relOid, ShareRowExclusiveLock);
218  else
219  rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
220 
221  /*
222  * Triggers must be on tables or views, and there are additional
223  * relation-type-specific restrictions.
224  */
225  if (rel->rd_rel->relkind == RELKIND_RELATION)
226  {
227  /* Tables can't have INSTEAD OF triggers */
228  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
229  stmt->timing != TRIGGER_TYPE_AFTER)
230  ereport(ERROR,
231  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
232  errmsg("\"%s\" is a table",
234  errdetail("Tables cannot have INSTEAD OF triggers.")));
235  }
236  else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
237  {
238  /* Partitioned tables can't have INSTEAD OF triggers */
239  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
240  stmt->timing != TRIGGER_TYPE_AFTER)
241  ereport(ERROR,
242  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
243  errmsg("\"%s\" is a table",
245  errdetail("Tables cannot have INSTEAD OF triggers.")));
246 
247  /*
248  * FOR EACH ROW triggers have further restrictions
249  */
250  if (stmt->row)
251  {
252  /*
253  * Disallow use of transition tables.
254  *
255  * Note that we have another restriction about transition tables
256  * in partitions; search for 'has_superclass' below for an
257  * explanation. The check here is just to protect from the fact
258  * that if we allowed it here, the creation would succeed for a
259  * partitioned table with no partitions, but would be blocked by
260  * the other restriction when the first partition was created,
261  * which is very unfriendly behavior.
262  */
263  if (stmt->transitionRels != NIL)
264  ereport(ERROR,
265  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
266  errmsg("\"%s\" is a partitioned table",
268  errdetail("ROW triggers with transition tables are not supported on partitioned tables.")));
269  }
270  }
271  else if (rel->rd_rel->relkind == RELKIND_VIEW)
272  {
273  /*
274  * Views can have INSTEAD OF triggers (which we check below are
275  * row-level), or statement-level BEFORE/AFTER triggers.
276  */
277  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
278  ereport(ERROR,
279  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
280  errmsg("\"%s\" is a view",
282  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
283  /* Disallow TRUNCATE triggers on VIEWs */
284  if (TRIGGER_FOR_TRUNCATE(stmt->events))
285  ereport(ERROR,
286  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
287  errmsg("\"%s\" is a view",
289  errdetail("Views cannot have TRUNCATE triggers.")));
290  }
291  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
292  {
293  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
294  stmt->timing != TRIGGER_TYPE_AFTER)
295  ereport(ERROR,
296  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
297  errmsg("\"%s\" is a foreign table",
299  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
300 
301  /*
302  * We disallow constraint triggers to protect the assumption that
303  * triggers on FKs can't be deferred. See notes with AfterTriggers
304  * data structures, below.
305  */
306  if (stmt->isconstraint)
307  ereport(ERROR,
308  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
309  errmsg("\"%s\" is a foreign table",
311  errdetail("Foreign tables cannot have constraint triggers.")));
312  }
313  else
314  ereport(ERROR,
315  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
316  errmsg("relation \"%s\" cannot have triggers",
318  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
319 
321  ereport(ERROR,
322  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
323  errmsg("permission denied: \"%s\" is a system catalog",
324  RelationGetRelationName(rel))));
325 
326  if (stmt->isconstraint)
327  {
328  /*
329  * We must take a lock on the target relation to protect against
330  * concurrent drop. It's not clear that AccessShareLock is strong
331  * enough, but we certainly need at least that much... otherwise, we
332  * might end up creating a pg_constraint entry referencing a
333  * nonexistent table.
334  */
335  if (OidIsValid(refRelOid))
336  {
337  LockRelationOid(refRelOid, AccessShareLock);
338  constrrelid = refRelOid;
339  }
340  else if (stmt->constrrel != NULL)
341  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
342  false);
343  }
344 
345  /* permission checks */
346  if (!isInternal)
347  {
348  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
349  ACL_TRIGGER);
350  if (aclresult != ACLCHECK_OK)
351  aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
353 
354  if (OidIsValid(constrrelid))
355  {
356  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
357  ACL_TRIGGER);
358  if (aclresult != ACLCHECK_OK)
359  aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
360  get_rel_name(constrrelid));
361  }
362  }
363 
364  /*
365  * When called on a partitioned table to create a FOR EACH ROW trigger
366  * that's not internal, we create one trigger for each partition, too.
367  *
368  * For that, we'd better hold lock on all of them ahead of time.
369  */
370  partition_recurse = !isInternal && stmt->row &&
371  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
372  if (partition_recurse)
374  ShareRowExclusiveLock, NULL));
375 
376  /* Compute tgtype */
377  TRIGGER_CLEAR_TYPE(tgtype);
378  if (stmt->row)
379  TRIGGER_SETT_ROW(tgtype);
380  tgtype |= stmt->timing;
381  tgtype |= stmt->events;
382 
383  /* Disallow ROW-level TRUNCATE triggers */
384  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
385  ereport(ERROR,
386  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
387  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
388 
389  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
390  if (TRIGGER_FOR_INSTEAD(tgtype))
391  {
392  if (!TRIGGER_FOR_ROW(tgtype))
393  ereport(ERROR,
394  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
395  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
396  if (stmt->whenClause)
397  ereport(ERROR,
398  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
399  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
400  if (stmt->columns != NIL)
401  ereport(ERROR,
402  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
403  errmsg("INSTEAD OF triggers cannot have column lists")));
404  }
405 
406  /*
407  * We don't yet support naming ROW transition variables, but the parser
408  * recognizes the syntax so we can give a nicer message here.
409  *
410  * Per standard, REFERENCING TABLE names are only allowed on AFTER
411  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
412  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
413  * only allowed once. Per standard, OLD may not be specified when
414  * creating a trigger only for INSERT, and NEW may not be specified when
415  * creating a trigger only for DELETE.
416  *
417  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
418  * reference both ROW and TABLE transition data.
419  */
420  if (stmt->transitionRels != NIL)
421  {
422  List *varList = stmt->transitionRels;
423  ListCell *lc;
424 
425  foreach(lc, varList)
426  {
428 
429  if (!(tt->isTable))
430  ereport(ERROR,
431  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
432  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
433  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
434 
435  /*
436  * Because of the above test, we omit further ROW-related testing
437  * below. If we later allow naming OLD and NEW ROW variables,
438  * adjustments will be needed below.
439  */
440 
441  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
442  ereport(ERROR,
443  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
444  errmsg("\"%s\" is a foreign table",
446  errdetail("Triggers on foreign tables cannot have transition tables.")));
447 
448  if (rel->rd_rel->relkind == RELKIND_VIEW)
449  ereport(ERROR,
450  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
451  errmsg("\"%s\" is a view",
453  errdetail("Triggers on views cannot have transition tables.")));
454 
455  /*
456  * We currently don't allow row-level triggers with transition
457  * tables on partition or inheritance children. Such triggers
458  * would somehow need to see tuples converted to the format of the
459  * table they're attached to, and it's not clear which subset of
460  * tuples each child should see. See also the prohibitions in
461  * ATExecAttachPartition() and ATExecAddInherit().
462  */
463  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
464  {
465  /* Use appropriate error message. */
466  if (rel->rd_rel->relispartition)
467  ereport(ERROR,
468  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
469  errmsg("ROW triggers with transition tables are not supported on partitions")));
470  else
471  ereport(ERROR,
472  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
473  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
474  }
475 
476  if (stmt->timing != TRIGGER_TYPE_AFTER)
477  ereport(ERROR,
478  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
479  errmsg("transition table name can only be specified for an AFTER trigger")));
480 
481  if (TRIGGER_FOR_TRUNCATE(tgtype))
482  ereport(ERROR,
483  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
484  errmsg("TRUNCATE triggers with transition tables are not supported")));
485 
486  /*
487  * We currently don't allow multi-event triggers ("INSERT OR
488  * UPDATE") with transition tables, because it's not clear how to
489  * handle INSERT ... ON CONFLICT statements which can fire both
490  * INSERT and UPDATE triggers. We show the inserted tuples to
491  * INSERT triggers and the updated tuples to UPDATE triggers, but
492  * it's not yet clear what INSERT OR UPDATE trigger should see.
493  * This restriction could be lifted if we can decide on the right
494  * semantics in a later release.
495  */
496  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
497  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
498  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
499  ereport(ERROR,
500  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
501  errmsg("transition tables cannot be specified for triggers with more than one event")));
502 
503  /*
504  * We currently don't allow column-specific triggers with
505  * transition tables. Per spec, that seems to require
506  * accumulating separate transition tables for each combination of
507  * columns, which is a lot of work for a rather marginal feature.
508  */
509  if (stmt->columns != NIL)
510  ereport(ERROR,
511  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
512  errmsg("transition tables cannot be specified for triggers with column lists")));
513 
514  /*
515  * We disallow constraint triggers with transition tables, to
516  * protect the assumption that such triggers can't be deferred.
517  * See notes with AfterTriggers data structures, below.
518  *
519  * Currently this is enforced by the grammar, so just Assert here.
520  */
521  Assert(!stmt->isconstraint);
522 
523  if (tt->isNew)
524  {
525  if (!(TRIGGER_FOR_INSERT(tgtype) ||
526  TRIGGER_FOR_UPDATE(tgtype)))
527  ereport(ERROR,
528  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
529  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
530 
531  if (newtablename != NULL)
532  ereport(ERROR,
533  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
534  errmsg("NEW TABLE cannot be specified multiple times")));
535 
536  newtablename = tt->name;
537  }
538  else
539  {
540  if (!(TRIGGER_FOR_DELETE(tgtype) ||
541  TRIGGER_FOR_UPDATE(tgtype)))
542  ereport(ERROR,
543  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
544  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
545 
546  if (oldtablename != NULL)
547  ereport(ERROR,
548  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
549  errmsg("OLD TABLE cannot be specified multiple times")));
550 
551  oldtablename = tt->name;
552  }
553  }
554 
555  if (newtablename != NULL && oldtablename != NULL &&
556  strcmp(newtablename, oldtablename) == 0)
557  ereport(ERROR,
558  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
559  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
560  }
561 
562  /*
563  * Parse the WHEN clause, if any and we weren't passed an already
564  * transformed one.
565  *
566  * Note that as a side effect, we fill whenRtable when parsing. If we got
567  * an already parsed clause, this does not occur, which is what we want --
568  * no point in adding redundant dependencies below.
569  */
570  if (!whenClause && stmt->whenClause)
571  {
572  ParseState *pstate;
573  ParseNamespaceItem *nsitem;
574  List *varList;
575  ListCell *lc;
576 
577  /* Set up a pstate to parse with */
578  pstate = make_parsestate(NULL);
579  pstate->p_sourcetext = queryString;
580 
581  /*
582  * Set up nsitems for OLD and NEW references.
583  *
584  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
585  */
586  nsitem = addRangeTableEntryForRelation(pstate, rel,
588  makeAlias("old", NIL),
589  false, false);
590  addNSItemToQuery(pstate, nsitem, false, true, true);
591  nsitem = addRangeTableEntryForRelation(pstate, rel,
593  makeAlias("new", NIL),
594  false, false);
595  addNSItemToQuery(pstate, nsitem, false, true, true);
596 
597  /* Transform expression. Copy to be sure we don't modify original */
598  whenClause = transformWhereClause(pstate,
599  copyObject(stmt->whenClause),
601  "WHEN");
602  /* we have to fix its collations too */
603  assign_expr_collations(pstate, whenClause);
604 
605  /*
606  * Check for disallowed references to OLD/NEW.
607  *
608  * NB: pull_var_clause is okay here only because we don't allow
609  * subselects in WHEN clauses; it would fail to examine the contents
610  * of subselects.
611  */
612  varList = pull_var_clause(whenClause, 0);
613  foreach(lc, varList)
614  {
615  Var *var = (Var *) lfirst(lc);
616 
617  switch (var->varno)
618  {
619  case PRS2_OLD_VARNO:
620  if (!TRIGGER_FOR_ROW(tgtype))
621  ereport(ERROR,
622  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
623  errmsg("statement trigger's WHEN condition cannot reference column values"),
624  parser_errposition(pstate, var->location)));
625  if (TRIGGER_FOR_INSERT(tgtype))
626  ereport(ERROR,
627  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
628  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
629  parser_errposition(pstate, var->location)));
630  /* system columns are okay here */
631  break;
632  case PRS2_NEW_VARNO:
633  if (!TRIGGER_FOR_ROW(tgtype))
634  ereport(ERROR,
635  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
636  errmsg("statement trigger's WHEN condition cannot reference column values"),
637  parser_errposition(pstate, var->location)));
638  if (TRIGGER_FOR_DELETE(tgtype))
639  ereport(ERROR,
640  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
641  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
642  parser_errposition(pstate, var->location)));
643  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
644  ereport(ERROR,
645  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
646  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
647  parser_errposition(pstate, var->location)));
648  if (TRIGGER_FOR_BEFORE(tgtype) &&
649  var->varattno == 0 &&
650  RelationGetDescr(rel)->constr &&
651  RelationGetDescr(rel)->constr->has_generated_stored)
652  ereport(ERROR,
653  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
654  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
655  errdetail("A whole-row reference is used and the table contains generated columns."),
656  parser_errposition(pstate, var->location)));
657  if (TRIGGER_FOR_BEFORE(tgtype) &&
658  var->varattno > 0 &&
659  TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
660  ereport(ERROR,
661  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
662  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
663  errdetail("Column \"%s\" is a generated column.",
664  NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
665  parser_errposition(pstate, var->location)));
666  break;
667  default:
668  /* can't happen without add_missing_from, so just elog */
669  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
670  break;
671  }
672  }
673 
674  /* we'll need the rtable for recordDependencyOnExpr */
675  whenRtable = pstate->p_rtable;
676 
677  qual = nodeToString(whenClause);
678 
679  free_parsestate(pstate);
680  }
681  else if (!whenClause)
682  {
683  whenClause = NULL;
684  whenRtable = NIL;
685  qual = NULL;
686  }
687  else
688  {
689  qual = nodeToString(whenClause);
690  whenRtable = NIL;
691  }
692 
693  /*
694  * Find and validate the trigger function.
695  */
696  if (!OidIsValid(funcoid))
697  funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
698  if (!isInternal)
699  {
700  aclresult = object_aclcheck(ProcedureRelationId, funcoid, GetUserId(), ACL_EXECUTE);
701  if (aclresult != ACLCHECK_OK)
702  aclcheck_error(aclresult, OBJECT_FUNCTION,
703  NameListToString(stmt->funcname));
704  }
705  funcrettype = get_func_rettype(funcoid);
706  if (funcrettype != TRIGGEROID)
707  ereport(ERROR,
708  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
709  errmsg("function %s must return type %s",
710  NameListToString(stmt->funcname), "trigger")));
711 
712  /*
713  * Scan pg_trigger to see if there is already a trigger of the same name.
714  * Skip this for internally generated triggers, since we'll modify the
715  * name to be unique below.
716  *
717  * NOTE that this is cool only because we have ShareRowExclusiveLock on
718  * the relation, so the trigger set won't be changing underneath us.
719  */
720  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
721  if (!isInternal)
722  {
723  ScanKeyData skeys[2];
724  SysScanDesc tgscan;
725 
726  ScanKeyInit(&skeys[0],
727  Anum_pg_trigger_tgrelid,
728  BTEqualStrategyNumber, F_OIDEQ,
730 
731  ScanKeyInit(&skeys[1],
732  Anum_pg_trigger_tgname,
733  BTEqualStrategyNumber, F_NAMEEQ,
734  CStringGetDatum(stmt->trigname));
735 
736  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
737  NULL, 2, skeys);
738 
739  /* There should be at most one matching tuple */
740  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
741  {
742  Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
743 
744  trigoid = oldtrigger->oid;
745  existing_constraint_oid = oldtrigger->tgconstraint;
746  existing_isInternal = oldtrigger->tgisinternal;
747  existing_isClone = OidIsValid(oldtrigger->tgparentid);
748  trigger_exists = true;
749  /* copy the tuple to use in CatalogTupleUpdate() */
750  tuple = heap_copytuple(tuple);
751  }
752  systable_endscan(tgscan);
753  }
754 
755  if (!trigger_exists)
756  {
757  /* Generate the OID for the new trigger. */
758  trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
759  Anum_pg_trigger_oid);
760  }
761  else
762  {
763  /*
764  * If OR REPLACE was specified, we'll replace the old trigger;
765  * otherwise complain about the duplicate name.
766  */
767  if (!stmt->replace)
768  ereport(ERROR,
770  errmsg("trigger \"%s\" for relation \"%s\" already exists",
771  stmt->trigname, RelationGetRelationName(rel))));
772 
773  /*
774  * An internal trigger or a child trigger (isClone) cannot be replaced
775  * by a user-defined trigger. However, skip this test when
776  * in_partition, because then we're recursing from a partitioned table
777  * and the check was made at the parent level.
778  */
779  if ((existing_isInternal || existing_isClone) &&
780  !isInternal && !in_partition)
781  ereport(ERROR,
783  errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger",
784  stmt->trigname, RelationGetRelationName(rel))));
785 
786  /*
787  * It is not allowed to replace with a constraint trigger; gram.y
788  * should have enforced this already.
789  */
790  Assert(!stmt->isconstraint);
791 
792  /*
793  * It is not allowed to replace an existing constraint trigger,
794  * either. (The reason for these restrictions is partly that it seems
795  * difficult to deal with pending trigger events in such cases, and
796  * partly that the command might imply changing the constraint's
797  * properties as well, which doesn't seem nice.)
798  */
799  if (OidIsValid(existing_constraint_oid))
800  ereport(ERROR,
802  errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
803  stmt->trigname, RelationGetRelationName(rel))));
804  }
805 
806  /*
807  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
808  * corresponding pg_constraint entry.
809  */
810  if (stmt->isconstraint && !OidIsValid(constraintOid))
811  {
812  /* Internal callers should have made their own constraints */
813  Assert(!isInternal);
814  constraintOid = CreateConstraintEntry(stmt->trigname,
816  CONSTRAINT_TRIGGER,
817  stmt->deferrable,
818  stmt->initdeferred,
819  true,
820  InvalidOid, /* no parent */
821  RelationGetRelid(rel),
822  NULL, /* no conkey */
823  0,
824  0,
825  InvalidOid, /* no domain */
826  InvalidOid, /* no index */
827  InvalidOid, /* no foreign key */
828  NULL,
829  NULL,
830  NULL,
831  NULL,
832  0,
833  ' ',
834  ' ',
835  NULL,
836  0,
837  ' ',
838  NULL, /* no exclusion */
839  NULL, /* no check constraint */
840  NULL,
841  true, /* islocal */
842  0, /* inhcount */
843  true, /* noinherit */
844  isInternal); /* is_internal */
845  }
846 
847  /*
848  * If trigger is internally generated, modify the provided trigger name to
849  * ensure uniqueness by appending the trigger OID. (Callers will usually
850  * supply a simple constant trigger name in these cases.)
851  */
852  if (isInternal)
853  {
854  snprintf(internaltrigname, sizeof(internaltrigname),
855  "%s_%u", stmt->trigname, trigoid);
856  trigname = internaltrigname;
857  }
858  else
859  {
860  /* user-defined trigger; use the specified trigger name as-is */
861  trigname = stmt->trigname;
862  }
863 
864  /*
865  * Build the new pg_trigger tuple.
866  *
867  * When we're creating a trigger in a partition, we mark it as internal,
868  * even though we don't do the isInternal magic in this function. This
869  * makes the triggers in partitions identical to the ones in the
870  * partitioned tables, except that they are marked internal.
871  */
872  memset(nulls, false, sizeof(nulls));
873 
874  values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
875  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
876  values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
877  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
878  CStringGetDatum(trigname));
879  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
880  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
881  values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
882  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
883  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
884  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
885  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
886  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
887  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
888 
889  if (stmt->args)
890  {
891  ListCell *le;
892  char *args;
893  int16 nargs = list_length(stmt->args);
894  int len = 0;
895 
896  foreach(le, stmt->args)
897  {
898  char *ar = strVal(lfirst(le));
899 
900  len += strlen(ar) + 4;
901  for (; *ar; ar++)
902  {
903  if (*ar == '\\')
904  len++;
905  }
906  }
907  args = (char *) palloc(len + 1);
908  args[0] = '\0';
909  foreach(le, stmt->args)
910  {
911  char *s = strVal(lfirst(le));
912  char *d = args + strlen(args);
913 
914  while (*s)
915  {
916  if (*s == '\\')
917  *d++ = '\\';
918  *d++ = *s++;
919  }
920  strcpy(d, "\\000");
921  }
922  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
923  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
925  }
926  else
927  {
928  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
929  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
930  CStringGetDatum(""));
931  }
932 
933  /* build column number array if it's a column-specific trigger */
934  ncolumns = list_length(stmt->columns);
935  if (ncolumns == 0)
936  columns = NULL;
937  else
938  {
939  ListCell *cell;
940  int i = 0;
941 
942  columns = (int16 *) palloc(ncolumns * sizeof(int16));
943  foreach(cell, stmt->columns)
944  {
945  char *name = strVal(lfirst(cell));
946  int16 attnum;
947  int j;
948 
949  /* Lookup column name. System columns are not allowed */
950  attnum = attnameAttNum(rel, name, false);
951  if (attnum == InvalidAttrNumber)
952  ereport(ERROR,
953  (errcode(ERRCODE_UNDEFINED_COLUMN),
954  errmsg("column \"%s\" of relation \"%s\" does not exist",
955  name, RelationGetRelationName(rel))));
956 
957  /* Check for duplicates */
958  for (j = i - 1; j >= 0; j--)
959  {
960  if (columns[j] == attnum)
961  ereport(ERROR,
962  (errcode(ERRCODE_DUPLICATE_COLUMN),
963  errmsg("column \"%s\" specified more than once",
964  name)));
965  }
966 
967  columns[i++] = attnum;
968  }
969  }
970  tgattr = buildint2vector(columns, ncolumns);
971  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
972 
973  /* set tgqual if trigger has WHEN clause */
974  if (qual)
975  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
976  else
977  nulls[Anum_pg_trigger_tgqual - 1] = true;
978 
979  if (oldtablename)
980  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
981  CStringGetDatum(oldtablename));
982  else
983  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
984  if (newtablename)
985  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
986  CStringGetDatum(newtablename));
987  else
988  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
989 
990  /*
991  * Insert or replace tuple in pg_trigger.
992  */
993  if (!trigger_exists)
994  {
995  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
996  CatalogTupleInsert(tgrel, tuple);
997  }
998  else
999  {
1000  HeapTuple newtup;
1001 
1002  newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
1003  CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
1004  heap_freetuple(newtup);
1005  }
1006 
1007  heap_freetuple(tuple); /* free either original or new tuple */
1008  table_close(tgrel, RowExclusiveLock);
1009 
1010  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
1011  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
1012  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
1013  if (oldtablename)
1014  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1015  if (newtablename)
1016  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1017 
1018  /*
1019  * Update relation's pg_class entry; if necessary; and if not, send an SI
1020  * message to make other backends (and this one) rebuild relcache entries.
1021  */
1022  pgrel = table_open(RelationRelationId, RowExclusiveLock);
1023  tuple = SearchSysCacheCopy1(RELOID,
1025  if (!HeapTupleIsValid(tuple))
1026  elog(ERROR, "cache lookup failed for relation %u",
1027  RelationGetRelid(rel));
1028  if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1029  {
1030  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1031 
1032  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1033 
1035  }
1036  else
1038 
1039  heap_freetuple(tuple);
1040  table_close(pgrel, RowExclusiveLock);
1041 
1042  /*
1043  * If we're replacing a trigger, flush all the old dependencies before
1044  * recording new ones.
1045  */
1046  if (trigger_exists)
1047  deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1048 
1049  /*
1050  * Record dependencies for trigger. Always place a normal dependency on
1051  * the function.
1052  */
1053  myself.classId = TriggerRelationId;
1054  myself.objectId = trigoid;
1055  myself.objectSubId = 0;
1056 
1057  referenced.classId = ProcedureRelationId;
1058  referenced.objectId = funcoid;
1059  referenced.objectSubId = 0;
1060  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1061 
1062  if (isInternal && OidIsValid(constraintOid))
1063  {
1064  /*
1065  * Internally-generated trigger for a constraint, so make it an
1066  * internal dependency of the constraint. We can skip depending on
1067  * the relation(s), as there'll be an indirect dependency via the
1068  * constraint.
1069  */
1070  referenced.classId = ConstraintRelationId;
1071  referenced.objectId = constraintOid;
1072  referenced.objectSubId = 0;
1073  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1074  }
1075  else
1076  {
1077  /*
1078  * User CREATE TRIGGER, so place dependencies. We make trigger be
1079  * auto-dropped if its relation is dropped or if the FK relation is
1080  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1081  */
1082  referenced.classId = RelationRelationId;
1083  referenced.objectId = RelationGetRelid(rel);
1084  referenced.objectSubId = 0;
1085  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1086 
1087  if (OidIsValid(constrrelid))
1088  {
1089  referenced.classId = RelationRelationId;
1090  referenced.objectId = constrrelid;
1091  referenced.objectSubId = 0;
1092  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1093  }
1094  /* Not possible to have an index dependency in this case */
1095  Assert(!OidIsValid(indexOid));
1096 
1097  /*
1098  * If it's a user-specified constraint trigger, make the constraint
1099  * internally dependent on the trigger instead of vice versa.
1100  */
1101  if (OidIsValid(constraintOid))
1102  {
1103  referenced.classId = ConstraintRelationId;
1104  referenced.objectId = constraintOid;
1105  referenced.objectSubId = 0;
1106  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1107  }
1108 
1109  /*
1110  * If it's a partition trigger, create the partition dependencies.
1111  */
1112  if (OidIsValid(parentTriggerOid))
1113  {
1114  ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1115  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1116  ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1117  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1118  }
1119  }
1120 
1121  /* If column-specific trigger, add normal dependencies on columns */
1122  if (columns != NULL)
1123  {
1124  int i;
1125 
1126  referenced.classId = RelationRelationId;
1127  referenced.objectId = RelationGetRelid(rel);
1128  for (i = 0; i < ncolumns; i++)
1129  {
1130  referenced.objectSubId = columns[i];
1131  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1132  }
1133  }
1134 
1135  /*
1136  * If it has a WHEN clause, add dependencies on objects mentioned in the
1137  * expression (eg, functions, as well as any columns used).
1138  */
1139  if (whenRtable != NIL)
1140  recordDependencyOnExpr(&myself, whenClause, whenRtable,
1142 
1143  /* Post creation hook for new trigger */
1144  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1145  isInternal);
1146 
1147  /*
1148  * Lastly, create the trigger on child relations, if needed.
1149  */
1150  if (partition_recurse)
1151  {
1152  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1153  int i;
1154  MemoryContext oldcxt,
1155  perChildCxt;
1156 
1158  "part trig clone",
1160 
1161  /*
1162  * We don't currently expect to be called with a valid indexOid. If
1163  * that ever changes then we'll need to write code here to find the
1164  * corresponding child index.
1165  */
1166  Assert(!OidIsValid(indexOid));
1167 
1168  oldcxt = MemoryContextSwitchTo(perChildCxt);
1169 
1170  /* Iterate to create the trigger on each existing partition */
1171  for (i = 0; i < partdesc->nparts; i++)
1172  {
1173  CreateTrigStmt *childStmt;
1174  Relation childTbl;
1175  Node *qual;
1176 
1177  childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1178 
1179  /*
1180  * Initialize our fabricated parse node by copying the original
1181  * one, then resetting fields that we pass separately.
1182  */
1183  childStmt = (CreateTrigStmt *) copyObject(stmt);
1184  childStmt->funcname = NIL;
1185  childStmt->whenClause = NULL;
1186 
1187  /* If there is a WHEN clause, create a modified copy of it */
1188  qual = copyObject(whenClause);
1189  qual = (Node *)
1191  childTbl, rel);
1192  qual = (Node *)
1194  childTbl, rel);
1195 
1196  CreateTriggerFiringOn(childStmt, queryString,
1197  partdesc->oids[i], refRelOid,
1199  funcoid, trigoid, qual,
1200  isInternal, true, trigger_fires_when);
1201 
1202  table_close(childTbl, NoLock);
1203 
1204  MemoryContextReset(perChildCxt);
1205  }
1206 
1207  MemoryContextSwitchTo(oldcxt);
1208  MemoryContextDelete(perChildCxt);
1209  }
1210 
1211  /* Keep lock on target rel until end of xact */
1212  table_close(rel, NoLock);
1213 
1214  return myself;
1215 }
1216 
1217 /*
1218  * TriggerSetParentTrigger
1219  * Set a partition's trigger as child of its parent trigger,
1220  * or remove the linkage if parentTrigId is InvalidOid.
1221  *
1222  * This updates the constraint's pg_trigger row to show it as inherited, and
1223  * adds PARTITION dependencies to prevent the trigger from being deleted
1224  * on its own. Alternatively, reverse that.
1225  */
1226 void
1228  Oid childTrigId,
1229  Oid parentTrigId,
1230  Oid childTableId)
1231 {
1232  SysScanDesc tgscan;
1233  ScanKeyData skey[1];
1234  Form_pg_trigger trigForm;
1235  HeapTuple tuple,
1236  newtup;
1237  ObjectAddress depender;
1238  ObjectAddress referenced;
1239 
1240  /*
1241  * Find the trigger to delete.
1242  */
1243  ScanKeyInit(&skey[0],
1244  Anum_pg_trigger_oid,
1245  BTEqualStrategyNumber, F_OIDEQ,
1246  ObjectIdGetDatum(childTrigId));
1247 
1248  tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true,
1249  NULL, 1, skey);
1250 
1251  tuple = systable_getnext(tgscan);
1252  if (!HeapTupleIsValid(tuple))
1253  elog(ERROR, "could not find tuple for trigger %u", childTrigId);
1254  newtup = heap_copytuple(tuple);
1255  trigForm = (Form_pg_trigger) GETSTRUCT(newtup);
1256  if (OidIsValid(parentTrigId))
1257  {
1258  /* don't allow setting parent for a constraint that already has one */
1259  if (OidIsValid(trigForm->tgparentid))
1260  elog(ERROR, "trigger %u already has a parent trigger",
1261  childTrigId);
1262 
1263  trigForm->tgparentid = parentTrigId;
1264 
1265  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1266 
1267  ObjectAddressSet(depender, TriggerRelationId, childTrigId);
1268 
1269  ObjectAddressSet(referenced, TriggerRelationId, parentTrigId);
1270  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI);
1271 
1272  ObjectAddressSet(referenced, RelationRelationId, childTableId);
1273  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC);
1274  }
1275  else
1276  {
1277  trigForm->tgparentid = InvalidOid;
1278 
1279  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1280 
1281  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1282  TriggerRelationId,
1284  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1285  RelationRelationId,
1287  }
1288 
1289  heap_freetuple(newtup);
1290  systable_endscan(tgscan);
1291 }
1292 
1293 
1294 /*
1295  * Guts of trigger deletion.
1296  */
1297 void
1299 {
1300  Relation tgrel;
1301  SysScanDesc tgscan;
1302  ScanKeyData skey[1];
1303  HeapTuple tup;
1304  Oid relid;
1305  Relation rel;
1306 
1307  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1308 
1309  /*
1310  * Find the trigger to delete.
1311  */
1312  ScanKeyInit(&skey[0],
1313  Anum_pg_trigger_oid,
1314  BTEqualStrategyNumber, F_OIDEQ,
1315  ObjectIdGetDatum(trigOid));
1316 
1317  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1318  NULL, 1, skey);
1319 
1320  tup = systable_getnext(tgscan);
1321  if (!HeapTupleIsValid(tup))
1322  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1323 
1324  /*
1325  * Open and exclusive-lock the relation the trigger belongs to.
1326  */
1327  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1328 
1329  rel = table_open(relid, AccessExclusiveLock);
1330 
1331  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1332  rel->rd_rel->relkind != RELKIND_VIEW &&
1333  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1334  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1335  ereport(ERROR,
1336  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1337  errmsg("relation \"%s\" cannot have triggers",
1339  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
1340 
1342  ereport(ERROR,
1343  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1344  errmsg("permission denied: \"%s\" is a system catalog",
1345  RelationGetRelationName(rel))));
1346 
1347  /*
1348  * Delete the pg_trigger tuple.
1349  */
1350  CatalogTupleDelete(tgrel, &tup->t_self);
1351 
1352  systable_endscan(tgscan);
1353  table_close(tgrel, RowExclusiveLock);
1354 
1355  /*
1356  * We do not bother to try to determine whether any other triggers remain,
1357  * which would be needed in order to decide whether it's safe to clear the
1358  * relation's relhastriggers. (In any case, there might be a concurrent
1359  * process adding new triggers.) Instead, just force a relcache inval to
1360  * make other backends (and this one too!) rebuild their relcache entries.
1361  * There's no great harm in leaving relhastriggers true even if there are
1362  * no triggers left.
1363  */
1365 
1366  /* Keep lock on trigger's rel until end of xact */
1367  table_close(rel, NoLock);
1368 }
1369 
1370 /*
1371  * get_trigger_oid - Look up a trigger by name to find its OID.
1372  *
1373  * If missing_ok is false, throw an error if trigger not found. If
1374  * true, just return InvalidOid.
1375  */
1376 Oid
1377 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1378 {
1379  Relation tgrel;
1380  ScanKeyData skey[2];
1381  SysScanDesc tgscan;
1382  HeapTuple tup;
1383  Oid oid;
1384 
1385  /*
1386  * Find the trigger, verify permissions, set up object address
1387  */
1388  tgrel = table_open(TriggerRelationId, AccessShareLock);
1389 
1390  ScanKeyInit(&skey[0],
1391  Anum_pg_trigger_tgrelid,
1392  BTEqualStrategyNumber, F_OIDEQ,
1393  ObjectIdGetDatum(relid));
1394  ScanKeyInit(&skey[1],
1395  Anum_pg_trigger_tgname,
1396  BTEqualStrategyNumber, F_NAMEEQ,
1397  CStringGetDatum(trigname));
1398 
1399  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1400  NULL, 2, skey);
1401 
1402  tup = systable_getnext(tgscan);
1403 
1404  if (!HeapTupleIsValid(tup))
1405  {
1406  if (!missing_ok)
1407  ereport(ERROR,
1408  (errcode(ERRCODE_UNDEFINED_OBJECT),
1409  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1410  trigname, get_rel_name(relid))));
1411  oid = InvalidOid;
1412  }
1413  else
1414  {
1415  oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1416  }
1417 
1418  systable_endscan(tgscan);
1419  table_close(tgrel, AccessShareLock);
1420  return oid;
1421 }
1422 
1423 /*
1424  * Perform permissions and integrity checks before acquiring a relation lock.
1425  */
1426 static void
1428  void *arg)
1429 {
1430  HeapTuple tuple;
1431  Form_pg_class form;
1432 
1433  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1434  if (!HeapTupleIsValid(tuple))
1435  return; /* concurrently dropped */
1436  form = (Form_pg_class) GETSTRUCT(tuple);
1437 
1438  /* only tables and views can have triggers */
1439  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1440  form->relkind != RELKIND_FOREIGN_TABLE &&
1441  form->relkind != RELKIND_PARTITIONED_TABLE)
1442  ereport(ERROR,
1443  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1444  errmsg("relation \"%s\" cannot have triggers",
1445  rv->relname),
1446  errdetail_relkind_not_supported(form->relkind)));
1447 
1448  /* you must own the table to rename one of its triggers */
1449  if (!object_ownercheck(RelationRelationId, relid, GetUserId()))
1451  if (!allowSystemTableMods && IsSystemClass(relid, form))
1452  ereport(ERROR,
1453  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1454  errmsg("permission denied: \"%s\" is a system catalog",
1455  rv->relname)));
1456 
1457  ReleaseSysCache(tuple);
1458 }
1459 
1460 /*
1461  * renametrig - changes the name of a trigger on a relation
1462  *
1463  * trigger name is changed in trigger catalog.
1464  * No record of the previous name is kept.
1465  *
1466  * get proper relrelation from relation catalog (if not arg)
1467  * scan trigger catalog
1468  * for name conflict (within rel)
1469  * for original trigger (if not arg)
1470  * modify tgname in trigger tuple
1471  * update row in catalog
1472  */
1475 {
1476  Oid tgoid;
1477  Relation targetrel;
1478  Relation tgrel;
1479  HeapTuple tuple;
1480  SysScanDesc tgscan;
1481  ScanKeyData key[2];
1482  Oid relid;
1483  ObjectAddress address;
1484 
1485  /*
1486  * Look up name, check permissions, and acquire lock (which we will NOT
1487  * release until end of transaction).
1488  */
1490  0,
1492  NULL);
1493 
1494  /* Have lock already, so just need to build relcache entry. */
1495  targetrel = relation_open(relid, NoLock);
1496 
1497  /*
1498  * On partitioned tables, this operation recurses to partitions. Lock all
1499  * tables upfront.
1500  */
1501  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1502  (void) find_all_inheritors(relid, AccessExclusiveLock, NULL);
1503 
1504  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1505 
1506  /*
1507  * Search for the trigger to modify.
1508  */
1509  ScanKeyInit(&key[0],
1510  Anum_pg_trigger_tgrelid,
1511  BTEqualStrategyNumber, F_OIDEQ,
1512  ObjectIdGetDatum(relid));
1513  ScanKeyInit(&key[1],
1514  Anum_pg_trigger_tgname,
1515  BTEqualStrategyNumber, F_NAMEEQ,
1516  PointerGetDatum(stmt->subname));
1517  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1518  NULL, 2, key);
1519  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1520  {
1521  Form_pg_trigger trigform;
1522 
1523  trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1524  tgoid = trigform->oid;
1525 
1526  /*
1527  * If the trigger descends from a trigger on a parent partitioned
1528  * table, reject the rename. We don't allow a trigger in a partition
1529  * to differ in name from that of its parent: that would lead to an
1530  * inconsistency that pg_dump would not reproduce.
1531  */
1532  if (OidIsValid(trigform->tgparentid))
1533  ereport(ERROR,
1534  errmsg("cannot rename trigger \"%s\" on table \"%s\"",
1535  stmt->subname, RelationGetRelationName(targetrel)),
1536  errhint("Rename the trigger on the partitioned table \"%s\" instead.",
1537  get_rel_name(get_partition_parent(relid, false))));
1538 
1539 
1540  /* Rename the trigger on this relation ... */
1541  renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
1542  stmt->subname);
1543 
1544  /* ... and if it is partitioned, recurse to its partitions */
1545  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1546  {
1547  PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);
1548 
1549  for (int i = 0; i < partdesc->nparts; i++)
1550  {
1551  Oid partitionId = partdesc->oids[i];
1552 
1553  renametrig_partition(tgrel, partitionId, trigform->oid,
1554  stmt->newname, stmt->subname);
1555  }
1556  }
1557  }
1558  else
1559  {
1560  ereport(ERROR,
1561  (errcode(ERRCODE_UNDEFINED_OBJECT),
1562  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1563  stmt->subname, RelationGetRelationName(targetrel))));
1564  }
1565 
1566  ObjectAddressSet(address, TriggerRelationId, tgoid);
1567 
1568  systable_endscan(tgscan);
1569 
1570  table_close(tgrel, RowExclusiveLock);
1571 
1572  /*
1573  * Close rel, but keep exclusive lock!
1574  */
1575  relation_close(targetrel, NoLock);
1576 
1577  return address;
1578 }
1579 
1580 /*
1581  * Subroutine for renametrig -- perform the actual work of renaming one
1582  * trigger on one table.
1583  *
1584  * If the trigger has a name different from the expected one, raise a
1585  * NOTICE about it.
1586  */
1587 static void
1589  const char *newname, const char *expected_name)
1590 {
1591  HeapTuple tuple;
1592  Form_pg_trigger tgform;
1593  ScanKeyData key[2];
1594  SysScanDesc tgscan;
1595 
1596  /* If the trigger already has the new name, nothing to do. */
1597  tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
1598  if (strcmp(NameStr(tgform->tgname), newname) == 0)
1599  return;
1600 
1601  /*
1602  * Before actually trying the rename, search for triggers with the same
1603  * name. The update would fail with an ugly message in that case, and it
1604  * is better to throw a nicer error.
1605  */
1606  ScanKeyInit(&key[0],
1607  Anum_pg_trigger_tgrelid,
1608  BTEqualStrategyNumber, F_OIDEQ,
1609  ObjectIdGetDatum(RelationGetRelid(targetrel)));
1610  ScanKeyInit(&key[1],
1611  Anum_pg_trigger_tgname,
1612  BTEqualStrategyNumber, F_NAMEEQ,
1613  PointerGetDatum(newname));
1614  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1615  NULL, 2, key);
1616  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1617  ereport(ERROR,
1619  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1620  newname, RelationGetRelationName(targetrel))));
1621  systable_endscan(tgscan);
1622 
1623  /*
1624  * The target name is free; update the existing pg_trigger tuple with it.
1625  */
1626  tuple = heap_copytuple(trigtup); /* need a modifiable copy */
1627  tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1628 
1629  /*
1630  * If the trigger has a name different from what we expected, let the user
1631  * know. (We can proceed anyway, since we must have reached here following
1632  * a tgparentid link.)
1633  */
1634  if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
1635  ereport(NOTICE,
1636  errmsg("renamed trigger \"%s\" on relation \"%s\"",
1637  NameStr(tgform->tgname),
1638  RelationGetRelationName(targetrel)));
1639 
1640  namestrcpy(&tgform->tgname, newname);
1641 
1642  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1643 
1644  InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);
1645 
1646  /*
1647  * Invalidate relation's relcache entry so that other backends (and this
1648  * one too!) are sent SI message to make them rebuild relcache entries.
1649  * (Ideally this should happen automatically...)
1650  */
1651  CacheInvalidateRelcache(targetrel);
1652 }
1653 
1654 /*
1655  * Subroutine for renametrig -- Helper for recursing to partitions when
1656  * renaming triggers on a partitioned table.
1657  */
1658 static void
1659 renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
1660  const char *newname, const char *expected_name)
1661 {
1662  SysScanDesc tgscan;
1663  ScanKeyData key;
1664  HeapTuple tuple;
1665 
1666  /*
1667  * Given a relation and the OID of a trigger on parent relation, find the
1668  * corresponding trigger in the child and rename that trigger to the given
1669  * name.
1670  */
1671  ScanKeyInit(&key,
1672  Anum_pg_trigger_tgrelid,
1673  BTEqualStrategyNumber, F_OIDEQ,
1674  ObjectIdGetDatum(partitionId));
1675  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1676  NULL, 1, &key);
1677  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1678  {
1679  Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1680  Relation partitionRel;
1681 
1682  if (tgform->tgparentid != parentTriggerOid)
1683  continue; /* not our trigger */
1684 
1685  partitionRel = table_open(partitionId, NoLock);
1686 
1687  /* Rename the trigger on this partition */
1688  renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);
1689 
1690  /* And if this relation is partitioned, recurse to its partitions */
1691  if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1692  {
1693  PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
1694  true);
1695 
1696  for (int i = 0; i < partdesc->nparts; i++)
1697  {
1698  Oid partoid = partdesc->oids[i];
1699 
1700  renametrig_partition(tgrel, partoid, tgform->oid, newname,
1701  NameStr(tgform->tgname));
1702  }
1703  }
1704  table_close(partitionRel, NoLock);
1705 
1706  /* There should be at most one matching tuple */
1707  break;
1708  }
1709  systable_endscan(tgscan);
1710 }
1711 
1712 /*
1713  * EnableDisableTrigger()
1714  *
1715  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1716  * to change 'tgenabled' field for the specified trigger(s)
1717  *
1718  * rel: relation to process (caller must hold suitable lock on it)
1719  * tgname: name of trigger to process, or NULL to scan all triggers
1720  * tgparent: if not zero, process only triggers with this tgparentid
1721  * fires_when: new value for tgenabled field. In addition to generic
1722  * enablement/disablement, this also defines when the trigger
1723  * should be fired in session replication roles.
1724  * skip_system: if true, skip "system" triggers (constraint triggers)
1725  * recurse: if true, recurse to partitions
1726  *
1727  * Caller should have checked permissions for the table; here we also
1728  * enforce that superuser privilege is required to alter the state of
1729  * system triggers
1730  */
1731 void
1732 EnableDisableTrigger(Relation rel, const char *tgname, Oid tgparent,
1733  char fires_when, bool skip_system, bool recurse,
1734  LOCKMODE lockmode)
1735 {
1736  Relation tgrel;
1737  int nkeys;
1738  ScanKeyData keys[2];
1739  SysScanDesc tgscan;
1740  HeapTuple tuple;
1741  bool found;
1742  bool changed;
1743 
1744  /* Scan the relevant entries in pg_triggers */
1745  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1746 
1747  ScanKeyInit(&keys[0],
1748  Anum_pg_trigger_tgrelid,
1749  BTEqualStrategyNumber, F_OIDEQ,
1751  if (tgname)
1752  {
1753  ScanKeyInit(&keys[1],
1754  Anum_pg_trigger_tgname,
1755  BTEqualStrategyNumber, F_NAMEEQ,
1756  CStringGetDatum(tgname));
1757  nkeys = 2;
1758  }
1759  else
1760  nkeys = 1;
1761 
1762  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1763  NULL, nkeys, keys);
1764 
1765  found = changed = false;
1766 
1767  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1768  {
1769  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1770 
1771  if (OidIsValid(tgparent) && tgparent != oldtrig->tgparentid)
1772  continue;
1773 
1774  if (oldtrig->tgisinternal)
1775  {
1776  /* system trigger ... ok to process? */
1777  if (skip_system)
1778  continue;
1779  if (!superuser())
1780  ereport(ERROR,
1781  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1782  errmsg("permission denied: \"%s\" is a system trigger",
1783  NameStr(oldtrig->tgname))));
1784  }
1785 
1786  found = true;
1787 
1788  if (oldtrig->tgenabled != fires_when)
1789  {
1790  /* need to change this one ... make a copy to scribble on */
1791  HeapTuple newtup = heap_copytuple(tuple);
1792  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1793 
1794  newtrig->tgenabled = fires_when;
1795 
1796  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1797 
1798  heap_freetuple(newtup);
1799 
1800  changed = true;
1801  }
1802 
1803  /*
1804  * When altering FOR EACH ROW triggers on a partitioned table, do the
1805  * same on the partitions as well, unless ONLY is specified.
1806  *
1807  * Note that we recurse even if we didn't change the trigger above,
1808  * because the partitions' copy of the trigger may have a different
1809  * value of tgenabled than the parent's trigger and thus might need to
1810  * be changed.
1811  */
1812  if (recurse &&
1813  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
1814  (TRIGGER_FOR_ROW(oldtrig->tgtype)))
1815  {
1816  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1817  int i;
1818 
1819  for (i = 0; i < partdesc->nparts; i++)
1820  {
1821  Relation part;
1822 
1823  part = relation_open(partdesc->oids[i], lockmode);
1824  /* Match on child triggers' tgparentid, not their name */
1825  EnableDisableTrigger(part, NULL, oldtrig->oid,
1826  fires_when, skip_system, recurse,
1827  lockmode);
1828  table_close(part, NoLock); /* keep lock till commit */
1829  }
1830  }
1831 
1832  InvokeObjectPostAlterHook(TriggerRelationId,
1833  oldtrig->oid, 0);
1834  }
1835 
1836  systable_endscan(tgscan);
1837 
1838  table_close(tgrel, RowExclusiveLock);
1839 
1840  if (tgname && !found)
1841  ereport(ERROR,
1842  (errcode(ERRCODE_UNDEFINED_OBJECT),
1843  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1844  tgname, RelationGetRelationName(rel))));
1845 
1846  /*
1847  * If we changed anything, broadcast a SI inval message to force each
1848  * backend (including our own!) to rebuild relation's relcache entry.
1849  * Otherwise they will fail to apply the change promptly.
1850  */
1851  if (changed)
1853 }
1854 
1855 
1856 /*
1857  * Build trigger data to attach to the given relcache entry.
1858  *
1859  * Note that trigger data attached to a relcache entry must be stored in
1860  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1861  * But we should be running in a less long-lived working context. To avoid
1862  * leaking cache memory if this routine fails partway through, we build a
1863  * temporary TriggerDesc in working memory and then copy the completed
1864  * structure into cache memory.
1865  */
1866 void
1868 {
1869  TriggerDesc *trigdesc;
1870  int numtrigs;
1871  int maxtrigs;
1872  Trigger *triggers;
1873  Relation tgrel;
1874  ScanKeyData skey;
1875  SysScanDesc tgscan;
1876  HeapTuple htup;
1877  MemoryContext oldContext;
1878  int i;
1879 
1880  /*
1881  * Allocate a working array to hold the triggers (the array is extended if
1882  * necessary)
1883  */
1884  maxtrigs = 16;
1885  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1886  numtrigs = 0;
1887 
1888  /*
1889  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1890  * be reading the triggers in name order, except possibly during
1891  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1892  * ensures that triggers will be fired in name order.
1893  */
1894  ScanKeyInit(&skey,
1895  Anum_pg_trigger_tgrelid,
1896  BTEqualStrategyNumber, F_OIDEQ,
1897  ObjectIdGetDatum(RelationGetRelid(relation)));
1898 
1899  tgrel = table_open(TriggerRelationId, AccessShareLock);
1900  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1901  NULL, 1, &skey);
1902 
1903  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1904  {
1905  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1906  Trigger *build;
1907  Datum datum;
1908  bool isnull;
1909 
1910  if (numtrigs >= maxtrigs)
1911  {
1912  maxtrigs *= 2;
1913  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1914  }
1915  build = &(triggers[numtrigs]);
1916 
1917  build->tgoid = pg_trigger->oid;
1919  NameGetDatum(&pg_trigger->tgname)));
1920  build->tgfoid = pg_trigger->tgfoid;
1921  build->tgtype = pg_trigger->tgtype;
1922  build->tgenabled = pg_trigger->tgenabled;
1923  build->tgisinternal = pg_trigger->tgisinternal;
1924  build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1925  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1926  build->tgconstrindid = pg_trigger->tgconstrindid;
1927  build->tgconstraint = pg_trigger->tgconstraint;
1928  build->tgdeferrable = pg_trigger->tgdeferrable;
1929  build->tginitdeferred = pg_trigger->tginitdeferred;
1930  build->tgnargs = pg_trigger->tgnargs;
1931  /* tgattr is first var-width field, so OK to access directly */
1932  build->tgnattr = pg_trigger->tgattr.dim1;
1933  if (build->tgnattr > 0)
1934  {
1935  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1936  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1937  build->tgnattr * sizeof(int16));
1938  }
1939  else
1940  build->tgattr = NULL;
1941  if (build->tgnargs > 0)
1942  {
1943  bytea *val;
1944  char *p;
1945 
1947  Anum_pg_trigger_tgargs,
1948  tgrel->rd_att, &isnull));
1949  if (isnull)
1950  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1951  RelationGetRelationName(relation));
1952  p = (char *) VARDATA_ANY(val);
1953  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1954  for (i = 0; i < build->tgnargs; i++)
1955  {
1956  build->tgargs[i] = pstrdup(p);
1957  p += strlen(p) + 1;
1958  }
1959  }
1960  else
1961  build->tgargs = NULL;
1962 
1963  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1964  tgrel->rd_att, &isnull);
1965  if (!isnull)
1966  build->tgoldtable =
1968  else
1969  build->tgoldtable = NULL;
1970 
1971  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1972  tgrel->rd_att, &isnull);
1973  if (!isnull)
1974  build->tgnewtable =
1976  else
1977  build->tgnewtable = NULL;
1978 
1979  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1980  tgrel->rd_att, &isnull);
1981  if (!isnull)
1982  build->tgqual = TextDatumGetCString(datum);
1983  else
1984  build->tgqual = NULL;
1985 
1986  numtrigs++;
1987  }
1988 
1989  systable_endscan(tgscan);
1990  table_close(tgrel, AccessShareLock);
1991 
1992  /* There might not be any triggers */
1993  if (numtrigs == 0)
1994  {
1995  pfree(triggers);
1996  return;
1997  }
1998 
1999  /* Build trigdesc */
2000  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
2001  trigdesc->triggers = triggers;
2002  trigdesc->numtriggers = numtrigs;
2003  for (i = 0; i < numtrigs; i++)
2004  SetTriggerFlags(trigdesc, &(triggers[i]));
2005 
2006  /* Copy completed trigdesc into cache storage */
2008  relation->trigdesc = CopyTriggerDesc(trigdesc);
2009  MemoryContextSwitchTo(oldContext);
2010 
2011  /* Release working memory */
2012  FreeTriggerDesc(trigdesc);
2013 }
2014 
2015 /*
2016  * Update the TriggerDesc's hint flags to include the specified trigger
2017  */
/*
 * OR the given trigger's tgtype classification into the TriggerDesc's
 * boolean hint flags (trig_insert_before_row, trig_update_after_statement,
 * etc.), so later callers can test cheaply whether any trigger of a given
 * class exists without rescanning the trigger array.
 *
 * NOTE(review): the line carrying the function name and parameter list was
 * dropped during text extraction; body usage shows the parameters are
 * (TriggerDesc *trigdesc, Trigger *trigger) -- verify against upstream.
 */
2018 static void
2020 {
2021  int16 tgtype = trigger->tgtype;
2022 
2023  trigdesc->trig_insert_before_row |=
2024  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2025  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2026  trigdesc->trig_insert_after_row |=
2027  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2028  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2029  trigdesc->trig_insert_instead_row |=
2030  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2031  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
2032  trigdesc->trig_insert_before_statement |=
2033  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2034  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2035  trigdesc->trig_insert_after_statement |=
2036  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2037  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2038  trigdesc->trig_update_before_row |=
2039  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2040  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2041  trigdesc->trig_update_after_row |=
2042  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2043  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2044  trigdesc->trig_update_instead_row |=
2045  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2046  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
2047  trigdesc->trig_update_before_statement |=
2048  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2049  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2050  trigdesc->trig_update_after_statement |=
2051  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2052  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2053  trigdesc->trig_delete_before_row |=
2054  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2055  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2056  trigdesc->trig_delete_after_row |=
2057  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2058  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2059  trigdesc->trig_delete_instead_row |=
2060  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2061  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
2062  trigdesc->trig_delete_before_statement |=
2063  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2064  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2065  trigdesc->trig_delete_after_statement |=
2066  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2067  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2068  /* there are no row-level truncate triggers */
2069  trigdesc->trig_truncate_before_statement |=
2070  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2071  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
2072  trigdesc->trig_truncate_after_statement |=
2073  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2074  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
2075 
 /*
  * Transition-table hint flags: set only when the trigger fires on the
  * event type AND actually names the corresponding transition table.
  */
2076  trigdesc->trig_insert_new_table |=
2077  (TRIGGER_FOR_INSERT(tgtype) &&
2078  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2079  trigdesc->trig_update_old_table |=
2080  (TRIGGER_FOR_UPDATE(tgtype) &&
2081  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2082  trigdesc->trig_update_new_table |=
2083  (TRIGGER_FOR_UPDATE(tgtype) &&
2084  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2085  trigdesc->trig_delete_old_table |=
2086  (TRIGGER_FOR_DELETE(tgtype) &&
2087  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2088 }
2089 
2090 /*
2091  * Copy a TriggerDesc data structure.
2092  *
2093  * The copy is allocated in the current memory context.
2094  */
/*
 * Deep copy: a flat memcpy of the descriptor and trigger array first, then
 * each pointer field within every Trigger is replaced by a freshly
 * palloc'd/pstrdup'd copy, so the result shares no storage with the input.
 * Returns NULL for a NULL or empty input descriptor.
 *
 * NOTE(review): the line carrying the function name and parameter list
 * (CopyTriggerDesc(TriggerDesc *trigdesc), per the body's usage of
 * "trigdesc") was dropped during text extraction -- verify against upstream.
 */
2095 TriggerDesc *
2097 {
2098  TriggerDesc *newdesc;
2099  Trigger *trigger;
2100  int i;
2101 
2102  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2103  return NULL;
2104 
 /* flat copy of the descriptor, including all the boolean hint flags */
2105  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2106  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2107 
 /* flat copy of the trigger array; pointer fields fixed up below */
2108  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2109  memcpy(trigger, trigdesc->triggers,
2110  trigdesc->numtriggers * sizeof(Trigger));
2111  newdesc->triggers = trigger;
2112 
2113  for (i = 0; i < trigdesc->numtriggers; i++)
2114  {
2115  trigger->tgname = pstrdup(trigger->tgname);
2116  if (trigger->tgnattr > 0)
2117  {
2118  int16 *newattr;
2119 
2120  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
2121  memcpy(newattr, trigger->tgattr,
2122  trigger->tgnattr * sizeof(int16));
2123  trigger->tgattr = newattr;
2124  }
2125  if (trigger->tgnargs > 0)
2126  {
2127  char **newargs;
2128  int16 j;
2129 
2130  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2131  for (j = 0; j < trigger->tgnargs; j++)
2132  newargs[j] = pstrdup(trigger->tgargs[j]);
2133  trigger->tgargs = newargs;
2134  }
 /* the remaining string fields are optional (may be NULL) */
2135  if (trigger->tgqual)
2136  trigger->tgqual = pstrdup(trigger->tgqual);
2137  if (trigger->tgoldtable)
2138  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2139  if (trigger->tgnewtable)
2140  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
2141  trigger++;
2142  }
2143 
2144  return newdesc;
2145 }
2146 
2147 /*
2148  * Free a TriggerDesc data structure.
2149  */
2150 void
2152 {
2153  Trigger *trigger;
2154  int i;
2155 
2156  if (trigdesc == NULL)
2157  return;
2158 
2159  trigger = trigdesc->triggers;
2160  for (i = 0; i < trigdesc->numtriggers; i++)
2161  {
2162  pfree(trigger->tgname);
2163  if (trigger->tgnattr > 0)
2164  pfree(trigger->tgattr);
2165  if (trigger->tgnargs > 0)
2166  {
2167  while (--(trigger->tgnargs) >= 0)
2168  pfree(trigger->tgargs[trigger->tgnargs]);
2169  pfree(trigger->tgargs);
2170  }
2171  if (trigger->tgqual)
2172  pfree(trigger->tgqual);
2173  if (trigger->tgoldtable)
2174  pfree(trigger->tgoldtable);
2175  if (trigger->tgnewtable)
2176  pfree(trigger->tgnewtable);
2177  trigger++;
2178  }
2179  pfree(trigdesc->triggers);
2180  pfree(trigdesc);
2181 }
2182 
/*
 * Compare two TriggerDesc structures for logical equality.
 *
 * We need not examine the hint flags, just the trigger array itself; if we
 * have the same triggers with the same types, the flags should match.
 *
 * Trigger set ordering is treated as significant in the comparison, so we
 * simply compare corresponding slots of the two sets.
 *
 * Note: comparing the stringToNode forms of the WHEN clauses means that
 * parse column locations will affect the result.  This is okay as long as
 * this function is only used for detecting exact equality, as for example
 * in checking for staleness of a cache entry.
 */
#ifdef NOT_USED
bool
equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
{
	int			i;

	/* Two NULLs are equal; one NULL is not. */
	if (trigdesc1 == NULL)
		return trigdesc2 == NULL;
	if (trigdesc2 == NULL)
		return false;

	if (trigdesc1->numtriggers != trigdesc2->numtriggers)
		return false;

	for (i = 0; i < trigdesc1->numtriggers; i++)
	{
		Trigger    *a = trigdesc1->triggers + i;
		Trigger    *b = trigdesc2->triggers + i;
		int			j;

		/* All scalar fields must match exactly. */
		if (a->tgoid != b->tgoid ||
			a->tgfoid != b->tgfoid ||
			a->tgtype != b->tgtype ||
			a->tgenabled != b->tgenabled ||
			a->tgisinternal != b->tgisinternal ||
			a->tgisclone != b->tgisclone ||
			a->tgconstrrelid != b->tgconstrrelid ||
			a->tgconstrindid != b->tgconstrindid ||
			a->tgconstraint != b->tgconstraint ||
			a->tgdeferrable != b->tgdeferrable ||
			a->tginitdeferred != b->tginitdeferred ||
			a->tgnargs != b->tgnargs ||
			a->tgnattr != b->tgnattr)
			return false;

		if (strcmp(a->tgname, b->tgname) != 0)
			return false;

		/* Column lists are equal-length here; compare contents. */
		if (a->tgnattr > 0 &&
			memcmp(a->tgattr, b->tgattr, a->tgnattr * sizeof(int16)) != 0)
			return false;

		/* Argument counts are equal here; compare each string. */
		for (j = 0; j < a->tgnargs; j++)
		{
			if (strcmp(a->tgargs[j], b->tgargs[j]) != 0)
				return false;
		}

		/* Nullable string fields: equal iff both NULL or strcmp-equal. */
		if ((a->tgqual == NULL) != (b->tgqual == NULL))
			return false;
		if (a->tgqual != NULL && strcmp(a->tgqual, b->tgqual) != 0)
			return false;

		if ((a->tgoldtable == NULL) != (b->tgoldtable == NULL))
			return false;
		if (a->tgoldtable != NULL &&
			strcmp(a->tgoldtable, b->tgoldtable) != 0)
			return false;

		if ((a->tgnewtable == NULL) != (b->tgnewtable == NULL))
			return false;
		if (a->tgnewtable != NULL &&
			strcmp(a->tgnewtable, b->tgnewtable) != 0)
			return false;
	}

	return true;
}
#endif							/* NOT_USED */
2276 
2277 /*
2278  * Check if there is a row-level trigger with transition tables that prevents
2279  * a table from becoming an inheritance child or partition. Return the name
2280  * of the first such incompatible trigger, or NULL if there is none.
2281  */
2282 const char *
2284 {
2285  if (trigdesc != NULL)
2286  {
2287  int i;
2288 
2289  for (i = 0; i < trigdesc->numtriggers; ++i)
2290  {
2291  Trigger *trigger = &trigdesc->triggers[i];
2292 
2293  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2294  return trigger->tgname;
2295  }
2296  }
2297 
2298  return NULL;
2299 }
2300 
2301 /*
2302  * Call a trigger function.
2303  *
2304  * trigdata: trigger descriptor.
2305  * tgindx: trigger's index in finfo and instr arrays.
2306  * finfo: array of cached trigger function call information.
2307  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2308  * per_tuple_context: memory context to execute the function in.
2309  *
2310  * Returns the tuple (or NULL) as returned by the function.
2311  */
/*
 * NOTE(review): the first line of the signature (carrying the function name
 * and "TriggerData *trigdata," per the body's usage) was dropped during
 * text extraction -- verify against upstream.
 */
2312 static HeapTuple
2314  int tgindx,
2315  FmgrInfo *finfo,
2316  Instrumentation *instr,
2317  MemoryContext per_tuple_context)
2318 {
2319  LOCAL_FCINFO(fcinfo, 0);
2320  PgStat_FunctionCallUsage fcusage;
2321  Datum result;
2322  MemoryContext oldContext;
2323 
2324  /*
2325  * Protect against code paths that may fail to initialize transition table
2326  * info.
2327  */
2328  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2329  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2330  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2331  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2332  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2333  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2334  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2335 
 /* point at this trigger's slot in the caller-supplied FmgrInfo array */
2336  finfo += tgindx;
2337 
2338  /*
2339  * We cache fmgr lookup info, to avoid making the lookup again on each
2340  * call.
2341  */
2342  if (finfo->fn_oid == InvalidOid)
2343  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2344 
2345  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2346 
2347  /*
2348  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2349  */
2350  if (instr)
2351  InstrStartNode(instr + tgindx);
2352 
2353  /*
2354  * Do the function evaluation in the per-tuple memory context, so that
2355  * leaked memory will be reclaimed once per tuple. Note in particular that
2356  * any new tuple created by the trigger function will live till the end of
2357  * the tuple cycle.
2358  */
2359  oldContext = MemoryContextSwitchTo(per_tuple_context);
2360 
2361  /*
2362  * Call the function, passing no arguments but setting a context.
2363  */
2364  InitFunctionCallInfoData(*fcinfo, finfo, 0,
2365  InvalidOid, (Node *) trigdata, NULL);
2366 
2367  pgstat_init_function_usage(fcinfo, &fcusage);
2368 
 /*
  * Keep the trigger-nesting depth counter balanced even if the trigger
  * function throws an error: PG_FINALLY runs the decrement on both the
  * normal and the error exit path.
  */
2369  MyTriggerDepth++;
2370  PG_TRY();
2371  {
2372  result = FunctionCallInvoke(fcinfo);
2373  }
2374  PG_FINALLY();
2375  {
2376  MyTriggerDepth--;
2377  }
2378  PG_END_TRY();
2379 
2380  pgstat_end_function_usage(&fcusage, true);
2381 
2382  MemoryContextSwitchTo(oldContext);
2383 
2384  /*
2385  * Trigger protocol allows function to return a null pointer, but NOT to
2386  * set the isnull result flag.
2387  */
2388  if (fcinfo->isnull)
2389  ereport(ERROR,
2390  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2391  errmsg("trigger function %u returned null value",
2392  fcinfo->flinfo->fn_oid)));
2393 
2394  /*
2395  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2396  * one "tuple returned" (really the number of firings).
2397  */
2398  if (instr)
2399  InstrStopNode(instr + tgindx, 1);
2400 
2401  return (HeapTuple) DatumGetPointer(result);
2402 }
2403 
/*
 * Fire all enabled BEFORE STATEMENT triggers for INSERT on the given result
 * relation.  No-op when the relation has no such triggers or they already
 * fired in this context.  Errors out if a trigger returns a tuple, since
 * statement-level triggers must return NULL.
 *
 * NOTE(review): text extraction dropped the signature line carrying the
 * function name and first parameters, plus the lines completing the
 * "fired already?" check and the tg_event flag expression -- verify
 * against upstream.
 */
2404 void
2406 {
2407  TriggerDesc *trigdesc;
2408  int i;
2409  TriggerData LocTriggerData = {0};
2410 
2411  trigdesc = relinfo->ri_TrigDesc;
2412 
2413  if (trigdesc == NULL)
2414  return;
2415  if (!trigdesc->trig_insert_before_statement)
2416  return;
2417 
2418  /* no-op if we already fired BS triggers in this context */
2420  CMD_INSERT))
2421  return;
2422 
2423  LocTriggerData.type = T_TriggerData;
2424  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2426  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2427  for (i = 0; i < trigdesc->numtriggers; i++)
2428  {
2429  Trigger *trigger = &trigdesc->triggers[i];
2430  HeapTuple newtuple;
2431 
2432  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2433  TRIGGER_TYPE_STATEMENT,
2434  TRIGGER_TYPE_BEFORE,
2435  TRIGGER_TYPE_INSERT))
2436  continue;
2437  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2438  NULL, NULL, NULL))
2439  continue;
2440 
2441  LocTriggerData.tg_trigger = trigger;
2442  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2443  i,
2444  relinfo->ri_TrigFunctions,
2445  relinfo->ri_TrigInstrument,
2446  GetPerTupleMemoryContext(estate));
2447 
2448  if (newtuple)
2449  ereport(ERROR,
2450  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2451  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2452  }
2453 }
2454 
/*
 * Queue AFTER STATEMENT triggers for INSERT (fired later via
 * AfterTriggerSaveEvent), if any exist for this result relation.
 *
 * NOTE(review): text extraction dropped the signature line carrying the
 * function name and first parameters, and the event-constant argument
 * line of the AfterTriggerSaveEvent call -- verify against upstream.
 */
2455 void
2457  TransitionCaptureState *transition_capture)
2458 {
2459  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2460 
2461  if (trigdesc && trigdesc->trig_insert_after_statement)
2462  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2464  false, NULL, NULL, NIL, NULL, transition_capture,
2465  false);
2466 }
2467 
/*
 * Fire BEFORE ROW triggers for INSERT on the tuple in *slot.  Returns false
 * if any trigger returned NULL ("do nothing"); true means the caller may
 * proceed, with the possibly-modified tuple stored back into the slot.
 * Also rejects a clone trigger that moves the row out of its partition.
 *
 * NOTE(review): text extraction dropped the signature line with the
 * function name/first parameters, the tg_event flag-continuation lines,
 * and the errdetail arguments naming the partition -- verify upstream.
 */
2468 bool
2470  TupleTableSlot *slot)
2471 {
2472  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2473  HeapTuple newtuple = NULL;
2474  bool should_free;
2475  TriggerData LocTriggerData = {0};
2476  int i;
2477 
2478  LocTriggerData.type = T_TriggerData;
2479  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2482  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2483  for (i = 0; i < trigdesc->numtriggers; i++)
2484  {
2485  Trigger *trigger = &trigdesc->triggers[i];
2486  HeapTuple oldtuple;
2487 
2488  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2489  TRIGGER_TYPE_ROW,
2490  TRIGGER_TYPE_BEFORE,
2491  TRIGGER_TYPE_INSERT))
2492  continue;
2493  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2494  NULL, NULL, slot))
2495  continue;
2496 
 /* materialize the slot lazily, only once a trigger will actually fire */
2497  if (!newtuple)
2498  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2499 
2500  LocTriggerData.tg_trigslot = slot;
2501  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2502  LocTriggerData.tg_trigger = trigger;
2503  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2504  i,
2505  relinfo->ri_TrigFunctions,
2506  relinfo->ri_TrigInstrument,
2507  GetPerTupleMemoryContext(estate));
2508  if (newtuple == NULL)
2509  {
2510  if (should_free)
2511  heap_freetuple(oldtuple);
2512  return false; /* "do nothing" */
2513  }
2514  else if (newtuple != oldtuple)
2515  {
2516  ExecForceStoreHeapTuple(newtuple, slot, false);
2517 
2518  /*
2519  * After a tuple in a partition goes through a trigger, the user
2520  * could have changed the partition key enough that the tuple no
2521  * longer fits the partition. Verify that.
2522  */
2523  if (trigger->tgisclone &&
2524  !ExecPartitionCheck(relinfo, slot, estate, false))
2525  ereport(ERROR,
2526  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2527  errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2528  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2529  trigger->tgname,
2532 
2533  if (should_free)
2534  heap_freetuple(oldtuple);
2535 
2536  /* signal tuple should be re-fetched if used */
2537  newtuple = NULL;
2538  }
2539  }
2540 
2541  return true;
2542 }
2543 
/*
 * Queue AFTER ROW triggers for INSERT (and/or capture the new row into a
 * transition table) for the tuple in *slot.  No-op when neither AFTER ROW
 * insert triggers nor an insert transition table are present.
 *
 * NOTE(review): text extraction dropped the signature line carrying the
 * function name/first parameters and the event-constant argument line of
 * the AfterTriggerSaveEvent call -- verify against upstream.
 */
2544 void
2546  TupleTableSlot *slot, List *recheckIndexes,
2547  TransitionCaptureState *transition_capture)
2548 {
2549  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2550 
2551  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2552  (transition_capture && transition_capture->tcs_insert_new_table))
2553  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2555  true, NULL, slot,
2556  recheckIndexes, NULL,
2557  transition_capture,
2558  false);
2559 }
2560 
/*
 * Fire INSTEAD OF ROW triggers for INSERT on the tuple in *slot (used for
 * views).  Returns false if any trigger returned NULL ("do nothing");
 * true means the insert is considered done, with the possibly-modified
 * tuple stored back into the slot.
 *
 * NOTE(review): text extraction dropped the signature line carrying the
 * function name/first parameters and the tg_event flag-continuation
 * lines -- verify against upstream.
 */
2561 bool
2563  TupleTableSlot *slot)
2564 {
2565  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2566  HeapTuple newtuple = NULL;
2567  bool should_free;
2568  TriggerData LocTriggerData = {0};
2569  int i;
2570 
2571  LocTriggerData.type = T_TriggerData;
2572  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2575  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2576  for (i = 0; i < trigdesc->numtriggers; i++)
2577  {
2578  Trigger *trigger = &trigdesc->triggers[i];
2579  HeapTuple oldtuple;
2580 
2581  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2582  TRIGGER_TYPE_ROW,
2583  TRIGGER_TYPE_INSTEAD,
2584  TRIGGER_TYPE_INSERT))
2585  continue;
2586  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2587  NULL, NULL, slot))
2588  continue;
2589 
 /* materialize the slot lazily, only once a trigger will actually fire */
2590  if (!newtuple)
2591  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2592 
2593  LocTriggerData.tg_trigslot = slot;
2594  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2595  LocTriggerData.tg_trigger = trigger;
2596  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2597  i,
2598  relinfo->ri_TrigFunctions,
2599  relinfo->ri_TrigInstrument,
2600  GetPerTupleMemoryContext(estate));
2601  if (newtuple == NULL)
2602  {
2603  if (should_free)
2604  heap_freetuple(oldtuple);
2605  return false; /* "do nothing" */
2606  }
2607  else if (newtuple != oldtuple)
2608  {
2609  ExecForceStoreHeapTuple(newtuple, slot, false);
2610 
2611  if (should_free)
2612  heap_freetuple(oldtuple);
2613 
2614  /* signal tuple should be re-fetched if used */
2615  newtuple = NULL;
2616  }
2617  }
2618 
2619  return true;
2620 }
2621 
/*
 * Fire all enabled BEFORE STATEMENT triggers for DELETE on the given result
 * relation.  Same structure as the INSERT variant above: no-op when absent
 * or already fired; errors out if a trigger returns a tuple.
 *
 * NOTE(review): text extraction dropped the signature line with the
 * function name/parameters, plus the lines completing the "fired
 * already?" check and the tg_event flag expression -- verify upstream.
 */
2622 void
2624 {
2625  TriggerDesc *trigdesc;
2626  int i;
2627  TriggerData LocTriggerData = {0};
2628 
2629  trigdesc = relinfo->ri_TrigDesc;
2630 
2631  if (trigdesc == NULL)
2632  return;
2633  if (!trigdesc->trig_delete_before_statement)
2634  return;
2635 
2636  /* no-op if we already fired BS triggers in this context */
2638  CMD_DELETE))
2639  return;
2640 
2641  LocTriggerData.type = T_TriggerData;
2642  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2644  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2645  for (i = 0; i < trigdesc->numtriggers; i++)
2646  {
2647  Trigger *trigger = &trigdesc->triggers[i];
2648  HeapTuple newtuple;
2649 
2650  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2651  TRIGGER_TYPE_STATEMENT,
2652  TRIGGER_TYPE_BEFORE,
2653  TRIGGER_TYPE_DELETE))
2654  continue;
2655  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2656  NULL, NULL, NULL))
2657  continue;
2658 
2659  LocTriggerData.tg_trigger = trigger;
2660  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2661  i,
2662  relinfo->ri_TrigFunctions,
2663  relinfo->ri_TrigInstrument,
2664  GetPerTupleMemoryContext(estate));
2665 
2666  if (newtuple)
2667  ereport(ERROR,
2668  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2669  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2670  }
2671 }
2672 
/*
 * Queue AFTER STATEMENT triggers for DELETE (fired later via
 * AfterTriggerSaveEvent), if any exist for this result relation.
 *
 * NOTE(review): text extraction dropped the signature line carrying the
 * function name/first parameters and the event-constant argument line of
 * the AfterTriggerSaveEvent call -- verify against upstream.
 */
2673 void
2675  TransitionCaptureState *transition_capture)
2676 {
2677  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2678 
2679  if (trigdesc && trigdesc->trig_delete_after_statement)
2680  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2682  false, NULL, NULL, NIL, NULL, transition_capture,
2683  false);
2684 }
2685 
2686 /*
2687  * Execute BEFORE ROW DELETE triggers.
2688  *
2689  * True indicates caller can proceed with the delete. False indicates caller
2690  * need to suppress the delete and additionally if requested, we need to pass
2691  * back the concurrently updated tuple if any.
2692  */
/*
 * Exactly one of tupleid / fdw_trigtuple identifies the row (asserted
 * below): tupleid for regular tables (fetched and locked via
 * GetTupleForTrigger), fdw_trigtuple for foreign tables.
 *
 * NOTE(review): text extraction dropped the signature line carrying the
 * function name and first parameters (EState/EPQState per body usage of
 * estate/epqstate), and the tg_event flag-continuation lines -- verify
 * against upstream.
 */
2693 bool
2695  ResultRelInfo *relinfo,
2696  ItemPointer tupleid,
2697  HeapTuple fdw_trigtuple,
2698  TupleTableSlot **epqslot,
2699  TM_Result *tmresult,
2700  TM_FailureData *tmfd)
2701 {
2702  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2703  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2704  bool result = true;
2705  TriggerData LocTriggerData = {0};
2706  HeapTuple trigtuple;
2707  bool should_free = false;
2708  int i;
2709 
2710  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2711  if (fdw_trigtuple == NULL)
2712  {
2713  TupleTableSlot *epqslot_candidate = NULL;
2714 
2715  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2716  LockTupleExclusive, slot, &epqslot_candidate,
2717  tmresult, tmfd))
2718  return false;
2719 
2720  /*
2721  * If the tuple was concurrently updated and the caller of this
2722  * function requested for the updated tuple, skip the trigger
2723  * execution.
2724  */
2725  if (epqslot_candidate != NULL && epqslot != NULL)
2726  {
2727  *epqslot = epqslot_candidate;
2728  return false;
2729  }
2730 
2731  trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2732  }
2733  else
2734  {
2735  trigtuple = fdw_trigtuple;
2736  ExecForceStoreHeapTuple(trigtuple, slot, false);
2737  }
2738 
2739  LocTriggerData.type = T_TriggerData;
2740  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2743  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2744  for (i = 0; i < trigdesc->numtriggers; i++)
2745  {
2746  HeapTuple newtuple;
2747  Trigger *trigger = &trigdesc->triggers[i];
2748 
2749  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2750  TRIGGER_TYPE_ROW,
2751  TRIGGER_TYPE_BEFORE,
2752  TRIGGER_TYPE_DELETE))
2753  continue;
2754  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2755  NULL, slot, NULL))
2756  continue;
2757 
2758  LocTriggerData.tg_trigslot = slot;
2759  LocTriggerData.tg_trigtuple = trigtuple;
2760  LocTriggerData.tg_trigger = trigger;
2761  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2762  i,
2763  relinfo->ri_TrigFunctions,
2764  relinfo->ri_TrigInstrument,
2765  GetPerTupleMemoryContext(estate));
2766  if (newtuple == NULL)
2767  {
2768  result = false; /* tell caller to suppress delete */
2769  break;
2770  }
 /* DELETE triggers cannot replace the tuple; discard any copy returned */
2771  if (newtuple != trigtuple)
2772  heap_freetuple(newtuple);
2773  }
2774  if (should_free)
2775  heap_freetuple(trigtuple);
2776 
2777  return result;
2778 }
2779 
2780 /*
2781  * Note: is_crosspart_update must be true if the DELETE is being performed
2782  * as part of a cross-partition update.
2783  */
/*
 * Queue AFTER ROW triggers for DELETE (and/or capture the old row into a
 * transition table).  The old tuple comes either from tupleid (regular
 * table, re-fetched into the trigger's OLD slot) or fdw_trigtuple
 * (foreign table); exactly one must be supplied, as asserted below.
 *
 * NOTE(review): text extraction dropped the signature line with the
 * function name/first parameter, the lock-mode argument line of the
 * GetTupleForTrigger call, and the event-constant argument line of the
 * AfterTriggerSaveEvent call -- verify against upstream.
 */
2784 void
2786  ResultRelInfo *relinfo,
2787  ItemPointer tupleid,
2788  HeapTuple fdw_trigtuple,
2789  TransitionCaptureState *transition_capture,
2790  bool is_crosspart_update)
2791 {
2792  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2793 
2794  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2795  (transition_capture && transition_capture->tcs_delete_old_table))
2796  {
2797  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2798 
2799  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2800  if (fdw_trigtuple == NULL)
2801  GetTupleForTrigger(estate,
2802  NULL,
2803  relinfo,
2804  tupleid,
2806  slot,
2807  NULL,
2808  NULL,
2809  NULL);
2810  else
2811  ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2812 
2813  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2815  true, slot, NULL, NIL, NULL,
2816  transition_capture,
2817  is_crosspart_update);
2818  }
2819 }
2820 
/*
 * Fire INSTEAD OF ROW triggers for DELETE on the given tuple (used for
 * views).  Returns false if any trigger returned NULL, meaning the delete
 * was suppressed; true means the delete is considered done.
 *
 * NOTE(review): text extraction dropped the signature line carrying the
 * function name/first parameters and the tg_event flag-continuation
 * lines -- verify against upstream.
 */
2821 bool
2823  HeapTuple trigtuple)
2824 {
2825  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2826  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2827  TriggerData LocTriggerData = {0};
2828  int i;
2829 
2830  LocTriggerData.type = T_TriggerData;
2831  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2834  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2835 
2836  ExecForceStoreHeapTuple(trigtuple, slot, false);
2837 
2838  for (i = 0; i < trigdesc->numtriggers; i++)
2839  {
2840  HeapTuple rettuple;
2841  Trigger *trigger = &trigdesc->triggers[i];
2842 
2843  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2844  TRIGGER_TYPE_ROW,
2845  TRIGGER_TYPE_INSTEAD,
2846  TRIGGER_TYPE_DELETE))
2847  continue;
2848  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2849  NULL, slot, NULL))
2850  continue;
2851 
2852  LocTriggerData.tg_trigslot = slot;
2853  LocTriggerData.tg_trigtuple = trigtuple;
2854  LocTriggerData.tg_trigger = trigger;
2855  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2856  i,
2857  relinfo->ri_TrigFunctions,
2858  relinfo->ri_TrigInstrument,
2859  GetPerTupleMemoryContext(estate));
2860  if (rettuple == NULL)
2861  return false; /* Delete was suppressed */
 /* DELETE triggers cannot replace the tuple; discard any copy returned */
2862  if (rettuple != trigtuple)
2863  heap_freetuple(rettuple);
2864  }
2865  return true;
2866 }
2867 
/*
 * Fire all enabled BEFORE STATEMENT triggers for UPDATE on the given result
 * relation.  Like the INSERT/DELETE variants, but also computes the set of
 * updated columns so TriggerEnabled can honor UPDATE OF column lists.
 * Errors out if a trigger returns a tuple.
 *
 * NOTE(review): text extraction dropped the signature line with the
 * function name/parameters, plus the lines completing the "fired
 * already?" check and the tg_event flag expression -- verify upstream.
 */
2868 void
2870 {
2871  TriggerDesc *trigdesc;
2872  int i;
2873  TriggerData LocTriggerData = {0};
2874  Bitmapset *updatedCols;
2875 
2876  trigdesc = relinfo->ri_TrigDesc;
2877 
2878  if (trigdesc == NULL)
2879  return;
2880  if (!trigdesc->trig_update_before_statement)
2881  return;
2882 
2883  /* no-op if we already fired BS triggers in this context */
2885  CMD_UPDATE))
2886  return;
2887 
2888  /* statement-level triggers operate on the parent table */
2889  Assert(relinfo->ri_RootResultRelInfo == NULL);
2890 
2891  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2892 
2893  LocTriggerData.type = T_TriggerData;
2894  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2896  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2897  LocTriggerData.tg_updatedcols = updatedCols;
2898  for (i = 0; i < trigdesc->numtriggers; i++)
2899  {
2900  Trigger *trigger = &trigdesc->triggers[i];
2901  HeapTuple newtuple;
2902 
2903  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2904  TRIGGER_TYPE_STATEMENT,
2905  TRIGGER_TYPE_BEFORE,
2906  TRIGGER_TYPE_UPDATE))
2907  continue;
2908  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2909  updatedCols, NULL, NULL))
2910  continue;
2911 
2912  LocTriggerData.tg_trigger = trigger;
2913  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2914  i,
2915  relinfo->ri_TrigFunctions,
2916  relinfo->ri_TrigInstrument,
2917  GetPerTupleMemoryContext(estate));
2918 
2919  if (newtuple)
2920  ereport(ERROR,
2921  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2922  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2923  }
2924 }
2925 
/*
 * Queue AFTER STATEMENT triggers for UPDATE (fired later via
 * AfterTriggerSaveEvent), passing along the set of updated columns.
 *
 * NOTE(review): text extraction dropped the signature line carrying the
 * function name/first parameters and the event-constant argument line of
 * the AfterTriggerSaveEvent call -- verify against upstream.
 */
2926 void
2928  TransitionCaptureState *transition_capture)
2929 {
2930  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2931 
2932  /* statement-level triggers operate on the parent table */
2933  Assert(relinfo->ri_RootResultRelInfo == NULL);
2934 
2935  if (trigdesc && trigdesc->trig_update_after_statement)
2936  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2938  false, NULL, NULL, NIL,
2939  ExecGetAllUpdatedCols(relinfo, estate),
2940  transition_capture,
2941  false);
2942 }
2943 
2944 bool
2946  ResultRelInfo *relinfo,
2947  ItemPointer tupleid,
2948  HeapTuple fdw_trigtuple,
2949  TupleTableSlot *newslot,
2950  TM_Result *tmresult,
2951  TM_FailureData *tmfd)
2952 {
2953  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2954  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2955  HeapTuple newtuple = NULL;
2956  HeapTuple trigtuple;
2957  bool should_free_trig = false;
2958  bool should_free_new = false;
2959  TriggerData LocTriggerData = {0};
2960  int i;
2961  Bitmapset *updatedCols;
2962  LockTupleMode lockmode;
2963 
2964  /* Determine lock mode to use */
2965  lockmode = ExecUpdateLockMode(estate, relinfo);
2966 
2967  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2968  if (fdw_trigtuple == NULL)
2969  {
2970  TupleTableSlot *epqslot_candidate = NULL;
2971 
2972  /* get a copy of the on-disk tuple we are planning to update */
2973  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2974  lockmode, oldslot, &epqslot_candidate,
2975  tmresult, tmfd))
2976  return false; /* cancel the update action */
2977 
2978  /*
2979  * In READ COMMITTED isolation level it's possible that target tuple
2980  * was changed due to concurrent update. In that case we have a raw
2981  * subplan output tuple in epqslot_candidate, and need to form a new
2982  * insertable tuple using ExecGetUpdateNewTuple to replace the one we
2983  * received in newslot. Neither we nor our callers have any further
2984  * interest in the passed-in tuple, so it's okay to overwrite newslot
2985  * with the newer data.
2986  *
2987  * (Typically, newslot was also generated by ExecGetUpdateNewTuple, so
2988  * that epqslot_clean will be that same slot and the copy step below
2989  * is not needed.)
2990  */
2991  if (epqslot_candidate != NULL)
2992  {
2993  TupleTableSlot *epqslot_clean;
2994 
2995  epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
2996  oldslot);
2997 
2998  if (newslot != epqslot_clean)
2999  ExecCopySlot(newslot, epqslot_clean);
3000  }
3001 
3002  trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
3003  }
3004  else
3005  {
3006  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3007  trigtuple = fdw_trigtuple;
3008  }
3009 
3010  LocTriggerData.type = T_TriggerData;
3011  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3014  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3015  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
3016  LocTriggerData.tg_updatedcols = updatedCols;
3017  for (i = 0; i < trigdesc->numtriggers; i++)
3018  {
3019  Trigger *trigger = &trigdesc->triggers[i];
3020  HeapTuple oldtuple;
3021 
3022  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3023  TRIGGER_TYPE_ROW,
3024  TRIGGER_TYPE_BEFORE,
3025  TRIGGER_TYPE_UPDATE))
3026  continue;
3027  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3028  updatedCols, oldslot, newslot))
3029  continue;
3030 
3031  if (!newtuple)
3032  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
3033 
3034  LocTriggerData.tg_trigslot = oldslot;
3035  LocTriggerData.tg_trigtuple = trigtuple;
3036  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3037  LocTriggerData.tg_newslot = newslot;
3038  LocTriggerData.tg_trigger = trigger;
3039  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3040  i,
3041  relinfo->ri_TrigFunctions,
3042  relinfo->ri_TrigInstrument,
3043  GetPerTupleMemoryContext(estate));
3044 
3045  if (newtuple == NULL)
3046  {
3047  if (should_free_trig)
3048  heap_freetuple(trigtuple);
3049  if (should_free_new)
3050  heap_freetuple(oldtuple);
3051  return false; /* "do nothing" */
3052  }
3053  else if (newtuple != oldtuple)
3054  {
3055  ExecForceStoreHeapTuple(newtuple, newslot, false);
3056 
3057  /*
3058  * If the tuple returned by the trigger / being stored, is the old
3059  * row version, and the heap tuple passed to the trigger was
3060  * allocated locally, materialize the slot. Otherwise we might
3061  * free it while still referenced by the slot.
3062  */
3063  if (should_free_trig && newtuple == trigtuple)
3064  ExecMaterializeSlot(newslot);
3065 
3066  if (should_free_new)
3067  heap_freetuple(oldtuple);
3068 
3069  /* signal tuple should be re-fetched if used */
3070  newtuple = NULL;
3071  }
3072  }
3073  if (should_free_trig)
3074  heap_freetuple(trigtuple);
3075 
3076  return true;
3077 }
3078 
3079 /*
3080  * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
3081  * and destination partitions, respectively, of a cross-partition update of
3082  * the root partitioned table mentioned in the query, given by 'relinfo'.
3083  * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
3084  * partition, and 'newslot' contains the "new" tuple in the destination
3085  * partition. This interface makes it possible to support the requirements of
3086  * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
3087  * that case.
3088  */
3089 void
3091  ResultRelInfo *src_partinfo,
3092  ResultRelInfo *dst_partinfo,
3093  ItemPointer tupleid,
3094  HeapTuple fdw_trigtuple,
3095  TupleTableSlot *newslot,
3096  List *recheckIndexes,
3097  TransitionCaptureState *transition_capture,
3098  bool is_crosspart_update)
3099 {
3100  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3101 
3102  if ((trigdesc && trigdesc->trig_update_after_row) ||
3103  (transition_capture &&
3104  (transition_capture->tcs_update_old_table ||
3105  transition_capture->tcs_update_new_table)))
3106  {
3107  /*
3108  * Note: if the UPDATE is converted into a DELETE+INSERT as part of
3109  * update-partition-key operation, then this function is also called
3110  * separately for DELETE and INSERT to capture transition table rows.
3111  * In such case, either old tuple or new tuple can be NULL.
3112  */
3113  TupleTableSlot *oldslot;
3114  ResultRelInfo *tupsrc;
3115 
3116  Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
3117  !is_crosspart_update);
3118 
3119  tupsrc = src_partinfo ? src_partinfo : relinfo;
3120  oldslot = ExecGetTriggerOldSlot(estate, tupsrc);
3121 
3122  if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
3123  GetTupleForTrigger(estate,
3124  NULL,
3125  tupsrc,
3126  tupleid,
3128  oldslot,
3129  NULL,
3130  NULL,
3131  NULL);
3132  else if (fdw_trigtuple != NULL)
3133  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3134  else
3135  ExecClearTuple(oldslot);
3136 
3137  AfterTriggerSaveEvent(estate, relinfo,
3138  src_partinfo, dst_partinfo,
3140  true,
3141  oldslot, newslot, recheckIndexes,
3142  ExecGetAllUpdatedCols(relinfo, estate),
3143  transition_capture,
3144  is_crosspart_update);
3145  }
3146 }
3147 
3148 bool
/*
 * Fire INSTEAD OF UPDATE row triggers (presumably ExecIRUpdateTriggers; the
 * function-name line 3149 and the TRIGGER_EVENT_ROW/INSTEAD event bits on
 * elided lines 3161-3162 are missing from this rendering).  Returns false if
 * a trigger returned NULL ("do nothing"); otherwise returns true with
 * newslot holding whatever tuple the last trigger returned.
 */
3150  HeapTuple trigtuple, TupleTableSlot *newslot)
3151 {
3152  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3153  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
3154  HeapTuple newtuple = NULL;
3155  bool should_free;
3156  TriggerData LocTriggerData = {0};
3157  int i;
3158 
3159  LocTriggerData.type = T_TriggerData;
3160  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3163  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3164 
 /* Make the passed-in old tuple available via oldslot for trigger code */
3165  ExecForceStoreHeapTuple(trigtuple, oldslot, false);
3166 
3167  for (i = 0; i < trigdesc->numtriggers; i++)
3168  {
3169  Trigger *trigger = &trigdesc->triggers[i];
3170  HeapTuple oldtuple;
3171 
3172  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3173  TRIGGER_TYPE_ROW,
3174  TRIGGER_TYPE_INSTEAD,
3175  TRIGGER_TYPE_UPDATE))
3176  continue;
3177  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3178  NULL, oldslot, newslot))
3179  continue;
3180 
 /* Materialize newslot as a heap tuple the first time a trigger fires */
3181  if (!newtuple)
3182  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
3183 
3184  LocTriggerData.tg_trigslot = oldslot;
3185  LocTriggerData.tg_trigtuple = trigtuple;
3186  LocTriggerData.tg_newslot = newslot;
3187  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3188 
3189  LocTriggerData.tg_trigger = trigger;
3190  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3191  i,
3192  relinfo->ri_TrigFunctions,
3193  relinfo->ri_TrigInstrument,
3194  GetPerTupleMemoryContext(estate));
3195  if (newtuple == NULL)
3196  {
3197  return false; /* "do nothing" */
3198  }
3199  else if (newtuple != oldtuple)
3200  {
 /* Trigger replaced the tuple: store it back into newslot */
3201  ExecForceStoreHeapTuple(newtuple, newslot, false);
3202 
3203  if (should_free)
3204  heap_freetuple(oldtuple);
3205 
3206  /* signal tuple should be re-fetched if used */
3207  newtuple = NULL;
3208  }
3209  }
3210 
3211  return true;
3212 }
3213 
3214 void
/*
 * Fire BEFORE STATEMENT TRUNCATE triggers (presumably ExecBSTruncateTriggers;
 * the signature line 3215 and the TRIGGER_EVENT_BEFORE bit on elided line
 * 3230 are missing from this rendering).  Statement-level triggers may not
 * return a tuple; a non-NULL return raises a protocol-violation error.
 */
3216 {
3217  TriggerDesc *trigdesc;
3218  int i;
3219  TriggerData LocTriggerData = {0};
3220 
3221  trigdesc = relinfo->ri_TrigDesc;
3222 
 /* Quick exits when there is nothing to fire */
3223  if (trigdesc == NULL)
3224  return;
3225  if (!trigdesc->trig_truncate_before_statement)
3226  return;
3227 
3228  LocTriggerData.type = T_TriggerData;
3229  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3231  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3232 
3233  for (i = 0; i < trigdesc->numtriggers; i++)
3234  {
3235  Trigger *trigger = &trigdesc->triggers[i];
3236  HeapTuple newtuple;
3237 
3238  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3239  TRIGGER_TYPE_STATEMENT,
3240  TRIGGER_TYPE_BEFORE,
3241  TRIGGER_TYPE_TRUNCATE))
3242  continue;
3243  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3244  NULL, NULL, NULL))
3245  continue;
3246 
3247  LocTriggerData.tg_trigger = trigger;
3248  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3249  i,
3250  relinfo->ri_TrigFunctions,
3251  relinfo->ri_TrigInstrument,
3252  GetPerTupleMemoryContext(estate));
3253 
3254  if (newtuple)
3255  ereport(ERROR,
3256  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3257  errmsg("BEFORE STATEMENT trigger cannot return a value")));
3258  }
3259 }
3260 
3261 void
/*
 * Queue AFTER STATEMENT TRUNCATE trigger events (presumably
 * ExecASTruncateTriggers; the signature line 3262 and the event argument to
 * AfterTriggerSaveEvent on elided line 3269 are missing from this rendering).
 * No-op unless the relation has AFTER TRUNCATE statement triggers.
 */
3263 {
3264  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3265 
3266  if (trigdesc && trigdesc->trig_truncate_after_statement)
3267  AfterTriggerSaveEvent(estate, relinfo,
3268  NULL, NULL,
3270  false, NULL, NULL, NIL, NULL, NULL,
3271  false);
3272 }
3273 
3274 
3275 /*
3276  * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3277  */
3278 static bool
/*
 * GetTupleForTrigger, per the comment above (the function-name line 3279 is
 * elided in this rendering, as are the lockflags setup before line 3306 and
 * the errcode lines in the TM_Updated/TM_Deleted arms -- presumably
 * serialization-failure errcodes; verify against the original trigger.c).
 *
 * With epqslot != NULL we lock the tuple and may chase its update chain,
 * returning an EvalPlanQual result slot through *epqslot; with epqslot ==
 * NULL we simply fetch the row version at "tid".  Returns false when the
 * caller should skip processing this tuple.
 */
3280  EPQState *epqstate,
3281  ResultRelInfo *relinfo,
3282  ItemPointer tid,
3283  LockTupleMode lockmode,
3284  TupleTableSlot *oldslot,
3285  TupleTableSlot **epqslot,
3286  TM_Result *tmresultp,
3287  TM_FailureData *tmfdp)
3288 {
3289  Relation relation = relinfo->ri_RelationDesc;
3290 
3291  if (epqslot != NULL)
3292  {
3293  TM_Result test;
3294  TM_FailureData tmfd;
3295  int lockflags = 0;
3296 
3297  *epqslot = NULL;
3298 
3299  /* caller must pass an epqstate if EvalPlanQual is possible */
3300  Assert(epqstate != NULL);
3301 
3302  /*
3303  * lock tuple for update
3304  */
 /* NOTE(review): a condition guarding this flag is on elided line 3305 */
3306  lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
3307  test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
3308  estate->es_output_cid,
3309  lockmode, LockWaitBlock,
3310  lockflags,
3311  &tmfd);
3312 
3313  /* Let the caller know about the status of this operation */
3314  if (tmresultp)
3315  *tmresultp = test;
3316  if (tmfdp)
3317  *tmfdp = tmfd;
3318 
3319  switch (test)
3320  {
3321  case TM_SelfModified:
3322 
3323  /*
3324  * The target tuple was already updated or deleted by the
3325  * current command, or by a later command in the current
3326  * transaction. We ignore the tuple in the former case, and
3327  * throw error in the latter case, for the same reasons
3328  * enumerated in ExecUpdate and ExecDelete in
3329  * nodeModifyTable.c.
3330  */
3331  if (tmfd.cmax != estate->es_output_cid)
3332  ereport(ERROR,
3333  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3334  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3335  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3336 
3337  /* treat it as deleted; do not process */
3338  return false;
3339 
3340  case TM_Ok:
3341  if (tmfd.traversed)
3342  {
3343  /*
3344  * Recheck the tuple using EPQ. For MERGE, we leave this
3345  * to the caller (it must do additional rechecking, and
3346  * might end up executing a different action entirely).
3347  */
3348  if (estate->es_plannedstmt->commandType == CMD_MERGE)
3349  {
3350  if (tmresultp)
3351  *tmresultp = TM_Updated;
3352  return false;
3353  }
3354 
3355  *epqslot = EvalPlanQual(epqstate,
3356  relation,
3357  relinfo->ri_RangeTableIndex,
3358  oldslot);
3359 
3360  /*
3361  * If PlanQual failed for updated tuple - we must not
3362  * process this tuple!
3363  */
3364  if (TupIsNull(*epqslot))
3365  {
3366  *epqslot = NULL;
3367  return false;
3368  }
3369  }
3370  break;
3371 
3372  case TM_Updated:
 /* NOTE(review): errcode/guard lines 3373 and 3375 elided here */
3374  ereport(ERROR,
3376  errmsg("could not serialize access due to concurrent update")));
3377  elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3378  break;
3379 
3380  case TM_Deleted:
 /* NOTE(review): errcode/guard lines 3381 and 3383 elided here */
3382  ereport(ERROR,
3384  errmsg("could not serialize access due to concurrent delete")));
3385  /* tuple was deleted */
3386  return false;
3387 
3388  case TM_Invisible:
3389  elog(ERROR, "attempted to lock invisible tuple");
3390  break;
3391 
3392  default:
3393  elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3394  return false; /* keep compiler quiet */
3395  }
3396  }
3397  else
3398  {
3399  /*
3400  * We expect the tuple to be present, thus very simple error handling
3401  * suffices.
3402  */
3403  if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3404  oldslot))
3405  elog(ERROR, "failed to fetch tuple for trigger");
3406  }
3407 
3408  return true;
3409 }
3410 
3411 /*
3412  * Is trigger enabled to fire?
3413  */
3414 static bool
/*
 * TriggerEnabled (signature line 3415 elided in this rendering): decide
 * whether the given trigger should fire for this event, based on (1) the
 * replication-role-dependent enable state (the if-condition itself is on
 * elided line 3421 -- presumably a SessionReplicationRole test), (2) the
 * trigger's column list, checked only for UPDATE events, and (3) the WHEN
 * clause, if any, evaluated with OLD/NEW exposed as INNER_VAR/OUTER_VAR.
 */
3416  Trigger *trigger, TriggerEvent event,
3417  Bitmapset *modifiedCols,
3418  TupleTableSlot *oldslot, TupleTableSlot *newslot)
3419 {
3420  /* Check replication-role-dependent enable state */
3422  {
3423  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3424  trigger->tgenabled == TRIGGER_DISABLED)
3425  return false;
3426  }
3427  else /* ORIGIN or LOCAL role */
3428  {
3429  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3430  trigger->tgenabled == TRIGGER_DISABLED)
3431  return false;
3432  }
3433 
3434  /*
3435  * Check for column-specific trigger (only possible for UPDATE, and in
3436  * fact we *must* ignore tgattr for other event types)
3437  */
3438  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3439  {
3440  int i;
3441  bool modified;
3442 
3443  modified = false;
3444  for (i = 0; i < trigger->tgnattr; i++)
3445  {
 /* NOTE(review): elided line 3446 presumably tests tgattr[i] membership */
3447  modifiedCols))
3448  {
3449  modified = true;
3450  break;
3451  }
3452  }
3453  if (!modified)
3454  return false;
3455  }
3456 
3457  /* Check for WHEN clause */
3458  if (trigger->tgqual)
3459  {
3460  ExprState **predicate;
3461  ExprContext *econtext;
3462  MemoryContext oldContext;
3463  int i;
3464 
3465  Assert(estate != NULL);
3466 
3467  /*
3468  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3469  * matching element of relinfo->ri_TrigWhenExprs[]
3470  */
3471  i = trigger - relinfo->ri_TrigDesc->triggers;
3472  predicate = &relinfo->ri_TrigWhenExprs[i];
3473 
3474  /*
3475  * If first time through for this WHEN expression, build expression
3476  * node trees for it. Keep them in the per-query memory context so
3477  * they'll survive throughout the query.
3478  */
3479  if (*predicate == NULL)
3480  {
3481  Node *tgqual;
3482 
3483  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3484  tgqual = stringToNode(trigger->tgqual);
3485  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
 /* NOTE(review): the ChangeVarNodes calls on lines 3486-3487 are elided */
3488  /* ExecPrepareQual wants implicit-AND form */
3489  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3490  *predicate = ExecPrepareQual((List *) tgqual, estate);
3491  MemoryContextSwitchTo(oldContext);
3492  }
3493 
3494  /*
3495  * We will use the EState's per-tuple context for evaluating WHEN
3496  * expressions (creating it if it's not already there).
3497  */
3498  econtext = GetPerTupleExprContext(estate);
3499 
3500  /*
3501  * Finally evaluate the expression, making the old and/or new tuples
3502  * available as INNER_VAR/OUTER_VAR respectively.
3503  */
3504  econtext->ecxt_innertuple = oldslot;
3505  econtext->ecxt_outertuple = newslot;
3506  if (!ExecQual(*predicate, econtext))
3507  return false;
3508  }
3509 
3510  return true;
3511 }
3512 
3513 
3514 /* ----------
3515  * After-trigger stuff
3516  *
3517  * The AfterTriggersData struct holds data about pending AFTER trigger events
3518  * during the current transaction tree. (BEFORE triggers are fired
3519  * immediately so we don't need any persistent state about them.) The struct
3520  * and most of its subsidiary data are kept in TopTransactionContext; however
3521  * some data that can be discarded sooner appears in the CurTransactionContext
3522  * of the relevant subtransaction. Also, the individual event records are
3523  * kept in a separate sub-context of TopTransactionContext. This is done
3524  * mainly so that it's easy to tell from a memory context dump how much space
3525  * is being eaten by trigger events.
3526  *
3527  * Because the list of pending events can grow large, we go to some
3528  * considerable effort to minimize per-event memory consumption. The event
3529  * records are grouped into chunks and common data for similar events in the
3530  * same chunk is only stored once.
3531  *
3532  * XXX We need to be able to save the per-event data in a file if it grows too
3533  * large.
3534  * ----------
3535  */
3536 
3537 /* Per-trigger SET CONSTRAINT status */
3539 {
3543 
3545 
3546 /*
3547  * SET CONSTRAINT intra-transaction status.
3548  *
3549  * We make this a single palloc'd object so it can be copied and freed easily.
3550  *
3551  * all_isset and all_isdeferred are used to keep track
3552  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3553  *
3554  * trigstates[] stores per-trigger tgisdeferred settings.
3555  */
3557 {
3560  int numstates; /* number of trigstates[] entries in use */
3561  int numalloc; /* allocated size of trigstates[] */
3564 
3566 
3567 
3568 /*
3569  * Per-trigger-event data
3570  *
3571  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3572  * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions.
3573  * Each event record also has an associated AfterTriggerSharedData that is
3574  * shared across all instances of similar events within a "chunk".
3575  *
3576  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3577  * fields. Updates of regular tables use two; inserts and deletes of regular
3578  * tables use one; foreign tables always use zero and save the tuple(s) to a
3579  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3580  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3581  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3582  * tuple(s). This permits storing tuples once regardless of the number of
3583  * row-level triggers on a foreign table.
3584  *
3585  * When updates on partitioned tables cause rows to move between partitions,
3586  * the OIDs of both partitions are stored too, so that the tuples can be
3587  * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross-
3588  * partition update").
3589  *
3590  * Note that we need triggers on foreign tables to be fired in exactly the
3591  * order they were queued, so that the tuples come out of the tuplestore in
3592  * the right order. To ensure that, we forbid deferrable (constraint)
3593  * triggers on foreign tables. This also ensures that such triggers do not
3594  * get deferred into outer trigger query levels, meaning that it's okay to
3595  * destroy the tuplestore at the end of the query level.
3596  *
3597  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3598  * require no ctid field. We lack the flag bit space to neatly represent that
3599  * distinct case, and it seems unlikely to be worth much trouble.
3600  *
3601  * Note: ats_firing_id is initially zero and is set to something else when
3602  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3603  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3604  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3605  * because all instances of the same type of event in a given event list will
3606  * be fired at the same time, if they were queued between the same firing
3607  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3608  * a new event to an existing AfterTriggerSharedData record.
3609  */
3611 
3612 #define AFTER_TRIGGER_OFFSET 0x07FFFFFF /* must be low-order bits */
3613 #define AFTER_TRIGGER_DONE 0x80000000
3614 #define AFTER_TRIGGER_IN_PROGRESS 0x40000000
3615 /* bits describing the size and tuple sources of this event */
3616 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3617 #define AFTER_TRIGGER_FDW_FETCH 0x20000000
3618 #define AFTER_TRIGGER_1CTID 0x10000000
3619 #define AFTER_TRIGGER_2CTID 0x30000000
3620 #define AFTER_TRIGGER_CP_UPDATE 0x08000000
3621 #define AFTER_TRIGGER_TUP_BITS 0x38000000
3623 
3625 {
3626  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3627  Oid ats_tgoid; /* the trigger's ID */
3628  Oid ats_relid; /* the relation it's on */
3629  CommandId ats_firing_id; /* ID for firing cycle */
3630  struct AfterTriggersTableData *ats_table; /* transition table access */
3631  Bitmapset *ats_modifiedcols; /* modified columns */
3633 
3635 
3637 {
3638  TriggerFlags ate_flags; /* status bits and offset to shared data */
3639  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3640  ItemPointerData ate_ctid2; /* new updated tuple */
3641 
3642  /*
3643  * During a cross-partition update of a partitioned table, we also store
3644  * the OIDs of source and destination partitions that are needed to fetch
3645  * the old (ctid1) and the new tuple (ctid2) from, respectively.
3646  */
3650 
3651 /* AfterTriggerEventData, minus ate_src_part, ate_dst_part */
3653 {
3658 
3659 /* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */
3661 {
3662  TriggerFlags ate_flags; /* status bits and offset to shared data */
3663  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3665 
3666 /* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */
3668 {
3669  TriggerFlags ate_flags; /* status bits and offset to shared data */
3671 
3672 #define SizeofTriggerEvent(evt) \
3673  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \
3674  sizeof(AfterTriggerEventData) : \
3675  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3676  sizeof(AfterTriggerEventDataNoOids) : \
3677  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3678  sizeof(AfterTriggerEventDataOneCtid) : \
3679  sizeof(AfterTriggerEventDataZeroCtids))))
3680 
3681 #define GetTriggerSharedData(evt) \
3682  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3683 
3684 /*
3685  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3686  * larger chunks (a slightly more sophisticated version of an expansible
3687  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3688  * AfterTriggerEventData records; the space between endfree and endptr is
3689  * occupied by AfterTriggerSharedData records.
3690  */
3692 {
3693  struct AfterTriggerEventChunk *next; /* list link */
3694  char *freeptr; /* start of free space in chunk */
3695  char *endfree; /* end of free space in chunk */
3696  char *endptr; /* end of chunk */
3697  /* event data follows here */
3699 
3700 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3701 
3702 /* A list of events */
3704 {
3707  char *tailfree; /* freeptr of tail chunk */
3709 
3710 /* Macros to help in iterating over a list of events */
3711 #define for_each_chunk(cptr, evtlist) \
3712  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3713 #define for_each_event(eptr, cptr) \
3714  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3715  (char *) eptr < (cptr)->freeptr; \
3716  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3717 /* Use this if no special per-chunk processing is needed */
3718 #define for_each_event_chunk(eptr, cptr, evtlist) \
3719  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3720 
3721 /* Macros for iterating from a start point that might not be list start */
3722 #define for_each_chunk_from(cptr) \
3723  for (; cptr != NULL; cptr = cptr->next)
3724 #define for_each_event_from(eptr, cptr) \
3725  for (; \
3726  (char *) eptr < (cptr)->freeptr; \
3727  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3728 
3729 
3730 /*
3731  * All per-transaction data for the AFTER TRIGGERS module.
3732  *
3733  * AfterTriggersData has the following fields:
3734  *
3735  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3736  * We mark firable events with the current firing cycle's ID so that we can
3737  * tell which ones to work on. This ensures sane behavior if a trigger
3738  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3739  * only fire those events that weren't already scheduled for firing.
3740  *
3741  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3742  * This is saved and restored across failed subtransactions.
3743  *
3744  * events is the current list of deferred events. This is global across
3745  * all subtransactions of the current transaction. In a subtransaction
3746  * abort, we know that the events added by the subtransaction are at the
3747  * end of the list, so it is relatively easy to discard them. The event
3748  * list chunks themselves are stored in event_cxt.
3749  *
3750  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3751  * (-1 when the stack is empty).
3752  *
3753  * query_stack[query_depth] is the per-query-level data, including these fields:
3754  *
3755  * events is a list of AFTER trigger events queued by the current query.
3756  * None of these are valid until the matching AfterTriggerEndQuery call
3757  * occurs. At that point we fire immediate-mode triggers, and append any
3758  * deferred events to the main events list.
3759  *
3760  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3761  * needed by events queued by the current query. (Note: we use just one
3762  * tuplestore even though more than one foreign table might be involved.
3763  * This is okay because tuplestores don't really care what's in the tuples
3764  * they store; but it's possible that someday it'd break.)
3765  *
3766  * tables is a List of AfterTriggersTableData structs for target tables
3767  * of the current query (see below).
3768  *
3769  * maxquerydepth is just the allocated length of query_stack.
3770  *
3771  * trans_stack holds per-subtransaction data, including these fields:
3772  *
3773  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3774  * state data. Each subtransaction level that modifies that state first
3775  * saves a copy, which we use to restore the state if we abort.
3776  *
3777  * events is a copy of the events head/tail pointers,
3778  * which we use to restore those values during subtransaction abort.
3779  *
3780  * query_depth is the subtransaction-start-time value of query_depth,
3781  * which we similarly use to clean up at subtransaction abort.
3782  *
3783  * firing_counter is the subtransaction-start-time value of firing_counter.
3784  * We use this to recognize which deferred triggers were fired (or marked
3785  * for firing) within an aborted subtransaction.
3786  *
3787  * We use GetCurrentTransactionNestLevel() to determine the correct array
3788  * index in trans_stack. maxtransdepth is the number of allocated entries in
3789  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3790  * in cases where errors during subxact abort cause multiple invocations
3791  * of AfterTriggerEndSubXact() at the same nesting depth.)
3792  *
3793  * We create an AfterTriggersTableData struct for each target table of the
3794  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3795  * either transition tables or statement-level triggers. This is used to
3796  * hold the relevant transition tables, as well as info tracking whether
3797  * we already queued the statement triggers. (We use that info to prevent
3798  * firing the same statement triggers more than once per statement, or really
3799  * once per transition table set.) These structs, along with the transition
3800  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3801  * That's sufficient lifespan because we don't allow transition tables to be
3802  * used by deferrable triggers, so they only need to survive until
3803  * AfterTriggerEndQuery.
3804  */
3808 
3809 typedef struct AfterTriggersData
3810 {
3811  CommandId firing_counter; /* next firing ID to assign */
3812  SetConstraintState state; /* the active S C state */
3813  AfterTriggerEventList events; /* deferred-event list */
3814  MemoryContext event_cxt; /* memory context for events, if any */
3815 
3816  /* per-query-level data: */
3817  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3818  int query_depth; /* current index in above array */
3819  int maxquerydepth; /* allocated len of above array */
3820 
3821  /* per-subtransaction-level data: */
3822  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3823  int maxtransdepth; /* allocated len of above array */
3825 
3827 {
3828  AfterTriggerEventList events; /* events pending from this query */
3829  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3830  List *tables; /* list of AfterTriggersTableData, see below */
3831 };
3832 
3834 {
3835  /* these fields are just for resetting at subtrans abort: */
3836  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3837  AfterTriggerEventList events; /* saved list pointer */
3838  int query_depth; /* saved query_depth */
3839  CommandId firing_counter; /* saved firing_counter */
3840 };
3841 
3843 {
3844  /* relid + cmdType form the lookup key for these structs: */
3845  Oid relid; /* target table's OID */
3846  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3847  bool closed; /* true when no longer OK to add tuples */
3848  bool before_trig_done; /* did we already queue BS triggers? */
3849  bool after_trig_done; /* did we already queue AS triggers? */
3850  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3851 
3852  /*
3853  * We maintain separate transition tables for UPDATE/INSERT/DELETE since
3854  * MERGE can run all three actions in a single statement. Note that UPDATE
3855  * needs both old and new transition tables whereas INSERT needs only new,
3856  * and DELETE needs only old.
3857  */
3858 
3859  /* "old" transition table for UPDATE, if any */
3861  /* "new" transition table for UPDATE, if any */
3863  /* "old" transition table for DELETE, if any */
3865  /* "new" transition table for INSERT, if any */
3867 
3868  TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3869 };
3870 
3872 
3873 static void AfterTriggerExecute(EState *estate,
3874  AfterTriggerEvent event,
3875  ResultRelInfo *relInfo,
3876  ResultRelInfo *src_relInfo,
3877  ResultRelInfo *dst_relInfo,
3878  TriggerDesc *trigdesc,
3879  FmgrInfo *finfo,
3880  Instrumentation *instr,
3881  MemoryContext per_tuple_context,
3882  TupleTableSlot *trig_tuple_slot1,
3883  TupleTableSlot *trig_tuple_slot2);
3885  CmdType cmdType);
3887  TupleDesc tupdesc);
3889  TupleTableSlot *oldslot,
3890  TupleTableSlot *newslot,
3891  TransitionCaptureState *transition_capture);
3892 static void TransitionTableAddTuple(EState *estate,
3893  TransitionCaptureState *transition_capture,
3894  ResultRelInfo *relinfo,
3895  TupleTableSlot *slot,
3896  TupleTableSlot *original_insert_tuple,
3897  Tuplestorestate *tuplestore);
3899 static SetConstraintState SetConstraintStateCreate(int numalloc);
3902  Oid tgoid, bool tgisdeferred);
3903 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3904 
3905 
3906 /*
3907  * Get the FDW tuplestore for the current trigger query level, creating it
3908  * if necessary.
3909  */
3910 static Tuplestorestate *
/*
 * GetCurrentFDWTuplestore, per the comment above (the name line 3911 and the
 * lines that read/write the cached pointer -- presumably the current
 * query-level's fdw_tuplestore field -- are elided in this rendering).
 * Lazily creates the tuplestore that holds foreign-table trigger tuples for
 * the current trigger query level.
 */
3912 {
3913  Tuplestorestate *ret;
3914 
 /* NOTE(review): elided line 3915 presumably initializes ret from the cache */
3916  if (ret == NULL)
3917  {
3918  MemoryContext oldcxt;
3919  ResourceOwner saveResourceOwner;
3920 
3921  /*
3922  * Make the tuplestore valid until end of subtransaction. We really
3923  * only need it until AfterTriggerEndQuery().
3924  */
 /* NOTE(review): elided lines 3925/3927 presumably switch context/owner */
3926  saveResourceOwner = CurrentResourceOwner;
3928 
3929  ret = tuplestore_begin_heap(false, false, work_mem);
3930 
3931  CurrentResourceOwner = saveResourceOwner;
3932  MemoryContextSwitchTo(oldcxt);
3933 
 /* NOTE(review): elided line 3934 presumably stores ret back in the cache */
3935  }
3936 
3937  return ret;
3938 }
3939 
3940 /* ----------
3941  * afterTriggerCheckState()
3942  *
3943  * Returns true if the trigger event is actually in state DEFERRED.
3944  * ----------
3945  */
3946 static bool
/*
 * afterTriggerCheckState, per the comment above (the signature line 3947 and
 * the declaration of "state" on elided line 3950 -- presumably
 * SetConstraintState state = afterTriggers.state -- are missing from this
 * rendering).  Returns true if the trigger event is currently DEFERRED,
 * consulting per-trigger and ALL settings from SET CONSTRAINTS before
 * falling back to the trigger's INITIALLY DEFERRED/IMMEDIATE default.
 */
3948 {
3949  Oid tgoid = evtshared->ats_tgoid;
3951  int i;
3952 
3953  /*
3954  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3955  * constraints declared NOT DEFERRABLE), the state is always false.
3956  */
3957  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3958  return false;
3959 
3960  /*
3961  * If constraint state exists, SET CONSTRAINTS might have been executed
3962  * either for this trigger or for all triggers.
3963  */
3964  if (state != NULL)
3965  {
3966  /* Check for SET CONSTRAINTS for this specific trigger. */
3967  for (i = 0; i < state->numstates; i++)
3968  {
3969  if (state->trigstates[i].sct_tgoid == tgoid)
3970  return state->trigstates[i].sct_tgisdeferred;
3971  }
3972 
3973  /* Check for SET CONSTRAINTS ALL. */
3974  if (state->all_isset)
3975  return state->all_isdeferred;
3976  }
3977 
3978  /*
3979  * Otherwise return the default state for the trigger.
3980  */
3981  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3982 }
3983 
3984 
3985 /* ----------
3986  * afterTriggerAddEvent()
3987  *
3988  * Add a new trigger event to the specified queue.
3989  * The passed-in event data is copied.
 *
 * Events live in chunked storage: each chunk holds event records growing
 * up from the front and shared-data records growing down from the back;
 * the event's link to its shared record is stored as an offset in
 * ate_flags (hence the AFTER_TRIGGER_OFFSET hard limit below).
 *
 * NOTE(review): this rendering has dropped some original source lines
 * (numbering skips 3993 and 4014-4017): the function-name line and the
 * body of the AllocSetContextCreate() call that creates the event
 * context are missing.  Consult the canonical source.
3990  * ----------
3991  */
3992 static void
3994  AfterTriggerEvent event, AfterTriggerShared evtshared)
3995 {
3996  Size eventsize = SizeofTriggerEvent(event);
3997  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3998  AfterTriggerEventChunk *chunk;
3999  AfterTriggerShared newshared;
4000  AfterTriggerEvent newevent;
4001 
4002  /*
4003  * If empty list or not enough room in the tail chunk, make a new chunk.
4004  * We assume here that a new shared record will always be needed.
4005  */
4006  chunk = events->tail;
4007  if (chunk == NULL ||
4008  chunk->endfree - chunk->freeptr < needed)
4009  {
4010  Size chunksize;
4011 
4012  /* Create event context if we didn't already */
4013  if (afterTriggers.event_cxt == NULL)
 /* (dropped lines 4014-4017: AllocSetContextCreate(...) arguments) */
4016  "AfterTriggerEvents",
4018 
4019  /*
4020  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
4021  * These numbers are fairly arbitrary, though there is a hard limit at
4022  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
4023  * shared records using the available space in ate_flags. Another
4024  * constraint is that if the chunk size gets too huge, the search loop
4025  * below would get slow given a (not too common) usage pattern with
4026  * many distinct event types in a chunk. Therefore, we double the
4027  * preceding chunk size only if there weren't too many shared records
4028  * in the preceding chunk; otherwise we halve it. This gives us some
4029  * ability to adapt to the actual usage pattern of the current query
4030  * while still having large chunk sizes in typical usage. All chunk
4031  * sizes used should be MAXALIGN multiples, to ensure that the shared
4032  * records will be aligned safely.
4033  */
4034 #define MIN_CHUNK_SIZE 1024
4035 #define MAX_CHUNK_SIZE (1024*1024)
4036 
4037 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
4038 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
4039 #endif
4040 
4041  if (chunk == NULL)
4042  chunksize = MIN_CHUNK_SIZE;
4043  else
4044  {
4045  /* preceding chunk size... */
4046  chunksize = chunk->endptr - (char *) chunk;
4047  /* check number of shared records in preceding chunk */
4048  if ((chunk->endptr - chunk->endfree) <=
4049  (100 * sizeof(AfterTriggerSharedData)))
4050  chunksize *= 2; /* okay, double it */
4051  else
4052  chunksize /= 2; /* too many shared records */
4053  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
4054  }
4055  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
4056  chunk->next = NULL;
4057  chunk->freeptr = CHUNK_DATA_START(chunk);
4058  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
4059  Assert(chunk->endfree - chunk->freeptr >= needed);
4060 
4061  if (events->head == NULL)
4062  events->head = chunk;
4063  else
4064  events->tail->next = chunk;
4065  events->tail = chunk;
4066  /* events->tailfree is now out of sync, but we'll fix it below */
4067  }
4068 
4069  /*
4070  * Try to locate a matching shared-data record already in the chunk. If
4071  * none, make a new one.
4072  */
4073  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
4074  (char *) newshared >= chunk->endfree;
4075  newshared--)
4076  {
4077  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
4078  newshared->ats_relid == evtshared->ats_relid &&
4079  newshared->ats_event == evtshared->ats_event &&
4080  newshared->ats_table == evtshared->ats_table &&
4081  newshared->ats_firing_id == 0)
4082  break;
4083  }
 /* Loop fell through without break: claim a new shared record slot. */
4084  if ((char *) newshared < chunk->endfree)
4085  {
4086  *newshared = *evtshared;
4087  newshared->ats_firing_id = 0; /* just to be sure */
4088  chunk->endfree = (char *) newshared;
4089  }
4090 
4091  /* Insert the data */
4092  newevent = (AfterTriggerEvent) chunk->freeptr;
4093  memcpy(newevent, event, eventsize);
4094  /* ... and link the new event to its shared record */
4095  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
4096  newevent->ate_flags |= (char *) newshared - (char *) newevent;
4097 
4098  chunk->freeptr += eventsize;
4099  events->tailfree = chunk->freeptr;
4100 }
4101 
4102 /* ----------
4103  * afterTriggerFreeEventList()
4104  *
4105  * Free all the event storage in the given list.
 *
 * Frees every chunk and resets head/tail/tailfree so the list is
 * left in a valid empty state.
 *
 * NOTE(review): the function-name line (original line 4109) was
 * dropped by this rendering; only the body is visible below.
4106  * ----------
4107  */
4108 static void
4110 {
4111  AfterTriggerEventChunk *chunk;
4112 
4113  while ((chunk = events->head) != NULL)
4114  {
4115  events->head = chunk->next;
4116  pfree(chunk);
4117  }
4118  events->tail = NULL;
4119  events->tailfree = NULL;
4120 }
4121 
4122 /* ----------
4123  * afterTriggerRestoreEventList()
4124  *
4125  * Restore an event list to its prior length, removing all the events
4126  * added since it had the value old_events.
 *
 * NOTE(review): the function-name line (original line 4130) was
 * dropped by this rendering; the parameter list continues below.
4127  * ----------
4128  */
4129 static void
4131  const AfterTriggerEventList *old_events)
4132 {
4133  AfterTriggerEventChunk *chunk;
4134  AfterTriggerEventChunk *next_chunk;
4135 
4136  if (old_events->tail == NULL)
4137  {
4138  /* restoring to a completely empty state, so free everything */
4139  afterTriggerFreeEventList(events);
4140  }
4141  else
4142  {
4143  *events = *old_events;
4144  /* free any chunks after the last one we want to keep */
4145  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
4146  {
4147  next_chunk = chunk->next;
4148  pfree(chunk);
4149  }
4150  /* and clean up the tail chunk to be the right length */
4151  events->tail->next = NULL;
4152  events->tail->freeptr = events->tailfree;
4153 
4154  /*
4155  * We don't make any effort to remove now-unused shared data records.
4156  * They might still be useful, anyway.
4157  */
4158  }
4159 }
4160 
4161 /* ----------
4162  * afterTriggerDeleteHeadEventChunk()
4163  *
4164  * Remove the first chunk of events from the query level's event list.
4165  * Keep any event list pointers elsewhere in the query level's data
4166  * structures in sync.
 *
 * NOTE(review): this rendering dropped two original source lines
 * (numbering skips 4170 and 4184): the function-name line and the
 * declaration/initialization of "table" in the foreach loop below.
4167  * ----------
4168  */
4169 static void
4171 {
4172  AfterTriggerEventChunk *target = qs->events.head;
4173  ListCell *lc;
4174 
 /* Caller guarantees there is a head chunk AND at least one more. */
4175  Assert(target && target->next);
4176 
4177  /*
4178  * First, update any pointers in the per-table data, so that they won't be
4179  * dangling. Resetting obsoleted pointers to NULL will make
4180  * cancel_prior_stmt_triggers start from the list head, which is fine.
4181  */
4182  foreach(lc, qs->tables)
4183  {
4185 
4186  if (table->after_trig_done &&
4187  table->after_trig_events.tail == target)
4188  {
4189  table->after_trig_events.head = NULL;
4190  table->after_trig_events.tail = NULL;
4191  table->after_trig_events.tailfree = NULL;
4192  }
4193  }
4194 
4195  /* Now we can flush the head chunk */
4196  qs->events.head = target->next;
4197  pfree(target);
4198 }
4199 
4200 
4201 /* ----------
4202  * AfterTriggerExecute()
4203  *
4204  * Fetch the required tuples back from the heap and fire one
4205  * single trigger function.
4206  *
4207  * Frequently, this will be fired many times in a row for triggers of
4208  * a single relation. Therefore, we cache the open relation and provide
4209  * fmgr lookup cache space at the caller level. (For triggers fired at
4210  * the end of a query, we can even piggyback on the executor's state.)
4211  *
4212  * When fired for a cross-partition update of a partitioned table, the old
4213  * tuple is fetched using 'src_relInfo' (the source leaf partition) and
4214  * the new tuple using 'dst_relInfo' (the destination leaf partition), though
4215  * both are converted into the root partitioned table's format before passing
4216  * to the trigger function.
4217  *
4218  * event: event currently being fired.
4219  * relInfo: result relation for event.
4220  * src_relInfo: source partition of a cross-partition update
4221  * dst_relInfo: its destination partition
4222  * trigdesc: working copy of rel's trigger info.
4223  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4224  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4225  * or NULL if no instrumentation is wanted.
4226  * per_tuple_context: memory context to call trigger function in.
4227  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4228  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
 *
 * NOTE(review): this rendering has dropped a number of original source
 * lines (the embedded numbering skips 4232, 4280, 4289, 4295, 4311,
 * 4346, 4364, 4389 and 4441).  Missing text includes the function-name
 * line, the switch case labels in the tuple-fetch logic (presumably the
 * FDW fetch/reuse cases — confirm against canonical source), parts of
 * two event-opmask comparisons, two tuple-conversion calls performed
 * when "map" is non-NULL, part of the ctid2 guard condition, and the
 * right-hand side of the tg_event assignment.  Do not treat this text
 * as a complete copy of the function.
4229  * ----------
4230  */
4231 static void
4233  AfterTriggerEvent event,
4234  ResultRelInfo *relInfo,
4235  ResultRelInfo *src_relInfo,
4236  ResultRelInfo *dst_relInfo,
4237  TriggerDesc *trigdesc,
4238  FmgrInfo *finfo, Instrumentation *instr,
4239  MemoryContext per_tuple_context,
4240  TupleTableSlot *trig_tuple_slot1,
4241  TupleTableSlot *trig_tuple_slot2)
4242 {
4243  Relation rel = relInfo->ri_RelationDesc;
4244  Relation src_rel = src_relInfo->ri_RelationDesc;
4245  Relation dst_rel = dst_relInfo->ri_RelationDesc;
4246  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4247  Oid tgoid = evtshared->ats_tgoid;
4248  TriggerData LocTriggerData = {0};
4249  HeapTuple rettuple;
4250  int tgindx;
4251  bool should_free_trig = false;
4252  bool should_free_new = false;
4253 
4254  /*
4255  * Locate trigger in trigdesc.
4256  */
4257  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4258  {
4259  if (trigdesc->triggers[tgindx].tgoid == tgoid)
4260  {
4261  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4262  break;
4263  }
4264  }
4265  if (LocTriggerData.tg_trigger == NULL)
4266  elog(ERROR, "could not find trigger %u", tgoid);
4267 
4268  /*
4269  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4270  * to include time spent re-fetching tuples in the trigger cost.
4271  */
4272  if (instr)
4273  InstrStartNode(instr + tgindx);
4274 
4275  /*
4276  * Fetch the required tuple(s).
4277  */
4278  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4279  {
 /* (dropped line 4280: switch case label) */
4281  {
4282  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4283 
4284  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4285  trig_tuple_slot1))
4286  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4287 
4288  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
 /* (dropped line 4289: comparison operand, continues with &&) */
4290  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4291  trig_tuple_slot2))
4292  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4293  }
4294  /* fall through */
 /* (dropped line 4295: switch case label) */
4296 
4297  /*
4298  * Store tuple in the slot so that tg_trigtuple does not reference
4299  * tuplestore memory. (It is formally possible for the trigger
4300  * function to queue trigger events that add to the same
4301  * tuplestore, which can push other tuples out of memory.) The
4302  * distinction is academic, because we start with a minimal tuple
4303  * that is stored as a heap tuple, constructed in different memory
4304  * context, in the slot anyway.
4305  */
4306  LocTriggerData.tg_trigslot = trig_tuple_slot1;
4307  LocTriggerData.tg_trigtuple =
4308  ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
4309 
4310  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
 /* (dropped line 4311: comparison operand) */
4312  {
4313  LocTriggerData.tg_newslot = trig_tuple_slot2;
4314  LocTriggerData.tg_newtuple =
4315  ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
4316  }
4317  else
4318  {
4319  LocTriggerData.tg_newtuple = NULL;
4320  }
4321  break;
4322 
4323  default:
4324  if (ItemPointerIsValid(&(event->ate_ctid1)))
4325  {
4326  TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate,
4327  src_relInfo);
4328 
4329  if (!table_tuple_fetch_row_version(src_rel,
4330  &(event->ate_ctid1),
4331  SnapshotAny,
4332  src_slot))
4333  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4334 
4335  /*
4336  * Store the tuple fetched from the source partition into the
4337  * target (root partitioned) table slot, converting if needed.
4338  */
4339  if (src_relInfo != relInfo)
4340  {
4341  TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo);
4342 
4343  LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
4344  if (map)
4345  {
 /* (dropped line 4346: conversion call applied to src_slot) */
4347  src_slot,
4348  LocTriggerData.tg_trigslot);
4349  }
4350  else
4351  ExecCopySlot(LocTriggerData.tg_trigslot, src_slot);
4352  }
4353  else
4354  LocTriggerData.tg_trigslot = src_slot;
4355  LocTriggerData.tg_trigtuple =
4356  ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
4357  }
4358  else
4359  {
4360  LocTriggerData.tg_trigtuple = NULL;
4361  }
4362 
4363  /* don't touch ctid2 if not there */
 /* (dropped line 4364: first half of the guard condition) */
4365  (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) &&
4366  ItemPointerIsValid(&(event->ate_ctid2)))
4367  {
4368  TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate,
4369  dst_relInfo);
4370 
4371  if (!table_tuple_fetch_row_version(dst_rel,
4372  &(event->ate_ctid2),
4373  SnapshotAny,
4374  dst_slot))
4375  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4376 
4377  /*
4378  * Store the tuple fetched from the destination partition into
4379  * the target (root partitioned) table slot, converting if
4380  * needed.
4381  */
4382  if (dst_relInfo != relInfo)
4383  {
4384  TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo);
4385 
4386  LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
4387  if (map)
4388  {
 /* (dropped line 4389: conversion call applied to dst_slot) */
4390  dst_slot,
4391  LocTriggerData.tg_newslot);
4392  }
4393  else
4394  ExecCopySlot(LocTriggerData.tg_newslot, dst_slot);
4395  }
4396  else
4397  LocTriggerData.tg_newslot = dst_slot;
4398  LocTriggerData.tg_newtuple =
4399  ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
4400  }
4401  else
4402  {
4403  LocTriggerData.tg_newtuple = NULL;
4404  }
4405  }
4406 
4407  /*
4408  * Set up the tuplestore information to let the trigger have access to
4409  * transition tables. When we first make a transition table available to
4410  * a trigger, mark it "closed" so that it cannot change anymore. If any
4411  * additional events of the same type get queued in the current trigger
4412  * query level, they'll go into new transition tables.
4413  */
4414  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4415  if (evtshared->ats_table)
4416  {
4417  if (LocTriggerData.tg_trigger->tgoldtable)
4418  {
4419  if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event))
4420  LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore;
4421  else
4422  LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore;
4423  evtshared->ats_table->closed = true;
4424  }
4425 
4426  if (LocTriggerData.tg_trigger->tgnewtable)
4427  {
4428  if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event))
4429  LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore;
4430  else
4431  LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore;
4432  evtshared->ats_table->closed = true;
4433  }
4434  }
4435 
4436  /*
4437  * Setup the remaining trigger information
4438  */
4439  LocTriggerData.type = T_TriggerData;
4440  LocTriggerData.tg_event =
 /* (dropped line 4441: right-hand side of the tg_event assignment) */
4442  LocTriggerData.tg_relation = rel;
4443  if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4444  LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4445 
4446  MemoryContextReset(per_tuple_context);
4447 
4448  /*
4449  * Call the trigger and throw away any possibly returned updated tuple.
4450  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4451  */
4452  rettuple = ExecCallTriggerFunc(&LocTriggerData,
4453  tgindx,
4454  finfo,
4455  NULL,
4456  per_tuple_context);
4457  if (rettuple != NULL &&
4458  rettuple != LocTriggerData.tg_trigtuple &&
4459  rettuple != LocTriggerData.tg_newtuple)
4460  heap_freetuple(rettuple);
4461 
4462  /*
4463  * Release resources
4464  */
4465  if (should_free_trig)
4466  heap_freetuple(LocTriggerData.tg_trigtuple);
4467  if (should_free_new)
4468  heap_freetuple(LocTriggerData.tg_newtuple);
4469 
4470  /* don't clear slots' contents if foreign table */
4471  if (trig_tuple_slot1 == NULL)
4472  {
4473  if (LocTriggerData.tg_trigslot)
4474  ExecClearTuple(LocTriggerData.tg_trigslot);
4475  if (LocTriggerData.tg_newslot)
4476  ExecClearTuple(LocTriggerData.tg_newslot);
4477  }
4478 
4479  /*
4480  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4481  * one "tuple returned" (really the number of firings).
4482  */
4483  if (instr)
4484  InstrStopNode(instr + tgindx, 1);
4485 }
4486 
4487 
4488 /*
4489  * afterTriggerMarkEvents()
4490  *
4491  * Scan the given event list for not yet invoked events. Mark the ones
4492  * that can be invoked now with the current firing ID.
4493  *
4494  * If move_list isn't NULL, events that are not to be invoked now are
4495  * transferred to move_list.
4496  *
4497  * When immediate_only is true, do not invoke currently-deferred triggers.
4498  * (This will be false only at main transaction exit.)
4499  *
4500  * Returns true if any invokable events were found.
 *
 * NOTE(review): this rendering dropped three original source lines
 * (numbering skips 4503, 4518 and 4533): the function-name line, the
 * second operand of the ate_flags mask test, and the statement that
 * stamps the event's shared record with the current firing ID.
4501  */
4502 static bool
4504  AfterTriggerEventList *move_list,
4505  bool immediate_only)
4506 {
4507  bool found = false;
4508  bool deferred_found = false;
4509  AfterTriggerEvent event;
4510  AfterTriggerEventChunk *chunk;
4511 
4512  for_each_event_chunk(event, chunk, *events)
4513  {
4514  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4515  bool defer_it = false;
4516 
4517  if (!(event->ate_flags &
4519  {
4520  /*
4521  * This trigger hasn't been called or scheduled yet. Check if we
4522  * should call it now.
4523  */
4524  if (immediate_only && afterTriggerCheckState(evtshared))
4525  {
4526  defer_it = true;
4527  }
4528  else
4529  {
4530  /*
4531  * Mark it as to be fired in this firing cycle.
4532  */
4534  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4535  found = true;
4536  }
4537  }
4538 
4539  /*
4540  * If it's deferred, move it to move_list, if requested.
4541  */
4542  if (defer_it && move_list != NULL)
4543  {
4544  deferred_found = true;
4545  /* add it to move_list */
4546  afterTriggerAddEvent(move_list, event, evtshared)
4547  /* mark original copy "done" so we don't do it again */
4548  event->ate_flags |= AFTER_TRIGGER_DONE;
4549  }
4550  }
4551 
4552  /*
4553  * We could allow deferred triggers if, before the end of the
4554  * security-restricted operation, we were to verify that a SET CONSTRAINTS
4555  * ... IMMEDIATE has fired all such triggers. For now, don't bother.
4556  */
4557  if (deferred_found && InSecurityRestrictedOperation())
4558  ereport(ERROR,
4559  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
4560  errmsg("cannot fire deferred trigger within security-restricted operation")));
4561 
4562  return found;
4563 }
4564 
4565 /*
4566  * afterTriggerInvokeEvents()
4567  *
4568  * Scan the given event list for events that are marked as to be fired
4569  * in the current firing cycle, and fire them.
4570  *
4571  * If estate isn't NULL, we use its result relation info to avoid repeated
4572  * openings and closing of trigger target relations. If it is NULL, we
4573  * make one locally to cache the info in case there are multiple trigger
4574  * events per rel.
4575  *
4576  * When delete_ok is true, it's safe to delete fully-processed events.
4577  * (We are not very tense about that: we simply reset a chunk to be empty
4578  * if all its events got fired. The objective here is just to avoid useless
4579  * rescanning of events when a trigger queues new events during transaction
4580  * end, so it's not necessary to worry much about the case where only
4581  * some events are fired.)
4582  *
4583  * Returns true if no unfired events remain in the list (this allows us
4584  * to avoid repeating afterTriggerMarkEvents).
 *
 * NOTE(review): this rendering dropped several original source lines
 * (numbering skips 4587, 4613-4615, 4645, 4651-4652, 4658-4660, 4672
 * and 4727-4728).  Missing text includes the function-name line, the
 * AllocSetContextCreate() arguments for the per-tuple context, an
 * Assert in the relation-switch path, the slot-release calls when
 * switching relations and at function exit, the TupleTableSlotOps
 * arguments to MakeSingleTupleTableSlot(), and the cross-partition
 * case's comparison operand.  Consult the canonical source.
4585  */
4586 static bool
4588  CommandId firing_id,
4589  EState *estate,
4590  bool delete_ok)
4591 {
4592  bool all_fired = true;
4593  AfterTriggerEventChunk *chunk;
4594  MemoryContext per_tuple_context;
4595  bool local_estate = false;
4596  ResultRelInfo *rInfo = NULL;
4597  Relation rel = NULL;
4598  TriggerDesc *trigdesc = NULL;
4599  FmgrInfo *finfo = NULL;
4600  Instrumentation *instr = NULL;
4601  TupleTableSlot *slot1 = NULL,
4602  *slot2 = NULL;
4603 
4604  /* Make a local EState if need be */
4605  if (estate == NULL)
4606  {
4607  estate = CreateExecutorState();
4608  local_estate = true;
4609  }
4610 
4611  /* Make a per-tuple memory context for trigger function calls */
4612  per_tuple_context =
4614  "AfterTriggerTupleContext",
4616 
4617  for_each_chunk(chunk, *events)
4618  {
4619  AfterTriggerEvent event;
4620  bool all_fired_in_chunk = true;
4621 
4622  for_each_event(event, chunk)
4623  {
4624  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4625 
4626  /*
4627  * Is it one for me to fire?
4628  */
4629  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4630  evtshared->ats_firing_id == firing_id)
4631  {
4632  ResultRelInfo *src_rInfo,
4633  *dst_rInfo;
4634 
4635  /*
4636  * So let's fire it... but first, find the correct relation if
4637  * this is not the same relation as before.
4638  */
4639  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4640  {
4641  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid,
4642  NULL);
4643  rel = rInfo->ri_RelationDesc;
4644  /* Catch calls with insufficient relcache refcounting */
4646  trigdesc = rInfo->ri_TrigDesc;
4647  finfo = rInfo->ri_TrigFunctions;
4648  instr = rInfo->ri_TrigInstrument;
4649  if (slot1 != NULL)
4650  {
 /* (dropped lines 4651-4652: release the previous rel's scratch slots) */
4653  slot1 = slot2 = NULL;
4654  }
4655  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4656  {
4657  slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4659  slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4661  }
4662  if (trigdesc == NULL) /* should not happen */
4663  elog(ERROR, "relation %u has no triggers",
4664  evtshared->ats_relid);
4665  }
4666 
4667  /*
4668  * Look up source and destination partition result rels of a
4669  * cross-partition update event.
4670  */
4671  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
4673  {
4674  Assert(OidIsValid(event->ate_src_part) &&
4675  OidIsValid(event->ate_dst_part));
4676  src_rInfo = ExecGetTriggerResultRel(estate,
4677  event->ate_src_part,
4678  rInfo);
4679  dst_rInfo = ExecGetTriggerResultRel(estate,
4680  event->ate_dst_part,
4681  rInfo);
4682  }
4683  else
4684  src_rInfo = dst_rInfo = rInfo;
4685 
4686  /*
4687  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4688  * still set, so recursive examinations of the event list
4689  * won't try to re-fire it.
4690  */
4691  AfterTriggerExecute(estate, event, rInfo,
4692  src_rInfo, dst_rInfo,
4693  trigdesc, finfo, instr,
4694  per_tuple_context, slot1, slot2);
4695 
4696  /*
4697  * Mark the event as done.
4698  */
4699  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4700  event->ate_flags |= AFTER_TRIGGER_DONE;
4701  }
4702  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4703  {
4704  /* something remains to be done */
4705  all_fired = all_fired_in_chunk = false;
4706  }
4707  }
4708 
4709  /* Clear the chunk if delete_ok and nothing left of interest */
4710  if (delete_ok && all_fired_in_chunk)
4711  {
4712  chunk->freeptr = CHUNK_DATA_START(chunk);
4713  chunk->endfree = chunk->endptr;
4714 
4715  /*
4716  * If it's last chunk, must sync event list's tailfree too. Note
4717  * that delete_ok must NOT be passed as true if there could be
4718  * additional AfterTriggerEventList values pointing at this event
4719  * list, since we'd fail to fix their copies of tailfree.
4720  */
4721  if (chunk == events->tail)
4722  events->tailfree = chunk->freeptr;
4723  }
4724  }
4725  if (slot1 != NULL)
4726  {
 /* (dropped lines 4727-4728: release the scratch slots) */
4729  }
4730 
4731  /* Release working resources */
4732  MemoryContextDelete(per_tuple_context);
4733 
4734  if (local_estate)
4735  {
4736  ExecCloseResultRelations(estate);
4737  ExecResetTupleTable(estate->es_tupleTable, false);
4738  FreeExecutorState(estate);
4739  }
4740 
4741  return all_fired;
4742 }
4743 
4744 
4745 /*
4746  * GetAfterTriggersTableData
4747  *
4748  * Find or create an AfterTriggersTableData struct for the specified
4749  * trigger event (relation + operation type). Ignore existing structs
4750  * marked "closed"; we don't want to put any additional tuples into them,
4751  * nor change their stmt-triggers-fired state.
4752  *
4753  * Note: the AfterTriggersTableData list is allocated in the current
4754  * (sub)transaction's CurTransactionContext. This is OK because
4755  * we don't need it to live past AfterTriggerEndQuery.
 *
 * NOTE(review): this rendering dropped several original source lines
 * (numbering skips 4758, 4761, 4766-4768 and 4778-4780): the
 * function-name line, the "qs" query-stack variable's declaration/
 * assignment, the query-depth assertions, the switch into
 * CurTransactionContext, and the allocation of the new struct.
4756  */
4757 static AfterTriggersTableData *
4759 {
4760  AfterTriggersTableData *table;
4762  MemoryContext oldcxt;
4763  ListCell *lc;
4764 
4765  /* Caller should have ensured query_depth is OK. */
4769 
 /* Reuse an existing open struct for this rel + operation if any. */
4770  foreach(lc, qs->tables)
4771  {
4772  table = (AfterTriggersTableData *) lfirst(lc);
4773  if (table->relid == relid && table->cmdType == cmdType &&
4774  !table->closed)
4775  return table;
4776  }
4777 
4779 
4781  table->relid = relid;
4782  table->cmdType = cmdType;
4783  qs->tables = lappend(qs->tables, table);
4784 
4785  MemoryContextSwitchTo(oldcxt);
4786 
4787  return table;
4788 }
4789 
4790 /*
4791  * Returns a TupleTableSlot suitable for holding the tuples to be put
4792  * into AfterTriggersTableData's transition table tuplestores.
 *
 * The slot is created once per AfterTriggersTableData and cached in
 * table->storeslot; the tupdesc is copied because the caller's copy
 * may have a shorter lifespan (see comment below).
 *
 * NOTE(review): this rendering dropped two original source lines
 * (numbering skips 4795 and 4809): the function-name line and the
 * MemoryContextSwitchTo() that assigns "oldcxt".
4793  */
4794 static TupleTableSlot *
4796  TupleDesc tupdesc)
4797 {
4798  /* Create it if not already done. */
4799  if (!table->storeslot)
4800  {
4801  MemoryContext oldcxt;
4802 
4803  /*
4804  * We need this slot only until AfterTriggerEndQuery, but making it
4805  * last till end-of-subxact is good enough. It'll be freed by
4806  * AfterTriggerFreeQuery(). However, the passed-in tupdesc might have
4807  * a different lifespan, so we'd better make a copy of that.
4808  */
4810  tupdesc = CreateTupleDescCopy(tupdesc);
4811  table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
4812  MemoryContextSwitchTo(oldcxt);
4813  }
4814 
4815  return table->storeslot;
4816 }
4817 
4818 /*
4819  * MakeTransitionCaptureState
4820  *
4821  * Make a TransitionCaptureState object for the given TriggerDesc, target
4822  * relation, and operation type. The TCS object holds all the state needed
4823  * to decide whether to capture tuples in transition tables.
4824  *
4825  * If there are no triggers in 'trigdesc' that request relevant transition
4826  * tables, then return NULL.
4827  *
4828  * The resulting object can be passed to the ExecAR* functions. When
4829  * dealing with child tables, the caller can set tcs_original_insert_tuple
4830  * to avoid having to reconstruct the original tuple in the root table's
4831  * format.
4832  *
4833  * Note that we copy the flags from a parent table into this struct (rather
4834  * than subsequently using the relation's TriggerDesc directly) so that we can
4835  * use it to control collection of transition tuples from child tables.
4836  *
4837  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4838  * on the same table during one query should share one transition table.
4839  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4840  * looked up using the table OID + CmdType, and are merely referenced by
4841  * the TransitionCaptureState objects we hand out to callers.
 *
 * NOTE(review): this rendering dropped several original source lines
 * (numbering skips 4843-4846, 4894-4895, 4912-4914 and 4929): the
 * function signature, the "state" variable declaration, the
 * enlarge-query-state call, the memory-context/resource-owner setup
 * lines, and the allocation of the TransitionCaptureState struct.
4842  */
4845 {
4847  bool need_old_upd,
4848  need_new_upd,
4849  need_old_del,
4850  need_new_ins;
4851  AfterTriggersTableData *table;
4852  MemoryContext oldcxt;
4853  ResourceOwner saveResourceOwner;
4854 
4855  if (trigdesc == NULL)
4856  return NULL;
4857 
4858  /* Detect which table(s) we need. */
4859  switch (cmdType)
4860  {
4861  case CMD_INSERT:
4862  need_old_upd = need_old_del = need_new_upd = false;
4863  need_new_ins = trigdesc->trig_insert_new_table;
4864  break;
4865  case CMD_UPDATE:
4866  need_old_upd = trigdesc->trig_update_old_table;
4867  need_new_upd = trigdesc->trig_update_new_table;
4868  need_old_del = need_new_ins = false;
4869  break;
4870  case CMD_DELETE:
4871  need_old_del = trigdesc->trig_delete_old_table;
4872  need_old_upd = need_new_upd = need_new_ins = false;
4873  break;
4874  case CMD_MERGE:
 /* MERGE can perform all three actions, so consider every table kind. */
4875  need_old_upd = trigdesc->trig_update_old_table;
4876  need_new_upd = trigdesc->trig_update_new_table;
4877  need_old_del = trigdesc->trig_delete_old_table;
4878  need_new_ins = trigdesc->trig_insert_new_table;
4879  break;
4880  default:
4881  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4882  /* keep compiler quiet */
4883  need_old_upd = need_new_upd = need_old_del = need_new_ins = false;
4884  break;
4885  }
4886  if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del)
4887  return NULL;
4888 
4889  /* Check state, like AfterTriggerSaveEvent. */
4890  if (afterTriggers.query_depth < 0)
4891  elog(ERROR, "MakeTransitionCaptureState() called outside of query")
4892 
4893  /* Be sure we have enough space to record events at this query depth. */
4896 
4897  /*
4898  * Find or create an AfterTriggersTableData struct to hold the
4899  * tuplestore(s). If there's a matching struct but it's marked closed,
4900  * ignore it; we need a newer one.
4901  *
4902  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4903  * allocated in the current (sub)transaction's CurTransactionContext, and
4904  * the tuplestores are managed by the (sub)transaction's resource owner.
4905  * This is sufficient lifespan because we do not allow triggers using
4906  * transition tables to be deferrable; they will be fired during
4907  * AfterTriggerEndQuery, after which it's okay to delete the data.
4908  */
4909  table = GetAfterTriggersTableData(relid, cmdType);
4910 
4911  /* Now create required tuplestore(s), if we don't have them already. */
4913  saveResourceOwner = CurrentResourceOwner;
4915 
4916  if (need_old_upd && table->old_upd_tuplestore == NULL)
4917  table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4918  if (need_new_upd && table->new_upd_tuplestore == NULL)
4919  table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4920  if (need_old_del && table->old_del_tuplestore == NULL)
4921  table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4922  if (need_new_ins && table->new_ins_tuplestore == NULL)
4923  table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4924 
4925  CurrentResourceOwner = saveResourceOwner;
4926  MemoryContextSwitchTo(oldcxt);
4927 
4928  /* Now build the TransitionCaptureState struct, in caller's context */
4930  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4931  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4932  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4933  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4934  state->tcs_private = table;
4935 
4936  return state;
4937 }
4938 
4939 
4940 /* ----------
4941  * AfterTriggerBeginXact()
4942  *
4943  * Called at transaction start (either BEGIN or implicit for single
4944  * statement outside of transaction block).
 *
 * NOTE(review): this rendering dropped several original source lines
 * (numbering skips 4948, 4954, 4963 and 4967): the function-name line,
 * the statement that resets the afterTriggers struct, and two of the
 * leftover-state assertions.
4945  * ----------
4946  */
4947 void
4949 {
4950  /*
4951  * Initialize after-trigger state structure to empty
4952  */
4953  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4955 
4956  /*
4957  * Verify that there is no leftover state remaining. If these assertions
4958  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4959  * up properly.
4960  */
4961  Assert(afterTriggers.state == NULL);
4962  Assert(afterTriggers.query_stack == NULL);
4964  Assert(afterTriggers.event_cxt == NULL);
4965  Assert(afterTriggers.events.head == NULL);
4966  Assert(afterTriggers.trans_stack == NULL);
4968 }
4969 
4970 
4971 /* ----------
4972  * AfterTriggerBeginQuery()
4973  *
4974  * Called just before we start processing a single query within a
4975  * transaction (or subtransaction). Most of the real work gets deferred
4976  * until somebody actually tries to queue a trigger event.
 *
 * NOTE(review): this rendering dropped two original source lines
 * (numbering skips 4980 and 4983): the function-name line and the
 * statement that actually increments the query-stack depth.
4977  * ----------
4978  */
4979 void
4981 {
4982  /* Increase the query stack depth */
4984 }
4985 
4986 
4987 /* ----------
4988  * AfterTriggerEndQuery()
4989  *
4990  * Called after one query has been completely processed. At this time
4991  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4992  * transfer deferred trigger events to the global deferred-trigger list.
4993  *
4994  * Note that this must be called BEFORE closing down the executor
4995  * with ExecutorEnd, because we make use of the EState's info about
4996  * target relations. Normally it is called from ExecutorFinish.
4997  * ----------
4998  */
4999 void
/*
 * NOTE(review): extraction artifact — several original lines are missing
 * from this body (5000, 5002, 5005, 5011, 5013, 5037, 5041, 5056, 5070,
 * 5077, 5079), including the name/parameter line, the declaration of the
 * query-stack pointer "qs", the afterTriggerMarkEvents() loop condition, the
 * chunk-freeing loop body, and the final AfterTriggerFreeQuery() call that
 * the comment at the bottom refers to. Verify against upstream trigger.c.
 */
5001 {
5003 
5004  /* Must be inside a query, too */
5006 
5007  /*
5008  * If we never even got as far as initializing the event stack, there
5009  * certainly won't be any events, so exit quickly.
5010  */
5012  {
5014  return;
5015  }
5016 
5017  /*
5018  * Process all immediate-mode triggers queued by the query, and move the
5019  * deferred ones to the main list of deferred events.
5020  *
5021  * Notice that we decide which ones will be fired, and put the deferred
5022  * ones on the main list, before anything is actually fired. This ensures
5023  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
5024  * IMMEDIATE: all events we have decided to defer will be available for it
5025  * to fire.
5026  *
5027  * We loop in case a trigger queues more events at the same query level.
5028  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
5029  * will instead fire any triggers in a dedicated query level. Foreign key
5030  * enforcement triggers do add to the current query level, thanks to their
5031  * passing fire_triggers = false to SPI_execute_snapshot(). Other
5032  * C-language triggers might do likewise.
5033  *
5034  * If we find no firable events, we don't have to increment
5035  * firing_counter.
5036  */
5038 
5039  for (;;)
5040  {
5042  {
5043  CommandId firing_id = afterTriggers.firing_counter++;
5044  AfterTriggerEventChunk *oldtail = qs->events.tail;
5045 
5046  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
5047  break; /* all fired */
5048 
5049  /*
5050  * Firing a trigger could result in query_stack being repalloc'd,
5051  * so we must recalculate qs after each afterTriggerInvokeEvents
5052  * call. Furthermore, it's unsafe to pass delete_ok = true here,
5053  * because that could cause afterTriggerInvokeEvents to try to
5054  * access qs->events after the stack has been repalloc'd.
5055  */
5057 
5058  /*
5059  * We'll need to scan the events list again. To reduce the cost
5060  * of doing so, get rid of completely-fired chunks. We know that
5061  * all events were marked IN_PROGRESS or DONE at the conclusion of
5062  * afterTriggerMarkEvents, so any still-interesting events must
5063  * have been added after that, and so must be in the chunk that
5064  * was then the tail chunk, or in later chunks. So, zap all
5065  * chunks before oldtail. This is approximately the same set of
5066  * events we would have gotten rid of by passing delete_ok = true.
5067  */
5068  Assert(oldtail != NULL);
5069  while (qs->events.head != oldtail)
5071  }
5072  else
5073  break;
5074  }
5075 
5076  /* Release query-level-local storage, including tuplestores if any */
5078 
5080 }
5081 
5082 
5083 /*
5084  * AfterTriggerFreeQuery
5085  * Release subsidiary storage for a trigger query level.
5086  * This includes closing down tuplestores.
5087  * Note: it's important for this to be safe if interrupted by an error
5088  * and then called again for the same query level.
5089  */
5090 static void
/*
 * NOTE(review): extraction artifact — missing original lines here include
 * the name/parameter line (5091, "AfterTriggerFreeQuery" per the header
 * comment, taking the AfterTriggersQueryData pointer "qs" used below), the
 * event-list free call (5098), the foreach loop-variable declaration
 * (5110, the AfterTriggersTableData pointer "table"), and the storeslot
 * drop call (5133). Verify against upstream trigger.c.
 *
 * The reset-pointer-before-free pattern throughout (NULL the struct field,
 * then tuplestore_end the saved copy) is what makes re-entry after an error
 * safe, as promised by the header comment.
 */
5092 {
5093  Tuplestorestate *ts;
5094  List *tables;
5095  ListCell *lc;
5096 
5097  /* Drop the trigger events */
5099 
5100  /* Drop FDW tuplestore if any */
5101  ts = qs->fdw_tuplestore;
5102  qs->fdw_tuplestore = NULL;
5103  if (ts)
5104  tuplestore_end(ts);
5105 
5106  /* Release per-table subsidiary storage */
5107  tables = qs->tables;
5108  foreach(lc, tables)
5109  {
5111 
5112  ts = table->old_upd_tuplestore;
5113  table->old_upd_tuplestore = NULL;
5114  if (ts)
5115  tuplestore_end(ts);
5116  ts = table->new_upd_tuplestore;
5117  table->new_upd_tuplestore = NULL;
5118  if (ts)
5119  tuplestore_end(ts);
5120  ts = table->old_del_tuplestore;
5121  table->old_del_tuplestore = NULL;
5122  if (ts)
5123  tuplestore_end(ts);
5124  ts = table->new_ins_tuplestore;
5125  table->new_ins_tuplestore = NULL;
5126  if (ts)
5127  tuplestore_end(ts);
5128  if (table->storeslot)
5129  {
5130  TupleTableSlot *slot = table->storeslot;
5131 
5132  table->storeslot = NULL;
5134  }
5135  }
5136 
5137  /*
5138  * Now free the AfterTriggersTableData structs and list cells. Reset list
5139  * pointer first; if list_free_deep somehow gets an error, better to leak
5140  * that storage than have an infinite loop.
5141  */
5142  qs->tables = NIL;
5143  list_free_deep(tables);
5144 }
5145 
5146 
5147 /* ----------
5148  * AfterTriggerFireDeferred()
5149  *
5150  * Called just before the current transaction is committed. At this
5151  * time we invoke all pending DEFERRED triggers.
5152  *
5153  * It is possible for other modules to queue additional deferred triggers
5154  * during pre-commit processing; therefore xact.c may have to call this
5155  * multiple times.
5156  * ----------
5157  */
5158 void
/*
 * NOTE(review): extraction artifact — missing original lines include the
 * name line (5159), the "must not be inside a query" Assert (5165), the
 * PushActiveSnapshot call (5175) that sets snap_pushed = true below, and
 * the matching PopActiveSnapshot (5197) guarded by "if (snap_pushed)".
 * Verify against upstream trigger.c.
 */
5160 {
5161  AfterTriggerEventList *events;
5162  bool snap_pushed = false;
5163 
5164  /* Must not be inside a query */
5166 
5167  /*
5168  * If there are any triggers to fire, make sure we have set a snapshot for
5169  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
5170  * can't assume ActiveSnapshot is valid on entry.)
5171  */
5172  events = &afterTriggers.events;
5173  if (events->head != NULL)
5174  {
5176  snap_pushed = true;
5177  }
5178 
5179  /*
5180  * Run all the remaining triggers. Loop until they are all gone, in case
5181  * some trigger queues more for us to do.
5182  */
5183  while (afterTriggerMarkEvents(events, NULL, false))
5184  {
5185  CommandId firing_id = afterTriggers.firing_counter++;
5186 
5187  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
5188  break; /* all fired */
5189  }
5190 
5191  /*
5192  * We don't bother freeing the event list, since it will go away anyway
5193  * (and more efficiently than via pfree) in AfterTriggerEndXact.
5194  */
5195 
5196  if (snap_pushed)
5198 }
5199 
5200 
5201 /* ----------
5202  * AfterTriggerEndXact()
5203  *
5204  * The current transaction is finishing.
5205  *
5206  * Any unfired triggers are canceled so we simply throw
5207  * away anything we know.
5208  *
5209  * Note: it is possible for this to be called repeatedly in case of
5210  * error during transaction abort; therefore, do not complain if
5211  * already closed down.
5212  * ----------
5213  */
5214 void
5215 AfterTriggerEndXact(bool isCommit)
/*
 * NOTE(review): extraction artifact — missing original lines include the
 * condition of the first if (5226, presumably testing event_cxt != NULL
 * given the resets that follow), the context deletion call (5228), and
 * several trailing reset statements (5241, 5250, 5254). The isCommit
 * parameter is not referenced in the surviving lines — can't tell from
 * here whether the dropped lines use it. Verify against upstream trigger.c.
 */
5216 {
5217  /*
5218  * Forget the pending-events list.
5219  *
5220  * Since all the info is in TopTransactionContext or children thereof, we
5221  * don't really need to do anything to reclaim memory. However, the
5222  * pending-events list could be large, and so it's useful to discard it as
5223  * soon as possible --- especially if we are aborting because we ran out
5224  * of memory for the list!
5225  */
5227  {
5229  afterTriggers.event_cxt = NULL;
5230  afterTriggers.events.head = NULL;
5231  afterTriggers.events.tail = NULL;
5232  afterTriggers.events.tailfree = NULL;
5233  }
5234 
5235  /*
5236  * Forget any subtransaction state as well. Since this can't be very
5237  * large, we let the eventual reset of TopTransactionContext free the
5238  * memory instead of doing it here.
5239  */
5240  afterTriggers.trans_stack = NULL;
5242 
5243 
5244  /*
5245  * Forget the query stack and constraint-related state information. As
5246  * with the subtransaction state information, we don't bother freeing the
5247  * memory here.
5248  */
5249  afterTriggers.query_stack = NULL;
5251  afterTriggers.state = NULL;
5252 
5253  /* No more afterTriggers manipulation until next transaction starts. */
5255 }
5256 
5257 /*
5258  * AfterTriggerBeginSubXact()
5259  *
5260  * Start a subtransaction.
5261  */
5262 void
/*
 * NOTE(review): extraction artifact — missing original lines include the
 * name line (5263), the allocation target of the initial 8-entry stack
 * (5277-5278, 5280), the repalloc assignment (5287-5288), and the
 * remaining trans_stack[my_level] field initializations (5300-5302)
 * alongside the ".state = NULL" that survives below. Verify against
 * upstream trigger.c.
 */
5264 {
5265  int my_level = GetCurrentTransactionNestLevel();
5266 
5267  /*
5268  * Allocate more space in the trans_stack if needed. (Note: because the
5269  * minimum nest level of a subtransaction is 2, we waste the first couple
5270  * entries of the array; not worth the notational effort to avoid it.)
5271  */
5272  while (my_level >= afterTriggers.maxtransdepth)
5273  {
5274  if (afterTriggers.maxtransdepth == 0)
5275  {
5276  /* Arbitrarily initialize for max of 8 subtransaction levels */
5279  8 * sizeof(AfterTriggersTransData));
5281  }
5282  else
5283  {
5284  /* repalloc will keep the stack in the same context */
5285  int new_alloc = afterTriggers.maxtransdepth * 2;
5286 
5289  new_alloc * sizeof(AfterTriggersTransData));
5290  afterTriggers.maxtransdepth = new_alloc;
5291  }
5292  }
5293 
5294  /*
5295  * Push the current information into the stack. The SET CONSTRAINTS state
5296  * is not saved until/unless changed. Likewise, we don't make a
5297  * per-subtransaction event context until needed.
5298  */
5299  afterTriggers.trans_stack[my_level].state = NULL;
5303 }
5304 
5305 /*
5306  * AfterTriggerEndSubXact()
5307  *
5308  * The current subtransaction is ending.
5309  */
5310 void
/*
 * NOTE(review): extraction artifact — missing original lines include the
 * name/parameter line (5311, taking "bool isCommit" per its use below),
 * the declaration of the SetConstraintState local "state" (5314), the
 * query-level cleanup loop condition and body (5350, 5352-5354, 5356-5357),
 * the afterTriggerRestoreEventList argument line (5363), the state-restore
 * body (5373-5374), the event-scan loop header (5388), and the flag
 * constants tested/cleared at 5393 and 5397. Verify against upstream
 * trigger.c.
 */
5312 {
5313  int my_level = GetCurrentTransactionNestLevel();
5315  AfterTriggerEvent event;
5316  AfterTriggerEventChunk *chunk;
5317  CommandId subxact_firing_id;
5318 
5319  /*
5320  * Pop the prior state if needed.
5321  */
5322  if (isCommit)
5323  {
5324  Assert(my_level < afterTriggers.maxtransdepth);
5325  /* If we saved a prior state, we don't need it anymore */
5326  state = afterTriggers.trans_stack[my_level].state;
5327  if (state != NULL)
5328  pfree(state);
5329  /* this avoids double pfree if error later: */
5330  afterTriggers.trans_stack[my_level].state = NULL;
5333  }
5334  else
5335  {
5336  /*
5337  * Aborting. It is possible subxact start failed before calling
5338  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
5339  * trans_stack levels that aren't there.
5340  */
5341  if (my_level >= afterTriggers.maxtransdepth)
5342  return;
5343 
5344  /*
5345  * Release query-level storage for queries being aborted, and restore
5346  * query_depth to its pre-subxact value. This assumes that a
5347  * subtransaction will not add events to query levels started in a
5348  * earlier transaction state.
5349  */
5351  {
5355  }
5358 
5359  /*
5360  * Restore the global deferred-event list to its former length,
5361  * discarding any events queued by the subxact.
5362  */
5364  &afterTriggers.trans_stack[my_level].events);
5365 
5366  /*
5367  * Restore the trigger state. If the saved state is NULL, then this
5368  * subxact didn't save it, so it doesn't need restoring.
5369  */
5370  state = afterTriggers.trans_stack[my_level].state;
5371  if (state != NULL)
5372  {
5375  }
5376  /* this avoids double pfree if error later: */
5377  afterTriggers.trans_stack[my_level].state = NULL;
5378 
5379  /*
5380  * Scan for any remaining deferred events that were marked DONE or IN
5381  * PROGRESS by this subxact or a child, and un-mark them. We can
5382  * recognize such events because they have a firing ID greater than or
5383  * equal to the firing_counter value we saved at subtransaction start.
5384  * (This essentially assumes that the current subxact includes all
5385  * subxacts started after it.)
5386  */
5387  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
5389  {
5390  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5391 
5392  if (event->ate_flags &
5394  {
5395  if (evtshared->ats_firing_id >= subxact_firing_id)
5396  event->ate_flags &=
5398  }
5399  }
5400  }
5401 }
5402 
5403 /*
5404  * Get the transition table for the given event and depending on whether we are
5405  * processing the old or the new tuple.
5406  */
5407 static Tuplestorestate *
/*
 * NOTE(review): extraction artifact — the name line and first parameter
 * (original 5408, presumably the function name plus "TriggerEvent event,"
 * given the uses of "event" below) were dropped. The body appears intact
 * (embedded numbering is consecutive from 5409 on). Returns NULL when no
 * transition capture applies to this event/tuple combination; callers are
 * expected to treat NULL as "nothing to store". Verify the signature
 * against upstream trigger.c.
 */
5409  TupleTableSlot *oldslot,
5410  TupleTableSlot *newslot,
5411  TransitionCaptureState *transition_capture)
5412 {
5413  Tuplestorestate *tuplestore = NULL;
5414  bool delete_old_table = transition_capture->tcs_delete_old_table;
5415  bool update_old_table = transition_capture->tcs_update_old_table;
5416  bool update_new_table = transition_capture->tcs_update_new_table;
5417  bool insert_new_table = transition_capture->tcs_insert_new_table;
5418 
5419  /*
5420  * For INSERT events NEW should be non-NULL, for DELETE events OLD should
5421  * be non-NULL, whereas for UPDATE events normally both OLD and NEW are
5422  * non-NULL. But for UPDATE events fired for capturing transition tuples
5423  * during UPDATE partition-key row movement, OLD is NULL when the event is
5424  * for a row being inserted, whereas NEW is NULL when the event is for a
5425  * row being deleted.
5426  */
5427  Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5428  TupIsNull(oldslot)));
5429  Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5430  TupIsNull(newslot)));
5431 
5432  if (!TupIsNull(oldslot))
5433  {
5434  Assert(TupIsNull(newslot));
5435  if (event == TRIGGER_EVENT_DELETE && delete_old_table)
5436  tuplestore = transition_capture->tcs_private->old_del_tuplestore;
5437  else if (event == TRIGGER_EVENT_UPDATE && update_old_table)
5438  tuplestore = transition_capture->tcs_private->old_upd_tuplestore;
5439  }
5440  else if (!TupIsNull(newslot))
5441  {
5442  Assert(TupIsNull(oldslot));
5443  if (event == TRIGGER_EVENT_INSERT && insert_new_table)
5444  tuplestore = transition_capture->tcs_private->new_ins_tuplestore;
5445  else if (event == TRIGGER_EVENT_UPDATE && update_new_table)
5446  tuplestore = transition_capture->tcs_private->new_upd_tuplestore;
5447  }
5448 
5449  return tuplestore;
5450 }
5451 
5452 /*
5453  * Add the given heap tuple to the given tuplestore, applying the conversion
5454  * map if necessary.
5455  *
5456  * If original_insert_tuple is given, we can add that tuple without conversion.
5457  */
5458 static void
/*
 * NOTE(review): extraction artifact — the function-name line (original
 * 5459) was dropped; the parameter list below survives intact. Three
 * store paths: (1) original_insert_tuple given -> store as-is; (2) a
 * child-to-root conversion map exists -> convert into a per-table store
 * slot first; (3) otherwise store the slot directly. Verify the name
 * against upstream trigger.c.
 */
5460  TransitionCaptureState *transition_capture,
5461  ResultRelInfo *relinfo,
5462  TupleTableSlot *slot,
5463  TupleTableSlot *original_insert_tuple,
5464  Tuplestorestate *tuplestore)
5465 {
5466  TupleConversionMap *map;
5467 
5468  /*
5469  * Nothing needs to be done if we don't have a tuplestore.
5470  */
5471  if (tuplestore == NULL)
5472  return;
5473 
5474  if (original_insert_tuple)
5475  tuplestore_puttupleslot(tuplestore, original_insert_tuple);
5476  else if ((map = ExecGetChildToRootMap(relinfo)) != NULL)
5477  {
5478  AfterTriggersTableData *table = transition_capture->tcs_private;
5479  TupleTableSlot *storeslot;
5480 
5481  storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5482  execute_attr_map_slot(map->attrMap, slot, storeslot);
5483  tuplestore_puttupleslot(tuplestore, storeslot);
5484  }
5485  else
5486  tuplestore_puttupleslot(tuplestore, slot);
5487 }
5488 
5489 /* ----------
5490  * AfterTriggerEnlargeQueryState()
5491  *
5492  * Prepare the necessary state so that we can record AFTER trigger events
5493  * queued by a query. It is allowed to have nested queries within a
5494  * (sub)transaction, so we need to have separate state for each query
5495  * nesting level.
5496  * ----------
5497  */
5498 static void
/*
 * NOTE(review): extraction artifact — missing original lines include the
 * name line (5499), an Assert (5503), the MemoryContextAlloc assignment
 * for the initial stack (5509-5510), the repalloc assignment (5521-5522),
 * and the qs declaration in the initialization loop (5530). Verify against
 * upstream trigger.c.
 */
5500 {
5501  int init_depth = afterTriggers.maxquerydepth;
5502 
5504 
5505  if (afterTriggers.maxquerydepth == 0)
5506  {
5507  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
5508 
5511  new_alloc * sizeof(AfterTriggersQueryData));
5512  afterTriggers.maxquerydepth = new_alloc;
5513  }
5514  else
5515  {
5516  /* repalloc will keep the stack in the same context */
5517  int old_alloc = afterTriggers.maxquerydepth;
5518  int new_alloc = Max(afterTriggers.query_depth + 1,
5519  old_alloc * 2);
5520 
5523  new_alloc * sizeof(AfterTriggersQueryData));
5524  afterTriggers.maxquerydepth = new_alloc;
5525  }
5526 
5527  /* Initialize new array entries to empty */
5528  while (init_depth < afterTriggers.maxquerydepth)
5529  {
5531 
5532  qs->events.head = NULL;
5533  qs->events.tail = NULL;
5534  qs->events.tailfree = NULL;
5535  qs->fdw_tuplestore = NULL;
5536  qs->tables = NIL;
5537 
5538  ++init_depth;
5539  }
5540 }
5541 
5542 /*
5543  * Create an empty SetConstraintState with room for numalloc trigstates
5544  */
5545 static SetConstraintState
/*
 * NOTE(review): extraction artifact — missing original lines include the
 * name/parameter line (5546, taking the int "numalloc" used below), the
 * local "state" declaration (5548), and the allocation call whose size
 * expression survives at 5559-5560 (presumably a zeroing palloc0, per the
 * "zeroing will correctly initialize" comment). Verify against upstream
 * trigger.c.
 */
5547 {
5549 
5550  /* Behave sanely with numalloc == 0 */
5551  if (numalloc <= 0)
5552  numalloc = 1;
5553 
5554  /*
5555  * We assume that zeroing will correctly initialize the state values.
5556  */
5559  offsetof(SetConstraintStateData, trigstates) +
5560  numalloc * sizeof(SetConstraintTriggerData));
5561 
5562  state->numalloc = numalloc;
5563 
5564  return state;
5565 }
5566 
5567 /*
5568  * Copy a SetConstraintState
5569  */
5570 static SetConstraintState
5572 {
5574 
5576 
5577  state->all_isset = origstate->all_isset;
5578  state->all_isdeferred = origstate->all_isdeferred;
5579  state->numstates = origstate->numstates;
5580  memcpy(state->trigstates, origstate->