/*
 * PostgreSQL Source Code (git master) — trigger.c
 * Extracted from the generated source-browser page for this file;
 * see the rendered documentation for cross-referenced symbols.
 */
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/index.h"
26 #include "catalog/indexing.h"
27 #include "catalog/objectaccess.h"
28 #include "catalog/partition.h"
29 #include "catalog/pg_constraint.h"
30 #include "catalog/pg_inherits.h"
31 #include "catalog/pg_proc.h"
32 #include "catalog/pg_trigger.h"
33 #include "catalog/pg_type.h"
34 #include "commands/dbcommands.h"
35 #include "commands/defrem.h"
36 #include "commands/trigger.h"
37 #include "executor/executor.h"
38 #include "executor/execPartition.h"
39 #include "miscadmin.h"
40 #include "nodes/bitmapset.h"
41 #include "nodes/makefuncs.h"
42 #include "optimizer/optimizer.h"
43 #include "parser/parse_clause.h"
44 #include "parser/parse_collate.h"
45 #include "parser/parse_func.h"
46 #include "parser/parse_relation.h"
47 #include "parser/parsetree.h"
48 #include "partitioning/partdesc.h"
49 #include "pgstat.h"
50 #include "rewrite/rewriteManip.h"
51 #include "storage/bufmgr.h"
52 #include "storage/lmgr.h"
53 #include "tcop/utility.h"
54 #include "utils/acl.h"
55 #include "utils/builtins.h"
56 #include "utils/bytea.h"
57 #include "utils/fmgroids.h"
58 #include "utils/inval.h"
59 #include "utils/lsyscache.h"
60 #include "utils/memutils.h"
61 #include "utils/rel.h"
62 #include "utils/snapmgr.h"
63 #include "utils/syscache.h"
64 #include "utils/tuplestore.h"
65 
66 
67 /* GUC variables */
69 
70 /* How many levels deep into trigger execution are we? */
71 static int MyTriggerDepth = 0;
72 
73 /* Local function prototypes */
74 static void renametrig_internal(Relation tgrel, Relation targetrel,
75  HeapTuple trigtup, const char *newname,
76  const char *expected_name);
77 static void renametrig_partition(Relation tgrel, Oid partitionId,
78  Oid parentTriggerOid, const char *newname,
79  const char *expected_name);
80 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
81 static bool GetTupleForTrigger(EState *estate,
82  EPQState *epqstate,
83  ResultRelInfo *relinfo,
84  ItemPointer tid,
85  LockTupleMode lockmode,
86  TupleTableSlot *oldslot,
87  TupleTableSlot **newSlot,
88  TM_FailureData *tmfpd);
89 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
90  Trigger *trigger, TriggerEvent event,
91  Bitmapset *modifiedCols,
92  TupleTableSlot *oldslot, TupleTableSlot *newslot);
94  int tgindx,
95  FmgrInfo *finfo,
96  Instrumentation *instr,
97  MemoryContext per_tuple_context);
98 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
99  ResultRelInfo *src_partinfo,
100  ResultRelInfo *dst_partinfo,
101  int event, bool row_trigger,
102  TupleTableSlot *oldtup, TupleTableSlot *newtup,
103  List *recheckIndexes, Bitmapset *modifiedCols,
104  TransitionCaptureState *transition_capture,
105  bool is_crosspart_update);
106 static void AfterTriggerEnlargeQueryState(void);
107 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
108 
109 
110 /*
111  * Create a trigger. Returns the address of the created trigger.
112  *
113  * queryString is the source text of the CREATE TRIGGER command.
114  * This must be supplied if a whenClause is specified, else it can be NULL.
115  *
116  * relOid, if nonzero, is the relation on which the trigger should be
117  * created. If zero, the name provided in the statement will be looked up.
118  *
119  * refRelOid, if nonzero, is the relation to which the constraint trigger
120  * refers. If zero, the constraint relation name provided in the statement
121  * will be looked up as needed.
122  *
123  * constraintOid, if nonzero, says that this trigger is being created
124  * internally to implement that constraint. A suitable pg_depend entry will
125  * be made to link the trigger to that constraint. constraintOid is zero when
126  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
127  * TRIGGER, we build a pg_constraint entry internally.)
128  *
129  * indexOid, if nonzero, is the OID of an index associated with the constraint.
130  * We do nothing with this except store it into pg_trigger.tgconstrindid;
131  * but when creating a trigger for a deferrable unique constraint on a
132  * partitioned table, its children are looked up. Note we don't cope with
133  * invalid indexes in that case.
134  *
135  * funcoid, if nonzero, is the OID of the function to invoke. When this is
136  * given, stmt->funcname is ignored.
137  *
138  * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
139  * if that trigger is dropped, this one should be too. There are two cases
140  * when a nonzero value is passed for this: 1) when this function recurses to
141  * create the trigger on partitions, 2) when creating child foreign key
142  * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers().
143  *
144  * If whenClause is passed, it is an already-transformed expression for
145  * WHEN. In this case, we ignore any that may come in stmt->whenClause.
146  *
147  * If isInternal is true then this is an internally-generated trigger.
148  * This argument sets the tgisinternal field of the pg_trigger entry, and
149  * if true causes us to modify the given trigger name to ensure uniqueness.
150  *
151  * When isInternal is not true we require ACL_TRIGGER permissions on the
152  * relation, as well as ACL_EXECUTE on the trigger function. For internal
153  * triggers the caller must apply any required permission checks.
154  *
155  * When called on partitioned tables, this function recurses to create the
156  * trigger on all the partitions, except if isInternal is true, in which
157  * case caller is expected to execute recursion on its own. in_partition
158  * indicates such a recursive call; outside callers should pass "false"
159  * (but see CloneRowTriggersToPartition).
160  */
162 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
163  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
164  Oid funcoid, Oid parentTriggerOid, Node *whenClause,
165  bool isInternal, bool in_partition)
166 {
167  return
168  CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
169  constraintOid, indexOid, funcoid,
170  parentTriggerOid, whenClause, isInternal,
171  in_partition, TRIGGER_FIRES_ON_ORIGIN);
172 }
173 
174 /*
175  * Like the above; additionally the firing condition
176  * (always/origin/replica/disabled) can be specified.
177  */
179 CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
180  Oid relOid, Oid refRelOid, Oid constraintOid,
181  Oid indexOid, Oid funcoid, Oid parentTriggerOid,
182  Node *whenClause, bool isInternal, bool in_partition,
183  char trigger_fires_when)
184 {
185  int16 tgtype;
186  int ncolumns;
187  int16 *columns;
188  int2vector *tgattr;
189  List *whenRtable;
190  char *qual;
191  Datum values[Natts_pg_trigger];
192  bool nulls[Natts_pg_trigger];
193  Relation rel;
194  AclResult aclresult;
195  Relation tgrel;
196  Relation pgrel;
197  HeapTuple tuple = NULL;
198  Oid funcrettype;
199  Oid trigoid = InvalidOid;
200  char internaltrigname[NAMEDATALEN];
201  char *trigname;
202  Oid constrrelid = InvalidOid;
203  ObjectAddress myself,
204  referenced;
205  char *oldtablename = NULL;
206  char *newtablename = NULL;
207  bool partition_recurse;
208  bool trigger_exists = false;
209  Oid existing_constraint_oid = InvalidOid;
210  bool existing_isInternal = false;
211  bool existing_isClone = false;
212 
213  if (OidIsValid(relOid))
214  rel = table_open(relOid, ShareRowExclusiveLock);
215  else
217 
218  /*
219  * Triggers must be on tables or views, and there are additional
220  * relation-type-specific restrictions.
221  */
222  if (rel->rd_rel->relkind == RELKIND_RELATION)
223  {
224  /* Tables can't have INSTEAD OF triggers */
225  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
226  stmt->timing != TRIGGER_TYPE_AFTER)
227  ereport(ERROR,
228  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
229  errmsg("\"%s\" is a table",
231  errdetail("Tables cannot have INSTEAD OF triggers.")));
232  }
233  else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
234  {
235  /* Partitioned tables can't have INSTEAD OF triggers */
236  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
237  stmt->timing != TRIGGER_TYPE_AFTER)
238  ereport(ERROR,
239  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
240  errmsg("\"%s\" is a table",
242  errdetail("Tables cannot have INSTEAD OF triggers.")));
243 
244  /*
245  * FOR EACH ROW triggers have further restrictions
246  */
247  if (stmt->row)
248  {
249  /*
250  * Disallow use of transition tables.
251  *
252  * Note that we have another restriction about transition tables
253  * in partitions; search for 'has_superclass' below for an
254  * explanation. The check here is just to protect from the fact
255  * that if we allowed it here, the creation would succeed for a
256  * partitioned table with no partitions, but would be blocked by
257  * the other restriction when the first partition was created,
258  * which is very unfriendly behavior.
259  */
260  if (stmt->transitionRels != NIL)
261  ereport(ERROR,
262  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
263  errmsg("\"%s\" is a partitioned table",
265  errdetail("Triggers on partitioned tables cannot have transition tables.")));
266  }
267  }
268  else if (rel->rd_rel->relkind == RELKIND_VIEW)
269  {
270  /*
271  * Views can have INSTEAD OF triggers (which we check below are
272  * row-level), or statement-level BEFORE/AFTER triggers.
273  */
274  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
275  ereport(ERROR,
276  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
277  errmsg("\"%s\" is a view",
279  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
280  /* Disallow TRUNCATE triggers on VIEWs */
281  if (TRIGGER_FOR_TRUNCATE(stmt->events))
282  ereport(ERROR,
283  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
284  errmsg("\"%s\" is a view",
286  errdetail("Views cannot have TRUNCATE triggers.")));
287  }
288  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
289  {
290  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
291  stmt->timing != TRIGGER_TYPE_AFTER)
292  ereport(ERROR,
293  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
294  errmsg("\"%s\" is a foreign table",
296  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
297 
298  if (TRIGGER_FOR_TRUNCATE(stmt->events))
299  ereport(ERROR,
300  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
301  errmsg("\"%s\" is a foreign table",
303  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
304 
305  /*
306  * We disallow constraint triggers to protect the assumption that
307  * triggers on FKs can't be deferred. See notes with AfterTriggers
308  * data structures, below.
309  */
310  if (stmt->isconstraint)
311  ereport(ERROR,
312  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
313  errmsg("\"%s\" is a foreign table",
315  errdetail("Foreign tables cannot have constraint triggers.")));
316  }
317  else
318  ereport(ERROR,
319  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
320  errmsg("relation \"%s\" cannot have triggers",
322  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
323 
325  ereport(ERROR,
326  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
327  errmsg("permission denied: \"%s\" is a system catalog",
328  RelationGetRelationName(rel))));
329 
330  if (stmt->isconstraint)
331  {
332  /*
333  * We must take a lock on the target relation to protect against
334  * concurrent drop. It's not clear that AccessShareLock is strong
335  * enough, but we certainly need at least that much... otherwise, we
336  * might end up creating a pg_constraint entry referencing a
337  * nonexistent table.
338  */
339  if (OidIsValid(refRelOid))
340  {
341  LockRelationOid(refRelOid, AccessShareLock);
342  constrrelid = refRelOid;
343  }
344  else if (stmt->constrrel != NULL)
345  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
346  false);
347  }
348 
349  /* permission checks */
350  if (!isInternal)
351  {
352  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
353  ACL_TRIGGER);
354  if (aclresult != ACLCHECK_OK)
355  aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
357 
358  if (OidIsValid(constrrelid))
359  {
360  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
361  ACL_TRIGGER);
362  if (aclresult != ACLCHECK_OK)
363  aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
364  get_rel_name(constrrelid));
365  }
366  }
367 
368  /*
369  * When called on a partitioned table to create a FOR EACH ROW trigger
370  * that's not internal, we create one trigger for each partition, too.
371  *
372  * For that, we'd better hold lock on all of them ahead of time.
373  */
374  partition_recurse = !isInternal && stmt->row &&
375  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
376  if (partition_recurse)
378  ShareRowExclusiveLock, NULL));
379 
380  /* Compute tgtype */
381  TRIGGER_CLEAR_TYPE(tgtype);
382  if (stmt->row)
383  TRIGGER_SETT_ROW(tgtype);
384  tgtype |= stmt->timing;
385  tgtype |= stmt->events;
386 
387  /* Disallow ROW-level TRUNCATE triggers */
388  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
389  ereport(ERROR,
390  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
391  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
392 
393  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
394  if (TRIGGER_FOR_INSTEAD(tgtype))
395  {
396  if (!TRIGGER_FOR_ROW(tgtype))
397  ereport(ERROR,
398  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
399  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
400  if (stmt->whenClause)
401  ereport(ERROR,
402  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
403  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
404  if (stmt->columns != NIL)
405  ereport(ERROR,
406  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
407  errmsg("INSTEAD OF triggers cannot have column lists")));
408  }
409 
410  /*
411  * We don't yet support naming ROW transition variables, but the parser
412  * recognizes the syntax so we can give a nicer message here.
413  *
414  * Per standard, REFERENCING TABLE names are only allowed on AFTER
415  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
416  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
417  * only allowed once. Per standard, OLD may not be specified when
418  * creating a trigger only for INSERT, and NEW may not be specified when
419  * creating a trigger only for DELETE.
420  *
421  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
422  * reference both ROW and TABLE transition data.
423  */
424  if (stmt->transitionRels != NIL)
425  {
426  List *varList = stmt->transitionRels;
427  ListCell *lc;
428 
429  foreach(lc, varList)
430  {
432 
433  if (!(tt->isTable))
434  ereport(ERROR,
435  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
436  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
437  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
438 
439  /*
440  * Because of the above test, we omit further ROW-related testing
441  * below. If we later allow naming OLD and NEW ROW variables,
442  * adjustments will be needed below.
443  */
444 
445  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
446  ereport(ERROR,
447  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
448  errmsg("\"%s\" is a foreign table",
450  errdetail("Triggers on foreign tables cannot have transition tables.")));
451 
452  if (rel->rd_rel->relkind == RELKIND_VIEW)
453  ereport(ERROR,
454  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
455  errmsg("\"%s\" is a view",
457  errdetail("Triggers on views cannot have transition tables.")));
458 
459  /*
460  * We currently don't allow row-level triggers with transition
461  * tables on partition or inheritance children. Such triggers
462  * would somehow need to see tuples converted to the format of the
463  * table they're attached to, and it's not clear which subset of
464  * tuples each child should see. See also the prohibitions in
465  * ATExecAttachPartition() and ATExecAddInherit().
466  */
467  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
468  {
469  /* Use appropriate error message. */
470  if (rel->rd_rel->relispartition)
471  ereport(ERROR,
472  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
473  errmsg("ROW triggers with transition tables are not supported on partitions")));
474  else
475  ereport(ERROR,
476  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
477  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
478  }
479 
480  if (stmt->timing != TRIGGER_TYPE_AFTER)
481  ereport(ERROR,
482  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
483  errmsg("transition table name can only be specified for an AFTER trigger")));
484 
485  if (TRIGGER_FOR_TRUNCATE(tgtype))
486  ereport(ERROR,
487  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
488  errmsg("TRUNCATE triggers with transition tables are not supported")));
489 
490  /*
491  * We currently don't allow multi-event triggers ("INSERT OR
492  * UPDATE") with transition tables, because it's not clear how to
493  * handle INSERT ... ON CONFLICT statements which can fire both
494  * INSERT and UPDATE triggers. We show the inserted tuples to
495  * INSERT triggers and the updated tuples to UPDATE triggers, but
496  * it's not yet clear what INSERT OR UPDATE trigger should see.
497  * This restriction could be lifted if we can decide on the right
498  * semantics in a later release.
499  */
500  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
501  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
502  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
503  ereport(ERROR,
504  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
505  errmsg("transition tables cannot be specified for triggers with more than one event")));
506 
507  /*
508  * We currently don't allow column-specific triggers with
509  * transition tables. Per spec, that seems to require
510  * accumulating separate transition tables for each combination of
511  * columns, which is a lot of work for a rather marginal feature.
512  */
513  if (stmt->columns != NIL)
514  ereport(ERROR,
515  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
516  errmsg("transition tables cannot be specified for triggers with column lists")));
517 
518  /*
519  * We disallow constraint triggers with transition tables, to
520  * protect the assumption that such triggers can't be deferred.
521  * See notes with AfterTriggers data structures, below.
522  *
523  * Currently this is enforced by the grammar, so just Assert here.
524  */
525  Assert(!stmt->isconstraint);
526 
527  if (tt->isNew)
528  {
529  if (!(TRIGGER_FOR_INSERT(tgtype) ||
530  TRIGGER_FOR_UPDATE(tgtype)))
531  ereport(ERROR,
532  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
533  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
534 
535  if (newtablename != NULL)
536  ereport(ERROR,
537  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
538  errmsg("NEW TABLE cannot be specified multiple times")));
539 
540  newtablename = tt->name;
541  }
542  else
543  {
544  if (!(TRIGGER_FOR_DELETE(tgtype) ||
545  TRIGGER_FOR_UPDATE(tgtype)))
546  ereport(ERROR,
547  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
548  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
549 
550  if (oldtablename != NULL)
551  ereport(ERROR,
552  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
553  errmsg("OLD TABLE cannot be specified multiple times")));
554 
555  oldtablename = tt->name;
556  }
557  }
558 
559  if (newtablename != NULL && oldtablename != NULL &&
560  strcmp(newtablename, oldtablename) == 0)
561  ereport(ERROR,
562  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
563  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
564  }
565 
566  /*
567  * Parse the WHEN clause, if any and we weren't passed an already
568  * transformed one.
569  *
570  * Note that as a side effect, we fill whenRtable when parsing. If we got
571  * an already parsed clause, this does not occur, which is what we want --
572  * no point in adding redundant dependencies below.
573  */
574  if (!whenClause && stmt->whenClause)
575  {
576  ParseState *pstate;
577  ParseNamespaceItem *nsitem;
578  List *varList;
579  ListCell *lc;
580 
581  /* Set up a pstate to parse with */
582  pstate = make_parsestate(NULL);
583  pstate->p_sourcetext = queryString;
584 
585  /*
586  * Set up nsitems for OLD and NEW references.
587  *
588  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
589  */
590  nsitem = addRangeTableEntryForRelation(pstate, rel,
592  makeAlias("old", NIL),
593  false, false);
594  addNSItemToQuery(pstate, nsitem, false, true, true);
595  nsitem = addRangeTableEntryForRelation(pstate, rel,
597  makeAlias("new", NIL),
598  false, false);
599  addNSItemToQuery(pstate, nsitem, false, true, true);
600 
601  /* Transform expression. Copy to be sure we don't modify original */
602  whenClause = transformWhereClause(pstate,
603  copyObject(stmt->whenClause),
605  "WHEN");
606  /* we have to fix its collations too */
607  assign_expr_collations(pstate, whenClause);
608 
609  /*
610  * Check for disallowed references to OLD/NEW.
611  *
612  * NB: pull_var_clause is okay here only because we don't allow
613  * subselects in WHEN clauses; it would fail to examine the contents
614  * of subselects.
615  */
616  varList = pull_var_clause(whenClause, 0);
617  foreach(lc, varList)
618  {
619  Var *var = (Var *) lfirst(lc);
620 
621  switch (var->varno)
622  {
623  case PRS2_OLD_VARNO:
624  if (!TRIGGER_FOR_ROW(tgtype))
625  ereport(ERROR,
626  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
627  errmsg("statement trigger's WHEN condition cannot reference column values"),
628  parser_errposition(pstate, var->location)));
629  if (TRIGGER_FOR_INSERT(tgtype))
630  ereport(ERROR,
631  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
632  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
633  parser_errposition(pstate, var->location)));
634  /* system columns are okay here */
635  break;
636  case PRS2_NEW_VARNO:
637  if (!TRIGGER_FOR_ROW(tgtype))
638  ereport(ERROR,
639  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
640  errmsg("statement trigger's WHEN condition cannot reference column values"),
641  parser_errposition(pstate, var->location)));
642  if (TRIGGER_FOR_DELETE(tgtype))
643  ereport(ERROR,
644  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
645  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
646  parser_errposition(pstate, var->location)));
647  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
648  ereport(ERROR,
649  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
650  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
651  parser_errposition(pstate, var->location)));
652  if (TRIGGER_FOR_BEFORE(tgtype) &&
653  var->varattno == 0 &&
654  RelationGetDescr(rel)->constr &&
655  RelationGetDescr(rel)->constr->has_generated_stored)
656  ereport(ERROR,
657  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
658  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
659  errdetail("A whole-row reference is used and the table contains generated columns."),
660  parser_errposition(pstate, var->location)));
661  if (TRIGGER_FOR_BEFORE(tgtype) &&
662  var->varattno > 0 &&
663  TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
664  ereport(ERROR,
665  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
666  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
667  errdetail("Column \"%s\" is a generated column.",
668  NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
669  parser_errposition(pstate, var->location)));
670  break;
671  default:
672  /* can't happen without add_missing_from, so just elog */
673  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
674  break;
675  }
676  }
677 
678  /* we'll need the rtable for recordDependencyOnExpr */
679  whenRtable = pstate->p_rtable;
680 
681  qual = nodeToString(whenClause);
682 
683  free_parsestate(pstate);
684  }
685  else if (!whenClause)
686  {
687  whenClause = NULL;
688  whenRtable = NIL;
689  qual = NULL;
690  }
691  else
692  {
693  qual = nodeToString(whenClause);
694  whenRtable = NIL;
695  }
696 
697  /*
698  * Find and validate the trigger function.
699  */
700  if (!OidIsValid(funcoid))
701  funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
702  if (!isInternal)
703  {
704  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
705  if (aclresult != ACLCHECK_OK)
706  aclcheck_error(aclresult, OBJECT_FUNCTION,
707  NameListToString(stmt->funcname));
708  }
709  funcrettype = get_func_rettype(funcoid);
710  if (funcrettype != TRIGGEROID)
711  ereport(ERROR,
712  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
713  errmsg("function %s must return type %s",
714  NameListToString(stmt->funcname), "trigger")));
715 
716  /*
717  * Scan pg_trigger to see if there is already a trigger of the same name.
718  * Skip this for internally generated triggers, since we'll modify the
719  * name to be unique below.
720  *
721  * NOTE that this is cool only because we have ShareRowExclusiveLock on
722  * the relation, so the trigger set won't be changing underneath us.
723  */
724  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
725  if (!isInternal)
726  {
727  ScanKeyData skeys[2];
728  SysScanDesc tgscan;
729 
730  ScanKeyInit(&skeys[0],
731  Anum_pg_trigger_tgrelid,
732  BTEqualStrategyNumber, F_OIDEQ,
734 
735  ScanKeyInit(&skeys[1],
736  Anum_pg_trigger_tgname,
737  BTEqualStrategyNumber, F_NAMEEQ,
738  CStringGetDatum(stmt->trigname));
739 
740  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
741  NULL, 2, skeys);
742 
743  /* There should be at most one matching tuple */
744  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
745  {
746  Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
747 
748  trigoid = oldtrigger->oid;
749  existing_constraint_oid = oldtrigger->tgconstraint;
750  existing_isInternal = oldtrigger->tgisinternal;
751  existing_isClone = OidIsValid(oldtrigger->tgparentid);
752  trigger_exists = true;
753  /* copy the tuple to use in CatalogTupleUpdate() */
754  tuple = heap_copytuple(tuple);
755  }
756  systable_endscan(tgscan);
757  }
758 
759  if (!trigger_exists)
760  {
761  /* Generate the OID for the new trigger. */
762  trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
763  Anum_pg_trigger_oid);
764  }
765  else
766  {
767  /*
768  * If OR REPLACE was specified, we'll replace the old trigger;
769  * otherwise complain about the duplicate name.
770  */
771  if (!stmt->replace)
772  ereport(ERROR,
774  errmsg("trigger \"%s\" for relation \"%s\" already exists",
775  stmt->trigname, RelationGetRelationName(rel))));
776 
777  /*
778  * An internal trigger or a child trigger (isClone) cannot be replaced
779  * by a user-defined trigger. However, skip this test when
780  * in_partition, because then we're recursing from a partitioned table
781  * and the check was made at the parent level.
782  */
783  if ((existing_isInternal || existing_isClone) &&
784  !isInternal && !in_partition)
785  ereport(ERROR,
787  errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger",
788  stmt->trigname, RelationGetRelationName(rel))));
789 
790  /*
791  * It is not allowed to replace with a constraint trigger; gram.y
792  * should have enforced this already.
793  */
794  Assert(!stmt->isconstraint);
795 
796  /*
797  * It is not allowed to replace an existing constraint trigger,
798  * either. (The reason for these restrictions is partly that it seems
799  * difficult to deal with pending trigger events in such cases, and
800  * partly that the command might imply changing the constraint's
801  * properties as well, which doesn't seem nice.)
802  */
803  if (OidIsValid(existing_constraint_oid))
804  ereport(ERROR,
806  errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
807  stmt->trigname, RelationGetRelationName(rel))));
808  }
809 
810  /*
811  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
812  * corresponding pg_constraint entry.
813  */
814  if (stmt->isconstraint && !OidIsValid(constraintOid))
815  {
816  /* Internal callers should have made their own constraints */
817  Assert(!isInternal);
818  constraintOid = CreateConstraintEntry(stmt->trigname,
820  CONSTRAINT_TRIGGER,
821  stmt->deferrable,
822  stmt->initdeferred,
823  true,
824  InvalidOid, /* no parent */
825  RelationGetRelid(rel),
826  NULL, /* no conkey */
827  0,
828  0,
829  InvalidOid, /* no domain */
830  InvalidOid, /* no index */
831  InvalidOid, /* no foreign key */
832  NULL,
833  NULL,
834  NULL,
835  NULL,
836  0,
837  ' ',
838  ' ',
839  NULL,
840  0,
841  ' ',
842  NULL, /* no exclusion */
843  NULL, /* no check constraint */
844  NULL,
845  true, /* islocal */
846  0, /* inhcount */
847  true, /* noinherit */
848  isInternal); /* is_internal */
849  }
850 
851  /*
852  * If trigger is internally generated, modify the provided trigger name to
853  * ensure uniqueness by appending the trigger OID. (Callers will usually
854  * supply a simple constant trigger name in these cases.)
855  */
856  if (isInternal)
857  {
858  snprintf(internaltrigname, sizeof(internaltrigname),
859  "%s_%u", stmt->trigname, trigoid);
860  trigname = internaltrigname;
861  }
862  else
863  {
864  /* user-defined trigger; use the specified trigger name as-is */
865  trigname = stmt->trigname;
866  }
867 
868  /*
869  * Build the new pg_trigger tuple.
870  *
871  * When we're creating a trigger in a partition, we mark it as internal,
872  * even though we don't do the isInternal magic in this function. This
873  * makes the triggers in partitions identical to the ones in the
874  * partitioned tables, except that they are marked internal.
875  */
876  memset(nulls, false, sizeof(nulls));
877 
878  values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
879  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
880  values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
881  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
882  CStringGetDatum(trigname));
883  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
884  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
885  values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
886  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
887  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
888  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
889  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
890  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
891  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
892 
893  if (stmt->args)
894  {
895  ListCell *le;
896  char *args;
897  int16 nargs = list_length(stmt->args);
898  int len = 0;
899 
900  foreach(le, stmt->args)
901  {
902  char *ar = strVal(lfirst(le));
903 
904  len += strlen(ar) + 4;
905  for (; *ar; ar++)
906  {
907  if (*ar == '\\')
908  len++;
909  }
910  }
911  args = (char *) palloc(len + 1);
912  args[0] = '\0';
913  foreach(le, stmt->args)
914  {
915  char *s = strVal(lfirst(le));
916  char *d = args + strlen(args);
917 
918  while (*s)
919  {
920  if (*s == '\\')
921  *d++ = '\\';
922  *d++ = *s++;
923  }
924  strcpy(d, "\\000");
925  }
926  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
927  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
929  }
930  else
931  {
932  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
933  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
934  CStringGetDatum(""));
935  }
936 
937  /* build column number array if it's a column-specific trigger */
938  ncolumns = list_length(stmt->columns);
939  if (ncolumns == 0)
940  columns = NULL;
941  else
942  {
943  ListCell *cell;
944  int i = 0;
945 
946  columns = (int16 *) palloc(ncolumns * sizeof(int16));
947  foreach(cell, stmt->columns)
948  {
949  char *name = strVal(lfirst(cell));
950  int16 attnum;
951  int j;
952 
953  /* Lookup column name. System columns are not allowed */
954  attnum = attnameAttNum(rel, name, false);
955  if (attnum == InvalidAttrNumber)
956  ereport(ERROR,
957  (errcode(ERRCODE_UNDEFINED_COLUMN),
958  errmsg("column \"%s\" of relation \"%s\" does not exist",
959  name, RelationGetRelationName(rel))));
960 
961  /* Check for duplicates */
962  for (j = i - 1; j >= 0; j--)
963  {
964  if (columns[j] == attnum)
965  ereport(ERROR,
966  (errcode(ERRCODE_DUPLICATE_COLUMN),
967  errmsg("column \"%s\" specified more than once",
968  name)));
969  }
970 
971  columns[i++] = attnum;
972  }
973  }
974  tgattr = buildint2vector(columns, ncolumns);
975  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
976 
977  /* set tgqual if trigger has WHEN clause */
978  if (qual)
979  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
980  else
981  nulls[Anum_pg_trigger_tgqual - 1] = true;
982 
983  if (oldtablename)
984  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
985  CStringGetDatum(oldtablename));
986  else
987  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
988  if (newtablename)
989  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
990  CStringGetDatum(newtablename));
991  else
992  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
993 
994  /*
995  * Insert or replace tuple in pg_trigger.
996  */
997  if (!trigger_exists)
998  {
999  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
1000  CatalogTupleInsert(tgrel, tuple);
1001  }
1002  else
1003  {
1004  HeapTuple newtup;
1005 
1006  newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
1007  CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
1008  heap_freetuple(newtup);
1009  }
1010 
1011  heap_freetuple(tuple); /* free either original or new tuple */
1012  table_close(tgrel, RowExclusiveLock);
1013 
1014  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
1015  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
1016  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
1017  if (oldtablename)
1018  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1019  if (newtablename)
1020  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1021 
1022  /*
1023  * Update relation's pg_class entry; if necessary; and if not, send an SI
1024  * message to make other backends (and this one) rebuild relcache entries.
1025  */
1026  pgrel = table_open(RelationRelationId, RowExclusiveLock);
1027  tuple = SearchSysCacheCopy1(RELOID,
1029  if (!HeapTupleIsValid(tuple))
1030  elog(ERROR, "cache lookup failed for relation %u",
1031  RelationGetRelid(rel));
1032  if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1033  {
1034  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1035 
1036  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1037 
1039  }
1040  else
1042 
1043  heap_freetuple(tuple);
1044  table_close(pgrel, RowExclusiveLock);
1045 
1046  /*
1047  * If we're replacing a trigger, flush all the old dependencies before
1048  * recording new ones.
1049  */
1050  if (trigger_exists)
1051  deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1052 
1053  /*
1054  * Record dependencies for trigger. Always place a normal dependency on
1055  * the function.
1056  */
1057  myself.classId = TriggerRelationId;
1058  myself.objectId = trigoid;
1059  myself.objectSubId = 0;
1060 
1061  referenced.classId = ProcedureRelationId;
1062  referenced.objectId = funcoid;
1063  referenced.objectSubId = 0;
1064  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1065 
1066  if (isInternal && OidIsValid(constraintOid))
1067  {
1068  /*
1069  * Internally-generated trigger for a constraint, so make it an
1070  * internal dependency of the constraint. We can skip depending on
1071  * the relation(s), as there'll be an indirect dependency via the
1072  * constraint.
1073  */
1074  referenced.classId = ConstraintRelationId;
1075  referenced.objectId = constraintOid;
1076  referenced.objectSubId = 0;
1077  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1078  }
1079  else
1080  {
1081  /*
1082  * User CREATE TRIGGER, so place dependencies. We make trigger be
1083  * auto-dropped if its relation is dropped or if the FK relation is
1084  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1085  */
1086  referenced.classId = RelationRelationId;
1087  referenced.objectId = RelationGetRelid(rel);
1088  referenced.objectSubId = 0;
1089  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1090 
1091  if (OidIsValid(constrrelid))
1092  {
1093  referenced.classId = RelationRelationId;
1094  referenced.objectId = constrrelid;
1095  referenced.objectSubId = 0;
1096  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1097  }
1098  /* Not possible to have an index dependency in this case */
1099  Assert(!OidIsValid(indexOid));
1100 
1101  /*
1102  * If it's a user-specified constraint trigger, make the constraint
1103  * internally dependent on the trigger instead of vice versa.
1104  */
1105  if (OidIsValid(constraintOid))
1106  {
1107  referenced.classId = ConstraintRelationId;
1108  referenced.objectId = constraintOid;
1109  referenced.objectSubId = 0;
1110  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1111  }
1112 
1113  /*
1114  * If it's a partition trigger, create the partition dependencies.
1115  */
1116  if (OidIsValid(parentTriggerOid))
1117  {
1118  ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1119  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1120  ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1121  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1122  }
1123  }
1124 
1125  /* If column-specific trigger, add normal dependencies on columns */
1126  if (columns != NULL)
1127  {
1128  int i;
1129 
1130  referenced.classId = RelationRelationId;
1131  referenced.objectId = RelationGetRelid(rel);
1132  for (i = 0; i < ncolumns; i++)
1133  {
1134  referenced.objectSubId = columns[i];
1135  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1136  }
1137  }
1138 
1139  /*
1140  * If it has a WHEN clause, add dependencies on objects mentioned in the
1141  * expression (eg, functions, as well as any columns used).
1142  */
1143  if (whenRtable != NIL)
1144  recordDependencyOnExpr(&myself, whenClause, whenRtable,
1146 
1147  /* Post creation hook for new trigger */
1148  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1149  isInternal);
1150 
1151  /*
1152  * Lastly, create the trigger on child relations, if needed.
1153  */
1154  if (partition_recurse)
1155  {
1156  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1157  List *idxs = NIL;
1158  List *childTbls = NIL;
1159  ListCell *l;
1160  int i;
1161  MemoryContext oldcxt,
1162  perChildCxt;
1163 
1165  "part trig clone",
1167 
1168  /*
1169  * When a trigger is being created associated with an index, we'll
1170  * need to associate the trigger in each child partition with the
1171  * corresponding index on it.
1172  */
1173  if (OidIsValid(indexOid))
1174  {
1175  ListCell *l;
1176  List *idxs = NIL;
1177 
1179  foreach(l, idxs)
1180  childTbls = lappend_oid(childTbls,
1182  false));
1183  }
1184 
1185  oldcxt = MemoryContextSwitchTo(perChildCxt);
1186 
1187  /* Iterate to create the trigger on each existing partition */
1188  for (i = 0; i < partdesc->nparts; i++)
1189  {
1190  Oid indexOnChild = InvalidOid;
1191  ListCell *l2;
1192  CreateTrigStmt *childStmt;
1193  Relation childTbl;
1194  Node *qual;
1195 
1196  childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1197 
1198  /* Find which of the child indexes is the one on this partition */
1199  if (OidIsValid(indexOid))
1200  {
1201  forboth(l, idxs, l2, childTbls)
1202  {
1203  if (lfirst_oid(l2) == partdesc->oids[i])
1204  {
1205  indexOnChild = lfirst_oid(l);
1206  break;
1207  }
1208  }
1209  if (!OidIsValid(indexOnChild))
1210  elog(ERROR, "failed to find index matching index \"%s\" in partition \"%s\"",
1211  get_rel_name(indexOid),
1212  get_rel_name(partdesc->oids[i]));
1213  }
1214 
1215  /*
1216  * Initialize our fabricated parse node by copying the original
1217  * one, then resetting fields that we pass separately.
1218  */
1219  childStmt = (CreateTrigStmt *) copyObject(stmt);
1220  childStmt->funcname = NIL;
1221  childStmt->whenClause = NULL;
1222 
1223  /* If there is a WHEN clause, create a modified copy of it */
1224  qual = copyObject(whenClause);
1225  qual = (Node *)
1227  childTbl, rel);
1228  qual = (Node *)
1230  childTbl, rel);
1231 
1232  CreateTriggerFiringOn(childStmt, queryString,
1233  partdesc->oids[i], refRelOid,
1234  InvalidOid, indexOnChild,
1235  funcoid, trigoid, qual,
1236  isInternal, true, trigger_fires_when);
1237 
1238  table_close(childTbl, NoLock);
1239 
1240  MemoryContextReset(perChildCxt);
1241  }
1242 
1243  MemoryContextSwitchTo(oldcxt);
1244  MemoryContextDelete(perChildCxt);
1245  list_free(idxs);
1246  list_free(childTbls);
1247  }
1248 
1249  /* Keep lock on target rel until end of xact */
1250  table_close(rel, NoLock);
1251 
1252  return myself;
1253 }
1254 
1255 /*
1256  * TriggerSetParentTrigger
1257  * Set a partition's trigger as child of its parent trigger,
1258  * or remove the linkage if parentTrigId is InvalidOid.
1259  *
1260  * This updates the constraint's pg_trigger row to show it as inherited, and
1261  * adds PARTITION dependencies to prevent the trigger from being deleted
1262  * on its own. Alternatively, reverse that.
1263  */
1264 void
/* NOTE(review): original line 1265, the function-name line with the first
 * parameter (presumably "TriggerSetParentTrigger(Relation trigRel," given
 * the uses of trigRel below), is missing from this extract -- confirm
 * against upstream src/backend/commands/trigger.c. */
1266  Oid childTrigId,
1267  Oid parentTrigId,
1268  Oid childTableId)
1269 {
1270  SysScanDesc tgscan;
1271  ScanKeyData skey[1];
1272  Form_pg_trigger trigForm;
1273  HeapTuple tuple,
1274  newtup;
1275  ObjectAddress depender;
1276  ObjectAddress referenced;
1277 
1278  /*
1279  * Find the trigger to delete.
1280  */
1281  ScanKeyInit(&skey[0],
1282  Anum_pg_trigger_oid,
1283  BTEqualStrategyNumber, F_OIDEQ,
1284  ObjectIdGetDatum(childTrigId));
1285 
1286  tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true,
1287  NULL, 1, skey);
1288 
/* The OID index is unique, so at most one row can match. */
1289  tuple = systable_getnext(tgscan);
1290  if (!HeapTupleIsValid(tuple))
1291  elog(ERROR, "could not find tuple for trigger %u", childTrigId);
/* Work on a modifiable copy of the pg_trigger row; the scan tuple is
 * read-only. */
1292  newtup = heap_copytuple(tuple);
1293  trigForm = (Form_pg_trigger) GETSTRUCT(newtup);
1294  if (OidIsValid(parentTrigId))
1295  {
1296  /* don't allow setting parent for a constraint that already has one */
1297  if (OidIsValid(trigForm->tgparentid))
1298  elog(ERROR, "trigger %u already has a parent trigger",
1299  childTrigId);
1300 
1301  trigForm->tgparentid = parentTrigId;
1302 
1303  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1304 
/* Attach case: the child trigger now depends on its parent trigger and on
 * the partition itself, so it cannot be dropped independently. */
1305  ObjectAddressSet(depender, TriggerRelationId, childTrigId);
1306 
1307  ObjectAddressSet(referenced, TriggerRelationId, parentTrigId);
1308  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI);
1309 
1310  ObjectAddressSet(referenced, RelationRelationId, childTableId);
1311  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC);
1312  }
1313  else
1314  {
/* Detach case: clear the parent link and remove the partition
 * dependency records added above. */
1315  trigForm->tgparentid = InvalidOid;
1316 
1317  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1318 
1319  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1320  TriggerRelationId,
/* NOTE(review): original lines 1321 and 1324, the final dependency-type
 * arguments of these two calls (presumably DEPENDENCY_PARTITION_PRI and
 * DEPENDENCY_PARTITION_SEC, mirroring the attach case), are missing from
 * this extract -- confirm against upstream. */
1322  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1323  RelationRelationId,
1325  }
1326 
1327  heap_freetuple(newtup);
1328  systable_endscan(tgscan);
1329 }
1330 
1331 
1332 /*
1333  * Guts of trigger deletion.
1334  */
1335 void
/* NOTE(review): original line 1336, the function-name line (presumably
 * "RemoveTriggerById(Oid trigOid)" given the uses of trigOid below), is
 * missing from this extract -- confirm against upstream trigger.c. */
1337 {
1338  Relation tgrel;
1339  SysScanDesc tgscan;
1340  ScanKeyData skey[1];
1341  HeapTuple tup;
1342  Oid relid;
1343  Relation rel;
1344 
1345  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1346 
1347  /*
1348  * Find the trigger to delete.
1349  */
1350  ScanKeyInit(&skey[0],
1351  Anum_pg_trigger_oid,
1352  BTEqualStrategyNumber, F_OIDEQ,
1353  ObjectIdGetDatum(trigOid));
1354 
1355  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1356  NULL, 1, skey);
1357 
1358  tup = systable_getnext(tgscan);
1359  if (!HeapTupleIsValid(tup))
1360  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1361 
1362  /*
1363  * Open and exclusive-lock the relation the trigger belongs to.
1364  */
1365  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1366 
1367  rel = table_open(relid, AccessExclusiveLock);
1368 
/* Sanity check: only these relkinds can carry triggers. */
1369  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1370  rel->rd_rel->relkind != RELKIND_VIEW &&
1371  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1372  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1373  ereport(ERROR,
1374  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1375  errmsg("relation \"%s\" cannot have triggers",
/* NOTE(review): original line 1376 (the errmsg argument, presumably
 * "RelationGetRelationName(rel)),") is missing from this extract. */
1377  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
1378 
/* NOTE(review): original line 1379, the condition guarding the following
 * ereport (presumably an allowSystemTableMods / IsSystemClass check, given
 * the "system catalog" message), is missing from this extract -- confirm
 * against upstream. */
1380  ereport(ERROR,
1381  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1382  errmsg("permission denied: \"%s\" is a system catalog",
1383  RelationGetRelationName(rel))));
1384 
1385  /*
1386  * Delete the pg_trigger tuple.
1387  */
1388  CatalogTupleDelete(tgrel, &tup->t_self);
1389 
1390  systable_endscan(tgscan);
1391  table_close(tgrel, RowExclusiveLock);
1392 
1393  /*
1394  * We do not bother to try to determine whether any other triggers remain,
1395  * which would be needed in order to decide whether it's safe to clear the
1396  * relation's relhastriggers. (In any case, there might be a concurrent
1397  * process adding new triggers.) Instead, just force a relcache inval to
1398  * make other backends (and this one too!) rebuild their relcache entries.
1399  * There's no great harm in leaving relhastriggers true even if there are
1400  * no triggers left.
1401  */
/* NOTE(review): original line 1402 (presumably "CacheInvalidateRelcache(rel);",
 * per the comment above) is missing from this extract. */
1403 
1404  /* Keep lock on trigger's rel until end of xact */
1405  table_close(rel, NoLock);
1406 }
1407 
1408 /*
1409  * get_trigger_oid - Look up a trigger by name to find its OID.
1410  *
1411  * If missing_ok is false, throw an error if trigger not found. If
1412  * true, just return InvalidOid.
1413  */
1414 Oid
1415 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1416 {
1417  Relation tgrel;
1418  ScanKeyData skey[2];
1419  SysScanDesc tgscan;
1420  HeapTuple tup;
1421  Oid oid;
1422 
1423  /*
1424  * Find the trigger, verify permissions, set up object address
1425  */
1426  tgrel = table_open(TriggerRelationId, AccessShareLock);
1427 
1428  ScanKeyInit(&skey[0],
1429  Anum_pg_trigger_tgrelid,
1430  BTEqualStrategyNumber, F_OIDEQ,
1431  ObjectIdGetDatum(relid));
1432  ScanKeyInit(&skey[1],
1433  Anum_pg_trigger_tgname,
1434  BTEqualStrategyNumber, F_NAMEEQ,
1435  CStringGetDatum(trigname));
1436 
1437  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1438  NULL, 2, skey);
1439 
1440  tup = systable_getnext(tgscan);
1441 
1442  if (!HeapTupleIsValid(tup))
1443  {
1444  if (!missing_ok)
1445  ereport(ERROR,
1446  (errcode(ERRCODE_UNDEFINED_OBJECT),
1447  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1448  trigname, get_rel_name(relid))));
1449  oid = InvalidOid;
1450  }
1451  else
1452  {
1453  oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1454  }
1455 
1456  systable_endscan(tgscan);
1457  table_close(tgrel, AccessShareLock);
1458  return oid;
1459 }
1460 
1461 /*
1462  * Perform permissions and integrity checks before acquiring a relation lock.
1463  */
/* NOTE(review): this appears to be the lock callback used by the trigger
 * rename path (it checks trigger-capable relkinds and ownership), but the
 * calling convention cannot be confirmed here because the signature line is
 * missing -- verify against upstream. */
1464 static void
/* NOTE(review): original line 1465, the function-name line carrying the
 * leading parameters (the RangeVar *rv and Oid relid used below), is
 * missing from this extract -- confirm against upstream trigger.c. */
1466  void *arg)
1467 {
1468  HeapTuple tuple;
1469  Form_pg_class form;
1470 
1471  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1472  if (!HeapTupleIsValid(tuple))
1473  return; /* concurrently dropped */
1474  form = (Form_pg_class) GETSTRUCT(tuple);
1475 
1476  /* only tables and views can have triggers */
1477  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1478  form->relkind != RELKIND_FOREIGN_TABLE &&
1479  form->relkind != RELKIND_PARTITIONED_TABLE)
1480  ereport(ERROR,
1481  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1482  errmsg("relation \"%s\" cannot have triggers",
1483  rv->relname),
1484  errdetail_relkind_not_supported(form->relkind)));
1485 
1486  /* you must own the table to rename one of its triggers */
1487  if (!pg_class_ownercheck(relid, GetUserId()))
/* NOTE(review): original line 1488, the statement executed when the
 * ownership check fails (presumably an aclcheck_error() call), is missing
 * from this extract -- confirm against upstream. */
1489  if (!allowSystemTableMods && IsSystemClass(relid, form))
1490  ereport(ERROR,
1491  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1492  errmsg("permission denied: \"%s\" is a system catalog",
1493  rv->relname)));
1494 
1495  ReleaseSysCache(tuple);
1496 }
1497 
1498 /*
1499  * renametrig - changes the name of a trigger on a relation
1500  *
1501  * trigger name is changed in trigger catalog.
1502  * No record of the previous name is kept.
1503  *
1504  * get proper relrelation from relation catalog (if not arg)
1505  * scan trigger catalog
1506  * for name conflict (within rel)
1507  * for original trigger (if not arg)
1508  * modify tgname in trigger tuple
1509  * update row in catalog
1510  */
/* NOTE(review): original lines 1511-1512, the return type and function-name
 * line (an ObjectAddress-returning function taking the RenameStmt *stmt used
 * below), are missing from this extract -- confirm against upstream. */
1513 {
1514  Oid tgoid;
1515  Relation targetrel;
1516  Relation tgrel;
1517  HeapTuple tuple;
1518  SysScanDesc tgscan;
1519  ScanKeyData key[2];
1520  Oid relid;
1521  ObjectAddress address;
1522 
1523  /*
1524  * Look up name, check permissions, and acquire lock (which we will NOT
1525  * release until end of transaction).
1526  */
/* NOTE(review): original lines 1527 and 1529 -- the call that assigns relid
 * (presumably RangeVarGetRelidExtended with a permission-checking callback)
 * -- are missing from this extract; only two of its argument lines survive
 * below. Confirm against upstream. */
1528  0,
1530  NULL);
1531 
1532  /* Have lock already, so just need to build relcache entry. */
1533  targetrel = relation_open(relid, NoLock);
1534 
1535  /*
1536  * On partitioned tables, this operation recurses to partitions. Lock all
1537  * tables upfront.
1538  */
1539  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1540  (void) find_all_inheritors(relid, AccessExclusiveLock, NULL);
1541 
1542  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1543 
1544  /*
1545  * Search for the trigger to modify.
1546  */
1547  ScanKeyInit(&key[0],
1548  Anum_pg_trigger_tgrelid,
1549  BTEqualStrategyNumber, F_OIDEQ,
1550  ObjectIdGetDatum(relid));
1551  ScanKeyInit(&key[1],
1552  Anum_pg_trigger_tgname,
1553  BTEqualStrategyNumber, F_NAMEEQ,
1554  PointerGetDatum(stmt->subname));
1555  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1556  NULL, 2, key);
1557  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1558  {
1559  Form_pg_trigger trigform;
1560 
1561  trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1562  tgoid = trigform->oid;
1563 
1564  /*
1565  * If the trigger descends from a trigger on a parent partitioned
1566  * table, reject the rename. We don't allow a trigger in a partition
1567  * to differ in name from that of its parent: that would lead to an
1568  * inconsistency that pg_dump would not reproduce.
1569  */
1570  if (OidIsValid(trigform->tgparentid))
1571  ereport(ERROR,
1572  errmsg("cannot rename trigger \"%s\" on table \"%s\"",
1573  stmt->subname, RelationGetRelationName(targetrel)),
1574  errhint("Rename trigger on partitioned table \"%s\" instead.",
1575  get_rel_name(get_partition_parent(relid, false))));
1576 
1577 
1578  /* Rename the trigger on this relation ... */
1579  renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
1580  stmt->subname);
1581 
1582  /* ... and if it is partitioned, recurse to its partitions */
1583  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1584  {
1585  PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);
1586 
1587  for (int i = 0; i < partdesc->nparts; i++)
1588  {
1589  Oid partitionId = partdesc->oids[i];
1590 
1591  renametrig_partition(tgrel, partitionId, trigform->oid,
1592  stmt->newname, stmt->subname);
1593  }
1594  }
1595  }
1596  else
1597  {
1598  ereport(ERROR,
1599  (errcode(ERRCODE_UNDEFINED_OBJECT),
1600  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1601  stmt->subname, RelationGetRelationName(targetrel))));
1602  }
1603 
/* Report the renamed trigger as the affected object. */
1604  ObjectAddressSet(address, TriggerRelationId, tgoid);
1605 
1606  systable_endscan(tgscan);
1607 
1608  table_close(tgrel, RowExclusiveLock);
1609 
1610  /*
1611  * Close rel, but keep exclusive lock!
1612  */
1613  relation_close(targetrel, NoLock);
1614 
1615  return address;
1616 }
1617 
1618 /*
1619  * Subroutine for renametrig -- perform the actual work of renaming one
1620  * trigger on one table.
1621  *
1622  * If the trigger has a name different from the expected one, raise a
1623  * NOTICE about it.
1624  */
1625 static void
/* NOTE(review): original line 1626, the function-name line carrying the
 * leading parameters (the tgrel, targetrel and trigtup values used below),
 * is missing from this extract -- confirm against upstream trigger.c. */
1627  const char *newname, const char *expected_name)
1628 {
1629  HeapTuple tuple;
1630  Form_pg_trigger tgform;
1631  ScanKeyData key[2];
1632  SysScanDesc tgscan;
1633 
1634  /* If the trigger already has the new name, nothing to do. */
1635  tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
1636  if (strcmp(NameStr(tgform->tgname), newname) == 0)
1637  return;
1638 
1639  /*
1640  * Before actually trying the rename, search for triggers with the same
1641  * name. The update would fail with an ugly message in that case, and it
1642  * is better to throw a nicer error.
1643  */
1644  ScanKeyInit(&key[0],
1645  Anum_pg_trigger_tgrelid,
1646  BTEqualStrategyNumber, F_OIDEQ,
1647  ObjectIdGetDatum(RelationGetRelid(targetrel)));
1648  ScanKeyInit(&key[1],
1649  Anum_pg_trigger_tgname,
1650  BTEqualStrategyNumber, F_NAMEEQ,
1651  PointerGetDatum(newname));
1652  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1653  NULL, 2, key);
1654  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1655  ereport(ERROR,
/* NOTE(review): original line 1656 (presumably the errcode() argument of
 * this ereport, e.g. ERRCODE_DUPLICATE_OBJECT) is missing from this
 * extract -- confirm against upstream. */
1657  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1658  newname, RelationGetRelationName(targetrel))));
1659  systable_endscan(tgscan);
1660 
1661  /*
1662  * The target name is free; update the existing pg_trigger tuple with it.
1663  */
1664  tuple = heap_copytuple(trigtup); /* need a modifiable copy */
1665  tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1666 
1667  /*
1668  * If the trigger has a name different from what we expected, let the user
1669  * know. (We can proceed anyway, since we must have reached here following
1670  * a tgparentid link.)
1671  */
1672  if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
1673  ereport(NOTICE,
1674  errmsg("renamed trigger \"%s\" on relation \"%s\"",
1675  NameStr(tgform->tgname),
1676  RelationGetRelationName(targetrel)));
1677 
1678  namestrcpy(&tgform->tgname, newname);
1679 
1680  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1681 
1682  InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);
1683 
1684  /*
1685  * Invalidate relation's relcache entry so that other backends (and this
1686  * one too!) are sent SI message to make them rebuild relcache entries.
1687  * (Ideally this should happen automatically...)
1688  */
1689  CacheInvalidateRelcache(targetrel);
1690 }
1691 
1692 /*
1693  * Subroutine for renametrig -- Helper for recursing to partitions when
1694  * renaming triggers on a partitioned table.
1695  */
1696 static void
1697 renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
1698  const char *newname, const char *expected_name)
1699 {
1700  SysScanDesc tgscan;
1701  ScanKeyData key;
1702  HeapTuple tuple;
1703 
1704  /*
1705  * Given a relation and the OID of a trigger on parent relation, find the
1706  * corresponding trigger in the child and rename that trigger to the given
1707  * name.
1708  */
1709  ScanKeyInit(&key,
1710  Anum_pg_trigger_tgrelid,
1711  BTEqualStrategyNumber, F_OIDEQ,
1712  ObjectIdGetDatum(partitionId));
1713  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1714  NULL, 1, &key);
1715  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1716  {
1717  Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1718  Relation partitionRel;
1719 
1720  if (tgform->tgparentid != parentTriggerOid)
1721  continue; /* not our trigger */
1722 
1723  partitionRel = table_open(partitionId, NoLock);
1724 
1725  /* Rename the trigger on this partition */
1726  renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);
1727 
1728  /* And if this relation is partitioned, recurse to its partitions */
1729  if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1730  {
1731  PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
1732  true);
1733 
1734  for (int i = 0; i < partdesc->nparts; i++)
1735  {
1736  Oid partitionId = partdesc->oids[i];
1737 
1738  renametrig_partition(tgrel, partitionId, tgform->oid, newname,
1739  NameStr(tgform->tgname));
1740  }
1741  }
1742  table_close(partitionRel, NoLock);
1743 
1744  /* There should be at most one matching tuple */
1745  break;
1746  }
1747  systable_endscan(tgscan);
1748 }
1749 
1750 /*
1751  * EnableDisableTrigger()
1752  *
1753  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1754  * to change 'tgenabled' field for the specified trigger(s)
1755  *
1756  * rel: relation to process (caller must hold suitable lock on it)
1757  * tgname: trigger to process, or NULL to scan all triggers
1758  * fires_when: new value for tgenabled field. In addition to generic
1759  * enablement/disablement, this also defines when the trigger
1760  * should be fired in session replication roles.
1761  * skip_system: if true, skip "system" triggers (constraint triggers)
1762  *
1763  * Caller should have checked permissions for the table; here we also
1764  * enforce that superuser privilege is required to alter the state of
1765  * system triggers
1766  */
1767 void
1768 EnableDisableTrigger(Relation rel, const char *tgname,
1769  char fires_when, bool skip_system, LOCKMODE lockmode)
1770 {
1771  Relation tgrel;
1772  int nkeys;
1773  ScanKeyData keys[2];
1774  SysScanDesc tgscan;
1775  HeapTuple tuple;
1776  bool found;
1777  bool changed;
1778 
1779  /* Scan the relevant entries in pg_triggers */
1780  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1781 
1782  ScanKeyInit(&keys[0],
1783  Anum_pg_trigger_tgrelid,
1784  BTEqualStrategyNumber, F_OIDEQ,
1786  if (tgname)
1787  {
1788  ScanKeyInit(&keys[1],
1789  Anum_pg_trigger_tgname,
1790  BTEqualStrategyNumber, F_NAMEEQ,
1791  CStringGetDatum(tgname));
1792  nkeys = 2;
1793  }
1794  else
1795  nkeys = 1;
1796 
1797  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1798  NULL, nkeys, keys);
1799 
1800  found = changed = false;
1801 
1802  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1803  {
1804  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1805 
1806  if (oldtrig->tgisinternal)
1807  {
1808  /* system trigger ... ok to process? */
1809  if (skip_system)
1810  continue;
1811  if (!superuser())
1812  ereport(ERROR,
1813  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1814  errmsg("permission denied: \"%s\" is a system trigger",
1815  NameStr(oldtrig->tgname))));
1816  }
1817 
1818  found = true;
1819 
1820  if (oldtrig->tgenabled != fires_when)
1821  {
1822  /* need to change this one ... make a copy to scribble on */
1823  HeapTuple newtup = heap_copytuple(tuple);
1824  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1825 
1826  newtrig->tgenabled = fires_when;
1827 
1828  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1829 
1830  heap_freetuple(newtup);
1831 
1832  changed = true;
1833  }
1834 
1835  InvokeObjectPostAlterHook(TriggerRelationId,
1836  oldtrig->oid, 0);
1837  }
1838 
1839  systable_endscan(tgscan);
1840 
1841  table_close(tgrel, RowExclusiveLock);
1842 
1843  if (tgname && !found)
1844  ereport(ERROR,
1845  (errcode(ERRCODE_UNDEFINED_OBJECT),
1846  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1847  tgname, RelationGetRelationName(rel))));
1848 
1849  /*
1850  * If we changed anything, broadcast a SI inval message to force each
1851  * backend (including our own!) to rebuild relation's relcache entry.
1852  * Otherwise they will fail to apply the change promptly.
1853  */
1854  if (changed)
1856 }
1857 
1858 
1859 /*
1860  * Build trigger data to attach to the given relcache entry.
1861  *
1862  * Note that trigger data attached to a relcache entry must be stored in
1863  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1864  * But we should be running in a less long-lived working context. To avoid
1865  * leaking cache memory if this routine fails partway through, we build a
1866  * temporary TriggerDesc in working memory and then copy the completed
1867  * structure into cache memory.
1868  */
1869 void
/* NOTE(review): original line 1870, the function-name line (presumably
 * "RelationBuildTriggers(Relation relation)" given the uses below), is
 * missing from this extract -- confirm against upstream trigger.c. */
1871 {
1872  TriggerDesc *trigdesc;
1873  int numtrigs;
1874  int maxtrigs;
1875  Trigger *triggers;
1876  Relation tgrel;
1877  ScanKeyData skey;
1878  SysScanDesc tgscan;
1879  HeapTuple htup;
1880  MemoryContext oldContext;
1881  int i;
1882 
1883  /*
1884  * Allocate a working array to hold the triggers (the array is extended if
1885  * necessary)
1886  */
1887  maxtrigs = 16;
1888  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1889  numtrigs = 0;
1890 
1891  /*
1892  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1893  * be reading the triggers in name order, except possibly during
1894  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1895  * ensures that triggers will be fired in name order.
1896  */
1897  ScanKeyInit(&skey,
1898  Anum_pg_trigger_tgrelid,
1899  BTEqualStrategyNumber, F_OIDEQ,
1900  ObjectIdGetDatum(RelationGetRelid(relation)));
1901 
1902  tgrel = table_open(TriggerRelationId, AccessShareLock);
1903  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1904  NULL, 1, &skey);
1905 
1906  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1907  {
1908  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1909  Trigger *build;
1910  Datum datum;
1911  bool isnull;
1912 
/* Grow the working array by doubling when it fills up. */
1913  if (numtrigs >= maxtrigs)
1914  {
1915  maxtrigs *= 2;
1916  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1917  }
1918  build = &(triggers[numtrigs]);
1919 
1920  build->tgoid = pg_trigger->oid;
/* NOTE(review): original line 1921, the start of the tgname assignment
 * (the call whose NameGetDatum argument survives on the next line), is
 * missing from this extract -- confirm against upstream. */
1922  NameGetDatum(&pg_trigger->tgname)));
1923  build->tgfoid = pg_trigger->tgfoid;
1924  build->tgtype = pg_trigger->tgtype;
1925  build->tgenabled = pg_trigger->tgenabled;
1926  build->tgisinternal = pg_trigger->tgisinternal;
/* A valid tgparentid marks this trigger as a clone from a parent table. */
1927  build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1928  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1929  build->tgconstrindid = pg_trigger->tgconstrindid;
1930  build->tgconstraint = pg_trigger->tgconstraint;
1931  build->tgdeferrable = pg_trigger->tgdeferrable;
1932  build->tginitdeferred = pg_trigger->tginitdeferred;
1933  build->tgnargs = pg_trigger->tgnargs;
1934  /* tgattr is first var-width field, so OK to access directly */
1935  build->tgnattr = pg_trigger->tgattr.dim1;
1936  if (build->tgnattr > 0)
1937  {
1938  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1939  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1940  build->tgnattr * sizeof(int16));
1941  }
1942  else
1943  build->tgattr = NULL;
1944  if (build->tgnargs > 0)
1945  {
1946  bytea *val;
1947  char *p;
1948 
/* NOTE(review): original line 1949, the start of the statement assigning
 * val (a fastgetattr call whose trailing argument lines survive below), is
 * missing from this extract -- confirm against upstream. */
1950  Anum_pg_trigger_tgargs,
1951  tgrel->rd_att, &isnull));
1952  if (isnull)
1953  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1954  RelationGetRelationName(relation));
/* tgargs holds NUL-separated argument strings; copy each one out. */
1955  p = (char *) VARDATA_ANY(val);
1956  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1957  for (i = 0; i < build->tgnargs; i++)
1958  {
1959  build->tgargs[i] = pstrdup(p);
1960  p += strlen(p) + 1;
1961  }
1962  }
1963  else
1964  build->tgargs = NULL;
1965 
1966  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1967  tgrel->rd_att, &isnull);
1968  if (!isnull)
1969  build->tgoldtable =
/* NOTE(review): original line 1970, the right-hand side of this assignment,
 * is missing from this extract -- confirm against upstream. */
1971  else
1972  build->tgoldtable = NULL;
1973 
1974  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1975  tgrel->rd_att, &isnull);
1976  if (!isnull)
1977  build->tgnewtable =
/* NOTE(review): original line 1978, the right-hand side of this assignment,
 * is missing from this extract -- confirm against upstream. */
1979  else
1980  build->tgnewtable = NULL;
1981 
1982  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1983  tgrel->rd_att, &isnull);
1984  if (!isnull)
1985  build->tgqual = TextDatumGetCString(datum);
1986  else
1987  build->tgqual = NULL;
1988 
1989  numtrigs++;
1990  }
1991 
1992  systable_endscan(tgscan);
1993  table_close(tgrel, AccessShareLock);
1994 
1995  /* There might not be any triggers */
1996  if (numtrigs == 0)
1997  {
1998  pfree(triggers);
1999  return;
2000  }
2001 
2002  /* Build trigdesc */
2003  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
2004  trigdesc->triggers = triggers;
2005  trigdesc->numtriggers = numtrigs;
2006  for (i = 0; i < numtrigs; i++)
2007  SetTriggerFlags(trigdesc, &(triggers[i]));
2008 
2009  /* Copy completed trigdesc into cache storage */
/* NOTE(review): original line 2010 (presumably the MemoryContextSwitchTo
 * into CacheMemoryContext whose result is restored via oldContext below) is
 * missing from this extract -- confirm against upstream. */
2011  relation->trigdesc = CopyTriggerDesc(trigdesc);
2012  MemoryContextSwitchTo(oldContext);
2013 
2014  /* Release working memory */
2015  FreeTriggerDesc(trigdesc);
2016 }
2017 
2018 /*
2019  * Update the TriggerDesc's hint flags to include the specified trigger
2020  */
2021 static void
2023 {
2024  int16 tgtype = trigger->tgtype;
2025 
2026  trigdesc->trig_insert_before_row |=
2027  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2028  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2029  trigdesc->trig_insert_after_row |=
2030  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2031  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2032  trigdesc->trig_insert_instead_row |=
2033  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2034  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
2035  trigdesc->trig_insert_before_statement |=
2036  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2037  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2038  trigdesc->trig_insert_after_statement |=
2039  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2040  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2041  trigdesc->trig_update_before_row |=
2042  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2043  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2044  trigdesc->trig_update_after_row |=
2045  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2046  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2047  trigdesc->trig_update_instead_row |=
2048  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2049  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
2050  trigdesc->trig_update_before_statement |=
2051  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2052  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2053  trigdesc->trig_update_after_statement |=
2054  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2055  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2056  trigdesc->trig_delete_before_row |=
2057  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2058  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2059  trigdesc->trig_delete_after_row |=
2060  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2061  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2062  trigdesc->trig_delete_instead_row |=
2063  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2064  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
2065  trigdesc->trig_delete_before_statement |=
2066  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2067  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2068  trigdesc->trig_delete_after_statement |=
2069  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2070  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2071  /* there are no row-level truncate triggers */
2072  trigdesc->trig_truncate_before_statement |=
2073  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2074  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
2075  trigdesc->trig_truncate_after_statement |=
2076  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2077  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
2078 
2079  trigdesc->trig_insert_new_table |=
2080  (TRIGGER_FOR_INSERT(tgtype) &&
2081  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2082  trigdesc->trig_update_old_table |=
2083  (TRIGGER_FOR_UPDATE(tgtype) &&
2084  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2085  trigdesc->trig_update_new_table |=
2086  (TRIGGER_FOR_UPDATE(tgtype) &&
2087  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2088  trigdesc->trig_delete_old_table |=
2089  (TRIGGER_FOR_DELETE(tgtype) &&
2090  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2091 }
2092 
2093 /*
2094  * Copy a TriggerDesc data structure.
2095  *
2096  * The copy is allocated in the current memory context.
2097  */
2098 TriggerDesc *
2100 {
2101  TriggerDesc *newdesc;
2102  Trigger *trigger;
2103  int i;
2104 
2105  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2106  return NULL;
2107 
2108  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2109  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2110 
2111  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2112  memcpy(trigger, trigdesc->triggers,
2113  trigdesc->numtriggers * sizeof(Trigger));
2114  newdesc->triggers = trigger;
2115 
2116  for (i = 0; i < trigdesc->numtriggers; i++)
2117  {
2118  trigger->tgname = pstrdup(trigger->tgname);
2119  if (trigger->tgnattr > 0)
2120  {
2121  int16 *newattr;
2122 
2123  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
2124  memcpy(newattr, trigger->tgattr,
2125  trigger->tgnattr * sizeof(int16));
2126  trigger->tgattr = newattr;
2127  }
2128  if (trigger->tgnargs > 0)
2129  {
2130  char **newargs;
2131  int16 j;
2132 
2133  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2134  for (j = 0; j < trigger->tgnargs; j++)
2135  newargs[j] = pstrdup(trigger->tgargs[j]);
2136  trigger->tgargs = newargs;
2137  }
2138  if (trigger->tgqual)
2139  trigger->tgqual = pstrdup(trigger->tgqual);
2140  if (trigger->tgoldtable)
2141  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2142  if (trigger->tgnewtable)
2143  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
2144  trigger++;
2145  }
2146 
2147  return newdesc;
2148 }
2149 
2150 /*
2151  * Free a TriggerDesc data structure.
2152  */
2153 void
2155 {
2156  Trigger *trigger;
2157  int i;
2158 
2159  if (trigdesc == NULL)
2160  return;
2161 
2162  trigger = trigdesc->triggers;
2163  for (i = 0; i < trigdesc->numtriggers; i++)
2164  {
2165  pfree(trigger->tgname);
2166  if (trigger->tgnattr > 0)
2167  pfree(trigger->tgattr);
2168  if (trigger->tgnargs > 0)
2169  {
2170  while (--(trigger->tgnargs) >= 0)
2171  pfree(trigger->tgargs[trigger->tgnargs]);
2172  pfree(trigger->tgargs);
2173  }
2174  if (trigger->tgqual)
2175  pfree(trigger->tgqual);
2176  if (trigger->tgoldtable)
2177  pfree(trigger->tgoldtable);
2178  if (trigger->tgnewtable)
2179  pfree(trigger->tgnewtable);
2180  trigger++;
2181  }
2182  pfree(trigdesc->triggers);
2183  pfree(trigdesc);
2184 }
2185 
2186 /*
2187  * Compare two TriggerDesc structures for logical equality.
2188  */
2189 #ifdef NOT_USED
2190 bool
2191 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
2192 {
2193  int i,
2194  j;
2195 
2196  /*
2197  * We need not examine the hint flags, just the trigger array itself; if
2198  * we have the same triggers with the same types, the flags should match.
2199  *
2200  * As of 7.3 we assume trigger set ordering is significant in the
2201  * comparison; so we just compare corresponding slots of the two sets.
2202  *
2203  * Note: comparing the stringToNode forms of the WHEN clauses means that
2204  * parse column locations will affect the result. This is okay as long as
2205  * this function is only used for detecting exact equality, as for example
2206  * in checking for staleness of a cache entry.
2207  */
2208  if (trigdesc1 != NULL)
2209  {
2210  if (trigdesc2 == NULL)
2211  return false;
2212  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
2213  return false;
2214  for (i = 0; i < trigdesc1->numtriggers; i++)
2215  {
2216  Trigger *trig1 = trigdesc1->triggers + i;
2217  Trigger *trig2 = trigdesc2->triggers + i;
2218 
2219  if (trig1->tgoid != trig2->tgoid)
2220  return false;
2221  if (strcmp(trig1->tgname, trig2->tgname) != 0)
2222  return false;
2223  if (trig1->tgfoid != trig2->tgfoid)
2224  return false;
2225  if (trig1->tgtype != trig2->tgtype)
2226  return false;
2227  if (trig1->tgenabled != trig2->tgenabled)
2228  return false;
2229  if (trig1->tgisinternal != trig2->tgisinternal)
2230  return false;
2231  if (trig1->tgisclone != trig2->tgisclone)
2232  return false;
2233  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
2234  return false;
2235  if (trig1->tgconstrindid != trig2->tgconstrindid)
2236  return false;
2237  if (trig1->tgconstraint != trig2->tgconstraint)
2238  return false;
2239  if (trig1->tgdeferrable != trig2->tgdeferrable)
2240  return false;
2241  if (trig1->tginitdeferred != trig2->tginitdeferred)
2242  return false;
2243  if (trig1->tgnargs != trig2->tgnargs)
2244  return false;
2245  if (trig1->tgnattr != trig2->tgnattr)
2246  return false;
2247  if (trig1->tgnattr > 0 &&
2248  memcmp(trig1->tgattr, trig2->tgattr,
2249  trig1->tgnattr * sizeof(int16)) != 0)
2250  return false;
2251  for (j = 0; j < trig1->tgnargs; j++)
2252  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
2253  return false;
2254  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
2255  /* ok */ ;
2256  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
2257  return false;
2258  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
2259  return false;
2260  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
2261  /* ok */ ;
2262  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
2263  return false;
2264  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
2265  return false;
2266  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
2267  /* ok */ ;
2268  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
2269  return false;
2270  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
2271  return false;
2272  }
2273  }
2274  else if (trigdesc2 != NULL)
2275  return false;
2276  return true;
2277 }
2278 #endif /* NOT_USED */
2279 
2280 /*
2281  * Check if there is a row-level trigger with transition tables that prevents
2282  * a table from becoming an inheritance child or partition. Return the name
2283  * of the first such incompatible trigger, or NULL if there is none.
2284  */
2285 const char *
2287 {
2288  if (trigdesc != NULL)
2289  {
2290  int i;
2291 
2292  for (i = 0; i < trigdesc->numtriggers; ++i)
2293  {
2294  Trigger *trigger = &trigdesc->triggers[i];
2295 
2296  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2297  return trigger->tgname;
2298  }
2299  }
2300 
2301  return NULL;
2302 }
2303 
2304 /*
2305  * Call a trigger function.
2306  *
2307  * trigdata: trigger descriptor.
2308  * tgindx: trigger's index in finfo and instr arrays.
2309  * finfo: array of cached trigger function call information.
2310  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2311  * per_tuple_context: memory context to execute the function in.
2312  *
2313  * Returns the tuple (or NULL) as returned by the function.
2314  */
2315 static HeapTuple
2317  int tgindx,
2318  FmgrInfo *finfo,
2319  Instrumentation *instr,
2320  MemoryContext per_tuple_context)
2321 {
2322  LOCAL_FCINFO(fcinfo, 0);
2323  PgStat_FunctionCallUsage fcusage;
2324  Datum result;
2325  MemoryContext oldContext;
2326 
2327  /*
2328  * Protect against code paths that may fail to initialize transition table
2329  * info.
2330  */
2331  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2332  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2333  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2334  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2335  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2336  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2337  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2338 
2339  finfo += tgindx;
2340 
2341  /*
2342  * We cache fmgr lookup info, to avoid making the lookup again on each
2343  * call.
2344  */
2345  if (finfo->fn_oid == InvalidOid)
2346  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2347 
2348  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2349 
2350  /*
2351  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2352  */
2353  if (instr)
2354  InstrStartNode(instr + tgindx);
2355 
2356  /*
2357  * Do the function evaluation in the per-tuple memory context, so that
2358  * leaked memory will be reclaimed once per tuple. Note in particular that
2359  * any new tuple created by the trigger function will live till the end of
2360  * the tuple cycle.
2361  */
2362  oldContext = MemoryContextSwitchTo(per_tuple_context);
2363 
2364  /*
2365  * Call the function, passing no arguments but setting a context.
2366  */
2367  InitFunctionCallInfoData(*fcinfo, finfo, 0,
2368  InvalidOid, (Node *) trigdata, NULL);
2369 
2370  pgstat_init_function_usage(fcinfo, &fcusage);
2371 
2372  MyTriggerDepth++;
2373  PG_TRY();
2374  {
2375  result = FunctionCallInvoke(fcinfo);
2376  }
2377  PG_FINALLY();
2378  {
2379  MyTriggerDepth--;
2380  }
2381  PG_END_TRY();
2382 
2383  pgstat_end_function_usage(&fcusage, true);
2384 
2385  MemoryContextSwitchTo(oldContext);
2386 
2387  /*
2388  * Trigger protocol allows function to return a null pointer, but NOT to
2389  * set the isnull result flag.
2390  */
2391  if (fcinfo->isnull)
2392  ereport(ERROR,
2393  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2394  errmsg("trigger function %u returned null value",
2395  fcinfo->flinfo->fn_oid)));
2396 
2397  /*
2398  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2399  * one "tuple returned" (really the number of firings).
2400  */
2401  if (instr)
2402  InstrStopNode(instr + tgindx, 1);
2403 
2404  return (HeapTuple) DatumGetPointer(result);
2405 }
2406 
2407 void
2409 {
2410  TriggerDesc *trigdesc;
2411  int i;
2412  TriggerData LocTriggerData = {0};
2413 
2414  trigdesc = relinfo->ri_TrigDesc;
2415 
2416  if (trigdesc == NULL)
2417  return;
2418  if (!trigdesc->trig_insert_before_statement)
2419  return;
2420 
2421  /* no-op if we already fired BS triggers in this context */
2423  CMD_INSERT))
2424  return;
2425 
2426  LocTriggerData.type = T_TriggerData;
2427  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2429  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2430  for (i = 0; i < trigdesc->numtriggers; i++)
2431  {
2432  Trigger *trigger = &trigdesc->triggers[i];
2433  HeapTuple newtuple;
2434 
2435  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2436  TRIGGER_TYPE_STATEMENT,
2437  TRIGGER_TYPE_BEFORE,
2438  TRIGGER_TYPE_INSERT))
2439  continue;
2440  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2441  NULL, NULL, NULL))
2442  continue;
2443 
2444  LocTriggerData.tg_trigger = trigger;
2445  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2446  i,
2447  relinfo->ri_TrigFunctions,
2448  relinfo->ri_TrigInstrument,
2449  GetPerTupleMemoryContext(estate));
2450 
2451  if (newtuple)
2452  ereport(ERROR,
2453  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2454  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2455  }
2456 }
2457 
2458 void
2460  TransitionCaptureState *transition_capture)
2461 {
2462  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2463 
2464  if (trigdesc && trigdesc->trig_insert_after_statement)
2465  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2467  false, NULL, NULL, NIL, NULL, transition_capture,
2468  false);
2469 }
2470 
2471 bool
2473  TupleTableSlot *slot)
2474 {
2475  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2476  HeapTuple newtuple = NULL;
2477  bool should_free;
2478  TriggerData LocTriggerData = {0};
2479  int i;
2480 
2481  LocTriggerData.type = T_TriggerData;
2482  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2485  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2486  for (i = 0; i < trigdesc->numtriggers; i++)
2487  {
2488  Trigger *trigger = &trigdesc->triggers[i];
2489  HeapTuple oldtuple;
2490 
2491  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2492  TRIGGER_TYPE_ROW,
2493  TRIGGER_TYPE_BEFORE,
2494  TRIGGER_TYPE_INSERT))
2495  continue;
2496  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2497  NULL, NULL, slot))
2498  continue;
2499 
2500  if (!newtuple)
2501  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2502 
2503  LocTriggerData.tg_trigslot = slot;
2504  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2505  LocTriggerData.tg_trigger = trigger;
2506  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2507  i,
2508  relinfo->ri_TrigFunctions,
2509  relinfo->ri_TrigInstrument,
2510  GetPerTupleMemoryContext(estate));
2511  if (newtuple == NULL)
2512  {
2513  if (should_free)
2514  heap_freetuple(oldtuple);
2515  return false; /* "do nothing" */
2516  }
2517  else if (newtuple != oldtuple)
2518  {
2519  ExecForceStoreHeapTuple(newtuple, slot, false);
2520 
2521  /*
2522  * After a tuple in a partition goes through a trigger, the user
2523  * could have changed the partition key enough that the tuple no
2524  * longer fits the partition. Verify that.
2525  */
2526  if (trigger->tgisclone &&
2527  !ExecPartitionCheck(relinfo, slot, estate, false))
2528  ereport(ERROR,
2529  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2530  errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2531  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2532  trigger->tgname,
2535 
2536  if (should_free)
2537  heap_freetuple(oldtuple);
2538 
2539  /* signal tuple should be re-fetched if used */
2540  newtuple = NULL;
2541  }
2542  }
2543 
2544  return true;
2545 }
2546 
2547 void
2549  TupleTableSlot *slot, List *recheckIndexes,
2550  TransitionCaptureState *transition_capture)
2551 {
2552  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2553 
2554  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2555  (transition_capture && transition_capture->tcs_insert_new_table))
2556  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2558  true, NULL, slot,
2559  recheckIndexes, NULL,
2560  transition_capture,
2561  false);
2562 }
2563 
2564 bool
2566  TupleTableSlot *slot)
2567 {
2568  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2569  HeapTuple newtuple = NULL;
2570  bool should_free;
2571  TriggerData LocTriggerData = {0};
2572  int i;
2573 
2574  LocTriggerData.type = T_TriggerData;
2575  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2578  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2579  for (i = 0; i < trigdesc->numtriggers; i++)
2580  {
2581  Trigger *trigger = &trigdesc->triggers[i];
2582  HeapTuple oldtuple;
2583 
2584  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2585  TRIGGER_TYPE_ROW,
2586  TRIGGER_TYPE_INSTEAD,
2587  TRIGGER_TYPE_INSERT))
2588  continue;
2589  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2590  NULL, NULL, slot))
2591  continue;
2592 
2593  if (!newtuple)
2594  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2595 
2596  LocTriggerData.tg_trigslot = slot;
2597  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2598  LocTriggerData.tg_trigger = trigger;
2599  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2600  i,
2601  relinfo->ri_TrigFunctions,
2602  relinfo->ri_TrigInstrument,
2603  GetPerTupleMemoryContext(estate));
2604  if (newtuple == NULL)
2605  {
2606  if (should_free)
2607  heap_freetuple(oldtuple);
2608  return false; /* "do nothing" */
2609  }
2610  else if (newtuple != oldtuple)
2611  {
2612  ExecForceStoreHeapTuple(newtuple, slot, false);
2613 
2614  if (should_free)
2615  heap_freetuple(oldtuple);
2616 
2617  /* signal tuple should be re-fetched if used */
2618  newtuple = NULL;
2619  }
2620  }
2621 
2622  return true;
2623 }
2624 
2625 void
2627 {
2628  TriggerDesc *trigdesc;
2629  int i;
2630  TriggerData LocTriggerData = {0};
2631 
2632  trigdesc = relinfo->ri_TrigDesc;
2633 
2634  if (trigdesc == NULL)
2635  return;
2636  if (!trigdesc->trig_delete_before_statement)
2637  return;
2638 
2639  /* no-op if we already fired BS triggers in this context */
2641  CMD_DELETE))
2642  return;
2643 
2644  LocTriggerData.type = T_TriggerData;
2645  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2647  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2648  for (i = 0; i < trigdesc->numtriggers; i++)
2649  {
2650  Trigger *trigger = &trigdesc->triggers[i];
2651  HeapTuple newtuple;
2652 
2653  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2654  TRIGGER_TYPE_STATEMENT,
2655  TRIGGER_TYPE_BEFORE,
2656  TRIGGER_TYPE_DELETE))
2657  continue;
2658  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2659  NULL, NULL, NULL))
2660  continue;
2661 
2662  LocTriggerData.tg_trigger = trigger;
2663  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2664  i,
2665  relinfo->ri_TrigFunctions,
2666  relinfo->ri_TrigInstrument,
2667  GetPerTupleMemoryContext(estate));
2668 
2669  if (newtuple)
2670  ereport(ERROR,
2671  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2672  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2673  }
2674 }
2675 
2676 void
2678  TransitionCaptureState *transition_capture)
2679 {
2680  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2681 
2682  if (trigdesc && trigdesc->trig_delete_after_statement)
2683  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2685  false, NULL, NULL, NIL, NULL, transition_capture,
2686  false);
2687 }
2688 
2689 /*
2690  * Execute BEFORE ROW DELETE triggers.
2691  *
2692  * True indicates caller can proceed with the delete. False indicates caller
2693  * need to suppress the delete and additionally if requested, we need to pass
2694  * back the concurrently updated tuple if any.
2695  */
2696 bool
2698  ResultRelInfo *relinfo,
2699  ItemPointer tupleid,
2700  HeapTuple fdw_trigtuple,
2701  TupleTableSlot **epqslot)
2702 {
2703  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2704  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2705  bool result = true;
2706  TriggerData LocTriggerData = {0};
2707  HeapTuple trigtuple;
2708  bool should_free = false;
2709  int i;
2710 
2711  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2712  if (fdw_trigtuple == NULL)
2713  {
2714  TupleTableSlot *epqslot_candidate = NULL;
2715 
2716  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2717  LockTupleExclusive, slot, &epqslot_candidate,
2718  NULL))
2719  return false;
2720 
2721  /*
2722  * If the tuple was concurrently updated and the caller of this
2723  * function requested for the updated tuple, skip the trigger
2724  * execution.
2725  */
2726  if (epqslot_candidate != NULL && epqslot != NULL)
2727  {
2728  *epqslot = epqslot_candidate;
2729  return false;
2730  }
2731 
2732  trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2733  }
2734  else
2735  {
2736  trigtuple = fdw_trigtuple;
2737  ExecForceStoreHeapTuple(trigtuple, slot, false);
2738  }
2739 
2740  LocTriggerData.type = T_TriggerData;
2741  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2744  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2745  for (i = 0; i < trigdesc->numtriggers; i++)
2746  {
2747  HeapTuple newtuple;
2748  Trigger *trigger = &trigdesc->triggers[i];
2749 
2750  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2751  TRIGGER_TYPE_ROW,
2752  TRIGGER_TYPE_BEFORE,
2753  TRIGGER_TYPE_DELETE))
2754  continue;
2755  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2756  NULL, slot, NULL))
2757  continue;
2758 
2759  LocTriggerData.tg_trigslot = slot;
2760  LocTriggerData.tg_trigtuple = trigtuple;
2761  LocTriggerData.tg_trigger = trigger;
2762  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2763  i,
2764  relinfo->ri_TrigFunctions,
2765  relinfo->ri_TrigInstrument,
2766  GetPerTupleMemoryContext(estate));
2767  if (newtuple == NULL)
2768  {
2769  result = false; /* tell caller to suppress delete */
2770  break;
2771  }
2772  if (newtuple != trigtuple)
2773  heap_freetuple(newtuple);
2774  }
2775  if (should_free)
2776  heap_freetuple(trigtuple);
2777 
2778  return result;
2779 }
2780 
2781 /*
2782  * Note: is_crosspart_update must be true if the DELETE is being performed
2783  * as part of a cross-partition update.
2784  */
2785 void
2787  ResultRelInfo *relinfo,
2788  ItemPointer tupleid,
2789  HeapTuple fdw_trigtuple,
2790  TransitionCaptureState *transition_capture,
2791  bool is_crosspart_update)
2792 {
2793  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2794 
2795  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2796  (transition_capture && transition_capture->tcs_delete_old_table))
2797  {
2798  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2799 
2800  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2801  if (fdw_trigtuple == NULL)
2802  GetTupleForTrigger(estate,
2803  NULL,
2804  relinfo,
2805  tupleid,
2807  slot,
2808  NULL,
2809  NULL);
2810  else
2811  ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2812 
2813  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2815  true, slot, NULL, NIL, NULL,
2816  transition_capture,
2817  is_crosspart_update);
2818  }
2819 }
2820 
2821 bool
2823  HeapTuple trigtuple)
2824 {
2825  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2826  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2827  TriggerData LocTriggerData = {0};
2828  int i;
2829 
2830  LocTriggerData.type = T_TriggerData;
2831  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2834  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2835 
2836  ExecForceStoreHeapTuple(trigtuple, slot, false);
2837 
2838  for (i = 0; i < trigdesc->numtriggers; i++)
2839  {
2840  HeapTuple rettuple;
2841  Trigger *trigger = &trigdesc->triggers[i];
2842 
2843  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2844  TRIGGER_TYPE_ROW,
2845  TRIGGER_TYPE_INSTEAD,
2846  TRIGGER_TYPE_DELETE))
2847  continue;
2848  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2849  NULL, slot, NULL))
2850  continue;
2851 
2852  LocTriggerData.tg_trigslot = slot;
2853  LocTriggerData.tg_trigtuple = trigtuple;
2854  LocTriggerData.tg_trigger = trigger;
2855  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2856  i,
2857  relinfo->ri_TrigFunctions,
2858  relinfo->ri_TrigInstrument,
2859  GetPerTupleMemoryContext(estate));
2860  if (rettuple == NULL)
2861  return false; /* Delete was suppressed */
2862  if (rettuple != trigtuple)
2863  heap_freetuple(rettuple);
2864  }
2865  return true;
2866 }
2867 
2868 void
2870 {
2871  TriggerDesc *trigdesc;
2872  int i;
2873  TriggerData LocTriggerData = {0};
2874  Bitmapset *updatedCols;
2875 
2876  trigdesc = relinfo->ri_TrigDesc;
2877 
2878  if (trigdesc == NULL)
2879  return;
2880  if (!trigdesc->trig_update_before_statement)
2881  return;
2882 
2883  /* no-op if we already fired BS triggers in this context */
2885  CMD_UPDATE))
2886  return;
2887 
2888  /* statement-level triggers operate on the parent table */
2889  Assert(relinfo->ri_RootResultRelInfo == NULL);
2890 
2891  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2892 
2893  LocTriggerData.type = T_TriggerData;
2894  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2896  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2897  LocTriggerData.tg_updatedcols = updatedCols;
2898  for (i = 0; i < trigdesc->numtriggers; i++)
2899  {
2900  Trigger *trigger = &trigdesc->triggers[i];
2901  HeapTuple newtuple;
2902 
2903  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2904  TRIGGER_TYPE_STATEMENT,
2905  TRIGGER_TYPE_BEFORE,
2906  TRIGGER_TYPE_UPDATE))
2907  continue;
2908  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2909  updatedCols, NULL, NULL))
2910  continue;
2911 
2912  LocTriggerData.tg_trigger = trigger;
2913  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2914  i,
2915  relinfo->ri_TrigFunctions,
2916  relinfo->ri_TrigInstrument,
2917  GetPerTupleMemoryContext(estate));
2918 
2919  if (newtuple)
2920  ereport(ERROR,
2921  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2922  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2923  }
2924 }
2925 
2926 void
2928  TransitionCaptureState *transition_capture)
2929 {
2930  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2931 
2932  /* statement-level triggers operate on the parent table */
2933  Assert(relinfo->ri_RootResultRelInfo == NULL);
2934 
2935  if (trigdesc && trigdesc->trig_update_after_statement)
2936  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2938  false, NULL, NULL, NIL,
2939  ExecGetAllUpdatedCols(relinfo, estate),
2940  transition_capture,
2941  false);
2942 }
2943 
2944 bool
2946  ResultRelInfo *relinfo,
2947  ItemPointer tupleid,
2948  HeapTuple fdw_trigtuple,
2949  TupleTableSlot *newslot,
2950  TM_FailureData *tmfd)
2951 {
2952  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2953  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2954  HeapTuple newtuple = NULL;
2955  HeapTuple trigtuple;
2956  bool should_free_trig = false;
2957  bool should_free_new = false;
2958  TriggerData LocTriggerData = {0};
2959  int i;
2960  Bitmapset *updatedCols;
2961  LockTupleMode lockmode;
2962 
2963  /* Determine lock mode to use */
2964  lockmode = ExecUpdateLockMode(estate, relinfo);
2965 
2966  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2967  if (fdw_trigtuple == NULL)
2968  {
2969  TupleTableSlot *epqslot_candidate = NULL;
2970 
2971  /* get a copy of the on-disk tuple we are planning to update */
2972  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2973  lockmode, oldslot, &epqslot_candidate,
2974  tmfd))
2975  return false; /* cancel the update action */
2976 
2977  /*
2978  * In READ COMMITTED isolation level it's possible that target tuple
2979  * was changed due to concurrent update. In that case we have a raw
2980  * subplan output tuple in epqslot_candidate, and need to form a new
2981  * insertable tuple using ExecGetUpdateNewTuple to replace the one we
2982  * received in newslot. Neither we nor our callers have any further
2983  * interest in the passed-in tuple, so it's okay to overwrite newslot
2984  * with the newer data.
2985  *
2986  * (Typically, newslot was also generated by ExecGetUpdateNewTuple, so
2987  * that epqslot_clean will be that same slot and the copy step below
2988  * is not needed.)
2989  */
2990  if (epqslot_candidate != NULL)
2991  {
2992  TupleTableSlot *epqslot_clean;
2993 
2994  epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
2995  oldslot);
2996 
2997  if (newslot != epqslot_clean)
2998  ExecCopySlot(newslot, epqslot_clean);
2999  }
3000 
3001  trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
3002  }
3003  else
3004  {
3005  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3006  trigtuple = fdw_trigtuple;
3007  }
3008 
3009  LocTriggerData.type = T_TriggerData;
3010  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3013  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3014  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
3015  LocTriggerData.tg_updatedcols = updatedCols;
3016  for (i = 0; i < trigdesc->numtriggers; i++)
3017  {
3018  Trigger *trigger = &trigdesc->triggers[i];
3019  HeapTuple oldtuple;
3020 
3021  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3022  TRIGGER_TYPE_ROW,
3023  TRIGGER_TYPE_BEFORE,
3024  TRIGGER_TYPE_UPDATE))
3025  continue;
3026  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3027  updatedCols, oldslot, newslot))
3028  continue;
3029 
3030  if (!newtuple)
3031  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
3032 
3033  LocTriggerData.tg_trigslot = oldslot;
3034  LocTriggerData.tg_trigtuple = trigtuple;
3035  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3036  LocTriggerData.tg_newslot = newslot;
3037  LocTriggerData.tg_trigger = trigger;
3038  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3039  i,
3040  relinfo->ri_TrigFunctions,
3041  relinfo->ri_TrigInstrument,
3042  GetPerTupleMemoryContext(estate));
3043 
3044  if (newtuple == NULL)
3045  {
3046  if (should_free_trig)
3047  heap_freetuple(trigtuple);
3048  if (should_free_new)
3049  heap_freetuple(oldtuple);
3050  return false; /* "do nothing" */
3051  }
3052  else if (newtuple != oldtuple)
3053  {
3054  ExecForceStoreHeapTuple(newtuple, newslot, false);
3055 
3056  /*
3057  * If the tuple returned by the trigger / being stored, is the old
3058  * row version, and the heap tuple passed to the trigger was
3059  * allocated locally, materialize the slot. Otherwise we might
3060  * free it while still referenced by the slot.
3061  */
3062  if (should_free_trig && newtuple == trigtuple)
3063  ExecMaterializeSlot(newslot);
3064 
3065  if (should_free_new)
3066  heap_freetuple(oldtuple);
3067 
3068  /* signal tuple should be re-fetched if used */
3069  newtuple = NULL;
3070  }
3071  }
3072  if (should_free_trig)
3073  heap_freetuple(trigtuple);
3074 
3075  return true;
3076 }
3077 
3078 /*
3079  * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
3080  * and destination partitions, respectively, of a cross-partition update of
3081  * the root partitioned table mentioned in the query, given by 'relinfo'.
3082  * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
3083  * partition, and 'newslot' contains the "new" tuple in the destination
3084  * partition. This interface allows to support the requirements of
3085  * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
3086  * that case.
3087  */
void
/*
 * NOTE(review): the line carrying this function's name and leading
 * parameters is missing from this excerpt; presumably
 * "ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo," --
 * confirm against upstream trigger.c.
 */
					 ResultRelInfo *src_partinfo,
					 ResultRelInfo *dst_partinfo,
					 ItemPointer tupleid,
					 HeapTuple fdw_trigtuple,
					 TupleTableSlot *newslot,
					 List *recheckIndexes,
					 TransitionCaptureState *transition_capture,
					 bool is_crosspart_update)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

	/*
	 * Do the work only if there is something to queue: either an actual
	 * AFTER ROW UPDATE trigger, or transition tables capturing old/new
	 * rows for this UPDATE.
	 */
	if ((trigdesc && trigdesc->trig_update_after_row) ||
		(transition_capture &&
		 (transition_capture->tcs_update_old_table ||
		  transition_capture->tcs_update_new_table)))
	{
		/*
		 * Note: if the UPDATE is converted into a DELETE+INSERT as part of
		 * update-partition-key operation, then this function is also called
		 * separately for DELETE and INSERT to capture transition table rows.
		 * In such case, either old tuple or new tuple can be NULL.
		 */
		TupleTableSlot *oldslot;
		ResultRelInfo *tupsrc;

		/* Cross-partition updates must supply both partition infos. */
		Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
			   !is_crosspart_update);

		/* The "old" tuple lives in the source partition, if one was given. */
		tupsrc = src_partinfo ? src_partinfo : relinfo;
		oldslot = ExecGetTriggerOldSlot(estate, tupsrc);

		if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
			/*
			 * Fetch the old row by ctid; no EPQ handling needed here (NULL
			 * epqstate/epqslot).  NOTE(review): the lock-mode argument line
			 * is missing from this excerpt -- confirm against upstream.
			 */
			GetTupleForTrigger(estate,
							   NULL,
							   tupsrc,
							   tupleid,
							   oldslot,
							   NULL,
							   NULL);
		else if (fdw_trigtuple != NULL)
			/* Foreign tables supply the old row directly. */
			ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
		else
			/* DELETE+INSERT capture call: no old tuple at all. */
			ExecClearTuple(oldslot);

		/*
		 * Queue the after-row event (or capture transition rows).
		 * NOTE(review): one argument line (presumably the event-type
		 * constant) is missing from this excerpt between the partition
		 * infos and "true".
		 */
		AfterTriggerSaveEvent(estate, relinfo,
							  src_partinfo, dst_partinfo,
							  true,
							  oldslot, newslot, recheckIndexes,
							  ExecGetAllUpdatedCols(relinfo, estate),
							  transition_capture,
							  is_crosspart_update);
	}
}
3145 
bool
/*
 * NOTE(review): the line with this function's name and leading parameters
 * is missing from this excerpt; presumably
 * "ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo," --
 * confirm against upstream trigger.c.
 */
					HeapTuple trigtuple, TupleTableSlot *newslot)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
	HeapTuple	newtuple = NULL;	/* tuple fetched from newslot, if any */
	bool		should_free;	/* must we free the fetched tuple? */
	TriggerData LocTriggerData = {0};
	int			i;

	LocTriggerData.type = T_TriggerData;
	/*
	 * NOTE(review): the continuation of this flag expression (and its
	 * terminating semicolon) is missing from this excerpt.
	 */
	LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;

	/* Make the old row visible to WHEN clauses and trigger functions. */
	ExecForceStoreHeapTuple(trigtuple, oldslot, false);

	/* Fire each matching, enabled INSTEAD OF ROW UPDATE trigger in turn. */
	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];
		HeapTuple	oldtuple;

		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  TRIGGER_TYPE_ROW,
								  TRIGGER_TYPE_INSTEAD,
								  TRIGGER_TYPE_UPDATE))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
							NULL, oldslot, newslot))
			continue;

		/* Materialize the current "new" tuple lazily, only when needed. */
		if (!newtuple)
			newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);

		LocTriggerData.tg_trigslot = oldslot;
		LocTriggerData.tg_trigtuple = trigtuple;
		LocTriggerData.tg_newslot = newslot;
		LocTriggerData.tg_newtuple = oldtuple = newtuple;

		LocTriggerData.tg_trigger = trigger;
		newtuple = ExecCallTriggerFunc(&LocTriggerData,
									   i,
									   relinfo->ri_TrigFunctions,
									   relinfo->ri_TrigInstrument,
									   GetPerTupleMemoryContext(estate));
		if (newtuple == NULL)
		{
			/* Trigger suppressed the update entirely. */
			return false;		/* "do nothing" */
		}
		else if (newtuple != oldtuple)
		{
			/* Trigger returned a replacement row: store it in newslot. */
			ExecForceStoreHeapTuple(newtuple, newslot, false);

			if (should_free)
				heap_freetuple(oldtuple);

			/* signal tuple should be re-fetched if used */
			newtuple = NULL;
		}
	}

	/* All triggers fired without suppressing the operation. */
	return true;
}
3211 
void
/*
 * NOTE(review): the line with this function's name is missing from this
 * excerpt; presumably "ExecBSTruncateTriggers(EState *estate,
 * ResultRelInfo *relinfo)" -- confirm against upstream trigger.c.
 */
{
	TriggerDesc *trigdesc;
	int			i;
	TriggerData LocTriggerData = {0};

	trigdesc = relinfo->ri_TrigDesc;

	/* Fast exit if the relation has no BEFORE STATEMENT TRUNCATE triggers. */
	if (trigdesc == NULL)
		return;
	if (!trigdesc->trig_truncate_before_statement)
		return;

	LocTriggerData.type = T_TriggerData;
	/*
	 * NOTE(review): the continuation of this flag expression (and its
	 * terminating semicolon) is missing from this excerpt.
	 */
	LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;

	/* Fire each matching, enabled BEFORE STATEMENT TRUNCATE trigger. */
	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];
		HeapTuple	newtuple;

		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  TRIGGER_TYPE_STATEMENT,
								  TRIGGER_TYPE_BEFORE,
								  TRIGGER_TYPE_TRUNCATE))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
							NULL, NULL, NULL))
			continue;

		LocTriggerData.tg_trigger = trigger;
		newtuple = ExecCallTriggerFunc(&LocTriggerData,
									   i,
									   relinfo->ri_TrigFunctions,
									   relinfo->ri_TrigInstrument,
									   GetPerTupleMemoryContext(estate));

		/* Statement-level triggers must return NULL; anything else is an error. */
		if (newtuple)
			ereport(ERROR,
					(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
					 errmsg("BEFORE STATEMENT trigger cannot return a value")));
	}
}
3258 
void
/*
 * NOTE(review): the line with this function's name is missing from this
 * excerpt; presumably "ExecASTruncateTriggers(EState *estate,
 * ResultRelInfo *relinfo)" -- confirm against upstream trigger.c.
 */
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

	/*
	 * Queue an AFTER STATEMENT TRUNCATE event if any such trigger exists.
	 * NOTE(review): one argument line (presumably the event-type constant)
	 * is missing from this excerpt after "NULL, NULL,".
	 */
	if (trigdesc && trigdesc->trig_truncate_after_statement)
		AfterTriggerSaveEvent(estate, relinfo,
							  NULL, NULL,
							  false, NULL, NULL, NIL, NULL, NULL,
							  false);
}
3271 
3272 
3273 /*
3274  * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3275  */
static bool
/*
 * NOTE(review): the line with this function's name is missing from this
 * excerpt; presumably "GetTupleForTrigger(EState *estate," per the
 * callers above -- confirm against upstream trigger.c.
 *
 * Returns true with the tuple stored in "oldslot"; returns false if the
 * tuple is gone or the caller should skip processing it.
 */
				   EPQState *epqstate,
				   ResultRelInfo *relinfo,
				   ItemPointer tid,
				   LockTupleMode lockmode,
				   TupleTableSlot *oldslot,
				   TupleTableSlot **epqslot,
				   TM_FailureData *tmfdp)
{
	Relation	relation = relinfo->ri_RelationDesc;

	if (epqslot != NULL)
	{
		/* Locking path: caller (a BEFORE ROW trigger) may need EPQ rechecks. */
		TM_Result	test;
		TM_FailureData tmfd;
		int			lockflags = 0;

		*epqslot = NULL;

		/* caller must pass an epqstate if EvalPlanQual is possible */
		Assert(epqstate != NULL);

		/*
		 * lock tuple for update
		 */
		/*
		 * NOTE(review): the condition guarding this flag (presumably an
		 * isolation-level test) is missing from this excerpt.
		 */
			lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
		test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
								estate->es_output_cid,
								lockmode, LockWaitBlock,
								lockflags,
								&tmfd);

		/* Let the caller know about the status of this operation */
		if (tmfdp)
			*tmfdp = tmfd;

		switch (test)
		{
			case TM_SelfModified:

				/*
				 * The target tuple was already updated or deleted by the
				 * current command, or by a later command in the current
				 * transaction.  We ignore the tuple in the former case, and
				 * throw error in the latter case, for the same reasons
				 * enumerated in ExecUpdate and ExecDelete in
				 * nodeModifyTable.c.
				 */
				if (tmfd.cmax != estate->es_output_cid)
					ereport(ERROR,
							(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
							 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
							 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));

				/* treat it as deleted; do not process */
				return false;

			case TM_Ok:
				if (tmfd.traversed)
				{
					/*
					 * The lock chased an update chain; re-evaluate the
					 * plan's quals against the latest row version.
					 */
					*epqslot = EvalPlanQual(epqstate,
											relation,
											relinfo->ri_RangeTableIndex,
											oldslot);

					/*
					 * If PlanQual failed for updated tuple - we must not
					 * process this tuple!
					 */
					if (TupIsNull(*epqslot))
					{
						*epqslot = NULL;
						return false;
					}
				}
				break;

			case TM_Updated:
				/*
				 * NOTE(review): a guard line (presumably an isolation-level
				 * check) and the errcode(...) line of this ereport are
				 * missing from this excerpt; the elog below is the
				 * shouldn't-happen fallback.
				 */
				ereport(ERROR,
						 errmsg("could not serialize access due to concurrent update")));
				elog(ERROR, "unexpected table_tuple_lock status: %u", test);
				break;

			case TM_Deleted:
				/*
				 * NOTE(review): a guard line and the errcode(...) line are
				 * missing from this excerpt here as well.
				 */
				ereport(ERROR,
						 errmsg("could not serialize access due to concurrent delete")));
				/* tuple was deleted */
				return false;

			case TM_Invisible:
				elog(ERROR, "attempted to lock invisible tuple");
				break;

			default:
				elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
				return false;	/* keep compiler quiet */
		}
	}
	else
	{
		/*
		 * We expect the tuple to be present, thus very simple error handling
		 * suffices.
		 */
		if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
										   oldslot))
			elog(ERROR, "failed to fetch tuple for trigger");
	}

	return true;
}
3393 
3394 /*
3395  * Is trigger enabled to fire?
3396  */
static bool
/*
 * NOTE(review): the line with this function's name is missing from this
 * excerpt; presumably "TriggerEnabled(EState *estate,
 * ResultRelInfo *relinfo," per the callers above -- confirm against
 * upstream trigger.c.
 *
 * Returns true if the given trigger should fire for this event, after
 * checking enable state, the column list (UPDATE only), and any WHEN
 * clause.
 */
			   Trigger *trigger, TriggerEvent event,
			   Bitmapset *modifiedCols,
			   TupleTableSlot *oldslot, TupleTableSlot *newslot)
{
	/* Check replication-role-dependent enable state */
	/*
	 * NOTE(review): the condition line of this if/else (presumably a test
	 * of the session replication role) is missing from this excerpt.
	 */
	{
		if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
			trigger->tgenabled == TRIGGER_DISABLED)
			return false;
	}
	else						/* ORIGIN or LOCAL role */
	{
		if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
			trigger->tgenabled == TRIGGER_DISABLED)
			return false;
	}

	/*
	 * Check for column-specific trigger (only possible for UPDATE, and in
	 * fact we *must* ignore tgattr for other event types)
	 */
	if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
	{
		int			i;
		bool		modified;

		modified = false;
		for (i = 0; i < trigger->tgnattr; i++)
		{
			/*
			 * NOTE(review): the membership-test line (presumably a
			 * bms_is_member() call on trigger->tgattr[i]) is missing from
			 * this excerpt.
			 */
							  modifiedCols))
			{
				modified = true;
				break;
			}
		}
		/* Fire only if at least one listed column was actually modified. */
		if (!modified)
			return false;
	}

	/* Check for WHEN clause */
	if (trigger->tgqual)
	{
		ExprState **predicate;
		ExprContext *econtext;
		MemoryContext oldContext;
		int			i;

		Assert(estate != NULL);

		/*
		 * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
		 * matching element of relinfo->ri_TrigWhenExprs[]
		 */
		i = trigger - relinfo->ri_TrigDesc->triggers;
		predicate = &relinfo->ri_TrigWhenExprs[i];

		/*
		 * If first time through for this WHEN expression, build expression
		 * nodetrees for it.  Keep them in the per-query memory context so
		 * they'll survive throughout the query.
		 */
		if (*predicate == NULL)
		{
			Node	   *tgqual;

			oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
			tgqual = stringToNode(trigger->tgqual);
			/* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
			/*
			 * NOTE(review): the variable-remapping call lines (presumably
			 * ChangeVarNodes()) are missing from this excerpt.
			 */
			/* ExecPrepareQual wants implicit-AND form */
			tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
			*predicate = ExecPrepareQual((List *) tgqual, estate);
			MemoryContextSwitchTo(oldContext);
		}

		/*
		 * We will use the EState's per-tuple context for evaluating WHEN
		 * expressions (creating it if it's not already there).
		 */
		econtext = GetPerTupleExprContext(estate);

		/*
		 * Finally evaluate the expression, making the old and/or new tuples
		 * available as INNER_VAR/OUTER_VAR respectively.
		 */
		econtext->ecxt_innertuple = oldslot;
		econtext->ecxt_outertuple = newslot;
		if (!ExecQual(*predicate, econtext))
			return false;
	}

	return true;
}
3495 
3496 
3497 /* ----------
3498  * After-trigger stuff
3499  *
3500  * The AfterTriggersData struct holds data about pending AFTER trigger events
3501  * during the current transaction tree. (BEFORE triggers are fired
3502  * immediately so we don't need any persistent state about them.) The struct
3503  * and most of its subsidiary data are kept in TopTransactionContext; however
3504  * some data that can be discarded sooner appears in the CurTransactionContext
3505  * of the relevant subtransaction. Also, the individual event records are
3506  * kept in a separate sub-context of TopTransactionContext. This is done
3507  * mainly so that it's easy to tell from a memory context dump how much space
3508  * is being eaten by trigger events.
3509  *
3510  * Because the list of pending events can grow large, we go to some
3511  * considerable effort to minimize per-event memory consumption. The event
3512  * records are grouped into chunks and common data for similar events in the
3513  * same chunk is only stored once.
3514  *
3515  * XXX We need to be able to save the per-event data in a file if it grows too
3516  * large.
3517  * ----------
3518  */
3519 
3520 /* Per-trigger SET CONSTRAINT status */
3522 {
3526 
3528 
3529 /*
3530  * SET CONSTRAINT intra-transaction status.
3531  *
3532  * We make this a single palloc'd object so it can be copied and freed easily.
3533  *
3534  * all_isset and all_isdeferred are used to keep track
3535  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3536  *
3537  * trigstates[] stores per-trigger tgisdeferred settings.
3538  */
3540 {
3543  int numstates; /* number of trigstates[] entries in use */
3544  int numalloc; /* allocated size of trigstates[] */
3547 
3549 
3550 
3551 /*
3552  * Per-trigger-event data
3553  *
3554  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3555  * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions.
3556  * Each event record also has an associated AfterTriggerSharedData that is
3557  * shared across all instances of similar events within a "chunk".
3558  *
3559  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3560  * fields. Updates of regular tables use two; inserts and deletes of regular
3561  * tables use one; foreign tables always use zero and save the tuple(s) to a
3562  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3563  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3564  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3565  * tuple(s). This permits storing tuples once regardless of the number of
3566  * row-level triggers on a foreign table.
3567  *
3568  * When updates on partitioned tables cause rows to move between partitions,
3569  * the OIDs of both partitions are stored too, so that the tuples can be
3570  * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross-
3571  * partition update").
3572  *
3573  * Note that we need triggers on foreign tables to be fired in exactly the
3574  * order they were queued, so that the tuples come out of the tuplestore in
3575  * the right order. To ensure that, we forbid deferrable (constraint)
3576  * triggers on foreign tables. This also ensures that such triggers do not
3577  * get deferred into outer trigger query levels, meaning that it's okay to
3578  * destroy the tuplestore at the end of the query level.
3579  *
3580  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3581  * require no ctid field. We lack the flag bit space to neatly represent that
3582  * distinct case, and it seems unlikely to be worth much trouble.
3583  *
3584  * Note: ats_firing_id is initially zero and is set to something else when
3585  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3586  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3587  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3588  * because all instances of the same type of event in a given event list will
3589  * be fired at the same time, if they were queued between the same firing
3590  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3591  * a new event to an existing AfterTriggerSharedData record.
3592  */
3594 
3595 #define AFTER_TRIGGER_OFFSET 0x07FFFFFF /* must be low-order bits */
3596 #define AFTER_TRIGGER_DONE 0x80000000
3597 #define AFTER_TRIGGER_IN_PROGRESS 0x40000000
3598 /* bits describing the size and tuple sources of this event */
3599 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3600 #define AFTER_TRIGGER_FDW_FETCH 0x20000000
3601 #define AFTER_TRIGGER_1CTID 0x10000000
3602 #define AFTER_TRIGGER_2CTID 0x30000000
3603 #define AFTER_TRIGGER_CP_UPDATE 0x08000000
3604 #define AFTER_TRIGGER_TUP_BITS 0x38000000
3606 
3608 {
3609  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3610  Oid ats_tgoid; /* the trigger's ID */
3611  Oid ats_relid; /* the relation it's on */
3612  CommandId ats_firing_id; /* ID for firing cycle */
3613  struct AfterTriggersTableData *ats_table; /* transition table access */
3614  Bitmapset *ats_modifiedcols; /* modified columns */
3616 
3618 
3620 {
3621  TriggerFlags ate_flags; /* status bits and offset to shared data */
3622  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3623  ItemPointerData ate_ctid2; /* new updated tuple */
3624 
3625  /*
3626  * During a cross-partition update of a partitioned table, we also store
3627  * the OIDs of source and destination partitions that are needed to fetch
3628  * the old (ctid1) and the new tuple (ctid2) from, respectively.
3629  */
3633 
3634 /* AfterTriggerEventData, minus ate_src_part, ate_dst_part */
3636 {
3641 
3642 /* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */
3644 {
3645  TriggerFlags ate_flags; /* status bits and offset to shared data */
3646  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3648 
3649 /* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */
3651 {
3652  TriggerFlags ate_flags; /* status bits and offset to shared data */
3654 
3655 #define SizeofTriggerEvent(evt) \
3656  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \
3657  sizeof(AfterTriggerEventData) : \
3658  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3659  sizeof(AfterTriggerEventDataNoOids) : \
3660  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3661  sizeof(AfterTriggerEventDataOneCtid) : \
3662  sizeof(AfterTriggerEventDataZeroCtids))))
3663 
3664 #define GetTriggerSharedData(evt) \
3665  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3666 
3667 /*
3668  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3669  * larger chunks (a slightly more sophisticated version of an expansible
3670  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3671  * AfterTriggerEventData records; the space between endfree and endptr is
3672  * occupied by AfterTriggerSharedData records.
3673  */
3675 {
3676  struct AfterTriggerEventChunk *next; /* list link */
3677  char *freeptr; /* start of free space in chunk */
3678  char *endfree; /* end of free space in chunk */
3679  char *endptr; /* end of chunk */
3680  /* event data follows here */
3682 
3683 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3684 
3685 /* A list of events */
3687 {
3690  char *tailfree; /* freeptr of tail chunk */
3692 
3693 /* Macros to help in iterating over a list of events */
3694 #define for_each_chunk(cptr, evtlist) \
3695  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3696 #define for_each_event(eptr, cptr) \
3697  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3698  (char *) eptr < (cptr)->freeptr; \
3699  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3700 /* Use this if no special per-chunk processing is needed */
3701 #define for_each_event_chunk(eptr, cptr, evtlist) \
3702  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3703 
3704 /* Macros for iterating from a start point that might not be list start */
3705 #define for_each_chunk_from(cptr) \
3706  for (; cptr != NULL; cptr = cptr->next)
3707 #define for_each_event_from(eptr, cptr) \
3708  for (; \
3709  (char *) eptr < (cptr)->freeptr; \
3710  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3711 
3712 
3713 /*
3714  * All per-transaction data for the AFTER TRIGGERS module.
3715  *
3716  * AfterTriggersData has the following fields:
3717  *
3718  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3719  * We mark firable events with the current firing cycle's ID so that we can
3720  * tell which ones to work on. This ensures sane behavior if a trigger
3721  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3722  * only fire those events that weren't already scheduled for firing.
3723  *
3724  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3725  * This is saved and restored across failed subtransactions.
3726  *
3727  * events is the current list of deferred events. This is global across
3728  * all subtransactions of the current transaction. In a subtransaction
3729  * abort, we know that the events added by the subtransaction are at the
3730  * end of the list, so it is relatively easy to discard them. The event
3731  * list chunks themselves are stored in event_cxt.
3732  *
3733  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3734  * (-1 when the stack is empty).
3735  *
3736  * query_stack[query_depth] is the per-query-level data, including these fields:
3737  *
3738  * events is a list of AFTER trigger events queued by the current query.
3739  * None of these are valid until the matching AfterTriggerEndQuery call
3740  * occurs. At that point we fire immediate-mode triggers, and append any
3741  * deferred events to the main events list.
3742  *
3743  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3744  * needed by events queued by the current query. (Note: we use just one
3745  * tuplestore even though more than one foreign table might be involved.
3746  * This is okay because tuplestores don't really care what's in the tuples
3747  * they store; but it's possible that someday it'd break.)
3748  *
3749  * tables is a List of AfterTriggersTableData structs for target tables
3750  * of the current query (see below).
3751  *
3752  * maxquerydepth is just the allocated length of query_stack.
3753  *
3754  * trans_stack holds per-subtransaction data, including these fields:
3755  *
3756  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3757  * state data. Each subtransaction level that modifies that state first
3758  * saves a copy, which we use to restore the state if we abort.
3759  *
3760  * events is a copy of the events head/tail pointers,
3761  * which we use to restore those values during subtransaction abort.
3762  *
3763  * query_depth is the subtransaction-start-time value of query_depth,
3764  * which we similarly use to clean up at subtransaction abort.
3765  *
3766  * firing_counter is the subtransaction-start-time value of firing_counter.
3767  * We use this to recognize which deferred triggers were fired (or marked
3768  * for firing) within an aborted subtransaction.
3769  *
3770  * We use GetCurrentTransactionNestLevel() to determine the correct array
3771  * index in trans_stack. maxtransdepth is the number of allocated entries in
3772  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3773  * in cases where errors during subxact abort cause multiple invocations
3774  * of AfterTriggerEndSubXact() at the same nesting depth.)
3775  *
3776  * We create an AfterTriggersTableData struct for each target table of the
3777  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3778  * either transition tables or statement-level triggers. This is used to
3779  * hold the relevant transition tables, as well as info tracking whether
3780  * we already queued the statement triggers. (We use that info to prevent
3781  * firing the same statement triggers more than once per statement, or really
3782  * once per transition table set.) These structs, along with the transition
3783  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3784  * That's sufficient lifespan because we don't allow transition tables to be
3785  * used by deferrable triggers, so they only need to survive until
3786  * AfterTriggerEndQuery.
3787  */
3791 
3792 typedef struct AfterTriggersData
3793 {
3794  CommandId firing_counter; /* next firing ID to assign */
3795  SetConstraintState state; /* the active S C state */
3796  AfterTriggerEventList events; /* deferred-event list */
3797  MemoryContext event_cxt; /* memory context for events, if any */
3798 
3799  /* per-query-level data: */
3800  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3801  int query_depth; /* current index in above array */
3802  int maxquerydepth; /* allocated len of above array */
3803 
3804  /* per-subtransaction-level data: */
3805  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3806  int maxtransdepth; /* allocated len of above array */
3808 
3810 {
3811  AfterTriggerEventList events; /* events pending from this query */
3812  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3813  List *tables; /* list of AfterTriggersTableData, see below */
3814 };
3815 
3817 {
3818  /* these fields are just for resetting at subtrans abort: */
3819  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3820  AfterTriggerEventList events; /* saved list pointer */
3821  int query_depth; /* saved query_depth */
3822  CommandId firing_counter; /* saved firing_counter */
3823 };
3824 
3826 {
3827  /* relid + cmdType form the lookup key for these structs: */
3828  Oid relid; /* target table's OID */
3829  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3830  bool closed; /* true when no longer OK to add tuples */
3831  bool before_trig_done; /* did we already queue BS triggers? */
3832  bool after_trig_done; /* did we already queue AS triggers? */
3833  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3834 
3835  /*
3836  * We maintain separate transition tables for UPDATE/INSERT/DELETE since
3837  * MERGE can run all three actions in a single statement. Note that UPDATE
3838  * needs both old and new transition tables whereas INSERT needs only new,
3839  * and DELETE needs only old.
3840  */
3841 
3842  /* "old" transition table for UPDATE, if any */
3844  /* "new" transition table for UPDATE, if any */
3846  /* "old" transition table for DELETE, if any */
3848  /* "new" transition table for INSERT, if any */
3850 
3851  TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3852 };
3853 
3855 
3856 static void AfterTriggerExecute(EState *estate,
3857  AfterTriggerEvent event,
3858  ResultRelInfo *relInfo,
3859  ResultRelInfo *src_relInfo,
3860  ResultRelInfo *dst_relInfo,
3861  TriggerDesc *trigdesc,
3862  FmgrInfo *finfo,
3863  Instrumentation *instr,
3864  MemoryContext per_tuple_context,
3865  TupleTableSlot *trig_tuple_slot1,
3866  TupleTableSlot *trig_tuple_slot2);
3868  CmdType cmdType);
3870  TupleDesc tupdesc);
3872  TupleTableSlot *oldslot,
3873  TupleTableSlot *newslot,
3874  TransitionCaptureState *transition_capture);
3875 static void TransitionTableAddTuple(EState *estate,
3876  TransitionCaptureState *transition_capture,
3877  ResultRelInfo *relinfo,
3878  TupleTableSlot *slot,
3879  TupleTableSlot *original_insert_tuple,
3880  Tuplestorestate *tuplestore);
3882 static SetConstraintState SetConstraintStateCreate(int numalloc);
3885  Oid tgoid, bool tgisdeferred);
3886 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3887 
3888 
3889 /*
3890  * Get the FDW tuplestore for the current trigger query level, creating it
3891  * if necessary.
3892  */
static Tuplestorestate *
/*
 * NOTE(review): the line with this function's name is missing from this
 * excerpt; presumably "GetCurrentFDWTuplestore(void)" per the header
 * comment above -- confirm against upstream trigger.c.
 */
{
	Tuplestorestate *ret;

	/*
	 * NOTE(review): the line initializing 'ret' (presumably reading the
	 * current query level's fdw_tuplestore) is missing from this excerpt.
	 */
	if (ret == NULL)
	{
		/* First use at this query level: create the tuplestore. */
		MemoryContext oldcxt;
		ResourceOwner saveResourceOwner;

		/*
		 * Make the tuplestore valid until end of subtransaction.  We really
		 * only need it until AfterTriggerEndQuery().
		 */
		/*
		 * NOTE(review): the memory-context switch and the assignment of a
		 * subtransaction-lifetime resource owner are missing from this
		 * excerpt; only the save/restore of CurrentResourceOwner is
		 * visible below.
		 */
		saveResourceOwner = CurrentResourceOwner;

		ret = tuplestore_begin_heap(false, false, work_mem);

		CurrentResourceOwner = saveResourceOwner;
		MemoryContextSwitchTo(oldcxt);

		/*
		 * NOTE(review): the line storing 'ret' back into the per-query
		 * state (so later calls reuse it) is missing from this excerpt.
		 */
	}

	return ret;
}
3922 
3923 /* ----------
3924  * afterTriggerCheckState()
3925  *
3926  * Returns true if the trigger event is actually in state DEFERRED.
3927  * ----------
3928  */
static bool
/*
 * NOTE(review): the line with this function's name and parameter is
 * missing from this excerpt; presumably
 * "afterTriggerCheckState(AfterTriggerShared evtshared)" per the header
 * comment above -- confirm against upstream trigger.c.
 */
{
	Oid			tgoid = evtshared->ats_tgoid;
	/*
	 * NOTE(review): the declaration/initialization of 'state' (presumably
	 * the active SET CONSTRAINTS state from the module's global data) is
	 * missing from this excerpt.
	 */
	int			i;

	/*
	 * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
	 * constraints declared NOT DEFERRABLE), the state is always false.
	 */
	if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
		return false;

	/*
	 * If constraint state exists, SET CONSTRAINTS might have been executed
	 * either for this trigger or for all triggers.
	 */
	if (state != NULL)
	{
		/* Check for SET CONSTRAINTS for this specific trigger. */
		for (i = 0; i < state->numstates; i++)
		{
			if (state->trigstates[i].sct_tgoid == tgoid)
				return state->trigstates[i].sct_tgisdeferred;
		}

		/* Check for SET CONSTRAINTS ALL. */
		if (state->all_isset)
			return state->all_isdeferred;
	}

	/*
	 * Otherwise return the default state for the trigger.
	 */
	return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
}
3966 
3967 
3968 /* ----------
3969  * afterTriggerAddEvent()
3970  *
3971  * Add a new trigger event to the specified queue.
3972  * The passed-in event data is copied.
3973  * ----------
3974  */
3975 static void
3977  AfterTriggerEvent event, AfterTriggerShared evtshared)
3978 {
3979  Size eventsize = SizeofTriggerEvent(event);
3980  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3981  AfterTriggerEventChunk *chunk;
3982  AfterTriggerShared newshared;
3983  AfterTriggerEvent newevent;
3984 
3985  /*
3986  * If empty list or not enough room in the tail chunk, make a new chunk.
3987  * We assume here that a new shared record will always be needed.
3988  */
3989  chunk = events->tail;
3990  if (chunk == NULL ||
3991  chunk->endfree - chunk->freeptr < needed)
3992  {
3993  Size chunksize;
3994 
3995  /* Create event context if we didn't already */
3996  if (afterTriggers.event_cxt == NULL)
3999  "AfterTriggerEvents",
4001 
4002  /*
4003  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
4004  * These numbers are fairly arbitrary, though there is a hard limit at
4005  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
4006  * shared records using the available space in ate_flags. Another
4007  * constraint is that if the chunk size gets too huge, the search loop
4008  * below would get slow given a (not too common) usage pattern with
4009  * many distinct event types in a chunk. Therefore, we double the
4010  * preceding chunk size only if there weren't too many shared records
4011  * in the preceding chunk; otherwise we halve it. This gives us some
4012  * ability to adapt to the actual usage pattern of the current query
4013  * while still having large chunk sizes in typical usage. All chunk
4014  * sizes used should be MAXALIGN multiples, to ensure that the shared
4015  * records will be aligned safely.
4016  */
4017 #define MIN_CHUNK_SIZE 1024
4018 #define MAX_CHUNK_SIZE (1024*1024)
4019 
4020 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
4021 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
4022 #endif
4023 
4024  if (chunk == NULL)
4025  chunksize = MIN_CHUNK_SIZE;
4026  else
4027  {
4028  /* preceding chunk size... */
4029  chunksize = chunk->endptr - (char *) chunk;
4030  /* check number of shared records in preceding chunk */
4031  if ((chunk->endptr - chunk->endfree) <=
4032  (100 * sizeof(AfterTriggerSharedData)))
4033  chunksize *= 2; /* okay, double it */
4034  else
4035  chunksize /= 2; /* too many shared records */
4036  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
4037  }
4038  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
4039  chunk->next = NULL;
4040  chunk->freeptr = CHUNK_DATA_START(chunk);
4041  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
4042  Assert(chunk->endfree - chunk->freeptr >= needed);
4043 
4044  if (events->head == NULL)
4045  events->head = chunk;
4046  else
4047  events->tail->next = chunk;
4048  events->tail = chunk;
4049  /* events->tailfree is now out of sync, but we'll fix it below */
4050  }
4051 
4052  /*
4053  * Try to locate a matching shared-data record already in the chunk. If
4054  * none, make a new one.
4055  */
4056  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
4057  (char *) newshared >= chunk->endfree;
4058  newshared--)
4059  {
4060  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
4061  newshared->ats_relid == evtshared->ats_relid &&
4062  newshared->ats_event == evtshared->ats_event &&
4063  newshared->ats_table == evtshared->ats_table &&
4064  newshared->ats_firing_id == 0)
4065  break;
4066  }
4067  if ((char *) newshared < chunk->endfree)
4068  {
4069  *newshared = *evtshared;
4070  newshared->ats_firing_id = 0; /* just to be sure */
4071  chunk->endfree = (char *) newshared;
4072  }
4073 
4074  /* Insert the data */
4075  newevent = (AfterTriggerEvent) chunk->freeptr;
4076  memcpy(newevent, event, eventsize);
4077  /* ... and link the new event to its shared record */
4078  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
4079  newevent->ate_flags |= (char *) newshared - (char *) newevent;
4080 
4081  chunk->freeptr += eventsize;
4082  events->tailfree = chunk->freeptr;
4083 }
4084 
4085 /* ----------
4086  * afterTriggerFreeEventList()
4087  *
4088  * Free all the event storage in the given list.
4089  * ----------
4090  */
4091 static void
4093 {
4094  AfterTriggerEventChunk *chunk;
4095 
4096  while ((chunk = events->head) != NULL)
4097  {
4098  events->head = chunk->next;
4099  pfree(chunk);
4100  }
4101  events->tail = NULL;
4102  events->tailfree = NULL;
4103 }
4104 
4105 /* ----------
4106  * afterTriggerRestoreEventList()
4107  *
4108  * Restore an event list to its prior length, removing all the events
4109  * added since it had the value old_events.
4110  * ----------
4111  */
4112 static void
4114  const AfterTriggerEventList *old_events)
4115 {
4116  AfterTriggerEventChunk *chunk;
4117  AfterTriggerEventChunk *next_chunk;
4118 
4119  if (old_events->tail == NULL)
4120  {
4121  /* restoring to a completely empty state, so free everything */
4122  afterTriggerFreeEventList(events);
4123  }
4124  else
4125  {
4126  *events = *old_events;
4127  /* free any chunks after the last one we want to keep */
4128  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
4129  {
4130  next_chunk = chunk->next;
4131  pfree(chunk);
4132  }
4133  /* and clean up the tail chunk to be the right length */
4134  events->tail->next = NULL;
4135  events->tail->freeptr = events->tailfree;
4136 
4137  /*
4138  * We don't make any effort to remove now-unused shared data records.
4139  * They might still be useful, anyway.
4140  */
4141  }
4142 }
4143 
4144 /* ----------
4145  * afterTriggerDeleteHeadEventChunk()
4146  *
4147  * Remove the first chunk of events from the query level's event list.
4148  * Keep any event list pointers elsewhere in the query level's data
4149  * structures in sync.
4150  * ----------
4151  */
4152 static void
4154 {
4155  AfterTriggerEventChunk *target = qs->events.head;
4156  ListCell *lc;
4157 
4158  Assert(target && target->next);
4159 
4160  /*
4161  * First, update any pointers in the per-table data, so that they won't be
4162  * dangling. Resetting obsoleted pointers to NULL will make
4163  * cancel_prior_stmt_triggers start from the list head, which is fine.
4164  */
4165  foreach(lc, qs->tables)
4166  {
4168 
4169  if (table->after_trig_done &&
4170  table->after_trig_events.tail == target)
4171  {
4172  table->after_trig_events.head = NULL;
4173  table->after_trig_events.tail = NULL;
4174  table->after_trig_events.tailfree = NULL;
4175  }
4176  }
4177 
4178  /* Now we can flush the head chunk */
4179  qs->events.head = target->next;
4180  pfree(target);
4181 }
4182 
4183 
4184 /* ----------
4185  * AfterTriggerExecute()
4186  *
4187  * Fetch the required tuples back from the heap and fire one
4188  * single trigger function.
4189  *
4190  * Frequently, this will be fired many times in a row for triggers of
4191  * a single relation. Therefore, we cache the open relation and provide
4192  * fmgr lookup cache space at the caller level. (For triggers fired at
4193  * the end of a query, we can even piggyback on the executor's state.)
4194  *
4195  * When fired for a cross-partition update of a partitioned table, the old
4196  * tuple is fetched using 'src_relInfo' (the source leaf partition) and
4197  * the new tuple using 'dst_relInfo' (the destination leaf partition), though
4198  * both are converted into the root partitioned table's format before passing
4199  * to the trigger function.
4200  *
4201  * event: event currently being fired.
4202  * relInfo: result relation for event.
4203  * src_relInfo: source partition of a cross-partition update
4204  * dst_relInfo: its destination partition
4205  * trigdesc: working copy of rel's trigger info.
4206  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4207  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4208  * or NULL if no instrumentation is wanted.
4209  * per_tuple_context: memory context to call trigger function in.
4210  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4211  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
4212  * ----------
4213  */
4214 static void
4216  AfterTriggerEvent event,
4217  ResultRelInfo *relInfo,
4218  ResultRelInfo *src_relInfo,
4219  ResultRelInfo *dst_relInfo,
4220  TriggerDesc *trigdesc,
4221  FmgrInfo *finfo, Instrumentation *instr,
4222  MemoryContext per_tuple_context,
4223  TupleTableSlot *trig_tuple_slot1,
4224  TupleTableSlot *trig_tuple_slot2)
4225 {
4226  Relation rel = relInfo->ri_RelationDesc;
4227  Relation src_rel = src_relInfo->ri_RelationDesc;
4228  Relation dst_rel = dst_relInfo->ri_RelationDesc;
4229  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4230  Oid tgoid = evtshared->ats_tgoid;
4231  TriggerData LocTriggerData = {0};
4232  HeapTuple rettuple;
4233  int tgindx;
4234  bool should_free_trig = false;
4235  bool should_free_new = false;
4236 
4237  /*
4238  * Locate trigger in trigdesc.
4239  */
4240  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4241  {
4242  if (trigdesc->triggers[tgindx].tgoid == tgoid)
4243  {
4244  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4245  break;
4246  }
4247  }
4248  if (LocTriggerData.tg_trigger == NULL)
4249  elog(ERROR, "could not find trigger %u", tgoid);
4250 
4251  /*
4252  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4253  * to include time spent re-fetching tuples in the trigger cost.
4254  */
4255  if (instr)
4256  InstrStartNode(instr + tgindx);
4257 
4258  /*
4259  * Fetch the required tuple(s).
4260  */
4261  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4262  {
4264  {
4265  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4266 
4267  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4268  trig_tuple_slot1))
4269  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4270 
4271  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4273  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4274  trig_tuple_slot2))
4275  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4276  }
4277  /* fall through */
4279 
4280  /*
4281  * Store tuple in the slot so that tg_trigtuple does not reference
4282  * tuplestore memory. (It is formally possible for the trigger
4283  * function to queue trigger events that add to the same
4284  * tuplestore, which can push other tuples out of memory.) The
4285  * distinction is academic, because we start with a minimal tuple
4286  * that is stored as a heap tuple, constructed in different memory
4287  * context, in the slot anyway.
4288  */
4289  LocTriggerData.tg_trigslot = trig_tuple_slot1;
4290  LocTriggerData.tg_trigtuple =
4291  ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
4292 
4293  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4295  {
4296  LocTriggerData.tg_newslot = trig_tuple_slot2;
4297  LocTriggerData.tg_newtuple =
4298  ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
4299  }
4300  else
4301  {
4302  LocTriggerData.tg_newtuple = NULL;
4303  }
4304  break;
4305 
4306  default:
4307  if (ItemPointerIsValid(&(event->ate_ctid1)))
4308  {
4309  TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate,
4310  src_relInfo);
4311 
4312  if (!table_tuple_fetch_row_version(src_rel,
4313  &(event->ate_ctid1),
4314  SnapshotAny,
4315  src_slot))
4316  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4317 
4318  /*
4319  * Store the tuple fetched from the source partition into the
4320  * target (root partitioned) table slot, converting if needed.
4321  */
4322  if (src_relInfo != relInfo)
4323  {
4324  TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo);
4325 
4326  LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
4327  if (map)
4328  {
4330  src_slot,
4331  LocTriggerData.tg_trigslot);
4332  }
4333  else
4334  ExecCopySlot(LocTriggerData.tg_trigslot, src_slot);
4335  }
4336  else
4337  LocTriggerData.tg_trigslot = src_slot;
4338  LocTriggerData.tg_trigtuple =
4339  ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
4340  }
4341  else
4342  {
4343  LocTriggerData.tg_trigtuple = NULL;
4344  }
4345 
4346  /* don't touch ctid2 if not there */
4348  (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) &&
4349  ItemPointerIsValid(&(event->ate_ctid2)))
4350  {
4351  TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate,
4352  dst_relInfo);
4353 
4354  if (!table_tuple_fetch_row_version(dst_rel,
4355  &(event->ate_ctid2),
4356  SnapshotAny,
4357  dst_slot))
4358  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4359 
4360  /*
4361  * Store the tuple fetched from the destination partition into
4362  * the target (root partitioned) table slot, converting if
4363  * needed.
4364  */
4365  if (dst_relInfo != relInfo)
4366  {
4367  TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo);
4368 
4369  LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
4370  if (map)
4371  {
4373  dst_slot,
4374  LocTriggerData.tg_newslot);
4375  }
4376  else
4377  ExecCopySlot(LocTriggerData.tg_newslot, dst_slot);
4378  }
4379  else
4380  LocTriggerData.tg_newslot = dst_slot;
4381  LocTriggerData.tg_newtuple =
4382  ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
4383  }
4384  else
4385  {
4386  LocTriggerData.tg_newtuple = NULL;
4387  }
4388  }
4389 
4390  /*
4391  * Set up the tuplestore information to let the trigger have access to
4392  * transition tables. When we first make a transition table available to
4393  * a trigger, mark it "closed" so that it cannot change anymore. If any
4394  * additional events of the same type get queued in the current trigger
4395  * query level, they'll go into new transition tables.
4396  */
4397  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4398  if (evtshared->ats_table)
4399  {
4400  if (LocTriggerData.tg_trigger->tgoldtable)
4401  {
4402  if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event))
4403  LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore;
4404  else
4405  LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore;
4406  evtshared->ats_table->closed = true;
4407  }
4408 
4409  if (LocTriggerData.tg_trigger->tgnewtable)
4410  {
4411  if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event))
4412  LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore;
4413  else
4414  LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore;
4415  evtshared->ats_table->closed = true;
4416  }
4417  }
4418 
4419  /*
4420  * Setup the remaining trigger information
4421  */
4422  LocTriggerData.type = T_TriggerData;
4423  LocTriggerData.tg_event =
4425  LocTriggerData.tg_relation = rel;
4426  if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4427  LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4428 
4429  MemoryContextReset(per_tuple_context);
4430 
4431  /*
4432  * Call the trigger and throw away any possibly returned updated tuple.
4433  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4434  */
4435  rettuple = ExecCallTriggerFunc(&LocTriggerData,
4436  tgindx,
4437  finfo,
4438  NULL,
4439  per_tuple_context);
4440  if (rettuple != NULL &&
4441  rettuple != LocTriggerData.tg_trigtuple &&
4442  rettuple != LocTriggerData.tg_newtuple)
4443  heap_freetuple(rettuple);
4444 
4445  /*
4446  * Release resources
4447  */
4448  if (should_free_trig)
4449  heap_freetuple(LocTriggerData.tg_trigtuple);
4450  if (should_free_new)
4451  heap_freetuple(LocTriggerData.tg_newtuple);
4452 
4453  /* don't clear slots' contents if foreign table */
4454  if (trig_tuple_slot1 == NULL)
4455  {
4456  if (LocTriggerData.tg_trigslot)
4457  ExecClearTuple(LocTriggerData.tg_trigslot);
4458  if (LocTriggerData.tg_newslot)
4459  ExecClearTuple(LocTriggerData.tg_newslot);
4460  }
4461 
4462  /*
4463  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4464  * one "tuple returned" (really the number of firings).
4465  */
4466  if (instr)
4467  InstrStopNode(instr + tgindx, 1);
4468 }
4469 
4470 
4471 /*
4472  * afterTriggerMarkEvents()
4473  *
4474  * Scan the given event list for not yet invoked events. Mark the ones
4475  * that can be invoked now with the current firing ID.
4476  *
4477  * If move_list isn't NULL, events that are not to be invoked now are
4478  * transferred to move_list.
4479  *
4480  * When immediate_only is true, do not invoke currently-deferred triggers.
4481  * (This will be false only at main transaction exit.)
4482  *
4483  * Returns true if any invokable events were found.
4484  */
4485 static bool
4487  AfterTriggerEventList *move_list,
4488  bool immediate_only)
4489 {
4490  bool found = false;
4491  bool deferred_found = false;
4492  AfterTriggerEvent event;
4493  AfterTriggerEventChunk *chunk;
4494 
4495  for_each_event_chunk(event, chunk, *events)
4496  {
4497  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4498  bool defer_it = false;
4499 
4500  if (!(event->ate_flags &
4502  {
4503  /*
4504  * This trigger hasn't been called or scheduled yet. Check if we
4505  * should call it now.
4506  */
4507  if (immediate_only && afterTriggerCheckState(evtshared))
4508  {
4509  defer_it = true;
4510  }
4511  else
4512  {
4513  /*
4514  * Mark it as to be fired in this firing cycle.
4515  */
4517  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4518  found = true;
4519  }
4520  }
4521 
4522  /*
4523  * If it's deferred, move it to move_list, if requested.
4524  */
4525  if (defer_it && move_list != NULL)
4526  {
4527  deferred_found = true;
4528  /* add it to move_list */
4529  afterTriggerAddEvent(move_list, event, evtshared);
4530  /* mark original copy "done" so we don't do it again */
4531  event->ate_flags |= AFTER_TRIGGER_DONE;
4532  }
4533  }
4534 
4535  /*
4536  * We could allow deferred triggers if, before the end of the
4537  * security-restricted operation, we were to verify that a SET CONSTRAINTS
4538  * ... IMMEDIATE has fired all such triggers. For now, don't bother.
4539  */
4540  if (deferred_found && InSecurityRestrictedOperation())
4541  ereport(ERROR,
4542  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
4543  errmsg("cannot fire deferred trigger within security-restricted operation")));
4544 
4545  return found;
4546 }
4547 
4548 /*
4549  * afterTriggerInvokeEvents()
4550  *
4551  * Scan the given event list for events that are marked as to be fired
4552  * in the current firing cycle, and fire them.
4553  *
4554  * If estate isn't NULL, we use its result relation info to avoid repeated
4555  * openings and closing of trigger target relations. If it is NULL, we
4556  * make one locally to cache the info in case there are multiple trigger
4557  * events per rel.
4558  *
4559  * When delete_ok is true, it's safe to delete fully-processed events.
4560  * (We are not very tense about that: we simply reset a chunk to be empty
4561  * if all its events got fired. The objective here is just to avoid useless
4562  * rescanning of events when a trigger queues new events during transaction
4563  * end, so it's not necessary to worry much about the case where only
4564  * some events are fired.)
4565  *
4566  * Returns true if no unfired events remain in the list (this allows us
4567  * to avoid repeating afterTriggerMarkEvents).
4568  */
4569 static bool
4571  CommandId firing_id,
4572  EState *estate,
4573  bool delete_ok)
4574 {
4575  bool all_fired = true;
4576  AfterTriggerEventChunk *chunk;
4577  MemoryContext per_tuple_context;
4578  bool local_estate = false;
4579  ResultRelInfo *rInfo = NULL;
4580  Relation rel = NULL;
4581  TriggerDesc *trigdesc = NULL;
4582  FmgrInfo *finfo = NULL;
4583  Instrumentation *instr = NULL;
4584  TupleTableSlot *slot1 = NULL,
4585  *slot2 = NULL;
4586 
4587  /* Make a local EState if need be */
4588  if (estate == NULL)
4589  {
4590  estate = CreateExecutorState();
4591  local_estate = true;
4592  }
4593 
4594  /* Make a per-tuple memory context for trigger function calls */
4595  per_tuple_context =
4597  "AfterTriggerTupleContext",
4599 
4600  for_each_chunk(chunk, *events)
4601  {
4602  AfterTriggerEvent event;
4603  bool all_fired_in_chunk = true;
4604 
4605  for_each_event(event, chunk)
4606  {
4607  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4608 
4609  /*
4610  * Is it one for me to fire?
4611  */
4612  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4613  evtshared->ats_firing_id == firing_id)
4614  {
4615  ResultRelInfo *src_rInfo,
4616  *dst_rInfo;
4617 
4618  /*
4619  * So let's fire it... but first, find the correct relation if
4620  * this is not the same relation as before.
4621  */
4622  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4623  {
4624  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid,
4625  NULL);
4626  rel = rInfo->ri_RelationDesc;
4627  /* Catch calls with insufficient relcache refcounting */
4629  trigdesc = rInfo->ri_TrigDesc;
4630  finfo = rInfo->ri_TrigFunctions;
4631  instr = rInfo->ri_TrigInstrument;
4632  if (slot1 != NULL)
4633  {
4636  slot1 = slot2 = NULL;
4637  }
4638  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4639  {
4640  slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4642  slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4644  }
4645  if (trigdesc == NULL) /* should not happen */
4646  elog(ERROR, "relation %u has no triggers",
4647  evtshared->ats_relid);
4648  }
4649 
4650  /*
4651  * Look up source and destination partition result rels of a
4652  * cross-partition update event.
4653  */
4654  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
4656  {
4657  Assert(OidIsValid(event->ate_src_part) &&
4658  OidIsValid(event->ate_dst_part));
4659  src_rInfo = ExecGetTriggerResultRel(estate,
4660  event->ate_src_part,
4661  rInfo);
4662  dst_rInfo = ExecGetTriggerResultRel(estate,
4663  event->ate_dst_part,
4664  rInfo);
4665  }
4666  else
4667  src_rInfo = dst_rInfo = rInfo;
4668 
4669  /*
4670  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4671  * still set, so recursive examinations of the event list
4672  * won't try to re-fire it.
4673  */
4674  AfterTriggerExecute(estate, event, rInfo,
4675  src_rInfo, dst_rInfo,
4676  trigdesc, finfo, instr,
4677  per_tuple_context, slot1, slot2);
4678 
4679  /*
4680  * Mark the event as done.
4681  */
4682  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4683  event->ate_flags |= AFTER_TRIGGER_DONE;
4684  }
4685  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4686  {
4687  /* something remains to be done */
4688  all_fired = all_fired_in_chunk = false;
4689  }
4690  }
4691 
4692  /* Clear the chunk if delete_ok and nothing left of interest */
4693  if (delete_ok && all_fired_in_chunk)
4694  {
4695  chunk->freeptr = CHUNK_DATA_START(chunk);
4696  chunk->endfree = chunk->endptr;
4697 
4698  /*
4699  * If it's last chunk, must sync event list's tailfree too. Note
4700  * that delete_ok must NOT be passed as true if there could be
4701  * additional AfterTriggerEventList values pointing at this event
4702  * list, since we'd fail to fix their copies of tailfree.
4703  */
4704  if (chunk == events->tail)
4705  events->tailfree = chunk->freeptr;
4706  }
4707  }
4708  if (slot1 != NULL)
4709  {
4712  }
4713 
4714  /* Release working resources */
4715  MemoryContextDelete(per_tuple_context);
4716 
4717  if (local_estate)
4718  {
4719  ExecCloseResultRelations(estate);
4720  ExecResetTupleTable(estate->es_tupleTable, false);
4721  FreeExecutorState(estate);
4722  }
4723 
4724  return all_fired;
4725 }
4726 
4727 
4728 /*
4729  * GetAfterTriggersTableData
4730  *
4731  * Find or create an AfterTriggersTableData struct for the specified
4732  * trigger event (relation + operation type). Ignore existing structs
4733  * marked "closed"; we don't want to put any additional tuples into them,
4734  * nor change their stmt-triggers-fired state.
4735  *
4736  * Note: the AfterTriggersTableData list is allocated in the current
4737  * (sub)transaction's CurTransactionContext. This is OK because
4738  * we don't need it to live past AfterTriggerEndQuery.
4739  */
4740 static AfterTriggersTableData *
4742 {
4743  AfterTriggersTableData *table;
4745  MemoryContext oldcxt;
4746  ListCell *lc;
4747 
4748  /* Caller should have ensured query_depth is OK. */
4752 
4753  foreach(lc, qs->tables)
4754  {
4755  table = (AfterTriggersTableData *) lfirst(lc);
4756  if (table->relid == relid && table->cmdType == cmdType &&
4757  !table->closed)
4758  return table;
4759  }
4760 
4762 
4764  table->relid = relid;
4765  table->cmdType = cmdType;
4766  qs->tables = lappend(qs->tables, table);
4767 
4768  MemoryContextSwitchTo(oldcxt);
4769 
4770  return table;
4771 }
4772 
4773 /*
4774  * Returns a TupleTableSlot suitable for holding the tuples to be put
4775  * into AfterTriggersTableData's transition table tuplestores.
4776  */
4777 static TupleTableSlot *
4779  TupleDesc tupdesc)
4780 {
4781  /* Create it if not already done. */
4782  if (!table->storeslot)
4783  {
4784  MemoryContext oldcxt;
4785 
4786  /*
4787  * We only need this slot only until AfterTriggerEndQuery, but making
4788  * it last till end-of-subxact is good enough. It'll be freed by
4789  * AfterTriggerFreeQuery().
4790  */
4792  table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
4793  MemoryContextSwitchTo(oldcxt);
4794  }
4795 
4796  return table->storeslot;
4797 }
4798 
4799 /*
4800  * MakeTransitionCaptureState
4801  *
4802  * Make a TransitionCaptureState object for the given TriggerDesc, target
4803  * relation, and operation type. The TCS object holds all the state needed
4804  * to decide whether to capture tuples in transition tables.
4805  *
4806  * If there are no triggers in 'trigdesc' that request relevant transition
4807  * tables, then return NULL.
4808  *
4809  * The resulting object can be passed to the ExecAR* functions. When
4810  * dealing with child tables, the caller can set tcs_original_insert_tuple
4811  * to avoid having to reconstruct the original tuple in the root table's
4812  * format.
4813  *
4814  * Note that we copy the flags from a parent table into this struct (rather
4815  * than subsequently using the relation's TriggerDesc directly) so that we can
4816  * use it to control collection of transition tuples from child tables.
4817  *
4818  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4819  * on the same table during one query should share one transition table.
4820  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4821  * looked up using the table OID + CmdType, and are merely referenced by
4822  * the TransitionCaptureState objects we hand out to callers.
4823  */
4826 {
4828  bool need_old_upd,
4829  need_new_upd,
4830  need_old_del,
4831  need_new_ins;
4832  AfterTriggersTableData *table;
4833  MemoryContext oldcxt;
4834  ResourceOwner saveResourceOwner;
4835 
4836  if (trigdesc == NULL)
4837  return NULL;
4838 
4839  /* Detect which table(s) we need. */
4840  switch (cmdType)
4841  {
4842  case CMD_INSERT:
4843  need_old_upd = need_old_del = need_new_upd = false;
4844  need_new_ins = trigdesc->trig_insert_new_table;
4845  break;
4846  case CMD_UPDATE:
4847  need_old_upd = trigdesc->trig_update_old_table;
4848  need_new_upd = trigdesc->trig_update_new_table;
4849  need_old_del = need_new_ins = false;
4850  break;
4851  case CMD_DELETE:
4852  need_old_del = trigdesc->trig_delete_old_table;
4853  need_old_upd = need_new_upd = need_new_ins = false;
4854  break;
4855  case CMD_MERGE:
4856  need_old_upd = trigdesc->trig_update_old_table;
4857  need_new_upd = trigdesc->trig_update_new_table;
4858  need_old_del = trigdesc->trig_delete_old_table;
4859  need_new_ins = trigdesc->trig_insert_new_table;
4860  break;
4861  default:
4862  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4863  /* keep compiler quiet */
4864  need_old_upd = need_new_upd = need_old_del = need_new_ins = false;
4865  break;
4866  }
4867  if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del)
4868  return NULL;
4869 
4870  /* Check state, like AfterTriggerSaveEvent. */
4871  if (afterTriggers.query_depth < 0)
4872  elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4873 
4874  /* Be sure we have enough space to record events at this query depth. */
4877 
4878  /*
4879  * Find or create an AfterTriggersTableData struct to hold the
4880  * tuplestore(s). If there's a matching struct but it's marked closed,
4881  * ignore it; we need a newer one.
4882  *
4883  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4884  * allocated in the current (sub)transaction's CurTransactionContext, and
4885  * the tuplestores are managed by the (sub)transaction's resource owner.
4886  * This is sufficient lifespan because we do not allow triggers using
4887  * transition tables to be deferrable; they will be fired during
4888  * AfterTriggerEndQuery, after which it's okay to delete the data.
4889  */
4890  table = GetAfterTriggersTableData(relid, cmdType);
4891 
4892  /* Now create required tuplestore(s), if we don't have them already. */
4894  saveResourceOwner = CurrentResourceOwner;
4896 
4897  if (need_old_upd && table->old_upd_tuplestore == NULL)
4898  table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4899  if (need_new_upd && table->new_upd_tuplestore == NULL)
4900  table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4901  if (need_old_del && table->old_del_tuplestore == NULL)
4902  table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4903  if (need_new_ins && table->new_ins_tuplestore == NULL)
4904  table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4905 
4906  CurrentResourceOwner = saveResourceOwner;
4907  MemoryContextSwitchTo(oldcxt);
4908 
4909  /* Now build the TransitionCaptureState struct, in caller's context */
4911  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4912  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4913  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4914  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4915  state->tcs_private = table;
4916 
4917  return state;
4918 }
4919 
4920 
4921 /* ----------
4922  * AfterTriggerBeginXact()
4923  *
4924  * Called at transaction start (either BEGIN or implicit for single
4925  * statement outside of transaction block).
4926  * ----------
4927  */
4928 void
4930 {
4931  /*
4932  * Initialize after-trigger state structure to empty
4933  */
4934  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4936 
4937  /*
4938  * Verify that there is no leftover state remaining. If these assertions
4939  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4940  * up properly.
4941  */
4942  Assert(afterTriggers.state == NULL);
4943  Assert(afterTriggers.query_stack == NULL);
4945  Assert(afterTriggers.event_cxt == NULL);
4946  Assert(afterTriggers.events.head == NULL);
4947  Assert(afterTriggers.trans_stack == NULL);
4949 }
4950 
4951 
4952 /* ----------
4953  * AfterTriggerBeginQuery()
4954  *
4955  * Called just before we start processing a single query within a
4956  * transaction (or subtransaction). Most of the real work gets deferred
4957  * until somebody actually tries to queue a trigger event.
4958  * ----------
4959  */
4960 void
4962 {
4963  /* Increase the query stack depth */
4965 }
4966 
4967 
4968 /* ----------
4969  * AfterTriggerEndQuery()
4970  *
4971  * Called after one query has been completely processed. At this time
4972  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4973  * transfer deferred trigger events to the global deferred-trigger list.
4974  *
4975  * Note that this must be called BEFORE closing down the executor
4976  * with ExecutorEnd, because we make use of the EState's info about
4977  * target relations. Normally it is called from ExecutorFinish.
4978  * ----------
4979  */
4980 void
4982 {
4984 
4985  /* Must be inside a query, too */
4987 
4988  /*
4989  * If we never even got as far as initializing the event stack, there
4990  * certainly won't be any events, so exit quickly.
4991  */
4993  {
4995  return;
4996  }
4997 
4998  /*
4999  * Process all immediate-mode triggers queued by the query, and move the
5000  * deferred ones to the main list of deferred events.
5001  *
5002  * Notice that we decide which ones will be fired, and put the deferred
5003  * ones on the main list, before anything is actually fired. This ensures
5004  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
5005  * IMMEDIATE: all events we have decided to defer will be available for it
5006  * to fire.
5007  *
5008  * We loop in case a trigger queues more events at the same query level.
5009  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
5010  * will instead fire any triggers in a dedicated query level. Foreign key
5011  * enforcement triggers do add to the current query level, thanks to their
5012  * passing fire_triggers = false to SPI_execute_snapshot(). Other
5013  * C-language triggers might do likewise.
5014  *
5015  * If we find no firable events, we don't have to increment
5016  * firing_counter.
5017  */
5019 
5020  for (;;)
5021  {
5023  {
5024  CommandId firing_id = afterTriggers.firing_counter++;
5025  AfterTriggerEventChunk *oldtail = qs->events.tail;
5026 
5027  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
5028  break; /* all fired */
5029 
5030  /*
5031  * Firing a trigger could result in query_stack being repalloc'd,
5032  * so we must recalculate qs after each afterTriggerInvokeEvents
5033  * call. Furthermore, it's unsafe to pass delete_ok = true here,
5034  * because that could cause afterTriggerInvokeEvents to try to
5035  * access qs->events after the stack has been repalloc'd.
5036  */
5038 
5039  /*
5040  * We'll need to scan the events list again. To reduce the cost
5041  * of doing so, get rid of completely-fired chunks. We know that
5042  * all events were marked IN_PROGRESS or DONE at the conclusion of
5043  * afterTriggerMarkEvents, so any still-interesting events must
5044  * have been added after that, and so must be in the chunk that
5045  * was then the tail chunk, or in later chunks. So, zap all
5046  * chunks before oldtail. This is approximately the same set of
5047  * events we would have gotten rid of by passing delete_ok = true.
5048  */
5049  Assert(oldtail != NULL);
5050  while (qs->events.head != oldtail)
5052  }
5053  else
5054  break;
5055  }
5056 
5057  /* Release query-level-local storage, including tuplestores if any */
5059 
5061 }
5062 
5063 
5064 /*
5065  * AfterTriggerFreeQuery
5066  * Release subsidiary storage for a trigger query level.
5067  * This includes closing down tuplestores.
5068  * Note: it's important for this to be safe if interrupted by an error
5069  * and then called again for the same query level.
5070  */
5071 static void
5073 {
5074  Tuplestorestate *ts;
5075  List *tables;
5076  ListCell *lc;
5077 
5078  /* Drop the trigger events */
5080 
5081  /* Drop FDW tuplestore if any */
5082  ts = qs->fdw_tuplestore;
5083  qs->fdw_tuplestore = NULL;
5084  if (ts)
5085  tuplestore_end(ts);
5086 
5087  /* Release per-table subsidiary storage */
5088  tables = qs->tables;
5089  foreach(lc, tables)
5090  {
5092 
5093  ts = table->old_upd_tuplestore;
5094  table->old_upd_tuplestore = NULL;
5095  if (ts)
5096  tuplestore_end(ts);
5097  ts = table->new_upd_tuplestore;
5098  table->new_upd_tuplestore = NULL;
5099  if (ts)
5100  tuplestore_end(ts);
5101  ts = table->old_del_tuplestore;
5102  table->old_del_tuplestore = NULL;
5103  if (ts)
5104  tuplestore_end(ts);
5105  ts = table->new_ins_tuplestore;
5106  table->new_ins_tuplestore = NULL;
5107  if (ts)
5108  tuplestore_end(ts);
5109  if (table->storeslot)
5111  }
5112 
5113  /*
5114  * Now free the AfterTriggersTableData structs and list cells. Reset list
5115  * pointer first; if list_free_deep somehow gets an error, better to leak
5116  * that storage than have an infinite loop.
5117  */
5118  qs->tables = NIL;
5119  list_free_deep(tables);
5120 }
5121 
5122 
5123 /* ----------
5124  * AfterTriggerFireDeferred()
5125  *
5126  * Called just before the current transaction is committed. At this
5127  * time we invoke all pending DEFERRED triggers.
5128  *
5129  * It is possible for other modules to queue additional deferred triggers
5130  * during pre-commit processing; therefore xact.c may have to call this
5131  * multiple times.
5132  * ----------
5133  */
5134 void
5136 {
5137  AfterTriggerEventList *events;
5138  bool snap_pushed = false;
5139 
5140  /* Must not be inside a query */
5142 
5143  /*
5144  * If there are any triggers to fire, make sure we have set a snapshot for
5145  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
5146  * can't assume ActiveSnapshot is valid on entry.)
5147  */
5148  events = &afterTriggers.events;
5149  if (events->head != NULL)
5150  {
5152  snap_pushed = true;
5153  }
5154 
5155  /*
5156  * Run all the remaining triggers. Loop until they are all gone, in case
5157  * some trigger queues more for us to do.
5158  */
5159  while (afterTriggerMarkEvents(events, NULL, false))
5160  {
5161  CommandId firing_id = afterTriggers.firing_counter++;
5162 
5163  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
5164  break; /* all fired */
5165  }
5166 
5167  /*
5168  * We don't bother freeing the event list, since it will go away anyway
5169  * (and more efficiently than via pfree) in AfterTriggerEndXact.
5170  */
5171 
5172  if (snap_pushed)
5174 }
5175 
5176 
5177 /* ----------
5178  * AfterTriggerEndXact()
5179  *
5180  * The current transaction is finishing.
5181  *
5182  * Any unfired triggers are canceled so we simply throw
5183  * away anything we know.
5184  *
5185  * Note: it is possible for this to be called repeatedly in case of
5186  * error during transaction abort; therefore, do not complain if
5187  * already closed down.
5188  * ----------
5189  */
5190 void
5191 AfterTriggerEndXact(bool isCommit)
5192 {
5193  /*
5194  * Forget the pending-events list.
5195  *
5196  * Since all the info is in TopTransactionContext or children thereof, we
5197  * don't really need to do anything to reclaim memory. However, the
5198  * pending-events list could be large, and so it's useful to discard it as
5199  * soon as possible --- especially if we are aborting because we ran out
5200  * of memory for the list!
5201  */
5203  {
5205  afterTriggers.event_cxt = NULL;
5206  afterTriggers.events.head = NULL;
5207  afterTriggers.events.tail = NULL;
5208  afterTriggers.events.tailfree = NULL;
5209  }
5210 
5211  /*
5212  * Forget any subtransaction state as well. Since this can't be very
5213  * large, we let the eventual reset of TopTransactionContext free the
5214  * memory instead of doing it here.
5215  */
5216  afterTriggers.trans_stack = NULL;
5218 
5219 
5220  /*
5221  * Forget the query stack and constraint-related state information. As
5222  * with the subtransaction state information, we don't bother freeing the
5223  * memory here.
5224  */
5225  afterTriggers.query_stack = NULL;
5227  afterTriggers.state = NULL;
5228 
5229  /* No more afterTriggers manipulation until next transaction starts. */
5231 }
5232 
5233 /*
5234  * AfterTriggerBeginSubXact()
5235  *
5236  * Start a subtransaction.
5237  */
5238 void
5240 {
5241  int my_level = GetCurrentTransactionNestLevel();
5242 
5243  /*
5244  * Allocate more space in the trans_stack if needed. (Note: because the
5245  * minimum nest level of a subtransaction is 2, we waste the first couple
5246  * entries of the array; not worth the notational effort to avoid it.)
5247  */
5248  while (my_level >= afterTriggers.maxtransdepth)
5249  {
5250  if (afterTriggers.maxtransdepth == 0)
5251  {
5252  /* Arbitrarily initialize for max of 8 subtransaction levels */
5255  8 * sizeof(AfterTriggersTransData));
5257  }
5258  else
5259  {
5260  /* repalloc will keep the stack in the same context */
5261  int new_alloc = afterTriggers.maxtransdepth * 2;
5262 
5265  new_alloc * sizeof(AfterTriggersTransData));
5266  afterTriggers.maxtransdepth = new_alloc;
5267  }
5268  }
5269 
5270  /*
5271  * Push the current information into the stack. The SET CONSTRAINTS state
5272  * is not saved until/unless changed. Likewise, we don't make a
5273  * per-subtransaction event context until needed.
5274  */
5275  afterTriggers.trans_stack[my_level].state = NULL;
5279 }
5280 
5281 /*
5282  * AfterTriggerEndSubXact()
5283  *
5284  * The current subtransaction is ending.
5285  */
5286 void
5288 {
5289  int my_level = GetCurrentTransactionNestLevel();
5291  AfterTriggerEvent event;
5292  AfterTriggerEventChunk *chunk;
5293  CommandId subxact_firing_id;
5294 
5295  /*
5296  * Pop the prior state if needed.
5297  */
5298  if (isCommit)
5299  {
5300  Assert(my_level < afterTriggers.maxtransdepth);
5301  /* If we saved a prior state, we don't need it anymore */
5302  state = afterTriggers.trans_stack[my_level].state;
5303  if (state != NULL)
5304  pfree(state);
5305  /* this avoids double pfree if error later: */
5306  afterTriggers.trans_stack[my_level].state = NULL;
5309  }
5310  else
5311  {
5312  /*
5313  * Aborting. It is possible subxact start failed before calling
5314  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
5315  * trans_stack levels that aren't there.
5316  */
5317  if (my_level >= afterTriggers.maxtransdepth)
5318  return;
5319 
5320  /*
5321  * Release query-level storage for queries being aborted, and restore
5322  * query_depth to its pre-subxact value. This assumes that a
5323  * subtransaction will not add events to query levels started in a
5324  * earlier transaction state.
5325  */
5327  {
5331  }
5334 
5335  /*
5336  * Restore the global deferred-event list to its former length,
5337  * discarding any events queued by the subxact.
5338  */
5340  &afterTriggers.trans_stack[my_level].events);
5341 
5342  /*
5343  * Restore the trigger state. If the saved state is NULL, then this
5344  * subxact didn't save it, so it doesn't need restoring.
5345  */
5346  state = afterTriggers.trans_stack[my_level].state;
5347  if (state != NULL)
5348  {
5351  }
5352  /* this avoids double pfree if error later: */
5353  afterTriggers.trans_stack[my_level].state = NULL;
5354 
5355  /*
5356  * Scan for any remaining deferred events that were marked DONE or IN
5357  * PROGRESS by this subxact or a child, and un-mark them. We can
5358  * recognize such events because they have a firing ID greater than or
5359  * equal to the firing_counter value we saved at subtransaction start.
5360  * (This essentially assumes that the current subxact includes all
5361  * subxacts started after it.)
5362  */
5363  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
5365  {
5366  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5367 
5368  if (event->ate_flags &
5370  {
5371  if (evtshared->ats_firing_id >= subxact_firing_id)
5372  event->ate_flags &=
5374  }
5375  }
5376  }
5377 }
5378 
5379 /*
5380  * Get the transition table for the given event and depending on whether we are
5381  * processing the old or the new tuple.
5382  */
5383 static Tuplestorestate *
5385  TupleTableSlot *oldslot,
5386  TupleTableSlot *newslot,
5387  TransitionCaptureState *transition_capture)
5388 {
5389  Tuplestorestate *tuplestore = NULL;
5390  bool delete_old_table = transition_capture->tcs_delete_old_table;
5391  bool update_old_table = transition_capture->tcs_update_old_table;
5392  bool update_new_table = transition_capture->tcs_update_new_table;
5393  bool insert_new_table = transition_capture->tcs_insert_new_table;
5394 
5395  /*
5396  * For INSERT events NEW should be non-NULL, for DELETE events OLD should
5397  * be non-NULL, whereas for UPDATE events normally both OLD and NEW are
5398  * non-NULL. But for UPDATE events fired for capturing transition tuples
5399  * during UPDATE partition-key row movement, OLD is NULL when the event is
5400  * for a row being inserted, whereas NEW is NULL when the event is for a
5401  * row being deleted.
5402  */
5403  Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5404  TupIsNull(oldslot)));
5405  Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5406  TupIsNull(newslot)));
5407 
5408  if (!TupIsNull(oldslot))
5409  {
5410  Assert(TupIsNull(newslot));
5411  if (event == TRIGGER_EVENT_DELETE && delete_old_table)
5412  tuplestore = transition_capture->tcs_private->old_del_tuplestore;
5413  else if (event == TRIGGER_EVENT_UPDATE && update_old_table)
5414  tuplestore = transition_capture->tcs_private->old_upd_tuplestore;
5415  }
5416  else if (!TupIsNull(newslot))
5417  {
5418  Assert(TupIsNull(oldslot));
5419  if (event == TRIGGER_EVENT_INSERT && insert_new_table)
5420  tuplestore = transition_capture->tcs_private->new_ins_tuplestore;
5421  else if (event == TRIGGER_EVENT_UPDATE && update_new_table)
5422  tuplestore = transition_capture->tcs_private->new_upd_tuplestore;
5423  }
5424 
5425  return tuplestore;
5426 }
5427 
5428 /*
5429  * Add the given heap tuple to the given tuplestore, applying the conversion
5430  * map if necessary.
5431  *
5432  * If original_insert_tuple is given, we can add that tuple without conversion.
5433  */
5434 static void
5436  TransitionCaptureState *transition_capture,
5437  ResultRelInfo *relinfo,
5438  TupleTableSlot *slot,
5439  TupleTableSlot *original_insert_tuple,
5440  Tuplestorestate *tuplestore)
5441 {
5442  TupleConversionMap *map;
5443 
5444  /*
5445  * Nothing needs to be done if we don't have a tuplestore.
5446  */
5447  if (tuplestore == NULL)
5448  return;
5449 
5450  if (original_insert_tuple)
5451  tuplestore_puttupleslot(tuplestore, original_insert_tuple);
5452  else if ((map = ExecGetChildToRootMap(relinfo)) != NULL)
5453  {
5454  AfterTriggersTableData *table = transition_capture->tcs_private;
5455  TupleTableSlot *storeslot;
5456 
5457  storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5458  execute_attr_map_slot(map->attrMap, slot, storeslot);
5459  tuplestore_puttupleslot(tuplestore, storeslot);
5460  }
5461  else
5462  tuplestore_puttupleslot(tuplestore, slot);
5463 }
5464 
5465 /* ----------
5466  * AfterTriggerEnlargeQueryState()
5467  *
5468  * Prepare the necessary state so that we can record AFTER trigger events
5469  * queued by a query. It is allowed to have nested queries within a
5470  * (sub)transaction, so we need to have separate state for each query
5471  * nesting level.
5472  * ----------
5473  */
5474 static void
5476 {
5477  int init_depth = afterTriggers.maxquerydepth;
5478 
5480 
5481  if (afterTriggers.maxquerydepth == 0)
5482  {
5483  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
5484 
5487  new_alloc * sizeof(AfterTriggersQueryData));
5488  afterTriggers.maxquerydepth = new_alloc;
5489  }
5490  else
5491  {
5492  /* repalloc will keep the stack in the same context */
5493  int old_alloc = afterTriggers.maxquerydepth;
5494  int new_alloc = Max(afterTriggers.query_depth + 1,
5495  old_alloc * 2);
5496 
5499  new_alloc * sizeof(AfterTriggersQueryData));
5500  afterTriggers.maxquerydepth = new_alloc;
5501  }
5502 
5503  /* Initialize new array entries to empty */
5504  while (init_depth < afterTriggers.maxquerydepth)
5505  {
5507 
5508  qs->events.head = NULL;
5509  qs->events.tail = NULL;
5510  qs->events.tailfree = NULL;
5511  qs->fdw_tuplestore = NULL;
5512  qs->tables = NIL;
5513 
5514  ++init_depth;
5515  }
5516 }
5517 
5518 /*
5519  * Create an empty SetConstraintState with room for numalloc trigstates
5520  */
5521 static SetConstraintState
5523 {
5525 
5526  /* Behave sanely with numalloc == 0 */
5527  if (numalloc <= 0)
5528  numalloc = 1;
5529 
5530  /*
5531  * We assume that zeroing will correctly initialize the state values.
5532  */
5535  offsetof(SetConstraintStateData, trigstates) +
5536  numalloc * sizeof(SetConstraintTriggerData));
5537 
5538  state->numalloc = numalloc;
5539 
5540  return state;
5541 }
5542 
5543 /*
5544  * Copy a SetConstraintState
5545  */
5546 static SetConstraintState
5548 {
5550 
5552 
5553  state->all_isset = origstate->all_isset;
5554  state->all_isdeferred = origstate->all_isdeferred;
5555  state->numstates = origstate->numstates;
5556  memcpy(state->trigstates, origstate->trigstates,
5557  origstate->numstates * sizeof(SetConstraintTriggerData));
5558 
5559  return state;
5560 }
5561 
5562 /*
5563  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
5564  * pointer to the state object (it will change if we have to repalloc).
5565  */
5566 static SetConstraintState
5568  Oid tgoid, bool tgisdeferred)
5569 {
5570  if (state->numstates >= state->nu