trigger.c
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/indexing.h"
26 #include "catalog/objectaccess.h"
27 #include "catalog/partition.h"
28 #include "catalog/pg_constraint.h"
29 #include "catalog/pg_inherits.h"
30 #include "catalog/pg_proc.h"
31 #include "catalog/pg_trigger.h"
32 #include "catalog/pg_type.h"
33 #include "commands/dbcommands.h"
34 #include "commands/trigger.h"
35 #include "executor/executor.h"
36 #include "miscadmin.h"
37 #include "nodes/bitmapset.h"
38 #include "nodes/makefuncs.h"
39 #include "optimizer/optimizer.h"
40 #include "parser/parse_clause.h"
41 #include "parser/parse_collate.h"
42 #include "parser/parse_func.h"
43 #include "parser/parse_relation.h"
44 #include "partitioning/partdesc.h"
45 #include "pgstat.h"
46 #include "rewrite/rewriteManip.h"
47 #include "storage/lmgr.h"
48 #include "utils/acl.h"
49 #include "utils/builtins.h"
50 #include "utils/fmgroids.h"
51 #include "utils/guc_hooks.h"
52 #include "utils/inval.h"
53 #include "utils/lsyscache.h"
54 #include "utils/memutils.h"
55 #include "utils/plancache.h"
56 #include "utils/rel.h"
57 #include "utils/snapmgr.h"
58 #include "utils/syscache.h"
59 #include "utils/tuplestore.h"
60 
61 
62 /* GUC variables */
63 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
64 
65 /* How many levels deep into trigger execution are we? */
66 static int MyTriggerDepth = 0;
67 
68 /* Local function prototypes */
69 static void renametrig_internal(Relation tgrel, Relation targetrel,
70  HeapTuple trigtup, const char *newname,
71  const char *expected_name);
72 static void renametrig_partition(Relation tgrel, Oid partitionId,
73  Oid parentTriggerOid, const char *newname,
74  const char *expected_name);
75 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
76 static bool GetTupleForTrigger(EState *estate,
77  EPQState *epqstate,
78  ResultRelInfo *relinfo,
79  ItemPointer tid,
80  LockTupleMode lockmode,
81  TupleTableSlot *oldslot,
82  TupleTableSlot **epqslot,
83  TM_Result *tmresultp,
84  TM_FailureData *tmfdp);
85 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
86  Trigger *trigger, TriggerEvent event,
87  Bitmapset *modifiedCols,
88  TupleTableSlot *oldslot, TupleTableSlot *newslot);
89 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
90  int tgindx,
91  FmgrInfo *finfo,
92  Instrumentation *instr,
93  MemoryContext per_tuple_context);
94 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
95  ResultRelInfo *src_partinfo,
96  ResultRelInfo *dst_partinfo,
97  int event, bool row_trigger,
98  TupleTableSlot *oldslot, TupleTableSlot *newslot,
99  List *recheckIndexes, Bitmapset *modifiedCols,
100  TransitionCaptureState *transition_capture,
101  bool is_crosspart_update);
102 static void AfterTriggerEnlargeQueryState(void);
103 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
104 
105 
106 /*
107  * Create a trigger. Returns the address of the created trigger.
108  *
109  * queryString is the source text of the CREATE TRIGGER command.
110  * This must be supplied if a whenClause is specified, else it can be NULL.
111  *
112  * relOid, if nonzero, is the relation on which the trigger should be
113  * created. If zero, the name provided in the statement will be looked up.
114  *
115  * refRelOid, if nonzero, is the relation to which the constraint trigger
116  * refers. If zero, the constraint relation name provided in the statement
117  * will be looked up as needed.
118  *
119  * constraintOid, if nonzero, says that this trigger is being created
120  * internally to implement that constraint. A suitable pg_depend entry will
121  * be made to link the trigger to that constraint. constraintOid is zero when
122  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
123  * TRIGGER, we build a pg_constraint entry internally.)
124  *
125  * indexOid, if nonzero, is the OID of an index associated with the constraint.
126  * We do nothing with this except store it into pg_trigger.tgconstrindid;
127  * but when creating a trigger for a deferrable unique constraint on a
128  * partitioned table, its children are looked up. Note we don't cope with
129  * invalid indexes in that case.
130  *
131  * funcoid, if nonzero, is the OID of the function to invoke. When this is
132  * given, stmt->funcname is ignored.
133  *
134  * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
135  * if that trigger is dropped, this one should be too. There are two cases
136  * when a nonzero value is passed for this: 1) when this function recurses to
137  * create the trigger on partitions, 2) when creating child foreign key
138  * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers().
139  *
140  * If whenClause is passed, it is an already-transformed expression for
141  * WHEN. In this case, we ignore any that may come in stmt->whenClause.
142  *
143  * If isInternal is true then this is an internally-generated trigger.
144  * This argument sets the tgisinternal field of the pg_trigger entry, and
145  * if true causes us to modify the given trigger name to ensure uniqueness.
146  *
147  * When isInternal is not true we require ACL_TRIGGER permissions on the
148  * relation, as well as ACL_EXECUTE on the trigger function. For internal
149  * triggers the caller must apply any required permission checks.
150  *
151  * When called on partitioned tables, this function recurses to create the
152  * trigger on all the partitions, except if isInternal is true, in which
153  * case the caller is expected to execute the recursion on its own. in_partition
154  * indicates such a recursive call; outside callers should pass "false"
155  * (but see CloneRowTriggersToPartition).
156  */
157 ObjectAddress
158 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
159  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
160  Oid funcoid, Oid parentTriggerOid, Node *whenClause,
161  bool isInternal, bool in_partition)
162 {
163  return
164  CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
165  constraintOid, indexOid, funcoid,
166  parentTriggerOid, whenClause, isInternal,
167  in_partition, TRIGGER_FIRES_ON_ORIGIN);
168 }
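
For illustration only: a hypothetical backend caller could drive CreateTrigger() roughly as sketched below. The helper name, trigger name, and argument choices are placeholders, not code from this file; the CreateTrigStmt fields set here are the ones CreateTriggerFiringOn() reads.

    /* Hypothetical sketch: create an AFTER INSERT ... FOR EACH ROW trigger. */
    static ObjectAddress
    create_row_trigger_sketch(Oid relOid, List *funcName)
    {
        CreateTrigStmt *stmt = makeNode(CreateTrigStmt);

        stmt->trigname = pstrdup("example_trig");   /* placeholder name */
        stmt->relation = NULL;      /* relOid is passed directly below */
        stmt->funcname = funcName;  /* qualified name of a trigger function */
        stmt->args = NIL;
        stmt->row = true;           /* FOR EACH ROW */
        stmt->timing = TRIGGER_TYPE_AFTER;
        stmt->events = TRIGGER_TYPE_INSERT;
        stmt->columns = NIL;
        stmt->whenClause = NULL;
        stmt->transitionRels = NIL;
        stmt->isconstraint = false;
        stmt->deferrable = false;
        stmt->initdeferred = false;
        stmt->constrrel = NULL;

        /* queryString may be NULL because there is no WHEN clause to parse */
        return CreateTrigger(stmt, NULL, relOid,
                             InvalidOid,    /* refRelOid */
                             InvalidOid,    /* constraintOid */
                             InvalidOid,    /* indexOid */
                             InvalidOid,    /* funcoid: looked up from funcname */
                             InvalidOid,    /* parentTriggerOid */
                             NULL,          /* whenClause */
                             false,         /* isInternal */
                             false);        /* in_partition */
    }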
169 
170 /*
171  * Like the above; additionally the firing condition
172  * (always/origin/replica/disabled) can be specified.
173  */
174 ObjectAddress
175 CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
176  Oid relOid, Oid refRelOid, Oid constraintOid,
177  Oid indexOid, Oid funcoid, Oid parentTriggerOid,
178  Node *whenClause, bool isInternal, bool in_partition,
179  char trigger_fires_when)
180 {
181  int16 tgtype;
182  int ncolumns;
183  int16 *columns;
184  int2vector *tgattr;
185  List *whenRtable;
186  char *qual;
187  Datum values[Natts_pg_trigger];
188  bool nulls[Natts_pg_trigger];
189  Relation rel;
190  AclResult aclresult;
191  Relation tgrel;
192  Relation pgrel;
193  HeapTuple tuple = NULL;
194  Oid funcrettype;
195  Oid trigoid = InvalidOid;
196  char internaltrigname[NAMEDATALEN];
197  char *trigname;
198  Oid constrrelid = InvalidOid;
199  ObjectAddress myself,
200  referenced;
201  char *oldtablename = NULL;
202  char *newtablename = NULL;
203  bool partition_recurse;
204  bool trigger_exists = false;
205  Oid existing_constraint_oid = InvalidOid;
206  bool existing_isInternal = false;
207  bool existing_isClone = false;
208 
209  if (OidIsValid(relOid))
210  rel = table_open(relOid, ShareRowExclusiveLock);
211  else
212  rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
213 
214  /*
215  * Triggers must be on tables or views, and there are additional
216  * relation-type-specific restrictions.
217  */
218  if (rel->rd_rel->relkind == RELKIND_RELATION)
219  {
220  /* Tables can't have INSTEAD OF triggers */
221  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
222  stmt->timing != TRIGGER_TYPE_AFTER)
223  ereport(ERROR,
224  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
225  errmsg("\"%s\" is a table",
226  RelationGetRelationName(rel)),
227  errdetail("Tables cannot have INSTEAD OF triggers.")));
228  }
229  else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
230  {
231  /* Partitioned tables can't have INSTEAD OF triggers */
232  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
233  stmt->timing != TRIGGER_TYPE_AFTER)
234  ereport(ERROR,
235  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
236  errmsg("\"%s\" is a table",
237  RelationGetRelationName(rel)),
238  errdetail("Tables cannot have INSTEAD OF triggers.")));
239 
240  /*
241  * FOR EACH ROW triggers have further restrictions
242  */
243  if (stmt->row)
244  {
245  /*
246  * Disallow use of transition tables.
247  *
248  * Note that we have another restriction about transition tables
249  * in partitions; search for 'has_superclass' below for an
250  * explanation. The check here is just to protect from the fact
251  * that if we allowed it here, the creation would succeed for a
252  * partitioned table with no partitions, but would be blocked by
253  * the other restriction when the first partition was created,
254  * which is very unfriendly behavior.
255  */
256  if (stmt->transitionRels != NIL)
257  ereport(ERROR,
258  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
259  errmsg("\"%s\" is a partitioned table",
260  RelationGetRelationName(rel)),
261  errdetail("ROW triggers with transition tables are not supported on partitioned tables.")));
262  }
263  }
264  else if (rel->rd_rel->relkind == RELKIND_VIEW)
265  {
266  /*
267  * Views can have INSTEAD OF triggers (which we check below are
268  * row-level), or statement-level BEFORE/AFTER triggers.
269  */
270  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
271  ereport(ERROR,
272  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
273  errmsg("\"%s\" is a view",
274  RelationGetRelationName(rel)),
275  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
276  /* Disallow TRUNCATE triggers on VIEWs */
277  if (TRIGGER_FOR_TRUNCATE(stmt->events))
278  ereport(ERROR,
279  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
280  errmsg("\"%s\" is a view",
281  RelationGetRelationName(rel)),
282  errdetail("Views cannot have TRUNCATE triggers.")));
283  }
284  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
285  {
286  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
287  stmt->timing != TRIGGER_TYPE_AFTER)
288  ereport(ERROR,
289  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
290  errmsg("\"%s\" is a foreign table",
291  RelationGetRelationName(rel)),
292  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
293 
294  /*
295  * We disallow constraint triggers to protect the assumption that
296  * triggers on FKs can't be deferred. See notes with AfterTriggers
297  * data structures, below.
298  */
299  if (stmt->isconstraint)
300  ereport(ERROR,
301  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
302  errmsg("\"%s\" is a foreign table",
303  RelationGetRelationName(rel)),
304  errdetail("Foreign tables cannot have constraint triggers.")));
305  }
306  else
307  ereport(ERROR,
308  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
309  errmsg("relation \"%s\" cannot have triggers",
310  RelationGetRelationName(rel)),
311  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
312 
313  if (!allowSystemTableMods && IsSystemRelation(rel))
314  ereport(ERROR,
315  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
316  errmsg("permission denied: \"%s\" is a system catalog",
317  RelationGetRelationName(rel))));
318 
319  if (stmt->isconstraint)
320  {
321  /*
322  * We must take a lock on the target relation to protect against
323  * concurrent drop. It's not clear that AccessShareLock is strong
324  * enough, but we certainly need at least that much... otherwise, we
325  * might end up creating a pg_constraint entry referencing a
326  * nonexistent table.
327  */
328  if (OidIsValid(refRelOid))
329  {
330  LockRelationOid(refRelOid, AccessShareLock);
331  constrrelid = refRelOid;
332  }
333  else if (stmt->constrrel != NULL)
334  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
335  false);
336  }
337 
338  /* permission checks */
339  if (!isInternal)
340  {
341  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
342  ACL_TRIGGER);
343  if (aclresult != ACLCHECK_OK)
344  aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
345  RelationGetRelationName(rel));
346 
347  if (OidIsValid(constrrelid))
348  {
349  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
350  ACL_TRIGGER);
351  if (aclresult != ACLCHECK_OK)
352  aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
353  get_rel_name(constrrelid));
354  }
355  }
356 
357  /*
358  * When called on a partitioned table to create a FOR EACH ROW trigger
359  * that's not internal, we create one trigger for each partition, too.
360  *
361  * For that, we'd better hold lock on all of them ahead of time.
362  */
363  partition_recurse = !isInternal && stmt->row &&
364  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
365  if (partition_recurse)
366  list_free(find_all_inheritors(RelationGetRelid(rel),
367  ShareRowExclusiveLock, NULL));
368 
369  /* Compute tgtype */
370  TRIGGER_CLEAR_TYPE(tgtype);
371  if (stmt->row)
372  TRIGGER_SETT_ROW(tgtype);
373  tgtype |= stmt->timing;
374  tgtype |= stmt->events;
375 
376  /* Disallow ROW-level TRUNCATE triggers */
377  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
378  ereport(ERROR,
379  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
380  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
381 
382  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
383  if (TRIGGER_FOR_INSTEAD(tgtype))
384  {
385  if (!TRIGGER_FOR_ROW(tgtype))
386  ereport(ERROR,
387  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
388  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
389  if (stmt->whenClause)
390  ereport(ERROR,
391  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
392  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
393  if (stmt->columns != NIL)
394  ereport(ERROR,
395  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
396  errmsg("INSTEAD OF triggers cannot have column lists")));
397  }
398 
399  /*
400  * We don't yet support naming ROW transition variables, but the parser
401  * recognizes the syntax so we can give a nicer message here.
402  *
403  * Per standard, REFERENCING TABLE names are only allowed on AFTER
404  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
405  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
406  * only allowed once. Per standard, OLD may not be specified when
407  * creating a trigger only for INSERT, and NEW may not be specified when
408  * creating a trigger only for DELETE.
409  *
410  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
411  * reference both ROW and TABLE transition data.
412  */
413  if (stmt->transitionRels != NIL)
414  {
415  List *varList = stmt->transitionRels;
416  ListCell *lc;
417 
418  foreach(lc, varList)
419  {
420  TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
421 
422  if (!(tt->isTable))
423  ereport(ERROR,
424  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
425  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
426  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
427 
428  /*
429  * Because of the above test, we omit further ROW-related testing
430  * below. If we later allow naming OLD and NEW ROW variables,
431  * adjustments will be needed below.
432  */
433 
434  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
435  ereport(ERROR,
436  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
437  errmsg("\"%s\" is a foreign table",
438  RelationGetRelationName(rel)),
439  errdetail("Triggers on foreign tables cannot have transition tables.")));
440 
441  if (rel->rd_rel->relkind == RELKIND_VIEW)
442  ereport(ERROR,
443  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
444  errmsg("\"%s\" is a view",
445  RelationGetRelationName(rel)),
446  errdetail("Triggers on views cannot have transition tables.")));
447 
448  /*
449  * We currently don't allow row-level triggers with transition
450  * tables on partition or inheritance children. Such triggers
451  * would somehow need to see tuples converted to the format of the
452  * table they're attached to, and it's not clear which subset of
453  * tuples each child should see. See also the prohibitions in
454  * ATExecAttachPartition() and ATExecAddInherit().
455  */
456  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
457  {
458  /* Use appropriate error message. */
459  if (rel->rd_rel->relispartition)
460  ereport(ERROR,
461  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
462  errmsg("ROW triggers with transition tables are not supported on partitions")));
463  else
464  ereport(ERROR,
465  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
466  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
467  }
468 
469  if (stmt->timing != TRIGGER_TYPE_AFTER)
470  ereport(ERROR,
471  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
472  errmsg("transition table name can only be specified for an AFTER trigger")));
473 
474  if (TRIGGER_FOR_TRUNCATE(tgtype))
475  ereport(ERROR,
476  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
477  errmsg("TRUNCATE triggers with transition tables are not supported")));
478 
479  /*
480  * We currently don't allow multi-event triggers ("INSERT OR
481  * UPDATE") with transition tables, because it's not clear how to
482  * handle INSERT ... ON CONFLICT statements which can fire both
483  * INSERT and UPDATE triggers. We show the inserted tuples to
484  * INSERT triggers and the updated tuples to UPDATE triggers, but
485  * it's not yet clear what INSERT OR UPDATE trigger should see.
486  * This restriction could be lifted if we can decide on the right
487  * semantics in a later release.
488  */
489  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
490  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
491  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
492  ereport(ERROR,
493  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
494  errmsg("transition tables cannot be specified for triggers with more than one event")));
495 
496  /*
497  * We currently don't allow column-specific triggers with
498  * transition tables. Per spec, that seems to require
499  * accumulating separate transition tables for each combination of
500  * columns, which is a lot of work for a rather marginal feature.
501  */
502  if (stmt->columns != NIL)
503  ereport(ERROR,
504  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
505  errmsg("transition tables cannot be specified for triggers with column lists")));
506 
507  /*
508  * We disallow constraint triggers with transition tables, to
509  * protect the assumption that such triggers can't be deferred.
510  * See notes with AfterTriggers data structures, below.
511  *
512  * Currently this is enforced by the grammar, so just Assert here.
513  */
514  Assert(!stmt->isconstraint);
515 
516  if (tt->isNew)
517  {
518  if (!(TRIGGER_FOR_INSERT(tgtype) ||
519  TRIGGER_FOR_UPDATE(tgtype)))
520  ereport(ERROR,
521  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
522  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
523 
524  if (newtablename != NULL)
525  ereport(ERROR,
526  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
527  errmsg("NEW TABLE cannot be specified multiple times")));
528 
529  newtablename = tt->name;
530  }
531  else
532  {
533  if (!(TRIGGER_FOR_DELETE(tgtype) ||
534  TRIGGER_FOR_UPDATE(tgtype)))
535  ereport(ERROR,
536  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
537  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
538 
539  if (oldtablename != NULL)
540  ereport(ERROR,
541  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
542  errmsg("OLD TABLE cannot be specified multiple times")));
543 
544  oldtablename = tt->name;
545  }
546  }
547 
548  if (newtablename != NULL && oldtablename != NULL &&
549  strcmp(newtablename, oldtablename) == 0)
550  ereport(ERROR,
551  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
552  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
553  }
554 
555  /*
556  * Parse the WHEN clause, if any and we weren't passed an already
557  * transformed one.
558  *
559  * Note that as a side effect, we fill whenRtable when parsing. If we got
560  * an already parsed clause, this does not occur, which is what we want --
561  * no point in adding redundant dependencies below.
562  */
563  if (!whenClause && stmt->whenClause)
564  {
565  ParseState *pstate;
566  ParseNamespaceItem *nsitem;
567  List *varList;
568  ListCell *lc;
569 
570  /* Set up a pstate to parse with */
571  pstate = make_parsestate(NULL);
572  pstate->p_sourcetext = queryString;
573 
574  /*
575  * Set up nsitems for OLD and NEW references.
576  *
577  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
578  */
579  nsitem = addRangeTableEntryForRelation(pstate, rel,
580  AccessShareLock,
581  makeAlias("old", NIL),
582  false, false);
583  addNSItemToQuery(pstate, nsitem, false, true, true);
584  nsitem = addRangeTableEntryForRelation(pstate, rel,
585  AccessShareLock,
586  makeAlias("new", NIL),
587  false, false);
588  addNSItemToQuery(pstate, nsitem, false, true, true);
589 
590  /* Transform expression. Copy to be sure we don't modify original */
591  whenClause = transformWhereClause(pstate,
592  copyObject(stmt->whenClause),
594  "WHEN");
595  /* we have to fix its collations too */
596  assign_expr_collations(pstate, whenClause);
597 
598  /*
599  * Check for disallowed references to OLD/NEW.
600  *
601  * NB: pull_var_clause is okay here only because we don't allow
602  * subselects in WHEN clauses; it would fail to examine the contents
603  * of subselects.
604  */
605  varList = pull_var_clause(whenClause, 0);
606  foreach(lc, varList)
607  {
608  Var *var = (Var *) lfirst(lc);
609 
610  switch (var->varno)
611  {
612  case PRS2_OLD_VARNO:
613  if (!TRIGGER_FOR_ROW(tgtype))
614  ereport(ERROR,
615  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
616  errmsg("statement trigger's WHEN condition cannot reference column values"),
617  parser_errposition(pstate, var->location)));
618  if (TRIGGER_FOR_INSERT(tgtype))
619  ereport(ERROR,
620  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
621  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
622  parser_errposition(pstate, var->location)));
623  /* system columns are okay here */
624  break;
625  case PRS2_NEW_VARNO:
626  if (!TRIGGER_FOR_ROW(tgtype))
627  ereport(ERROR,
628  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
629  errmsg("statement trigger's WHEN condition cannot reference column values"),
630  parser_errposition(pstate, var->location)));
631  if (TRIGGER_FOR_DELETE(tgtype))
632  ereport(ERROR,
633  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
634  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
635  parser_errposition(pstate, var->location)));
636  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
637  ereport(ERROR,
638  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
639  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
640  parser_errposition(pstate, var->location)));
641  if (TRIGGER_FOR_BEFORE(tgtype) &&
642  var->varattno == 0 &&
643  RelationGetDescr(rel)->constr &&
644  RelationGetDescr(rel)->constr->has_generated_stored)
645  ereport(ERROR,
646  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
647  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
648  errdetail("A whole-row reference is used and the table contains generated columns."),
649  parser_errposition(pstate, var->location)));
650  if (TRIGGER_FOR_BEFORE(tgtype) &&
651  var->varattno > 0 &&
652  TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
653  ereport(ERROR,
654  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
655  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
656  errdetail("Column \"%s\" is a generated column.",
657  NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
658  parser_errposition(pstate, var->location)));
659  break;
660  default:
661  /* can't happen without add_missing_from, so just elog */
662  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
663  break;
664  }
665  }
666 
667  /* we'll need the rtable for recordDependencyOnExpr */
668  whenRtable = pstate->p_rtable;
669 
670  qual = nodeToString(whenClause);
671 
672  free_parsestate(pstate);
673  }
674  else if (!whenClause)
675  {
676  whenClause = NULL;
677  whenRtable = NIL;
678  qual = NULL;
679  }
680  else
681  {
682  qual = nodeToString(whenClause);
683  whenRtable = NIL;
684  }
685 
686  /*
687  * Find and validate the trigger function.
688  */
689  if (!OidIsValid(funcoid))
690  funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
691  if (!isInternal)
692  {
693  aclresult = object_aclcheck(ProcedureRelationId, funcoid, GetUserId(), ACL_EXECUTE);
694  if (aclresult != ACLCHECK_OK)
695  aclcheck_error(aclresult, OBJECT_FUNCTION,
696  NameListToString(stmt->funcname));
697  }
698  funcrettype = get_func_rettype(funcoid);
699  if (funcrettype != TRIGGEROID)
700  ereport(ERROR,
701  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
702  errmsg("function %s must return type %s",
703  NameListToString(stmt->funcname), "trigger")));
704 
705  /*
706  * Scan pg_trigger to see if there is already a trigger of the same name.
707  * Skip this for internally generated triggers, since we'll modify the
708  * name to be unique below.
709  *
710  * NOTE that this is cool only because we have ShareRowExclusiveLock on
711  * the relation, so the trigger set won't be changing underneath us.
712  */
713  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
714  if (!isInternal)
715  {
716  ScanKeyData skeys[2];
717  SysScanDesc tgscan;
718 
719  ScanKeyInit(&skeys[0],
720  Anum_pg_trigger_tgrelid,
721  BTEqualStrategyNumber, F_OIDEQ,
722  ObjectIdGetDatum(RelationGetRelid(rel)));
723 
724  ScanKeyInit(&skeys[1],
725  Anum_pg_trigger_tgname,
726  BTEqualStrategyNumber, F_NAMEEQ,
727  CStringGetDatum(stmt->trigname));
728 
729  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
730  NULL, 2, skeys);
731 
732  /* There should be at most one matching tuple */
733  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
734  {
735  Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
736 
737  trigoid = oldtrigger->oid;
738  existing_constraint_oid = oldtrigger->tgconstraint;
739  existing_isInternal = oldtrigger->tgisinternal;
740  existing_isClone = OidIsValid(oldtrigger->tgparentid);
741  trigger_exists = true;
742  /* copy the tuple to use in CatalogTupleUpdate() */
743  tuple = heap_copytuple(tuple);
744  }
745  systable_endscan(tgscan);
746  }
747 
748  if (!trigger_exists)
749  {
750  /* Generate the OID for the new trigger. */
751  trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
752  Anum_pg_trigger_oid);
753  }
754  else
755  {
756  /*
757  * If OR REPLACE was specified, we'll replace the old trigger;
758  * otherwise complain about the duplicate name.
759  */
760  if (!stmt->replace)
761  ereport(ERROR,
762  (errcode(ERRCODE_DUPLICATE_OBJECT),
763  errmsg("trigger \"%s\" for relation \"%s\" already exists",
764  stmt->trigname, RelationGetRelationName(rel))));
765 
766  /*
767  * An internal trigger or a child trigger (isClone) cannot be replaced
768  * by a user-defined trigger. However, skip this test when
769  * in_partition, because then we're recursing from a partitioned table
770  * and the check was made at the parent level.
771  */
772  if ((existing_isInternal || existing_isClone) &&
773  !isInternal && !in_partition)
774  ereport(ERROR,
775  (errcode(ERRCODE_DUPLICATE_OBJECT),
776  errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger",
777  stmt->trigname, RelationGetRelationName(rel))));
778 
779  /*
780  * It is not allowed to replace with a constraint trigger; gram.y
781  * should have enforced this already.
782  */
783  Assert(!stmt->isconstraint);
784 
785  /*
786  * It is not allowed to replace an existing constraint trigger,
787  * either. (The reason for these restrictions is partly that it seems
788  * difficult to deal with pending trigger events in such cases, and
789  * partly that the command might imply changing the constraint's
790  * properties as well, which doesn't seem nice.)
791  */
792  if (OidIsValid(existing_constraint_oid))
793  ereport(ERROR,
795  errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
796  stmt->trigname, RelationGetRelationName(rel))));
797  }
798 
799  /*
800  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
801  * corresponding pg_constraint entry.
802  */
803  if (stmt->isconstraint && !OidIsValid(constraintOid))
804  {
805  /* Internal callers should have made their own constraints */
806  Assert(!isInternal);
807  constraintOid = CreateConstraintEntry(stmt->trigname,
808  RelationGetNamespace(rel),
809  CONSTRAINT_TRIGGER,
810  stmt->deferrable,
811  stmt->initdeferred,
812  true,
813  InvalidOid, /* no parent */
814  RelationGetRelid(rel),
815  NULL, /* no conkey */
816  0,
817  0,
818  InvalidOid, /* no domain */
819  InvalidOid, /* no index */
820  InvalidOid, /* no foreign key */
821  NULL,
822  NULL,
823  NULL,
824  NULL,
825  0,
826  ' ',
827  ' ',
828  NULL,
829  0,
830  ' ',
831  NULL, /* no exclusion */
832  NULL, /* no check constraint */
833  NULL,
834  true, /* islocal */
835  0, /* inhcount */
836  true, /* noinherit */
837  isInternal); /* is_internal */
838  }
839 
840  /*
841  * If trigger is internally generated, modify the provided trigger name to
842  * ensure uniqueness by appending the trigger OID. (Callers will usually
843  * supply a simple constant trigger name in these cases.)
844  */
845  if (isInternal)
846  {
847  snprintf(internaltrigname, sizeof(internaltrigname),
848  "%s_%u", stmt->trigname, trigoid);
849  trigname = internaltrigname;
850  }
851  else
852  {
853  /* user-defined trigger; use the specified trigger name as-is */
854  trigname = stmt->trigname;
855  }
856 
857  /*
858  * Build the new pg_trigger tuple.
859  */
860  memset(nulls, false, sizeof(nulls));
861 
862  values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
863  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
864  values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
865  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
866  CStringGetDatum(trigname));
867  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
868  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
869  values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
870  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
871  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
872  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
873  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
874  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
875  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
876 
877  if (stmt->args)
878  {
879  ListCell *le;
880  char *args;
881  int16 nargs = list_length(stmt->args);
882  int len = 0;
883 
884  foreach(le, stmt->args)
885  {
886  char *ar = strVal(lfirst(le));
887 
888  len += strlen(ar) + 4;
889  for (; *ar; ar++)
890  {
891  if (*ar == '\\')
892  len++;
893  }
894  }
895  args = (char *) palloc(len + 1);
896  args[0] = '\0';
897  foreach(le, stmt->args)
898  {
899  char *s = strVal(lfirst(le));
900  char *d = args + strlen(args);
901 
902  while (*s)
903  {
904  if (*s == '\\')
905  *d++ = '\\';
906  *d++ = *s++;
907  }
908  strcpy(d, "\\000");
909  }
910  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
911  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
912  CStringGetDatum(args));
913  }
914  else
915  {
916  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
917  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
918  CStringGetDatum(""));
919  }
920 
921  /* build column number array if it's a column-specific trigger */
922  ncolumns = list_length(stmt->columns);
923  if (ncolumns == 0)
924  columns = NULL;
925  else
926  {
927  ListCell *cell;
928  int i = 0;
929 
930  columns = (int16 *) palloc(ncolumns * sizeof(int16));
931  foreach(cell, stmt->columns)
932  {
933  char *name = strVal(lfirst(cell));
934  int16 attnum;
935  int j;
936 
937  /* Lookup column name. System columns are not allowed */
938  attnum = attnameAttNum(rel, name, false);
939  if (attnum == InvalidAttrNumber)
940  ereport(ERROR,
941  (errcode(ERRCODE_UNDEFINED_COLUMN),
942  errmsg("column \"%s\" of relation \"%s\" does not exist",
943  name, RelationGetRelationName(rel))));
944 
945  /* Check for duplicates */
946  for (j = i - 1; j >= 0; j--)
947  {
948  if (columns[j] == attnum)
949  ereport(ERROR,
950  (errcode(ERRCODE_DUPLICATE_COLUMN),
951  errmsg("column \"%s\" specified more than once",
952  name)));
953  }
954 
955  columns[i++] = attnum;
956  }
957  }
958  tgattr = buildint2vector(columns, ncolumns);
959  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
960 
961  /* set tgqual if trigger has WHEN clause */
962  if (qual)
963  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
964  else
965  nulls[Anum_pg_trigger_tgqual - 1] = true;
966 
967  if (oldtablename)
968  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
969  CStringGetDatum(oldtablename));
970  else
971  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
972  if (newtablename)
973  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
974  CStringGetDatum(newtablename));
975  else
976  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
977 
978  /*
979  * Insert or replace tuple in pg_trigger.
980  */
981  if (!trigger_exists)
982  {
983  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
984  CatalogTupleInsert(tgrel, tuple);
985  }
986  else
987  {
988  HeapTuple newtup;
989 
990  newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
991  CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
992  heap_freetuple(newtup);
993  }
994 
995  heap_freetuple(tuple); /* free either original or new tuple */
996  table_close(tgrel, RowExclusiveLock);
997 
998  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
999  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
1000  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
1001  if (oldtablename)
1002  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1003  if (newtablename)
1004  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1005 
1006  /*
1007  * Update relation's pg_class entry if necessary; if not, send an SI
1008  * message to make other backends (and this one) rebuild relcache entries.
1009  */
1010  pgrel = table_open(RelationRelationId, RowExclusiveLock);
1011  tuple = SearchSysCacheCopy1(RELOID,
1012  ObjectIdGetDatum(RelationGetRelid(rel)));
1013  if (!HeapTupleIsValid(tuple))
1014  elog(ERROR, "cache lookup failed for relation %u",
1015  RelationGetRelid(rel));
1016  if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1017  {
1018  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1019 
1020  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1021 
1022  CommandCounterIncrement();
1023  }
1024  else
1025  CacheInvalidateRelcacheByTuple(tuple);
1026 
1027  heap_freetuple(tuple);
1028  table_close(pgrel, RowExclusiveLock);
1029 
1030  /*
1031  * If we're replacing a trigger, flush all the old dependencies before
1032  * recording new ones.
1033  */
1034  if (trigger_exists)
1035  deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1036 
1037  /*
1038  * Record dependencies for trigger. Always place a normal dependency on
1039  * the function.
1040  */
1041  myself.classId = TriggerRelationId;
1042  myself.objectId = trigoid;
1043  myself.objectSubId = 0;
1044 
1045  referenced.classId = ProcedureRelationId;
1046  referenced.objectId = funcoid;
1047  referenced.objectSubId = 0;
1048  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1049 
1050  if (isInternal && OidIsValid(constraintOid))
1051  {
1052  /*
1053  * Internally-generated trigger for a constraint, so make it an
1054  * internal dependency of the constraint. We can skip depending on
1055  * the relation(s), as there'll be an indirect dependency via the
1056  * constraint.
1057  */
1058  referenced.classId = ConstraintRelationId;
1059  referenced.objectId = constraintOid;
1060  referenced.objectSubId = 0;
1061  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1062  }
1063  else
1064  {
1065  /*
1066  * User CREATE TRIGGER, so place dependencies. We make trigger be
1067  * auto-dropped if its relation is dropped or if the FK relation is
1068  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1069  */
1070  referenced.classId = RelationRelationId;
1071  referenced.objectId = RelationGetRelid(rel);
1072  referenced.objectSubId = 0;
1073  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1074 
1075  if (OidIsValid(constrrelid))
1076  {
1077  referenced.classId = RelationRelationId;
1078  referenced.objectId = constrrelid;
1079  referenced.objectSubId = 0;
1080  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1081  }
1082  /* Not possible to have an index dependency in this case */
1083  Assert(!OidIsValid(indexOid));
1084 
1085  /*
1086  * If it's a user-specified constraint trigger, make the constraint
1087  * internally dependent on the trigger instead of vice versa.
1088  */
1089  if (OidIsValid(constraintOid))
1090  {
1091  referenced.classId = ConstraintRelationId;
1092  referenced.objectId = constraintOid;
1093  referenced.objectSubId = 0;
1094  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1095  }
1096 
1097  /*
1098  * If it's a partition trigger, create the partition dependencies.
1099  */
1100  if (OidIsValid(parentTriggerOid))
1101  {
1102  ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1103  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1104  ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1105  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1106  }
1107  }
1108 
1109  /* If column-specific trigger, add normal dependencies on columns */
1110  if (columns != NULL)
1111  {
1112  int i;
1113 
1114  referenced.classId = RelationRelationId;
1115  referenced.objectId = RelationGetRelid(rel);
1116  for (i = 0; i < ncolumns; i++)
1117  {
1118  referenced.objectSubId = columns[i];
1119  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1120  }
1121  }
1122 
1123  /*
1124  * If it has a WHEN clause, add dependencies on objects mentioned in the
1125  * expression (eg, functions, as well as any columns used).
1126  */
1127  if (whenRtable != NIL)
1128  recordDependencyOnExpr(&myself, whenClause, whenRtable,
1129  DEPENDENCY_NORMAL);
1130 
1131  /* Post creation hook for new trigger */
1132  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1133  isInternal);
1134 
1135  /*
1136  * Lastly, create the trigger on child relations, if needed.
1137  */
1138  if (partition_recurse)
1139  {
1140  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1141  int i;
1142  MemoryContext oldcxt,
1143  perChildCxt;
1144 
1146  "part trig clone",
1148 
1149  /*
1150  * We don't currently expect to be called with a valid indexOid. If
1151  * that ever changes then we'll need to write code here to find the
1152  * corresponding child index.
1153  */
1154  Assert(!OidIsValid(indexOid));
1155 
1156  oldcxt = MemoryContextSwitchTo(perChildCxt);
1157 
1158  /* Iterate to create the trigger on each existing partition */
1159  for (i = 0; i < partdesc->nparts; i++)
1160  {
1161  CreateTrigStmt *childStmt;
1162  Relation childTbl;
1163  Node *qual;
1164 
1165  childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1166 
1167  /*
1168  * Initialize our fabricated parse node by copying the original
1169  * one, then resetting fields that we pass separately.
1170  */
1171  childStmt = (CreateTrigStmt *) copyObject(stmt);
1172  childStmt->funcname = NIL;
1173  childStmt->whenClause = NULL;
1174 
1175  /* If there is a WHEN clause, create a modified copy of it */
1176  qual = copyObject(whenClause);
1177  qual = (Node *)
1178  map_partition_varattnos((List *) qual, PRS2_OLD_VARNO,
1179  childTbl, rel);
1180  qual = (Node *)
1181  map_partition_varattnos((List *) qual, PRS2_NEW_VARNO,
1182  childTbl, rel);
1183 
1184  CreateTriggerFiringOn(childStmt, queryString,
1185  partdesc->oids[i], refRelOid,
1186  InvalidOid, InvalidOid,
1187  funcoid, trigoid, qual,
1188  isInternal, true, trigger_fires_when);
1189 
1190  table_close(childTbl, NoLock);
1191 
1192  MemoryContextReset(perChildCxt);
1193  }
1194 
1195  MemoryContextSwitchTo(oldcxt);
1196  MemoryContextDelete(perChildCxt);
1197  }
1198 
1199  /* Keep lock on target rel until end of xact */
1200  table_close(rel, NoLock);
1201 
1202  return myself;
1203 }
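
As a hedged aside (not part of trigger.c), the tgtype bitmask assembled above can be built and classified with the same TRIGGER_* macros used in this function; the helper below is purely illustrative.

    /* Hypothetical sketch: assemble and classify a tgtype value. */
    static const char *
    classify_tgtype_sketch(bool row, int16 timing, int16 events)
    {
        int16       tgtype;

        TRIGGER_CLEAR_TYPE(tgtype);
        if (row)
            TRIGGER_SETT_ROW(tgtype);
        tgtype |= timing;           /* TRIGGER_TYPE_BEFORE/AFTER/INSTEAD */
        tgtype |= events;           /* TRIGGER_TYPE_INSERT/UPDATE/DELETE/TRUNCATE */

        if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
            return "invalid: TRUNCATE triggers cannot be FOR EACH ROW";
        if (TRIGGER_FOR_INSTEAD(tgtype) && !TRIGGER_FOR_ROW(tgtype))
            return "invalid: INSTEAD OF triggers must be FOR EACH ROW";
        return TRIGGER_FOR_ROW(tgtype) ? "row-level trigger" : "statement-level trigger";
    }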
1204 
1205 /*
1206  * TriggerSetParentTrigger
1207  * Set a partition's trigger as child of its parent trigger,
1208  * or remove the linkage if parentTrigId is InvalidOid.
1209  *
1210  * This updates the constraint's pg_trigger row to show it as inherited, and
1211  * adds PARTITION dependencies to prevent the trigger from being deleted
1212  * on its own. Alternatively, reverse that.
1213  */
1214 void
1215 TriggerSetParentTrigger(Relation trigRel,
1216  Oid childTrigId,
1217  Oid parentTrigId,
1218  Oid childTableId)
1219 {
1220  SysScanDesc tgscan;
1221  ScanKeyData skey[1];
1222  Form_pg_trigger trigForm;
1223  HeapTuple tuple,
1224  newtup;
1225  ObjectAddress depender;
1226  ObjectAddress referenced;
1227 
1228  /*
1229  * Find the trigger to delete.
1230  */
1231  ScanKeyInit(&skey[0],
1232  Anum_pg_trigger_oid,
1233  BTEqualStrategyNumber, F_OIDEQ,
1234  ObjectIdGetDatum(childTrigId));
1235 
1236  tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true,
1237  NULL, 1, skey);
1238 
1239  tuple = systable_getnext(tgscan);
1240  if (!HeapTupleIsValid(tuple))
1241  elog(ERROR, "could not find tuple for trigger %u", childTrigId);
1242  newtup = heap_copytuple(tuple);
1243  trigForm = (Form_pg_trigger) GETSTRUCT(newtup);
1244  if (OidIsValid(parentTrigId))
1245  {
1246  /* don't allow setting parent for a constraint that already has one */
1247  if (OidIsValid(trigForm->tgparentid))
1248  elog(ERROR, "trigger %u already has a parent trigger",
1249  childTrigId);
1250 
1251  trigForm->tgparentid = parentTrigId;
1252 
1253  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1254 
1255  ObjectAddressSet(depender, TriggerRelationId, childTrigId);
1256 
1257  ObjectAddressSet(referenced, TriggerRelationId, parentTrigId);
1258  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI);
1259 
1260  ObjectAddressSet(referenced, RelationRelationId, childTableId);
1261  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC);
1262  }
1263  else
1264  {
1265  trigForm->tgparentid = InvalidOid;
1266 
1267  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1268 
1269  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1270  TriggerRelationId,
1271  DEPENDENCY_PARTITION_PRI);
1272  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1273  RelationRelationId,
1274  DEPENDENCY_PARTITION_SEC);
1275  }
1276 
1277  heap_freetuple(newtup);
1278  systable_endscan(tgscan);
1279 }
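
A hedged usage sketch (not from this file): ALTER TABLE ... ATTACH/DETACH PARTITION reaches TriggerSetParentTrigger() from tablecmds.c with an already-open pg_trigger relation, roughly as below; the OIDs here are placeholders.

    /* Hypothetical sketch: link a partition's trigger to its parent trigger. */
    static void
    link_child_trigger_sketch(Oid childTrigId, Oid parentTrigId, Oid childTableId)
    {
        Relation    trigRel = table_open(TriggerRelationId, RowExclusiveLock);

        /* passing InvalidOid as parentTrigId would remove an existing linkage */
        TriggerSetParentTrigger(trigRel, childTrigId, parentTrigId, childTableId);

        table_close(trigRel, RowExclusiveLock);
    }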
1280 
1281 
1282 /*
1283  * Guts of trigger deletion.
1284  */
1285 void
1286 RemoveTriggerById(Oid trigOid)
1287 {
1288  Relation tgrel;
1289  SysScanDesc tgscan;
1290  ScanKeyData skey[1];
1291  HeapTuple tup;
1292  Oid relid;
1293  Relation rel;
1294 
1295  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1296 
1297  /*
1298  * Find the trigger to delete.
1299  */
1300  ScanKeyInit(&skey[0],
1301  Anum_pg_trigger_oid,
1302  BTEqualStrategyNumber, F_OIDEQ,
1303  ObjectIdGetDatum(trigOid));
1304 
1305  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1306  NULL, 1, skey);
1307 
1308  tup = systable_getnext(tgscan);
1309  if (!HeapTupleIsValid(tup))
1310  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1311 
1312  /*
1313  * Open and exclusive-lock the relation the trigger belongs to.
1314  */
1315  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1316 
1317  rel = table_open(relid, AccessExclusiveLock);
1318 
1319  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1320  rel->rd_rel->relkind != RELKIND_VIEW &&
1321  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1322  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1323  ereport(ERROR,
1324  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1325  errmsg("relation \"%s\" cannot have triggers",
1326  RelationGetRelationName(rel)),
1327  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
1328 
1329  if (!allowSystemTableMods && IsSystemRelation(rel))
1330  ereport(ERROR,
1331  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1332  errmsg("permission denied: \"%s\" is a system catalog",
1333  RelationGetRelationName(rel))));
1334 
1335  /*
1336  * Delete the pg_trigger tuple.
1337  */
1338  CatalogTupleDelete(tgrel, &tup->t_self);
1339 
1340  systable_endscan(tgscan);
1341  table_close(tgrel, RowExclusiveLock);
1342 
1343  /*
1344  * We do not bother to try to determine whether any other triggers remain,
1345  * which would be needed in order to decide whether it's safe to clear the
1346  * relation's relhastriggers. (In any case, there might be a concurrent
1347  * process adding new triggers.) Instead, just force a relcache inval to
1348  * make other backends (and this one too!) rebuild their relcache entries.
1349  * There's no great harm in leaving relhastriggers true even if there are
1350  * no triggers left.
1351  */
1352  CacheInvalidateRelcache(rel);
1353 
1354  /* Keep lock on trigger's rel until end of xact */
1355  table_close(rel, NoLock);
1356 }
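
RemoveTriggerById() is normally reached through the dependency machinery rather than called directly. A hypothetical sketch of the usual route, which lets dependency.c decide what else must be dropped:

    /* Hypothetical sketch: drop a trigger via its object address. */
    static void
    drop_trigger_sketch(Oid trigOid)
    {
        ObjectAddress object;

        ObjectAddressSet(object, TriggerRelationId, trigOid);

        /* performDeletion() ends up calling RemoveTriggerById() for this object */
        performDeletion(&object, DROP_RESTRICT, 0);
    }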
1357 
1358 /*
1359  * get_trigger_oid - Look up a trigger by name to find its OID.
1360  *
1361  * If missing_ok is false, throw an error if trigger not found. If
1362  * true, just return InvalidOid.
1363  */
1364 Oid
1365 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1366 {
1367  Relation tgrel;
1368  ScanKeyData skey[2];
1369  SysScanDesc tgscan;
1370  HeapTuple tup;
1371  Oid oid;
1372 
1373  /*
1374  * Find the trigger, verify permissions, set up object address
1375  */
1376  tgrel = table_open(TriggerRelationId, AccessShareLock);
1377 
1378  ScanKeyInit(&skey[0],
1379  Anum_pg_trigger_tgrelid,
1380  BTEqualStrategyNumber, F_OIDEQ,
1381  ObjectIdGetDatum(relid));
1382  ScanKeyInit(&skey[1],
1383  Anum_pg_trigger_tgname,
1384  BTEqualStrategyNumber, F_NAMEEQ,
1385  CStringGetDatum(trigname));
1386 
1387  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1388  NULL, 2, skey);
1389 
1390  tup = systable_getnext(tgscan);
1391 
1392  if (!HeapTupleIsValid(tup))
1393  {
1394  if (!missing_ok)
1395  ereport(ERROR,
1396  (errcode(ERRCODE_UNDEFINED_OBJECT),
1397  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1398  trigname, get_rel_name(relid))));
1399  oid = InvalidOid;
1400  }
1401  else
1402  {
1403  oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1404  }
1405 
1406  systable_endscan(tgscan);
1407  table_close(tgrel, AccessShareLock);
1408  return oid;
1409 }
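
A hypothetical caller of get_trigger_oid(), illustrating the missing_ok contract described above (sketch only, not code from this file):

    /* Hypothetical sketch: look up a trigger OID, tolerating its absence. */
    static void
    report_trigger_oid_sketch(Oid relid, const char *trigname)
    {
        Oid         trigoid = get_trigger_oid(relid, trigname, true);   /* missing_ok */

        if (OidIsValid(trigoid))
            elog(DEBUG1, "trigger \"%s\" has OID %u", trigname, trigoid);
        else
            elog(DEBUG1, "trigger \"%s\" does not exist", trigname);
    }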
1410 
1411 /*
1412  * Perform permissions and integrity checks before acquiring a relation lock.
1413  */
1414 static void
1415 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1416  void *arg)
1417 {
1418  HeapTuple tuple;
1419  Form_pg_class form;
1420 
1421  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1422  if (!HeapTupleIsValid(tuple))
1423  return; /* concurrently dropped */
1424  form = (Form_pg_class) GETSTRUCT(tuple);
1425 
1426  /* only tables and views can have triggers */
1427  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1428  form->relkind != RELKIND_FOREIGN_TABLE &&
1429  form->relkind != RELKIND_PARTITIONED_TABLE)
1430  ereport(ERROR,
1431  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1432  errmsg("relation \"%s\" cannot have triggers",
1433  rv->relname),
1434  errdetail_relkind_not_supported(form->relkind)));
1435 
1436  /* you must own the table to rename one of its triggers */
1437  if (!object_ownercheck(RelationRelationId, relid, GetUserId()))
1438  aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname);
1439  if (!allowSystemTableMods && IsSystemClass(relid, form))
1440  ereport(ERROR,
1441  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1442  errmsg("permission denied: \"%s\" is a system catalog",
1443  rv->relname)));
1444 
1445  ReleaseSysCache(tuple);
1446 }
1447 
1448 /*
1449  * renametrig - changes the name of a trigger on a relation
1450  *
1451  * trigger name is changed in trigger catalog.
1452  * No record of the previous name is kept.
1453  *
1454  * get proper relrelation from relation catalog (if not arg)
1455  * scan trigger catalog
1456  * for name conflict (within rel)
1457  * for original trigger (if not arg)
1458  * modify tgname in trigger tuple
1459  * update row in catalog
1460  */
1461 ObjectAddress
1462 renametrig(RenameStmt *stmt)
1463 {
1464  Oid tgoid;
1465  Relation targetrel;
1466  Relation tgrel;
1467  HeapTuple tuple;
1468  SysScanDesc tgscan;
1469  ScanKeyData key[2];
1470  Oid relid;
1471  ObjectAddress address;
1472 
1473  /*
1474  * Look up name, check permissions, and acquire lock (which we will NOT
1475  * release until end of transaction).
1476  */
1477  relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1478  0,
1479  RangeVarCallbackForRenameTrigger,
1480  NULL);
1481 
1482  /* Have lock already, so just need to build relcache entry. */
1483  targetrel = relation_open(relid, NoLock);
1484 
1485  /*
1486  * On partitioned tables, this operation recurses to partitions. Lock all
1487  * tables upfront.
1488  */
1489  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1490  (void) find_all_inheritors(relid, AccessExclusiveLock, NULL);
1491 
1492  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1493 
1494  /*
1495  * Search for the trigger to modify.
1496  */
1497  ScanKeyInit(&key[0],
1498  Anum_pg_trigger_tgrelid,
1499  BTEqualStrategyNumber, F_OIDEQ,
1500  ObjectIdGetDatum(relid));
1501  ScanKeyInit(&key[1],
1502  Anum_pg_trigger_tgname,
1503  BTEqualStrategyNumber, F_NAMEEQ,
1504  PointerGetDatum(stmt->subname));
1505  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1506  NULL, 2, key);
1507  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1508  {
1509  Form_pg_trigger trigform;
1510 
1511  trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1512  tgoid = trigform->oid;
1513 
1514  /*
1515  * If the trigger descends from a trigger on a parent partitioned
1516  * table, reject the rename. We don't allow a trigger in a partition
1517  * to differ in name from that of its parent: that would lead to an
1518  * inconsistency that pg_dump would not reproduce.
1519  */
1520  if (OidIsValid(trigform->tgparentid))
1521  ereport(ERROR,
1522  errmsg("cannot rename trigger \"%s\" on table \"%s\"",
1523  stmt->subname, RelationGetRelationName(targetrel)),
1524  errhint("Rename the trigger on the partitioned table \"%s\" instead.",
1525  get_rel_name(get_partition_parent(relid, false))));
1526 
1527 
1528  /* Rename the trigger on this relation ... */
1529  renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
1530  stmt->subname);
1531 
1532  /* ... and if it is partitioned, recurse to its partitions */
1533  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1534  {
1535  PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);
1536 
1537  for (int i = 0; i < partdesc->nparts; i++)
1538  {
1539  Oid partitionId = partdesc->oids[i];
1540 
1541  renametrig_partition(tgrel, partitionId, trigform->oid,
1542  stmt->newname, stmt->subname);
1543  }
1544  }
1545  }
1546  else
1547  {
1548  ereport(ERROR,
1549  (errcode(ERRCODE_UNDEFINED_OBJECT),
1550  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1551  stmt->subname, RelationGetRelationName(targetrel))));
1552  }
1553 
1554  ObjectAddressSet(address, TriggerRelationId, tgoid);
1555 
1556  systable_endscan(tgscan);
1557 
1558  table_close(tgrel, RowExclusiveLock);
1559 
1560  /*
1561  * Close rel, but keep exclusive lock!
1562  */
1563  relation_close(targetrel, NoLock);
1564 
1565  return address;
1566 }
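
For illustration, ALTER TRIGGER ... RENAME TO reaches renametrig() through a RenameStmt whose relation, subname, and newname fields are the ones read above. The helper below is a hypothetical sketch, not part of this file:

    /* Hypothetical sketch: rename a trigger programmatically. */
    static ObjectAddress
    rename_trigger_sketch(RangeVar *rel, const char *oldname, const char *newname)
    {
        RenameStmt *stmt = makeNode(RenameStmt);

        stmt->renameType = OBJECT_TRIGGER;
        stmt->relation = rel;
        stmt->subname = pstrdup(oldname);
        stmt->newname = pstrdup(newname);

        return renametrig(stmt);
    }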
1567 
1568 /*
1569  * Subroutine for renametrig -- perform the actual work of renaming one
1570  * trigger on one table.
1571  *
1572  * If the trigger has a name different from the expected one, raise a
1573  * NOTICE about it.
1574  */
1575 static void
1576 renametrig_internal(Relation tgrel, Relation targetrel, HeapTuple trigtup,
1577  const char *newname, const char *expected_name)
1578 {
1579  HeapTuple tuple;
1580  Form_pg_trigger tgform;
1581  ScanKeyData key[2];
1582  SysScanDesc tgscan;
1583 
1584  /* If the trigger already has the new name, nothing to do. */
1585  tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
1586  if (strcmp(NameStr(tgform->tgname), newname) == 0)
1587  return;
1588 
1589  /*
1590  * Before actually trying the rename, search for triggers with the same
1591  * name. The update would fail with an ugly message in that case, and it
1592  * is better to throw a nicer error.
1593  */
1594  ScanKeyInit(&key[0],
1595  Anum_pg_trigger_tgrelid,
1596  BTEqualStrategyNumber, F_OIDEQ,
1597  ObjectIdGetDatum(RelationGetRelid(targetrel)));
1598  ScanKeyInit(&key[1],
1599  Anum_pg_trigger_tgname,
1600  BTEqualStrategyNumber, F_NAMEEQ,
1601  PointerGetDatum(newname));
1602  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1603  NULL, 2, key);
1604  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1605  ereport(ERROR,
1606  (errcode(ERRCODE_DUPLICATE_OBJECT),
1607  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1608  newname, RelationGetRelationName(targetrel))));
1609  systable_endscan(tgscan);
1610 
1611  /*
1612  * The target name is free; update the existing pg_trigger tuple with it.
1613  */
1614  tuple = heap_copytuple(trigtup); /* need a modifiable copy */
1615  tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1616 
1617  /*
1618  * If the trigger has a name different from what we expected, let the user
1619  * know. (We can proceed anyway, since we must have reached here following
1620  * a tgparentid link.)
1621  */
1622  if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
1623  ereport(NOTICE,
1624  errmsg("renamed trigger \"%s\" on relation \"%s\"",
1625  NameStr(tgform->tgname),
1626  RelationGetRelationName(targetrel)));
1627 
1628  namestrcpy(&tgform->tgname, newname);
1629 
1630  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1631 
1632  InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);
1633 
1634  /*
1635  * Invalidate relation's relcache entry so that other backends (and this
1636  * one too!) are sent SI message to make them rebuild relcache entries.
1637  * (Ideally this should happen automatically...)
1638  */
1639  CacheInvalidateRelcache(targetrel);
1640 }
1641 
1642 /*
1643  * Subroutine for renametrig -- Helper for recursing to partitions when
1644  * renaming triggers on a partitioned table.
1645  */
1646 static void
1647 renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
1648  const char *newname, const char *expected_name)
1649 {
1650  SysScanDesc tgscan;
1651  ScanKeyData key;
1652  HeapTuple tuple;
1653 
1654  /*
1655  * Given a relation and the OID of a trigger on parent relation, find the
1656  * corresponding trigger in the child and rename that trigger to the given
1657  * name.
1658  */
1659  ScanKeyInit(&key,
1660  Anum_pg_trigger_tgrelid,
1661  BTEqualStrategyNumber, F_OIDEQ,
1662  ObjectIdGetDatum(partitionId));
1663  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1664  NULL, 1, &key);
1665  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1666  {
1667  Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1668  Relation partitionRel;
1669 
1670  if (tgform->tgparentid != parentTriggerOid)
1671  continue; /* not our trigger */
1672 
1673  partitionRel = table_open(partitionId, NoLock);
1674 
1675  /* Rename the trigger on this partition */
1676  renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);
1677 
1678  /* And if this relation is partitioned, recurse to its partitions */
1679  if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1680  {
1681  PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
1682  true);
1683 
1684  for (int i = 0; i < partdesc->nparts; i++)
1685  {
1686  Oid partoid = partdesc->oids[i];
1687 
1688  renametrig_partition(tgrel, partoid, tgform->oid, newname,
1689  NameStr(tgform->tgname));
1690  }
1691  }
1692  table_close(partitionRel, NoLock);
1693 
1694  /* There should be at most one matching tuple */
1695  break;
1696  }
1697  systable_endscan(tgscan);
1698 }
1699 
1700 /*
1701  * EnableDisableTrigger()
1702  *
1703  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1704  * to change 'tgenabled' field for the specified trigger(s)
1705  *
1706  * rel: relation to process (caller must hold suitable lock on it)
1707  * tgname: name of trigger to process, or NULL to scan all triggers
1708  * tgparent: if not zero, process only triggers with this tgparentid
1709  * fires_when: new value for tgenabled field. In addition to generic
1710  * enablement/disablement, this also defines when the trigger
1711  * should be fired in session replication roles.
1712  * skip_system: if true, skip "system" triggers (constraint triggers)
1713  * recurse: if true, recurse to partitions
1714  *
1715  * Caller should have checked permissions for the table; here we also
1716  * enforce that superuser privilege is required to alter the state of
1717  * system triggers
1718  */
1719 void
1720 EnableDisableTrigger(Relation rel, const char *tgname, Oid tgparent,
1721  char fires_when, bool skip_system, bool recurse,
1722  LOCKMODE lockmode)
1723 {
1724  Relation tgrel;
1725  int nkeys;
1726  ScanKeyData keys[2];
1727  SysScanDesc tgscan;
1728  HeapTuple tuple;
1729  bool found;
1730  bool changed;
1731 
1732  /* Scan the relevant entries in pg_trigger */
1733  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1734 
1735  ScanKeyInit(&keys[0],
1736  Anum_pg_trigger_tgrelid,
1737  BTEqualStrategyNumber, F_OIDEQ,
1738  ObjectIdGetDatum(RelationGetRelid(rel)));
1739  if (tgname)
1740  {
1741  ScanKeyInit(&keys[1],
1742  Anum_pg_trigger_tgname,
1743  BTEqualStrategyNumber, F_NAMEEQ,
1744  CStringGetDatum(tgname));
1745  nkeys = 2;
1746  }
1747  else
1748  nkeys = 1;
1749 
1750  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1751  NULL, nkeys, keys);
1752 
1753  found = changed = false;
1754 
1755  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1756  {
1757  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1758 
1759  if (OidIsValid(tgparent) && tgparent != oldtrig->tgparentid)
1760  continue;
1761 
1762  if (oldtrig->tgisinternal)
1763  {
1764  /* system trigger ... ok to process? */
1765  if (skip_system)
1766  continue;
1767  if (!superuser())
1768  ereport(ERROR,
1769  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1770  errmsg("permission denied: \"%s\" is a system trigger",
1771  NameStr(oldtrig->tgname))));
1772  }
1773 
1774  found = true;
1775 
1776  if (oldtrig->tgenabled != fires_when)
1777  {
1778  /* need to change this one ... make a copy to scribble on */
1779  HeapTuple newtup = heap_copytuple(tuple);
1780  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1781 
1782  newtrig->tgenabled = fires_when;
1783 
1784  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1785 
1786  heap_freetuple(newtup);
1787 
1788  changed = true;
1789  }
1790 
1791  /*
1792  * When altering FOR EACH ROW triggers on a partitioned table, do the
1793  * same on the partitions as well, unless ONLY is specified.
1794  *
1795  * Note that we recurse even if we didn't change the trigger above,
1796  * because the partitions' copy of the trigger may have a different
1797  * value of tgenabled than the parent's trigger and thus might need to
1798  * be changed.
1799  */
1800  if (recurse &&
1801  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
1802  (TRIGGER_FOR_ROW(oldtrig->tgtype)))
1803  {
1804  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1805  int i;
1806 
1807  for (i = 0; i < partdesc->nparts; i++)
1808  {
1809  Relation part;
1810 
1811  part = relation_open(partdesc->oids[i], lockmode);
1812  /* Match on child triggers' tgparentid, not their name */
1813  EnableDisableTrigger(part, NULL, oldtrig->oid,
1814  fires_when, skip_system, recurse,
1815  lockmode);
1816  table_close(part, NoLock); /* keep lock till commit */
1817  }
1818  }
1819 
1820  InvokeObjectPostAlterHook(TriggerRelationId,
1821  oldtrig->oid, 0);
1822  }
1823 
1824  systable_endscan(tgscan);
1825 
1826  table_close(tgrel, RowExclusiveLock);
1827 
1828  if (tgname && !found)
1829  ereport(ERROR,
1830  (errcode(ERRCODE_UNDEFINED_OBJECT),
1831  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1832  tgname, RelationGetRelationName(rel))));
1833 
1834  /*
1835  * If we changed anything, broadcast a SI inval message to force each
1836  * backend (including our own!) to rebuild relation's relcache entry.
1837  * Otherwise they will fail to apply the change promptly.
1838  */
1839  if (changed)
1840  CacheInvalidateRelcache(rel);
1841 }
1842 
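Illustrative sketch (editor's addition, not part of trigger.c): how a caller such as ALTER TABLE ... DISABLE TRIGGER might reach EnableDisableTrigger(). The helper name, the relation-OID parameter, and the use of ShareRowExclusiveLock are assumptions for illustration; tgparent is InvalidOid because the trigger is matched by name rather than by parent.

#include "postgres.h"
#include "access/table.h"
#include "catalog/pg_trigger.h"
#include "commands/trigger.h"
#include "storage/lockdefs.h"
#include "utils/rel.h"

/* Hypothetical helper: disable one user trigger and recurse to partitions. */
static void
disable_user_trigger_sketch(Oid relid, const char *trigname)
{
	/* Assumed lock level; the real caller must hold a suitable lock. */
	Relation	rel = table_open(relid, ShareRowExclusiveLock);

	/*
	 * TRIGGER_DISABLED ('D') turns the trigger off in every session
	 * replication role; skip_system=true leaves constraint triggers alone;
	 * recurse=true propagates the change to partitions.
	 */
	EnableDisableTrigger(rel, trigname, InvalidOid,
						 TRIGGER_DISABLED, true, true,
						 ShareRowExclusiveLock);

	table_close(rel, NoLock);	/* keep lock till commit */
}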
1843 
1844 /*
1845  * Build trigger data to attach to the given relcache entry.
1846  *
1847  * Note that trigger data attached to a relcache entry must be stored in
1848  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1849  * But we should be running in a less long-lived working context. To avoid
1850  * leaking cache memory if this routine fails partway through, we build a
1851  * temporary TriggerDesc in working memory and then copy the completed
1852  * structure into cache memory.
1853  */
1854 void
1855 RelationBuildTriggers(Relation relation)
1856 {
1857  TriggerDesc *trigdesc;
1858  int numtrigs;
1859  int maxtrigs;
1860  Trigger *triggers;
1861  Relation tgrel;
1862  ScanKeyData skey;
1863  SysScanDesc tgscan;
1864  HeapTuple htup;
1865  MemoryContext oldContext;
1866  int i;
1867 
1868  /*
1869  * Allocate a working array to hold the triggers (the array is extended if
1870  * necessary)
1871  */
1872  maxtrigs = 16;
1873  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1874  numtrigs = 0;
1875 
1876  /*
1877  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1878  * be reading the triggers in name order, except possibly during
1879  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1880  * ensures that triggers will be fired in name order.
1881  */
1882  ScanKeyInit(&skey,
1883  Anum_pg_trigger_tgrelid,
1884  BTEqualStrategyNumber, F_OIDEQ,
1885  ObjectIdGetDatum(RelationGetRelid(relation)));
1886 
1887  tgrel = table_open(TriggerRelationId, AccessShareLock);
1888  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1889  NULL, 1, &skey);
1890 
1891  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1892  {
1893  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1894  Trigger *build;
1895  Datum datum;
1896  bool isnull;
1897 
1898  if (numtrigs >= maxtrigs)
1899  {
1900  maxtrigs *= 2;
1901  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1902  }
1903  build = &(triggers[numtrigs]);
1904 
1905  build->tgoid = pg_trigger->oid;
1906  build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1907  NameGetDatum(&pg_trigger->tgname)));
1908  build->tgfoid = pg_trigger->tgfoid;
1909  build->tgtype = pg_trigger->tgtype;
1910  build->tgenabled = pg_trigger->tgenabled;
1911  build->tgisinternal = pg_trigger->tgisinternal;
1912  build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1913  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1914  build->tgconstrindid = pg_trigger->tgconstrindid;
1915  build->tgconstraint = pg_trigger->tgconstraint;
1916  build->tgdeferrable = pg_trigger->tgdeferrable;
1917  build->tginitdeferred = pg_trigger->tginitdeferred;
1918  build->tgnargs = pg_trigger->tgnargs;
1919  /* tgattr is first var-width field, so OK to access directly */
1920  build->tgnattr = pg_trigger->tgattr.dim1;
1921  if (build->tgnattr > 0)
1922  {
1923  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1924  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1925  build->tgnattr * sizeof(int16));
1926  }
1927  else
1928  build->tgattr = NULL;
1929  if (build->tgnargs > 0)
1930  {
1931  bytea *val;
1932  char *p;
1933 
1934  val = DatumGetByteaPP(fastgetattr(htup,
1935  Anum_pg_trigger_tgargs,
1936  tgrel->rd_att, &isnull));
1937  if (isnull)
1938  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1939  RelationGetRelationName(relation));
1940  p = (char *) VARDATA_ANY(val);
1941  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1942  for (i = 0; i < build->tgnargs; i++)
1943  {
1944  build->tgargs[i] = pstrdup(p);
1945  p += strlen(p) + 1;
1946  }
1947  }
1948  else
1949  build->tgargs = NULL;
1950 
1951  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1952  tgrel->rd_att, &isnull);
1953  if (!isnull)
1954  build->tgoldtable =
1955  DatumGetCString(DirectFunctionCall1(nameout, datum));
1956  else
1957  build->tgoldtable = NULL;
1958 
1959  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1960  tgrel->rd_att, &isnull);
1961  if (!isnull)
1962  build->tgnewtable =
1963  DatumGetCString(DirectFunctionCall1(nameout, datum));
1964  else
1965  build->tgnewtable = NULL;
1966 
1967  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1968  tgrel->rd_att, &isnull);
1969  if (!isnull)
1970  build->tgqual = TextDatumGetCString(datum);
1971  else
1972  build->tgqual = NULL;
1973 
1974  numtrigs++;
1975  }
1976 
1977  systable_endscan(tgscan);
1978  table_close(tgrel, AccessShareLock);
1979 
1980  /* There might not be any triggers */
1981  if (numtrigs == 0)
1982  {
1983  pfree(triggers);
1984  return;
1985  }
1986 
1987  /* Build trigdesc */
1988  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1989  trigdesc->triggers = triggers;
1990  trigdesc->numtriggers = numtrigs;
1991  for (i = 0; i < numtrigs; i++)
1992  SetTriggerFlags(trigdesc, &(triggers[i]));
1993 
1994  /* Copy completed trigdesc into cache storage */
1995  oldContext = MemoryContextSwitchTo(CacheMemoryContext);
1996  relation->trigdesc = CopyTriggerDesc(trigdesc);
1997  MemoryContextSwitchTo(oldContext);
1998 
1999  /* Release working memory */
2000  FreeTriggerDesc(trigdesc);
2001 }
2002 
2003 /*
2004  * Update the TriggerDesc's hint flags to include the specified trigger
2005  */
2006 static void
2007 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
2008 {
2009  int16 tgtype = trigger->tgtype;
2010 
2011  trigdesc->trig_insert_before_row |=
2012  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2013  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2014  trigdesc->trig_insert_after_row |=
2015  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2016  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2017  trigdesc->trig_insert_instead_row |=
2018  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2019  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
2020  trigdesc->trig_insert_before_statement |=
2021  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2022  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2023  trigdesc->trig_insert_after_statement |=
2024  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2025  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2026  trigdesc->trig_update_before_row |=
2027  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2028  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2029  trigdesc->trig_update_after_row |=
2030  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2031  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2032  trigdesc->trig_update_instead_row |=
2033  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2034  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
2035  trigdesc->trig_update_before_statement |=
2036  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2037  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2038  trigdesc->trig_update_after_statement |=
2039  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2040  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2041  trigdesc->trig_delete_before_row |=
2042  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2043  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2044  trigdesc->trig_delete_after_row |=
2045  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2046  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2047  trigdesc->trig_delete_instead_row |=
2048  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2049  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
2050  trigdesc->trig_delete_before_statement |=
2051  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2052  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2053  trigdesc->trig_delete_after_statement |=
2054  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2055  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2056  /* there are no row-level truncate triggers */
2057  trigdesc->trig_truncate_before_statement |=
2058  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2059  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
2060  trigdesc->trig_truncate_after_statement |=
2061  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2062  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
2063 
2064  trigdesc->trig_insert_new_table |=
2065  (TRIGGER_FOR_INSERT(tgtype) &&
2066  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2067  trigdesc->trig_update_old_table |=
2068  (TRIGGER_FOR_UPDATE(tgtype) &&
2069  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2070  trigdesc->trig_update_new_table |=
2071  (TRIGGER_FOR_UPDATE(tgtype) &&
2072  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2073  trigdesc->trig_delete_old_table |=
2074  (TRIGGER_FOR_DELETE(tgtype) &&
2075  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2076 }
2077 
2078 /*
2079  * Copy a TriggerDesc data structure.
2080  *
2081  * The copy is allocated in the current memory context.
2082  */
2083 TriggerDesc *
2084 CopyTriggerDesc(TriggerDesc *trigdesc)
2085 {
2086  TriggerDesc *newdesc;
2087  Trigger *trigger;
2088  int i;
2089 
2090  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2091  return NULL;
2092 
2093  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2094  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2095 
2096  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2097  memcpy(trigger, trigdesc->triggers,
2098  trigdesc->numtriggers * sizeof(Trigger));
2099  newdesc->triggers = trigger;
2100 
2101  for (i = 0; i < trigdesc->numtriggers; i++)
2102  {
2103  trigger->tgname = pstrdup(trigger->tgname);
2104  if (trigger->tgnattr > 0)
2105  {
2106  int16 *newattr;
2107 
2108  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
2109  memcpy(newattr, trigger->tgattr,
2110  trigger->tgnattr * sizeof(int16));
2111  trigger->tgattr = newattr;
2112  }
2113  if (trigger->tgnargs > 0)
2114  {
2115  char **newargs;
2116  int16 j;
2117 
2118  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2119  for (j = 0; j < trigger->tgnargs; j++)
2120  newargs[j] = pstrdup(trigger->tgargs[j]);
2121  trigger->tgargs = newargs;
2122  }
2123  if (trigger->tgqual)
2124  trigger->tgqual = pstrdup(trigger->tgqual);
2125  if (trigger->tgoldtable)
2126  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2127  if (trigger->tgnewtable)
2128  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
2129  trigger++;
2130  }
2131 
2132  return newdesc;
2133 }
2134 
2135 /*
2136  * Free a TriggerDesc data structure.
2137  */
2138 void
2139 FreeTriggerDesc(TriggerDesc *trigdesc)
2140 {
2141  Trigger *trigger;
2142  int i;
2143 
2144  if (trigdesc == NULL)
2145  return;
2146 
2147  trigger = trigdesc->triggers;
2148  for (i = 0; i < trigdesc->numtriggers; i++)
2149  {
2150  pfree(trigger->tgname);
2151  if (trigger->tgnattr > 0)
2152  pfree(trigger->tgattr);
2153  if (trigger->tgnargs > 0)
2154  {
2155  while (--(trigger->tgnargs) >= 0)
2156  pfree(trigger->tgargs[trigger->tgnargs]);
2157  pfree(trigger->tgargs);
2158  }
2159  if (trigger->tgqual)
2160  pfree(trigger->tgqual);
2161  if (trigger->tgoldtable)
2162  pfree(trigger->tgoldtable);
2163  if (trigger->tgnewtable)
2164  pfree(trigger->tgnewtable);
2165  trigger++;
2166  }
2167  pfree(trigdesc->triggers);
2168  pfree(trigdesc);
2169 }
2170 
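Usage sketch (editor's addition, not from trigger.c): code that wants a private, long-lived copy of a relation's trigger set, independent of relcache rebuilds, typically pairs CopyTriggerDesc() with FreeTriggerDesc(). The function and variable names below are illustrative assumptions.

#include "postgres.h"
#include "commands/trigger.h"
#include "utils/rel.h"

/* Hypothetical caller: take and later release a private TriggerDesc copy. */
static void
private_trigdesc_sketch(Relation rel)
{
	/* Copy is allocated in the current memory context (may be NULL). */
	TriggerDesc *mycopy = CopyTriggerDesc(rel->trigdesc);

	/* ... use mycopy without worrying about relcache invalidations ... */

	FreeTriggerDesc(mycopy);	/* FreeTriggerDesc(NULL) is a no-op */
}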
2171 /*
2172  * Compare two TriggerDesc structures for logical equality.
2173  */
2174 #ifdef NOT_USED
2175 bool
2176 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
2177 {
2178  int i,
2179  j;
2180 
2181  /*
2182  * We need not examine the hint flags, just the trigger array itself; if
2183  * we have the same triggers with the same types, the flags should match.
2184  *
2185  * As of 7.3 we assume trigger set ordering is significant in the
2186  * comparison; so we just compare corresponding slots of the two sets.
2187  *
2188  * Note: comparing the stringToNode forms of the WHEN clauses means that
2189  * parse column locations will affect the result. This is okay as long as
2190  * this function is only used for detecting exact equality, as for example
2191  * in checking for staleness of a cache entry.
2192  */
2193  if (trigdesc1 != NULL)
2194  {
2195  if (trigdesc2 == NULL)
2196  return false;
2197  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
2198  return false;
2199  for (i = 0; i < trigdesc1->numtriggers; i++)
2200  {
2201  Trigger *trig1 = trigdesc1->triggers + i;
2202  Trigger *trig2 = trigdesc2->triggers + i;
2203 
2204  if (trig1->tgoid != trig2->tgoid)
2205  return false;
2206  if (strcmp(trig1->tgname, trig2->tgname) != 0)
2207  return false;
2208  if (trig1->tgfoid != trig2->tgfoid)
2209  return false;
2210  if (trig1->tgtype != trig2->tgtype)
2211  return false;
2212  if (trig1->tgenabled != trig2->tgenabled)
2213  return false;
2214  if (trig1->tgisinternal != trig2->tgisinternal)
2215  return false;
2216  if (trig1->tgisclone != trig2->tgisclone)
2217  return false;
2218  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
2219  return false;
2220  if (trig1->tgconstrindid != trig2->tgconstrindid)
2221  return false;
2222  if (trig1->tgconstraint != trig2->tgconstraint)
2223  return false;
2224  if (trig1->tgdeferrable != trig2->tgdeferrable)
2225  return false;
2226  if (trig1->tginitdeferred != trig2->tginitdeferred)
2227  return false;
2228  if (trig1->tgnargs != trig2->tgnargs)
2229  return false;
2230  if (trig1->tgnattr != trig2->tgnattr)
2231  return false;
2232  if (trig1->tgnattr > 0 &&
2233  memcmp(trig1->tgattr, trig2->tgattr,
2234  trig1->tgnattr * sizeof(int16)) != 0)
2235  return false;
2236  for (j = 0; j < trig1->tgnargs; j++)
2237  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
2238  return false;
2239  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
2240  /* ok */ ;
2241  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
2242  return false;
2243  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
2244  return false;
2245  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
2246  /* ok */ ;
2247  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
2248  return false;
2249  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
2250  return false;
2251  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
2252  /* ok */ ;
2253  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
2254  return false;
2255  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
2256  return false;
2257  }
2258  }
2259  else if (trigdesc2 != NULL)
2260  return false;
2261  return true;
2262 }
2263 #endif /* NOT_USED */
2264 
2265 /*
2266  * Check if there is a row-level trigger with transition tables that prevents
2267  * a table from becoming an inheritance child or partition. Return the name
2268  * of the first such incompatible trigger, or NULL if there is none.
2269  */
2270 const char *
2271 FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
2272 {
2273  if (trigdesc != NULL)
2274  {
2275  int i;
2276 
2277  for (i = 0; i < trigdesc->numtriggers; ++i)
2278  {
2279  Trigger *trigger = &trigdesc->triggers[i];
2280 
2281  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2282  return trigger->tgname;
2283  }
2284  }
2285 
2286  return NULL;
2287 }
2288 
2289 /*
2290  * Call a trigger function.
2291  *
2292  * trigdata: trigger descriptor.
2293  * tgindx: trigger's index in finfo and instr arrays.
2294  * finfo: array of cached trigger function call information.
2295  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2296  * per_tuple_context: memory context to execute the function in.
2297  *
2298  * Returns the tuple (or NULL) as returned by the function.
2299  */
2300 static HeapTuple
2301 ExecCallTriggerFunc(TriggerData *trigdata,
2302  int tgindx,
2303  FmgrInfo *finfo,
2304  Instrumentation *instr,
2305  MemoryContext per_tuple_context)
2306 {
2307  LOCAL_FCINFO(fcinfo, 0);
2308  PgStat_FunctionCallUsage fcusage;
2309  Datum result;
2310  MemoryContext oldContext;
2311 
2312  /*
2313  * Protect against code paths that may fail to initialize transition table
2314  * info.
2315  */
2316  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2317  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2318  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2319  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2320  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2321  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2322  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2323 
2324  finfo += tgindx;
2325 
2326  /*
2327  * We cache fmgr lookup info, to avoid making the lookup again on each
2328  * call.
2329  */
2330  if (finfo->fn_oid == InvalidOid)
2331  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2332 
2333  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2334 
2335  /*
2336  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2337  */
2338  if (instr)
2339  InstrStartNode(instr + tgindx);
2340 
2341  /*
2342  * Do the function evaluation in the per-tuple memory context, so that
2343  * leaked memory will be reclaimed once per tuple. Note in particular that
2344  * any new tuple created by the trigger function will live till the end of
2345  * the tuple cycle.
2346  */
2347  oldContext = MemoryContextSwitchTo(per_tuple_context);
2348 
2349  /*
2350  * Call the function, passing no arguments but setting a context.
2351  */
2352  InitFunctionCallInfoData(*fcinfo, finfo, 0,
2353  InvalidOid, (Node *) trigdata, NULL);
2354 
2355  pgstat_init_function_usage(fcinfo, &fcusage);
2356 
2357  MyTriggerDepth++;
2358  PG_TRY();
2359  {
2360  result = FunctionCallInvoke(fcinfo);
2361  }
2362  PG_FINALLY();
2363  {
2364  MyTriggerDepth--;
2365  }
2366  PG_END_TRY();
2367 
2368  pgstat_end_function_usage(&fcusage, true);
2369 
2370  MemoryContextSwitchTo(oldContext);
2371 
2372  /*
2373  * Trigger protocol allows function to return a null pointer, but NOT to
2374  * set the isnull result flag.
2375  */
2376  if (fcinfo->isnull)
2377  ereport(ERROR,
2378  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2379  errmsg("trigger function %u returned null value",
2380  fcinfo->flinfo->fn_oid)));
2381 
2382  /*
2383  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2384  * one "tuple returned" (really the number of firings).
2385  */
2386  if (instr)
2387  InstrStopNode(instr + tgindx, 1);
2388 
2389  return (HeapTuple) DatumGetPointer(result);
2390 }
2391 
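Editor's sketch (not part of trigger.c): a minimal C trigger function showing the call protocol that ExecCallTriggerFunc() relies on. The trigger data arrives via fcinfo->context, the return value is a HeapTuple (NULL means "do nothing" for BEFORE ROW triggers), and the isnull flag must never be set. The function name is an assumption.

#include "postgres.h"
#include "commands/trigger.h"
#include "fmgr.h"

PG_FUNCTION_INFO_V1(passthrough_trigger_sketch);

Datum
passthrough_trigger_sketch(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata;

	/* Refuse to run if not called by the trigger manager. */
	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "passthrough_trigger_sketch: not called by trigger manager");

	trigdata = (TriggerData *) fcinfo->context;

	/* For UPDATE, hand back the proposed new row; otherwise the old one. */
	if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		return PointerGetDatum(trigdata->tg_newtuple);
	else
		return PointerGetDatum(trigdata->tg_trigtuple);
}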
2392 void
2393 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2394 {
2395  TriggerDesc *trigdesc;
2396  int i;
2397  TriggerData LocTriggerData = {0};
2398 
2399  trigdesc = relinfo->ri_TrigDesc;
2400 
2401  if (trigdesc == NULL)
2402  return;
2403  if (!trigdesc->trig_insert_before_statement)
2404  return;
2405 
2406  /* no-op if we already fired BS triggers in this context */
2407  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2408  CMD_INSERT))
2409  return;
2410 
2411  LocTriggerData.type = T_TriggerData;
2412  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2413  TRIGGER_EVENT_BEFORE;
2414  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2415  for (i = 0; i < trigdesc->numtriggers; i++)
2416  {
2417  Trigger *trigger = &trigdesc->triggers[i];
2418  HeapTuple newtuple;
2419 
2420  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2421  TRIGGER_TYPE_STATEMENT,
2422  TRIGGER_TYPE_BEFORE,
2423  TRIGGER_TYPE_INSERT))
2424  continue;
2425  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2426  NULL, NULL, NULL))
2427  continue;
2428 
2429  LocTriggerData.tg_trigger = trigger;
2430  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2431  i,
2432  relinfo->ri_TrigFunctions,
2433  relinfo->ri_TrigInstrument,
2434  GetPerTupleMemoryContext(estate));
2435 
2436  if (newtuple)
2437  ereport(ERROR,
2438  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2439  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2440  }
2441 }
2442 
2443 void
2444 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2445  TransitionCaptureState *transition_capture)
2446 {
2447  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2448 
2449  if (trigdesc && trigdesc->trig_insert_after_statement)
2450  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2451  TRIGGER_EVENT_INSERT,
2452  false, NULL, NULL, NIL, NULL, transition_capture,
2453  false);
2454 }
2455 
2456 bool
2457 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2458  TupleTableSlot *slot)
2459 {
2460  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2461  HeapTuple newtuple = NULL;
2462  bool should_free;
2463  TriggerData LocTriggerData = {0};
2464  int i;
2465 
2466  LocTriggerData.type = T_TriggerData;
2467  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2468  TRIGGER_EVENT_ROW |
2469  TRIGGER_EVENT_BEFORE;
2470  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2471  for (i = 0; i < trigdesc->numtriggers; i++)
2472  {
2473  Trigger *trigger = &trigdesc->triggers[i];
2474  HeapTuple oldtuple;
2475 
2476  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2477  TRIGGER_TYPE_ROW,
2478  TRIGGER_TYPE_BEFORE,
2479  TRIGGER_TYPE_INSERT))
2480  continue;
2481  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2482  NULL, NULL, slot))
2483  continue;
2484 
2485  if (!newtuple)
2486  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2487 
2488  LocTriggerData.tg_trigslot = slot;
2489  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2490  LocTriggerData.tg_trigger = trigger;
2491  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2492  i,
2493  relinfo->ri_TrigFunctions,
2494  relinfo->ri_TrigInstrument,
2495  GetPerTupleMemoryContext(estate));
2496  if (newtuple == NULL)
2497  {
2498  if (should_free)
2499  heap_freetuple(oldtuple);
2500  return false; /* "do nothing" */
2501  }
2502  else if (newtuple != oldtuple)
2503  {
2504  ExecForceStoreHeapTuple(newtuple, slot, false);
2505 
2506  /*
2507  * After a tuple in a partition goes through a trigger, the user
2508  * could have changed the partition key enough that the tuple no
2509  * longer fits the partition. Verify that.
2510  */
2511  if (trigger->tgisclone &&
2512  !ExecPartitionCheck(relinfo, slot, estate, false))
2513  ereport(ERROR,
2514  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2515  errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2516  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2517  trigger->tgname,
2518  get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
2519  RelationGetRelationName(relinfo->ri_RelationDesc))));
2520 
2521  if (should_free)
2522  heap_freetuple(oldtuple);
2523 
2524  /* signal tuple should be re-fetched if used */
2525  newtuple = NULL;
2526  }
2527  }
2528 
2529  return true;
2530 }
2531 
2532 void
2533 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2534  TupleTableSlot *slot, List *recheckIndexes,
2535  TransitionCaptureState *transition_capture)
2536 {
2537  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2538 
2539  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2540  (transition_capture && transition_capture->tcs_insert_new_table))
2541  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2542  TRIGGER_EVENT_INSERT,
2543  true, NULL, slot,
2544  recheckIndexes, NULL,
2545  transition_capture,
2546  false);
2547 }
2548 
2549 bool
2550 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2551  TupleTableSlot *slot)
2552 {
2553  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2554  HeapTuple newtuple = NULL;
2555  bool should_free;
2556  TriggerData LocTriggerData = {0};
2557  int i;
2558 
2559  LocTriggerData.type = T_TriggerData;
2560  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2561  TRIGGER_EVENT_ROW |
2562  TRIGGER_EVENT_INSTEAD;
2563  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2564  for (i = 0; i < trigdesc->numtriggers; i++)
2565  {
2566  Trigger *trigger = &trigdesc->triggers[i];
2567  HeapTuple oldtuple;
2568 
2569  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2570  TRIGGER_TYPE_ROW,
2571  TRIGGER_TYPE_INSTEAD,
2572  TRIGGER_TYPE_INSERT))
2573  continue;
2574  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2575  NULL, NULL, slot))
2576  continue;
2577 
2578  if (!newtuple)
2579  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2580 
2581  LocTriggerData.tg_trigslot = slot;
2582  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2583  LocTriggerData.tg_trigger = trigger;
2584  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2585  i,
2586  relinfo->ri_TrigFunctions,
2587  relinfo->ri_TrigInstrument,
2588  GetPerTupleMemoryContext(estate));
2589  if (newtuple == NULL)
2590  {
2591  if (should_free)
2592  heap_freetuple(oldtuple);
2593  return false; /* "do nothing" */
2594  }
2595  else if (newtuple != oldtuple)
2596  {
2597  ExecForceStoreHeapTuple(newtuple, slot, false);
2598 
2599  if (should_free)
2600  heap_freetuple(oldtuple);
2601 
2602  /* signal tuple should be re-fetched if used */
2603  newtuple = NULL;
2604  }
2605  }
2606 
2607  return true;
2608 }
2609 
2610 void
2611 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2612 {
2613  TriggerDesc *trigdesc;
2614  int i;
2615  TriggerData LocTriggerData = {0};
2616 
2617  trigdesc = relinfo->ri_TrigDesc;
2618 
2619  if (trigdesc == NULL)
2620  return;
2621  if (!trigdesc->trig_delete_before_statement)
2622  return;
2623 
2624  /* no-op if we already fired BS triggers in this context */
2625  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2626  CMD_DELETE))
2627  return;
2628 
2629  LocTriggerData.type = T_TriggerData;
2630  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2631  TRIGGER_EVENT_BEFORE;
2632  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2633  for (i = 0; i < trigdesc->numtriggers; i++)
2634  {
2635  Trigger *trigger = &trigdesc->triggers[i];
2636  HeapTuple newtuple;
2637 
2638  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2639  TRIGGER_TYPE_STATEMENT,
2640  TRIGGER_TYPE_BEFORE,
2641  TRIGGER_TYPE_DELETE))
2642  continue;
2643  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2644  NULL, NULL, NULL))
2645  continue;
2646 
2647  LocTriggerData.tg_trigger = trigger;
2648  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2649  i,
2650  relinfo->ri_TrigFunctions,
2651  relinfo->ri_TrigInstrument,
2652  GetPerTupleMemoryContext(estate));
2653 
2654  if (newtuple)
2655  ereport(ERROR,
2656  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2657  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2658  }
2659 }
2660 
2661 void
2662 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2663  TransitionCaptureState *transition_capture)
2664 {
2665  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2666 
2667  if (trigdesc && trigdesc->trig_delete_after_statement)
2668  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2669  TRIGGER_EVENT_DELETE,
2670  false, NULL, NULL, NIL, NULL, transition_capture,
2671  false);
2672 }
2673 
2674 /*
2675  * Execute BEFORE ROW DELETE triggers.
2676  *
2677  * True indicates the caller can proceed with the delete. False indicates
2678  * the caller needs to suppress the delete; additionally, if requested, we
2679  * pass back the concurrently updated tuple, if any.
2680  */
2681 bool
2682 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2683  ResultRelInfo *relinfo,
2684  ItemPointer tupleid,
2685  HeapTuple fdw_trigtuple,
2686  TupleTableSlot **epqslot,
2687  TM_Result *tmresult,
2688  TM_FailureData *tmfd)
2689 {
2690  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2691  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2692  bool result = true;
2693  TriggerData LocTriggerData = {0};
2694  HeapTuple trigtuple;
2695  bool should_free = false;
2696  int i;
2697 
2698  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2699  if (fdw_trigtuple == NULL)
2700  {
2701  TupleTableSlot *epqslot_candidate = NULL;
2702 
2703  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2704  LockTupleExclusive, slot, &epqslot_candidate,
2705  tmresult, tmfd))
2706  return false;
2707 
2708  /*
2709  * If the tuple was concurrently updated and the caller of this
2710  * function requested for the updated tuple, skip the trigger
2711  * execution.
2712  */
2713  if (epqslot_candidate != NULL && epqslot != NULL)
2714  {
2715  *epqslot = epqslot_candidate;
2716  return false;
2717  }
2718 
2719  trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2720  }
2721  else
2722  {
2723  trigtuple = fdw_trigtuple;
2724  ExecForceStoreHeapTuple(trigtuple, slot, false);
2725  }
2726 
2727  LocTriggerData.type = T_TriggerData;
2728  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2729  TRIGGER_EVENT_ROW |
2730  TRIGGER_EVENT_BEFORE;
2731  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2732  for (i = 0; i < trigdesc->numtriggers; i++)
2733  {
2734  HeapTuple newtuple;
2735  Trigger *trigger = &trigdesc->triggers[i];
2736 
2737  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2738  TRIGGER_TYPE_ROW,
2739  TRIGGER_TYPE_BEFORE,
2740  TRIGGER_TYPE_DELETE))
2741  continue;
2742  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2743  NULL, slot, NULL))
2744  continue;
2745 
2746  LocTriggerData.tg_trigslot = slot;
2747  LocTriggerData.tg_trigtuple = trigtuple;
2748  LocTriggerData.tg_trigger = trigger;
2749  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2750  i,
2751  relinfo->ri_TrigFunctions,
2752  relinfo->ri_TrigInstrument,
2753  GetPerTupleMemoryContext(estate));
2754  if (newtuple == NULL)
2755  {
2756  result = false; /* tell caller to suppress delete */
2757  break;
2758  }
2759  if (newtuple != trigtuple)
2760  heap_freetuple(newtuple);
2761  }
2762  if (should_free)
2763  heap_freetuple(trigtuple);
2764 
2765  return result;
2766 }
2767 
2768 /*
2769  * Note: is_crosspart_update must be true if the DELETE is being performed
2770  * as part of a cross-partition update.
2771  */
2772 void
2773 ExecARDeleteTriggers(EState *estate,
2774  ResultRelInfo *relinfo,
2775  ItemPointer tupleid,
2776  HeapTuple fdw_trigtuple,
2777  TransitionCaptureState *transition_capture,
2778  bool is_crosspart_update)
2779 {
2780  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2781 
2782  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2783  (transition_capture && transition_capture->tcs_delete_old_table))
2784  {
2785  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2786 
2787  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2788  if (fdw_trigtuple == NULL)
2789  GetTupleForTrigger(estate,
2790  NULL,
2791  relinfo,
2792  tupleid,
2793  LockTupleExclusive,
2794  slot,
2795  NULL,
2796  NULL,
2797  NULL);
2798  else
2799  ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2800 
2801  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2802  TRIGGER_EVENT_DELETE,
2803  true, slot, NULL, NIL, NULL,
2804  transition_capture,
2805  is_crosspart_update);
2806  }
2807 }
2808 
2809 bool
2810 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2811  HeapTuple trigtuple)
2812 {
2813  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2814  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2815  TriggerData LocTriggerData = {0};
2816  int i;
2817 
2818  LocTriggerData.type = T_TriggerData;
2819  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2820  TRIGGER_EVENT_ROW |
2821  TRIGGER_EVENT_INSTEAD;
2822  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2823 
2824  ExecForceStoreHeapTuple(trigtuple, slot, false);
2825 
2826  for (i = 0; i < trigdesc->numtriggers; i++)
2827  {
2828  HeapTuple rettuple;
2829  Trigger *trigger = &trigdesc->triggers[i];
2830 
2831  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2832  TRIGGER_TYPE_ROW,
2833  TRIGGER_TYPE_INSTEAD,
2834  TRIGGER_TYPE_DELETE))
2835  continue;
2836  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2837  NULL, slot, NULL))
2838  continue;
2839 
2840  LocTriggerData.tg_trigslot = slot;
2841  LocTriggerData.tg_trigtuple = trigtuple;
2842  LocTriggerData.tg_trigger = trigger;
2843  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2844  i,
2845  relinfo->ri_TrigFunctions,
2846  relinfo->ri_TrigInstrument,
2847  GetPerTupleMemoryContext(estate));
2848  if (rettuple == NULL)
2849  return false; /* Delete was suppressed */
2850  if (rettuple != trigtuple)
2851  heap_freetuple(rettuple);
2852  }
2853  return true;
2854 }
2855 
2856 void
2857 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2858 {
2859  TriggerDesc *trigdesc;
2860  int i;
2861  TriggerData LocTriggerData = {0};
2862  Bitmapset *updatedCols;
2863 
2864  trigdesc = relinfo->ri_TrigDesc;
2865 
2866  if (trigdesc == NULL)
2867  return;
2868  if (!trigdesc->trig_update_before_statement)
2869  return;
2870 
2871  /* no-op if we already fired BS triggers in this context */
2872  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2873  CMD_UPDATE))
2874  return;
2875 
2876  /* statement-level triggers operate on the parent table */
2877  Assert(relinfo->ri_RootResultRelInfo == NULL);
2878 
2879  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2880 
2881  LocTriggerData.type = T_TriggerData;
2882  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2883  TRIGGER_EVENT_BEFORE;
2884  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2885  LocTriggerData.tg_updatedcols = updatedCols;
2886  for (i = 0; i < trigdesc->numtriggers; i++)
2887  {
2888  Trigger *trigger = &trigdesc->triggers[i];
2889  HeapTuple newtuple;
2890 
2891  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2892  TRIGGER_TYPE_STATEMENT,
2893  TRIGGER_TYPE_BEFORE,
2894  TRIGGER_TYPE_UPDATE))
2895  continue;
2896  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2897  updatedCols, NULL, NULL))
2898  continue;
2899 
2900  LocTriggerData.tg_trigger = trigger;
2901  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2902  i,
2903  relinfo->ri_TrigFunctions,
2904  relinfo->ri_TrigInstrument,
2905  GetPerTupleMemoryContext(estate));
2906 
2907  if (newtuple)
2908  ereport(ERROR,
2909  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2910  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2911  }
2912 }
2913 
2914 void
2915 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2916  TransitionCaptureState *transition_capture)
2917 {
2918  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2919 
2920  /* statement-level triggers operate on the parent table */
2921  Assert(relinfo->ri_RootResultRelInfo == NULL);
2922 
2923  if (trigdesc && trigdesc->trig_update_after_statement)
2924  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2925  TRIGGER_EVENT_UPDATE,
2926  false, NULL, NULL, NIL,
2927  ExecGetAllUpdatedCols(relinfo, estate),
2928  transition_capture,
2929  false);
2930 }
2931 
2932 bool
2933 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2934  ResultRelInfo *relinfo,
2935  ItemPointer tupleid,
2936  HeapTuple fdw_trigtuple,
2937  TupleTableSlot *newslot,
2938  TM_Result *tmresult,
2939  TM_FailureData *tmfd)
2940 {
2941  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2942  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2943  HeapTuple newtuple = NULL;
2944  HeapTuple trigtuple;
2945  bool should_free_trig = false;
2946  bool should_free_new = false;
2947  TriggerData LocTriggerData = {0};
2948  int i;
2949  Bitmapset *updatedCols;
2950  LockTupleMode lockmode;
2951 
2952  /* Determine lock mode to use */
2953  lockmode = ExecUpdateLockMode(estate, relinfo);
2954 
2955  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2956  if (fdw_trigtuple == NULL)
2957  {
2958  TupleTableSlot *epqslot_candidate = NULL;
2959 
2960  /* get a copy of the on-disk tuple we are planning to update */
2961  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2962  lockmode, oldslot, &epqslot_candidate,
2963  tmresult, tmfd))
2964  return false; /* cancel the update action */
2965 
2966  /*
2967  * In READ COMMITTED isolation level it's possible that target tuple
2968  * was changed due to concurrent update. In that case we have a raw
2969  * subplan output tuple in epqslot_candidate, and need to form a new
2970  * insertable tuple using ExecGetUpdateNewTuple to replace the one we
2971  * received in newslot. Neither we nor our callers have any further
2972  * interest in the passed-in tuple, so it's okay to overwrite newslot
2973  * with the newer data.
2974  */
2975  if (epqslot_candidate != NULL)
2976  {
2977  TupleTableSlot *epqslot_clean;
2978 
2979  epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
2980  oldslot);
2981 
2982  /*
2983  * Typically, the caller's newslot was also generated by
2984  * ExecGetUpdateNewTuple, so that epqslot_clean will be the same
2985  * slot and copying is not needed. But do the right thing if it
2986  * isn't.
2987  */
2988  if (unlikely(newslot != epqslot_clean))
2989  ExecCopySlot(newslot, epqslot_clean);
2990 
2991  /*
2992  * At this point newslot contains a virtual tuple that may
2993  * reference some fields of oldslot's tuple in some disk buffer.
2994  * If that tuple is in a different page than the original target
2995  * tuple, then our only pin on that buffer is oldslot's, and we're
2996  * about to release it. Hence we'd better materialize newslot to
2997  * ensure it doesn't contain references into an unpinned buffer.
2998  * (We'd materialize it below anyway, but too late for safety.)
2999  */
3000  ExecMaterializeSlot(newslot);
3001  }
3002 
3003  /*
3004  * Here we convert oldslot to a materialized slot holding trigtuple.
3005  * Neither slot passed to the triggers will hold any buffer pin.
3006  */
3007  trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
3008  }
3009  else
3010  {
3011  /* Put the FDW-supplied tuple into oldslot to unify the cases */
3012  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3013  trigtuple = fdw_trigtuple;
3014  }
3015 
3016  LocTriggerData.type = T_TriggerData;
3017  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3018  TRIGGER_EVENT_ROW |
3019  TRIGGER_EVENT_BEFORE;
3020  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3021  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
3022  LocTriggerData.tg_updatedcols = updatedCols;
3023  for (i = 0; i < trigdesc->numtriggers; i++)
3024  {
3025  Trigger *trigger = &trigdesc->triggers[i];
3026  HeapTuple oldtuple;
3027 
3028  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3029  TRIGGER_TYPE_ROW,
3030  TRIGGER_TYPE_BEFORE,
3031  TRIGGER_TYPE_UPDATE))
3032  continue;
3033  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3034  updatedCols, oldslot, newslot))
3035  continue;
3036 
3037  if (!newtuple)
3038  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
3039 
3040  LocTriggerData.tg_trigslot = oldslot;
3041  LocTriggerData.tg_trigtuple = trigtuple;
3042  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3043  LocTriggerData.tg_newslot = newslot;
3044  LocTriggerData.tg_trigger = trigger;
3045  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3046  i,
3047  relinfo->ri_TrigFunctions,
3048  relinfo->ri_TrigInstrument,
3049  GetPerTupleMemoryContext(estate));
3050 
3051  if (newtuple == NULL)
3052  {
3053  if (should_free_trig)
3054  heap_freetuple(trigtuple);
3055  if (should_free_new)
3056  heap_freetuple(oldtuple);
3057  return false; /* "do nothing" */
3058  }
3059  else if (newtuple != oldtuple)
3060  {
3061  ExecForceStoreHeapTuple(newtuple, newslot, false);
3062 
3063  /*
3064  * If the tuple returned by the trigger (i.e., the one being stored) is
3065  * the old row version, and the heap tuple passed to the trigger was
3066  * allocated locally, materialize the slot. Otherwise we might free it
3067  * while it is still referenced by the slot.
3068  */
3069  if (should_free_trig && newtuple == trigtuple)
3070  ExecMaterializeSlot(newslot);
3071 
3072  if (should_free_new)
3073  heap_freetuple(oldtuple);
3074 
3075  /* signal tuple should be re-fetched if used */
3076  newtuple = NULL;
3077  }
3078  }
3079  if (should_free_trig)
3080  heap_freetuple(trigtuple);
3081 
3082  return true;
3083 }
3084 
3085 /*
3086  * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
3087  * and destination partitions, respectively, of a cross-partition update of
3088  * the root partitioned table mentioned in the query, given by 'relinfo'.
3089  * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
3090  * partition, and 'newslot' contains the "new" tuple in the destination
3091  * partition. This interface allows to support the requirements of
3092  * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
3093  * that case.
3094  */
3095 void
3096 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
3097  ResultRelInfo *src_partinfo,
3098  ResultRelInfo *dst_partinfo,
3099  ItemPointer tupleid,
3100  HeapTuple fdw_trigtuple,
3101  TupleTableSlot *newslot,
3102  List *recheckIndexes,
3103  TransitionCaptureState *transition_capture,
3104  bool is_crosspart_update)
3105 {
3106  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3107 
3108  if ((trigdesc && trigdesc->trig_update_after_row) ||
3109  (transition_capture &&
3110  (transition_capture->tcs_update_old_table ||
3111  transition_capture->tcs_update_new_table)))
3112  {
3113  /*
3114  * Note: if the UPDATE is converted into a DELETE+INSERT as part of
3115  * update-partition-key operation, then this function is also called
3116  * separately for DELETE and INSERT to capture transition table rows.
3117  * In such case, either old tuple or new tuple can be NULL.
3118  */
3119  TupleTableSlot *oldslot;
3120  ResultRelInfo *tupsrc;
3121 
3122  Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
3123  !is_crosspart_update);
3124 
3125  tupsrc = src_partinfo ? src_partinfo : relinfo;
3126  oldslot = ExecGetTriggerOldSlot(estate, tupsrc);
3127 
3128  if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
3129  GetTupleForTrigger(estate,
3130  NULL,
3131  tupsrc,
3132  tupleid,
3133  LockTupleExclusive,
3134  oldslot,
3135  NULL,
3136  NULL,
3137  NULL);
3138  else if (fdw_trigtuple != NULL)
3139  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3140  else
3141  ExecClearTuple(oldslot);
3142 
3143  AfterTriggerSaveEvent(estate, relinfo,
3144  src_partinfo, dst_partinfo,
3145  TRIGGER_EVENT_UPDATE,
3146  true,
3147  oldslot, newslot, recheckIndexes,
3148  ExecGetAllUpdatedCols(relinfo, estate),
3149  transition_capture,
3150  is_crosspart_update);
3151  }
3152 }
3153 
3154 bool
3155 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
3156  HeapTuple trigtuple, TupleTableSlot *newslot)
3157 {
3158  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3159  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
3160  HeapTuple newtuple = NULL;
3161  bool should_free;
3162  TriggerData LocTriggerData = {0};
3163  int i;
3164 
3165  LocTriggerData.type = T_TriggerData;
3166  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3167  TRIGGER_EVENT_ROW |
3168  TRIGGER_EVENT_INSTEAD;
3169  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3170 
3171  ExecForceStoreHeapTuple(trigtuple, oldslot, false);
3172 
3173  for (i = 0; i < trigdesc->numtriggers; i++)
3174  {
3175  Trigger *trigger = &trigdesc->triggers[i];
3176  HeapTuple oldtuple;
3177 
3178  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3179  TRIGGER_TYPE_ROW,
3180  TRIGGER_TYPE_INSTEAD,
3181  TRIGGER_TYPE_UPDATE))
3182  continue;
3183  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3184  NULL, oldslot, newslot))
3185  continue;
3186 
3187  if (!newtuple)
3188  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
3189 
3190  LocTriggerData.tg_trigslot = oldslot;
3191  LocTriggerData.tg_trigtuple = trigtuple;
3192  LocTriggerData.tg_newslot = newslot;
3193  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3194 
3195  LocTriggerData.tg_trigger = trigger;
3196  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3197  i,
3198  relinfo->ri_TrigFunctions,
3199  relinfo->ri_TrigInstrument,
3200  GetPerTupleMemoryContext(estate));
3201  if (newtuple == NULL)
3202  {
3203  return false; /* "do nothing" */
3204  }
3205  else if (newtuple != oldtuple)
3206  {
3207  ExecForceStoreHeapTuple(newtuple, newslot, false);
3208 
3209  if (should_free)
3210  heap_freetuple(oldtuple);
3211 
3212  /* signal tuple should be re-fetched if used */
3213  newtuple = NULL;
3214  }
3215  }
3216 
3217  return true;
3218 }
3219 
3220 void
3221 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3222 {
3223  TriggerDesc *trigdesc;
3224  int i;
3225  TriggerData LocTriggerData = {0};
3226 
3227  trigdesc = relinfo->ri_TrigDesc;
3228 
3229  if (trigdesc == NULL)
3230  return;
3231  if (!trigdesc->trig_truncate_before_statement)
3232  return;
3233 
3234  LocTriggerData.type = T_TriggerData;
3235  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3236  TRIGGER_EVENT_BEFORE;
3237  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3238 
3239  for (i = 0; i < trigdesc->numtriggers; i++)
3240  {
3241  Trigger *trigger = &trigdesc->triggers[i];
3242  HeapTuple newtuple;
3243 
3244  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3245  TRIGGER_TYPE_STATEMENT,
3246  TRIGGER_TYPE_BEFORE,
3247  TRIGGER_TYPE_TRUNCATE))
3248  continue;
3249  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3250  NULL, NULL, NULL))
3251  continue;
3252 
3253  LocTriggerData.tg_trigger = trigger;
3254  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3255  i,
3256  relinfo->ri_TrigFunctions,
3257  relinfo->ri_TrigInstrument,
3258  GetPerTupleMemoryContext(estate));
3259 
3260  if (newtuple)
3261  ereport(ERROR,
3262  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3263  errmsg("BEFORE STATEMENT trigger cannot return a value")));
3264  }
3265 }
3266 
3267 void
3268 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3269 {
3270  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3271 
3272  if (trigdesc && trigdesc->trig_truncate_after_statement)
3273  AfterTriggerSaveEvent(estate, relinfo,
3274  NULL, NULL,
3275  TRIGGER_EVENT_TRUNCATE,
3276  false, NULL, NULL, NIL, NULL, NULL,
3277  false);
3278 }
3279 
3280 
3281 /*
3282  * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3283  */
3284 static bool
3285 GetTupleForTrigger(EState *estate,
3286  EPQState *epqstate,
3287  ResultRelInfo *relinfo,
3288  ItemPointer tid,
3289  LockTupleMode lockmode,
3290  TupleTableSlot *oldslot,
3291  TupleTableSlot **epqslot,
3292  TM_Result *tmresultp,
3293  TM_FailureData *tmfdp)
3294 {
3295  Relation relation = relinfo->ri_RelationDesc;
3296 
3297  if (epqslot != NULL)
3298  {
3299  TM_Result test;
3300  TM_FailureData tmfd;
3301  int lockflags = 0;
3302 
3303  *epqslot = NULL;
3304 
3305  /* caller must pass an epqstate if EvalPlanQual is possible */
3306  Assert(epqstate != NULL);
3307 
3308  /*
3309  * lock tuple for update
3310  */
3311  if (!IsolationUsesXactSnapshot())
3312  lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
3313  test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
3314  estate->es_output_cid,
3315  lockmode, LockWaitBlock,
3316  lockflags,
3317  &tmfd);
3318 
3319  /* Let the caller know about the status of this operation */
3320  if (tmresultp)
3321  *tmresultp = test;
3322  if (tmfdp)
3323  *tmfdp = tmfd;
3324 
3325  switch (test)
3326  {
3327  case TM_SelfModified:
3328 
3329  /*
3330  * The target tuple was already updated or deleted by the
3331  * current command, or by a later command in the current
3332  * transaction. We ignore the tuple in the former case, and
3333  * throw error in the latter case, for the same reasons
3334  * enumerated in ExecUpdate and ExecDelete in
3335  * nodeModifyTable.c.
3336  */
3337  if (tmfd.cmax != estate->es_output_cid)
3338  ereport(ERROR,
3339  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3340  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3341  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3342 
3343  /* treat it as deleted; do not process */
3344  return false;
3345 
3346  case TM_Ok:
3347  if (tmfd.traversed)
3348  {
3349  /*
3350  * Recheck the tuple using EPQ. For MERGE, we leave this
3351  * to the caller (it must do additional rechecking, and
3352  * might end up executing a different action entirely).
3353  */
3354  if (estate->es_plannedstmt->commandType == CMD_MERGE)
3355  {
3356  if (tmresultp)
3357  *tmresultp = TM_Updated;
3358  return false;
3359  }
3360 
3361  *epqslot = EvalPlanQual(epqstate,
3362  relation,
3363  relinfo->ri_RangeTableIndex,
3364  oldslot);
3365 
3366  /*
3367  * If PlanQual failed for updated tuple - we must not
3368  * process this tuple!
3369  */
3370  if (TupIsNull(*epqslot))
3371  {
3372  *epqslot = NULL;
3373  return false;
3374  }
3375  }
3376  break;
3377 
3378  case TM_Updated:
3379  if (IsolationUsesXactSnapshot())
3380  ereport(ERROR,
3381  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3382  errmsg("could not serialize access due to concurrent update")));
3383  elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3384  break;
3385 
3386  case TM_Deleted:
3387  if (IsolationUsesXactSnapshot())
3388  ereport(ERROR,
3389  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3390  errmsg("could not serialize access due to concurrent delete")));
3391  /* tuple was deleted */
3392  return false;
3393 
3394  case TM_Invisible:
3395  elog(ERROR, "attempted to lock invisible tuple");
3396  break;
3397 
3398  default:
3399  elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3400  return false; /* keep compiler quiet */
3401  }
3402  }
3403  else
3404  {
3405  /*
3406  * We expect the tuple to be present, thus very simple error handling
3407  * suffices.
3408  */
3409  if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3410  oldslot))
3411  elog(ERROR, "failed to fetch tuple for trigger");
3412  }
3413 
3414  return true;
3415 }
3416 
3417 /*
3418  * Is trigger enabled to fire?
3419  */
3420 static bool
3421 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3422  Trigger *trigger, TriggerEvent event,
3423  Bitmapset *modifiedCols,
3424  TupleTableSlot *oldslot, TupleTableSlot *newslot)
3425 {
3426  /* Check replication-role-dependent enable state */
3427  if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3428  {
3429  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3430  trigger->tgenabled == TRIGGER_DISABLED)
3431  return false;
3432  }
3433  else /* ORIGIN or LOCAL role */
3434  {
3435  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3436  trigger->tgenabled == TRIGGER_DISABLED)
3437  return false;
3438  }
3439 
3440  /*
3441  * Check for column-specific trigger (only possible for UPDATE, and in
3442  * fact we *must* ignore tgattr for other event types)
3443  */
3444  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3445  {
3446  int i;
3447  bool modified;
3448 
3449  modified = false;
3450  for (i = 0; i < trigger->tgnattr; i++)
3451  {
3452  if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3453  modifiedCols))
3454  {
3455  modified = true;
3456  break;
3457  }
3458  }
3459  if (!modified)
3460  return false;
3461  }
3462 
3463  /* Check for WHEN clause */
3464  if (trigger->tgqual)
3465  {
3466  ExprState **predicate;
3467  ExprContext *econtext;
3468  MemoryContext oldContext;
3469  int i;
3470 
3471  Assert(estate != NULL);
3472 
3473  /*
3474  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3475  * matching element of relinfo->ri_TrigWhenExprs[]
3476  */
3477  i = trigger - relinfo->ri_TrigDesc->triggers;
3478  predicate = &relinfo->ri_TrigWhenExprs[i];
3479 
3480  /*
3481  * If first time through for this WHEN expression, build expression
3482  * nodetrees for it. Keep them in the per-query memory context so
3483  * they'll survive throughout the query.
3484  */
3485  if (*predicate == NULL)
3486  {
3487  Node *tgqual;
3488 
3489  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3490  tgqual = stringToNode(trigger->tgqual);
3491  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3492  ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3493  ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3494  /* ExecPrepareQual wants implicit-AND form */
3495  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3496  *predicate = ExecPrepareQual((List *) tgqual, estate);
3497  MemoryContextSwitchTo(oldContext);
3498  }
3499 
3500  /*
3501  * We will use the EState's per-tuple context for evaluating WHEN
3502  * expressions (creating it if it's not already there).
3503  */
3504  econtext = GetPerTupleExprContext(estate);
3505 
3506  /*
3507  * Finally evaluate the expression, making the old and/or new tuples
3508  * available as INNER_VAR/OUTER_VAR respectively.
3509  */
3510  econtext->ecxt_innertuple = oldslot;
3511  econtext->ecxt_outertuple = newslot;
3512  if (!ExecQual(*predicate, econtext))
3513  return false;
3514  }
3515 
3516  return true;
3517 }
3518 
3519 
3520 /* ----------
3521  * After-trigger stuff
3522  *
3523  * The AfterTriggersData struct holds data about pending AFTER trigger events
3524  * during the current transaction tree. (BEFORE triggers are fired
3525  * immediately so we don't need any persistent state about them.) The struct
3526  * and most of its subsidiary data are kept in TopTransactionContext; however
3527  * some data that can be discarded sooner appears in the CurTransactionContext
3528  * of the relevant subtransaction. Also, the individual event records are
3529  * kept in a separate sub-context of TopTransactionContext. This is done
3530  * mainly so that it's easy to tell from a memory context dump how much space
3531  * is being eaten by trigger events.
3532  *
3533  * Because the list of pending events can grow large, we go to some
3534  * considerable effort to minimize per-event memory consumption. The event
3535  * records are grouped into chunks and common data for similar events in the
3536  * same chunk is only stored once.
3537  *
3538  * XXX We need to be able to save the per-event data in a file if it grows too
3539  * large.
3540  * ----------
3541  */
3542 
3543 /* Per-trigger SET CONSTRAINT status */
3544 typedef struct SetConstraintTriggerData
3545 {
3546  Oid sct_tgoid;
3547  bool sct_tgisdeferred;
3548 } SetConstraintTriggerData;
3549 
3550 typedef struct SetConstraintTriggerData *SetConstraintTrigger;
3551 
3552 /*
3553  * SET CONSTRAINT intra-transaction status.
3554  *
3555  * We make this a single palloc'd object so it can be copied and freed easily.
3556  *
3557  * all_isset and all_isdeferred are used to keep track
3558  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3559  *
3560  * trigstates[] stores per-trigger tgisdeferred settings.
3561  */
3562 typedef struct SetConstraintStateData
3563 {
3564  bool all_isset;
3565  bool all_isdeferred;
3566  int numstates; /* number of trigstates[] entries in use */
3567  int numalloc; /* allocated size of trigstates[] */
3568  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3569 } SetConstraintStateData;
3570 
3571 typedef SetConstraintStateData *SetConstraintState;
3572 
3573 
3574 /*
3575  * Per-trigger-event data
3576  *
3577  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3578  * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions.
3579  * Each event record also has an associated AfterTriggerSharedData that is
3580  * shared across all instances of similar events within a "chunk".
3581  *
3582  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3583  * fields. Updates of regular tables use two; inserts and deletes of regular
3584  * tables use one; foreign tables always use zero and save the tuple(s) to a
3585  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3586  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3587  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3588  * tuple(s). This permits storing tuples once regardless of the number of
3589  * row-level triggers on a foreign table.
3590  *
3591  * When updates on partitioned tables cause rows to move between partitions,
3592  * the OIDs of both partitions are stored too, so that the tuples can be
3593  * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross-
3594  * partition update").
3595  *
3596  * Note that we need triggers on foreign tables to be fired in exactly the
3597  * order they were queued, so that the tuples come out of the tuplestore in
3598  * the right order. To ensure that, we forbid deferrable (constraint)
3599  * triggers on foreign tables. This also ensures that such triggers do not
3600  * get deferred into outer trigger query levels, meaning that it's okay to
3601  * destroy the tuplestore at the end of the query level.
3602  *
3603  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3604  * require no ctid field. We lack the flag bit space to neatly represent that
3605  * distinct case, and it seems unlikely to be worth much trouble.
3606  *
3607  * Note: ats_firing_id is initially zero and is set to something else when
3608  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3609  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3610  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3611  * because all instances of the same type of event in a given event list will
3612  * be fired at the same time, if they were queued between the same firing
3613  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3614  * a new event to an existing AfterTriggerSharedData record.
3615  */
3616 typedef uint32 TriggerFlags;
3617 
3618 #define AFTER_TRIGGER_OFFSET 0x07FFFFFF /* must be low-order bits */
3619 #define AFTER_TRIGGER_DONE 0x80000000
3620 #define AFTER_TRIGGER_IN_PROGRESS 0x40000000
3621 /* bits describing the size and tuple sources of this event */
3622 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3623 #define AFTER_TRIGGER_FDW_FETCH 0x20000000
3624 #define AFTER_TRIGGER_1CTID 0x10000000
3625 #define AFTER_TRIGGER_2CTID 0x30000000
3626 #define AFTER_TRIGGER_CP_UPDATE 0x08000000
3627 #define AFTER_TRIGGER_TUP_BITS 0x38000000
3628 typedef struct AfterTriggerSharedData *AfterTriggerShared;
3629 
3630 typedef struct AfterTriggerSharedData
3631 {
3632  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3633  Oid ats_tgoid; /* the trigger's ID */
3634  Oid ats_relid; /* the relation it's on */
3635  CommandId ats_firing_id; /* ID for firing cycle */
3636  struct AfterTriggersTableData *ats_table; /* transition table access */
3637  Bitmapset *ats_modifiedcols; /* modified columns */
3638 } AfterTriggerSharedData;
3639 
3640 typedef struct AfterTriggerEventData *AfterTriggerEvent;
3641 
3642 typedef struct AfterTriggerEventData
3643 {
3644  TriggerFlags ate_flags; /* status bits and offset to shared data */
3645  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3646  ItemPointerData ate_ctid2; /* new updated tuple */
3647 
3648  /*
3649  * During a cross-partition update of a partitioned table, we also store
3650  * the OIDs of source and destination partitions that are needed to fetch
3651  * the old (ctid1) and the new tuple (ctid2) from, respectively.
3652  */
3653  Oid ate_src_part;
3654  Oid ate_dst_part;
3655 } AfterTriggerEventData;
3656 
3657 /* AfterTriggerEventData, minus ate_src_part, ate_dst_part */
3658 typedef struct AfterTriggerEventDataNoOids
3659 {
3660  TriggerFlags ate_flags;
3661  ItemPointerData ate_ctid1;
3662  ItemPointerData ate_ctid2;
3663 } AfterTriggerEventDataNoOids;
3664 
3665 /* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */
3666 typedef struct AfterTriggerEventDataOneCtid
3667 {
3668  TriggerFlags ate_flags; /* status bits and offset to shared data */
3669  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3670 } AfterTriggerEventDataOneCtid;
3671 
3672 /* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */
3673 typedef struct AfterTriggerEventDataZeroCtids
3674 {
3675  TriggerFlags ate_flags; /* status bits and offset to shared data */
3676 } AfterTriggerEventDataZeroCtids;
3677 
3678 #define SizeofTriggerEvent(evt) \
3679  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \
3680  sizeof(AfterTriggerEventData) : \
3681  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3682  sizeof(AfterTriggerEventDataNoOids) : \
3683  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3684  sizeof(AfterTriggerEventDataOneCtid) : \
3685  sizeof(AfterTriggerEventDataZeroCtids))))
3686 
3687 #define GetTriggerSharedData(evt) \
3688  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3689 
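/*
 * Illustrative sketch (simplified, self-contained C; the Demo* names are
 * hypothetical): packing status bits and a byte offset into one flags word,
 * and recovering the shared record from that offset, which is the scheme
 * the AFTER_TRIGGER_OFFSET mask and GetTriggerSharedData() above rely on.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_OFFSET_MASK	0x07FFFFFFu /* low-order bits: byte offset */
#define DEMO_DONE			0x80000000u /* high-order bits: status flags */

typedef struct DemoShared
{
	unsigned int trigger_oid;
} DemoShared;

typedef struct DemoEvent
{
	uint32_t	flags;			/* status bits + offset to shared data */
} DemoEvent;

/* Link an event to a shared record stored later in the same chunk. */
void
demo_link_event(DemoEvent *event, const DemoShared *shared)
{
	ptrdiff_t	offset = (const char *) shared - (const char *) event;

	assert(offset > 0 && offset <= (ptrdiff_t) DEMO_OFFSET_MASK);
	event->flags = (event->flags & ~DEMO_OFFSET_MASK) | (uint32_t) offset;
}

/* Recover the shared record from the event's flags word. */
const DemoShared *
demo_get_shared(const DemoEvent *event)
{
	return (const DemoShared *) ((const char *) event +
								 (event->flags & DEMO_OFFSET_MASK));
}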
3690 /*
3691  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3692  * larger chunks (a slightly more sophisticated version of an expansible
3693  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3694  * AfterTriggerEventData records; the space between endfree and endptr is
3695  * occupied by AfterTriggerSharedData records.
3696  */
3697 typedef struct AfterTriggerEventChunk
3698 {
3699  struct AfterTriggerEventChunk *next; /* list link */
3700  char *freeptr; /* start of free space in chunk */
3701  char *endfree; /* end of free space in chunk */
3702  char *endptr; /* end of chunk */
3703  /* event data follows here */
3704 } AfterTriggerEventChunk;
3705 
3706 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3707 
3708 /* A list of events */
3709 typedef struct AfterTriggerEventList
3710 {
3711  AfterTriggerEventChunk *head;
3712  AfterTriggerEventChunk *tail;
3713  char *tailfree; /* freeptr of tail chunk */
3714 } AfterTriggerEventList;
3715 
3716 /* Macros to help in iterating over a list of events */
3717 #define for_each_chunk(cptr, evtlist) \
3718  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3719 #define for_each_event(eptr, cptr) \
3720  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3721  (char *) eptr < (cptr)->freeptr; \
3722  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3723 /* Use this if no special per-chunk processing is needed */
3724 #define for_each_event_chunk(eptr, cptr, evtlist) \
3725  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3726 
3727 /* Macros for iterating from a start point that might not be list start */
3728 #define for_each_chunk_from(cptr) \
3729  for (; cptr != NULL; cptr = cptr->next)
3730 #define for_each_event_from(eptr, cptr) \
3731  for (; \
3732  (char *) eptr < (cptr)->freeptr; \
3733  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3734 
3735 
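/*
 * Illustrative sketch (simplified, self-contained C; the demo_* names are
 * hypothetical): the iteration pattern behind for_each_event(), walking
 * variable-size records packed back to back in a byte buffer and advancing
 * by each record's own size until the chunk's free pointer is reached.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct DemoRecord
{
	size_t		size;			/* total size of this record, header included */
} DemoRecord;

void
demo_walk(char *data_start, char *freeptr)
{
	for (char *p = data_start; p < freeptr; p += ((DemoRecord *) p)->size)
	{
		DemoRecord *rec = (DemoRecord *) p;

		printf("record of %zu bytes\n", rec->size);
	}
}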
3736 /*
3737  * All per-transaction data for the AFTER TRIGGERS module.
3738  *
3739  * AfterTriggersData has the following fields:
3740  *
3741  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3742  * We mark firable events with the current firing cycle's ID so that we can
3743  * tell which ones to work on. This ensures sane behavior if a trigger
3744  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3745  * only fire those events that weren't already scheduled for firing.
3746  *
3747  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3748  * This is saved and restored across failed subtransactions.
3749  *
3750  * events is the current list of deferred events. This is global across
3751  * all subtransactions of the current transaction. In a subtransaction
3752  * abort, we know that the events added by the subtransaction are at the
3753  * end of the list, so it is relatively easy to discard them. The event
3754  * list chunks themselves are stored in event_cxt.
3755  *
3756  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3757  * (-1 when the stack is empty).
3758  *
3759  * query_stack[query_depth] is the per-query-level data, including these fields:
3760  *
3761  * events is a list of AFTER trigger events queued by the current query.
3762  * None of these are valid until the matching AfterTriggerEndQuery call
3763  * occurs. At that point we fire immediate-mode triggers, and append any
3764  * deferred events to the main events list.
3765  *
3766  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3767  * needed by events queued by the current query. (Note: we use just one
3768  * tuplestore even though more than one foreign table might be involved.
3769  * This is okay because tuplestores don't really care what's in the tuples
3770  * they store; but it's possible that someday it'd break.)
3771  *
3772  * tables is a List of AfterTriggersTableData structs for target tables
3773  * of the current query (see below).
3774  *
3775  * maxquerydepth is just the allocated length of query_stack.
3776  *
3777  * trans_stack holds per-subtransaction data, including these fields:
3778  *
3779  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3780  * state data. Each subtransaction level that modifies that state first
3781  * saves a copy, which we use to restore the state if we abort.
3782  *
3783  * events is a copy of the events head/tail pointers,
3784  * which we use to restore those values during subtransaction abort.
3785  *
3786  * query_depth is the subtransaction-start-time value of query_depth,
3787  * which we similarly use to clean up at subtransaction abort.
3788  *
3789  * firing_counter is the subtransaction-start-time value of firing_counter.
3790  * We use this to recognize which deferred triggers were fired (or marked
3791  * for firing) within an aborted subtransaction.
3792  *
3793  * We use GetCurrentTransactionNestLevel() to determine the correct array
3794  * index in trans_stack. maxtransdepth is the number of allocated entries in
3795  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3796  * in cases where errors during subxact abort cause multiple invocations
3797  * of AfterTriggerEndSubXact() at the same nesting depth.)
3798  *
3799  * We create an AfterTriggersTableData struct for each target table of the
3800  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3801  * either transition tables or statement-level triggers. This is used to
3802  * hold the relevant transition tables, as well as info tracking whether
3803  * we already queued the statement triggers. (We use that info to prevent
3804  * firing the same statement triggers more than once per statement, or really
3805  * once per transition table set.) These structs, along with the transition
3806  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3807  * That's sufficient lifespan because we don't allow transition tables to be
3808  * used by deferrable triggers, so they only need to survive until
3809  * AfterTriggerEndQuery.
3810  */
3811 typedef struct AfterTriggersQueryData AfterTriggersQueryData;
3812 typedef struct AfterTriggersTransData AfterTriggersTransData;
3813 typedef struct AfterTriggersTableData AfterTriggersTableData;
3814 
3815 typedef struct AfterTriggersData
3816 {
3817  CommandId firing_counter; /* next firing ID to assign */
3818  SetConstraintState state; /* the active S C state */
3819  AfterTriggerEventList events; /* deferred-event list */
3820  MemoryContext event_cxt; /* memory context for events, if any */
3821 
3822  /* per-query-level data: */
3823  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3824  int query_depth; /* current index in above array */
3825  int maxquerydepth; /* allocated len of above array */
3826 
3827  /* per-subtransaction-level data: */
3828  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3829  int maxtransdepth; /* allocated len of above array */
3830 } AfterTriggersData;
3831 
3832 struct AfterTriggersQueryData
3833 {
3834  AfterTriggerEventList events; /* events pending from this query */
3835  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3836  List *tables; /* list of AfterTriggersTableData, see below */
3837 };
3838 
3839 struct AfterTriggersTransData
3840 {
3841  /* these fields are just for resetting at subtrans abort: */
3842  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3843  AfterTriggerEventList events; /* saved list pointer */
3844  int query_depth; /* saved query_depth */
3845  CommandId firing_counter; /* saved firing_counter */
3846 };
3847 
3848 struct AfterTriggersTableData
3849 {
3850  /* relid + cmdType form the lookup key for these structs: */
3851  Oid relid; /* target table's OID */
3852  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3853  bool closed; /* true when no longer OK to add tuples */
3854  bool before_trig_done; /* did we already queue BS triggers? */
3855  bool after_trig_done; /* did we already queue AS triggers? */
3856  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3857 
3858  /*
3859  * We maintain separate transition tables for UPDATE/INSERT/DELETE since
3860  * MERGE can run all three actions in a single statement. Note that UPDATE
3861  * needs both old and new transition tables whereas INSERT needs only new,
3862  * and DELETE needs only old.
3863  */
3864 
3865  /* "old" transition table for UPDATE, if any */
3866  Tuplestorestate *old_upd_tuplestore;
3867  /* "new" transition table for UPDATE, if any */
3868  Tuplestorestate *new_upd_tuplestore;
3869  /* "old" transition table for DELETE, if any */
3870  Tuplestorestate *old_del_tuplestore;
3871  /* "new" transition table for INSERT, if any */
3872  Tuplestorestate *new_ins_tuplestore;
3873 
3874  TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3875 };
3876 
3877 static AfterTriggersData afterTriggers;
3878 
3879 static void AfterTriggerExecute(EState *estate,
3880  AfterTriggerEvent event,
3881  ResultRelInfo *relInfo,
3882  ResultRelInfo *src_relInfo,
3883  ResultRelInfo *dst_relInfo,
3884  TriggerDesc *trigdesc,
3885  FmgrInfo *finfo,
3886  Instrumentation *instr,
3887  MemoryContext per_tuple_context,
3888  TupleTableSlot *trig_tuple_slot1,
3889  TupleTableSlot *trig_tuple_slot2);
3890 static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid,
3891  CmdType cmdType);
3892 static TupleTableSlot *GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
3893  TupleDesc tupdesc);
3894 static Tuplestorestate *GetAfterTriggersTransitionTable(int event,
3895  TupleTableSlot *oldslot,
3896  TupleTableSlot *newslot,
3897  TransitionCaptureState *transition_capture);
3898 static void TransitionTableAddTuple(EState *estate,
3899  TransitionCaptureState *transition_capture,
3900  ResultRelInfo *relinfo,
3901  TupleTableSlot *slot,
3902  TupleTableSlot *original_insert_tuple,
3903  Tuplestorestate *tuplestore);
3904 static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs);
3905 static SetConstraintState SetConstraintStateCreate(int numalloc);
3906 static SetConstraintState SetConstraintStateCopy(SetConstraintState origstate);
3907 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3908  Oid tgoid, bool tgisdeferred);
3909 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3910 
3911 
3912 /*
3913  * Get the FDW tuplestore for the current trigger query level, creating it
3914  * if necessary.
3915  */
3916 static Tuplestorestate *
3917 GetCurrentFDWTuplestore(void)
3918 {
3919  Tuplestorestate *ret;
3920 
3921  ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
3922  if (ret == NULL)
3923  {
3924  MemoryContext oldcxt;
3925  ResourceOwner saveResourceOwner;
3926 
3927  /*
3928  * Make the tuplestore valid until end of subtransaction. We really
3929  * only need it until AfterTriggerEndQuery().
3930  */
3931  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
3932  saveResourceOwner = CurrentResourceOwner;
3933  CurrentResourceOwner = CurTransactionResourceOwner;
3934 
3935  ret = tuplestore_begin_heap(false, false, work_mem);
3936 
3937  CurrentResourceOwner = saveResourceOwner;
3938  MemoryContextSwitchTo(oldcxt);
3939 
3940  afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
3941  }
3942 
3943  return ret;
3944 }
3945 
3946 /* ----------
3947  * afterTriggerCheckState()
3948  *
3949  * Returns true if the trigger event is actually in state DEFERRED.
3950  * ----------
3951  */
3952 static bool
3953 afterTriggerCheckState(AfterTriggerShared evtshared)
3954 {
3955  Oid tgoid = evtshared->ats_tgoid;
3956  SetConstraintState state = afterTriggers.state;
3957  int i;
3958 
3959  /*
3960  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3961  * constraints declared NOT DEFERRABLE), the state is always false.
3962  */
3963  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3964  return false;
3965 
3966  /*
3967  * If constraint state exists, SET CONSTRAINTS might have been executed
3968  * either for this trigger or for all triggers.
3969  */
3970  if (state != NULL)
3971  {
3972  /* Check for SET CONSTRAINTS for this specific trigger. */
3973  for (i = 0; i < state->numstates; i++)
3974  {
3975  if (state->trigstates[i].sct_tgoid == tgoid)
3976  return state->trigstates[i].sct_tgisdeferred;
3977  }
3978 
3979  /* Check for SET CONSTRAINTS ALL. */
3980  if (state->all_isset)
3981  return state->all_isdeferred;
3982  }
3983 
3984  /*
3985  * Otherwise return the default state for the trigger.
3986  */
3987  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3988 }
3989 
3990 /* ----------
3991  * afterTriggerCopyBitmap()
3992  *
3993  * Copy bitmap into AfterTriggerEvents memory context, which is where the after
3994  * trigger events are kept.
3995  * ----------
3996  */
3997 static Bitmapset *
3998 afterTriggerCopyBitmap(Bitmapset *src)
3999 {
4000  Bitmapset *dst;
4001  MemoryContext oldcxt;
4002 
4003  if (src == NULL)
4004  return NULL;
4005 
4006  /* Create event context if we didn't already */
4007  if (afterTriggers.event_cxt == NULL)
4008  afterTriggers.event_cxt =
4009  AllocSetContextCreate(TopTransactionContext,
4010  "AfterTriggerEvents",
4011  ALLOCSET_DEFAULT_SIZES);
4012 
4013  oldcxt = MemoryContextSwitchTo(afterTriggers.event_cxt);
4014 
4015  dst = bms_copy(src);
4016 
4017  MemoryContextSwitchTo(oldcxt);
4018 
4019  return dst;
4020 }
4021 
4022 /* ----------
4023  * afterTriggerAddEvent()
4024  *
4025  * Add a new trigger event to the specified queue.
4026  * The passed-in event data is copied.
4027  * ----------
4028  */
4029 static void
4030 afterTriggerAddEvent(AfterTriggerEventList *events,
4031  AfterTriggerEvent event, AfterTriggerShared evtshared)
4032 {
4033  Size eventsize = SizeofTriggerEvent(event);
4034  Size needed = eventsize + sizeof(AfterTriggerSharedData);
4035  AfterTriggerEventChunk *chunk;
4036  AfterTriggerShared newshared;
4037  AfterTriggerEvent newevent;
4038 
4039  /*
4040  * If empty list or not enough room in the tail chunk, make a new chunk.
4041  * We assume here that a new shared record will always be needed.
4042  */
4043  chunk = events->tail;
4044  if (chunk == NULL ||
4045  chunk->endfree - chunk->freeptr < needed)
4046  {
4047  Size chunksize;
4048 
4049  /* Create event context if we didn't already */
4050  if (afterTriggers.event_cxt == NULL)
4051  afterTriggers.event_cxt =
4052  AllocSetContextCreate(TopTransactionContext,
4053  "AfterTriggerEvents",
4054  ALLOCSET_DEFAULT_SIZES);
4055 
4056  /*
4057  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
4058  * These numbers are fairly arbitrary, though there is a hard limit at
4059  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
4060  * shared records using the available space in ate_flags. Another
4061  * constraint is that if the chunk size gets too huge, the search loop
4062  * below would get slow given a (not too common) usage pattern with
4063  * many distinct event types in a chunk. Therefore, we double the
4064  * preceding chunk size only if there weren't too many shared records
4065  * in the preceding chunk; otherwise we halve it. This gives us some
4066  * ability to adapt to the actual usage pattern of the current query
4067  * while still having large chunk sizes in typical usage. All chunk
4068  * sizes used should be MAXALIGN multiples, to ensure that the shared
4069  * records will be aligned safely.
4070  */
4071 #define MIN_CHUNK_SIZE 1024
4072 #define MAX_CHUNK_SIZE (1024*1024)
4073 
4074 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
4075 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
4076 #endif
4077 
4078  if (chunk == NULL)
4079  chunksize = MIN_CHUNK_SIZE;
4080  else
4081  {
4082  /* preceding chunk size... */
4083  chunksize = chunk->endptr - (char *) chunk;
4084  /* check number of shared records in preceding chunk */
4085  if ((chunk->endptr - chunk->endfree) <=
4086  (100 * sizeof(AfterTriggerSharedData)))
4087  chunksize *= 2; /* okay, double it */
4088  else
4089  chunksize /= 2; /* too many shared records */
4090  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
4091  }
4092  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
4093  chunk->next = NULL;
4094  chunk->freeptr = CHUNK_DATA_START(chunk);
4095  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
4096  Assert(chunk->endfree - chunk->freeptr >= needed);
4097 
4098  if (events->head == NULL)
4099  events->head = chunk;
4100  else
4101  events->tail->next = chunk;
4102  events->tail = chunk;
4103  /* events->tailfree is now out of sync, but we'll fix it below */
4104  }
4105 
4106  /*
4107  * Try to locate a matching shared-data record already in the chunk. If
4108  * none, make a new one.
4109  */
4110  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
4111  (char *) newshared >= chunk->endfree;
4112  newshared--)
4113  {
4114  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
4115  newshared->ats_relid == evtshared->ats_relid &&
4116  newshared->ats_event == evtshared->ats_event &&
4117  newshared->ats_table == evtshared->ats_table &&
4118  newshared->ats_firing_id == 0)
4119  break;
4120  }
4121  if ((char *) newshared < chunk->endfree)
4122  {
4123  *newshared = *evtshared;
4124  newshared->ats_firing_id = 0; /* just to be sure */
4125  chunk->endfree = (char *) newshared;
4126  }
4127 
4128  /* Insert the data */
4129  newevent = (AfterTriggerEvent) chunk->freeptr;
4130  memcpy(newevent, event, eventsize);
4131  /* ... and link the new event to its shared record */
4132  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
4133  newevent->ate_flags |= (char *) newshared - (char *) newevent;
4134 
4135  chunk->freeptr += eventsize;
4136  events->tailfree = chunk->freeptr;
4137 }
4138 
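/*
 * Illustrative sketch (plain C; the demo_* names are hypothetical) of the
 * chunk-sizing heuristic in afterTriggerAddEvent(): double the previous
 * chunk size while its shared-record area stayed small, halve it when many
 * distinct shared records were needed, and clamp to the configured bounds.
 */
#include <stddef.h>

#define DEMO_MIN_CHUNK	1024
#define DEMO_MAX_CHUNK	(1024 * 1024)

size_t
demo_next_chunk_size(size_t prev_size, size_t shared_bytes_used,
					 size_t shared_record_size)
{
	size_t		next;

	if (prev_size == 0)
		return DEMO_MIN_CHUNK;	/* first chunk */

	if (shared_bytes_used <= 100 * shared_record_size)
		next = prev_size * 2;	/* few distinct event types: grow */
	else
		next = prev_size / 2;	/* many shared records: shrink */

	if (next > DEMO_MAX_CHUNK)
		next = DEMO_MAX_CHUNK;
	if (next < DEMO_MIN_CHUNK)
		next = DEMO_MIN_CHUNK;
	return next;
}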
4139 /* ----------
4140  * afterTriggerFreeEventList()
4141  *
4142  * Free all the event storage in the given list.
4143  * ----------
4144  */
4145 static void
4146 afterTriggerFreeEventList(AfterTriggerEventList *events)
4147 {
4148  AfterTriggerEventChunk *chunk;
4149 
4150  while ((chunk = events->head) != NULL)
4151  {
4152  events->head = chunk->next;
4153  pfree(chunk);
4154  }
4155  events->tail = NULL;
4156  events->tailfree = NULL;
4157 }
4158 
4159 /* ----------
4160  * afterTriggerRestoreEventList()
4161  *
4162  * Restore an event list to its prior length, removing all the events
4163  * added since it had the value old_events.
4164  * ----------
4165  */
4166 static void
4167 afterTriggerRestoreEventList(AfterTriggerEventList *events,
4168  const AfterTriggerEventList *old_events)
4169 {
4170  AfterTriggerEventChunk *chunk;
4171  AfterTriggerEventChunk *next_chunk;
4172 
4173  if (old_events->tail == NULL)
4174  {
4175  /* restoring to a completely empty state, so free everything */
4176  afterTriggerFreeEventList(events);
4177  }
4178  else
4179  {
4180  *events = *old_events;
4181  /* free any chunks after the last one we want to keep */
4182  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
4183  {
4184  next_chunk = chunk->next;
4185  pfree(chunk);
4186  }
4187  /* and clean up the tail chunk to be the right length */
4188  events->tail->next = NULL;
4189  events->tail->freeptr = events->tailfree;
4190 
4191  /*
4192  * We don't make any effort to remove now-unused shared data records.
4193  * They might still be useful, anyway.
4194  */
4195  }
4196 }
4197 
4198 /* ----------
4199  * afterTriggerDeleteHeadEventChunk()
4200  *
4201  * Remove the first chunk of events from the query level's event list.
4202  * Keep any event list pointers elsewhere in the query level's data
4203  * structures in sync.
4204  * ----------
4205  */
4206 static void
4207 afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
4208 {
4209  AfterTriggerEventChunk *target = qs->events.head;
4210  ListCell *lc;
4211 
4212  Assert(target && target->next);
4213 
4214  /*
4215  * First, update any pointers in the per-table data, so that they won't be
4216  * dangling. Resetting obsoleted pointers to NULL will make
4217  * cancel_prior_stmt_triggers start from the list head, which is fine.
4218  */
4219  foreach(lc, qs->tables)
4220  {
4221  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4222 
4223  if (table->after_trig_done &&
4224  table->after_trig_events.tail == target)
4225  {
4226  table->after_trig_events.head = NULL;
4227  table->after_trig_events.tail = NULL;
4228  table->after_trig_events.tailfree = NULL;
4229  }
4230  }
4231 
4232  /* Now we can flush the head chunk */
4233  qs->events.head = target->next;
4234  pfree(target);
4235 }
4236 
4237 
4238 /* ----------
4239  * AfterTriggerExecute()
4240  *
4241  * Fetch the required tuples back from the heap and fire one
4242  * single trigger function.
4243  *
4244  * Frequently, this will be fired many times in a row for triggers of
4245  * a single relation. Therefore, we cache the open relation and provide
4246  * fmgr lookup cache space at the caller level. (For triggers fired at
4247  * the end of a query, we can even piggyback on the executor's state.)
4248  *
4249  * When fired for a cross-partition update of a partitioned table, the old
4250  * tuple is fetched using 'src_relInfo' (the source leaf partition) and
4251  * the new tuple using 'dst_relInfo' (the destination leaf partition), though
4252  * both are converted into the root partitioned table's format before passing
4253  * to the trigger function.
4254  *
4255  * event: event currently being fired.
4256  * relInfo: result relation for event.
4257  * src_relInfo: source partition of a cross-partition update
4258  * dst_relInfo: its destination partition
4259  * trigdesc: working copy of rel's trigger info.
4260  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4261  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4262  * or NULL if no instrumentation is wanted.
4263  * per_tuple_context: memory context to call trigger function in.
4264  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4265  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
4266  * ----------
4267  */
4268 static void
4269 AfterTriggerExecute(EState *estate,
4270  AfterTriggerEvent event,
4271  ResultRelInfo *relInfo,
4272  ResultRelInfo *src_relInfo,
4273  ResultRelInfo *dst_relInfo,
4274  TriggerDesc *trigdesc,
4275  FmgrInfo *finfo, Instrumentation *instr,
4276  MemoryContext per_tuple_context,
4277  TupleTableSlot *trig_tuple_slot1,
4278  TupleTableSlot *trig_tuple_slot2)
4279 {
4280  Relation rel = relInfo->ri_RelationDesc;
4281  Relation src_rel = src_relInfo->ri_RelationDesc;
4282  Relation dst_rel = dst_relInfo->ri_RelationDesc;
4283  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4284  Oid tgoid = evtshared->ats_tgoid;
4285  TriggerData LocTriggerData = {0};
4286  HeapTuple rettuple;
4287  int tgindx;
4288  bool should_free_trig = false;
4289  bool should_free_new = false;
4290 
4291  /*
4292  * Locate trigger in trigdesc.
4293  */
4294  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4295  {
4296  if (trigdesc->triggers[tgindx].tgoid == tgoid)
4297  {
4298  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4299  break;
4300  }
4301  }
4302  if (LocTriggerData.tg_trigger == NULL)
4303  elog(ERROR, "could not find trigger %u", tgoid);
4304 
4305  /*
4306  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4307  * to include time spent re-fetching tuples in the trigger cost.
4308  */
4309  if (instr)
4310  InstrStartNode(instr + tgindx);
4311 
4312  /*
4313  * Fetch the required tuple(s).
4314  */
4315  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4316  {
4317  case AFTER_TRIGGER_FDW_FETCH:
4318  {
4319  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4320 
4321  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4322  trig_tuple_slot1))
4323  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4324 
4325  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4326  TRIGGER_EVENT_UPDATE &&
4327  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4328  trig_tuple_slot2))
4329  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4330  }
4331  /* fall through */
4332  case AFTER_TRIGGER_FDW_REUSE:
4333 
4334  /*
4335  * Store tuple in the slot so that tg_trigtuple does not reference
4336  * tuplestore memory. (It is formally possible for the trigger
4337  * function to queue trigger events that add to the same
4338  * tuplestore, which can push other tuples out of memory.) The
4339  * distinction is academic, because we start with a minimal tuple
4340  * that is stored as a heap tuple, constructed in a different memory
4341  * context, in the slot anyway.
4342  */
4343  LocTriggerData.tg_trigslot = trig_tuple_slot1;
4344  LocTriggerData.tg_trigtuple =
4345  ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
4346 
4347  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4348  TRIGGER_EVENT_UPDATE)
4349  {
4350  LocTriggerData.tg_newslot = trig_tuple_slot2;
4351  LocTriggerData.tg_newtuple =
4352  ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
4353  }
4354  else
4355  {
4356  LocTriggerData.tg_newtuple = NULL;
4357  }
4358  break;
4359 
4360  default:
4361  if (ItemPointerIsValid(&(event->ate_ctid1)))
4362  {
4363  TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate,
4364  src_relInfo);
4365 
4366  if (!table_tuple_fetch_row_version(src_rel,
4367  &(event->ate_ctid1),
4368  SnapshotAny,
4369  src_slot))
4370  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4371 
4372  /*
4373  * Store the tuple fetched from the source partition into the
4374  * target (root partitioned) table slot, converting if needed.
4375  */
4376  if (src_relInfo != relInfo)
4377  {
4378  TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo);
4379 
4380  LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
4381  if (map)
4382  {
4383  execute_attr_map_slot(map->attrMap,
4384  src_slot,
4385  LocTriggerData.tg_trigslot);
4386  }
4387  else
4388  ExecCopySlot(LocTriggerData.tg_trigslot, src_slot);
4389  }
4390  else
4391  LocTriggerData.tg_trigslot = src_slot;
4392  LocTriggerData.tg_trigtuple =
4393  ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
4394  }
4395  else
4396  {
4397  LocTriggerData.tg_trigtuple = NULL;
4398  }
4399 
4400  /* don't touch ctid2 if not there */
4402  (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) &&
4403  ItemPointerIsValid(&(event->ate_ctid2)))
4404  {
4405  TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate,
4406  dst_relInfo);
4407 
4408  if (!table_tuple_fetch_row_version(dst_rel,
4409  &(event->ate_ctid2),
4410  SnapshotAny,
4411  dst_slot))
4412  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4413 
4414  /*
4415  * Store the tuple fetched from the destination partition into
4416  * the target (root partitioned) table slot, converting if
4417  * needed.
4418  */
4419  if (dst_relInfo != relInfo)
4420  {
4421  TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo);
4422 
4423  LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
4424  if (map)
4425  {
4426  execute_attr_map_slot(map->attrMap,
4427  dst_slot,
4428  LocTriggerData.tg_newslot);
4429  }
4430  else
4431  ExecCopySlot(LocTriggerData.tg_newslot, dst_slot);
4432  }
4433  else
4434  LocTriggerData.tg_newslot = dst_slot;
4435  LocTriggerData.tg_newtuple =
4436  ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
4437  }
4438  else
4439  {
4440  LocTriggerData.tg_newtuple = NULL;
4441  }
4442  }
4443 
4444  /*
4445  * Set up the tuplestore information to let the trigger have access to
4446  * transition tables. When we first make a transition table available to
4447  * a trigger, mark it "closed" so that it cannot change anymore. If any
4448  * additional events of the same type get queued in the current trigger
4449  * query level, they'll go into new transition tables.
4450  */
4451  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4452  if (evtshared->ats_table)
4453  {
4454  if (LocTriggerData.tg_trigger->tgoldtable)
4455  {
4456  if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event))
4457  LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore;
4458  else
4459  LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore;
4460  evtshared->ats_table->closed = true;
4461  }
4462 
4463  if (LocTriggerData.tg_trigger->tgnewtable)
4464  {
4465  if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event))
4466  LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore;
4467  else
4468  LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore;
4469  evtshared->ats_table->closed = true;
4470  }
4471  }
4472 
4473  /*
4474  * Setup the remaining trigger information
4475  */
4476  LocTriggerData.type = T_TriggerData;
4477  LocTriggerData.tg_event =
4478  evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
4479  LocTriggerData.tg_relation = rel;
4480  if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4481  LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4482 
4483  MemoryContextReset(per_tuple_context);
4484 
4485  /*
4486  * Call the trigger and throw away any possibly returned updated tuple.
4487  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4488  */
4489  rettuple = ExecCallTriggerFunc(&LocTriggerData,
4490  tgindx,
4491  finfo,
4492  NULL,
4493  per_tuple_context);
4494  if (rettuple != NULL &&
4495  rettuple != LocTriggerData.tg_trigtuple &&
4496  rettuple != LocTriggerData.tg_newtuple)
4497  heap_freetuple(rettuple);
4498 
4499  /*
4500  * Release resources
4501  */
4502  if (should_free_trig)
4503  heap_freetuple(LocTriggerData.tg_trigtuple);
4504  if (should_free_new)
4505  heap_freetuple(LocTriggerData.tg_newtuple);
4506 
4507  /* don't clear slots' contents if foreign table */
4508  if (trig_tuple_slot1 == NULL)
4509  {
4510  if (LocTriggerData.tg_trigslot)
4511  ExecClearTuple(LocTriggerData.tg_trigslot);
4512  if (LocTriggerData.tg_newslot)
4513  ExecClearTuple(LocTriggerData.tg_newslot);
4514  }
4515 
4516  /*
4517  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4518  * one "tuple returned" (really the number of firings).
4519  */
4520  if (instr)
4521  InstrStopNode(instr + tgindx, 1);
4522 }
4523 
4524 
4525 /*
4526  * afterTriggerMarkEvents()
4527  *
4528  * Scan the given event list for not yet invoked events. Mark the ones
4529  * that can be invoked now with the current firing ID.
4530  *
4531  * If move_list isn't NULL, events that are not to be invoked now are
4532  * transferred to move_list.
4533  *
4534  * When immediate_only is true, do not invoke currently-deferred triggers.
4535  * (This will be false only at main transaction exit.)
4536  *
4537  * Returns true if any invokable events were found.
4538  */
4539 static bool
4540 afterTriggerMarkEvents(AfterTriggerEventList *events,
4541  AfterTriggerEventList *move_list,
4542  bool immediate_only)
4543 {
4544  bool found = false;
4545  bool deferred_found = false;
4546  AfterTriggerEvent event;
4547  AfterTriggerEventChunk *chunk;
4548 
4549  for_each_event_chunk(event, chunk, *events)
4550  {
4551  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4552  bool defer_it = false;
4553 
4554  if (!(event->ate_flags &
4555  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
4556  {
4557  /*
4558  * This trigger hasn't been called or scheduled yet. Check if we
4559  * should call it now.
4560  */
4561  if (immediate_only && afterTriggerCheckState(evtshared))
4562  {
4563  defer_it = true;
4564  }
4565  else
4566  {
4567  /*
4568  * Mark it as to be fired in this firing cycle.
4569  */
4570  evtshared->ats_firing_id = afterTriggers.firing_counter;
4571  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4572  found = true;
4573  }
4574  }
4575 
4576  /*
4577  * If it's deferred, move it to move_list, if requested.
4578  */
4579  if (defer_it && move_list != NULL)
4580  {
4581  deferred_found = true;
4582  /* add it to move_list */
4583  afterTriggerAddEvent(move_list, event, evtshared);
4584  /* mark original copy "done" so we don't do it again */
4585  event->ate_flags |= AFTER_TRIGGER_DONE;
4586  }
4587  }
4588 
4589  /*
4590  * We could allow deferred triggers if, before the end of the
4591  * security-restricted operation, we were to verify that a SET CONSTRAINTS
4592  * ... IMMEDIATE has fired all such triggers. For now, don't bother.
4593  */
4594  if (deferred_found && InSecurityRestrictedOperation())
4595  ereport(ERROR,
4596  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
4597  errmsg("cannot fire deferred trigger within security-restricted operation")));
4598 
4599  return found;
4600 }
4601 
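/*
 * Illustrative sketch (simplified, self-contained C; the Demo* names are
 * hypothetical) of the mark-then-fire protocol: one pass stamps runnable
 * events with a fresh firing-cycle id, and the firing pass executes only
 * events carrying that id, so a nested scan started by a trigger (e.g. via
 * SET CONSTRAINTS) cannot pick up events already claimed by an outer cycle.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct DemoEvt
{
	bool		done;
	bool		in_progress;
	unsigned int firing_id;
} DemoEvt;

/* Phase 1: mark events not yet scheduled; returns true if any were marked. */
bool
demo_mark(DemoEvt *evts, size_t n, unsigned int firing_id)
{
	bool		found = false;

	for (size_t i = 0; i < n; i++)
	{
		if (!evts[i].done && !evts[i].in_progress)
		{
			evts[i].in_progress = true;
			evts[i].firing_id = firing_id;
			found = true;
		}
	}
	return found;
}

/* Phase 2: fire only the events stamped with our own cycle id. */
void
demo_fire(DemoEvt *evts, size_t n, unsigned int firing_id)
{
	for (size_t i = 0; i < n; i++)
	{
		if (evts[i].in_progress && evts[i].firing_id == firing_id)
		{
			/* ... the trigger function would be called here ... */
			evts[i].in_progress = false;
			evts[i].done = true;
		}
	}
}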
4602 /*
4603  * afterTriggerInvokeEvents()
4604  *
4605  * Scan the given event list for events that are marked as to be fired
4606  * in the current firing cycle, and fire them.
4607  *
4608  * If estate isn't NULL, we use its result relation info to avoid repeated
4609  * openings and closing of trigger target relations. If it is NULL, we
4610  * make one locally to cache the info in case there are multiple trigger
4611  * events per rel.
4612  *
4613  * When delete_ok is true, it's safe to delete fully-processed events.
4614  * (We are not very tense about that: we simply reset a chunk to be empty
4615  * if all its events got fired. The objective here is just to avoid useless
4616  * rescanning of events when a trigger queues new events during transaction
4617  * end, so it's not necessary to worry much about the case where only
4618  * some events are fired.)
4619  *
4620  * Returns true if no unfired events remain in the list (this allows us
4621  * to avoid repeating afterTriggerMarkEvents).
4622  */
4623 static bool
4624 afterTriggerInvokeEvents(AfterTriggerEventList *events,
4625  CommandId firing_id,
4626  EState *estate,
4627  bool delete_ok)
4628 {
4629  bool all_fired = true;
4630  AfterTriggerEventChunk *chunk;
4631  MemoryContext per_tuple_context;
4632  bool local_estate = false;
4633  ResultRelInfo *rInfo = NULL;
4634  Relation rel = NULL;
4635  TriggerDesc *trigdesc = NULL;
4636  FmgrInfo *finfo = NULL;
4637  Instrumentation *instr = NULL;
4638  TupleTableSlot *slot1 = NULL,
4639  *slot2 = NULL;
4640 
4641  /* Make a local EState if need be */
4642  if (estate == NULL)
4643  {
4644  estate = CreateExecutorState();
4645  local_estate = true;
4646  }
4647 
4648  /* Make a per-tuple memory context for trigger function calls */
4649  per_tuple_context =
4650  AllocSetContextCreate(CurrentMemoryContext,
4651  "AfterTriggerTupleContext",
4652  ALLOCSET_DEFAULT_SIZES);
4653 
4654  for_each_chunk(chunk, *events)
4655  {
4656  AfterTriggerEvent event;
4657  bool all_fired_in_chunk = true;
4658 
4659  for_each_event(event, chunk)
4660  {
4661  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4662 
4663  /*
4664  * Is it one for me to fire?
4665  */
4666  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4667  evtshared->ats_firing_id == firing_id)
4668  {
4669  ResultRelInfo *src_rInfo,
4670  *dst_rInfo;
4671 
4672  /*
4673  * So let's fire it... but first, find the correct relation if
4674  * this is not the same relation as before.
4675  */
4676  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4677  {
4678  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid,
4679  NULL);
4680  rel = rInfo->ri_RelationDesc;
4681  /* Catch calls with insufficient relcache refcounting */
4682  Assert(!RelationHasReferenceCountZero(rel));
4683  trigdesc = rInfo->ri_TrigDesc;
4684  finfo = rInfo->ri_TrigFunctions;
4685  instr = rInfo->ri_TrigInstrument;
4686  if (slot1 != NULL)
4687  {
4688  ExecDropSingleTupleTableSlot(slot1);
4689  ExecDropSingleTupleTableSlot(slot2);
4690  slot1 = slot2 = NULL;
4691  }
4692  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4693  {
4694  slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4695  &TTSOpsMinimalTuple);
4696  slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4697  &TTSOpsMinimalTuple);
4698  }
4699  if (trigdesc == NULL) /* should not happen */
4700  elog(ERROR, "relation %u has no triggers",
4701  evtshared->ats_relid);
4702  }
4703 
4704  /*
4705  * Look up source and destination partition result rels of a
4706  * cross-partition update event.
4707  */
4708  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
4709  AFTER_TRIGGER_CP_UPDATE)
4710  {
4711  Assert(OidIsValid(event->ate_src_part) &&
4712  OidIsValid(event->ate_dst_part));
4713  src_rInfo = ExecGetTriggerResultRel(estate,
4714  event->ate_src_part,
4715  rInfo);
4716  dst_rInfo = ExecGetTriggerResultRel(estate,
4717  event->ate_dst_part,
4718  rInfo);
4719  }
4720  else
4721  src_rInfo = dst_rInfo = rInfo;
4722 
4723  /*
4724  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4725  * still set, so recursive examinations of the event list
4726  * won't try to re-fire it.
4727  */
4728  AfterTriggerExecute(estate, event, rInfo,
4729  src_rInfo, dst_rInfo,
4730  trigdesc, finfo, instr,
4731  per_tuple_context, slot1, slot2);
4732 
4733  /*
4734  * Mark the event as done.
4735  */
4736  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4737  event->ate_flags |= AFTER_TRIGGER_DONE;
4738  }
4739  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4740  {
4741  /* something remains to be done */
4742  all_fired = all_fired_in_chunk = false;
4743  }
4744  }
4745 
4746  /* Clear the chunk if delete_ok and nothing left of interest */
4747  if (delete_ok && all_fired_in_chunk)
4748  {
4749  chunk->freeptr = CHUNK_DATA_START(chunk);
4750  chunk->endfree = chunk->endptr;
4751 
4752  /*
4753  * If it's last chunk, must sync event list's tailfree too. Note
4754  * that delete_ok must NOT be passed as true if there could be
4755  * additional AfterTriggerEventList values pointing at this event
4756  * list, since we'd fail to fix their copies of tailfree.
4757  */
4758  if (chunk == events->tail)
4759  events->tailfree = chunk->freeptr;
4760  }
4761  }
4762  if (slot1 != NULL)
4763  {
4764  ExecDropSingleTupleTableSlot(slot1);
4765  ExecDropSingleTupleTableSlot(slot2);
4766  }
4767 
4768  /* Release working resources */
4769  MemoryContextDelete(per_tuple_context);
4770 
4771  if (local_estate)
4772  {
4773  ExecCloseResultRelations(estate);
4774  ExecResetTupleTable(estate->es_tupleTable, false);
4775  FreeExecutorState(estate);
4776  }
4777 
4778  return all_fired;
4779 }
4780 
4781 
4782 /*
4783  * GetAfterTriggersTableData
4784  *
4785  * Find or create an AfterTriggersTableData struct for the specified
4786  * trigger event (relation + operation type). Ignore existing structs
4787  * marked "closed"; we don't want to put any additional tuples into them,
4788  * nor change their stmt-triggers-fired state.
4789  *
4790  * Note: the AfterTriggersTableData list is allocated in the current
4791  * (sub)transaction's CurTransactionContext. This is OK because
4792  * we don't need it to live past AfterTriggerEndQuery.
4793  */
4794 static AfterTriggersTableData *
4795 GetAfterTriggersTableData(Oid relid, CmdType cmdType)
4796 {
4797  AfterTriggersTableData *table;
4798  AfterTriggersQueryData *qs;
4799  MemoryContext oldcxt;
4800  ListCell *lc;
4801 
4802  /* Caller should have ensured query_depth is OK. */
4803  Assert(afterTriggers.query_depth >= 0 &&
4804  afterTriggers.query_depth < afterTriggers.maxquerydepth);
4805  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4806 
4807  foreach(lc, qs->tables)
4808  {
4809  table = (AfterTriggersTableData *) lfirst(lc);
4810  if (table->relid == relid && table->cmdType == cmdType &&
4811  !table->closed)
4812  return table;
4813  }
4814 
4815  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4816 
4817  table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
4818  table->relid = relid;
4819  table->cmdType = cmdType;
4820  qs->tables = lappend(qs->tables, table);
4821 
4822  MemoryContextSwitchTo(oldcxt);
4823 
4824  return table;
4825 }
4826 
4827 /*
4828  * Returns a TupleTableSlot suitable for holding the tuples to be put
4829  * into AfterTriggersTableData's transition table tuplestores.
4830  */
4831 static TupleTableSlot *
4832 GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
4833  TupleDesc tupdesc)
4834 {
4835  /* Create it if not already done. */
4836  if (!table->storeslot)
4837  {
4838  MemoryContext oldcxt;
4839 
4840  /*
4841  * We need this slot only until AfterTriggerEndQuery, but making it
4842  * last till end-of-subxact is good enough. It'll be freed by
4843  * AfterTriggerFreeQuery(). However, the passed-in tupdesc might have
4844  * a different lifespan, so we'd better make a copy of that.
4845  */
4846  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4847  tupdesc = CreateTupleDescCopy(tupdesc);
4848  table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
4849  MemoryContextSwitchTo(oldcxt);
4850  }
4851 
4852  return table->storeslot;
4853 }
4854 
4855 /*
4856  * MakeTransitionCaptureState
4857  *
4858  * Make a TransitionCaptureState object for the given TriggerDesc, target
4859  * relation, and operation type. The TCS object holds all the state needed
4860  * to decide whether to capture tuples in transition tables.
4861  *
4862  * If there are no triggers in 'trigdesc' that request relevant transition
4863  * tables, then return NULL.
4864  *
4865  * The resulting object can be passed to the ExecAR* functions. When
4866  * dealing with child tables, the caller can set tcs_original_insert_tuple
4867  * to avoid having to reconstruct the original tuple in the root table's
4868  * format.
4869  *
4870  * Note that we copy the flags from a parent table into this struct (rather
4871  * than subsequently using the relation's TriggerDesc directly) so that we can
4872  * use it to control collection of transition tuples from child tables.
4873  *
4874  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4875  * on the same table during one query should share one transition table.
4876  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4877  * looked up using the table OID + CmdType, and are merely referenced by
4878  * the TransitionCaptureState objects we hand out to callers.
4879  */
4880 TransitionCaptureState *
4881 MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
4882 {
4883  TransitionCaptureState *state;
4884  bool need_old_upd,
4885  need_new_upd,
4886  need_old_del,
4887  need_new_ins;
4888  AfterTriggersTableData *table;
4889  MemoryContext oldcxt;
4890  ResourceOwner saveResourceOwner;
4891 
4892  if (trigdesc == NULL)
4893  return NULL;
4894 
4895  /* Detect which table(s) we need. */
4896  switch (cmdType)
4897  {
4898  case CMD_INSERT:
4899  need_old_upd = need_old_del = need_new_upd = false;
4900  need_new_ins = trigdesc->trig_insert_new_table;
4901  break;
4902  case CMD_UPDATE:
4903  need_old_upd = trigdesc->trig_update_old_table;
4904  need_new_upd = trigdesc->trig_update_new_table;
4905  need_old_del = need_new_ins = false;
4906  break;
4907  case CMD_DELETE:
4908  need_old_del = trigdesc->trig_delete_old_table;
4909  need_old_upd = need_new_upd = need_new_ins = false;
4910  break;
4911  case CMD_MERGE:
4912  need_old_upd = trigdesc->trig_update_old_table;
4913  need_new_upd = trigdesc->trig_update_new_table;
4914  need_old_del = trigdesc->trig_delete_old_table;
4915  need_new_ins = trigdesc->trig_insert_new_table;
4916  break;
4917  default:
4918  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4919  /* keep compiler quiet */
4920  need_old_upd = need_new_upd = need_old_del = need_new_ins = false;
4921  break;
4922  }
4923  if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del)
4924  return NULL;
4925 
4926  /* Check state, like AfterTriggerSaveEvent. */
4927  if (afterTriggers.query_depth < 0)
4928  elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4929 
4930  /* Be sure we have enough space to record events at this query depth. */
4931  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4932  AfterTriggerEnlargeQueryState();
4933 
4934  /*
4935  * Find or create an AfterTriggersTableData struct to hold the
4936  * tuplestore(s). If there's a matching struct but it's marked closed,
4937  * ignore it; we need a newer one.
4938  *
4939  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4940  * allocated in the current (sub)transaction's CurTransactionContext, and
4941  * the tuplestores are managed by the (sub)transaction's resource owner.
4942  * This is sufficient lifespan because we do not allow triggers using
4943  * transition tables to be deferrable; they will be fired during
4944  * AfterTriggerEndQuery, after which it's okay to delete the data.
4945  */
4946  table = GetAfterTriggersTableData(relid, cmdType);
4947 
4948  /* Now create required tuplestore(s), if we don't have them already. */
4949  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4950  saveResourceOwner = CurrentResourceOwner;
4951  CurrentResourceOwner = CurTransactionResourceOwner;
4952 
4953  if (need_old_upd && table->old_upd_tuplestore == NULL)
4954  table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4955  if (need_new_upd && table->new_upd_tuplestore == NULL)
4956  table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4957  if (need_old_del && table->old_del_tuplestore == NULL)
4958  table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4959  if (need_new_ins && table->new_ins_tuplestore == NULL)
4960  table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4961 
4962  CurrentResourceOwner = saveResourceOwner;
4963  MemoryContextSwitchTo(oldcxt);
4964 
4965  /* Now build the TransitionCaptureState struct, in caller's context */
4966  state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState));
4967  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4968  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4969  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4970  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4971  state->tcs_private = table;
4972 
4973  return state;
4974 }
4975 
4976 
4977 /* ----------
4978  * AfterTriggerBeginXact()
4979  *
4980  * Called at transaction start (either BEGIN or implicit for single
4981  * statement outside of transaction block).
4982  * ----------
4983  */
4984 void
4985 AfterTriggerBeginXact(void)
4986 {
4987  /*
4988  * Initialize after-trigger state structure to empty
4989  */
4990  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4991  afterTriggers.query_depth = -1;
4992 
4993  /*
4994  * Verify that there is no leftover state remaining. If these assertions
4995  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4996  * up properly.
4997  */
4998  Assert(afterTriggers.state == NULL);
4999  Assert(afterTriggers.query_stack == NULL);
5000  Assert(afterTriggers.maxquerydepth == 0);
5001  Assert(afterTriggers.event_cxt == NULL);
5002  Assert(afterTriggers.events.head == NULL);
5003  Assert(afterTriggers.trans_stack == NULL);
5004  Assert(afterTriggers.maxtransdepth == 0);
5005 }
5006 
5007 
5008 /* ----------
5009  * AfterTriggerBeginQuery()
5010  *
5011  * Called just before we start processing a single query within a
5012  * transaction (or subtransaction). Most of the real work gets deferred
5013  * until somebody actually tries to queue a trigger event.
5014  * ----------
5015  */
5016 void
5017 AfterTriggerBeginQuery(void)
5018 {
5019  /* Increase the query stack depth */
5020  afterTriggers.query_depth++;
5021 }
5022 
5023 
5024 /* ----------
5025  * AfterTriggerEndQuery()
5026  *
5027  * Called after one query has been completely processed. At this time
5028  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
5029  * transfer deferred trigger events to the global deferred-trigger list.
5030  *
5031  * Note that this must be called BEFORE closing down the executor
5032  * with ExecutorEnd, because we make use of the EState's info about
5033  * target relations. Normally it is called from ExecutorFinish.
5034  * ----------
5035  */
5036 void
5037 AfterTriggerEndQuery(EState *estate)
5038 {
5039  AfterTriggersQueryData *qs;
5040 
5041  /* Must be inside a query, too */
5042  Assert(afterTriggers.query_depth >= 0);
5043 
5044  /*
5045  * If we never even got as far as initializing the event stack, there
5046  * certainly won't be any events, so exit quickly.
5047  */
5048  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5049  {
5050  afterTriggers.query_depth--;
5051  return;
5052  }
5053 
5054  /*
5055  * Process all immediate-mode triggers queued by the query, and move the
5056  * deferred ones to the main list of deferred events.
5057  *
5058  * Notice that we decide which ones will be fired, and put the deferred
5059  * ones on the main list, before anything is actually fired. This ensures
5060  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
5061  * IMMEDIATE: all events we have decided to defer will be available for it
5062  * to fire.
5063  *
5064  * We loop in case a trigger queues more events at the same query level.
5065  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
5066  * will instead fire any triggers in a dedicated query level. Foreign key
5067  * enforcement triggers do add to the current query level, thanks to their
5068  * passing fire_triggers = false to SPI_execute_snapshot(). Other
5069  * C-language triggers might do likewise.
5070  *
5071  * If we find no firable events, we don't have to increment
5072  * firing_counter.
5073  */
5074  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
5075 
5076  for (;;)
5077  {
5078  if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
5079  {
5080  CommandId firing_id = afterTriggers.firing_counter++;
5081  AfterTriggerEventChunk *oldtail = qs->events.tail;
5082 
5083  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
5084  break; /* all fired */
5085 
5086  /*
5087  * Firing a trigger could result in query_stack being repalloc'd,
5088  * so we must recalculate qs after each afterTriggerInvokeEvents
5089  * call. Furthermore, it's unsafe to pass delete_ok = true here,
5090  * because that could cause afterTriggerInvokeEvents to try to
5091  * access qs->events after the stack has been repalloc'd.
5092  */
5093  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
5094 
5095  /*
5096  * We'll need to scan the events list again. To reduce the cost
5097  * of doing so, get rid of completely-fired chunks. We know that
5098  * all events were marked IN_PROGRESS or DONE at the conclusion of
5099  * afterTriggerMarkEvents, so any still-interesting events must
5100  * have been added after that, and so must be in the chunk that
5101  * was then the tail chunk, or in later chunks. So, zap all
5102  * chunks before oldtail. This is approximately the same set of
5103  * events we would have gotten rid of by passing delete_ok = true.
5104  */
5105  Assert(oldtail != NULL);
5106  while (qs->events.head != oldtail)
5107  afterTriggerDeleteHeadEventChunk(qs);
5108  }
5109  else
5110  break;
5111  }
5112 
5113  /* Release query-level-local storage, including tuplestores if any */
5114  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
5115 
5116  afterTriggers.query_depth--;
5117 }
5118 
5119 
5120 /*
5121  * AfterTriggerFreeQuery
5122  * Release subsidiary storage for a trigger query level.
5123  * This includes closing down tuplestores.
5124  * Note: it's important for this to be safe if interrupted by an error
5125  * and then called again for the same query level.
5126  */
5127 static void
5128 AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
5129 {
5130  Tuplestorestate *ts;
5131  List *tables;
5132  ListCell *lc;
5133 
5134  /* Drop the trigger events */
5135  afterTriggerFreeEventList(&qs->events);
5136 
5137  /* Drop FDW tuplestore if any */
5138  ts = qs->fdw_tuplestore;
5139  qs->fdw_tuplestore = NULL;
5140  if (ts)
5141  tuplestore_end(ts);
5142 
5143  /* Release per-table subsidiary storage */
5144  tables = qs->tables;
5145  foreach(lc, tables)
5146  {
5147  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
5148 
5149  ts = table->old_upd_tuplestore;
5150  table->old_upd_tuplestore = NULL;
5151  if (ts)
5152  tuplestore_end(ts);
5153  ts = table->new_upd_tuplestore;
5154  table->new_upd_tuplestore = NULL;
5155  if (ts)
5156  tuplestore_end(ts);
5157  ts = table->old_del_tuplestore;
5158  table->old_del_tuplestore = NULL;
5159  if (ts)
5160  tuplestore_end(ts);
5161  ts = table->new_ins_tuplestore;
5162  table->new_ins_tuplestore = NULL;
5163  if (ts)
5164  tuplestore_end(ts);
5165  if (table->storeslot)
5166  {
5167  TupleTableSlot *slot = table->storeslot;
5168 
5169  table->storeslot = NULL;
5170  ExecDropSingleTupleTableSlot(slot);
5171  }
5172  }
5173 
5174  /*
5175  * Now free the AfterTriggersTableData structs and list cells. Reset list
5176  * pointer first; if list_free_deep somehow gets an error, better to leak
5177  * that storage than have an infinite loop.
5178  */
5179  qs->tables = NIL;
5180  list_free_deep(tables);
5181 }
5182 
5183 
5184 /* ----------
5185  * AfterTriggerFireDeferred()
5186  *
5187  * Called just before the current transaction is committed. At this
5188  * time we invoke all pending DEFERRED triggers.
5189  *
5190  * It is possible for other modules to queue additional deferred triggers
5191  * during pre-commit processing; therefore xact.c may have to call this
5192  * multiple times.
5193  * ----------
5194  */
5195 void
5196 AfterTriggerFireDeferred(void)
5197 {
5198  AfterTriggerEventList *events;
5199  bool snap_pushed = false;
5200 
5201  /* Must not be inside a query */
5202  Assert(afterTriggers.query_depth == -1);
5203 
5204  /*
5205  * If there are any triggers to fire, make sure we have set a snapshot for
5206  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
5207  * can't assume ActiveSnapshot is valid on entry.)
5208  */
5209  events = &afterTriggers.events;
5210  if (events->head != NULL)
5211  {
5212  PushActiveSnapshot(GetTransactionSnapshot());
5213  snap_pushed = true;
5214  }
5215 
5216  /*
5217  * Run all the remaining triggers. Loop until they are all gone, in case
5218  * some trigger queues more for us to do.
5219  */
5220  while (afterTriggerMarkEvents(events, NULL, false))
5221  {
5222  CommandId firing_id = afterTriggers.firing_counter++;
5223 
5224  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
5225  break; /* all fired */
5226  }
5227 
5228  /*
5229  * We don't bother freeing the event list, since it will go away anyway
5230  * (and more efficiently than via pfree) in AfterTriggerEndXact.
5231  */
5232 
5233  if (snap_pushed)
5234  PopActiveSnapshot();
5235 }
5236 
5237 
5238 /* ----------
5239  * AfterTriggerEndXact()
5240  *
5241  * The current transaction is finishing.
5242  *
5243  * Any unfired triggers are canceled so we simply throw
5244  * away anything we know.
5245  *
5246  * Note: it is possible for this to be called repeatedly in case of
5247  * error during transaction abort; therefore, do not complain if
5248  * already closed down.
5249  * ----------
5250  */
5251 void
5252 AfterTriggerEndXact(bool isCommit)
5253 {
5254  /*
5255  * Forget the pending-events list.
5256  *
5257  * Since all the info is in TopTransactionContext or children thereof, we
5258  * don't really need to do anything to reclaim memory. However, the
5259  * pending-events list could be large, and so it's useful to discard it as
5260  * soon as possible --- especially if we are aborting because we ran out
5261  * of memory for the list!
5262  */
5263  if (afterTriggers.event_cxt)
5264  {
5265  MemoryContextDelete(afterTriggers.event_cxt);
5266  afterTriggers.event_cxt = NULL;
5267  afterTriggers.events.head = NULL;
5268  afterTriggers.events.tail = NULL;
5269  afterTriggers.events.tailfree = NULL;
5270  }
5271 
5272  /*
5273  * Forget any subtransaction state as well. Since this can't be very
5274  * large, we let the eventual reset of TopTransactionContext free the
5275  * memory instead of doing it here.
5276  */
5277  afterTriggers.trans_stack = NULL;
5278  afterTriggers.maxtransdepth = 0;
5279 
5280 
5281  /*
5282  * Forget the query stack and constraint-related state information. As
5283  * with the subtransaction state information, we don't bother freeing the
5284  * memory here.
5285  */
5286  afterTriggers.query_stack = NULL;
5287  afterTriggers.maxquerydepth = 0;
5288  afterTriggers.state = NULL;
5289 
5290  /* No more afterTriggers manipulation until next transaction starts. */
5291  afterTriggers.query_depth = -1;
5292 }
5293 
5294 /*
5295  * AfterTriggerBeginSubXact()
5296  *
5297  * Start a subtransaction.
5298  */
5299 void
5300 AfterTriggerBeginSubXact(void)
5301 {
5302  int my_level = GetCurrentTransactionNestLevel();
5303 
5304  /*
5305  * Allocate more space in the trans_stack if needed. (Note: because the
5306  * minimum nest level of a subtransaction is 2, we waste the first couple
5307  * entries of the array; not worth the notational effort to avoid it.)
5308  */
5309  while (my_level >= afterTriggers.maxtransdepth)
5310  {
5311  if (afterTriggers.maxtransdepth == 0)
5312  {
5313  /* Arbitrarily initialize for max of 8 subtransaction levels */
5314  afterTriggers.trans_stack = (AfterTriggersTransData *)
5315  MemoryContextAlloc(TopTransactionContext,
5316  8 * sizeof(AfterTriggersTransData));
5317  afterTriggers.maxtransdepth = 8;
5318  }
5319  else
5320  {
5321  /* repalloc will keep the stack in the same context */
5322  int new_alloc = afterTriggers.maxtransdepth * 2;
5323 
5324  afterTriggers.trans_stack = (AfterTriggersTransData *)
5325  repalloc(afterTriggers.trans_stack,
5326  new_alloc * sizeof(AfterTriggersTransData));
5327  afterTriggers.maxtransdepth = new_alloc;
5328  }
5329  }
5330 
5331  /*
5332  * Push the current information into the stack. The SET CONSTRAINTS state
5333  * is not saved until/unless changed. Likewise, we don't make a
5334  * per-subtransaction event context until needed.
5335  */
5336  afterTriggers.trans_stack[my_level].state = NULL;
5337  afterTriggers.trans_stack[my_level].events = afterTriggers.events;
5338  afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
5339  afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
5340 }
5341 
5342 /*
5343  * AfterTriggerEndSubXact()
5344  *
5345  * The current subtransaction is ending.
5346  */
5347 void
5348 AfterTriggerEndSubXact(bool isCommit)
5349 {
5350  int my_level = GetCurrentTransactionNestLevel();
5351  SetConstraintState state;
5352  AfterTriggerEvent event;
5353  AfterTriggerEventChunk *chunk;
5354  CommandId subxact_firing_id;
5355 
5356  /*
5357  * Pop the prior state if needed.
5358  */
5359  if (isCommit)
5360  {
5361  Assert(my_level < afterTriggers.maxtransdepth);
5362  /* If we saved a prior state, we don't need it anymore */
5363  state = afterTriggers.trans_stack[my_level].state;
5364  if (state != NULL)
5365  pfree(state);
5366  /* this avoids double pfree if error later: */
5367  afterTriggers.trans_stack[my_level].state = NULL;
5368  Assert(afterTriggers.query_depth ==
5369  afterTriggers.trans_stack[my_level].query_depth);
5370  }
5371  else
5372  {
5373  /*
5374  * Aborting. It is possible subxact start failed before calling
5375  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
5376  * trans_stack levels that aren't there.
5377  */
5378  if (my_level >= afterTriggers.maxtransdepth)
5379  return;
5380 
5381  /*
5382  * Release query-level storage for queries being aborted, and restore
5383  * query_depth to its pre-subxact value. This assumes that a
5384  * subtransaction will not add events to query levels started in an
5385  * earlier transaction state.
5386  */
5387  while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
5388  {
5389  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
5390  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
5391  afterTriggers.query_depth--;
5392  }
5393  Assert(afterTriggers.query_depth ==
5394  afterTriggers.trans_stack[my_level].query_depth);
5395 
5396  /*
5397  * Restore the global deferred-event list to its former length,
5398  * discarding any events queued by the subxact.
5399  */
5400  afterTriggerRestoreEventList(&afterTriggers.events,
5401  &afterTriggers.trans_stack[my_level].events);
5402 
5403  /*
5404  * Restore the trigger state. If the saved state is NULL, then this
5405  * subxact didn't save it, so it doesn't need restoring.
5406  */
5407  state = afterTriggers.trans_stack[my_level].state;
5408  if (state != NULL)
5409  {
5410  pfree(afterTriggers.state);
5411  afterTriggers.state = state;
5412  }
5413  /* this avoids double pfree if error later: */
5414  afterTriggers.trans_stack[my_level].state = NULL;
5415 
5416  /*
5417  * Scan for any remaining deferred events that were marked DONE or IN
5418  * PROGRESS by this subxact or a child, and un-mark them. We can
5419  * recognize such events because they have a firing ID greater than or
5420  * equal to the firing_counter value we saved at subtransaction start.
5421  * (This essentially assumes that the current subxact includes all
5422  * subxacts started after it.)
5423  */
5424  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
5425  for_each_event_chunk(event, chunk, afterTriggers.events)
5426  {
5427  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5428 
5429  if (event->ate_flags &
5430  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
5431  {
5432  if (evtshared->ats_firing_id >= subxact_firing_id)
5433  event->ate_flags &=
5434  ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
5435  }
5436  }
5437  }
5438 }
5439 
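
The abort path of AfterTriggerEndSubXact combines two rollback ideas: restore the event list to the length recorded at subxact start, and clear DONE / IN PROGRESS marks whose firing ID is at or above the counter value saved at that point, so the surviving events can be fired again by an outer level. A reduced stand-alone sketch of that rule is below; event_t, the EV_* flags and events_rollback are invented for the example and only approximate the real chunked event list.

#include <stddef.h>

#define EV_DONE         0x01
#define EV_IN_PROGRESS  0x02

typedef struct
{
    unsigned flags;         /* completion marks, in the spirit of ate_flags */
    unsigned firing_id;     /* which firing attempt marked this event */
} event_t;

/*
 * Roll back to a savepoint: forget entries appended after saved_len and
 * un-mark entries whose marks were made at or after saved_firing_id.
 */
static void
events_rollback(event_t *events, size_t *len,
                size_t saved_len, unsigned saved_firing_id)
{
    *len = saved_len;
    for (size_t i = 0; i < *len; i++)
    {
        if ((events[i].flags & (EV_DONE | EV_IN_PROGRESS)) != 0 &&
            events[i].firing_id >= saved_firing_id)
            events[i].flags &= ~(EV_DONE | EV_IN_PROGRESS);
    }
}

The firing-ID comparison works only because firing IDs are handed out monotonically, which is what lets the code treat "marked by this subxact or any of its children" as a simple watermark test.
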
5440 /*
5441  * Get the transition table for the given event, depending on whether we are
5442  * processing the old or the new tuple.
5443  */
5444 static Tuplestorestate *
5445 GetAfterTriggersTransitionTable(int event,
5446  TupleTableSlot *oldslot,
5447  TupleTableSlot *newslot,
5448  TransitionCaptureState *transition_capture)
5449 {
5450  Tuplestorestate *tuplestore = NULL;
5451  bool delete_old_table = transition_capture->tcs_delete_old_table;
5452  bool update_old_table = transition_capture->tcs_update_old_table;
5453  bool update_new_table = transition_capture->tcs_update_new_table;
5454  bool insert_new_table = transition_capture->tcs_insert_new_table;
5455 
5456  /*
5457  * For INSERT events NEW should be non-NULL, for DELETE events OLD should
5458  * be non-NULL, whereas for UPDATE events normally both OLD and NEW are
5459  * non-NULL. But for UPDATE events fired for capturing transition tuples
5460  * during UPDATE partition-key row movement, OLD is NULL when the event is
5461  * for a row being inserted, whereas NEW is NULL when the event is for a
5462  * row being deleted.
5463  */
5464  Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5465  TupIsNull(oldslot)));
5466  Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5467  TupIsNull(newslot)));
5468 
5469  if (!TupIsNull(oldslot))
5470  {
5471  Assert(TupIsNull(newslot));
5472  if (event == TRIGGER_EVENT_DELETE && delete_old_table)
5473  tuplestore = transition_capture->tcs_private->old_del_tuplestore;
5474  else if (event == TRIGGER_EVENT_UPDATE && update_old_table)
5475  tuplestore = transition_capture->tcs_private->old_upd_tuplestore;
5476  }
5477  else if (!TupIsNull(newslot))
5478  {
5479  Assert(TupIsNull(oldslot));
5480  if (event == TRIGGER_EVENT_INSERT && insert_new_table)
5481  tuplestore = transition_capture->tcs_private->new_ins_tuplestore;
5482  else if (event == TRIGGER_EVENT_UPDATE && update_new_table)
5483  tuplestore = transition_capture->tcs_private->new_upd_tuplestore;
5484  }
5485 
5486  return tuplestore;
5487 }
5488 
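
GetAfterTriggersTransitionTable is a pure routing step: the event type plus the capture flags decide which of the four tuplestores (old or new image for DELETE, UPDATE and INSERT) receives the row, or none at all. The routing logic alone looks like the following sketch; capture_state, pick_store and the void * stores are invented stand-ins, not the real TransitionCaptureState API.

#include <stdbool.h>
#include <stddef.h>

enum ev_kind { EV_INSERT, EV_UPDATE, EV_DELETE };

typedef struct
{
    bool  capture_old_del;
    bool  capture_old_upd;
    bool  capture_new_ins;
    bool  capture_new_upd;
    void *old_del_store;
    void *old_upd_store;
    void *new_ins_store;
    void *new_upd_store;
} capture_state;

/* Return the store that should receive this row image, or NULL if none. */
static void *
pick_store(const capture_state *cs, enum ev_kind kind, bool is_old_image)
{
    if (is_old_image)
    {
        if (kind == EV_DELETE && cs->capture_old_del)
            return cs->old_del_store;
        if (kind == EV_UPDATE && cs->capture_old_upd)
            return cs->old_upd_store;
    }
    else
    {
        if (kind == EV_INSERT && cs->capture_new_ins)
            return cs->new_ins_store;
        if (kind == EV_UPDATE && cs->capture_new_upd)
            return cs->new_upd_store;
    }
    return NULL;
}
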
5489 /*
5490  * Add the given heap tuple to the given tuplestore, applying the conversion
5491  * map if necessary.
5492  *
5493  * If original_insert_tuple is given, we can add that tuple without conversion.
5494  */
5495 static void
5496 TransitionTableAddTuple(EState *estate,
5497  TransitionCaptureState *transition_capture,
5498  ResultRelInfo *relinfo,
5499  TupleTableSlot *slot,
5500  TupleTableSlot *original_insert_tuple,
5501  Tuplestorestate *tuplestore)
5502 {
5503  TupleConversionMap *map;
5504 
5505  /*
5506  * Nothing needs to be done if we don't have a tuplestore.
5507  */
5508  if (tuplestore == NULL)
5509  return;
5510 
5511  if (original_insert_tuple)
5512  tuplestore_puttupleslot(tuplestore, original_insert_tuple);
5513  else if ((map = ExecGetChildToRootMap(relinfo)) != NULL)
5514  {
5515  AfterTriggersTableData *table = transition_capture->tcs_private;
5516  TupleTableSlot *storeslot;
5517 
5518  storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5519  execute_attr_map_slot(map->attrMap, slot, storeslot);
5520  tuplestore_puttupleslot(tuplestore, storeslot);
5521  }
5522  else
5523  tuplestore_puttupleslot(tuplestore, slot);
5524 }
5525 
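
Before a partition's row is appended to a transition tuplestore, TransitionTableAddTuple converts it into the root table's column layout (via ExecGetChildToRootMap) so that every stored row shares one descriptor. The remapping step itself reduces to an index permutation like the sketch below, where plain ints stand in for datums and remap_row is an invented helper, not the real attribute-map machinery.

#include <stddef.h>

/*
 * Copy one row from child column order into root column order.
 * attr_map[i] holds the 0-based child column that supplies root column i;
 * the names and the int-valued "datums" are illustrative only.
 */
static void
remap_row(const int *child_row, int *root_row,
          const int *attr_map, size_t root_natts)
{
    for (size_t i = 0; i < root_natts; i++)
        root_row[i] = child_row[attr_map[i]];
}
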
5526 /* ----------
5527  * AfterTriggerEnlargeQueryState()
5528  *
5529  * Prepare the necessary state so that we can record AFTER trigger events
5530  * queued by a query. It is allowed to have nested queries within a
5531  * (sub)transaction, so we need to have separate state for each query
5532  * nesting level.
5533  * ----------
5534  */
5535 static void
5536 AfterTriggerEnlargeQueryState(void)
5537 {
5538  int init_depth = afterTriggers.maxquerydepth;
5539 
5540  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
5541 
5542  if (afterTriggers.maxquerydepth == 0)
5543  {
5544  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
5545 
5546  afterTriggers.query_stack = (AfterTriggersQueryData *)
5547  MemoryContextAlloc(TopTransactionContext,
5548  new_alloc * sizeof(AfterTriggersQueryData));
5549  afterTriggers.maxquerydepth = new_alloc;
5550  }
5551  else
5552  {
5553  /* repalloc will keep the stack in the same context */
5554  int old_alloc = afterTriggers.maxquerydepth;
5555  int new_alloc = Max(afterTriggers.query_depth + 1,
5556  old_alloc * 2);
5557 
5558  afterTriggers.query_stack = (AfterTriggersQueryData *)
5559  repalloc(afterTriggers.query_stack,
5560  new_alloc * sizeof(AfterTriggersQueryData));
5561  afterTriggers.maxquerydepth = new_alloc;
5562  }
5563 
5564  /* Initialize new array entries to empty */
5565  while (init_depth < afterTriggers.maxquerydepth)
5566  {