trigger.c
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/indexing.h"
26 #include "catalog/objectaccess.h"
27 #include "catalog/partition.h"
28 #include "catalog/pg_constraint.h"
29 #include "catalog/pg_inherits.h"
30 #include "catalog/pg_proc.h"
31 #include "catalog/pg_trigger.h"
32 #include "catalog/pg_type.h"
33 #include "commands/dbcommands.h"
34 #include "commands/trigger.h"
35 #include "executor/executor.h"
36 #include "miscadmin.h"
37 #include "nodes/bitmapset.h"
38 #include "nodes/makefuncs.h"
39 #include "optimizer/optimizer.h"
40 #include "parser/parse_clause.h"
41 #include "parser/parse_collate.h"
42 #include "parser/parse_func.h"
43 #include "parser/parse_relation.h"
44 #include "partitioning/partdesc.h"
45 #include "pgstat.h"
46 #include "rewrite/rewriteManip.h"
47 #include "storage/lmgr.h"
48 #include "utils/acl.h"
49 #include "utils/builtins.h"
50 #include "utils/fmgroids.h"
51 #include "utils/guc_hooks.h"
52 #include "utils/inval.h"
53 #include "utils/lsyscache.h"
54 #include "utils/memutils.h"
55 #include "utils/plancache.h"
56 #include "utils/rel.h"
57 #include "utils/snapmgr.h"
58 #include "utils/syscache.h"
59 #include "utils/tuplestore.h"
60 
61 
62 /* GUC variables */
63 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
64 
65 /* How many levels deep into trigger execution are we? */
66 static int MyTriggerDepth = 0;
67 
68 /* Local function prototypes */
69 static void renametrig_internal(Relation tgrel, Relation targetrel,
70  HeapTuple trigtup, const char *newname,
71  const char *expected_name);
72 static void renametrig_partition(Relation tgrel, Oid partitionId,
73  Oid parentTriggerOid, const char *newname,
74  const char *expected_name);
75 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
76 static bool GetTupleForTrigger(EState *estate,
77  EPQState *epqstate,
78  ResultRelInfo *relinfo,
79  ItemPointer tid,
80  LockTupleMode lockmode,
81  TupleTableSlot *oldslot,
82  TupleTableSlot **epqslot,
83  TM_Result *tmresultp,
84  TM_FailureData *tmfdp);
85 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
86  Trigger *trigger, TriggerEvent event,
87  Bitmapset *modifiedCols,
88  TupleTableSlot *oldslot, TupleTableSlot *newslot);
89 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
90  int tgindx,
91  FmgrInfo *finfo,
92  Instrumentation *instr,
93  MemoryContext per_tuple_context);
94 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
95  ResultRelInfo *src_partinfo,
96  ResultRelInfo *dst_partinfo,
97  int event, bool row_trigger,
98  TupleTableSlot *oldslot, TupleTableSlot *newslot,
99  List *recheckIndexes, Bitmapset *modifiedCols,
100  TransitionCaptureState *transition_capture,
101  bool is_crosspart_update);
102 static void AfterTriggerEnlargeQueryState(void);
103 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
104 
105 
106 /*
107  * Create a trigger. Returns the address of the created trigger.
108  *
109  * queryString is the source text of the CREATE TRIGGER command.
110  * This must be supplied if a whenClause is specified, else it can be NULL.
111  *
112  * relOid, if nonzero, is the relation on which the trigger should be
113  * created. If zero, the name provided in the statement will be looked up.
114  *
115  * refRelOid, if nonzero, is the relation to which the constraint trigger
116  * refers. If zero, the constraint relation name provided in the statement
117  * will be looked up as needed.
118  *
119  * constraintOid, if nonzero, says that this trigger is being created
120  * internally to implement that constraint. A suitable pg_depend entry will
121  * be made to link the trigger to that constraint. constraintOid is zero when
122  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
123  * TRIGGER, we build a pg_constraint entry internally.)
124  *
125  * indexOid, if nonzero, is the OID of an index associated with the constraint.
126  * We do nothing with this except store it into pg_trigger.tgconstrindid;
127  * but when creating a trigger for a deferrable unique constraint on a
128  * partitioned table, its children are looked up. Note we don't cope with
129  * invalid indexes in that case.
130  *
131  * funcoid, if nonzero, is the OID of the function to invoke. When this is
132  * given, stmt->funcname is ignored.
133  *
134  * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
135  * if that trigger is dropped, this one should be too. There are two cases
136  * when a nonzero value is passed for this: 1) when this function recurses to
137  * create the trigger on partitions, 2) when creating child foreign key
138  * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers().
139  *
140  * If whenClause is passed, it is an already-transformed expression for
141  * WHEN. In this case, we ignore any that may come in stmt->whenClause.
142  *
143  * If isInternal is true then this is an internally-generated trigger.
144  * This argument sets the tgisinternal field of the pg_trigger entry, and
145  * if true causes us to modify the given trigger name to ensure uniqueness.
146  *
147  * When isInternal is not true we require ACL_TRIGGER permissions on the
148  * relation, as well as ACL_EXECUTE on the trigger function. For internal
149  * triggers the caller must apply any required permission checks.
150  *
151  * When called on partitioned tables, this function recurses to create the
152  * trigger on all the partitions, except if isInternal is true, in which
153  * case caller is expected to execute recursion on its own. in_partition
154  * indicates such a recursive call; outside callers should pass "false"
155  * (but see CloneRowTriggersToPartition).
156  */
157 ObjectAddress
158 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
159  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
160  Oid funcoid, Oid parentTriggerOid, Node *whenClause,
161  bool isInternal, bool in_partition)
162 {
163  return
164  CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
165  constraintOid, indexOid, funcoid,
166  parentTriggerOid, whenClause, isInternal,
167  in_partition, TRIGGER_FIRES_ON_ORIGIN);
168 }
169 
170 /*
171  * Like the above; additionally the firing condition
172  * (always/origin/replica/disabled) can be specified.
173  */
174 ObjectAddress
175 CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
176  Oid relOid, Oid refRelOid, Oid constraintOid,
177  Oid indexOid, Oid funcoid, Oid parentTriggerOid,
178  Node *whenClause, bool isInternal, bool in_partition,
179  char trigger_fires_when)
180 {
181  int16 tgtype;
182  int ncolumns;
183  int16 *columns;
184  int2vector *tgattr;
185  List *whenRtable;
186  char *qual;
187  Datum values[Natts_pg_trigger];
188  bool nulls[Natts_pg_trigger];
189  Relation rel;
190  AclResult aclresult;
191  Relation tgrel;
192  Relation pgrel;
193  HeapTuple tuple = NULL;
194  Oid funcrettype;
195  Oid trigoid = InvalidOid;
196  char internaltrigname[NAMEDATALEN];
197  char *trigname;
198  Oid constrrelid = InvalidOid;
199  ObjectAddress myself,
200  referenced;
201  char *oldtablename = NULL;
202  char *newtablename = NULL;
203  bool partition_recurse;
204  bool trigger_exists = false;
205  Oid existing_constraint_oid = InvalidOid;
206  bool existing_isInternal = false;
207  bool existing_isClone = false;
208 
209  if (OidIsValid(relOid))
210  rel = table_open(relOid, ShareRowExclusiveLock);
211  else
212  rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
213 
214  /*
215  * Triggers must be on tables or views, and there are additional
216  * relation-type-specific restrictions.
217  */
218  if (rel->rd_rel->relkind == RELKIND_RELATION)
219  {
220  /* Tables can't have INSTEAD OF triggers */
221  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
222  stmt->timing != TRIGGER_TYPE_AFTER)
223  ereport(ERROR,
224  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
225  errmsg("\"%s\" is a table",
226  RelationGetRelationName(rel)),
227  errdetail("Tables cannot have INSTEAD OF triggers.")));
228  }
229  else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
230  {
231  /* Partitioned tables can't have INSTEAD OF triggers */
232  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
233  stmt->timing != TRIGGER_TYPE_AFTER)
234  ereport(ERROR,
235  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
236  errmsg("\"%s\" is a table",
237  RelationGetRelationName(rel)),
238  errdetail("Tables cannot have INSTEAD OF triggers.")));
239 
240  /*
241  * FOR EACH ROW triggers have further restrictions
242  */
243  if (stmt->row)
244  {
245  /*
246  * Disallow use of transition tables.
247  *
248  * Note that we have another restriction about transition tables
249  * in partitions; search for 'has_superclass' below for an
250  * explanation. The check here is just to protect from the fact
251  * that if we allowed it here, the creation would succeed for a
252  * partitioned table with no partitions, but would be blocked by
253  * the other restriction when the first partition was created,
254  * which is very unfriendly behavior.
255  */
256  if (stmt->transitionRels != NIL)
257  ereport(ERROR,
258  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
259  errmsg("\"%s\" is a partitioned table",
260  RelationGetRelationName(rel)),
261  errdetail("ROW triggers with transition tables are not supported on partitioned tables.")));
262  }
263  }
264  else if (rel->rd_rel->relkind == RELKIND_VIEW)
265  {
266  /*
267  * Views can have INSTEAD OF triggers (which we check below are
268  * row-level), or statement-level BEFORE/AFTER triggers.
269  */
270  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
271  ereport(ERROR,
272  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
273  errmsg("\"%s\" is a view",
274  RelationGetRelationName(rel)),
275  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
276  /* Disallow TRUNCATE triggers on VIEWs */
277  if (TRIGGER_FOR_TRUNCATE(stmt->events))
278  ereport(ERROR,
279  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
280  errmsg("\"%s\" is a view",
281  RelationGetRelationName(rel)),
282  errdetail("Views cannot have TRUNCATE triggers.")));
283  }
284  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
285  {
286  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
287  stmt->timing != TRIGGER_TYPE_AFTER)
288  ereport(ERROR,
289  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
290  errmsg("\"%s\" is a foreign table",
291  RelationGetRelationName(rel)),
292  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
293 
294  /*
295  * We disallow constraint triggers to protect the assumption that
296  * triggers on FKs can't be deferred. See notes with AfterTriggers
297  * data structures, below.
298  */
299  if (stmt->isconstraint)
300  ereport(ERROR,
301  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
302  errmsg("\"%s\" is a foreign table",
303  RelationGetRelationName(rel)),
304  errdetail("Foreign tables cannot have constraint triggers.")));
305  }
306  else
307  ereport(ERROR,
308  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
309  errmsg("relation \"%s\" cannot have triggers",
310  RelationGetRelationName(rel)),
311  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
312 
313  if (!allowSystemTableMods && IsSystemRelation(rel))
314  ereport(ERROR,
315  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
316  errmsg("permission denied: \"%s\" is a system catalog",
317  RelationGetRelationName(rel))));
318 
319  if (stmt->isconstraint)
320  {
321  /*
322  * We must take a lock on the target relation to protect against
323  * concurrent drop. It's not clear that AccessShareLock is strong
324  * enough, but we certainly need at least that much... otherwise, we
325  * might end up creating a pg_constraint entry referencing a
326  * nonexistent table.
327  */
328  if (OidIsValid(refRelOid))
329  {
330  LockRelationOid(refRelOid, AccessShareLock);
331  constrrelid = refRelOid;
332  }
333  else if (stmt->constrrel != NULL)
334  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
335  false);
336  }
337 
338  /* permission checks */
339  if (!isInternal)
340  {
341  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
342  ACL_TRIGGER);
343  if (aclresult != ACLCHECK_OK)
344  aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
345  RelationGetRelationName(rel));
346 
346 
347  if (OidIsValid(constrrelid))
348  {
349  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
350  ACL_TRIGGER);
351  if (aclresult != ACLCHECK_OK)
352  aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
353  get_rel_name(constrrelid));
354  }
355  }
356 
357  /*
358  * When called on a partitioned table to create a FOR EACH ROW trigger
359  * that's not internal, we create one trigger for each partition, too.
360  *
361  * For that, we'd better hold lock on all of them ahead of time.
362  */
363  partition_recurse = !isInternal && stmt->row &&
364  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
365  if (partition_recurse)
366  list_free(find_all_inheritors(RelationGetRelid(rel),
367  ShareRowExclusiveLock, NULL));
368 
369  /* Compute tgtype */
370  TRIGGER_CLEAR_TYPE(tgtype);
371  if (stmt->row)
372  TRIGGER_SETT_ROW(tgtype);
373  tgtype |= stmt->timing;
374  tgtype |= stmt->events;
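 /*
  * For example, BEFORE INSERT OR UPDATE ... FOR EACH ROW ends up with
  * tgtype = TRIGGER_TYPE_ROW | TRIGGER_TYPE_BEFORE |
  *          TRIGGER_TYPE_INSERT | TRIGGER_TYPE_UPDATE.
  */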
375 
376  /* Disallow ROW-level TRUNCATE triggers */
377  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
378  ereport(ERROR,
379  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
380  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
381 
382  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
383  if (TRIGGER_FOR_INSTEAD(tgtype))
384  {
385  if (!TRIGGER_FOR_ROW(tgtype))
386  ereport(ERROR,
387  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
388  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
389  if (stmt->whenClause)
390  ereport(ERROR,
391  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
392  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
393  if (stmt->columns != NIL)
394  ereport(ERROR,
395  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
396  errmsg("INSTEAD OF triggers cannot have column lists")));
397  }
398 
399  /*
400  * We don't yet support naming ROW transition variables, but the parser
401  * recognizes the syntax so we can give a nicer message here.
402  *
403  * Per standard, REFERENCING TABLE names are only allowed on AFTER
404  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
405  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
406  * only allowed once. Per standard, OLD may not be specified when
407  * creating a trigger only for INSERT, and NEW may not be specified when
408  * creating a trigger only for DELETE.
409  *
410  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
411  * reference both ROW and TABLE transition data.
412  */
413  if (stmt->transitionRels != NIL)
414  {
415  List *varList = stmt->transitionRels;
416  ListCell *lc;
417 
418  foreach(lc, varList)
419  {
420  TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
421 
422  if (!(tt->isTable))
423  ereport(ERROR,
424  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
425  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
426  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
427 
428  /*
429  * Because of the above test, we omit further ROW-related testing
430  * below. If we later allow naming OLD and NEW ROW variables,
431  * adjustments will be needed below.
432  */
433 
434  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
435  ereport(ERROR,
436  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
437  errmsg("\"%s\" is a foreign table",
438  RelationGetRelationName(rel)),
439  errdetail("Triggers on foreign tables cannot have transition tables.")));
440 
441  if (rel->rd_rel->relkind == RELKIND_VIEW)
442  ereport(ERROR,
443  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
444  errmsg("\"%s\" is a view",
445  RelationGetRelationName(rel)),
446  errdetail("Triggers on views cannot have transition tables.")));
447 
448  /*
449  * We currently don't allow row-level triggers with transition
450  * tables on partition or inheritance children. Such triggers
451  * would somehow need to see tuples converted to the format of the
452  * table they're attached to, and it's not clear which subset of
453  * tuples each child should see. See also the prohibitions in
454  * ATExecAttachPartition() and ATExecAddInherit().
455  */
456  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
457  {
458  /* Use appropriate error message. */
459  if (rel->rd_rel->relispartition)
460  ereport(ERROR,
461  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
462  errmsg("ROW triggers with transition tables are not supported on partitions")));
463  else
464  ereport(ERROR,
465  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
466  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
467  }
468 
469  if (stmt->timing != TRIGGER_TYPE_AFTER)
470  ereport(ERROR,
471  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
472  errmsg("transition table name can only be specified for an AFTER trigger")));
473 
474  if (TRIGGER_FOR_TRUNCATE(tgtype))
475  ereport(ERROR,
476  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
477  errmsg("TRUNCATE triggers with transition tables are not supported")));
478 
479  /*
480  * We currently don't allow multi-event triggers ("INSERT OR
481  * UPDATE") with transition tables, because it's not clear how to
482  * handle INSERT ... ON CONFLICT statements which can fire both
483  * INSERT and UPDATE triggers. We show the inserted tuples to
484  * INSERT triggers and the updated tuples to UPDATE triggers, but
485  * it's not yet clear what INSERT OR UPDATE trigger should see.
486  * This restriction could be lifted if we can decide on the right
487  * semantics in a later release.
488  */
489  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
490  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
491  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
492  ereport(ERROR,
493  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
494  errmsg("transition tables cannot be specified for triggers with more than one event")));
495 
496  /*
497  * We currently don't allow column-specific triggers with
498  * transition tables. Per spec, that seems to require
499  * accumulating separate transition tables for each combination of
500  * columns, which is a lot of work for a rather marginal feature.
501  */
502  if (stmt->columns != NIL)
503  ereport(ERROR,
504  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
505  errmsg("transition tables cannot be specified for triggers with column lists")));
506 
507  /*
508  * We disallow constraint triggers with transition tables, to
509  * protect the assumption that such triggers can't be deferred.
510  * See notes with AfterTriggers data structures, below.
511  *
512  * Currently this is enforced by the grammar, so just Assert here.
513  */
514  Assert(!stmt->isconstraint);
515 
516  if (tt->isNew)
517  {
518  if (!(TRIGGER_FOR_INSERT(tgtype) ||
519  TRIGGER_FOR_UPDATE(tgtype)))
520  ereport(ERROR,
521  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
522  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
523 
524  if (newtablename != NULL)
525  ereport(ERROR,
526  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
527  errmsg("NEW TABLE cannot be specified multiple times")));
528 
529  newtablename = tt->name;
530  }
531  else
532  {
533  if (!(TRIGGER_FOR_DELETE(tgtype) ||
534  TRIGGER_FOR_UPDATE(tgtype)))
535  ereport(ERROR,
536  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
537  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
538 
539  if (oldtablename != NULL)
540  ereport(ERROR,
541  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
542  errmsg("OLD TABLE cannot be specified multiple times")));
543 
544  oldtablename = tt->name;
545  }
546  }
547 
548  if (newtablename != NULL && oldtablename != NULL &&
549  strcmp(newtablename, oldtablename) == 0)
550  ereport(ERROR,
551  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
552  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
553  }
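 /*
  * Taken together, these checks accept, for example,
  *     CREATE TRIGGER t AFTER UPDATE ON tab
  *         REFERENCING OLD TABLE AS ot NEW TABLE AS nt
  *         FOR EACH ROW EXECUTE FUNCTION fn();
  * (with fn any suitable trigger function), while rejecting REFERENCING
  * clauses on non-AFTER, TRUNCATE, multi-event, or column-specific
  * triggers, and duplicate or identically named OLD/NEW TABLE entries.
  */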
554 
555  /*
556  * Parse the WHEN clause, if any, and if we weren't passed an
557  * already-transformed one.
558  *
559  * Note that as a side effect, we fill whenRtable when parsing. If we got
560  * an already parsed clause, this does not occur, which is what we want --
561  * no point in adding redundant dependencies below.
562  */
563  if (!whenClause && stmt->whenClause)
564  {
565  ParseState *pstate;
566  ParseNamespaceItem *nsitem;
567  List *varList;
568  ListCell *lc;
569 
570  /* Set up a pstate to parse with */
571  pstate = make_parsestate(NULL);
572  pstate->p_sourcetext = queryString;
573 
574  /*
575  * Set up nsitems for OLD and NEW references.
576  *
577  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
578  */
579  nsitem = addRangeTableEntryForRelation(pstate, rel,
580  AccessShareLock,
581  makeAlias("old", NIL),
582  false, false);
583  addNSItemToQuery(pstate, nsitem, false, true, true);
584  nsitem = addRangeTableEntryForRelation(pstate, rel,
585  AccessShareLock,
586  makeAlias("new", NIL),
587  false, false);
588  addNSItemToQuery(pstate, nsitem, false, true, true);
589 
590  /* Transform expression. Copy to be sure we don't modify original */
591  whenClause = transformWhereClause(pstate,
592  copyObject(stmt->whenClause),
593  EXPR_KIND_TRIGGER_WHEN,
594  "WHEN");
595  /* we have to fix its collations too */
596  assign_expr_collations(pstate, whenClause);
597 
598  /*
599  * Check for disallowed references to OLD/NEW.
600  *
601  * NB: pull_var_clause is okay here only because we don't allow
602  * subselects in WHEN clauses; it would fail to examine the contents
603  * of subselects.
604  */
605  varList = pull_var_clause(whenClause, 0);
606  foreach(lc, varList)
607  {
608  Var *var = (Var *) lfirst(lc);
609 
610  switch (var->varno)
611  {
612  case PRS2_OLD_VARNO:
613  if (!TRIGGER_FOR_ROW(tgtype))
614  ereport(ERROR,
615  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
616  errmsg("statement trigger's WHEN condition cannot reference column values"),
617  parser_errposition(pstate, var->location)));
618  if (TRIGGER_FOR_INSERT(tgtype))
619  ereport(ERROR,
620  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
621  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
622  parser_errposition(pstate, var->location)));
623  /* system columns are okay here */
624  break;
625  case PRS2_NEW_VARNO:
626  if (!TRIGGER_FOR_ROW(tgtype))
627  ereport(ERROR,
628  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
629  errmsg("statement trigger's WHEN condition cannot reference column values"),
630  parser_errposition(pstate, var->location)));
631  if (TRIGGER_FOR_DELETE(tgtype))
632  ereport(ERROR,
633  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
634  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
635  parser_errposition(pstate, var->location)));
636  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
637  ereport(ERROR,
638  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
639  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
640  parser_errposition(pstate, var->location)));
641  if (TRIGGER_FOR_BEFORE(tgtype) &&
642  var->varattno == 0 &&
643  RelationGetDescr(rel)->constr &&
644  RelationGetDescr(rel)->constr->has_generated_stored)
645  ereport(ERROR,
646  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
647  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
648  errdetail("A whole-row reference is used and the table contains generated columns."),
649  parser_errposition(pstate, var->location)));
650  if (TRIGGER_FOR_BEFORE(tgtype) &&
651  var->varattno > 0 &&
652  TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
653  ereport(ERROR,
654  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
655  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
656  errdetail("Column \"%s\" is a generated column.",
657  NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
658  parser_errposition(pstate, var->location)));
659  break;
660  default:
661  /* can't happen without add_missing_from, so just elog */
662  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
663  break;
664  }
665  }
666 
667  /* we'll need the rtable for recordDependencyOnExpr */
668  whenRtable = pstate->p_rtable;
669 
670  qual = nodeToString(whenClause);
671 
672  free_parsestate(pstate);
673  }
674  else if (!whenClause)
675  {
676  whenClause = NULL;
677  whenRtable = NIL;
678  qual = NULL;
679  }
680  else
681  {
682  qual = nodeToString(whenClause);
683  whenRtable = NIL;
684  }
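 /*
  * At this point qual is the nodeToString() form to store in tgqual (or
  * NULL), and whenRtable is non-NIL only when the clause was parsed just
  * above, so that dependency recording below can see the OLD/NEW range
  * table entries we built.
  */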
685 
686  /*
687  * Find and validate the trigger function.
688  */
689  if (!OidIsValid(funcoid))
690  funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
691  if (!isInternal)
692  {
693  aclresult = object_aclcheck(ProcedureRelationId, funcoid, GetUserId(), ACL_EXECUTE);
694  if (aclresult != ACLCHECK_OK)
695  aclcheck_error(aclresult, OBJECT_FUNCTION,
696  NameListToString(stmt->funcname));
697  }
698  funcrettype = get_func_rettype(funcoid);
699  if (funcrettype != TRIGGEROID)
700  ereport(ERROR,
701  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
702  errmsg("function %s must return type %s",
703  NameListToString(stmt->funcname), "trigger")));
704 
705  /*
706  * Scan pg_trigger to see if there is already a trigger of the same name.
707  * Skip this for internally generated triggers, since we'll modify the
708  * name to be unique below.
709  *
710  * NOTE that this is cool only because we have ShareRowExclusiveLock on
711  * the relation, so the trigger set won't be changing underneath us.
712  */
713  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
714  if (!isInternal)
715  {
716  ScanKeyData skeys[2];
717  SysScanDesc tgscan;
718 
719  ScanKeyInit(&skeys[0],
720  Anum_pg_trigger_tgrelid,
721  BTEqualStrategyNumber, F_OIDEQ,
722  ObjectIdGetDatum(RelationGetRelid(rel)));
723 
724  ScanKeyInit(&skeys[1],
725  Anum_pg_trigger_tgname,
726  BTEqualStrategyNumber, F_NAMEEQ,
727  CStringGetDatum(stmt->trigname));
728 
729  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
730  NULL, 2, skeys);
731 
732  /* There should be at most one matching tuple */
733  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
734  {
735  Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
736 
737  trigoid = oldtrigger->oid;
738  existing_constraint_oid = oldtrigger->tgconstraint;
739  existing_isInternal = oldtrigger->tgisinternal;
740  existing_isClone = OidIsValid(oldtrigger->tgparentid);
741  trigger_exists = true;
742  /* copy the tuple to use in CatalogTupleUpdate() */
743  tuple = heap_copytuple(tuple);
744  }
745  systable_endscan(tgscan);
746  }
747 
748  if (!trigger_exists)
749  {
750  /* Generate the OID for the new trigger. */
751  trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
752  Anum_pg_trigger_oid);
753  }
754  else
755  {
756  /*
757  * If OR REPLACE was specified, we'll replace the old trigger;
758  * otherwise complain about the duplicate name.
759  */
760  if (!stmt->replace)
761  ereport(ERROR,
762  (errcode(ERRCODE_DUPLICATE_OBJECT),
763  errmsg("trigger \"%s\" for relation \"%s\" already exists",
764  stmt->trigname, RelationGetRelationName(rel))));
765 
766  /*
767  * An internal trigger or a child trigger (isClone) cannot be replaced
768  * by a user-defined trigger. However, skip this test when
769  * in_partition, because then we're recursing from a partitioned table
770  * and the check was made at the parent level.
771  */
772  if ((existing_isInternal || existing_isClone) &&
773  !isInternal && !in_partition)
774  ereport(ERROR,
775  (errcode(ERRCODE_DUPLICATE_OBJECT),
776  errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger",
777  stmt->trigname, RelationGetRelationName(rel))));
778 
779  /*
780  * It is not allowed to replace with a constraint trigger; gram.y
781  * should have enforced this already.
782  */
783  Assert(!stmt->isconstraint);
784 
785  /*
786  * It is not allowed to replace an existing constraint trigger,
787  * either. (The reason for these restrictions is partly that it seems
788  * difficult to deal with pending trigger events in such cases, and
789  * partly that the command might imply changing the constraint's
790  * properties as well, which doesn't seem nice.)
791  */
792  if (OidIsValid(existing_constraint_oid))
793  ereport(ERROR,
794  (errcode(ERRCODE_DUPLICATE_OBJECT),
795  errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
796  stmt->trigname, RelationGetRelationName(rel))));
797  }
798 
799  /*
800  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
801  * corresponding pg_constraint entry.
802  */
803  if (stmt->isconstraint && !OidIsValid(constraintOid))
804  {
805  /* Internal callers should have made their own constraints */
806  Assert(!isInternal);
807  constraintOid = CreateConstraintEntry(stmt->trigname,
808  RelationGetNamespace(rel),
809  CONSTRAINT_TRIGGER,
810  stmt->deferrable,
811  stmt->initdeferred,
812  true,
813  InvalidOid, /* no parent */
814  RelationGetRelid(rel),
815  NULL, /* no conkey */
816  0,
817  0,
818  InvalidOid, /* no domain */
819  InvalidOid, /* no index */
820  InvalidOid, /* no foreign key */
821  NULL,
822  NULL,
823  NULL,
824  NULL,
825  0,
826  ' ',
827  ' ',
828  NULL,
829  0,
830  ' ',
831  NULL, /* no exclusion */
832  NULL, /* no check constraint */
833  NULL,
834  true, /* islocal */
835  0, /* inhcount */
836  true, /* noinherit */
837  false, /* conperiod */
838  isInternal); /* is_internal */
839  }
840 
841  /*
842  * If trigger is internally generated, modify the provided trigger name to
843  * ensure uniqueness by appending the trigger OID. (Callers will usually
844  * supply a simple constant trigger name in these cases.)
845  */
846  if (isInternal)
847  {
848  snprintf(internaltrigname, sizeof(internaltrigname),
849  "%s_%u", stmt->trigname, trigoid);
850  trigname = internaltrigname;
851  }
852  else
853  {
854  /* user-defined trigger; use the specified trigger name as-is */
855  trigname = stmt->trigname;
856  }
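 /*
  * For example, an internally created FK action trigger passed the constant
  * name "RI_ConstraintTrigger_a" is stored as "RI_ConstraintTrigger_a_<trigoid>".
  */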
857 
858  /*
859  * Build the new pg_trigger tuple.
860  */
861  memset(nulls, false, sizeof(nulls));
862 
863  values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
864  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
865  values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
866  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
867  CStringGetDatum(trigname));
868  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
869  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
870  values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
871  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
872  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
873  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
874  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
875  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
876  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
877 
878  if (stmt->args)
879  {
880  ListCell *le;
881  char *args;
882  int16 nargs = list_length(stmt->args);
883  int len = 0;
884 
885  foreach(le, stmt->args)
886  {
887  char *ar = strVal(lfirst(le));
888 
889  len += strlen(ar) + 4;
890  for (; *ar; ar++)
891  {
892  if (*ar == '\\')
893  len++;
894  }
895  }
896  args = (char *) palloc(len + 1);
897  args[0] = '\0';
898  foreach(le, stmt->args)
899  {
900  char *s = strVal(lfirst(le));
901  char *d = args + strlen(args);
902 
903  while (*s)
904  {
905  if (*s == '\\')
906  *d++ = '\\';
907  *d++ = *s++;
908  }
909  strcpy(d, "\\000");
910  }
911  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
912  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
913  CStringGetDatum(args));
914  }
915  else
916  {
917  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
918  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
919  CStringGetDatum(""));
920  }
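 /*
  * Illustration of the encoding above: the argument list ('a\b', 'c') is
  * handed to byteain as the text "a\\b\000c\000" -- each backslash doubled
  * and each argument followed by a literal "\000", which byteain converts
  * back to an embedded NUL byte separating the arguments within tgargs.
  */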
921 
922  /* build column number array if it's a column-specific trigger */
923  ncolumns = list_length(stmt->columns);
924  if (ncolumns == 0)
925  columns = NULL;
926  else
927  {
928  ListCell *cell;
929  int i = 0;
930 
931  columns = (int16 *) palloc(ncolumns * sizeof(int16));
932  foreach(cell, stmt->columns)
933  {
934  char *name = strVal(lfirst(cell));
935  int16 attnum;
936  int j;
937 
938  /* Lookup column name. System columns are not allowed */
939  attnum = attnameAttNum(rel, name, false);
940  if (attnum == InvalidAttrNumber)
941  ereport(ERROR,
942  (errcode(ERRCODE_UNDEFINED_COLUMN),
943  errmsg("column \"%s\" of relation \"%s\" does not exist",
944  name, RelationGetRelationName(rel))));
945 
946  /* Check for duplicates */
947  for (j = i - 1; j >= 0; j--)
948  {
949  if (columns[j] == attnum)
950  ereport(ERROR,
951  (errcode(ERRCODE_DUPLICATE_COLUMN),
952  errmsg("column \"%s\" specified more than once",
953  name)));
954  }
955 
956  columns[i++] = attnum;
957  }
958  }
959  tgattr = buildint2vector(columns, ncolumns);
960  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
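 /*
  * tgattr holds the attribute numbers from the UPDATE OF column list; an
  * empty int2vector means the trigger is not column-specific.
  */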
961 
962  /* set tgqual if trigger has WHEN clause */
963  if (qual)
964  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
965  else
966  nulls[Anum_pg_trigger_tgqual - 1] = true;
967 
968  if (oldtablename)
969  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
970  CStringGetDatum(oldtablename));
971  else
972  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
973  if (newtablename)
974  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
975  CStringGetDatum(newtablename));
976  else
977  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
978 
979  /*
980  * Insert or replace tuple in pg_trigger.
981  */
982  if (!trigger_exists)
983  {
984  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
985  CatalogTupleInsert(tgrel, tuple);
986  }
987  else
988  {
989  HeapTuple newtup;
990 
991  newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
992  CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
993  heap_freetuple(newtup);
994  }
995 
996  heap_freetuple(tuple); /* free either original or new tuple */
997  table_close(tgrel, RowExclusiveLock);
998 
999  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
1000  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
1001  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
1002  if (oldtablename)
1003  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1004  if (newtablename)
1005  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1006 
1007  /*
1008  * Update relation's pg_class entry if necessary; and if not, send an SI
1009  * message to make other backends (and this one) rebuild relcache entries.
1010  */
1011  pgrel = table_open(RelationRelationId, RowExclusiveLock);
1012  tuple = SearchSysCacheCopy1(RELOID,
1013  ObjectIdGetDatum(RelationGetRelid(rel)));
1014  if (!HeapTupleIsValid(tuple))
1015  elog(ERROR, "cache lookup failed for relation %u",
1016  RelationGetRelid(rel));
1017  if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1018  {
1019  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1020 
1021  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1022 
1023  CommandCounterIncrement();
1024  }
1025  else
1026  CacheInvalidateRelcacheByTuple(tuple);
1027 
1028  heap_freetuple(tuple);
1029  table_close(pgrel, RowExclusiveLock);
1030 
1031  /*
1032  * If we're replacing a trigger, flush all the old dependencies before
1033  * recording new ones.
1034  */
1035  if (trigger_exists)
1036  deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1037 
1038  /*
1039  * Record dependencies for trigger. Always place a normal dependency on
1040  * the function.
1041  */
1042  myself.classId = TriggerRelationId;
1043  myself.objectId = trigoid;
1044  myself.objectSubId = 0;
1045 
1046  referenced.classId = ProcedureRelationId;
1047  referenced.objectId = funcoid;
1048  referenced.objectSubId = 0;
1049  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1050 
1051  if (isInternal && OidIsValid(constraintOid))
1052  {
1053  /*
1054  * Internally-generated trigger for a constraint, so make it an
1055  * internal dependency of the constraint. We can skip depending on
1056  * the relation(s), as there'll be an indirect dependency via the
1057  * constraint.
1058  */
1059  referenced.classId = ConstraintRelationId;
1060  referenced.objectId = constraintOid;
1061  referenced.objectSubId = 0;
1062  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1063  }
1064  else
1065  {
1066  /*
1067  * User CREATE TRIGGER, so place dependencies. We make trigger be
1068  * auto-dropped if its relation is dropped or if the FK relation is
1069  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1070  */
1071  referenced.classId = RelationRelationId;
1072  referenced.objectId = RelationGetRelid(rel);
1073  referenced.objectSubId = 0;
1074  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1075 
1076  if (OidIsValid(constrrelid))
1077  {
1078  referenced.classId = RelationRelationId;
1079  referenced.objectId = constrrelid;
1080  referenced.objectSubId = 0;
1081  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1082  }
1083  /* Not possible to have an index dependency in this case */
1084  Assert(!OidIsValid(indexOid));
1085 
1086  /*
1087  * If it's a user-specified constraint trigger, make the constraint
1088  * internally dependent on the trigger instead of vice versa.
1089  */
1090  if (OidIsValid(constraintOid))
1091  {
1092  referenced.classId = ConstraintRelationId;
1093  referenced.objectId = constraintOid;
1094  referenced.objectSubId = 0;
1095  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1096  }
1097 
1098  /*
1099  * If it's a partition trigger, create the partition dependencies.
1100  */
1101  if (OidIsValid(parentTriggerOid))
1102  {
1103  ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1104  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1105  ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1106  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1107  }
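 /*
  * These PARTITION dependencies tie the child trigger to its parent: it
  * cannot be dropped on its own, and it goes away automatically when
  * either the parent trigger or the partition itself is dropped.
  */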
1108  }
1109 
1110  /* If column-specific trigger, add normal dependencies on columns */
1111  if (columns != NULL)
1112  {
1113  int i;
1114 
1115  referenced.classId = RelationRelationId;
1116  referenced.objectId = RelationGetRelid(rel);
1117  for (i = 0; i < ncolumns; i++)
1118  {
1119  referenced.objectSubId = columns[i];
1120  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1121  }
1122  }
1123 
1124  /*
1125  * If it has a WHEN clause, add dependencies on objects mentioned in the
1126  * expression (eg, functions, as well as any columns used).
1127  */
1128  if (whenRtable != NIL)
1129  recordDependencyOnExpr(&myself, whenClause, whenRtable,
1130  DEPENDENCY_NORMAL);
1131 
1132  /* Post creation hook for new trigger */
1133  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1134  isInternal);
1135 
1136  /*
1137  * Lastly, create the trigger on child relations, if needed.
1138  */
1139  if (partition_recurse)
1140  {
1141  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1142  int i;
1143  MemoryContext oldcxt,
1144  perChildCxt;
1145 
1146  perChildCxt = AllocSetContextCreate(CurrentMemoryContext,
1147  "part trig clone",
1148  ALLOCSET_SMALL_SIZES);
1149 
1150  /*
1151  * We don't currently expect to be called with a valid indexOid. If
1152  * that ever changes then we'll need to write code here to find the
1153  * corresponding child index.
1154  */
1155  Assert(!OidIsValid(indexOid));
1156 
1157  oldcxt = MemoryContextSwitchTo(perChildCxt);
1158 
1159  /* Iterate to create the trigger on each existing partition */
1160  for (i = 0; i < partdesc->nparts; i++)
1161  {
1162  CreateTrigStmt *childStmt;
1163  Relation childTbl;
1164  Node *qual;
1165 
1166  childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1167 
1168  /*
1169  * Initialize our fabricated parse node by copying the original
1170  * one, then resetting fields that we pass separately.
1171  */
1172  childStmt = (CreateTrigStmt *) copyObject(stmt);
1173  childStmt->funcname = NIL;
1174  childStmt->whenClause = NULL;
1175 
1176  /* If there is a WHEN clause, create a modified copy of it */
1177  qual = copyObject(whenClause);
1178  qual = (Node *)
1179  map_partition_varattnos((List *) qual, PRS2_OLD_VARNO,
1180  childTbl, rel);
1181  qual = (Node *)
1182  map_partition_varattnos((List *) qual, PRS2_NEW_VARNO,
1183  childTbl, rel);
1184 
1185  CreateTriggerFiringOn(childStmt, queryString,
1186  partdesc->oids[i], refRelOid,
1187  InvalidOid, InvalidOid,
1188  funcoid, trigoid, qual,
1189  isInternal, true, trigger_fires_when);
1190 
1191  table_close(childTbl, NoLock);
1192 
1193  MemoryContextReset(perChildCxt);
1194  }
1195 
1196  MemoryContextSwitchTo(oldcxt);
1197  MemoryContextDelete(perChildCxt);
1198  }
1199 
1200  /* Keep lock on target rel until end of xact */
1201  table_close(rel, NoLock);
1202 
1203  return myself;
1204 }
1205 
1206 /*
1207  * TriggerSetParentTrigger
1208  * Set a partition's trigger as child of its parent trigger,
1209  * or remove the linkage if parentTrigId is InvalidOid.
1210  *
1211  * This updates the trigger's pg_trigger row to show it as inherited, and
1212  * adds PARTITION dependencies to prevent the trigger from being deleted
1213  * on its own. Alternatively, reverse that.
1214  */
1215 void
1216 TriggerSetParentTrigger(Relation trigRel,
1217  Oid childTrigId,
1218  Oid parentTrigId,
1219  Oid childTableId)
1220 {
1221  SysScanDesc tgscan;
1222  ScanKeyData skey[1];
1223  Form_pg_trigger trigForm;
1224  HeapTuple tuple,
1225  newtup;
1226  ObjectAddress depender;
1227  ObjectAddress referenced;
1228 
1229  /*
1230  * Find the trigger to modify.
1231  */
1232  ScanKeyInit(&skey[0],
1233  Anum_pg_trigger_oid,
1234  BTEqualStrategyNumber, F_OIDEQ,
1235  ObjectIdGetDatum(childTrigId));
1236 
1237  tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true,
1238  NULL, 1, skey);
1239 
1240  tuple = systable_getnext(tgscan);
1241  if (!HeapTupleIsValid(tuple))
1242  elog(ERROR, "could not find tuple for trigger %u", childTrigId);
1243  newtup = heap_copytuple(tuple);
1244  trigForm = (Form_pg_trigger) GETSTRUCT(newtup);
1245  if (OidIsValid(parentTrigId))
1246  {
1247  /* don't allow setting parent for a trigger that already has one */
1248  if (OidIsValid(trigForm->tgparentid))
1249  elog(ERROR, "trigger %u already has a parent trigger",
1250  childTrigId);
1251 
1252  trigForm->tgparentid = parentTrigId;
1253 
1254  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1255 
1256  ObjectAddressSet(depender, TriggerRelationId, childTrigId);
1257 
1258  ObjectAddressSet(referenced, TriggerRelationId, parentTrigId);
1259  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI);
1260 
1261  ObjectAddressSet(referenced, RelationRelationId, childTableId);
1262  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC);
1263  }
1264  else
1265  {
1266  trigForm->tgparentid = InvalidOid;
1267 
1268  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1269 
1270  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1271  TriggerRelationId,
1272  DEPENDENCY_PARTITION_PRI);
1273  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1274  RelationRelationId,
1275  DEPENDENCY_PARTITION_SEC);
1276  }
1277 
1278  heap_freetuple(newtup);
1279  systable_endscan(tgscan);
1280 }
1281 
1282 
1283 /*
1284  * Guts of trigger deletion.
1285  */
1286 void
1287 RemoveTriggerById(Oid trigOid)
1288 {
1289  Relation tgrel;
1290  SysScanDesc tgscan;
1291  ScanKeyData skey[1];
1292  HeapTuple tup;
1293  Oid relid;
1294  Relation rel;
1295 
1296  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1297 
1298  /*
1299  * Find the trigger to delete.
1300  */
1301  ScanKeyInit(&skey[0],
1302  Anum_pg_trigger_oid,
1303  BTEqualStrategyNumber, F_OIDEQ,
1304  ObjectIdGetDatum(trigOid));
1305 
1306  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1307  NULL, 1, skey);
1308 
1309  tup = systable_getnext(tgscan);
1310  if (!HeapTupleIsValid(tup))
1311  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1312 
1313  /*
1314  * Open and exclusive-lock the relation the trigger belongs to.
1315  */
1316  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1317 
1318  rel = table_open(relid, AccessExclusiveLock);
1319 
1320  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1321  rel->rd_rel->relkind != RELKIND_VIEW &&
1322  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1323  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1324  ereport(ERROR,
1325  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1326  errmsg("relation \"%s\" cannot have triggers",
1327  RelationGetRelationName(rel)),
1328  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
1329 
1330  if (!allowSystemTableMods && IsSystemRelation(rel))
1331  ereport(ERROR,
1332  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1333  errmsg("permission denied: \"%s\" is a system catalog",
1334  RelationGetRelationName(rel))));
1335 
1336  /*
1337  * Delete the pg_trigger tuple.
1338  */
1339  CatalogTupleDelete(tgrel, &tup->t_self);
1340 
1341  systable_endscan(tgscan);
1342  table_close(tgrel, RowExclusiveLock);
1343 
1344  /*
1345  * We do not bother to try to determine whether any other triggers remain,
1346  * which would be needed in order to decide whether it's safe to clear the
1347  * relation's relhastriggers. (In any case, there might be a concurrent
1348  * process adding new triggers.) Instead, just force a relcache inval to
1349  * make other backends (and this one too!) rebuild their relcache entries.
1350  * There's no great harm in leaving relhastriggers true even if there are
1351  * no triggers left.
1352  */
1353  CacheInvalidateRelcache(rel);
1354 
1355  /* Keep lock on trigger's rel until end of xact */
1356  table_close(rel, NoLock);
1357 }
1358 
1359 /*
1360  * get_trigger_oid - Look up a trigger by name to find its OID.
1361  *
1362  * If missing_ok is false, throw an error if trigger not found. If
1363  * true, just return InvalidOid.
1364  */
1365 Oid
1366 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1367 {
1368  Relation tgrel;
1369  ScanKeyData skey[2];
1370  SysScanDesc tgscan;
1371  HeapTuple tup;
1372  Oid oid;
1373 
1374  /*
1375  * Find the trigger, verify permissions, set up object address
1376  */
1377  tgrel = table_open(TriggerRelationId, AccessShareLock);
1378 
1379  ScanKeyInit(&skey[0],
1380  Anum_pg_trigger_tgrelid,
1381  BTEqualStrategyNumber, F_OIDEQ,
1382  ObjectIdGetDatum(relid));
1383  ScanKeyInit(&skey[1],
1384  Anum_pg_trigger_tgname,
1385  BTEqualStrategyNumber, F_NAMEEQ,
1386  CStringGetDatum(trigname));
1387 
1388  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1389  NULL, 2, skey);
1390 
1391  tup = systable_getnext(tgscan);
1392 
1393  if (!HeapTupleIsValid(tup))
1394  {
1395  if (!missing_ok)
1396  ereport(ERROR,
1397  (errcode(ERRCODE_UNDEFINED_OBJECT),
1398  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1399  trigname, get_rel_name(relid))));
1400  oid = InvalidOid;
1401  }
1402  else
1403  {
1404  oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1405  }
1406 
1407  systable_endscan(tgscan);
1408  table_close(tgrel, AccessShareLock);
1409  return oid;
1410 }
1411 
1412 /*
1413  * Perform permissions and integrity checks before acquiring a relation lock.
1414  */
1415 static void
1416 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1417  void *arg)
1418 {
1419  HeapTuple tuple;
1420  Form_pg_class form;
1421 
1422  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1423  if (!HeapTupleIsValid(tuple))
1424  return; /* concurrently dropped */
1425  form = (Form_pg_class) GETSTRUCT(tuple);
1426 
1427  /* only tables and views can have triggers */
1428  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1429  form->relkind != RELKIND_FOREIGN_TABLE &&
1430  form->relkind != RELKIND_PARTITIONED_TABLE)
1431  ereport(ERROR,
1432  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1433  errmsg("relation \"%s\" cannot have triggers",
1434  rv->relname),
1435  errdetail_relkind_not_supported(form->relkind)));
1436 
1437  /* you must own the table to rename one of its triggers */
1438  if (!object_ownercheck(RelationRelationId, relid, GetUserId()))
1440  if (!allowSystemTableMods && IsSystemClass(relid, form))
1441  ereport(ERROR,
1442  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1443  errmsg("permission denied: \"%s\" is a system catalog",
1444  rv->relname)));
1445 
1446  ReleaseSysCache(tuple);
1447 }
1448 
1449 /*
1450  * renametrig - changes the name of a trigger on a relation
1451  *
1452  * trigger name is changed in trigger catalog.
1453  * No record of the previous name is kept.
1454  *
1455  * get proper relation from relation catalog (if not arg)
1456  * scan trigger catalog
1457  * for name conflict (within rel)
1458  * for original trigger (if not arg)
1459  * modify tgname in trigger tuple
1460  * update row in catalog
1461  */
1462 ObjectAddress
1463 renametrig(RenameStmt *stmt)
1464 {
1465  Oid tgoid;
1466  Relation targetrel;
1467  Relation tgrel;
1468  HeapTuple tuple;
1469  SysScanDesc tgscan;
1470  ScanKeyData key[2];
1471  Oid relid;
1472  ObjectAddress address;
1473 
1474  /*
1475  * Look up name, check permissions, and acquire lock (which we will NOT
1476  * release until end of transaction).
1477  */
1478  relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1479  0,
1480  RangeVarCallbackForRenameTrigger,
1481  NULL);
1482 
1483  /* Have lock already, so just need to build relcache entry. */
1484  targetrel = relation_open(relid, NoLock);
1485 
1486  /*
1487  * On partitioned tables, this operation recurses to partitions. Lock all
1488  * tables upfront.
1489  */
1490  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1491  (void) find_all_inheritors(relid, AccessExclusiveLock, NULL);
1492 
1493  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1494 
1495  /*
1496  * Search for the trigger to modify.
1497  */
1498  ScanKeyInit(&key[0],
1499  Anum_pg_trigger_tgrelid,
1500  BTEqualStrategyNumber, F_OIDEQ,
1501  ObjectIdGetDatum(relid));
1502  ScanKeyInit(&key[1],
1503  Anum_pg_trigger_tgname,
1504  BTEqualStrategyNumber, F_NAMEEQ,
1505  PointerGetDatum(stmt->subname));
1506  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1507  NULL, 2, key);
1508  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1509  {
1510  Form_pg_trigger trigform;
1511 
1512  trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1513  tgoid = trigform->oid;
1514 
1515  /*
1516  * If the trigger descends from a trigger on a parent partitioned
1517  * table, reject the rename. We don't allow a trigger in a partition
1518  * to differ in name from that of its parent: that would lead to an
1519  * inconsistency that pg_dump would not reproduce.
1520  */
1521  if (OidIsValid(trigform->tgparentid))
1522  ereport(ERROR,
1523  errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1524  errmsg("cannot rename trigger \"%s\" on table \"%s\"",
1525  stmt->subname, RelationGetRelationName(targetrel)),
1526  errhint("Rename the trigger on the partitioned table \"%s\" instead.",
1527  get_rel_name(get_partition_parent(relid, false))));
1528 
1529 
1530  /* Rename the trigger on this relation ... */
1531  renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
1532  stmt->subname);
1533 
1534  /* ... and if it is partitioned, recurse to its partitions */
1535  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1536  {
1537  PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);
1538 
1539  for (int i = 0; i < partdesc->nparts; i++)
1540  {
1541  Oid partitionId = partdesc->oids[i];
1542 
1543  renametrig_partition(tgrel, partitionId, trigform->oid,
1544  stmt->newname, stmt->subname);
1545  }
1546  }
1547  }
1548  else
1549  {
1550  ereport(ERROR,
1551  (errcode(ERRCODE_UNDEFINED_OBJECT),
1552  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1553  stmt->subname, RelationGetRelationName(targetrel))));
1554  }
1555 
1556  ObjectAddressSet(address, TriggerRelationId, tgoid);
1557 
1558  systable_endscan(tgscan);
1559 
1560  table_close(tgrel, RowExclusiveLock);
1561 
1562  /*
1563  * Close rel, but keep exclusive lock!
1564  */
1565  relation_close(targetrel, NoLock);
1566 
1567  return address;
1568 }
1569 
1570 /*
1571  * Subroutine for renametrig -- perform the actual work of renaming one
1572  * trigger on one table.
1573  *
1574  * If the trigger has a name different from the expected one, raise a
1575  * NOTICE about it.
1576  */
1577 static void
1578 renametrig_internal(Relation tgrel, Relation targetrel, HeapTuple trigtup,
1579  const char *newname, const char *expected_name)
1580 {
1581  HeapTuple tuple;
1582  Form_pg_trigger tgform;
1583  ScanKeyData key[2];
1584  SysScanDesc tgscan;
1585 
1586  /* If the trigger already has the new name, nothing to do. */
1587  tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
1588  if (strcmp(NameStr(tgform->tgname), newname) == 0)
1589  return;
1590 
1591  /*
1592  * Before actually trying the rename, search for triggers with the same
1593  * name. The update would fail with an ugly message in that case, and it
1594  * is better to throw a nicer error.
1595  */
1596  ScanKeyInit(&key[0],
1597  Anum_pg_trigger_tgrelid,
1598  BTEqualStrategyNumber, F_OIDEQ,
1599  ObjectIdGetDatum(RelationGetRelid(targetrel)));
1600  ScanKeyInit(&key[1],
1601  Anum_pg_trigger_tgname,
1602  BTEqualStrategyNumber, F_NAMEEQ,
1603  PointerGetDatum(newname));
1604  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1605  NULL, 2, key);
1606  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1607  ereport(ERROR,
1608  (errcode(ERRCODE_DUPLICATE_OBJECT),
1609  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1610  newname, RelationGetRelationName(targetrel))));
1611  systable_endscan(tgscan);
1612 
1613  /*
1614  * The target name is free; update the existing pg_trigger tuple with it.
1615  */
1616  tuple = heap_copytuple(trigtup); /* need a modifiable copy */
1617  tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1618 
1619  /*
1620  * If the trigger has a name different from what we expected, let the user
1621  * know. (We can proceed anyway, since we must have reached here following
1622  * a tgparentid link.)
1623  */
1624  if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
1625  ereport(NOTICE,
1626  errmsg("renamed trigger \"%s\" on relation \"%s\"",
1627  NameStr(tgform->tgname),
1628  RelationGetRelationName(targetrel)));
1629 
1630  namestrcpy(&tgform->tgname, newname);
1631 
1632  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1633 
1634  InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);
1635 
1636  /*
1637  * Invalidate relation's relcache entry so that other backends (and this
1638  * one too!) are sent SI message to make them rebuild relcache entries.
1639  * (Ideally this should happen automatically...)
1640  */
1641  CacheInvalidateRelcache(targetrel);
1642 }
1643 
1644 /*
1645  * Subroutine for renametrig -- Helper for recursing to partitions when
1646  * renaming triggers on a partitioned table.
1647  */
1648 static void
1649 renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
1650  const char *newname, const char *expected_name)
1651 {
1652  SysScanDesc tgscan;
1653  ScanKeyData key;
1654  HeapTuple tuple;
1655 
1656  /*
1657  * Given a relation and the OID of a trigger on parent relation, find the
1658  * corresponding trigger in the child and rename that trigger to the given
1659  * name.
1660  */
1661  ScanKeyInit(&key,
1662  Anum_pg_trigger_tgrelid,
1663  BTEqualStrategyNumber, F_OIDEQ,
1664  ObjectIdGetDatum(partitionId));
1665  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1666  NULL, 1, &key);
1667  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1668  {
1669  Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1670  Relation partitionRel;
1671 
1672  if (tgform->tgparentid != parentTriggerOid)
1673  continue; /* not our trigger */
1674 
1675  partitionRel = table_open(partitionId, NoLock);
1676 
1677  /* Rename the trigger on this partition */
1678  renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);
1679 
1680  /* And if this relation is partitioned, recurse to its partitions */
1681  if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1682  {
1683  PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
1684  true);
1685 
1686  for (int i = 0; i < partdesc->nparts; i++)
1687  {
1688  Oid partoid = partdesc->oids[i];
1689 
1690  renametrig_partition(tgrel, partoid, tgform->oid, newname,
1691  NameStr(tgform->tgname));
1692  }
1693  }
1694  table_close(partitionRel, NoLock);
1695 
1696  /* There should be at most one matching tuple */
1697  break;
1698  }
1699  systable_endscan(tgscan);
1700 }
1701 
1702 /*
1703  * EnableDisableTrigger()
1704  *
1705  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1706  * to change 'tgenabled' field for the specified trigger(s)
1707  *
1708  * rel: relation to process (caller must hold suitable lock on it)
1709  * tgname: name of trigger to process, or NULL to scan all triggers
1710  * tgparent: if not zero, process only triggers with this tgparentid
1711  * fires_when: new value for tgenabled field. In addition to generic
1712  * enablement/disablement, this also defines when the trigger
1713  * should be fired in session replication roles.
1714  * skip_system: if true, skip "system" triggers (constraint triggers)
1715  * recurse: if true, recurse to partitions
1716  *
1717  * Caller should have checked permissions for the table; here we also
1718  * enforce that superuser privilege is required to alter the state of
1719  * system triggers
1720  */
1721 void
1722 EnableDisableTrigger(Relation rel, const char *tgname, Oid tgparent,
1723  char fires_when, bool skip_system, bool recurse,
1724  LOCKMODE lockmode)
1725 {
1726  Relation tgrel;
1727  int nkeys;
1728  ScanKeyData keys[2];
1729  SysScanDesc tgscan;
1730  HeapTuple tuple;
1731  bool found;
1732  bool changed;
1733 
1734  /* Scan the relevant entries in pg_trigger */
1735  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1736 
1737  ScanKeyInit(&keys[0],
1738  Anum_pg_trigger_tgrelid,
1739  BTEqualStrategyNumber, F_OIDEQ,
1740  ObjectIdGetDatum(RelationGetRelid(rel)));
1741  if (tgname)
1742  {
1743  ScanKeyInit(&keys[1],
1744  Anum_pg_trigger_tgname,
1745  BTEqualStrategyNumber, F_NAMEEQ,
1746  CStringGetDatum(tgname));
1747  nkeys = 2;
1748  }
1749  else
1750  nkeys = 1;
1751 
1752  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1753  NULL, nkeys, keys);
1754 
1755  found = changed = false;
1756 
1757  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1758  {
1759  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1760 
1761  if (OidIsValid(tgparent) && tgparent != oldtrig->tgparentid)
1762  continue;
1763 
1764  if (oldtrig->tgisinternal)
1765  {
1766  /* system trigger ... ok to process? */
1767  if (skip_system)
1768  continue;
1769  if (!superuser())
1770  ereport(ERROR,
1771  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1772  errmsg("permission denied: \"%s\" is a system trigger",
1773  NameStr(oldtrig->tgname))));
1774  }
1775 
1776  found = true;
1777 
1778  if (oldtrig->tgenabled != fires_when)
1779  {
1780  /* need to change this one ... make a copy to scribble on */
1781  HeapTuple newtup = heap_copytuple(tuple);
1782  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1783 
1784  newtrig->tgenabled = fires_when;
1785 
1786  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1787 
1788  heap_freetuple(newtup);
1789 
1790  changed = true;
1791  }
1792 
1793  /*
1794  * When altering FOR EACH ROW triggers on a partitioned table, do the
1795  * same on the partitions as well, unless ONLY is specified.
1796  *
1797  * Note that we recurse even if we didn't change the trigger above,
1798  * because the partitions' copy of the trigger may have a different
1799  * value of tgenabled than the parent's trigger and thus might need to
1800  * be changed.
1801  */
1802  if (recurse &&
1803  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
1804  (TRIGGER_FOR_ROW(oldtrig->tgtype)))
1805  {
1806  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1807  int i;
1808 
1809  for (i = 0; i < partdesc->nparts; i++)
1810  {
1811  Relation part;
1812 
1813  part = relation_open(partdesc->oids[i], lockmode);
1814  /* Match on child triggers' tgparentid, not their name */
1815  EnableDisableTrigger(part, NULL, oldtrig->oid,
1816  fires_when, skip_system, recurse,
1817  lockmode);
1818  table_close(part, NoLock); /* keep lock till commit */
1819  }
1820  }
1821 
1822  InvokeObjectPostAlterHook(TriggerRelationId,
1823  oldtrig->oid, 0);
1824  }
1825 
1826  systable_endscan(tgscan);
1827 
1828  table_close(tgrel, RowExclusiveLock);
1829 
1830  if (tgname && !found)
1831  ereport(ERROR,
1832  (errcode(ERRCODE_UNDEFINED_OBJECT),
1833  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1834  tgname, RelationGetRelationName(rel))));
1835 
1836  /*
1837  * If we changed anything, broadcast a SI inval message to force each
1838  * backend (including our own!) to rebuild the relation's relcache entry.
1839  * Otherwise backends will fail to apply the change promptly.
1840  */
1841  if (changed)
1842  CacheInvalidateRelcache(rel);
1843 }
1844 
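The parameter list documented above maps directly onto ALTER TABLE's trigger-enable syntax. A hedged, editorial sketch (not part of trigger.c; the helper name is hypothetical) of how a caller holding an adequate lock might disable all non-system triggers on a table and its partitions, roughly what ALTER TABLE ... DISABLE TRIGGER USER does:

/* Editorial sketch only; assumes the usual backend environment. */
static void
disable_user_triggers(Oid relid)
{
	Relation	rel = table_open(relid, ShareRowExclusiveLock);

	/*
	 * tgname = NULL processes every trigger; tgparent = InvalidOid applies
	 * no parent filter; skip_system = true leaves constraint triggers
	 * alone; recurse = true also walks partitions.
	 */
	EnableDisableTrigger(rel, NULL, InvalidOid,
						 TRIGGER_DISABLED, true, true,
						 ShareRowExclusiveLock);

	table_close(rel, NoLock);	/* keep lock till commit */
}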
1845 
1846 /*
1847  * Build trigger data to attach to the given relcache entry.
1848  *
1849  * Note that trigger data attached to a relcache entry must be stored in
1850  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1851  * But we should be running in a less long-lived working context. To avoid
1852  * leaking cache memory if this routine fails partway through, we build a
1853  * temporary TriggerDesc in working memory and then copy the completed
1854  * structure into cache memory.
1855  */
1856 void
1857 RelationBuildTriggers(Relation relation)
1858 {
1859  TriggerDesc *trigdesc;
1860  int numtrigs;
1861  int maxtrigs;
1862  Trigger *triggers;
1863  Relation tgrel;
1864  ScanKeyData skey;
1865  SysScanDesc tgscan;
1866  HeapTuple htup;
1867  MemoryContext oldContext;
1868  int i;
1869 
1870  /*
1871  * Allocate a working array to hold the triggers (the array is extended if
1872  * necessary)
1873  */
1874  maxtrigs = 16;
1875  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1876  numtrigs = 0;
1877 
1878  /*
1879  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1880  * be reading the triggers in name order, except possibly during
1881  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1882  * ensures that triggers will be fired in name order.
1883  */
1884  ScanKeyInit(&skey,
1885  Anum_pg_trigger_tgrelid,
1886  BTEqualStrategyNumber, F_OIDEQ,
1887  ObjectIdGetDatum(RelationGetRelid(relation)));
1888 
1889  tgrel = table_open(TriggerRelationId, AccessShareLock);
1890  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1891  NULL, 1, &skey);
1892 
1893  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1894  {
1895  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1896  Trigger *build;
1897  Datum datum;
1898  bool isnull;
1899 
1900  if (numtrigs >= maxtrigs)
1901  {
1902  maxtrigs *= 2;
1903  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1904  }
1905  build = &(triggers[numtrigs]);
1906 
1907  build->tgoid = pg_trigger->oid;
1908  build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1909  NameGetDatum(&pg_trigger->tgname)));
1910  build->tgfoid = pg_trigger->tgfoid;
1911  build->tgtype = pg_trigger->tgtype;
1912  build->tgenabled = pg_trigger->tgenabled;
1913  build->tgisinternal = pg_trigger->tgisinternal;
1914  build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1915  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1916  build->tgconstrindid = pg_trigger->tgconstrindid;
1917  build->tgconstraint = pg_trigger->tgconstraint;
1918  build->tgdeferrable = pg_trigger->tgdeferrable;
1919  build->tginitdeferred = pg_trigger->tginitdeferred;
1920  build->tgnargs = pg_trigger->tgnargs;
1921  /* tgattr is first var-width field, so OK to access directly */
1922  build->tgnattr = pg_trigger->tgattr.dim1;
1923  if (build->tgnattr > 0)
1924  {
1925  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1926  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1927  build->tgnattr * sizeof(int16));
1928  }
1929  else
1930  build->tgattr = NULL;
1931  if (build->tgnargs > 0)
1932  {
1933  bytea *val;
1934  char *p;
1935 
1936  val = DatumGetByteaPP(fastgetattr(htup,
1937  Anum_pg_trigger_tgargs,
1938  tgrel->rd_att, &isnull));
1939  if (isnull)
1940  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1941  RelationGetRelationName(relation));
1942  p = (char *) VARDATA_ANY(val);
1943  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1944  for (i = 0; i < build->tgnargs; i++)
1945  {
1946  build->tgargs[i] = pstrdup(p);
1947  p += strlen(p) + 1;
1948  }
1949  }
1950  else
1951  build->tgargs = NULL;
1952 
1953  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1954  tgrel->rd_att, &isnull);
1955  if (!isnull)
1956  build->tgoldtable =
1957  DatumGetCString(DirectFunctionCall1(nameout, datum));
1958  else
1959  build->tgoldtable = NULL;
1960 
1961  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1962  tgrel->rd_att, &isnull);
1963  if (!isnull)
1964  build->tgnewtable =
1965  DatumGetCString(DirectFunctionCall1(nameout, datum));
1966  else
1967  build->tgnewtable = NULL;
1968 
1969  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1970  tgrel->rd_att, &isnull);
1971  if (!isnull)
1972  build->tgqual = TextDatumGetCString(datum);
1973  else
1974  build->tgqual = NULL;
1975 
1976  numtrigs++;
1977  }
1978 
1979  systable_endscan(tgscan);
1980  table_close(tgrel, AccessShareLock);
1981 
1982  /* There might not be any triggers */
1983  if (numtrigs == 0)
1984  {
1985  pfree(triggers);
1986  return;
1987  }
1988 
1989  /* Build trigdesc */
1990  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1991  trigdesc->triggers = triggers;
1992  trigdesc->numtriggers = numtrigs;
1993  for (i = 0; i < numtrigs; i++)
1994  SetTriggerFlags(trigdesc, &(triggers[i]));
1995 
1996  /* Copy completed trigdesc into cache storage */
1997  oldContext = MemoryContextSwitchTo(CacheMemoryContext);
1998  relation->trigdesc = CopyTriggerDesc(trigdesc);
1999  MemoryContextSwitchTo(oldContext);
2000 
2001  /* Release working memory */
2002  FreeTriggerDesc(trigdesc);
2003 }
2004 
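/*
 * Editorial note: because RelationBuildTriggers reads pg_trigger through
 * TriggerRelidNameIndexId, the Trigger array (and hence firing order) is
 * sorted by trigger name.  For example, a trigger named "aaa_validate"
 * fires before one named "zzz_audit" for the same event on the same table,
 * so trigger names can be chosen deliberately to control ordering.
 */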
2005 /*
2006  * Update the TriggerDesc's hint flags to include the specified trigger
2007  */
2008 static void
2009 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
2010 {
2011  int16 tgtype = trigger->tgtype;
2012 
2013  trigdesc->trig_insert_before_row |=
2014  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2015  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2016  trigdesc->trig_insert_after_row |=
2017  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2018  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2019  trigdesc->trig_insert_instead_row |=
2020  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2021  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
2022  trigdesc->trig_insert_before_statement |=
2023  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2024  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2025  trigdesc->trig_insert_after_statement |=
2026  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2027  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2028  trigdesc->trig_update_before_row |=
2029  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2030  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2031  trigdesc->trig_update_after_row |=
2032  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2033  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2034  trigdesc->trig_update_instead_row |=
2035  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2036  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
2037  trigdesc->trig_update_before_statement |=
2038  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2039  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2040  trigdesc->trig_update_after_statement |=
2041  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2042  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2043  trigdesc->trig_delete_before_row |=
2044  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2045  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2046  trigdesc->trig_delete_after_row |=
2047  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2048  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2049  trigdesc->trig_delete_instead_row |=
2050  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2051  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
2052  trigdesc->trig_delete_before_statement |=
2053  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2054  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2055  trigdesc->trig_delete_after_statement |=
2056  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2057  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2058  /* there are no row-level truncate triggers */
2059  trigdesc->trig_truncate_before_statement |=
2060  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2061  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
2062  trigdesc->trig_truncate_after_statement |=
2063  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2064  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
2065 
2066  trigdesc->trig_insert_new_table |=
2067  (TRIGGER_FOR_INSERT(tgtype) &&
2068  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2069  trigdesc->trig_update_old_table |=
2070  (TRIGGER_FOR_UPDATE(tgtype) &&
2071  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2072  trigdesc->trig_update_new_table |=
2073  (TRIGGER_FOR_UPDATE(tgtype) &&
2074  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2075  trigdesc->trig_delete_old_table |=
2076  (TRIGGER_FOR_DELETE(tgtype) &&
2077  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2078 }
2079 
2080 /*
2081  * Copy a TriggerDesc data structure.
2082  *
2083  * The copy is allocated in the current memory context.
2084  */
2085 TriggerDesc *
2086 CopyTriggerDesc(TriggerDesc *trigdesc)
2087 {
2088  TriggerDesc *newdesc;
2089  Trigger *trigger;
2090  int i;
2091 
2092  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2093  return NULL;
2094 
2095  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2096  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2097 
2098  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2099  memcpy(trigger, trigdesc->triggers,
2100  trigdesc->numtriggers * sizeof(Trigger));
2101  newdesc->triggers = trigger;
2102 
2103  for (i = 0; i < trigdesc->numtriggers; i++)
2104  {
2105  trigger->tgname = pstrdup(trigger->tgname);
2106  if (trigger->tgnattr > 0)
2107  {
2108  int16 *newattr;
2109 
2110  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
2111  memcpy(newattr, trigger->tgattr,
2112  trigger->tgnattr * sizeof(int16));
2113  trigger->tgattr = newattr;
2114  }
2115  if (trigger->tgnargs > 0)
2116  {
2117  char **newargs;
2118  int16 j;
2119 
2120  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2121  for (j = 0; j < trigger->tgnargs; j++)
2122  newargs[j] = pstrdup(trigger->tgargs[j]);
2123  trigger->tgargs = newargs;
2124  }
2125  if (trigger->tgqual)
2126  trigger->tgqual = pstrdup(trigger->tgqual);
2127  if (trigger->tgoldtable)
2128  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2129  if (trigger->tgnewtable)
2130  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
2131  trigger++;
2132  }
2133 
2134  return newdesc;
2135 }
2136 
2137 /*
2138  * Free a TriggerDesc data structure.
2139  */
2140 void
2141 FreeTriggerDesc(TriggerDesc *trigdesc)
2142 {
2143  Trigger *trigger;
2144  int i;
2145 
2146  if (trigdesc == NULL)
2147  return;
2148 
2149  trigger = trigdesc->triggers;
2150  for (i = 0; i < trigdesc->numtriggers; i++)
2151  {
2152  pfree(trigger->tgname);
2153  if (trigger->tgnattr > 0)
2154  pfree(trigger->tgattr);
2155  if (trigger->tgnargs > 0)
2156  {
2157  while (--(trigger->tgnargs) >= 0)
2158  pfree(trigger->tgargs[trigger->tgnargs]);
2159  pfree(trigger->tgargs);
2160  }
2161  if (trigger->tgqual)
2162  pfree(trigger->tgqual);
2163  if (trigger->tgoldtable)
2164  pfree(trigger->tgoldtable);
2165  if (trigger->tgnewtable)
2166  pfree(trigger->tgnewtable);
2167  trigger++;
2168  }
2169  pfree(trigdesc->triggers);
2170  pfree(trigdesc);
2171 }
2172 
2173 /*
2174  * Compare two TriggerDesc structures for logical equality.
2175  */
2176 #ifdef NOT_USED
2177 bool
2178 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
2179 {
2180  int i,
2181  j;
2182 
2183  /*
2184  * We need not examine the hint flags, just the trigger array itself; if
2185  * we have the same triggers with the same types, the flags should match.
2186  *
2187  * As of 7.3 we assume trigger set ordering is significant in the
2188  * comparison; so we just compare corresponding slots of the two sets.
2189  *
2190  * Note: comparing the stringToNode forms of the WHEN clauses means that
2191  * parse column locations will affect the result. This is okay as long as
2192  * this function is only used for detecting exact equality, as for example
2193  * in checking for staleness of a cache entry.
2194  */
2195  if (trigdesc1 != NULL)
2196  {
2197  if (trigdesc2 == NULL)
2198  return false;
2199  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
2200  return false;
2201  for (i = 0; i < trigdesc1->numtriggers; i++)
2202  {
2203  Trigger *trig1 = trigdesc1->triggers + i;
2204  Trigger *trig2 = trigdesc2->triggers + i;
2205 
2206  if (trig1->tgoid != trig2->tgoid)
2207  return false;
2208  if (strcmp(trig1->tgname, trig2->tgname) != 0)
2209  return false;
2210  if (trig1->tgfoid != trig2->tgfoid)
2211  return false;
2212  if (trig1->tgtype != trig2->tgtype)
2213  return false;
2214  if (trig1->tgenabled != trig2->tgenabled)
2215  return false;
2216  if (trig1->tgisinternal != trig2->tgisinternal)
2217  return false;
2218  if (trig1->tgisclone != trig2->tgisclone)
2219  return false;
2220  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
2221  return false;
2222  if (trig1->tgconstrindid != trig2->tgconstrindid)
2223  return false;
2224  if (trig1->tgconstraint != trig2->tgconstraint)
2225  return false;
2226  if (trig1->tgdeferrable != trig2->tgdeferrable)
2227  return false;
2228  if (trig1->tginitdeferred != trig2->tginitdeferred)
2229  return false;
2230  if (trig1->tgnargs != trig2->tgnargs)
2231  return false;
2232  if (trig1->tgnattr != trig2->tgnattr)
2233  return false;
2234  if (trig1->tgnattr > 0 &&
2235  memcmp(trig1->tgattr, trig2->tgattr,
2236  trig1->tgnattr * sizeof(int16)) != 0)
2237  return false;
2238  for (j = 0; j < trig1->tgnargs; j++)
2239  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
2240  return false;
2241  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
2242  /* ok */ ;
2243  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
2244  return false;
2245  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
2246  return false;
2247  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
2248  /* ok */ ;
2249  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
2250  return false;
2251  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
2252  return false;
2253  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
2254  /* ok */ ;
2255  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
2256  return false;
2257  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
2258  return false;
2259  }
2260  }
2261  else if (trigdesc2 != NULL)
2262  return false;
2263  return true;
2264 }
2265 #endif /* NOT_USED */
2266 
2267 /*
2268  * Check if there is a row-level trigger with transition tables that prevents
2269  * a table from becoming an inheritance child or partition. Return the name
2270  * of the first such incompatible trigger, or NULL if there is none.
2271  */
2272 const char *
2273 FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
2274 {
2275  if (trigdesc != NULL)
2276  {
2277  int i;
2278 
2279  for (i = 0; i < trigdesc->numtriggers; ++i)
2280  {
2281  Trigger *trigger = &trigdesc->triggers[i];
2282 
2283  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2284  return trigger->tgname;
2285  }
2286  }
2287 
2288  return NULL;
2289 }
2290 
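A hedged illustration (editorial; the real callers live in tablecmds.c and may word the error differently) of how attach-partition or inheritance code might consume this helper:

{
	const char *offending = FindTriggerIncompatibleWithInheritance(trigdesc);

	if (offending != NULL)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("trigger \"%s\" uses transition tables, so its table cannot become an inheritance child or partition",
						offending)));
}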
2291 /*
2292  * Call a trigger function.
2293  *
2294  * trigdata: trigger descriptor.
2295  * tgindx: trigger's index in finfo and instr arrays.
2296  * finfo: array of cached trigger function call information.
2297  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2298  * per_tuple_context: memory context to execute the function in.
2299  *
2300  * Returns the tuple (or NULL) as returned by the function.
2301  */
2302 static HeapTuple
2303 ExecCallTriggerFunc(TriggerData *trigdata,
2304  int tgindx,
2305  FmgrInfo *finfo,
2306  Instrumentation *instr,
2307  MemoryContext per_tuple_context)
2308 {
2309  LOCAL_FCINFO(fcinfo, 0);
2310  PgStat_FunctionCallUsage fcusage;
2311  Datum result;
2312  MemoryContext oldContext;
2313 
2314  /*
2315  * Protect against code paths that may fail to initialize transition table
2316  * info.
2317  */
2318  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2319  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2320  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2321  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2322  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2323  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2324  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2325 
2326  finfo += tgindx;
2327 
2328  /*
2329  * We cache fmgr lookup info, to avoid making the lookup again on each
2330  * call.
2331  */
2332  if (finfo->fn_oid == InvalidOid)
2333  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2334 
2335  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2336 
2337  /*
2338  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2339  */
2340  if (instr)
2341  InstrStartNode(instr + tgindx);
2342 
2343  /*
2344  * Do the function evaluation in the per-tuple memory context, so that
2345  * leaked memory will be reclaimed once per tuple. Note in particular that
2346  * any new tuple created by the trigger function will live till the end of
2347  * the tuple cycle.
2348  */
2349  oldContext = MemoryContextSwitchTo(per_tuple_context);
2350 
2351  /*
2352  * Call the function, passing no arguments but setting a context.
2353  */
2354  InitFunctionCallInfoData(*fcinfo, finfo, 0,
2355  InvalidOid, (Node *) trigdata, NULL);
2356 
2357  pgstat_init_function_usage(fcinfo, &fcusage);
2358 
2359  MyTriggerDepth++;
2360  PG_TRY();
2361  {
2362  result = FunctionCallInvoke(fcinfo);
2363  }
2364  PG_FINALLY();
2365  {
2366  MyTriggerDepth--;
2367  }
2368  PG_END_TRY();
2369 
2370  pgstat_end_function_usage(&fcusage, true);
2371 
2372  MemoryContextSwitchTo(oldContext);
2373 
2374  /*
2375  * Trigger protocol allows function to return a null pointer, but NOT to
2376  * set the isnull result flag.
2377  */
2378  if (fcinfo->isnull)
2379  ereport(ERROR,
2380  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2381  errmsg("trigger function %u returned null value",
2382  fcinfo->flinfo->fn_oid)));
2383 
2384  /*
2385  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2386  * one "tuple returned" (really the number of firings).
2387  */
2388  if (instr)
2389  InstrStopNode(instr + tgindx, 1);
2390 
2391  return (HeapTuple) DatumGetPointer(result);
2392 }
2393 
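ExecCallTriggerFunc enforces the protocol described above: a trigger function may return a NULL HeapTuple, but must never set the isnull flag. For context, a minimal sketch of a conforming C trigger function as it would appear in an extension (illustrative only, not part of trigger.c):

#include "postgres.h"
#include "fmgr.h"
#include "commands/trigger.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(passthrough_trigger);

Datum
passthrough_trigger(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "passthrough_trigger: not called by trigger manager");

	/* For UPDATE row triggers the proposed new row is tg_newtuple. */
	if (TRIGGER_FIRED_FOR_ROW(trigdata->tg_event) &&
		TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		return PointerGetDatum(trigdata->tg_newtuple);

	/* Otherwise pass the triggering row (NULL for statement triggers) through. */
	return PointerGetDatum(trigdata->tg_trigtuple);
}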
2394 void
2395 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2396 {
2397  TriggerDesc *trigdesc;
2398  int i;
2399  TriggerData LocTriggerData = {0};
2400 
2401  trigdesc = relinfo->ri_TrigDesc;
2402 
2403  if (trigdesc == NULL)
2404  return;
2405  if (!trigdesc->trig_insert_before_statement)
2406  return;
2407 
2408  /* no-op if we already fired BS triggers in this context */
2409  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2410  CMD_INSERT))
2411  return;
2412 
2413  LocTriggerData.type = T_TriggerData;
2414  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2415  TRIGGER_EVENT_BEFORE;
2416  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2417  for (i = 0; i < trigdesc->numtriggers; i++)
2418  {
2419  Trigger *trigger = &trigdesc->triggers[i];
2420  HeapTuple newtuple;
2421 
2422  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2423  TRIGGER_TYPE_STATEMENT,
2424  TRIGGER_TYPE_BEFORE,
2425  TRIGGER_TYPE_INSERT))
2426  continue;
2427  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2428  NULL, NULL, NULL))
2429  continue;
2430 
2431  LocTriggerData.tg_trigger = trigger;
2432  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2433  i,
2434  relinfo->ri_TrigFunctions,
2435  relinfo->ri_TrigInstrument,
2436  GetPerTupleMemoryContext(estate));
2437 
2438  if (newtuple)
2439  ereport(ERROR,
2440  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2441  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2442  }
2443 }
2444 
2445 void
2446 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2447  TransitionCaptureState *transition_capture)
2448 {
2449  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2450 
2451  if (trigdesc && trigdesc->trig_insert_after_statement)
2452  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2453  TRIGGER_EVENT_INSERT,
2454  false, NULL, NULL, NIL, NULL, transition_capture,
2455  false);
2456 }
2457 
2458 bool
2459 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2460  TupleTableSlot *slot)
2461 {
2462  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2463  HeapTuple newtuple = NULL;
2464  bool should_free;
2465  TriggerData LocTriggerData = {0};
2466  int i;
2467 
2468  LocTriggerData.type = T_TriggerData;
2469  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2470  TRIGGER_EVENT_ROW |
2471  TRIGGER_EVENT_BEFORE;
2472  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2473  for (i = 0; i < trigdesc->numtriggers; i++)
2474  {
2475  Trigger *trigger = &trigdesc->triggers[i];
2476  HeapTuple oldtuple;
2477 
2478  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2479  TRIGGER_TYPE_ROW,
2480  TRIGGER_TYPE_BEFORE,
2481  TRIGGER_TYPE_INSERT))
2482  continue;
2483  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2484  NULL, NULL, slot))
2485  continue;
2486 
2487  if (!newtuple)
2488  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2489 
2490  LocTriggerData.tg_trigslot = slot;
2491  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2492  LocTriggerData.tg_trigger = trigger;
2493  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2494  i,
2495  relinfo->ri_TrigFunctions,
2496  relinfo->ri_TrigInstrument,
2497  GetPerTupleMemoryContext(estate));
2498  if (newtuple == NULL)
2499  {
2500  if (should_free)
2501  heap_freetuple(oldtuple);
2502  return false; /* "do nothing" */
2503  }
2504  else if (newtuple != oldtuple)
2505  {
2506  ExecForceStoreHeapTuple(newtuple, slot, false);
2507 
2508  /*
2509  * After a tuple in a partition goes through a trigger, the user
2510  * could have changed the partition key enough that the tuple no
2511  * longer fits the partition. Verify that.
2512  */
2513  if (trigger->tgisclone &&
2514  !ExecPartitionCheck(relinfo, slot, estate, false))
2515  ereport(ERROR,
2516  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2517  errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2518  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2519  trigger->tgname,
2520  get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
2521  RelationGetRelationName(relinfo->ri_RelationDesc))));
2522 
2523  if (should_free)
2524  heap_freetuple(oldtuple);
2525 
2526  /* signal tuple should be re-fetched if used */
2527  newtuple = NULL;
2528  }
2529  }
2530 
2531  return true;
2532 }
2533 
2534 void
2535 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2536  TupleTableSlot *slot, List *recheckIndexes,
2537  TransitionCaptureState *transition_capture)
2538 {
2539  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2540 
2541  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2542  (transition_capture && transition_capture->tcs_insert_new_table))
2543  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2544  TRIGGER_EVENT_INSERT,
2545  true, NULL, slot,
2546  recheckIndexes, NULL,
2547  transition_capture,
2548  false);
2549 }
2550 
2551 bool
2552 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2553  TupleTableSlot *slot)
2554 {
2555  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2556  HeapTuple newtuple = NULL;
2557  bool should_free;
2558  TriggerData LocTriggerData = {0};
2559  int i;
2560 
2561  LocTriggerData.type = T_TriggerData;
2562  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2563  TRIGGER_EVENT_ROW |
2564  TRIGGER_EVENT_INSTEAD;
2565  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2566  for (i = 0; i < trigdesc->numtriggers; i++)
2567  {
2568  Trigger *trigger = &trigdesc->triggers[i];
2569  HeapTuple oldtuple;
2570 
2571  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2572  TRIGGER_TYPE_ROW,
2573  TRIGGER_TYPE_INSTEAD,
2574  TRIGGER_TYPE_INSERT))
2575  continue;
2576  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2577  NULL, NULL, slot))
2578  continue;
2579 
2580  if (!newtuple)
2581  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2582 
2583  LocTriggerData.tg_trigslot = slot;
2584  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2585  LocTriggerData.tg_trigger = trigger;
2586  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2587  i,
2588  relinfo->ri_TrigFunctions,
2589  relinfo->ri_TrigInstrument,
2590  GetPerTupleMemoryContext(estate));
2591  if (newtuple == NULL)
2592  {
2593  if (should_free)
2594  heap_freetuple(oldtuple);
2595  return false; /* "do nothing" */
2596  }
2597  else if (newtuple != oldtuple)
2598  {
2599  ExecForceStoreHeapTuple(newtuple, slot, false);
2600 
2601  if (should_free)
2602  heap_freetuple(oldtuple);
2603 
2604  /* signal tuple should be re-fetched if used */
2605  newtuple = NULL;
2606  }
2607  }
2608 
2609  return true;
2610 }
2611 
2612 void
2613 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2614 {
2615  TriggerDesc *trigdesc;
2616  int i;
2617  TriggerData LocTriggerData = {0};
2618 
2619  trigdesc = relinfo->ri_TrigDesc;
2620 
2621  if (trigdesc == NULL)
2622  return;
2623  if (!trigdesc->trig_delete_before_statement)
2624  return;
2625 
2626  /* no-op if we already fired BS triggers in this context */
2627  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2628  CMD_DELETE))
2629  return;
2630 
2631  LocTriggerData.type = T_TriggerData;
2632  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2633  TRIGGER_EVENT_BEFORE;
2634  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2635  for (i = 0; i < trigdesc->numtriggers; i++)
2636  {
2637  Trigger *trigger = &trigdesc->triggers[i];
2638  HeapTuple newtuple;
2639 
2640  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2641  TRIGGER_TYPE_STATEMENT,
2642  TRIGGER_TYPE_BEFORE,
2643  TRIGGER_TYPE_DELETE))
2644  continue;
2645  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2646  NULL, NULL, NULL))
2647  continue;
2648 
2649  LocTriggerData.tg_trigger = trigger;
2650  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2651  i,
2652  relinfo->ri_TrigFunctions,
2653  relinfo->ri_TrigInstrument,
2654  GetPerTupleMemoryContext(estate));
2655 
2656  if (newtuple)
2657  ereport(ERROR,
2658  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2659  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2660  }
2661 }
2662 
2663 void
2664 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2665  TransitionCaptureState *transition_capture)
2666 {
2667  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2668 
2669  if (trigdesc && trigdesc->trig_delete_after_statement)
2670  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2671  TRIGGER_EVENT_DELETE,
2672  false, NULL, NULL, NIL, NULL, transition_capture,
2673  false);
2674 }
2675 
2676 /*
2677  * Execute BEFORE ROW DELETE triggers.
2678  *
2679  * True indicates the caller can proceed with the delete. False indicates the
2680  * caller must suppress the delete; additionally, if requested, we pass back
2681  * the concurrently updated tuple, if any.
2682  */
2683 bool
2684 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2685  ResultRelInfo *relinfo,
2686  ItemPointer tupleid,
2687  HeapTuple fdw_trigtuple,
2688  TupleTableSlot **epqslot,
2689  TM_Result *tmresult,
2690  TM_FailureData *tmfd)
2691 {
2692  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2693  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2694  bool result = true;
2695  TriggerData LocTriggerData = {0};
2696  HeapTuple trigtuple;
2697  bool should_free = false;
2698  int i;
2699 
2700  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2701  if (fdw_trigtuple == NULL)
2702  {
2703  TupleTableSlot *epqslot_candidate = NULL;
2704 
2705  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2706  LockTupleExclusive, slot, &epqslot_candidate,
2707  tmresult, tmfd))
2708  return false;
2709 
2710  /*
2711  * If the tuple was concurrently updated and the caller of this
2712  * function requested for the updated tuple, skip the trigger
2713  * execution.
2714  */
2715  if (epqslot_candidate != NULL && epqslot != NULL)
2716  {
2717  *epqslot = epqslot_candidate;
2718  return false;
2719  }
2720 
2721  trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2722  }
2723  else
2724  {
2725  trigtuple = fdw_trigtuple;
2726  ExecForceStoreHeapTuple(trigtuple, slot, false);
2727  }
2728 
2729  LocTriggerData.type = T_TriggerData;
2730  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2731  TRIGGER_EVENT_ROW |
2732  TRIGGER_EVENT_BEFORE;
2733  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2734  for (i = 0; i < trigdesc->numtriggers; i++)
2735  {
2736  HeapTuple newtuple;
2737  Trigger *trigger = &trigdesc->triggers[i];
2738 
2739  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2740  TRIGGER_TYPE_ROW,
2741  TRIGGER_TYPE_BEFORE,
2742  TRIGGER_TYPE_DELETE))
2743  continue;
2744  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2745  NULL, slot, NULL))
2746  continue;
2747 
2748  LocTriggerData.tg_trigslot = slot;
2749  LocTriggerData.tg_trigtuple = trigtuple;
2750  LocTriggerData.tg_trigger = trigger;
2751  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2752  i,
2753  relinfo->ri_TrigFunctions,
2754  relinfo->ri_TrigInstrument,
2755  GetPerTupleMemoryContext(estate));
2756  if (newtuple == NULL)
2757  {
2758  result = false; /* tell caller to suppress delete */
2759  break;
2760  }
2761  if (newtuple != trigtuple)
2762  heap_freetuple(newtuple);
2763  }
2764  if (should_free)
2765  heap_freetuple(trigtuple);
2766 
2767  return result;
2768 }
2769 
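A simplified, hedged sketch of the caller-side pattern implied by the comment above (the real logic in nodeModifyTable.c is more involved; the executor-style variables here are assumed to be in scope):

if (resultRelInfo->ri_TrigDesc &&
	resultRelInfo->ri_TrigDesc->trig_delete_before_row)
{
	TupleTableSlot *epqslot = NULL;

	if (!ExecBRDeleteTriggers(estate, epqstate, resultRelInfo, tupleid,
							  NULL /* fdw_trigtuple */, &epqslot,
							  NULL, NULL))
		return NULL;			/* a trigger suppressed the DELETE */

	/* If set, epqslot holds the concurrently updated row to re-check. */
}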
2770 /*
2771  * Note: is_crosspart_update must be true if the DELETE is being performed
2772  * as part of a cross-partition update.
2773  */
2774 void
2775 ExecARDeleteTriggers(EState *estate,
2776  ResultRelInfo *relinfo,
2777  ItemPointer tupleid,
2778  HeapTuple fdw_trigtuple,
2779  TransitionCaptureState *transition_capture,
2780  bool is_crosspart_update)
2781 {
2782  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2783 
2784  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2785  (transition_capture && transition_capture->tcs_delete_old_table))
2786  {
2787  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2788 
2789  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2790  if (fdw_trigtuple == NULL)
2791  GetTupleForTrigger(estate,
2792  NULL,
2793  relinfo,
2794  tupleid,
2795  LockTupleExclusive,
2796  slot,
2797  NULL,
2798  NULL,
2799  NULL);
2800  else
2801  ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2802 
2803  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2804  TRIGGER_EVENT_DELETE,
2805  true, slot, NULL, NIL, NULL,
2806  transition_capture,
2807  is_crosspart_update);
2808  }
2809 }
2810 
2811 bool
2812 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2813  HeapTuple trigtuple)
2814 {
2815  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2816  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2817  TriggerData LocTriggerData = {0};
2818  int i;
2819 
2820  LocTriggerData.type = T_TriggerData;
2821  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2822  TRIGGER_EVENT_ROW |
2823  TRIGGER_EVENT_INSTEAD;
2824  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2825 
2826  ExecForceStoreHeapTuple(trigtuple, slot, false);
2827 
2828  for (i = 0; i < trigdesc->numtriggers; i++)
2829  {
2830  HeapTuple rettuple;
2831  Trigger *trigger = &trigdesc->triggers[i];
2832 
2833  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2834  TRIGGER_TYPE_ROW,
2835  TRIGGER_TYPE_INSTEAD,
2836  TRIGGER_TYPE_DELETE))
2837  continue;
2838  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2839  NULL, slot, NULL))
2840  continue;
2841 
2842  LocTriggerData.tg_trigslot = slot;
2843  LocTriggerData.tg_trigtuple = trigtuple;
2844  LocTriggerData.tg_trigger = trigger;
2845  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2846  i,
2847  relinfo->ri_TrigFunctions,
2848  relinfo->ri_TrigInstrument,
2849  GetPerTupleMemoryContext(estate));
2850  if (rettuple == NULL)
2851  return false; /* Delete was suppressed */
2852  if (rettuple != trigtuple)
2853  heap_freetuple(rettuple);
2854  }
2855  return true;
2856 }
2857 
2858 void
2859 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2860 {
2861  TriggerDesc *trigdesc;
2862  int i;
2863  TriggerData LocTriggerData = {0};
2864  Bitmapset *updatedCols;
2865 
2866  trigdesc = relinfo->ri_TrigDesc;
2867 
2868  if (trigdesc == NULL)
2869  return;
2870  if (!trigdesc->trig_update_before_statement)
2871  return;
2872 
2873  /* no-op if we already fired BS triggers in this context */
2874  if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2875  CMD_UPDATE))
2876  return;
2877 
2878  /* statement-level triggers operate on the parent table */
2879  Assert(relinfo->ri_RootResultRelInfo == NULL);
2880 
2881  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2882 
2883  LocTriggerData.type = T_TriggerData;
2884  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2885  TRIGGER_EVENT_BEFORE;
2886  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2887  LocTriggerData.tg_updatedcols = updatedCols;
2888  for (i = 0; i < trigdesc->numtriggers; i++)
2889  {
2890  Trigger *trigger = &trigdesc->triggers[i];
2891  HeapTuple newtuple;
2892 
2893  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2894  TRIGGER_TYPE_STATEMENT,
2895  TRIGGER_TYPE_BEFORE,
2896  TRIGGER_TYPE_UPDATE))
2897  continue;
2898  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2899  updatedCols, NULL, NULL))
2900  continue;
2901 
2902  LocTriggerData.tg_trigger = trigger;
2903  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2904  i,
2905  relinfo->ri_TrigFunctions,
2906  relinfo->ri_TrigInstrument,
2907  GetPerTupleMemoryContext(estate));
2908 
2909  if (newtuple)
2910  ereport(ERROR,
2911  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2912  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2913  }
2914 }
2915 
2916 void
2917 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2918  TransitionCaptureState *transition_capture)
2919 {
2920  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2921 
2922  /* statement-level triggers operate on the parent table */
2923  Assert(relinfo->ri_RootResultRelInfo == NULL);
2924 
2925  if (trigdesc && trigdesc->trig_update_after_statement)
2926  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2927  TRIGGER_EVENT_UPDATE,
2928  false, NULL, NULL, NIL,
2929  ExecGetAllUpdatedCols(relinfo, estate),
2930  transition_capture,
2931  false);
2932 }
2933 
2934 bool
2935 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2936  ResultRelInfo *relinfo,
2937  ItemPointer tupleid,
2938  HeapTuple fdw_trigtuple,
2939  TupleTableSlot *newslot,
2940  TM_Result *tmresult,
2941  TM_FailureData *tmfd)
2942 {
2943  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2944  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2945  HeapTuple newtuple = NULL;
2946  HeapTuple trigtuple;
2947  bool should_free_trig = false;
2948  bool should_free_new = false;
2949  TriggerData LocTriggerData = {0};
2950  int i;
2951  Bitmapset *updatedCols;
2952  LockTupleMode lockmode;
2953 
2954  /* Determine lock mode to use */
2955  lockmode = ExecUpdateLockMode(estate, relinfo);
2956 
2957  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2958  if (fdw_trigtuple == NULL)
2959  {
2960  TupleTableSlot *epqslot_candidate = NULL;
2961 
2962  /* get a copy of the on-disk tuple we are planning to update */
2963  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2964  lockmode, oldslot, &epqslot_candidate,
2965  tmresult, tmfd))
2966  return false; /* cancel the update action */
2967 
2968  /*
2969  * In READ COMMITTED isolation level it's possible that target tuple
2970  * was changed due to concurrent update. In that case we have a raw
2971  * subplan output tuple in epqslot_candidate, and need to form a new
2972  * insertable tuple using ExecGetUpdateNewTuple to replace the one we
2973  * received in newslot. Neither we nor our callers have any further
2974  * interest in the passed-in tuple, so it's okay to overwrite newslot
2975  * with the newer data.
2976  */
2977  if (epqslot_candidate != NULL)
2978  {
2979  TupleTableSlot *epqslot_clean;
2980 
2981  epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
2982  oldslot);
2983 
2984  /*
2985  * Typically, the caller's newslot was also generated by
2986  * ExecGetUpdateNewTuple, so that epqslot_clean will be the same
2987  * slot and copying is not needed. But do the right thing if it
2988  * isn't.
2989  */
2990  if (unlikely(newslot != epqslot_clean))
2991  ExecCopySlot(newslot, epqslot_clean);
2992 
2993  /*
2994  * At this point newslot contains a virtual tuple that may
2995  * reference some fields of oldslot's tuple in some disk buffer.
2996  * If that tuple is in a different page than the original target
2997  * tuple, then our only pin on that buffer is oldslot's, and we're
2998  * about to release it. Hence we'd better materialize newslot to
2999  * ensure it doesn't contain references into an unpinned buffer.
3000  * (We'd materialize it below anyway, but too late for safety.)
3001  */
3002  ExecMaterializeSlot(newslot);
3003  }
3004 
3005  /*
3006  * Here we convert oldslot to a materialized slot holding trigtuple.
3007  * Neither slot passed to the triggers will hold any buffer pin.
3008  */
3009  trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
3010  }
3011  else
3012  {
3013  /* Put the FDW-supplied tuple into oldslot to unify the cases */
3014  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3015  trigtuple = fdw_trigtuple;
3016  }
3017 
3018  LocTriggerData.type = T_TriggerData;
3019  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3020  TRIGGER_EVENT_ROW |
3021  TRIGGER_EVENT_BEFORE;
3022  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3023  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
3024  LocTriggerData.tg_updatedcols = updatedCols;
3025  for (i = 0; i < trigdesc->numtriggers; i++)
3026  {
3027  Trigger *trigger = &trigdesc->triggers[i];
3028  HeapTuple oldtuple;
3029 
3030  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3031  TRIGGER_TYPE_ROW,
3032  TRIGGER_TYPE_BEFORE,
3033  TRIGGER_TYPE_UPDATE))
3034  continue;
3035  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3036  updatedCols, oldslot, newslot))
3037  continue;
3038 
3039  if (!newtuple)
3040  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
3041 
3042  LocTriggerData.tg_trigslot = oldslot;
3043  LocTriggerData.tg_trigtuple = trigtuple;
3044  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3045  LocTriggerData.tg_newslot = newslot;
3046  LocTriggerData.tg_trigger = trigger;
3047  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3048  i,
3049  relinfo->ri_TrigFunctions,
3050  relinfo->ri_TrigInstrument,
3051  GetPerTupleMemoryContext(estate));
3052 
3053  if (newtuple == NULL)
3054  {
3055  if (should_free_trig)
3056  heap_freetuple(trigtuple);
3057  if (should_free_new)
3058  heap_freetuple(oldtuple);
3059  return false; /* "do nothing" */
3060  }
3061  else if (newtuple != oldtuple)
3062  {
3063  ExecForceStoreHeapTuple(newtuple, newslot, false);
3064 
3065  /*
3066  * If the tuple returned by the trigger (i.e. the tuple being stored) is the
3067  * old row version, and the heap tuple passed to the trigger was allocated
3068  * locally, materialize the slot. Otherwise we might free the tuple while it
3069  * is still referenced by the slot.
3070  */
3071  if (should_free_trig && newtuple == trigtuple)
3072  ExecMaterializeSlot(newslot);
3073 
3074  if (should_free_new)
3075  heap_freetuple(oldtuple);
3076 
3077  /* signal tuple should be re-fetched if used */
3078  newtuple = NULL;
3079  }
3080  }
3081  if (should_free_trig)
3082  heap_freetuple(trigtuple);
3083 
3084  return true;
3085 }
3086 
3087 /*
3088  * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
3089  * and destination partitions, respectively, of a cross-partition update of
3090  * the root partitioned table mentioned in the query, given by 'relinfo'.
3091  * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
3092  * partition, and 'newslot' contains the "new" tuple in the destination
3093  * partition. This interface makes it possible to support the requirements of
3094  * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
3095  * that case.
3096  */
3097 void
3098 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
3099  ResultRelInfo *src_partinfo,
3100  ResultRelInfo *dst_partinfo,
3101  ItemPointer tupleid,
3102  HeapTuple fdw_trigtuple,
3103  TupleTableSlot *newslot,
3104  List *recheckIndexes,
3105  TransitionCaptureState *transition_capture,
3106  bool is_crosspart_update)
3107 {
3108  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3109 
3110  if ((trigdesc && trigdesc->trig_update_after_row) ||
3111  (transition_capture &&
3112  (transition_capture->tcs_update_old_table ||
3113  transition_capture->tcs_update_new_table)))
3114  {
3115  /*
3116  * Note: if the UPDATE is converted into a DELETE+INSERT as part of
3117  * update-partition-key operation, then this function is also called
3118  * separately for DELETE and INSERT to capture transition table rows.
3119  * In such a case, either the old tuple or the new tuple can be NULL.
3120  */
3121  TupleTableSlot *oldslot;
3122  ResultRelInfo *tupsrc;
3123 
3124  Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
3125  !is_crosspart_update);
3126 
3127  tupsrc = src_partinfo ? src_partinfo : relinfo;
3128  oldslot = ExecGetTriggerOldSlot(estate, tupsrc);
3129 
3130  if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
3131  GetTupleForTrigger(estate,
3132  NULL,
3133  tupsrc,
3134  tupleid,
3135  LockTupleExclusive,
3136  oldslot,
3137  NULL,
3138  NULL,
3139  NULL);
3140  else if (fdw_trigtuple != NULL)
3141  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3142  else
3143  ExecClearTuple(oldslot);
3144 
3145  AfterTriggerSaveEvent(estate, relinfo,
3146  src_partinfo, dst_partinfo,
3147  TRIGGER_EVENT_UPDATE,
3148  true,
3149  oldslot, newslot, recheckIndexes,
3150  ExecGetAllUpdatedCols(relinfo, estate),
3151  transition_capture,
3152  is_crosspart_update);
3153  }
3154 }
3155 
3156 bool
3157 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
3158  HeapTuple trigtuple, TupleTableSlot *newslot)
3159 {
3160  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3161  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
3162  HeapTuple newtuple = NULL;
3163  bool should_free;
3164  TriggerData LocTriggerData = {0};
3165  int i;
3166 
3167  LocTriggerData.type = T_TriggerData;
3168  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3169  TRIGGER_EVENT_ROW |
3170  TRIGGER_EVENT_INSTEAD;
3171  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3172 
3173  ExecForceStoreHeapTuple(trigtuple, oldslot, false);
3174 
3175  for (i = 0; i < trigdesc->numtriggers; i++)
3176  {
3177  Trigger *trigger = &trigdesc->triggers[i];
3178  HeapTuple oldtuple;
3179 
3180  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3181  TRIGGER_TYPE_ROW,
3182  TRIGGER_TYPE_INSTEAD,
3183  TRIGGER_TYPE_UPDATE))
3184  continue;
3185  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3186  NULL, oldslot, newslot))
3187  continue;
3188 
3189  if (!newtuple)
3190  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
3191 
3192  LocTriggerData.tg_trigslot = oldslot;
3193  LocTriggerData.tg_trigtuple = trigtuple;
3194  LocTriggerData.tg_newslot = newslot;
3195  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3196 
3197  LocTriggerData.tg_trigger = trigger;
3198  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3199  i,
3200  relinfo->ri_TrigFunctions,
3201  relinfo->ri_TrigInstrument,
3202  GetPerTupleMemoryContext(estate));
3203  if (newtuple == NULL)
3204  {
3205  return false; /* "do nothing" */
3206  }
3207  else if (newtuple != oldtuple)
3208  {
3209  ExecForceStoreHeapTuple(newtuple, newslot, false);
3210 
3211  if (should_free)
3212  heap_freetuple(oldtuple);
3213 
3214  /* signal tuple should be re-fetched if used */
3215  newtuple = NULL;
3216  }
3217  }
3218 
3219  return true;
3220 }
3221 
3222 void
3223 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3224 {
3225  TriggerDesc *trigdesc;
3226  int i;
3227  TriggerData LocTriggerData = {0};
3228 
3229  trigdesc = relinfo->ri_TrigDesc;
3230 
3231  if (trigdesc == NULL)
3232  return;
3233  if (!trigdesc->trig_truncate_before_statement)
3234  return;
3235 
3236  LocTriggerData.type = T_TriggerData;
3237  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3238  TRIGGER_EVENT_BEFORE;
3239  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3240 
3241  for (i = 0; i < trigdesc->numtriggers; i++)
3242  {
3243  Trigger *trigger = &trigdesc->triggers[i];
3244  HeapTuple newtuple;
3245 
3246  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3247  TRIGGER_TYPE_STATEMENT,
3248  TRIGGER_TYPE_BEFORE,
3249  TRIGGER_TYPE_TRUNCATE))
3250  continue;
3251  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3252  NULL, NULL, NULL))
3253  continue;
3254 
3255  LocTriggerData.tg_trigger = trigger;
3256  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3257  i,
3258  relinfo->ri_TrigFunctions,
3259  relinfo->ri_TrigInstrument,
3260  GetPerTupleMemoryContext(estate));
3261 
3262  if (newtuple)
3263  ereport(ERROR,
3264  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3265  errmsg("BEFORE STATEMENT trigger cannot return a value")));
3266  }
3267 }
3268 
3269 void
3270 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3271 {
3272  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3273 
3274  if (trigdesc && trigdesc->trig_truncate_after_statement)
3275  AfterTriggerSaveEvent(estate, relinfo,
3276  NULL, NULL,
3277  TRIGGER_EVENT_TRUNCATE,
3278  false, NULL, NULL, NIL, NULL, NULL,
3279  false);
3280 }
3281 
3282 
3283 /*
3284  * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3285  */
3286 static bool
3287 GetTupleForTrigger(EState *estate,
3288  EPQState *epqstate,
3289  ResultRelInfo *relinfo,
3290  ItemPointer tid,
3291  LockTupleMode lockmode,
3292  TupleTableSlot *oldslot,
3293  TupleTableSlot **epqslot,
3294  TM_Result *tmresultp,
3295  TM_FailureData *tmfdp)
3296 {
3297  Relation relation = relinfo->ri_RelationDesc;
3298 
3299  if (epqslot != NULL)
3300  {
3301  TM_Result test;
3302  TM_FailureData tmfd;
3303  int lockflags = 0;
3304 
3305  *epqslot = NULL;
3306 
3307  /* caller must pass an epqstate if EvalPlanQual is possible */
3308  Assert(epqstate != NULL);
3309 
3310  /*
3311  * lock tuple for update
3312  */
3313  if (!IsolationUsesXactSnapshot())
3314  lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
3315  test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
3316  estate->es_output_cid,
3317  lockmode, LockWaitBlock,
3318  lockflags,
3319  &tmfd);
3320 
3321  /* Let the caller know about the status of this operation */
3322  if (tmresultp)
3323  *tmresultp = test;
3324  if (tmfdp)
3325  *tmfdp = tmfd;
3326 
3327  switch (test)
3328  {
3329  case TM_SelfModified:
3330 
3331  /*
3332  * The target tuple was already updated or deleted by the
3333  * current command, or by a later command in the current
3334  * transaction. We ignore the tuple in the former case, and
3335  * throw error in the latter case, for the same reasons
3336  * enumerated in ExecUpdate and ExecDelete in
3337  * nodeModifyTable.c.
3338  */
3339  if (tmfd.cmax != estate->es_output_cid)
3340  ereport(ERROR,
3341  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3342  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3343  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3344 
3345  /* treat it as deleted; do not process */
3346  return false;
3347 
3348  case TM_Ok:
3349  if (tmfd.traversed)
3350  {
3351  /*
3352  * Recheck the tuple using EPQ. For MERGE, we leave this
3353  * to the caller (it must do additional rechecking, and
3354  * might end up executing a different action entirely).
3355  */
3356  if (estate->es_plannedstmt->commandType == CMD_MERGE)
3357  {
3358  if (tmresultp)
3359  *tmresultp = TM_Updated;
3360  return false;
3361  }
3362 
3363  *epqslot = EvalPlanQual(epqstate,
3364  relation,
3365  relinfo->ri_RangeTableIndex,
3366  oldslot);
3367 
3368  /*
3369  * If PlanQual failed for updated tuple - we must not
3370  * process this tuple!
3371  */
3372  if (TupIsNull(*epqslot))
3373  {
3374  *epqslot = NULL;
3375  return false;
3376  }
3377  }
3378  break;
3379 
3380  case TM_Updated:
3381  if (IsolationUsesXactSnapshot())
3382  ereport(ERROR,
3383  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3384  errmsg("could not serialize access due to concurrent update")));
3385  elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3386  break;
3387 
3388  case TM_Deleted:
3389  if (IsolationUsesXactSnapshot())
3390  ereport(ERROR,
3391  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3392  errmsg("could not serialize access due to concurrent delete")));
3393  /* tuple was deleted */
3394  return false;
3395 
3396  case TM_Invisible:
3397  elog(ERROR, "attempted to lock invisible tuple");
3398  break;
3399 
3400  default:
3401  elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3402  return false; /* keep compiler quiet */
3403  }
3404  }
3405  else
3406  {
3407  /*
3408  * We expect the tuple to be present, thus very simple error handling
3409  * suffices.
3410  */
3411  if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3412  oldslot))
3413  elog(ERROR, "failed to fetch tuple for trigger");
3414  }
3415 
3416  return true;
3417 }
3418 
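/*
 * Editorial summary of the two paths above: with a non-NULL "epqslot" (the
 * BEFORE ROW trigger case) the target row is locked via table_tuple_lock()
 * and, if it turns out to have been concurrently updated, re-checked with
 * EvalPlanQual; with a NULL "epqslot" (the AFTER ROW trigger case) the row
 * version named by "tid" is simply fetched under SnapshotAny, since it is
 * expected to exist.
 */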
3419 /*
3420  * Is trigger enabled to fire?
3421  */
3422 static bool
3423 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3424  Trigger *trigger, TriggerEvent event,
3425  Bitmapset *modifiedCols,
3426  TupleTableSlot *oldslot, TupleTableSlot *newslot)
3427 {
3428  /* Check replication-role-dependent enable state */
3429  if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3430  {
3431  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3432  trigger->tgenabled == TRIGGER_DISABLED)
3433  return false;
3434  }
3435  else /* ORIGIN or LOCAL role */
3436  {
3437  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3438  trigger->tgenabled == TRIGGER_DISABLED)
3439  return false;
3440  }
3441 
3442  /*
3443  * Check for column-specific trigger (only possible for UPDATE, and in
3444  * fact we *must* ignore tgattr for other event types)
3445  */
3446  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3447  {
3448  int i;
3449  bool modified;
3450 
3451  modified = false;
3452  for (i = 0; i < trigger->tgnattr; i++)
3453  {
3454  if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3455  modifiedCols))
3456  {
3457  modified = true;
3458  break;
3459  }
3460  }
3461  if (!modified)
3462  return false;
3463  }
3464 
3465  /* Check for WHEN clause */
3466  if (trigger->tgqual)
3467  {
3468  ExprState **predicate;
3469  ExprContext *econtext;
3470  MemoryContext oldContext;
3471  int i;
3472 
3473  Assert(estate != NULL);
3474 
3475  /*
3476  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3477  * matching element of relinfo->ri_TrigWhenExprs[]
3478  */
3479  i = trigger - relinfo->ri_TrigDesc->triggers;
3480  predicate = &relinfo->ri_TrigWhenExprs[i];
3481 
3482  /*
3483  * If first time through for this WHEN expression, build expression
3484  * nodetrees for it. Keep them in the per-query memory context so
3485  * they'll survive throughout the query.
3486  */
3487  if (*predicate == NULL)
3488  {
3489  Node *tgqual;
3490 
3491  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3492  tgqual = stringToNode(trigger->tgqual);
3493  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3494  ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3495  ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3496  /* ExecPrepareQual wants implicit-AND form */
3497  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3498  *predicate = ExecPrepareQual((List *) tgqual, estate);
3499  MemoryContextSwitchTo(oldContext);
3500  }
3501 
3502  /*
3503  * We will use the EState's per-tuple context for evaluating WHEN
3504  * expressions (creating it if it's not already there).
3505  */
3506  econtext = GetPerTupleExprContext(estate);
3507 
3508  /*
3509  * Finally evaluate the expression, making the old and/or new tuples
3510  * available as INNER_VAR/OUTER_VAR respectively.
3511  */
3512  econtext->ecxt_innertuple = oldslot;
3513  econtext->ecxt_outertuple = newslot;
3514  if (!ExecQual(*predicate, econtext))
3515  return false;
3516  }
3517 
3518  return true;
3519 }
3520 
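For context, a hedged example (editorial only) of the WHEN-clause machinery that TriggerEnabled implements:

/*
 * Illustration: for a trigger defined roughly as
 *
 *     CREATE TRIGGER audit_balance BEFORE UPDATE ON accounts
 *         FOR EACH ROW
 *         WHEN (OLD.balance IS DISTINCT FROM NEW.balance)
 *         EXECUTE FUNCTION audit();
 *
 * the WHEN expression is stored in pg_trigger.tgqual with OLD and NEW
 * referenced as PRS2_OLD_VARNO and PRS2_NEW_VARNO.  On first use,
 * TriggerEnabled rewrites those to INNER_VAR and OUTER_VAR and caches the
 * prepared qual in ri_TrigWhenExprs; it then evaluates the qual with the old
 * tuple as ecxt_innertuple and the new tuple as ecxt_outertuple, so the
 * trigger is skipped whenever the balance column did not actually change.
 */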
3521 
3522 /* ----------
3523  * After-trigger stuff
3524  *
3525  * The AfterTriggersData struct holds data about pending AFTER trigger events
3526  * during the current transaction tree. (BEFORE triggers are fired
3527  * immediately so we don't need any persistent state about them.) The struct
3528  * and most of its subsidiary data are kept in TopTransactionContext; however
3529  * some data that can be discarded sooner appears in the CurTransactionContext
3530  * of the relevant subtransaction. Also, the individual event records are
3531  * kept in a separate sub-context of TopTransactionContext. This is done
3532  * mainly so that it's easy to tell from a memory context dump how much space
3533  * is being eaten by trigger events.
3534  *
3535  * Because the list of pending events can grow large, we go to some
3536  * considerable effort to minimize per-event memory consumption. The event
3537  * records are grouped into chunks and common data for similar events in the
3538  * same chunk is only stored once.
3539  *
3540  * XXX We need to be able to save the per-event data in a file if it grows too
3541  * large.
3542  * ----------
3543  */
3544 
3545 /* Per-trigger SET CONSTRAINT status */
3546 typedef struct SetConstraintTriggerData
3547 {
3548  Oid sct_tgoid;
3549  bool sct_tgisdeferred;
3550 } SetConstraintTriggerData;
3551 
3552 typedef struct SetConstraintTriggerData *SetConstraintTrigger;
3553 
3554 /*
3555  * SET CONSTRAINT intra-transaction status.
3556  *
3557  * We make this a single palloc'd object so it can be copied and freed easily.
3558  *
3559  * all_isset and all_isdeferred are used to keep track
3560  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3561  *
3562  * trigstates[] stores per-trigger tgisdeferred settings.
3563  */
3564 typedef struct SetConstraintStateData
3565 {
3566  bool all_isset;
3567  bool all_isdeferred;
3568  int numstates; /* number of trigstates[] entries in use */
3569  int numalloc; /* allocated size of trigstates[] */
3570  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3571 } SetConstraintStateData;
3572 
3573 typedef SetConstraintStateData *SetConstraintState;
3574 
3575 
3576 /*
3577  * Per-trigger-event data
3578  *
3579  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3580  * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions.
3581  * Each event record also has an associated AfterTriggerSharedData that is
3582  * shared across all instances of similar events within a "chunk".
3583  *
3584  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3585  * fields. Updates of regular tables use two; inserts and deletes of regular
3586  * tables use one; foreign tables always use zero and save the tuple(s) to a
3587  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3588  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3589  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3590  * tuple(s). This permits storing tuples once regardless of the number of
3591  * row-level triggers on a foreign table.
3592  *
3593  * When updates on partitioned tables cause rows to move between partitions,
3594  * the OIDs of both partitions are stored too, so that the tuples can be
3595  * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross-
3596  * partition update").
3597  *
3598  * Note that we need triggers on foreign tables to be fired in exactly the
3599  * order they were queued, so that the tuples come out of the tuplestore in
3600  * the right order. To ensure that, we forbid deferrable (constraint)
3601  * triggers on foreign tables. This also ensures that such triggers do not
3602  * get deferred into outer trigger query levels, meaning that it's okay to
3603  * destroy the tuplestore at the end of the query level.
3604  *
3605  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3606  * require no ctid field. We lack the flag bit space to neatly represent that
3607  * distinct case, and it seems unlikely to be worth much trouble.
3608  *
3609  * Note: ats_firing_id is initially zero and is set to something else when
3610  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3611  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3612  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3613  * because all instances of the same type of event in a given event list will
3614  * be fired at the same time, if they were queued between the same firing
3615  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3616  * a new event to an existing AfterTriggerSharedData record.
3617  */
3618 typedef uint32 TriggerFlags;
3619 
3620 #define AFTER_TRIGGER_OFFSET 0x07FFFFFF /* must be low-order bits */
3621 #define AFTER_TRIGGER_DONE 0x80000000
3622 #define AFTER_TRIGGER_IN_PROGRESS 0x40000000
3623 /* bits describing the size and tuple sources of this event */
3624 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3625 #define AFTER_TRIGGER_FDW_FETCH 0x20000000
3626 #define AFTER_TRIGGER_1CTID 0x10000000
3627 #define AFTER_TRIGGER_2CTID 0x30000000
3628 #define AFTER_TRIGGER_CP_UPDATE 0x08000000
3629 #define AFTER_TRIGGER_TUP_BITS 0x38000000
3630 typedef struct AfterTriggerSharedData *AfterTriggerShared;
3631 typedef struct AfterTriggerEventData *AfterTriggerEvent;
3632 typedef struct AfterTriggerSharedData
3633 {
3634  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3635  Oid ats_tgoid; /* the trigger's ID */
3636  Oid ats_relid; /* the relation it's on */
3637  CommandId ats_firing_id; /* ID for firing cycle */
3638  struct AfterTriggersTableData *ats_table; /* transition table access */
3639  Bitmapset *ats_modifiedcols; /* modified columns */
3640 } AfterTriggerSharedData;
3641 
3644 typedef struct AfterTriggerEventData
3645 {
3646  TriggerFlags ate_flags; /* status bits and offset to shared data */
3647  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3648  ItemPointerData ate_ctid2; /* new updated tuple */
3649 
3650  /*
3651  * During a cross-partition update of a partitioned table, we also store
3652  * the OIDs of source and destination partitions that are needed to fetch
3653  * the old (ctid1) and the new tuple (ctid2) from, respectively.
3654  */
3655  Oid ate_src_part;
3656  Oid ate_dst_part;
3657 } AfterTriggerEventData;
3658 
3659 /* AfterTriggerEventData, minus ate_src_part, ate_dst_part */
3660 typedef struct AfterTriggerEventDataNoOids
3661 {
3662  TriggerFlags ate_flags;
3663  ItemPointerData ate_ctid1;
3664  ItemPointerData ate_ctid2;
3665 } AfterTriggerEventDataNoOids;
3666 
3667 /* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */
3668 typedef struct AfterTriggerEventDataOneCtid
3669 {
3670  TriggerFlags ate_flags; /* status bits and offset to shared data */
3671  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3672 } AfterTriggerEventDataOneCtid;
3673 
3674 /* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */
3675 typedef struct AfterTriggerEventDataZeroCtids
3676 {
3677  TriggerFlags ate_flags; /* status bits and offset to shared data */
3678 } AfterTriggerEventDataZeroCtids;
3679 
3680 #define SizeofTriggerEvent(evt) \
3681  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \
3682  sizeof(AfterTriggerEventData) : \
3683  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3684  sizeof(AfterTriggerEventDataNoOids) : \
3685  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3686  sizeof(AfterTriggerEventDataOneCtid) : \
3687  sizeof(AfterTriggerEventDataZeroCtids))))
3688 
3689 #define GetTriggerSharedData(evt) \
3690  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3691 
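The macros above pack two things into ate_flags: status bits in the high-order positions and, in the low-order AFTER_TRIGGER_OFFSET bits, the byte distance from the event record to its shared record near the end of the chunk, which is exactly what GetTriggerSharedData() decodes. A minimal standalone sketch of that packing trick (illustrative only; every name and size below is hypothetical and not taken from trigger.c):

/*
 * Standalone sketch (not part of trigger.c): pack status bits and a byte
 * offset into one 32-bit word, the way ate_flags carries DONE/IN_PROGRESS
 * alongside the distance from an event record to its shared record.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_OFFSET_MASK 0x07FFFFFF	/* low-order bits hold the offset */
#define DEMO_DONE        0x80000000	/* high-order bits hold status */

int
main(void)
{
	static uint32_t buf[256];			/* stand-in for one event chunk */
	uint32_t   *event = &buf[0];		/* event record at the chunk start */
	char	   *shared = (char *) &buf[250];	/* shared data near the end */
	uint32_t	offset = (uint32_t) (shared - (char *) event);

	*event = DEMO_DONE | (offset & DEMO_OFFSET_MASK);

	/* later, the shared record is reachable from the event word alone */
	printf("shared record lives %u bytes past the event\n",
		   (unsigned) (*event & DEMO_OFFSET_MASK));
	return 0;
}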
3692 /*
3693  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3694  * larger chunks (a slightly more sophisticated version of an expansible
3695  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3696  * AfterTriggerEventData records; the space between endfree and endptr is
3697  * occupied by AfterTriggerSharedData records.
3698  */
3699 typedef struct AfterTriggerEventChunk
3700 {
3701  struct AfterTriggerEventChunk *next; /* list link */
3702  char *freeptr; /* start of free space in chunk */
3703  char *endfree; /* end of free space in chunk */
3704  char *endptr; /* end of chunk */
3705  /* event data follows here */
3706 } AfterTriggerEventChunk;
3707 
3708 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3709 
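To illustrate the two-ended layout that CHUNK_DATA_START() implies, a hypothetical helper (not a trigger.c function) could test whether a chunk still has room for one more event plus, pessimistically, one more shared record; afterTriggerAddEvent() below performs the same test inline:

/* Illustrative only: can this chunk accept another event record? */
static bool
demo_chunk_has_room(const AfterTriggerEventChunk *chunk, Size eventsize)
{
	/* events grow up from freeptr; shared records grow down from endfree */
	return (Size) (chunk->endfree - chunk->freeptr) >=
		eventsize + sizeof(AfterTriggerSharedData);
}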
3710 /* A list of events */
3711 typedef struct AfterTriggerEventList
3712 {
3713  AfterTriggerEventChunk *head;
3714  AfterTriggerEventChunk *tail;
3715  char *tailfree; /* freeptr of tail chunk */
3716 } AfterTriggerEventList;
3717 
3718 /* Macros to help in iterating over a list of events */
3719 #define for_each_chunk(cptr, evtlist) \
3720  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3721 #define for_each_event(eptr, cptr) \
3722  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3723  (char *) eptr < (cptr)->freeptr; \
3724  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3725 /* Use this if no special per-chunk processing is needed */
3726 #define for_each_event_chunk(eptr, cptr, evtlist) \
3727  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3728 
3729 /* Macros for iterating from a start point that might not be list start */
3730 #define for_each_chunk_from(cptr) \
3731  for (; cptr != NULL; cptr = cptr->next)
3732 #define for_each_event_from(eptr, cptr) \
3733  for (; \
3734  (char *) eptr < (cptr)->freeptr; \
3735  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3736 
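As a usage sketch of these macros (the helper itself is hypothetical and not part of trigger.c; trigger.c walks event lists this way in afterTriggerMarkEvents() and afterTriggerInvokeEvents()):

/* Hypothetical helper, shown only to illustrate the iteration macros. */
static int
demo_count_unfinished_events(const AfterTriggerEventList *events)
{
	AfterTriggerEventChunk *chunk;
	AfterTriggerEvent event;
	int			count = 0;

	for_each_event_chunk(event, chunk, *events)
	{
		if (!(event->ate_flags & AFTER_TRIGGER_DONE))
			count++;
	}
	return count;
}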
3737 
3738 /*
3739  * All per-transaction data for the AFTER TRIGGERS module.
3740  *
3741  * AfterTriggersData has the following fields:
3742  *
3743  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3744  * We mark firable events with the current firing cycle's ID so that we can
3745  * tell which ones to work on. This ensures sane behavior if a trigger
3746  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3747  * only fire those events that weren't already scheduled for firing.
3748  *
3749  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3750  * This is saved and restored across failed subtransactions.
3751  *
3752  * events is the current list of deferred events. This is global across
3753  * all subtransactions of the current transaction. In a subtransaction
3754  * abort, we know that the events added by the subtransaction are at the
3755  * end of the list, so it is relatively easy to discard them. The event
3756  * list chunks themselves are stored in event_cxt.
3757  *
3758  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3759  * (-1 when the stack is empty).
3760  *
3761  * query_stack[query_depth] is the per-query-level data, including these fields:
3762  *
3763  * events is a list of AFTER trigger events queued by the current query.
3764  * None of these are valid until the matching AfterTriggerEndQuery call
3765  * occurs. At that point we fire immediate-mode triggers, and append any
3766  * deferred events to the main events list.
3767  *
3768  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3769  * needed by events queued by the current query. (Note: we use just one
3770  * tuplestore even though more than one foreign table might be involved.
3771  * This is okay because tuplestores don't really care what's in the tuples
3772  * they store; but it's possible that someday it'd break.)
3773  *
3774  * tables is a List of AfterTriggersTableData structs for target tables
3775  * of the current query (see below).
3776  *
3777  * maxquerydepth is just the allocated length of query_stack.
3778  *
3779  * trans_stack holds per-subtransaction data, including these fields:
3780  *
3781  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3782  * state data. Each subtransaction level that modifies that state first
3783  * saves a copy, which we use to restore the state if we abort.
3784  *
3785  * events is a copy of the events head/tail pointers,
3786  * which we use to restore those values during subtransaction abort.
3787  *
3788  * query_depth is the subtransaction-start-time value of query_depth,
3789  * which we similarly use to clean up at subtransaction abort.
3790  *
3791  * firing_counter is the subtransaction-start-time value of firing_counter.
3792  * We use this to recognize which deferred triggers were fired (or marked
3793  * for firing) within an aborted subtransaction.
3794  *
3795  * We use GetCurrentTransactionNestLevel() to determine the correct array
3796  * index in trans_stack. maxtransdepth is the number of allocated entries in
3797  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3798  * in cases where errors during subxact abort cause multiple invocations
3799  * of AfterTriggerEndSubXact() at the same nesting depth.)
3800  *
3801  * We create an AfterTriggersTableData struct for each target table of the
3802  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3803  * either transition tables or statement-level triggers. This is used to
3804  * hold the relevant transition tables, as well as info tracking whether
3805  * we already queued the statement triggers. (We use that info to prevent
3806  * firing the same statement triggers more than once per statement, or really
3807  * once per transition table set.) These structs, along with the transition
3808  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3809  * That's sufficient lifespan because we don't allow transition tables to be
3810  * used by deferrable triggers, so they only need to survive until
3811  * AfterTriggerEndQuery.
3812  */
3813 typedef struct AfterTriggersQueryData AfterTriggersQueryData;
3814 typedef struct AfterTriggersTransData AfterTriggersTransData;
3815 typedef struct AfterTriggersTableData AfterTriggersTableData;
3816 
3817 typedef struct AfterTriggersData
3818 {
3819  CommandId firing_counter; /* next firing ID to assign */
3820  SetConstraintState state; /* the active S C state */
3821  AfterTriggerEventList events; /* deferred-event list */
3822  MemoryContext event_cxt; /* memory context for events, if any */
3823 
3824  /* per-query-level data: */
3825  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3826  int query_depth; /* current index in above array */
3827  int maxquerydepth; /* allocated len of above array */
3828 
3829  /* per-subtransaction-level data: */
3830  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3831  int maxtransdepth; /* allocated len of above array */
3832 } AfterTriggersData;
3833 
3834 struct AfterTriggersQueryData
3835 {
3836  AfterTriggerEventList events; /* events pending from this query */
3837  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3838  List *tables; /* list of AfterTriggersTableData, see below */
3839 };
3840 
3841 struct AfterTriggersTransData
3842 {
3843  /* these fields are just for resetting at subtrans abort: */
3844  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3845  AfterTriggerEventList events; /* saved list pointer */
3846  int query_depth; /* saved query_depth */
3847  CommandId firing_counter; /* saved firing_counter */
3848 };
3849 
3850 struct AfterTriggersTableData
3851 {
3852  /* relid + cmdType form the lookup key for these structs: */
3853  Oid relid; /* target table's OID */
3854  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3855  bool closed; /* true when no longer OK to add tuples */
3856  bool before_trig_done; /* did we already queue BS triggers? */
3857  bool after_trig_done; /* did we already queue AS triggers? */
3858  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3859 
3860  /*
3861  * We maintain separate transition tables for UPDATE/INSERT/DELETE since
3862  * MERGE can run all three actions in a single statement. Note that UPDATE
3863  * needs both old and new transition tables whereas INSERT needs only new,
3864  * and DELETE needs only old.
3865  */
3866 
3867  /* "old" transition table for UPDATE, if any */
3868  Tuplestorestate *old_upd_tuplestore;
3869  /* "new" transition table for UPDATE, if any */
3870  Tuplestorestate *new_upd_tuplestore;
3871  /* "old" transition table for DELETE, if any */
3872  Tuplestorestate *old_del_tuplestore;
3873  /* "new" transition table for INSERT, if any */
3874  Tuplestorestate *new_ins_tuplestore;
3875 
3876  TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3877 };
3878 
3879 static AfterTriggersData afterTriggers;
3880 
3881 static void AfterTriggerExecute(EState *estate,
3882  AfterTriggerEvent event,
3883  ResultRelInfo *relInfo,
3884  ResultRelInfo *src_relInfo,
3885  ResultRelInfo *dst_relInfo,
3886  TriggerDesc *trigdesc,
3887  FmgrInfo *finfo,
3888  Instrumentation *instr,
3889  MemoryContext per_tuple_context,
3890  TupleTableSlot *trig_tuple_slot1,
3891  TupleTableSlot *trig_tuple_slot2);
3892 static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid,
3893  CmdType cmdType);
3894 static TupleTableSlot *GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
3895  TupleDesc tupdesc);
3896 static Tuplestorestate *GetAfterTriggersTransitionTable(int event,
3897  TupleTableSlot *oldslot,
3898  TupleTableSlot *newslot,
3899  TransitionCaptureState *transition_capture);
3900 static void TransitionTableAddTuple(EState *estate,
3901  TransitionCaptureState *transition_capture,
3902  ResultRelInfo *relinfo,
3903  TupleTableSlot *slot,
3904  TupleTableSlot *original_insert_tuple,
3905  Tuplestorestate *tuplestore);
3906 static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs);
3907 static SetConstraintState SetConstraintStateCreate(int numalloc);
3908 static SetConstraintState SetConstraintStateCopy(SetConstraintState origstate);
3909 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3910  Oid tgoid, bool tgisdeferred);
3911 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3912 
3913 
3914 /*
3915  * Get the FDW tuplestore for the current trigger query level, creating it
3916  * if necessary.
3917  */
3918 static Tuplestorestate *
3919 GetCurrentFDWTuplestore(void)
3920 {
3921  Tuplestorestate *ret;
3922 
3923  ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
3924  if (ret == NULL)
3925  {
3926  MemoryContext oldcxt;
3927  ResourceOwner saveResourceOwner;
3928 
3929  /*
3930  * Make the tuplestore valid until end of subtransaction. We really
3931  * only need it until AfterTriggerEndQuery().
3932  */
3933  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
3934  saveResourceOwner = CurrentResourceOwner;
3935  CurrentResourceOwner = CurTransactionResourceOwner;
3936 
3937  ret = tuplestore_begin_heap(false, false, work_mem);
3938 
3939  CurrentResourceOwner = saveResourceOwner;
3940  MemoryContextSwitchTo(oldcxt);
3941 
3942  afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
3943  }
3944 
3945  return ret;
3946 }
3947 
3948 /* ----------
3949  * afterTriggerCheckState()
3950  *
3951  * Returns true if the trigger event is actually in state DEFERRED.
3952  * ----------
3953  */
3954 static bool
3955 afterTriggerCheckState(AfterTriggerShared evtshared)
3956 {
3957  Oid tgoid = evtshared->ats_tgoid;
3958  SetConstraintState state = afterTriggers.state;
3959  int i;
3960 
3961  /*
3962  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3963  * constraints declared NOT DEFERRABLE), the state is always false.
3964  */
3965  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3966  return false;
3967 
3968  /*
3969  * If constraint state exists, SET CONSTRAINTS might have been executed
3970  * either for this trigger or for all triggers.
3971  */
3972  if (state != NULL)
3973  {
3974  /* Check for SET CONSTRAINTS for this specific trigger. */
3975  for (i = 0; i < state->numstates; i++)
3976  {
3977  if (state->trigstates[i].sct_tgoid == tgoid)
3978  return state->trigstates[i].sct_tgisdeferred;
3979  }
3980 
3981  /* Check for SET CONSTRAINTS ALL. */
3982  if (state->all_isset)
3983  return state->all_isdeferred;
3984  }
3985 
3986  /*
3987  * Otherwise return the default state for the trigger.
3988  */
3989  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3990 }
3991 
3992 /* ----------
3993  * afterTriggerCopyBitmap()
3994  *
3995  * Copy bitmap into AfterTriggerEvents memory context, which is where the after
3996  * trigger events are kept.
3997  * ----------
3998  */
3999 static Bitmapset *
4000 afterTriggerCopyBitmap(Bitmapset *src)
4001 {
4002  Bitmapset *dst;
4003  MemoryContext oldcxt;
4004 
4005  if (src == NULL)
4006  return NULL;
4007 
4008  /* Create event context if we didn't already */
4009  if (afterTriggers.event_cxt == NULL)
4010  afterTriggers.event_cxt =
4011  AllocSetContextCreate(TopTransactionContext,
4012  "AfterTriggerEvents",
4013  ALLOCSET_DEFAULT_SIZES);
4014 
4015  oldcxt = MemoryContextSwitchTo(afterTriggers.event_cxt);
4016 
4017  dst = bms_copy(src);
4018 
4019  MemoryContextSwitchTo(oldcxt);
4020 
4021  return dst;
4022 }
4023 
4024 /* ----------
4025  * afterTriggerAddEvent()
4026  *
4027  * Add a new trigger event to the specified queue.
4028  * The passed-in event data is copied.
4029  * ----------
4030  */
4031 static void
4032 afterTriggerAddEvent(AfterTriggerEventList *events,
4033  AfterTriggerEvent event, AfterTriggerShared evtshared)
4034 {
4035  Size eventsize = SizeofTriggerEvent(event);
4036  Size needed = eventsize + sizeof(AfterTriggerSharedData);
4037  AfterTriggerEventChunk *chunk;
4038  AfterTriggerShared newshared;
4039  AfterTriggerEvent newevent;
4040 
4041  /*
4042  * If empty list or not enough room in the tail chunk, make a new chunk.
4043  * We assume here that a new shared record will always be needed.
4044  */
4045  chunk = events->tail;
4046  if (chunk == NULL ||
4047  chunk->endfree - chunk->freeptr < needed)
4048  {
4049  Size chunksize;
4050 
4051  /* Create event context if we didn't already */
4052  if (afterTriggers.event_cxt == NULL)
4053  afterTriggers.event_cxt =
4054  AllocSetContextCreate(TopTransactionContext,
4055  "AfterTriggerEvents",
4056  ALLOCSET_DEFAULT_SIZES);
4057 
4058  /*
4059  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
4060  * These numbers are fairly arbitrary, though there is a hard limit at
4061  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
4062  * shared records using the available space in ate_flags. Another
4063  * constraint is that if the chunk size gets too huge, the search loop
4064  * below would get slow given a (not too common) usage pattern with
4065  * many distinct event types in a chunk. Therefore, we double the
4066  * preceding chunk size only if there weren't too many shared records
4067  * in the preceding chunk; otherwise we halve it. This gives us some
4068  * ability to adapt to the actual usage pattern of the current query
4069  * while still having large chunk sizes in typical usage. All chunk
4070  * sizes used should be MAXALIGN multiples, to ensure that the shared
4071  * records will be aligned safely.
4072  */
4073 #define MIN_CHUNK_SIZE 1024
4074 #define MAX_CHUNK_SIZE (1024*1024)
4075 
4076 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
4077 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
4078 #endif
4079 
4080  if (chunk == NULL)
4081  chunksize = MIN_CHUNK_SIZE;
4082  else
4083  {
4084  /* preceding chunk size... */
4085  chunksize = chunk->endptr - (char *) chunk;
4086  /* check number of shared records in preceding chunk */
4087  if ((chunk->endptr - chunk->endfree) <=
4088  (100 * sizeof(AfterTriggerSharedData)))
4089  chunksize *= 2; /* okay, double it */
4090  else
4091  chunksize /= 2; /* too many shared records */
4092  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
4093  }
4094  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
4095  chunk->next = NULL;
4096  chunk->freeptr = CHUNK_DATA_START(chunk);
4097  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
4098  Assert(chunk->endfree - chunk->freeptr >= needed);
4099 
4100  if (events->head == NULL)
4101  events->head = chunk;
4102  else
4103  events->tail->next = chunk;
4104  events->tail = chunk;
4105  /* events->tailfree is now out of sync, but we'll fix it below */
4106  }
4107 
4108  /*
4109  * Try to locate a matching shared-data record already in the chunk. If
4110  * none, make a new one.
4111  */
4112  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
4113  (char *) newshared >= chunk->endfree;
4114  newshared--)
4115  {
4116  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
4117  newshared->ats_relid == evtshared->ats_relid &&
4118  newshared->ats_event == evtshared->ats_event &&
4119  newshared->ats_table == evtshared->ats_table &&
4120  newshared->ats_firing_id == 0)
4121  break;
4122  }
4123  if ((char *) newshared < chunk->endfree)
4124  {
4125  *newshared = *evtshared;
4126  newshared->ats_firing_id = 0; /* just to be sure */
4127  chunk->endfree = (char *) newshared;
4128  }
4129 
4130  /* Insert the data */
4131  newevent = (AfterTriggerEvent) chunk->freeptr;
4132  memcpy(newevent, event, eventsize);
4133  /* ... and link the new event to its shared record */
4134  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
4135  newevent->ate_flags |= (char *) newshared - (char *) newevent;
4136 
4137  chunk->freeptr += eventsize;
4138  events->tailfree = chunk->freeptr;
4139 }
4140 
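The chunk-sizing policy implemented above can be summarized by a small hypothetical helper (illustrative only; it reuses the MIN_CHUNK_SIZE and MAX_CHUNK_SIZE limits defined in afterTriggerAddEvent() and is not itself part of trigger.c):

/* Illustrative sketch of the chunk-growth heuristic described above. */
static Size
demo_next_chunk_size(Size prev_size, Size shared_bytes_in_prev)
{
	Size		size;

	if (prev_size == 0)
		return MIN_CHUNK_SIZE;	/* starting a fresh event list */
	if (shared_bytes_in_prev <= 100 * sizeof(AfterTriggerSharedData))
		size = prev_size * 2;	/* few distinct event types: grow */
	else
		size = prev_size / 2;	/* many shared records: shrink */
	return Min(size, MAX_CHUNK_SIZE);
}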
4141 /* ----------
4142  * afterTriggerFreeEventList()
4143  *
4144  * Free all the event storage in the given list.
4145  * ----------
4146  */
4147 static void
4148 afterTriggerFreeEventList(AfterTriggerEventList *events)
4149 {
4150  AfterTriggerEventChunk *chunk;
4151 
4152  while ((chunk = events->head) != NULL)
4153  {
4154  events->head = chunk->next;
4155  pfree(chunk);
4156  }
4157  events->tail = NULL;
4158  events->tailfree = NULL;
4159 }
4160 
4161 /* ----------
4162  * afterTriggerRestoreEventList()
4163  *
4164  * Restore an event list to its prior length, removing all the events
4165  * added since it had the value old_events.
4166  * ----------
4167  */
4168 static void
4169 afterTriggerRestoreEventList(AfterTriggerEventList *events,
4170  const AfterTriggerEventList *old_events)
4171 {
4172  AfterTriggerEventChunk *chunk;
4173  AfterTriggerEventChunk *next_chunk;
4174 
4175  if (old_events->tail == NULL)
4176  {
4177  /* restoring to a completely empty state, so free everything */
4178  afterTriggerFreeEventList(events);
4179  }
4180  else
4181  {
4182  *events = *old_events;
4183  /* free any chunks after the last one we want to keep */
4184  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
4185  {
4186  next_chunk = chunk->next;
4187  pfree(chunk);
4188  }
4189  /* and clean up the tail chunk to be the right length */
4190  events->tail->next = NULL;
4191  events->tail->freeptr = events->tailfree;
4192 
4193  /*
4194  * We don't make any effort to remove now-unused shared data records.
4195  * They might still be useful, anyway.
4196  */
4197  }
4198 }
4199 
4200 /* ----------
4201  * afterTriggerDeleteHeadEventChunk()
4202  *
4203  * Remove the first chunk of events from the query level's event list.
4204  * Keep any event list pointers elsewhere in the query level's data
4205  * structures in sync.
4206  * ----------
4207  */
4208 static void
4209 afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
4210 {
4211  AfterTriggerEventChunk *target = qs->events.head;
4212  ListCell *lc;
4213 
4214  Assert(target && target->next);
4215 
4216  /*
4217  * First, update any pointers in the per-table data, so that they won't be
4218  * dangling. Resetting obsoleted pointers to NULL will make
4219  * cancel_prior_stmt_triggers start from the list head, which is fine.
4220  */
4221  foreach(lc, qs->tables)
4222  {
4223  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4224 
4225  if (table->after_trig_done &&
4226  table->after_trig_events.tail == target)
4227  {
4228  table->after_trig_events.head = NULL;
4229  table->after_trig_events.tail = NULL;
4230  table->after_trig_events.tailfree = NULL;
4231  }
4232  }
4233 
4234  /* Now we can flush the head chunk */
4235  qs->events.head = target->next;
4236  pfree(target);
4237 }
4238 
4239 
4240 /* ----------
4241  * AfterTriggerExecute()
4242  *
4243  * Fetch the required tuples back from the heap and fire one
4244  * single trigger function.
4245  *
4246  * Frequently, this will be fired many times in a row for triggers of
4247  * a single relation. Therefore, we cache the open relation and provide
4248  * fmgr lookup cache space at the caller level. (For triggers fired at
4249  * the end of a query, we can even piggyback on the executor's state.)
4250  *
4251  * When fired for a cross-partition update of a partitioned table, the old
4252  * tuple is fetched using 'src_relInfo' (the source leaf partition) and
4253  * the new tuple using 'dst_relInfo' (the destination leaf partition), though
4254  * both are converted into the root partitioned table's format before passing
4255  * to the trigger function.
4256  *
4257  * event: event currently being fired.
4258  * relInfo: result relation for event.
4259  * src_relInfo: source partition of a cross-partition update
4260  * dst_relInfo: its destination partition
4261  * trigdesc: working copy of rel's trigger info.
4262  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4263  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4264  * or NULL if no instrumentation is wanted.
4265  * per_tuple_context: memory context to call trigger function in.
4266  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4267  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
4268  * ----------
4269  */
4270 static void
4271 AfterTriggerExecute(EState *estate,
4272  AfterTriggerEvent event,
4273  ResultRelInfo *relInfo,
4274  ResultRelInfo *src_relInfo,
4275  ResultRelInfo *dst_relInfo,
4276  TriggerDesc *trigdesc,
4277  FmgrInfo *finfo, Instrumentation *instr,
4278  MemoryContext per_tuple_context,
4279  TupleTableSlot *trig_tuple_slot1,
4280  TupleTableSlot *trig_tuple_slot2)
4281 {
4282  Relation rel = relInfo->ri_RelationDesc;
4283  Relation src_rel = src_relInfo->ri_RelationDesc;
4284  Relation dst_rel = dst_relInfo->ri_RelationDesc;
4285  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4286  Oid tgoid = evtshared->ats_tgoid;
4287  TriggerData LocTriggerData = {0};
4288  HeapTuple rettuple;
4289  int tgindx;
4290  bool should_free_trig = false;
4291  bool should_free_new = false;
4292 
4293  /*
4294  * Locate trigger in trigdesc. It might not be present, and in fact the
4295  * trigdesc could be NULL, if the trigger was dropped since the event was
4296  * queued. In that case, silently do nothing.
4297  */
4298  if (trigdesc == NULL)
4299  return;
4300  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4301  {
4302  if (trigdesc->triggers[tgindx].tgoid == tgoid)
4303  {
4304  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4305  break;
4306  }
4307  }
4308  if (LocTriggerData.tg_trigger == NULL)
4309  return;
4310 
4311  /*
4312  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4313  * to include time spent re-fetching tuples in the trigger cost.
4314  */
4315  if (instr)
4316  InstrStartNode(instr + tgindx);
4317 
4318  /*
4319  * Fetch the required tuple(s).
4320  */
4321  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4322  {
4323  case AFTER_TRIGGER_FDW_FETCH:
4324  {
4325  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4326 
4327  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4328  trig_tuple_slot1))
4329  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4330 
4331  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4332  TRIGGER_EVENT_UPDATE &&
4333  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4334  trig_tuple_slot2))
4335  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4336  }
4337  /* fall through */
4338  case AFTER_TRIGGER_FDW_REUSE:
4339 
4340  /*
4341  * Store tuple in the slot so that tg_trigtuple does not reference
4342  * tuplestore memory. (It is formally possible for the trigger
4343  * function to queue trigger events that add to the same
4344  * tuplestore, which can push other tuples out of memory.) The
4345  * distinction is academic, because we start with a minimal tuple
4346  * that is stored as a heap tuple, constructed in different memory
4347  * context, in the slot anyway.
4348  */
4349  LocTriggerData.tg_trigslot = trig_tuple_slot1;
4350  LocTriggerData.tg_trigtuple =
4351  ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
4352 
4353  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4354  TRIGGER_EVENT_UPDATE)
4355  {
4356  LocTriggerData.tg_newslot = trig_tuple_slot2;
4357  LocTriggerData.tg_newtuple =
4358  ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
4359  }
4360  else
4361  {
4362  LocTriggerData.tg_newtuple = NULL;
4363  }
4364  break;
4365 
4366  default:
4367  if (ItemPointerIsValid(&(event->ate_ctid1)))
4368  {
4369  TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate,
4370  src_relInfo);
4371 
4372  if (!table_tuple_fetch_row_version(src_rel,
4373  &(event->ate_ctid1),
4374  SnapshotAny,
4375  src_slot))
4376  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4377 
4378  /*
4379  * Store the tuple fetched from the source partition into the
4380  * target (root partitioned) table slot, converting if needed.
4381  */
4382  if (src_relInfo != relInfo)
4383  {
4384  TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo);
4385 
4386  LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
4387  if (map)
4388  {
4389  execute_attr_map_slot(map->attrMap,
4390  src_slot,
4391  LocTriggerData.tg_trigslot);
4392  }
4393  else
4394  ExecCopySlot(LocTriggerData.tg_trigslot, src_slot);
4395  }
4396  else
4397  LocTriggerData.tg_trigslot = src_slot;
4398  LocTriggerData.tg_trigtuple =
4399  ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
4400  }
4401  else
4402  {
4403  LocTriggerData.tg_trigtuple = NULL;
4404  }
4405 
4406  /* don't touch ctid2 if not there */
4407  if (((event->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ||
4408  (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) &&
4409  ItemPointerIsValid(&(event->ate_ctid2)))
4410  {
4411  TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate,
4412  dst_relInfo);
4413 
4414  if (!table_tuple_fetch_row_version(dst_rel,
4415  &(event->ate_ctid2),
4416  SnapshotAny,
4417  dst_slot))
4418  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4419 
4420  /*
4421  * Store the tuple fetched from the destination partition into
4422  * the target (root partitioned) table slot, converting if
4423  * needed.
4424  */
4425  if (dst_relInfo != relInfo)
4426  {
4427  TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo);
4428 
4429  LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
4430  if (map)
4431  {
4432  execute_attr_map_slot(map->attrMap,
4433  dst_slot,
4434  LocTriggerData.tg_newslot);
4435  }
4436  else
4437  ExecCopySlot(LocTriggerData.tg_newslot, dst_slot);
4438  }
4439  else
4440  LocTriggerData.tg_newslot = dst_slot;
4441  LocTriggerData.tg_newtuple =
4442  ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
4443  }
4444  else
4445  {
4446  LocTriggerData.tg_newtuple = NULL;
4447  }
4448  }
4449 
4450  /*
4451  * Set up the tuplestore information to let the trigger have access to
4452  * transition tables. When we first make a transition table available to
4453  * a trigger, mark it "closed" so that it cannot change anymore. If any
4454  * additional events of the same type get queued in the current trigger
4455  * query level, they'll go into new transition tables.
4456  */
4457  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4458  if (evtshared->ats_table)
4459  {
4460  if (LocTriggerData.tg_trigger->tgoldtable)
4461  {
4462  if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event))
4463  LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore;
4464  else
4465  LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore;
4466  evtshared->ats_table->closed = true;
4467  }
4468 
4469  if (LocTriggerData.tg_trigger->tgnewtable)
4470  {
4471  if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event))
4472  LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore;
4473  else
4474  LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore;
4475  evtshared->ats_table->closed = true;
4476  }
4477  }
4478 
4479  /*
4480  * Setup the remaining trigger information
4481  */
4482  LocTriggerData.type = T_TriggerData;
4483  LocTriggerData.tg_event =
4484  evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
4485  LocTriggerData.tg_relation = rel;
4486  if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4487  LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4488 
4489  MemoryContextReset(per_tuple_context);
4490 
4491  /*
4492  * Call the trigger and throw away any possibly returned updated tuple.
4493  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4494  */
4495  rettuple = ExecCallTriggerFunc(&LocTriggerData,
4496  tgindx,
4497  finfo,
4498  NULL,
4499  per_tuple_context);
4500  if (rettuple != NULL &&
4501  rettuple != LocTriggerData.tg_trigtuple &&
4502  rettuple != LocTriggerData.tg_newtuple)
4503  heap_freetuple(rettuple);
4504 
4505  /*
4506  * Release resources
4507  */
4508  if (should_free_trig)
4509  heap_freetuple(LocTriggerData.tg_trigtuple);
4510  if (should_free_new)
4511  heap_freetuple(LocTriggerData.tg_newtuple);
4512 
4513  /* don't clear slots' contents if foreign table */
4514  if (trig_tuple_slot1 == NULL)
4515  {
4516  if (LocTriggerData.tg_trigslot)
4517  ExecClearTuple(LocTriggerData.tg_trigslot);
4518  if (LocTriggerData.tg_newslot)
4519  ExecClearTuple(LocTriggerData.tg_newslot);
4520  }
4521 
4522  /*
4523  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4524  * one "tuple returned" (really the number of firings).
4525  */
4526  if (instr)
4527  InstrStopNode(instr + tgindx, 1);
4528 }
4529 
4530 
4531 /*
4532  * afterTriggerMarkEvents()
4533  *
4534  * Scan the given event list for not yet invoked events. Mark the ones
4535  * that can be invoked now with the current firing ID.
4536  *
4537  * If move_list isn't NULL, events that are not to be invoked now are
4538  * transferred to move_list.
4539  *
4540  * When immediate_only is true, do not invoke currently-deferred triggers.
4541  * (This will be false only at main transaction exit.)
4542  *
4543  * Returns true if any invokable events were found.
4544  */
4545 static bool
4546 afterTriggerMarkEvents(AfterTriggerEventList *events,
4547  AfterTriggerEventList *move_list,
4548  bool immediate_only)
4549 {
4550  bool found = false;
4551  bool deferred_found = false;
4552  AfterTriggerEvent event;
4553  AfterTriggerEventChunk *chunk;
4554 
4555  for_each_event_chunk(event, chunk, *events)
4556  {
4557  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4558  bool defer_it = false;
4559 
4560  if (!(event->ate_flags &
4561  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
4562  {
4563  /*
4564  * This trigger hasn't been called or scheduled yet. Check if we
4565  * should call it now.
4566  */
4567  if (immediate_only && afterTriggerCheckState(evtshared))
4568  {
4569  defer_it = true;
4570  }
4571  else
4572  {
4573  /*
4574  * Mark it as to be fired in this firing cycle.
4575  */
4576  evtshared->ats_firing_id = afterTriggers.firing_counter;
4577  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4578  found = true;
4579  }
4580  }
4581 
4582  /*
4583  * If it's deferred, move it to move_list, if requested.
4584  */
4585  if (defer_it && move_list != NULL)
4586  {
4587  deferred_found = true;
4588  /* add it to move_list */
4589  afterTriggerAddEvent(move_list, event, evtshared);
4590  /* mark original copy "done" so we don't do it again */
4591  event->ate_flags |= AFTER_TRIGGER_DONE;
4592  }
4593  }
4594 
4595  /*
4596  * We could allow deferred triggers if, before the end of the
4597  * security-restricted operation, we were to verify that a SET CONSTRAINTS
4598  * ... IMMEDIATE has fired all such triggers. For now, don't bother.
4599  */
4600  if (deferred_found && InSecurityRestrictedOperation())
4601  ereport(ERROR,
4602  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
4603  errmsg("cannot fire deferred trigger within security-restricted operation")));
4604 
4605  return found;
4606 }
4607 
4608 /*
4609  * afterTriggerInvokeEvents()
4610  *
4611  * Scan the given event list for events that are marked as to be fired
4612  * in the current firing cycle, and fire them.
4613  *
4614  * If estate isn't NULL, we use its result relation info to avoid repeated
4615  * openings and closing of trigger target relations. If it is NULL, we
4616  * make one locally to cache the info in case there are multiple trigger
4617  * events per rel.
4618  *
4619  * When delete_ok is true, it's safe to delete fully-processed events.
4620  * (We are not very tense about that: we simply reset a chunk to be empty
4621  * if all its events got fired. The objective here is just to avoid useless
4622  * rescanning of events when a trigger queues new events during transaction
4623  * end, so it's not necessary to worry much about the case where only
4624  * some events are fired.)
4625  *
4626  * Returns true if no unfired events remain in the list (this allows us
4627  * to avoid repeating afterTriggerMarkEvents).
4628  */
4629 static bool
4630 afterTriggerInvokeEvents(AfterTriggerEventList *events,
4631  CommandId firing_id,
4632  EState *estate,
4633  bool delete_ok)
4634 {
4635  bool all_fired = true;
4636  AfterTriggerEventChunk *chunk;
4637  MemoryContext per_tuple_context;
4638  bool local_estate = false;
4639  ResultRelInfo *rInfo = NULL;
4640  Relation rel = NULL;
4641  TriggerDesc *trigdesc = NULL;
4642  FmgrInfo *finfo = NULL;
4643  Instrumentation *instr = NULL;
4644  TupleTableSlot *slot1 = NULL,
4645  *slot2 = NULL;
4646 
4647  /* Make a local EState if need be */
4648  if (estate == NULL)
4649  {
4650  estate = CreateExecutorState();
4651  local_estate = true;
4652  }
4653 
4654  /* Make a per-tuple memory context for trigger function calls */
4655  per_tuple_context =
4656  AllocSetContextCreate(CurrentMemoryContext,
4657  "AfterTriggerTupleContext",
4658  ALLOCSET_DEFAULT_SIZES);
4659 
4660  for_each_chunk(chunk, *events)
4661  {
4662  AfterTriggerEvent event;
4663  bool all_fired_in_chunk = true;
4664 
4665  for_each_event(event, chunk)
4666  {
4667  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4668 
4669  /*
4670  * Is it one for me to fire?
4671  */
4672  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4673  evtshared->ats_firing_id == firing_id)
4674  {
4675  ResultRelInfo *src_rInfo,
4676  *dst_rInfo;
4677 
4678  /*
4679  * So let's fire it... but first, find the correct relation if
4680  * this is not the same relation as before.
4681  */
4682  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4683  {
4684  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid,
4685  NULL);
4686  rel = rInfo->ri_RelationDesc;
4687  /* Catch calls with insufficient relcache refcounting */
4689  trigdesc = rInfo->ri_TrigDesc;
4690  /* caution: trigdesc could be NULL here */
4691  finfo = rInfo->ri_TrigFunctions;
4692  instr = rInfo->ri_TrigInstrument;
4693  if (slot1 != NULL)
4694  {
4695  ExecDropSingleTupleTableSlot(slot1);
4696  ExecDropSingleTupleTableSlot(slot2);
4697  slot1 = slot2 = NULL;
4698  }
4699  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4700  {
4701  slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4702  &TTSOpsMinimalTuple);
4703  slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4704  &TTSOpsMinimalTuple);
4705  }
4706  }
4707 
4708  /*
4709  * Look up source and destination partition result rels of a
4710  * cross-partition update event.
4711  */
4712  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
4713  AFTER_TRIGGER_CP_UPDATE)
4714  {
4715  Assert(OidIsValid(event->ate_src_part) &&
4716  OidIsValid(event->ate_dst_part));
4717  src_rInfo = ExecGetTriggerResultRel(estate,
4718  event->ate_src_part,
4719  rInfo);
4720  dst_rInfo = ExecGetTriggerResultRel(estate,
4721  event->ate_dst_part,
4722  rInfo);
4723  }
4724  else
4725  src_rInfo = dst_rInfo = rInfo;
4726 
4727  /*
4728  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4729  * still set, so recursive examinations of the event list
4730  * won't try to re-fire it.
4731  */
4732  AfterTriggerExecute(estate, event, rInfo,
4733  src_rInfo, dst_rInfo,
4734  trigdesc, finfo, instr,
4735  per_tuple_context, slot1, slot2);
4736 
4737  /*
4738  * Mark the event as done.
4739  */
4740  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4741  event->ate_flags |= AFTER_TRIGGER_DONE;
4742  }
4743  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4744  {
4745  /* something remains to be done */
4746  all_fired = all_fired_in_chunk = false;
4747  }
4748  }
4749 
4750  /* Clear the chunk if delete_ok and nothing left of interest */
4751  if (delete_ok && all_fired_in_chunk)
4752  {
4753  chunk->freeptr = CHUNK_DATA_START(chunk);
4754  chunk->endfree = chunk->endptr;
4755 
4756  /*
4757  * If it's last chunk, must sync event list's tailfree too. Note
4758  * that delete_ok must NOT be passed as true if there could be
4759  * additional AfterTriggerEventList values pointing at this event
4760  * list, since we'd fail to fix their copies of tailfree.
4761  */
4762  if (chunk == events->tail)
4763  events->tailfree = chunk->freeptr;
4764  }
4765  }
4766  if (slot1 != NULL)
4767  {
4768  ExecDropSingleTupleTableSlot(slot1);
4769  ExecDropSingleTupleTableSlot(slot2);
4770  }
4771 
4772  /* Release working resources */
4773  MemoryContextDelete(per_tuple_context);
4774 
4775  if (local_estate)
4776  {
4777  ExecCloseResultRelations(estate);
4778  ExecResetTupleTable(estate->es_tupleTable, false);
4779  FreeExecutorState(estate);
4780  }
4781 
4782  return all_fired;
4783 }
4784 
4785 
4786 /*
4787  * GetAfterTriggersTableData
4788  *
4789  * Find or create an AfterTriggersTableData struct for the specified
4790  * trigger event (relation + operation type). Ignore existing structs
4791  * marked "closed"; we don't want to put any additional tuples into them,
4792  * nor change their stmt-triggers-fired state.
4793  *
4794  * Note: the AfterTriggersTableData list is allocated in the current
4795  * (sub)transaction's CurTransactionContext. This is OK because
4796  * we don't need it to live past AfterTriggerEndQuery.
4797  */
4798 static AfterTriggersTableData *
4799 GetAfterTriggersTableData(Oid relid, CmdType cmdType)
4800 {
4801  AfterTriggersTableData *table;
4802  AfterTriggersQueryData *qs;
4803  MemoryContext oldcxt;
4804  ListCell *lc;
4805 
4806  /* Caller should have ensured query_depth is OK. */
4807  Assert(afterTriggers.query_depth >= 0 &&
4808  afterTriggers.query_depth < afterTriggers.maxquerydepth);
4809  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4810 
4811  foreach(lc, qs->tables)
4812  {
4813  table = (AfterTriggersTableData *) lfirst(lc);
4814  if (table->relid == relid && table->cmdType == cmdType &&
4815  !table->closed)
4816  return table;
4817  }
4818 
4819  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4820 
4821  table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
4822  table->relid = relid;
4823  table->cmdType = cmdType;
4824  qs->tables = lappend(qs->tables, table);
4825 
4826  MemoryContextSwitchTo(oldcxt);
4827 
4828  return table;
4829 }
4830 
4831 /*
4832  * Returns a TupleTableSlot suitable for holding the tuples to be put
4833  * into AfterTriggersTableData's transition table tuplestores.
4834  */
4835 static TupleTableSlot *
4836 GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
4837  TupleDesc tupdesc)
4838 {
4839  /* Create it if not already done. */
4840  if (!table->storeslot)
4841  {
4842  MemoryContext oldcxt;
4843 
4844  /*
4845  * We need this slot only until AfterTriggerEndQuery, but making it
4846  * last till end-of-subxact is good enough. It'll be freed by
4847  * AfterTriggerFreeQuery(). However, the passed-in tupdesc might have
4848  * a different lifespan, so we'd better make a copy of that.
4849  */
4850  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4851  tupdesc = CreateTupleDescCopy(tupdesc);
4852  table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
4853  MemoryContextSwitchTo(oldcxt);
4854  }
4855 
4856  return table->storeslot;
4857 }
4858 
4859 /*
4860  * MakeTransitionCaptureState
4861  *
4862  * Make a TransitionCaptureState object for the given TriggerDesc, target
4863  * relation, and operation type. The TCS object holds all the state needed
4864  * to decide whether to capture tuples in transition tables.
4865  *
4866  * If there are no triggers in 'trigdesc' that request relevant transition
4867  * tables, then return NULL.
4868  *
4869  * The resulting object can be passed to the ExecAR* functions. When
4870  * dealing with child tables, the caller can set tcs_original_insert_tuple
4871  * to avoid having to reconstruct the original tuple in the root table's
4872  * format.
4873  *
4874  * Note that we copy the flags from a parent table into this struct (rather
4875  * than subsequently using the relation's TriggerDesc directly) so that we can
4876  * use it to control collection of transition tuples from child tables.
4877  *
4878  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4879  * on the same table during one query should share one transition table.
4880  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4881  * looked up using the table OID + CmdType, and are merely referenced by
4882  * the TransitionCaptureState objects we hand out to callers.
4883  */
4884 TransitionCaptureState *
4885 MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
4886 {
4887  TransitionCaptureState *state;
4888  bool need_old_upd,
4889  need_new_upd,
4890  need_old_del,
4891  need_new_ins;
4892  AfterTriggersTableData *table;
4893  MemoryContext oldcxt;
4894  ResourceOwner saveResourceOwner;
4895 
4896  if (trigdesc == NULL)
4897  return NULL;
4898 
4899  /* Detect which table(s) we need. */
4900  switch (cmdType)
4901  {
4902  case CMD_INSERT:
4903  need_old_upd = need_old_del = need_new_upd = false;
4904  need_new_ins = trigdesc->trig_insert_new_table;
4905  break;
4906  case CMD_UPDATE:
4907  need_old_upd = trigdesc->trig_update_old_table;
4908  need_new_upd = trigdesc->trig_update_new_table;
4909  need_old_del = need_new_ins = false;
4910  break;
4911  case CMD_DELETE:
4912  need_old_del = trigdesc->trig_delete_old_table;
4913  need_old_upd = need_new_upd = need_new_ins = false;
4914  break;
4915  case CMD_MERGE:
4916  need_old_upd = trigdesc->trig_update_old_table;
4917  need_new_upd = trigdesc->trig_update_new_table;
4918  need_old_del = trigdesc->trig_delete_old_table;
4919  need_new_ins = trigdesc->trig_insert_new_table;
4920  break;
4921  default:
4922  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4923  /* keep compiler quiet */
4924  need_old_upd = need_new_upd = need_old_del = need_new_ins = false;
4925  break;
4926  }
4927  if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del)
4928  return NULL;
4929 
4930  /* Check state, like AfterTriggerSaveEvent. */
4931  if (afterTriggers.query_depth < 0)
4932  elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4933 
4934  /* Be sure we have enough space to record events at this query depth. */
4935  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4936  AfterTriggerEnlargeQueryState();
4937 
4938  /*
4939  * Find or create an AfterTriggersTableData struct to hold the
4940  * tuplestore(s). If there's a matching struct but it's marked closed,
4941  * ignore it; we need a newer one.
4942  *
4943  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4944  * allocated in the current (sub)transaction's CurTransactionContext, and
4945  * the tuplestores are managed by the (sub)transaction's resource owner.
4946  * This is sufficient lifespan because we do not allow triggers using
4947  * transition tables to be deferrable; they will be fired during
4948  * AfterTriggerEndQuery, after which it's okay to delete the data.
4949  */
4950  table = GetAfterTriggersTableData(relid, cmdType);
4951 
4952  /* Now create required tuplestore(s), if we don't have them already. */
4953  oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4954  saveResourceOwner = CurrentResourceOwner;
4955  CurrentResourceOwner = CurTransactionResourceOwner;
4956 
4957  if (need_old_upd && table->old_upd_tuplestore == NULL)
4958  table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4959  if (need_new_upd && table->new_upd_tuplestore == NULL)
4960  table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4961  if (need_old_del && table->old_del_tuplestore == NULL)
4962  table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4963  if (need_new_ins && table->new_ins_tuplestore == NULL)
4964  table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4965 
4966  CurrentResourceOwner = saveResourceOwner;
4967  MemoryContextSwitchTo(oldcxt);
4968 
4969  /* Now build the TransitionCaptureState struct, in caller's context */
4971  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4972  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4973  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4974  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4975  state->tcs_private = table;
4976 
4977  return state;
4978 }
4979 
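A hedged sketch of a call site (not actual executor code; the helper name is hypothetical) that asks for transition-tuple capture on an INSERT; a NULL result means no trigger requested a relevant transition table:

/* Hypothetical call site, shown only to illustrate the API above. */
static TransitionCaptureState *
demo_capture_for_insert(Relation rel)
{
	return MakeTransitionCaptureState(rel->trigdesc,
									  RelationGetRelid(rel),
									  CMD_INSERT);
}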
4980 
4981 /* ----------
4982  * AfterTriggerBeginXact()
4983  *
4984  * Called at transaction start (either BEGIN or implicit for single
4985  * statement outside of transaction block).
4986  * ----------
4987  */
4988 void
4989 AfterTriggerBeginXact(void)
4990 {
4991  /*
4992  * Initialize after-trigger state structure to empty
4993  */
4994  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4995  afterTriggers.query_depth = -1;
4996 
4997  /*
4998  * Verify that there is no leftover state remaining. If these assertions
4999  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
5000  * up properly.
5001  */
5002  Assert(afterTriggers.state == NULL);
5003  Assert(afterTriggers.query_stack == NULL);
5004  Assert(afterTriggers.maxquerydepth == 0);
5005  Assert(afterTriggers.event_cxt == NULL);
5006  Assert(afterTriggers.events.head == NULL);
5007  Assert(afterTriggers.trans_stack == NULL);
5008  Assert(afterTriggers.maxtransdepth == 0);
5009 }
5010 
5011 
5012 /* ----------
5013  * AfterTriggerBeginQuery()
5014  *
5015  * Called just before we start processing a single query within a
5016  * transaction (or subtransaction). Most of the real work gets deferred
5017  * until somebody actually tries to queue a trigger event.
5018  * ----------
5019  */
5020 void
5021 AfterTriggerBeginQuery(void)
5022 {
5023  /* Increase the query stack depth */
5024  afterTriggers.query_depth++;
5025 }
5026 
5027 
5028 /* ----------
5029  * AfterTriggerEndQuery()
5030  *
5031  * Called after one query has been completely processed. At this time
5032  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
5033  * transfer deferred trigger events to the global deferred-trigger list.
5034  *
5035  * Note that this must be called BEFORE closing down the executor
5036  * with ExecutorEnd, because we make use of the EState's info about
5037  * target relations. Normally it is called from ExecutorFinish.
5038  * ----------
5039  */
5040 void
5041 AfterTriggerEndQuery(EState *estate)
5042 {
5043  AfterTriggersQueryData *qs;
5044 
5045  /* Must be inside a query, too */
5046  Assert(afterTriggers.query_depth >= 0);
5047 
5048  /*
5049  * If we never even got as far as initializing the event stack, there
5050  * certainly won't be any events, so exit quickly.
5051  */
5052  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5053  {
5054  afterTriggers.query_depth--;
5055  return;
5056  }
5057 
5058  /*
5059  * Process all immediate-mode triggers queued by the query, and move the
5060  * deferred ones to the main list of deferred events.
5061  *
5062  * Notice that we decide which ones will be fired, and put the deferred
5063  * ones on the main list, before anything is actually fired. This ensures
5064  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
5065  * IMMEDIATE: all events we have decided to defer will be available for it
5066  * to fire.
5067  *
5068  * We loop in case a trigger queues more events at the same query level.
5069  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
5070  * will instead fire any triggers in a dedicated query level. Foreign key
5071  * enforcement triggers do add to the current query level, thanks to their
5072  * passing fire_triggers = false to SPI_execute_snapshot(). Other
5073  * C-language triggers might do likewise.
5074  *
5075  * If we find no firable events, we don't have to increment
5076  * firing_counter.
5077  */
5079 
5080  for (;;)
5081  {
5082  if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
5083  {
5084  CommandId firing_id = afterTriggers.firing_counter++;
5085  AfterTriggerEventChunk *oldtail = qs->events.tail;
5086 
5087  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
5088  break; /* all fired */
5089 
5090  /*
5091  * Firing a trigger could result in query_stack being repalloc'd,
5092  * so we must recalculate qs after each afterTriggerInvokeEvents
5093  * call. Furthermore, it's unsafe to pass delete_ok = true here,
5094  * because that could cause afterTriggerInvokeEvents to try to
5095  * access qs->events after the stack has been repalloc'd.
5096  */
5097  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
5098 
5099  /*
5100  * We'll need to scan the events list again. To reduce the cost
5101  * of doing so, get rid of completely-fired chunks. We know that
5102  * all events were marked IN_PROGRESS or DONE at the conclusion of
5103  * afterTriggerMarkEvents, so any still-interesting events must
5104  * have been added after that, and so must be in the chunk that
5105  * was then the tail chunk, or in later chunks. So, zap all
5106  * chunks before oldtail. This is approximately the same set of
5107  * events we would have gotten rid of by passing delete_ok = true.
5108  */
5109  Assert(oldtail != NULL);
5110  while (qs->events.head != oldtail)
5111  afterTriggerDeleteHeadEventChunk(qs);
5112  }
5113  else
5114  break;
5115  }
5116 
5117  /* Release query-level-local storage, including tuplestores if any */
5118  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
5119 
5120  afterTriggers.query_depth--;
5121 }
5122 
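Putting the query-level entry points together, the expected calling protocol is roughly the following sketch (a hypothetical driver, not actual executor code; the real calls live in the executor, with AfterTriggerEndQuery() run before ExecutorEnd as noted above):

/* Hypothetical driver, shown only to illustrate the per-query protocol. */
static void
demo_run_one_query_level(EState *estate)
{
	AfterTriggerBeginQuery();	/* push a new trigger query level */

	/* ... execute the query; AFTER events queue up at this level ... */

	AfterTriggerEndQuery(estate);	/* fire immediate events, defer the rest */
}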
5123 
5124 /*
5125  * AfterTriggerFreeQuery
5126  * Release subsidiary storage for a trigger query level.
5127  * This includes closing down tuplestores.
5128  * Note: it's important for this to be safe if interrupted by an error
5129  * and then called again for the same query level.
5130  */
5131 static void
5132 AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
5133 {
5134  Tuplestorestate *ts;
5135  List *tables;
5136  ListCell *lc;
5137 
5138  /* Drop the trigger events */
5139  afterTriggerFreeEventList(&qs->events);
5140 
5141  /* Drop FDW tuplestore if any */
5142  ts = qs->fdw_tuplestore;
5143  qs->fdw_tuplestore = NULL;
5144  if (ts)
5145  tuplestore_end(ts);
5146 
5147  /* Release per-table subsidiary storage */
5148  tables = qs->tables;
5149  foreach(lc, tables)
5150  {
5151  AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
5152 
5153  ts = table->old_upd_tuplestore;
5154  table->old_upd_tuplestore = NULL;
5155  if (ts)
5156  tuplestore_end(ts);
5157  ts = table->new_upd_tuplestore;
5158  table->new_upd_tuplestore = NULL;
5159  if (ts)
5160  tuplestore_end(ts);
5161  ts = table->old_del_tuplestore;
5162  table->old_del_tuplestore = NULL;
5163  if (ts)
5164  tuplestore_end(ts);
5165  ts = table->new_ins_tuplestore;
5166  table->new_ins_tuplestore = NULL;
5167  if (ts)
5168  tuplestore_end(ts);
5169  if (table->storeslot)
5170  {
5171  TupleTableSlot *slot = table->storeslot;
5172 
5173  table->storeslot = NULL;
5174  ExecDropSingleTupleTableSlot(slot);
5175  }
5176  }
5177 
5178  /*
5179  * Now free the AfterTriggersTableData structs and list cells. Reset list
5180  * pointer first; if list_free_deep somehow gets an error, better to leak
5181  * that storage than have an infinite loop.
5182  */
5183  qs->tables = NIL;
5184  list_free_deep(tables);
5185 }
5186 
5187 
5188 /* ----------
5189  * AfterTriggerFireDeferred()
5190  *
5191  * Called just before the current transaction is committed. At this
5192  * time we invoke all pending DEFERRED triggers.
5193  *
5194  * It is possible for other modules to queue additional deferred triggers
5195  * during pre-commit processing; therefore xact.c may have to call this
5196  * multiple times.
5197  * ----------
5198  */
5199 void
5200 AfterTriggerFireDeferred(void)
5201 {
5202  AfterTriggerEventList *events;
5203  bool snap_pushed = false;
5204 
5205  /* Must not be inside a query */
5206  Assert(afterTriggers.query_depth == -1);
5207 
5208  /*
5209  * If there are any triggers to fire, make sure we have set a snapshot for
5210  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
5211  * can't assume ActiveSnapshot is valid on entry.)
5212  */
5213  events = &afterTriggers.events;
5214  if (events->head != NULL)
5215  {
5216  PushActiveSnapshot(GetTransactionSnapshot());
5217  snap_pushed = true;
5218  }
5219 
5220  /*
5221  * Run all the remaining triggers. Loop until they are all gone, in case
5222  * some trigger queues more for us to do.
5223  */
5224  while (afterTriggerMarkEvents(events, NULL, false))
5225  {
5226  CommandId firing_id = afterTriggers.firing_counter++;
5227 
5228  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
5229  break; /* all fired */
5230  }
5231 
5232  /*
5233  * We don't bother freeing the event list, since it will go away anyway
5234  * (and more efficiently than via pfree) in AfterTriggerEndXact.
5235  */
5236 
5237  if (snap_pushed)
5238  PopActiveSnapshot();
5239 }
5240 
5241 
5242 /* ----------
5243  * AfterTriggerEndXact()
5244  *
5245  * The current transaction is finishing.
5246  *
5247  * Any unfired triggers are canceled so we simply throw
5248  * away anything we know.
5249  *
5250  * Note: it is possible for this to be called repeatedly in case of
5251  * error during transaction abort; therefore, do not complain if
5252  * already closed down.
5253  * ----------
5254  */
5255 void
5256 AfterTriggerEndXact(bool isCommit)
5257 {
5258  /*
5259  * Forget the pending-events list.
5260  *
5261  * Since all the info is in TopTransactionContext or children thereof, we
5262  * don't really need to do anything to reclaim memory. However, the
5263  * pending-events list could be large, and so it's useful to discard it as
5264  * soon as possible --- especially if we are aborting because we ran out
5265  * of memory for the list!
5266  */
5267  if (afterTriggers.event_cxt)
5268  {
5269  MemoryContextDelete(afterTriggers.event_cxt);
5270  afterTriggers.event_cxt = NULL;
5271  afterTriggers.events.head = NULL;
5272  afterTriggers.events.tail = NULL;
5273  afterTriggers.events.tailfree = NULL;
5274  }
5275 
5276  /*
5277  * Forget any subtransaction state as well. Since this can't be very
5278  * large, we let the eventual reset of TopTransactionContext free the
5279  * memory instead of doing it here.
5280  */
5281  afterTriggers.trans_stack = NULL;
5282  afterTriggers.maxtransdepth = 0;
5283 
5284 
5285  /*
5286  * Forget the query stack and constraint-related state information. As
5287  * with the subtransaction state information, we don't bother freeing the
5288  * memory here.
5289  */
5290  afterTriggers.query_stack = NULL;
5291  afterTriggers.maxquerydepth = 0;
5292  afterTriggers.state = NULL;
5293 
5294  /* No more afterTriggers manipulation until next transaction starts. */
5295  afterTriggers.query_depth = -1;
5296 }
5297 
5298 /*
5299  * AfterTriggerBeginSubXact()
5300  *
5301  * Start a subtransaction.
5302  */
5303 void
5304 AfterTriggerBeginSubXact(void)
5305 {
5306  int my_level = GetCurrentTransactionNestLevel();
5307 
5308  /*
5309  * Allocate more space in the trans_stack if needed. (Note: because the
5310  * minimum nest level of a subtransaction is 2, we waste the first couple
5311  * entries of the array; not worth the notational effort to avoid it.)
5312  */
5313  while (my_level >= afterTriggers.maxtransdepth)
5314  {
5315  if (afterTriggers.maxtransdepth == 0)
5316  {
5317  /* Arbitrarily initialize for max of 8 subtransaction levels */
5318  afterTriggers.trans_stack = (AfterTriggersTransData *)
5319  MemoryContextAlloc(TopTransactionContext,
5320  8 * sizeof(AfterTriggersTransData));
5321  afterTriggers.maxtransdepth = 8;
5322  }
5323  else
5324  {
5325  /* repalloc will keep the stack in the same context */
5326  int new_alloc = afterTriggers.maxtransdepth * 2;
5327 
5328  afterTriggers.trans_stack = (AfterTriggersTransData *)
5329  repalloc(afterTriggers.trans_stack,
5330  new_alloc * sizeof(AfterTriggersTransData));
5331  afterTriggers.maxtransdepth = new_alloc;
5332  }
5333  }
5334 
5335  /*
5336  * Push the current information into the stack. The SET CONSTRAINTS state
5337  * is not saved until/unless changed. Likewise, we don't make a
5338  * per-subtransaction event context until needed.
5339  */
5340  afterTriggers.trans_stack[my_level].state = NULL;
5341  afterTriggers.trans_stack[my_level].events = afterTriggers.events;
5342  afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
5343  afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
5344 }
5345 
5346 /*
5347  * AfterTriggerEndSubXact()
5348  *
5349  * The current subtransaction is ending.
5350  */
5351 void
5352 AfterTriggerEndSubXact(bool isCommit)
5353 {
5354  int my_level = GetCurrentTransactionNestLevel();
5355  SetConstraintState state;
5356  AfterTriggerEvent event;
5357  AfterTriggerEventChunk *chunk;
5358  CommandId subxact_firing_id;
5359 
5360  /*
5361  * Pop the prior state if needed.
5362  */
5363  if (isCommit)
5364  {
5365  Assert(my_level < afterTriggers.maxtransdepth);
5366  /* If we saved a prior state, we don't need it anymore */
5367  state = afterTriggers.trans_stack[my_level].state;
5368  if (state != NULL)
5369  pfree(state);
5370  /* this avoids double pfree if error later: */
5371  afterTriggers.trans_stack[my_level].state = NULL;
5372  Assert(afterTriggers.query_depth ==
5373  afterTriggers.trans_stack[my_level].query_depth);
5374  }
5375  else
5376  {
5377  /*
5378  * Aborting. It is possible subxact start failed before calling
5379  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
5380  * trans_stack levels that aren't there.
5381  */
5382  if (my_level >= afterTriggers.maxtransdepth)
5383  return;
5384 
5385  /*
5386  * Release query-level storage for queries being aborted, and restore
5387  * query_depth to its pre-subxact value. This assumes that a
5388  * subtransaction will not add events to query levels started in a
5389  * subtransaction will not add events to query levels started in an
5390  */
5391  while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
5392  {
5393  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
5394  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
5395  afterTriggers.query_depth--;
5396  }
5397  Assert(afterTriggers.query_depth ==
5398  afterTriggers.trans_stack[my_level].query_depth);
5399 
5400  /*
5401  * Restore the global deferred-event list to its former length,
5402  * discarding any events queued by the subxact.
5403  */
5404  afterTriggerRestoreEventList(&afterTriggers.events,
5405  &afterTriggers.trans_stack[my_level].events);
5406 
5407  /*
5408  * Restore the trigger state. If the saved state is NULL, then this
5409  * subxact didn't save it, so it doesn't need restoring.
5410  */
5411  state = afterTriggers.trans_stack[my_level].state;
5412  if (state != NULL)
5413  {
5414  pfree(afterTriggers.state);
5415  afterTriggers.state = state;
5416  }
5417  /* this avoids double pfree if error later: */
5418  afterTriggers.trans_stack[my_level].state = NULL;
5419 
5420  /*
5421  * Scan for any remaining deferred events that were marked DONE or IN
5422  * PROGRESS by this subxact or a child, and un-mark them. We can
5423  * recognize such events because they have a firing ID greater than or
5424  * equal to the firing_counter value we saved at subtransaction start.
5425  * (This essentially assumes that the current subxact includes all
5426  * subxacts started after it.)
5427  */
5428  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
5429  for_each_event_chunk(event, chunk, afterTriggers.events)
5430  {
5431  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5432 
5433  if (event->ate_flags &
5434  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
5435  {
5436  if (evtshared->ats_firing_id >= subxact_firing_id)
5437  event->ate_flags &=
5438  ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
5439  }
5440  }
5441  }
5442 }
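/*
 * Editor's note: a self-contained sketch (not part of trigger.c) of the
 * un-marking pass at the end of the abort branch above.  Events fired by the
 * aborted subxact, or by any of its children, carry a firing id no smaller
 * than the counter value saved when the subxact began, so clearing the
 * DONE/IN PROGRESS bits on exactly those events makes them fireable again.
 * The DEMO_* and demo_* names are hypothetical.
 */
#ifdef TRIGGER_DOC_EXAMPLES
#define DEMO_DONE			0x01
#define DEMO_IN_PROGRESS	0x02

typedef struct DemoEvent
{
	unsigned	flags;
	unsigned	firing_id;
} DemoEvent;

static void
demo_unmark_after_subxact_abort(DemoEvent *events, int nevents,
								unsigned subxact_firing_id)
{
	for (int i = 0; i < nevents; i++)
	{
		if ((events[i].flags & (DEMO_DONE | DEMO_IN_PROGRESS)) &&
			events[i].firing_id >= subxact_firing_id)
			events[i].flags &= ~(DEMO_DONE | DEMO_IN_PROGRESS);
	}
}
#endif							/* TRIGGER_DOC_EXAMPLES */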
5443 
5444 /*
5445  * Get the transition table for the given event, depending on whether we are
5446  * processing the old or the new tuple.
5447  */
5448 static Tuplestorestate *
5449 GetAfterTriggersTransitionTable(int event,
5450  TupleTableSlot *oldslot,
5451  TupleTableSlot *newslot,
5452  TransitionCaptureState *transition_capture)
5453 {
5454  Tuplestorestate *tuplestore = NULL;
5455  bool delete_old_table = transition_capture->tcs_delete_old_table;
5456  bool update_old_table = transition_capture->tcs_update_old_table;
5457  bool update_new_table = transition_capture->tcs_update_new_table;
5458  bool insert_new_table = transition_capture->tcs_insert_new_table;
5459 
5460  /*
5461  * For INSERT events NEW should be non-NULL, for DELETE events OLD should
5462  * be non-NULL, whereas for UPDATE events normally both OLD and NEW are
5463  * non-NULL. But for UPDATE events fired for capturing transition tuples
5464  * during UPDATE partition-key row movement, OLD is NULL when the event is
5465  * for a row being inserted, whereas NEW is NULL when the event is for a
5466  * row being deleted.
5467  */
5468  Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5469  TupIsNull(oldslot)));
5470  Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5471  TupIsNull(newslot)));
5472 
5473  if (!TupIsNull(oldslot))
5474  {
5475  Assert(TupIsNull(newslot));
5476  if (event == TRIGGER_EVENT_DELETE && delete_old_table)
5477  tuplestore = transition_capture->tcs_private->old_del_tuplestore;
5478  else if (event == TRIGGER_EVENT_UPDATE && update_old_table)
5479  tuplestore = transition_capture->tcs_private->old_upd_tuplestore;
5480  }
5481  else if (!TupIsNull(newslot))
5482  {
5483  Assert(TupIsNull(oldslot));
5484  if (event == TRIGGER_EVENT_INSERT && insert_new_table)
5485  tuplestore = transition_capture->tcs_private->new_ins_tuplestore;
5486  else if (event == TRIGGER_EVENT_UPDATE && update_new_table)
5487  tuplestore = transition_capture->tcs_private->new_upd_tuplestore;
5488  }
5489 
5490  return tuplestore;
5491 }
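/*
 * Editor's note: the selection above, summarized.  Any combination not
 * listed (or whose transition table was not requested by any trigger)
 * yields a NULL tuplestore:
 *
 *		event					slot passed		tuplestore returned
 *		TRIGGER_EVENT_DELETE	OLD				old_del_tuplestore
 *		TRIGGER_EVENT_UPDATE	OLD				old_upd_tuplestore
 *		TRIGGER_EVENT_UPDATE	NEW				new_upd_tuplestore
 *		TRIGGER_EVENT_INSERT	NEW				new_ins_tuplestore
 */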
5492 
5493 /*
5494  * Add the given heap tuple to the given tuplestore, applying the conversion
5495  * map if necessary.
5496  *
5497  * If original_insert_tuple is given, we can add that tuple without conversion.
5498  */
5499 static void
5500 TransitionTableAddTuple(EState *estate,
5501  TransitionCaptureState *transition_capture,
5502  ResultRelInfo *relinfo,
5503  TupleTableSlot *slot,
5504  TupleTableSlot *original_insert_tuple,
5505  Tuplestorestate *tuplestore)
5506 {
5507  TupleConversionMap *map;
5508 
5509  /*
5510  * Nothing needs to be done if we don't have a tuplestore.
5511  */
5512  if (tuplestore == NULL)
5513  return;
5514 
5515  if (original_insert_tuple)
5516  tuplestore_puttupleslot(tuplestore, original_insert_tuple);
5517  else if ((map = ExecGetChildToRootMap(relinfo)) != NULL)
5518  {
5519  AfterTriggersTableData *table = transition_capture->tcs_private;
5520  TupleTableSlot *storeslot;
5521 
5522  storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5523  execute_attr_map_slot(map->attrMap, slot, storeslot);
5524  tuplestore_puttupleslot(tuplestore, storeslot);
5525  }
5526  else
5527  tuplestore_puttupleslot(tuplestore, slot);
5528 }
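/*
 * Editor's note: a hedged sketch of how the two helpers above fit together
 * in the event-capture path (paraphrased from the expected caller, not
 * quoted): each slot that is present is routed to the tuplestore selected
 * for it, with the slot that does not apply passed as NULL:
 *
 *		if (!TupIsNull(oldslot))
 *			TransitionTableAddTuple(estate, transition_capture, relinfo,
 *									oldslot, NULL,
 *									GetAfterTriggersTransitionTable(event,
 *																	oldslot,
 *																	NULL,
 *																	transition_capture));
 *		if (!TupIsNull(newslot))
 *			TransitionTableAddTuple(estate, transition_capture, relinfo,
 *									newslot, original_insert_tuple,
 *									GetAfterTriggersTransitionTable(event,
 *																	NULL,
 *																	newslot,
 *																	transition_capture));
 */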
5529 
5530 /* ----------
5531  * AfterTriggerEnlargeQueryState()
5532  *
5533  * Prepare the necessary state so that we can record AFTER trigger events
5534  * queued by a query. It is allowed to have nested queries within a
5535  * (sub)transaction, so we need to have separate state for each query
5536  * nesting level.
5537  * ----------
5538  */
5539 static void
5540 AfterTriggerEnlargeQueryState(void)
5541 {
5542  int init_depth = afterTriggers.maxquerydepth;
5543 
5544  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
5545 
5546  if (afterTriggers.maxquerydepth == 0)
5547  {
5548  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
5549 
5550  afterTriggers.query_stack = (AfterTriggersQueryData *)
5551  MemoryContextAlloc(TopTransactionContext,
5552  new_alloc * sizeof(AfterTriggersQueryData));
5553  afterTriggers.maxquerydepth = new_alloc;
5554  }
5555  else
5556  {
5557  /* repalloc will keep the stack in the same context */
5558  int old_alloc = afterTriggers.maxquerydepth;
5559  int new_alloc = Max(afterTriggers.query_depth + 1,
5560  old_alloc * 2);
5561 
5562  afterTriggers.query_stack = (AfterTriggersQueryData *)
5563  repalloc(afterTriggers.query_stack,
5564  new_alloc * sizeof(AfterTriggersQueryData));
5565  afterTriggers.maxquerydepth = new_alloc;
5566  }
5567 
5568  /* Initialize new array entries to empty */
5569  while (init_depth < afterTriggers.maxquerydepth)