PostgreSQL Source Code  git master
trigger.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/index.h"
26 #include "catalog/indexing.h"
27 #include "catalog/objectaccess.h"
28 #include "catalog/partition.h"
29 #include "catalog/pg_constraint.h"
30 #include "catalog/pg_inherits.h"
31 #include "catalog/pg_proc.h"
32 #include "catalog/pg_trigger.h"
33 #include "catalog/pg_type.h"
34 #include "commands/dbcommands.h"
35 #include "commands/defrem.h"
36 #include "commands/trigger.h"
37 #include "executor/executor.h"
38 #include "executor/execPartition.h"
39 #include "miscadmin.h"
40 #include "nodes/bitmapset.h"
41 #include "nodes/makefuncs.h"
42 #include "optimizer/optimizer.h"
43 #include "parser/parse_clause.h"
44 #include "parser/parse_collate.h"
45 #include "parser/parse_func.h"
46 #include "parser/parse_relation.h"
47 #include "parser/parsetree.h"
48 #include "partitioning/partdesc.h"
49 #include "pgstat.h"
50 #include "rewrite/rewriteManip.h"
51 #include "storage/bufmgr.h"
52 #include "storage/lmgr.h"
53 #include "tcop/utility.h"
54 #include "utils/acl.h"
55 #include "utils/builtins.h"
56 #include "utils/bytea.h"
57 #include "utils/fmgroids.h"
58 #include "utils/guc_hooks.h"
59 #include "utils/inval.h"
60 #include "utils/lsyscache.h"
61 #include "utils/memutils.h"
62 #include "utils/plancache.h"
63 #include "utils/rel.h"
64 #include "utils/snapmgr.h"
65 #include "utils/syscache.h"
66 #include "utils/tuplestore.h"
67 
68 
69 /* GUC variables */
/*
 * NOTE(review): the line between "GUC variables" and the comment below was
 * lost in extraction (doxygen line 70 is missing).  Presumably it declared
 * the SessionReplicationRole GUC — verify against upstream trigger.c.
 */
71 
72 /* How many levels deep into trigger execution are we? */
73 static int MyTriggerDepth = 0;
74 
75 /* Local function prototypes */
/* Rename one pg_trigger row in place (ALTER TRIGGER ... RENAME support). */
76 static void renametrig_internal(Relation tgrel, Relation targetrel,
77  HeapTuple trigtup, const char *newname,
78  const char *expected_name);
/* Recurse the rename to a cloned trigger on one partition. */
79 static void renametrig_partition(Relation tgrel, Oid partitionId,
80  Oid parentTriggerOid, const char *newname,
81  const char *expected_name);
82 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
/* Fetch and lock the tuple a row-level trigger is about to operate on. */
83 static bool GetTupleForTrigger(EState *estate,
84  EPQState *epqstate,
85  ResultRelInfo *relinfo,
86  ItemPointer tid,
87  LockTupleMode lockmode,
88  TupleTableSlot *oldslot,
89  TupleTableSlot **epqslot,
90  TM_FailureData *tmfdp);
/* Decide whether a trigger should fire, evaluating its WHEN clause if any. */
91 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
92  Trigger *trigger, TriggerEvent event,
93  Bitmapset *modifiedCols,
94  TupleTableSlot *oldslot, TupleTableSlot *newslot);
/*
 * NOTE(review): the first line of this prototype was lost in extraction
 * (doxygen line 95 is missing).  The parameter list that follows matches the
 * ExecCallTriggerFunc declaration in upstream trigger.c — TODO confirm.
 */
96  int tgindx,
97  FmgrInfo *finfo,
98  Instrumentation *instr,
99  MemoryContext per_tuple_context);
/* Queue an AFTER-trigger event for later execution at statement/xact end. */
100 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
101  ResultRelInfo *src_partinfo,
102  ResultRelInfo *dst_partinfo,
103  int event, bool row_trigger,
104  TupleTableSlot *oldslot, TupleTableSlot *newslot,
105  List *recheckIndexes, Bitmapset *modifiedCols,
106  TransitionCaptureState *transition_capture,
107  bool is_crosspart_update);
108 static void AfterTriggerEnlargeQueryState(void);
109 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
110 
111 
112 /*
113  * Create a trigger. Returns the address of the created trigger.
114  *
115  * queryString is the source text of the CREATE TRIGGER command.
116  * This must be supplied if a whenClause is specified, else it can be NULL.
117  *
118  * relOid, if nonzero, is the relation on which the trigger should be
119  * created. If zero, the name provided in the statement will be looked up.
120  *
121  * refRelOid, if nonzero, is the relation to which the constraint trigger
122  * refers. If zero, the constraint relation name provided in the statement
123  * will be looked up as needed.
124  *
125  * constraintOid, if nonzero, says that this trigger is being created
126  * internally to implement that constraint. A suitable pg_depend entry will
127  * be made to link the trigger to that constraint. constraintOid is zero when
128  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
129  * TRIGGER, we build a pg_constraint entry internally.)
130  *
131  * indexOid, if nonzero, is the OID of an index associated with the constraint.
132  * We do nothing with this except store it into pg_trigger.tgconstrindid;
133  * but when creating a trigger for a deferrable unique constraint on a
134  * partitioned table, its children are looked up. Note we don't cope with
135  * invalid indexes in that case.
136  *
137  * funcoid, if nonzero, is the OID of the function to invoke. When this is
138  * given, stmt->funcname is ignored.
139  *
140  * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
141  * if that trigger is dropped, this one should be too. There are two cases
142  * when a nonzero value is passed for this: 1) when this function recurses to
143  * create the trigger on partitions, 2) when creating child foreign key
144  * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers().
145  *
146  * If whenClause is passed, it is an already-transformed expression for
147  * WHEN. In this case, we ignore any that may come in stmt->whenClause.
148  *
149  * If isInternal is true then this is an internally-generated trigger.
150  * This argument sets the tgisinternal field of the pg_trigger entry, and
151  * if true causes us to modify the given trigger name to ensure uniqueness.
152  *
153  * When isInternal is not true we require ACL_TRIGGER permissions on the
154  * relation, as well as ACL_EXECUTE on the trigger function. For internal
155  * triggers the caller must apply any required permission checks.
156  *
157  * When called on partitioned tables, this function recurses to create the
158  * trigger on all the partitions, except if isInternal is true, in which
159  * case caller is expected to execute recursion on its own. in_partition
160  * indicates such a recursive call; outside callers should pass "false"
161  * (but see CloneRowTriggersToPartition).
162  */
164 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
165  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
166  Oid funcoid, Oid parentTriggerOid, Node *whenClause,
167  bool isInternal, bool in_partition)
168 {
169  return
170  CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
171  constraintOid, indexOid, funcoid,
172  parentTriggerOid, whenClause, isInternal,
173  in_partition, TRIGGER_FIRES_ON_ORIGIN);
174 }
175 
176 /*
177  * Like the above; additionally the firing condition
178  * (always/origin/replica/disabled) can be specified.
179  */
181 CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
182  Oid relOid, Oid refRelOid, Oid constraintOid,
183  Oid indexOid, Oid funcoid, Oid parentTriggerOid,
184  Node *whenClause, bool isInternal, bool in_partition,
185  char trigger_fires_when)
186 {
187  int16 tgtype;
188  int ncolumns;
189  int16 *columns;
190  int2vector *tgattr;
191  List *whenRtable;
192  char *qual;
193  Datum values[Natts_pg_trigger];
194  bool nulls[Natts_pg_trigger];
195  Relation rel;
196  AclResult aclresult;
197  Relation tgrel;
198  Relation pgrel;
199  HeapTuple tuple = NULL;
200  Oid funcrettype;
201  Oid trigoid = InvalidOid;
202  char internaltrigname[NAMEDATALEN];
203  char *trigname;
204  Oid constrrelid = InvalidOid;
205  ObjectAddress myself,
206  referenced;
207  char *oldtablename = NULL;
208  char *newtablename = NULL;
209  bool partition_recurse;
210  bool trigger_exists = false;
211  Oid existing_constraint_oid = InvalidOid;
212  bool existing_isInternal = false;
213  bool existing_isClone = false;
214 
215  if (OidIsValid(relOid))
216  rel = table_open(relOid, ShareRowExclusiveLock);
217  else
219 
220  /*
221  * Triggers must be on tables or views, and there are additional
222  * relation-type-specific restrictions.
223  */
224  if (rel->rd_rel->relkind == RELKIND_RELATION)
225  {
226  /* Tables can't have INSTEAD OF triggers */
227  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
228  stmt->timing != TRIGGER_TYPE_AFTER)
229  ereport(ERROR,
230  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
231  errmsg("\"%s\" is a table",
233  errdetail("Tables cannot have INSTEAD OF triggers.")));
234  }
235  else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
236  {
237  /* Partitioned tables can't have INSTEAD OF triggers */
238  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
239  stmt->timing != TRIGGER_TYPE_AFTER)
240  ereport(ERROR,
241  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
242  errmsg("\"%s\" is a table",
244  errdetail("Tables cannot have INSTEAD OF triggers.")));
245 
246  /*
247  * FOR EACH ROW triggers have further restrictions
248  */
249  if (stmt->row)
250  {
251  /*
252  * Disallow use of transition tables.
253  *
254  * Note that we have another restriction about transition tables
255  * in partitions; search for 'has_superclass' below for an
256  * explanation. The check here is just to protect from the fact
257  * that if we allowed it here, the creation would succeed for a
258  * partitioned table with no partitions, but would be blocked by
259  * the other restriction when the first partition was created,
260  * which is very unfriendly behavior.
261  */
262  if (stmt->transitionRels != NIL)
263  ereport(ERROR,
264  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
265  errmsg("\"%s\" is a partitioned table",
267  errdetail("ROW triggers with transition tables are not supported on partitioned tables.")));
268  }
269  }
270  else if (rel->rd_rel->relkind == RELKIND_VIEW)
271  {
272  /*
273  * Views can have INSTEAD OF triggers (which we check below are
274  * row-level), or statement-level BEFORE/AFTER triggers.
275  */
276  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
277  ereport(ERROR,
278  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
279  errmsg("\"%s\" is a view",
281  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
282  /* Disallow TRUNCATE triggers on VIEWs */
283  if (TRIGGER_FOR_TRUNCATE(stmt->events))
284  ereport(ERROR,
285  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
286  errmsg("\"%s\" is a view",
288  errdetail("Views cannot have TRUNCATE triggers.")));
289  }
290  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
291  {
292  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
293  stmt->timing != TRIGGER_TYPE_AFTER)
294  ereport(ERROR,
295  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
296  errmsg("\"%s\" is a foreign table",
298  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
299 
300  /*
301  * We disallow constraint triggers to protect the assumption that
302  * triggers on FKs can't be deferred. See notes with AfterTriggers
303  * data structures, below.
304  */
305  if (stmt->isconstraint)
306  ereport(ERROR,
307  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
308  errmsg("\"%s\" is a foreign table",
310  errdetail("Foreign tables cannot have constraint triggers.")));
311  }
312  else
313  ereport(ERROR,
314  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
315  errmsg("relation \"%s\" cannot have triggers",
317  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
318 
320  ereport(ERROR,
321  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
322  errmsg("permission denied: \"%s\" is a system catalog",
323  RelationGetRelationName(rel))));
324 
325  if (stmt->isconstraint)
326  {
327  /*
328  * We must take a lock on the target relation to protect against
329  * concurrent drop. It's not clear that AccessShareLock is strong
330  * enough, but we certainly need at least that much... otherwise, we
331  * might end up creating a pg_constraint entry referencing a
332  * nonexistent table.
333  */
334  if (OidIsValid(refRelOid))
335  {
336  LockRelationOid(refRelOid, AccessShareLock);
337  constrrelid = refRelOid;
338  }
339  else if (stmt->constrrel != NULL)
340  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
341  false);
342  }
343 
344  /* permission checks */
345  if (!isInternal)
346  {
347  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
348  ACL_TRIGGER);
349  if (aclresult != ACLCHECK_OK)
350  aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
352 
353  if (OidIsValid(constrrelid))
354  {
355  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
356  ACL_TRIGGER);
357  if (aclresult != ACLCHECK_OK)
358  aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
359  get_rel_name(constrrelid));
360  }
361  }
362 
363  /*
364  * When called on a partitioned table to create a FOR EACH ROW trigger
365  * that's not internal, we create one trigger for each partition, too.
366  *
367  * For that, we'd better hold lock on all of them ahead of time.
368  */
369  partition_recurse = !isInternal && stmt->row &&
370  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
371  if (partition_recurse)
373  ShareRowExclusiveLock, NULL));
374 
375  /* Compute tgtype */
376  TRIGGER_CLEAR_TYPE(tgtype);
377  if (stmt->row)
378  TRIGGER_SETT_ROW(tgtype);
379  tgtype |= stmt->timing;
380  tgtype |= stmt->events;
381 
382  /* Disallow ROW-level TRUNCATE triggers */
383  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
384  ereport(ERROR,
385  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
386  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
387 
388  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
389  if (TRIGGER_FOR_INSTEAD(tgtype))
390  {
391  if (!TRIGGER_FOR_ROW(tgtype))
392  ereport(ERROR,
393  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
394  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
395  if (stmt->whenClause)
396  ereport(ERROR,
397  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
398  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
399  if (stmt->columns != NIL)
400  ereport(ERROR,
401  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
402  errmsg("INSTEAD OF triggers cannot have column lists")));
403  }
404 
405  /*
406  * We don't yet support naming ROW transition variables, but the parser
407  * recognizes the syntax so we can give a nicer message here.
408  *
409  * Per standard, REFERENCING TABLE names are only allowed on AFTER
410  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
411  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
412  * only allowed once. Per standard, OLD may not be specified when
413  * creating a trigger only for INSERT, and NEW may not be specified when
414  * creating a trigger only for DELETE.
415  *
416  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
417  * reference both ROW and TABLE transition data.
418  */
419  if (stmt->transitionRels != NIL)
420  {
421  List *varList = stmt->transitionRels;
422  ListCell *lc;
423 
424  foreach(lc, varList)
425  {
427 
428  if (!(tt->isTable))
429  ereport(ERROR,
430  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
431  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
432  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
433 
434  /*
435  * Because of the above test, we omit further ROW-related testing
436  * below. If we later allow naming OLD and NEW ROW variables,
437  * adjustments will be needed below.
438  */
439 
440  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
441  ereport(ERROR,
442  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
443  errmsg("\"%s\" is a foreign table",
445  errdetail("Triggers on foreign tables cannot have transition tables.")));
446 
447  if (rel->rd_rel->relkind == RELKIND_VIEW)
448  ereport(ERROR,
449  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
450  errmsg("\"%s\" is a view",
452  errdetail("Triggers on views cannot have transition tables.")));
453 
454  /*
455  * We currently don't allow row-level triggers with transition
456  * tables on partition or inheritance children. Such triggers
457  * would somehow need to see tuples converted to the format of the
458  * table they're attached to, and it's not clear which subset of
459  * tuples each child should see. See also the prohibitions in
460  * ATExecAttachPartition() and ATExecAddInherit().
461  */
462  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
463  {
464  /* Use appropriate error message. */
465  if (rel->rd_rel->relispartition)
466  ereport(ERROR,
467  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
468  errmsg("ROW triggers with transition tables are not supported on partitions")));
469  else
470  ereport(ERROR,
471  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
472  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
473  }
474 
475  if (stmt->timing != TRIGGER_TYPE_AFTER)
476  ereport(ERROR,
477  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
478  errmsg("transition table name can only be specified for an AFTER trigger")));
479 
480  if (TRIGGER_FOR_TRUNCATE(tgtype))
481  ereport(ERROR,
482  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
483  errmsg("TRUNCATE triggers with transition tables are not supported")));
484 
485  /*
486  * We currently don't allow multi-event triggers ("INSERT OR
487  * UPDATE") with transition tables, because it's not clear how to
488  * handle INSERT ... ON CONFLICT statements which can fire both
489  * INSERT and UPDATE triggers. We show the inserted tuples to
490  * INSERT triggers and the updated tuples to UPDATE triggers, but
491  * it's not yet clear what INSERT OR UPDATE trigger should see.
492  * This restriction could be lifted if we can decide on the right
493  * semantics in a later release.
494  */
495  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
496  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
497  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
498  ereport(ERROR,
499  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
500  errmsg("transition tables cannot be specified for triggers with more than one event")));
501 
502  /*
503  * We currently don't allow column-specific triggers with
504  * transition tables. Per spec, that seems to require
505  * accumulating separate transition tables for each combination of
506  * columns, which is a lot of work for a rather marginal feature.
507  */
508  if (stmt->columns != NIL)
509  ereport(ERROR,
510  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
511  errmsg("transition tables cannot be specified for triggers with column lists")));
512 
513  /*
514  * We disallow constraint triggers with transition tables, to
515  * protect the assumption that such triggers can't be deferred.
516  * See notes with AfterTriggers data structures, below.
517  *
518  * Currently this is enforced by the grammar, so just Assert here.
519  */
520  Assert(!stmt->isconstraint);
521 
522  if (tt->isNew)
523  {
524  if (!(TRIGGER_FOR_INSERT(tgtype) ||
525  TRIGGER_FOR_UPDATE(tgtype)))
526  ereport(ERROR,
527  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
528  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
529 
530  if (newtablename != NULL)
531  ereport(ERROR,
532  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
533  errmsg("NEW TABLE cannot be specified multiple times")));
534 
535  newtablename = tt->name;
536  }
537  else
538  {
539  if (!(TRIGGER_FOR_DELETE(tgtype) ||
540  TRIGGER_FOR_UPDATE(tgtype)))
541  ereport(ERROR,
542  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
543  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
544 
545  if (oldtablename != NULL)
546  ereport(ERROR,
547  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
548  errmsg("OLD TABLE cannot be specified multiple times")));
549 
550  oldtablename = tt->name;
551  }
552  }
553 
554  if (newtablename != NULL && oldtablename != NULL &&
555  strcmp(newtablename, oldtablename) == 0)
556  ereport(ERROR,
557  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
558  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
559  }
560 
561  /*
562  * Parse the WHEN clause, if any and we weren't passed an already
563  * transformed one.
564  *
565  * Note that as a side effect, we fill whenRtable when parsing. If we got
566  * an already parsed clause, this does not occur, which is what we want --
567  * no point in adding redundant dependencies below.
568  */
569  if (!whenClause && stmt->whenClause)
570  {
571  ParseState *pstate;
572  ParseNamespaceItem *nsitem;
573  List *varList;
574  ListCell *lc;
575 
576  /* Set up a pstate to parse with */
577  pstate = make_parsestate(NULL);
578  pstate->p_sourcetext = queryString;
579 
580  /*
581  * Set up nsitems for OLD and NEW references.
582  *
583  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
584  */
585  nsitem = addRangeTableEntryForRelation(pstate, rel,
587  makeAlias("old", NIL),
588  false, false);
589  addNSItemToQuery(pstate, nsitem, false, true, true);
590  nsitem = addRangeTableEntryForRelation(pstate, rel,
592  makeAlias("new", NIL),
593  false, false);
594  addNSItemToQuery(pstate, nsitem, false, true, true);
595 
596  /* Transform expression. Copy to be sure we don't modify original */
597  whenClause = transformWhereClause(pstate,
598  copyObject(stmt->whenClause),
600  "WHEN");
601  /* we have to fix its collations too */
602  assign_expr_collations(pstate, whenClause);
603 
604  /*
605  * Check for disallowed references to OLD/NEW.
606  *
607  * NB: pull_var_clause is okay here only because we don't allow
608  * subselects in WHEN clauses; it would fail to examine the contents
609  * of subselects.
610  */
611  varList = pull_var_clause(whenClause, 0);
612  foreach(lc, varList)
613  {
614  Var *var = (Var *) lfirst(lc);
615 
616  switch (var->varno)
617  {
618  case PRS2_OLD_VARNO:
619  if (!TRIGGER_FOR_ROW(tgtype))
620  ereport(ERROR,
621  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
622  errmsg("statement trigger's WHEN condition cannot reference column values"),
623  parser_errposition(pstate, var->location)));
624  if (TRIGGER_FOR_INSERT(tgtype))
625  ereport(ERROR,
626  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
627  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
628  parser_errposition(pstate, var->location)));
629  /* system columns are okay here */
630  break;
631  case PRS2_NEW_VARNO:
632  if (!TRIGGER_FOR_ROW(tgtype))
633  ereport(ERROR,
634  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
635  errmsg("statement trigger's WHEN condition cannot reference column values"),
636  parser_errposition(pstate, var->location)));
637  if (TRIGGER_FOR_DELETE(tgtype))
638  ereport(ERROR,
639  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
640  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
641  parser_errposition(pstate, var->location)));
642  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
643  ereport(ERROR,
644  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
645  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
646  parser_errposition(pstate, var->location)));
647  if (TRIGGER_FOR_BEFORE(tgtype) &&
648  var->varattno == 0 &&
649  RelationGetDescr(rel)->constr &&
650  RelationGetDescr(rel)->constr->has_generated_stored)
651  ereport(ERROR,
652  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
653  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
654  errdetail("A whole-row reference is used and the table contains generated columns."),
655  parser_errposition(pstate, var->location)));
656  if (TRIGGER_FOR_BEFORE(tgtype) &&
657  var->varattno > 0 &&
658  TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
659  ereport(ERROR,
660  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
661  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
662  errdetail("Column \"%s\" is a generated column.",
663  NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
664  parser_errposition(pstate, var->location)));
665  break;
666  default:
667  /* can't happen without add_missing_from, so just elog */
668  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
669  break;
670  }
671  }
672 
673  /* we'll need the rtable for recordDependencyOnExpr */
674  whenRtable = pstate->p_rtable;
675 
676  qual = nodeToString(whenClause);
677 
678  free_parsestate(pstate);
679  }
680  else if (!whenClause)
681  {
682  whenClause = NULL;
683  whenRtable = NIL;
684  qual = NULL;
685  }
686  else
687  {
688  qual = nodeToString(whenClause);
689  whenRtable = NIL;
690  }
691 
692  /*
693  * Find and validate the trigger function.
694  */
695  if (!OidIsValid(funcoid))
696  funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
697  if (!isInternal)
698  {
699  aclresult = object_aclcheck(ProcedureRelationId, funcoid, GetUserId(), ACL_EXECUTE);
700  if (aclresult != ACLCHECK_OK)
701  aclcheck_error(aclresult, OBJECT_FUNCTION,
702  NameListToString(stmt->funcname));
703  }
704  funcrettype = get_func_rettype(funcoid);
705  if (funcrettype != TRIGGEROID)
706  ereport(ERROR,
707  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
708  errmsg("function %s must return type %s",
709  NameListToString(stmt->funcname), "trigger")));
710 
711  /*
712  * Scan pg_trigger to see if there is already a trigger of the same name.
713  * Skip this for internally generated triggers, since we'll modify the
714  * name to be unique below.
715  *
716  * NOTE that this is cool only because we have ShareRowExclusiveLock on
717  * the relation, so the trigger set won't be changing underneath us.
718  */
719  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
720  if (!isInternal)
721  {
722  ScanKeyData skeys[2];
723  SysScanDesc tgscan;
724 
725  ScanKeyInit(&skeys[0],
726  Anum_pg_trigger_tgrelid,
727  BTEqualStrategyNumber, F_OIDEQ,
729 
730  ScanKeyInit(&skeys[1],
731  Anum_pg_trigger_tgname,
732  BTEqualStrategyNumber, F_NAMEEQ,
733  CStringGetDatum(stmt->trigname));
734 
735  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
736  NULL, 2, skeys);
737 
738  /* There should be at most one matching tuple */
739  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
740  {
741  Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
742 
743  trigoid = oldtrigger->oid;
744  existing_constraint_oid = oldtrigger->tgconstraint;
745  existing_isInternal = oldtrigger->tgisinternal;
746  existing_isClone = OidIsValid(oldtrigger->tgparentid);
747  trigger_exists = true;
748  /* copy the tuple to use in CatalogTupleUpdate() */
749  tuple = heap_copytuple(tuple);
750  }
751  systable_endscan(tgscan);
752  }
753 
754  if (!trigger_exists)
755  {
756  /* Generate the OID for the new trigger. */
757  trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
758  Anum_pg_trigger_oid);
759  }
760  else
761  {
762  /*
763  * If OR REPLACE was specified, we'll replace the old trigger;
764  * otherwise complain about the duplicate name.
765  */
766  if (!stmt->replace)
767  ereport(ERROR,
769  errmsg("trigger \"%s\" for relation \"%s\" already exists",
770  stmt->trigname, RelationGetRelationName(rel))));
771 
772  /*
773  * An internal trigger or a child trigger (isClone) cannot be replaced
774  * by a user-defined trigger. However, skip this test when
775  * in_partition, because then we're recursing from a partitioned table
776  * and the check was made at the parent level.
777  */
778  if ((existing_isInternal || existing_isClone) &&
779  !isInternal && !in_partition)
780  ereport(ERROR,
782  errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger",
783  stmt->trigname, RelationGetRelationName(rel))));
784 
785  /*
786  * It is not allowed to replace with a constraint trigger; gram.y
787  * should have enforced this already.
788  */
789  Assert(!stmt->isconstraint);
790 
791  /*
792  * It is not allowed to replace an existing constraint trigger,
793  * either. (The reason for these restrictions is partly that it seems
794  * difficult to deal with pending trigger events in such cases, and
795  * partly that the command might imply changing the constraint's
796  * properties as well, which doesn't seem nice.)
797  */
798  if (OidIsValid(existing_constraint_oid))
799  ereport(ERROR,
801  errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
802  stmt->trigname, RelationGetRelationName(rel))));
803  }
804 
805  /*
806  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
807  * corresponding pg_constraint entry.
808  */
809  if (stmt->isconstraint && !OidIsValid(constraintOid))
810  {
811  /* Internal callers should have made their own constraints */
812  Assert(!isInternal);
813  constraintOid = CreateConstraintEntry(stmt->trigname,
815  CONSTRAINT_TRIGGER,
816  stmt->deferrable,
817  stmt->initdeferred,
818  true,
819  InvalidOid, /* no parent */
820  RelationGetRelid(rel),
821  NULL, /* no conkey */
822  0,
823  0,
824  InvalidOid, /* no domain */
825  InvalidOid, /* no index */
826  InvalidOid, /* no foreign key */
827  NULL,
828  NULL,
829  NULL,
830  NULL,
831  0,
832  ' ',
833  ' ',
834  NULL,
835  0,
836  ' ',
837  NULL, /* no exclusion */
838  NULL, /* no check constraint */
839  NULL,
840  true, /* islocal */
841  0, /* inhcount */
842  true, /* noinherit */
843  isInternal); /* is_internal */
844  }
845 
846  /*
847  * If trigger is internally generated, modify the provided trigger name to
848  * ensure uniqueness by appending the trigger OID. (Callers will usually
849  * supply a simple constant trigger name in these cases.)
850  */
851  if (isInternal)
852  {
853  snprintf(internaltrigname, sizeof(internaltrigname),
854  "%s_%u", stmt->trigname, trigoid);
855  trigname = internaltrigname;
856  }
857  else
858  {
859  /* user-defined trigger; use the specified trigger name as-is */
860  trigname = stmt->trigname;
861  }
862 
863  /*
864  * Build the new pg_trigger tuple.
865  *
866  * When we're creating a trigger in a partition, we mark it as internal,
867  * even though we don't do the isInternal magic in this function. This
868  * makes the triggers in partitions identical to the ones in the
869  * partitioned tables, except that they are marked internal.
870  */
871  memset(nulls, false, sizeof(nulls));
872 
873  values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
874  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
875  values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
876  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
877  CStringGetDatum(trigname));
878  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
879  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
880  values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
881  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
882  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
883  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
884  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
885  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
886  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
887 
888  if (stmt->args)
889  {
890  ListCell *le;
891  char *args;
892  int16 nargs = list_length(stmt->args);
893  int len = 0;
894 
895  foreach(le, stmt->args)
896  {
897  char *ar = strVal(lfirst(le));
898 
899  len += strlen(ar) + 4;
900  for (; *ar; ar++)
901  {
902  if (*ar == '\\')
903  len++;
904  }
905  }
906  args = (char *) palloc(len + 1);
907  args[0] = '\0';
908  foreach(le, stmt->args)
909  {
910  char *s = strVal(lfirst(le));
911  char *d = args + strlen(args);
912 
913  while (*s)
914  {
915  if (*s == '\\')
916  *d++ = '\\';
917  *d++ = *s++;
918  }
919  strcpy(d, "\\000");
920  }
921  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
922  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
924  }
925  else
926  {
927  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
928  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
929  CStringGetDatum(""));
930  }
931 
932  /* build column number array if it's a column-specific trigger */
933  ncolumns = list_length(stmt->columns);
934  if (ncolumns == 0)
935  columns = NULL;
936  else
937  {
938  ListCell *cell;
939  int i = 0;
940 
941  columns = (int16 *) palloc(ncolumns * sizeof(int16));
942  foreach(cell, stmt->columns)
943  {
944  char *name = strVal(lfirst(cell));
945  int16 attnum;
946  int j;
947 
948  /* Lookup column name. System columns are not allowed */
949  attnum = attnameAttNum(rel, name, false);
950  if (attnum == InvalidAttrNumber)
951  ereport(ERROR,
952  (errcode(ERRCODE_UNDEFINED_COLUMN),
953  errmsg("column \"%s\" of relation \"%s\" does not exist",
954  name, RelationGetRelationName(rel))));
955 
956  /* Check for duplicates */
957  for (j = i - 1; j >= 0; j--)
958  {
959  if (columns[j] == attnum)
960  ereport(ERROR,
961  (errcode(ERRCODE_DUPLICATE_COLUMN),
962  errmsg("column \"%s\" specified more than once",
963  name)));
964  }
965 
966  columns[i++] = attnum;
967  }
968  }
969  tgattr = buildint2vector(columns, ncolumns);
970  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
971 
972  /* set tgqual if trigger has WHEN clause */
973  if (qual)
974  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
975  else
976  nulls[Anum_pg_trigger_tgqual - 1] = true;
977 
978  if (oldtablename)
979  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
980  CStringGetDatum(oldtablename));
981  else
982  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
983  if (newtablename)
984  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
985  CStringGetDatum(newtablename));
986  else
987  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
988 
989  /*
990  * Insert or replace tuple in pg_trigger.
991  */
992  if (!trigger_exists)
993  {
994  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
995  CatalogTupleInsert(tgrel, tuple);
996  }
997  else
998  {
999  HeapTuple newtup;
1000 
1001  newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
1002  CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
1003  heap_freetuple(newtup);
1004  }
1005 
1006  heap_freetuple(tuple); /* free either original or new tuple */
1007  table_close(tgrel, RowExclusiveLock);
1008 
1009  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
1010  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
1011  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
1012  if (oldtablename)
1013  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1014  if (newtablename)
1015  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1016 
1017  /*
1018  * Update relation's pg_class entry; if necessary; and if not, send an SI
1019  * message to make other backends (and this one) rebuild relcache entries.
1020  */
1021  pgrel = table_open(RelationRelationId, RowExclusiveLock);
1022  tuple = SearchSysCacheCopy1(RELOID,
1024  if (!HeapTupleIsValid(tuple))
1025  elog(ERROR, "cache lookup failed for relation %u",
1026  RelationGetRelid(rel));
1027  if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1028  {
1029  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1030 
1031  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1032 
1034  }
1035  else
1037 
1038  heap_freetuple(tuple);
1039  table_close(pgrel, RowExclusiveLock);
1040 
1041  /*
1042  * If we're replacing a trigger, flush all the old dependencies before
1043  * recording new ones.
1044  */
1045  if (trigger_exists)
1046  deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1047 
1048  /*
1049  * Record dependencies for trigger. Always place a normal dependency on
1050  * the function.
1051  */
1052  myself.classId = TriggerRelationId;
1053  myself.objectId = trigoid;
1054  myself.objectSubId = 0;
1055 
1056  referenced.classId = ProcedureRelationId;
1057  referenced.objectId = funcoid;
1058  referenced.objectSubId = 0;
1059  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1060 
1061  if (isInternal && OidIsValid(constraintOid))
1062  {
1063  /*
1064  * Internally-generated trigger for a constraint, so make it an
1065  * internal dependency of the constraint. We can skip depending on
1066  * the relation(s), as there'll be an indirect dependency via the
1067  * constraint.
1068  */
1069  referenced.classId = ConstraintRelationId;
1070  referenced.objectId = constraintOid;
1071  referenced.objectSubId = 0;
1072  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1073  }
1074  else
1075  {
1076  /*
1077  * User CREATE TRIGGER, so place dependencies. We make trigger be
1078  * auto-dropped if its relation is dropped or if the FK relation is
1079  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1080  */
1081  referenced.classId = RelationRelationId;
1082  referenced.objectId = RelationGetRelid(rel);
1083  referenced.objectSubId = 0;
1084  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1085 
1086  if (OidIsValid(constrrelid))
1087  {
1088  referenced.classId = RelationRelationId;
1089  referenced.objectId = constrrelid;
1090  referenced.objectSubId = 0;
1091  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1092  }
1093  /* Not possible to have an index dependency in this case */
1094  Assert(!OidIsValid(indexOid));
1095 
1096  /*
1097  * If it's a user-specified constraint trigger, make the constraint
1098  * internally dependent on the trigger instead of vice versa.
1099  */
1100  if (OidIsValid(constraintOid))
1101  {
1102  referenced.classId = ConstraintRelationId;
1103  referenced.objectId = constraintOid;
1104  referenced.objectSubId = 0;
1105  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1106  }
1107 
1108  /*
1109  * If it's a partition trigger, create the partition dependencies.
1110  */
1111  if (OidIsValid(parentTriggerOid))
1112  {
1113  ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1114  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1115  ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1116  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1117  }
1118  }
1119 
1120  /* If column-specific trigger, add normal dependencies on columns */
1121  if (columns != NULL)
1122  {
1123  int i;
1124 
1125  referenced.classId = RelationRelationId;
1126  referenced.objectId = RelationGetRelid(rel);
1127  for (i = 0; i < ncolumns; i++)
1128  {
1129  referenced.objectSubId = columns[i];
1130  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1131  }
1132  }
1133 
1134  /*
1135  * If it has a WHEN clause, add dependencies on objects mentioned in the
1136  * expression (eg, functions, as well as any columns used).
1137  */
1138  if (whenRtable != NIL)
1139  recordDependencyOnExpr(&myself, whenClause, whenRtable,
1141 
1142  /* Post creation hook for new trigger */
1143  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1144  isInternal);
1145 
1146  /*
1147  * Lastly, create the trigger on child relations, if needed.
1148  */
1149  if (partition_recurse)
1150  {
1151  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1152  int i;
1153  MemoryContext oldcxt,
1154  perChildCxt;
1155 
1157  "part trig clone",
1159 
1160  /*
1161  * We don't currently expect to be called with a valid indexOid. If
1162  * that ever changes then we'll need to write code here to find the
1163  * corresponding child index.
1164  */
1165  Assert(!OidIsValid(indexOid));
1166 
1167  oldcxt = MemoryContextSwitchTo(perChildCxt);
1168 
1169  /* Iterate to create the trigger on each existing partition */
1170  for (i = 0; i < partdesc->nparts; i++)
1171  {
1172  CreateTrigStmt *childStmt;
1173  Relation childTbl;
1174  Node *qual;
1175 
1176  childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1177 
1178  /*
1179  * Initialize our fabricated parse node by copying the original
1180  * one, then resetting fields that we pass separately.
1181  */
1182  childStmt = (CreateTrigStmt *) copyObject(stmt);
1183  childStmt->funcname = NIL;
1184  childStmt->whenClause = NULL;
1185 
1186  /* If there is a WHEN clause, create a modified copy of it */
1187  qual = copyObject(whenClause);
1188  qual = (Node *)
1190  childTbl, rel);
1191  qual = (Node *)
1193  childTbl, rel);
1194 
1195  CreateTriggerFiringOn(childStmt, queryString,
1196  partdesc->oids[i], refRelOid,
1198  funcoid, trigoid, qual,
1199  isInternal, true, trigger_fires_when);
1200 
1201  table_close(childTbl, NoLock);
1202 
1203  MemoryContextReset(perChildCxt);
1204  }
1205 
1206  MemoryContextSwitchTo(oldcxt);
1207  MemoryContextDelete(perChildCxt);
1208  }
1209 
1210  /* Keep lock on target rel until end of xact */
1211  table_close(rel, NoLock);
1212 
1213  return myself;
1214 }
1215 
1216 /*
1217  * TriggerSetParentTrigger
1218  * Set a partition's trigger as child of its parent trigger,
1219  * or remove the linkage if parentTrigId is InvalidOid.
1220  *
1221  * This updates the constraint's pg_trigger row to show it as inherited, and
1222  * adds PARTITION dependencies to prevent the trigger from being deleted
1223  * on its own. Alternatively, reverse that.
1224  */
1225 void
1227  Oid childTrigId,
1228  Oid parentTrigId,
1229  Oid childTableId)
1230 {
1231  SysScanDesc tgscan;
1232  ScanKeyData skey[1];
1233  Form_pg_trigger trigForm;
1234  HeapTuple tuple,
1235  newtup;
1236  ObjectAddress depender;
1237  ObjectAddress referenced;
1238 
1239  /*
1240  * Find the trigger to delete.
1241  */
1242  ScanKeyInit(&skey[0],
1243  Anum_pg_trigger_oid,
1244  BTEqualStrategyNumber, F_OIDEQ,
1245  ObjectIdGetDatum(childTrigId));
1246 
1247  tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true,
1248  NULL, 1, skey);
1249 
1250  tuple = systable_getnext(tgscan);
1251  if (!HeapTupleIsValid(tuple))
1252  elog(ERROR, "could not find tuple for trigger %u", childTrigId);
1253  newtup = heap_copytuple(tuple);
1254  trigForm = (Form_pg_trigger) GETSTRUCT(newtup);
1255  if (OidIsValid(parentTrigId))
1256  {
1257  /* don't allow setting parent for a constraint that already has one */
1258  if (OidIsValid(trigForm->tgparentid))
1259  elog(ERROR, "trigger %u already has a parent trigger",
1260  childTrigId);
1261 
1262  trigForm->tgparentid = parentTrigId;
1263 
1264  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1265 
1266  ObjectAddressSet(depender, TriggerRelationId, childTrigId);
1267 
1268  ObjectAddressSet(referenced, TriggerRelationId, parentTrigId);
1269  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI);
1270 
1271  ObjectAddressSet(referenced, RelationRelationId, childTableId);
1272  recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC);
1273  }
1274  else
1275  {
1276  trigForm->tgparentid = InvalidOid;
1277 
1278  CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);
1279 
1280  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1281  TriggerRelationId,
1283  deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
1284  RelationRelationId,
1286  }
1287 
1288  heap_freetuple(newtup);
1289  systable_endscan(tgscan);
1290 }
1291 
1292 
1293 /*
1294  * Guts of trigger deletion.
1295  */
1296 void
1298 {
1299  Relation tgrel;
1300  SysScanDesc tgscan;
1301  ScanKeyData skey[1];
1302  HeapTuple tup;
1303  Oid relid;
1304  Relation rel;
1305 
1306  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1307 
1308  /*
1309  * Find the trigger to delete.
1310  */
1311  ScanKeyInit(&skey[0],
1312  Anum_pg_trigger_oid,
1313  BTEqualStrategyNumber, F_OIDEQ,
1314  ObjectIdGetDatum(trigOid));
1315 
1316  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1317  NULL, 1, skey);
1318 
1319  tup = systable_getnext(tgscan);
1320  if (!HeapTupleIsValid(tup))
1321  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1322 
1323  /*
1324  * Open and exclusive-lock the relation the trigger belongs to.
1325  */
1326  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1327 
1328  rel = table_open(relid, AccessExclusiveLock);
1329 
1330  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1331  rel->rd_rel->relkind != RELKIND_VIEW &&
1332  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1333  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1334  ereport(ERROR,
1335  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1336  errmsg("relation \"%s\" cannot have triggers",
1338  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
1339 
1341  ereport(ERROR,
1342  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1343  errmsg("permission denied: \"%s\" is a system catalog",
1344  RelationGetRelationName(rel))));
1345 
1346  /*
1347  * Delete the pg_trigger tuple.
1348  */
1349  CatalogTupleDelete(tgrel, &tup->t_self);
1350 
1351  systable_endscan(tgscan);
1352  table_close(tgrel, RowExclusiveLock);
1353 
1354  /*
1355  * We do not bother to try to determine whether any other triggers remain,
1356  * which would be needed in order to decide whether it's safe to clear the
1357  * relation's relhastriggers. (In any case, there might be a concurrent
1358  * process adding new triggers.) Instead, just force a relcache inval to
1359  * make other backends (and this one too!) rebuild their relcache entries.
1360  * There's no great harm in leaving relhastriggers true even if there are
1361  * no triggers left.
1362  */
1364 
1365  /* Keep lock on trigger's rel until end of xact */
1366  table_close(rel, NoLock);
1367 }
1368 
1369 /*
1370  * get_trigger_oid - Look up a trigger by name to find its OID.
1371  *
1372  * If missing_ok is false, throw an error if trigger not found. If
1373  * true, just return InvalidOid.
1374  */
1375 Oid
1376 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1377 {
1378  Relation tgrel;
1379  ScanKeyData skey[2];
1380  SysScanDesc tgscan;
1381  HeapTuple tup;
1382  Oid oid;
1383 
1384  /*
1385  * Find the trigger, verify permissions, set up object address
1386  */
1387  tgrel = table_open(TriggerRelationId, AccessShareLock);
1388 
1389  ScanKeyInit(&skey[0],
1390  Anum_pg_trigger_tgrelid,
1391  BTEqualStrategyNumber, F_OIDEQ,
1392  ObjectIdGetDatum(relid));
1393  ScanKeyInit(&skey[1],
1394  Anum_pg_trigger_tgname,
1395  BTEqualStrategyNumber, F_NAMEEQ,
1396  CStringGetDatum(trigname));
1397 
1398  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1399  NULL, 2, skey);
1400 
1401  tup = systable_getnext(tgscan);
1402 
1403  if (!HeapTupleIsValid(tup))
1404  {
1405  if (!missing_ok)
1406  ereport(ERROR,
1407  (errcode(ERRCODE_UNDEFINED_OBJECT),
1408  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1409  trigname, get_rel_name(relid))));
1410  oid = InvalidOid;
1411  }
1412  else
1413  {
1414  oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1415  }
1416 
1417  systable_endscan(tgscan);
1418  table_close(tgrel, AccessShareLock);
1419  return oid;
1420 }
1421 
1422 /*
1423  * Perform permissions and integrity checks before acquiring a relation lock.
1424  */
1425 static void
1427  void *arg)
1428 {
1429  HeapTuple tuple;
1430  Form_pg_class form;
1431 
1432  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1433  if (!HeapTupleIsValid(tuple))
1434  return; /* concurrently dropped */
1435  form = (Form_pg_class) GETSTRUCT(tuple);
1436 
1437  /* only tables and views can have triggers */
1438  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1439  form->relkind != RELKIND_FOREIGN_TABLE &&
1440  form->relkind != RELKIND_PARTITIONED_TABLE)
1441  ereport(ERROR,
1442  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1443  errmsg("relation \"%s\" cannot have triggers",
1444  rv->relname),
1445  errdetail_relkind_not_supported(form->relkind)));
1446 
1447  /* you must own the table to rename one of its triggers */
1448  if (!object_ownercheck(RelationRelationId, relid, GetUserId()))
1450  if (!allowSystemTableMods && IsSystemClass(relid, form))
1451  ereport(ERROR,
1452  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1453  errmsg("permission denied: \"%s\" is a system catalog",
1454  rv->relname)));
1455 
1456  ReleaseSysCache(tuple);
1457 }
1458 
1459 /*
1460  * renametrig - changes the name of a trigger on a relation
1461  *
1462  * trigger name is changed in trigger catalog.
1463  * No record of the previous name is kept.
1464  *
1465  * get proper relrelation from relation catalog (if not arg)
1466  * scan trigger catalog
1467  * for name conflict (within rel)
1468  * for original trigger (if not arg)
1469  * modify tgname in trigger tuple
1470  * update row in catalog
1471  */
1474 {
1475  Oid tgoid;
1476  Relation targetrel;
1477  Relation tgrel;
1478  HeapTuple tuple;
1479  SysScanDesc tgscan;
1480  ScanKeyData key[2];
1481  Oid relid;
1482  ObjectAddress address;
1483 
1484  /*
1485  * Look up name, check permissions, and acquire lock (which we will NOT
1486  * release until end of transaction).
1487  */
1489  0,
1491  NULL);
1492 
1493  /* Have lock already, so just need to build relcache entry. */
1494  targetrel = relation_open(relid, NoLock);
1495 
1496  /*
1497  * On partitioned tables, this operation recurses to partitions. Lock all
1498  * tables upfront.
1499  */
1500  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1501  (void) find_all_inheritors(relid, AccessExclusiveLock, NULL);
1502 
1503  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1504 
1505  /*
1506  * Search for the trigger to modify.
1507  */
1508  ScanKeyInit(&key[0],
1509  Anum_pg_trigger_tgrelid,
1510  BTEqualStrategyNumber, F_OIDEQ,
1511  ObjectIdGetDatum(relid));
1512  ScanKeyInit(&key[1],
1513  Anum_pg_trigger_tgname,
1514  BTEqualStrategyNumber, F_NAMEEQ,
1515  PointerGetDatum(stmt->subname));
1516  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1517  NULL, 2, key);
1518  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1519  {
1520  Form_pg_trigger trigform;
1521 
1522  trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1523  tgoid = trigform->oid;
1524 
1525  /*
1526  * If the trigger descends from a trigger on a parent partitioned
1527  * table, reject the rename. We don't allow a trigger in a partition
1528  * to differ in name from that of its parent: that would lead to an
1529  * inconsistency that pg_dump would not reproduce.
1530  */
1531  if (OidIsValid(trigform->tgparentid))
1532  ereport(ERROR,
1533  errmsg("cannot rename trigger \"%s\" on table \"%s\"",
1534  stmt->subname, RelationGetRelationName(targetrel)),
1535  errhint("Rename the trigger on the partitioned table \"%s\" instead.",
1536  get_rel_name(get_partition_parent(relid, false))));
1537 
1538 
1539  /* Rename the trigger on this relation ... */
1540  renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
1541  stmt->subname);
1542 
1543  /* ... and if it is partitioned, recurse to its partitions */
1544  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1545  {
1546  PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);
1547 
1548  for (int i = 0; i < partdesc->nparts; i++)
1549  {
1550  Oid partitionId = partdesc->oids[i];
1551 
1552  renametrig_partition(tgrel, partitionId, trigform->oid,
1553  stmt->newname, stmt->subname);
1554  }
1555  }
1556  }
1557  else
1558  {
1559  ereport(ERROR,
1560  (errcode(ERRCODE_UNDEFINED_OBJECT),
1561  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1562  stmt->subname, RelationGetRelationName(targetrel))));
1563  }
1564 
1565  ObjectAddressSet(address, TriggerRelationId, tgoid);
1566 
1567  systable_endscan(tgscan);
1568 
1569  table_close(tgrel, RowExclusiveLock);
1570 
1571  /*
1572  * Close rel, but keep exclusive lock!
1573  */
1574  relation_close(targetrel, NoLock);
1575 
1576  return address;
1577 }
1578 
1579 /*
1580  * Subroutine for renametrig -- perform the actual work of renaming one
1581  * trigger on one table.
1582  *
1583  * If the trigger has a name different from the expected one, raise a
1584  * NOTICE about it.
1585  */
1586 static void
1588  const char *newname, const char *expected_name)
1589 {
1590  HeapTuple tuple;
1591  Form_pg_trigger tgform;
1592  ScanKeyData key[2];
1593  SysScanDesc tgscan;
1594 
1595  /* If the trigger already has the new name, nothing to do. */
1596  tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
1597  if (strcmp(NameStr(tgform->tgname), newname) == 0)
1598  return;
1599 
1600  /*
1601  * Before actually trying the rename, search for triggers with the same
1602  * name. The update would fail with an ugly message in that case, and it
1603  * is better to throw a nicer error.
1604  */
1605  ScanKeyInit(&key[0],
1606  Anum_pg_trigger_tgrelid,
1607  BTEqualStrategyNumber, F_OIDEQ,
1608  ObjectIdGetDatum(RelationGetRelid(targetrel)));
1609  ScanKeyInit(&key[1],
1610  Anum_pg_trigger_tgname,
1611  BTEqualStrategyNumber, F_NAMEEQ,
1612  PointerGetDatum(newname));
1613  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1614  NULL, 2, key);
1615  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1616  ereport(ERROR,
1618  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1619  newname, RelationGetRelationName(targetrel))));
1620  systable_endscan(tgscan);
1621 
1622  /*
1623  * The target name is free; update the existing pg_trigger tuple with it.
1624  */
1625  tuple = heap_copytuple(trigtup); /* need a modifiable copy */
1626  tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1627 
1628  /*
1629  * If the trigger has a name different from what we expected, let the user
1630  * know. (We can proceed anyway, since we must have reached here following
1631  * a tgparentid link.)
1632  */
1633  if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
1634  ereport(NOTICE,
1635  errmsg("renamed trigger \"%s\" on relation \"%s\"",
1636  NameStr(tgform->tgname),
1637  RelationGetRelationName(targetrel)));
1638 
1639  namestrcpy(&tgform->tgname, newname);
1640 
1641  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1642 
1643  InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);
1644 
1645  /*
1646  * Invalidate relation's relcache entry so that other backends (and this
1647  * one too!) are sent SI message to make them rebuild relcache entries.
1648  * (Ideally this should happen automatically...)
1649  */
1650  CacheInvalidateRelcache(targetrel);
1651 }
1652 
1653 /*
1654  * Subroutine for renametrig -- Helper for recursing to partitions when
1655  * renaming triggers on a partitioned table.
1656  */
1657 static void
1658 renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
1659  const char *newname, const char *expected_name)
1660 {
1661  SysScanDesc tgscan;
1662  ScanKeyData key;
1663  HeapTuple tuple;
1664 
1665  /*
1666  * Given a relation and the OID of a trigger on parent relation, find the
1667  * corresponding trigger in the child and rename that trigger to the given
1668  * name.
1669  */
1670  ScanKeyInit(&key,
1671  Anum_pg_trigger_tgrelid,
1672  BTEqualStrategyNumber, F_OIDEQ,
1673  ObjectIdGetDatum(partitionId));
1674  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1675  NULL, 1, &key);
1676  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1677  {
1678  Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1679  Relation partitionRel;
1680 
1681  if (tgform->tgparentid != parentTriggerOid)
1682  continue; /* not our trigger */
1683 
1684  partitionRel = table_open(partitionId, NoLock);
1685 
1686  /* Rename the trigger on this partition */
1687  renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);
1688 
1689  /* And if this relation is partitioned, recurse to its partitions */
1690  if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1691  {
1692  PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
1693  true);
1694 
1695  for (int i = 0; i < partdesc->nparts; i++)
1696  {
1697  Oid partoid = partdesc->oids[i];
1698 
1699  renametrig_partition(tgrel, partoid, tgform->oid, newname,
1700  NameStr(tgform->tgname));
1701  }
1702  }
1703  table_close(partitionRel, NoLock);
1704 
1705  /* There should be at most one matching tuple */
1706  break;
1707  }
1708  systable_endscan(tgscan);
1709 }
1710 
1711 /*
1712  * EnableDisableTrigger()
1713  *
1714  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1715  * to change 'tgenabled' field for the specified trigger(s)
1716  *
1717  * rel: relation to process (caller must hold suitable lock on it)
1718  * tgname: trigger to process, or NULL to scan all triggers
1719  * fires_when: new value for tgenabled field. In addition to generic
1720  * enablement/disablement, this also defines when the trigger
1721  * should be fired in session replication roles.
1722  * skip_system: if true, skip "system" triggers (constraint triggers)
1723  * recurse: if true, recurse to partitions
1724  *
1725  * Caller should have checked permissions for the table; here we also
1726  * enforce that superuser privilege is required to alter the state of
1727  * system triggers
1728  */
1729 void
1730 EnableDisableTrigger(Relation rel, const char *tgname,
1731  char fires_when, bool skip_system, bool recurse,
1732  LOCKMODE lockmode)
1733 {
1734  Relation tgrel;
1735  int nkeys;
1736  ScanKeyData keys[2];
1737  SysScanDesc tgscan;
1738  HeapTuple tuple;
1739  bool found;
1740  bool changed;
1741 
1742  /* Scan the relevant entries in pg_triggers */
1743  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1744 
1745  ScanKeyInit(&keys[0],
1746  Anum_pg_trigger_tgrelid,
1747  BTEqualStrategyNumber, F_OIDEQ,
1749  if (tgname)
1750  {
1751  ScanKeyInit(&keys[1],
1752  Anum_pg_trigger_tgname,
1753  BTEqualStrategyNumber, F_NAMEEQ,
1754  CStringGetDatum(tgname));
1755  nkeys = 2;
1756  }
1757  else
1758  nkeys = 1;
1759 
1760  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1761  NULL, nkeys, keys);
1762 
1763  found = changed = false;
1764 
1765  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1766  {
1767  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1768 
1769  if (oldtrig->tgisinternal)
1770  {
1771  /* system trigger ... ok to process? */
1772  if (skip_system)
1773  continue;
1774  if (!superuser())
1775  ereport(ERROR,
1776  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1777  errmsg("permission denied: \"%s\" is a system trigger",
1778  NameStr(oldtrig->tgname))));
1779  }
1780 
1781  found = true;
1782 
1783  if (oldtrig->tgenabled != fires_when)
1784  {
1785  /* need to change this one ... make a copy to scribble on */
1786  HeapTuple newtup = heap_copytuple(tuple);
1787  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1788 
1789  newtrig->tgenabled = fires_when;
1790 
1791  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1792 
1793  heap_freetuple(newtup);
1794 
1795  changed = true;
1796  }
1797 
1798  /*
1799  * When altering FOR EACH ROW triggers on a partitioned table, do the
1800  * same on the partitions as well, unless ONLY is specified.
1801  *
1802  * Note that we recurse even if we didn't change the trigger above,
1803  * because the partitions' copy of the trigger may have a different
1804  * value of tgenabled than the parent's trigger and thus might need to
1805  * be changed.
1806  */
1807  if (recurse &&
1808  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
1809  (TRIGGER_FOR_ROW(oldtrig->tgtype)))
1810  {
1811  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1812  int i;
1813 
1814  for (i = 0; i < partdesc->nparts; i++)
1815  {
1816  Relation part;
1817 
1818  part = relation_open(partdesc->oids[i], lockmode);
1819  EnableDisableTrigger(part, NameStr(oldtrig->tgname),
1820  fires_when, skip_system, recurse,
1821  lockmode);
1822  table_close(part, NoLock); /* keep lock till commit */
1823  }
1824  }
1825 
1826  InvokeObjectPostAlterHook(TriggerRelationId,
1827  oldtrig->oid, 0);
1828  }
1829 
1830  systable_endscan(tgscan);
1831 
1832  table_close(tgrel, RowExclusiveLock);
1833 
1834  if (tgname && !found)
1835  ereport(ERROR,
1836  (errcode(ERRCODE_UNDEFINED_OBJECT),
1837  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1838  tgname, RelationGetRelationName(rel))));
1839 
1840  /*
1841  * If we changed anything, broadcast a SI inval message to force each
1842  * backend (including our own!) to rebuild relation's relcache entry.
1843  * Otherwise they will fail to apply the change promptly.
1844  */
1845  if (changed)
1847 }
1848 
1849 
1850 /*
1851  * Build trigger data to attach to the given relcache entry.
1852  *
1853  * Note that trigger data attached to a relcache entry must be stored in
1854  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1855  * But we should be running in a less long-lived working context. To avoid
1856  * leaking cache memory if this routine fails partway through, we build a
1857  * temporary TriggerDesc in working memory and then copy the completed
1858  * structure into cache memory.
1859  */
1860 void
1862 {
1863  TriggerDesc *trigdesc;
1864  int numtrigs;
1865  int maxtrigs;
1866  Trigger *triggers;
1867  Relation tgrel;
1868  ScanKeyData skey;
1869  SysScanDesc tgscan;
1870  HeapTuple htup;
1871  MemoryContext oldContext;
1872  int i;
1873 
1874  /*
1875  * Allocate a working array to hold the triggers (the array is extended if
1876  * necessary)
1877  */
1878  maxtrigs = 16;
1879  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1880  numtrigs = 0;
1881 
1882  /*
1883  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1884  * be reading the triggers in name order, except possibly during
1885  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1886  * ensures that triggers will be fired in name order.
1887  */
1888  ScanKeyInit(&skey,
1889  Anum_pg_trigger_tgrelid,
1890  BTEqualStrategyNumber, F_OIDEQ,
1891  ObjectIdGetDatum(RelationGetRelid(relation)));
1892 
1893  tgrel = table_open(TriggerRelationId, AccessShareLock);
1894  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1895  NULL, 1, &skey);
1896 
1897  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1898  {
1899  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1900  Trigger *build;
1901  Datum datum;
1902  bool isnull;
1903 
1904  if (numtrigs >= maxtrigs)
1905  {
1906  maxtrigs *= 2;
1907  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1908  }
1909  build = &(triggers[numtrigs]);
1910 
1911  build->tgoid = pg_trigger->oid;
1913  NameGetDatum(&pg_trigger->tgname)));
1914  build->tgfoid = pg_trigger->tgfoid;
1915  build->tgtype = pg_trigger->tgtype;
1916  build->tgenabled = pg_trigger->tgenabled;
1917  build->tgisinternal = pg_trigger->tgisinternal;
1918  build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1919  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1920  build->tgconstrindid = pg_trigger->tgconstrindid;
1921  build->tgconstraint = pg_trigger->tgconstraint;
1922  build->tgdeferrable = pg_trigger->tgdeferrable;
1923  build->tginitdeferred = pg_trigger->tginitdeferred;
1924  build->tgnargs = pg_trigger->tgnargs;
1925  /* tgattr is first var-width field, so OK to access directly */
1926  build->tgnattr = pg_trigger->tgattr.dim1;
1927  if (build->tgnattr > 0)
1928  {
1929  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1930  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1931  build->tgnattr * sizeof(int16));
1932  }
1933  else
1934  build->tgattr = NULL;
1935  if (build->tgnargs > 0)
1936  {
1937  bytea *val;
1938  char *p;
1939 
1941  Anum_pg_trigger_tgargs,
1942  tgrel->rd_att, &isnull));
1943  if (isnull)
1944  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1945  RelationGetRelationName(relation));
1946  p = (char *) VARDATA_ANY(val);
1947  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1948  for (i = 0; i < build->tgnargs; i++)
1949  {
1950  build->tgargs[i] = pstrdup(p);
1951  p += strlen(p) + 1;
1952  }
1953  }
1954  else
1955  build->tgargs = NULL;
1956 
1957  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1958  tgrel->rd_att, &isnull);
1959  if (!isnull)
1960  build->tgoldtable =
1962  else
1963  build->tgoldtable = NULL;
1964 
1965  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1966  tgrel->rd_att, &isnull);
1967  if (!isnull)
1968  build->tgnewtable =
1970  else
1971  build->tgnewtable = NULL;
1972 
1973  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1974  tgrel->rd_att, &isnull);
1975  if (!isnull)
1976  build->tgqual = TextDatumGetCString(datum);
1977  else
1978  build->tgqual = NULL;
1979 
1980  numtrigs++;
1981  }
1982 
1983  systable_endscan(tgscan);
1984  table_close(tgrel, AccessShareLock);
1985 
1986  /* There might not be any triggers */
1987  if (numtrigs == 0)
1988  {
1989  pfree(triggers);
1990  return;
1991  }
1992 
1993  /* Build trigdesc */
1994  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1995  trigdesc->triggers = triggers;
1996  trigdesc->numtriggers = numtrigs;
1997  for (i = 0; i < numtrigs; i++)
1998  SetTriggerFlags(trigdesc, &(triggers[i]));
1999 
2000  /* Copy completed trigdesc into cache storage */
2002  relation->trigdesc = CopyTriggerDesc(trigdesc);
2003  MemoryContextSwitchTo(oldContext);
2004 
2005  /* Release working memory */
2006  FreeTriggerDesc(trigdesc);
2007 }
2008 
2009 /*
2010  * Update the TriggerDesc's hint flags to include the specified trigger
2011  */
2012 static void
2014 {
2015  int16 tgtype = trigger->tgtype;
2016 
2017  trigdesc->trig_insert_before_row |=
2018  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2019  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2020  trigdesc->trig_insert_after_row |=
2021  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2022  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2023  trigdesc->trig_insert_instead_row |=
2024  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2025  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
2026  trigdesc->trig_insert_before_statement |=
2027  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2028  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2029  trigdesc->trig_insert_after_statement |=
2030  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2031  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2032  trigdesc->trig_update_before_row |=
2033  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2034  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2035  trigdesc->trig_update_after_row |=
2036  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2037  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2038  trigdesc->trig_update_instead_row |=
2039  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2040  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
2041  trigdesc->trig_update_before_statement |=
2042  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2043  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2044  trigdesc->trig_update_after_statement |=
2045  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2046  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2047  trigdesc->trig_delete_before_row |=
2048  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2049  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2050  trigdesc->trig_delete_after_row |=
2051  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2052  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2053  trigdesc->trig_delete_instead_row |=
2054  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2055  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
2056  trigdesc->trig_delete_before_statement |=
2057  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2058  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2059  trigdesc->trig_delete_after_statement |=
2060  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2061  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2062  /* there are no row-level truncate triggers */
2063  trigdesc->trig_truncate_before_statement |=
2064  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2065  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
2066  trigdesc->trig_truncate_after_statement |=
2067  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2068  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
2069 
2070  trigdesc->trig_insert_new_table |=
2071  (TRIGGER_FOR_INSERT(tgtype) &&
2072  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2073  trigdesc->trig_update_old_table |=
2074  (TRIGGER_FOR_UPDATE(tgtype) &&
2075  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2076  trigdesc->trig_update_new_table |=
2077  (TRIGGER_FOR_UPDATE(tgtype) &&
2078  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2079  trigdesc->trig_delete_old_table |=
2080  (TRIGGER_FOR_DELETE(tgtype) &&
2081  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2082 }
2083 
2084 /*
2085  * Copy a TriggerDesc data structure.
2086  *
2087  * The copy is allocated in the current memory context.
2088  */
2089 TriggerDesc *
2091 {
2092  TriggerDesc *newdesc;
2093  Trigger *trigger;
2094  int i;
2095 
2096  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2097  return NULL;
2098 
2099  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2100  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2101 
2102  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2103  memcpy(trigger, trigdesc->triggers,
2104  trigdesc->numtriggers * sizeof(Trigger));
2105  newdesc->triggers = trigger;
2106 
2107  for (i = 0; i < trigdesc->numtriggers; i++)
2108  {
2109  trigger->tgname = pstrdup(trigger->tgname);
2110  if (trigger->tgnattr > 0)
2111  {
2112  int16 *newattr;
2113 
2114  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
2115  memcpy(newattr, trigger->tgattr,
2116  trigger->tgnattr * sizeof(int16));
2117  trigger->tgattr = newattr;
2118  }
2119  if (trigger->tgnargs > 0)
2120  {
2121  char **newargs;
2122  int16 j;
2123 
2124  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2125  for (j = 0; j < trigger->tgnargs; j++)
2126  newargs[j] = pstrdup(trigger->tgargs[j]);
2127  trigger->tgargs = newargs;
2128  }
2129  if (trigger->tgqual)
2130  trigger->tgqual = pstrdup(trigger->tgqual);
2131  if (trigger->tgoldtable)
2132  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2133  if (trigger->tgnewtable)
2134  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
2135  trigger++;
2136  }
2137 
2138  return newdesc;
2139 }
2140 
2141 /*
2142  * Free a TriggerDesc data structure.
2143  */
2144 void
2146 {
2147  Trigger *trigger;
2148  int i;
2149 
2150  if (trigdesc == NULL)
2151  return;
2152 
2153  trigger = trigdesc->triggers;
2154  for (i = 0; i < trigdesc->numtriggers; i++)
2155  {
2156  pfree(trigger->tgname);
2157  if (trigger->tgnattr > 0)
2158  pfree(trigger->tgattr);
2159  if (trigger->tgnargs > 0)
2160  {
2161  while (--(trigger->tgnargs) >= 0)
2162  pfree(trigger->tgargs[trigger->tgnargs]);
2163  pfree(trigger->tgargs);
2164  }
2165  if (trigger->tgqual)
2166  pfree(trigger->tgqual);
2167  if (trigger->tgoldtable)
2168  pfree(trigger->tgoldtable);
2169  if (trigger->tgnewtable)
2170  pfree(trigger->tgnewtable);
2171  trigger++;
2172  }
2173  pfree(trigdesc->triggers);
2174  pfree(trigdesc);
2175 }
2176 
2177 /*
2178  * Compare two TriggerDesc structures for logical equality.
2179  */
2180 #ifdef NOT_USED
2181 bool
2182 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
2183 {
2184  int i,
2185  j;
2186 
2187  /*
2188  * We need not examine the hint flags, just the trigger array itself; if
2189  * we have the same triggers with the same types, the flags should match.
2190  *
2191  * As of 7.3 we assume trigger set ordering is significant in the
2192  * comparison; so we just compare corresponding slots of the two sets.
2193  *
2194  * Note: comparing the stringToNode forms of the WHEN clauses means that
2195  * parse column locations will affect the result. This is okay as long as
2196  * this function is only used for detecting exact equality, as for example
2197  * in checking for staleness of a cache entry.
2198  */
2199  if (trigdesc1 != NULL)
2200  {
2201  if (trigdesc2 == NULL)
2202  return false;
2203  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
2204  return false;
2205  for (i = 0; i < trigdesc1->numtriggers; i++)
2206  {
2207  Trigger *trig1 = trigdesc1->triggers + i;
2208  Trigger *trig2 = trigdesc2->triggers + i;
2209 
2210  if (trig1->tgoid != trig2->tgoid)
2211  return false;
2212  if (strcmp(trig1->tgname, trig2->tgname) != 0)
2213  return false;
2214  if (trig1->tgfoid != trig2->tgfoid)
2215  return false;
2216  if (trig1->tgtype != trig2->tgtype)
2217  return false;
2218  if (trig1->tgenabled != trig2->tgenabled)
2219  return false;
2220  if (trig1->tgisinternal != trig2->tgisinternal)
2221  return false;
2222  if (trig1->tgisclone != trig2->tgisclone)
2223  return false;
2224  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
2225  return false;
2226  if (trig1->tgconstrindid != trig2->tgconstrindid)
2227  return false;
2228  if (trig1->tgconstraint != trig2->tgconstraint)
2229  return false;
2230  if (trig1->tgdeferrable != trig2->tgdeferrable)
2231  return false;
2232  if (trig1->tginitdeferred != trig2->tginitdeferred)
2233  return false;
2234  if (trig1->tgnargs != trig2->tgnargs)
2235  return false;
2236  if (trig1->tgnattr != trig2->tgnattr)
2237  return false;
2238  if (trig1->tgnattr > 0 &&
2239  memcmp(trig1->tgattr, trig2->tgattr,
2240  trig1->tgnattr * sizeof(int16)) != 0)
2241  return false;
2242  for (j = 0; j < trig1->tgnargs; j++)
2243  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
2244  return false;
2245  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
2246  /* ok */ ;
2247  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
2248  return false;
2249  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
2250  return false;
2251  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
2252  /* ok */ ;
2253  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
2254  return false;
2255  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
2256  return false;
2257  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
2258  /* ok */ ;
2259  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
2260  return false;
2261  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
2262  return false;
2263  }
2264  }
2265  else if (trigdesc2 != NULL)
2266  return false;
2267  return true;
2268 }
2269 #endif /* NOT_USED */
2270 
2271 /*
2272  * Check if there is a row-level trigger with transition tables that prevents
2273  * a table from becoming an inheritance child or partition. Return the name
2274  * of the first such incompatible trigger, or NULL if there is none.
2275  */
2276 const char *
2278 {
2279  if (trigdesc != NULL)
2280  {
2281  int i;
2282 
2283  for (i = 0; i < trigdesc->numtriggers; ++i)
2284  {
2285  Trigger *trigger = &trigdesc->triggers[i];
2286 
2287  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2288  return trigger->tgname;
2289  }
2290  }
2291 
2292  return NULL;
2293 }
2294 
2295 /*
2296  * Call a trigger function.
2297  *
2298  * trigdata: trigger descriptor.
2299  * tgindx: trigger's index in finfo and instr arrays.
2300  * finfo: array of cached trigger function call information.
2301  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2302  * per_tuple_context: memory context to execute the function in.
2303  *
2304  * Returns the tuple (or NULL) as returned by the function.
2305  */
2306 static HeapTuple
2308  int tgindx,
2309  FmgrInfo *finfo,
2310  Instrumentation *instr,
2311  MemoryContext per_tuple_context)
2312 {
2313  LOCAL_FCINFO(fcinfo, 0);
2314  PgStat_FunctionCallUsage fcusage;
2315  Datum result;
2316  MemoryContext oldContext;
2317 
2318  /*
2319  * Protect against code paths that may fail to initialize transition table
2320  * info.
2321  */
2322  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2323  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2324  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2325  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2326  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2327  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2328  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2329 
2330  finfo += tgindx;
2331 
2332  /*
2333  * We cache fmgr lookup info, to avoid making the lookup again on each
2334  * call.
2335  */
2336  if (finfo->fn_oid == InvalidOid)
2337  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2338 
2339  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2340 
2341  /*
2342  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2343  */
2344  if (instr)
2345  InstrStartNode(instr + tgindx);
2346 
2347  /*
2348  * Do the function evaluation in the per-tuple memory context, so that
2349  * leaked memory will be reclaimed once per tuple. Note in particular that
2350  * any new tuple created by the trigger function will live till the end of
2351  * the tuple cycle.
2352  */
2353  oldContext = MemoryContextSwitchTo(per_tuple_context);
2354 
2355  /*
2356  * Call the function, passing no arguments but setting a context.
2357  */
2358  InitFunctionCallInfoData(*fcinfo, finfo, 0,
2359  InvalidOid, (Node *) trigdata, NULL);
2360 
2361  pgstat_init_function_usage(fcinfo, &fcusage);
2362 
2363  MyTriggerDepth++;
2364  PG_TRY();
2365  {
2366  result = FunctionCallInvoke(fcinfo);
2367  }
2368  PG_FINALLY();
2369  {
2370  MyTriggerDepth--;
2371  }
2372  PG_END_TRY();
2373 
2374  pgstat_end_function_usage(&fcusage, true);
2375 
2376  MemoryContextSwitchTo(oldContext);
2377 
2378  /*
2379  * Trigger protocol allows function to return a null pointer, but NOT to
2380  * set the isnull result flag.
2381  */
2382  if (fcinfo->isnull)
2383  ereport(ERROR,
2384  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2385  errmsg("trigger function %u returned null value",
2386  fcinfo->flinfo->fn_oid)));
2387 
2388  /*
2389  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2390  * one "tuple returned" (really the number of firings).
2391  */
2392  if (instr)
2393  InstrStopNode(instr + tgindx, 1);
2394 
2395  return (HeapTuple) DatumGetPointer(result);
2396 }
2397 
2398 void
2400 {
2401  TriggerDesc *trigdesc;
2402  int i;
2403  TriggerData LocTriggerData = {0};
2404 
2405  trigdesc = relinfo->ri_TrigDesc;
2406 
2407  if (trigdesc == NULL)
2408  return;
2409  if (!trigdesc->trig_insert_before_statement)
2410  return;
2411 
2412  /* no-op if we already fired BS triggers in this context */
2414  CMD_INSERT))
2415  return;
2416 
2417  LocTriggerData.type = T_TriggerData;
2418  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2420  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2421  for (i = 0; i < trigdesc->numtriggers; i++)
2422  {
2423  Trigger *trigger = &trigdesc->triggers[i];
2424  HeapTuple newtuple;
2425 
2426  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2427  TRIGGER_TYPE_STATEMENT,
2428  TRIGGER_TYPE_BEFORE,
2429  TRIGGER_TYPE_INSERT))
2430  continue;
2431  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2432  NULL, NULL, NULL))
2433  continue;
2434 
2435  LocTriggerData.tg_trigger = trigger;
2436  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2437  i,
2438  relinfo->ri_TrigFunctions,
2439  relinfo->ri_TrigInstrument,
2440  GetPerTupleMemoryContext(estate));
2441 
2442  if (newtuple)
2443  ereport(ERROR,
2444  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2445  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2446  }
2447 }
2448 
2449 void
2451  TransitionCaptureState *transition_capture)
2452 {
2453  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2454 
2455  if (trigdesc && trigdesc->trig_insert_after_statement)
2456  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2458  false, NULL, NULL, NIL, NULL, transition_capture,
2459  false);
2460 }
2461 
2462 bool
2464  TupleTableSlot *slot)
2465 {
2466  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2467  HeapTuple newtuple = NULL;
2468  bool should_free;
2469  TriggerData LocTriggerData = {0};
2470  int i;
2471 
2472  LocTriggerData.type = T_TriggerData;
2473  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2476  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2477  for (i = 0; i < trigdesc->numtriggers; i++)
2478  {
2479  Trigger *trigger = &trigdesc->triggers[i];
2480  HeapTuple oldtuple;
2481 
2482  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2483  TRIGGER_TYPE_ROW,
2484  TRIGGER_TYPE_BEFORE,
2485  TRIGGER_TYPE_INSERT))
2486  continue;
2487  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2488  NULL, NULL, slot))
2489  continue;
2490 
2491  if (!newtuple)
2492  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2493 
2494  LocTriggerData.tg_trigslot = slot;
2495  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2496  LocTriggerData.tg_trigger = trigger;
2497  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2498  i,
2499  relinfo->ri_TrigFunctions,
2500  relinfo->ri_TrigInstrument,
2501  GetPerTupleMemoryContext(estate));
2502  if (newtuple == NULL)
2503  {
2504  if (should_free)
2505  heap_freetuple(oldtuple);
2506  return false; /* "do nothing" */
2507  }
2508  else if (newtuple != oldtuple)
2509  {
2510  ExecForceStoreHeapTuple(newtuple, slot, false);
2511 
2512  /*
2513  * After a tuple in a partition goes through a trigger, the user
2514  * could have changed the partition key enough that the tuple no
2515  * longer fits the partition. Verify that.
2516  */
2517  if (trigger->tgisclone &&
2518  !ExecPartitionCheck(relinfo, slot, estate, false))
2519  ereport(ERROR,
2520  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2521  errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2522  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2523  trigger->tgname,
2526 
2527  if (should_free)
2528  heap_freetuple(oldtuple);
2529 
2530  /* signal tuple should be re-fetched if used */
2531  newtuple = NULL;
2532  }
2533  }
2534 
2535  return true;
2536 }
2537 
2538 void
2540  TupleTableSlot *slot, List *recheckIndexes,
2541  TransitionCaptureState *transition_capture)
2542 {
2543  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2544 
2545  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2546  (transition_capture && transition_capture->tcs_insert_new_table))
2547  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2549  true, NULL, slot,
2550  recheckIndexes, NULL,
2551  transition_capture,
2552  false);
2553 }
2554 
2555 bool
2557  TupleTableSlot *slot)
2558 {
2559  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2560  HeapTuple newtuple = NULL;
2561  bool should_free;
2562  TriggerData LocTriggerData = {0};
2563  int i;
2564 
2565  LocTriggerData.type = T_TriggerData;
2566  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2569  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2570  for (i = 0; i < trigdesc->numtriggers; i++)
2571  {
2572  Trigger *trigger = &trigdesc->triggers[i];
2573  HeapTuple oldtuple;
2574 
2575  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2576  TRIGGER_TYPE_ROW,
2577  TRIGGER_TYPE_INSTEAD,
2578  TRIGGER_TYPE_INSERT))
2579  continue;
2580  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2581  NULL, NULL, slot))
2582  continue;
2583 
2584  if (!newtuple)
2585  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2586 
2587  LocTriggerData.tg_trigslot = slot;
2588  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2589  LocTriggerData.tg_trigger = trigger;
2590  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2591  i,
2592  relinfo->ri_TrigFunctions,
2593  relinfo->ri_TrigInstrument,
2594  GetPerTupleMemoryContext(estate));
2595  if (newtuple == NULL)
2596  {
2597  if (should_free)
2598  heap_freetuple(oldtuple);
2599  return false; /* "do nothing" */
2600  }
2601  else if (newtuple != oldtuple)
2602  {
2603  ExecForceStoreHeapTuple(newtuple, slot, false);
2604 
2605  if (should_free)
2606  heap_freetuple(oldtuple);
2607 
2608  /* signal tuple should be re-fetched if used */
2609  newtuple = NULL;
2610  }
2611  }
2612 
2613  return true;
2614 }
2615 
2616 void
2618 {
2619  TriggerDesc *trigdesc;
2620  int i;
2621  TriggerData LocTriggerData = {0};
2622 
2623  trigdesc = relinfo->ri_TrigDesc;
2624 
2625  if (trigdesc == NULL)
2626  return;
2627  if (!trigdesc->trig_delete_before_statement)
2628  return;
2629 
2630  /* no-op if we already fired BS triggers in this context */
2632  CMD_DELETE))
2633  return;
2634 
2635  LocTriggerData.type = T_TriggerData;
2636  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2638  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2639  for (i = 0; i < trigdesc->numtriggers; i++)
2640  {
2641  Trigger *trigger = &trigdesc->triggers[i];
2642  HeapTuple newtuple;
2643 
2644  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2645  TRIGGER_TYPE_STATEMENT,
2646  TRIGGER_TYPE_BEFORE,
2647  TRIGGER_TYPE_DELETE))
2648  continue;
2649  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2650  NULL, NULL, NULL))
2651  continue;
2652 
2653  LocTriggerData.tg_trigger = trigger;
2654  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2655  i,
2656  relinfo->ri_TrigFunctions,
2657  relinfo->ri_TrigInstrument,
2658  GetPerTupleMemoryContext(estate));
2659 
2660  if (newtuple)
2661  ereport(ERROR,
2662  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2663  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2664  }
2665 }
2666 
2667 void
2669  TransitionCaptureState *transition_capture)
2670 {
2671  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2672 
2673  if (trigdesc && trigdesc->trig_delete_after_statement)
2674  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2676  false, NULL, NULL, NIL, NULL, transition_capture,
2677  false);
2678 }
2679 
2680 /*
2681  * Execute BEFORE ROW DELETE triggers.
2682  *
2683  * True indicates caller can proceed with the delete. False indicates caller
2684  * need to suppress the delete and additionally if requested, we need to pass
2685  * back the concurrently updated tuple if any.
2686  */
2687 bool
2689  ResultRelInfo *relinfo,
2690  ItemPointer tupleid,
2691  HeapTuple fdw_trigtuple,
2692  TupleTableSlot **epqslot)
2693 {
2694  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2695  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2696  bool result = true;
2697  TriggerData LocTriggerData = {0};
2698  HeapTuple trigtuple;
2699  bool should_free = false;
2700  int i;
2701 
2702  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2703  if (fdw_trigtuple == NULL)
2704  {
2705  TupleTableSlot *epqslot_candidate = NULL;
2706 
2707  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2708  LockTupleExclusive, slot, &epqslot_candidate,
2709  NULL))
2710  return false;
2711 
2712  /*
2713  * If the tuple was concurrently updated and the caller of this
2714  * function requested for the updated tuple, skip the trigger
2715  * execution.
2716  */
2717  if (epqslot_candidate != NULL && epqslot != NULL)
2718  {
2719  *epqslot = epqslot_candidate;
2720  return false;
2721  }
2722 
2723  trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2724  }
2725  else
2726  {
2727  trigtuple = fdw_trigtuple;
2728  ExecForceStoreHeapTuple(trigtuple, slot, false);
2729  }
2730 
2731  LocTriggerData.type = T_TriggerData;
2732  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2735  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2736  for (i = 0; i < trigdesc->numtriggers; i++)
2737  {
2738  HeapTuple newtuple;
2739  Trigger *trigger = &trigdesc->triggers[i];
2740 
2741  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2742  TRIGGER_TYPE_ROW,
2743  TRIGGER_TYPE_BEFORE,
2744  TRIGGER_TYPE_DELETE))
2745  continue;
2746  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2747  NULL, slot, NULL))
2748  continue;
2749 
2750  LocTriggerData.tg_trigslot = slot;
2751  LocTriggerData.tg_trigtuple = trigtuple;
2752  LocTriggerData.tg_trigger = trigger;
2753  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2754  i,
2755  relinfo->ri_TrigFunctions,
2756  relinfo->ri_TrigInstrument,
2757  GetPerTupleMemoryContext(estate));
2758  if (newtuple == NULL)
2759  {
2760  result = false; /* tell caller to suppress delete */
2761  break;
2762  }
2763  if (newtuple != trigtuple)
2764  heap_freetuple(newtuple);
2765  }
2766  if (should_free)
2767  heap_freetuple(trigtuple);
2768 
2769  return result;
2770 }
2771 
2772 /*
2773  * Note: is_crosspart_update must be true if the DELETE is being performed
2774  * as part of a cross-partition update.
2775  */
2776 void
2778  ResultRelInfo *relinfo,
2779  ItemPointer tupleid,
2780  HeapTuple fdw_trigtuple,
2781  TransitionCaptureState *transition_capture,
2782  bool is_crosspart_update)
2783 {
2784  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2785 
2786  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2787  (transition_capture && transition_capture->tcs_delete_old_table))
2788  {
2789  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2790 
2791  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2792  if (fdw_trigtuple == NULL)
2793  GetTupleForTrigger(estate,
2794  NULL,
2795  relinfo,
2796  tupleid,
2798  slot,
2799  NULL,
2800  NULL);
2801  else
2802  ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2803 
2804  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2806  true, slot, NULL, NIL, NULL,
2807  transition_capture,
2808  is_crosspart_update);
2809  }
2810 }
2811 
2812 bool
2814  HeapTuple trigtuple)
2815 {
2816  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2817  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2818  TriggerData LocTriggerData = {0};
2819  int i;
2820 
2821  LocTriggerData.type = T_TriggerData;
2822  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2825  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2826 
2827  ExecForceStoreHeapTuple(trigtuple, slot, false);
2828 
2829  for (i = 0; i < trigdesc->numtriggers; i++)
2830  {
2831  HeapTuple rettuple;
2832  Trigger *trigger = &trigdesc->triggers[i];
2833 
2834  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2835  TRIGGER_TYPE_ROW,
2836  TRIGGER_TYPE_INSTEAD,
2837  TRIGGER_TYPE_DELETE))
2838  continue;
2839  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2840  NULL, slot, NULL))
2841  continue;
2842 
2843  LocTriggerData.tg_trigslot = slot;
2844  LocTriggerData.tg_trigtuple = trigtuple;
2845  LocTriggerData.tg_trigger = trigger;
2846  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2847  i,
2848  relinfo->ri_TrigFunctions,
2849  relinfo->ri_TrigInstrument,
2850  GetPerTupleMemoryContext(estate));
2851  if (rettuple == NULL)
2852  return false; /* Delete was suppressed */
2853  if (rettuple != trigtuple)
2854  heap_freetuple(rettuple);
2855  }
2856  return true;
2857 }
2858 
2859 void
2861 {
2862  TriggerDesc *trigdesc;
2863  int i;
2864  TriggerData LocTriggerData = {0};
2865  Bitmapset *updatedCols;
2866 
2867  trigdesc = relinfo->ri_TrigDesc;
2868 
2869  if (trigdesc == NULL)
2870  return;
2871  if (!trigdesc->trig_update_before_statement)
2872  return;
2873 
2874  /* no-op if we already fired BS triggers in this context */
2876  CMD_UPDATE))
2877  return;
2878 
2879  /* statement-level triggers operate on the parent table */
2880  Assert(relinfo->ri_RootResultRelInfo == NULL);
2881 
2882  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2883 
2884  LocTriggerData.type = T_TriggerData;
2885  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2887  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2888  LocTriggerData.tg_updatedcols = updatedCols;
2889  for (i = 0; i < trigdesc->numtriggers; i++)
2890  {
2891  Trigger *trigger = &trigdesc->triggers[i];
2892  HeapTuple newtuple;
2893 
2894  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2895  TRIGGER_TYPE_STATEMENT,
2896  TRIGGER_TYPE_BEFORE,
2897  TRIGGER_TYPE_UPDATE))
2898  continue;
2899  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2900  updatedCols, NULL, NULL))
2901  continue;
2902 
2903  LocTriggerData.tg_trigger = trigger;
2904  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2905  i,
2906  relinfo->ri_TrigFunctions,
2907  relinfo->ri_TrigInstrument,
2908  GetPerTupleMemoryContext(estate));
2909 
2910  if (newtuple)
2911  ereport(ERROR,
2912  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2913  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2914  }
2915 }
2916 
2917 void
2919  TransitionCaptureState *transition_capture)
2920 {
2921  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2922 
2923  /* statement-level triggers operate on the parent table */
2924  Assert(relinfo->ri_RootResultRelInfo == NULL);
2925 
2926  if (trigdesc && trigdesc->trig_update_after_statement)
2927  AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2929  false, NULL, NULL, NIL,
2930  ExecGetAllUpdatedCols(relinfo, estate),
2931  transition_capture,
2932  false);
2933 }
2934 
2935 bool
2937  ResultRelInfo *relinfo,
2938  ItemPointer tupleid,
2939  HeapTuple fdw_trigtuple,
2940  TupleTableSlot *newslot,
2941  TM_FailureData *tmfd)
2942 {
2943  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2944  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2945  HeapTuple newtuple = NULL;
2946  HeapTuple trigtuple;
2947  bool should_free_trig = false;
2948  bool should_free_new = false;
2949  TriggerData LocTriggerData = {0};
2950  int i;
2951  Bitmapset *updatedCols;
2952  LockTupleMode lockmode;
2953 
2954  /* Determine lock mode to use */
2955  lockmode = ExecUpdateLockMode(estate, relinfo);
2956 
2957  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2958  if (fdw_trigtuple == NULL)
2959  {
2960  TupleTableSlot *epqslot_candidate = NULL;
2961 
2962  /* get a copy of the on-disk tuple we are planning to update */
2963  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2964  lockmode, oldslot, &epqslot_candidate,
2965  tmfd))
2966  return false; /* cancel the update action */
2967 
2968  /*
2969  * In READ COMMITTED isolation level it's possible that target tuple
2970  * was changed due to concurrent update. In that case we have a raw
2971  * subplan output tuple in epqslot_candidate, and need to form a new
2972  * insertable tuple using ExecGetUpdateNewTuple to replace the one we
2973  * received in newslot. Neither we nor our callers have any further
2974  * interest in the passed-in tuple, so it's okay to overwrite newslot
2975  * with the newer data.
2976  *
2977  * (Typically, newslot was also generated by ExecGetUpdateNewTuple, so
2978  * that epqslot_clean will be that same slot and the copy step below
2979  * is not needed.)
2980  */
2981  if (epqslot_candidate != NULL)
2982  {
2983  TupleTableSlot *epqslot_clean;
2984 
2985  epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
2986  oldslot);
2987 
2988  if (newslot != epqslot_clean)
2989  ExecCopySlot(newslot, epqslot_clean);
2990  }
2991 
2992  trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
2993  }
2994  else
2995  {
2996  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
2997  trigtuple = fdw_trigtuple;
2998  }
2999 
3000  LocTriggerData.type = T_TriggerData;
3001  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3004  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3005  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
3006  LocTriggerData.tg_updatedcols = updatedCols;
3007  for (i = 0; i < trigdesc->numtriggers; i++)
3008  {
3009  Trigger *trigger = &trigdesc->triggers[i];
3010  HeapTuple oldtuple;
3011 
3012  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3013  TRIGGER_TYPE_ROW,
3014  TRIGGER_TYPE_BEFORE,
3015  TRIGGER_TYPE_UPDATE))
3016  continue;
3017  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3018  updatedCols, oldslot, newslot))
3019  continue;
3020 
3021  if (!newtuple)
3022  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
3023 
3024  LocTriggerData.tg_trigslot = oldslot;
3025  LocTriggerData.tg_trigtuple = trigtuple;
3026  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3027  LocTriggerData.tg_newslot = newslot;
3028  LocTriggerData.tg_trigger = trigger;
3029  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3030  i,
3031  relinfo->ri_TrigFunctions,
3032  relinfo->ri_TrigInstrument,
3033  GetPerTupleMemoryContext(estate));
3034 
3035  if (newtuple == NULL)
3036  {
3037  if (should_free_trig)
3038  heap_freetuple(trigtuple);
3039  if (should_free_new)
3040  heap_freetuple(oldtuple);
3041  return false; /* "do nothing" */
3042  }
3043  else if (newtuple != oldtuple)
3044  {
3045  ExecForceStoreHeapTuple(newtuple, newslot, false);
3046 
3047  /*
3048  * If the tuple returned by the trigger / being stored, is the old
3049  * row version, and the heap tuple passed to the trigger was
3050  * allocated locally, materialize the slot. Otherwise we might
3051  * free it while still referenced by the slot.
3052  */
3053  if (should_free_trig && newtuple == trigtuple)
3054  ExecMaterializeSlot(newslot);
3055 
3056  if (should_free_new)
3057  heap_freetuple(oldtuple);
3058 
3059  /* signal tuple should be re-fetched if used */
3060  newtuple = NULL;
3061  }
3062  }
3063  if (should_free_trig)
3064  heap_freetuple(trigtuple);
3065 
3066  return true;
3067 }
3068 
3069 /*
3070  * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
3071  * and destination partitions, respectively, of a cross-partition update of
3072  * the root partitioned table mentioned in the query, given by 'relinfo'.
3073  * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
3074  * partition, and 'newslot' contains the "new" tuple in the destination
3075  * partition. This interface allows to support the requirements of
3076  * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
3077  * that case.
3078  */
/*
 * Queue AFTER ROW UPDATE trigger events and/or capture old/new tuples for
 * transition tables, after an UPDATE has been performed.  See the comment
 * just above for the meaning of the cross-partition-update parameters.
 * NOTE(review): the function-name line was lost in extraction; in upstream
 * PostgreSQL this is ExecARUpdateTriggers -- confirm against the original.
 */
3079 void
3081  ResultRelInfo *src_partinfo,
3082  ResultRelInfo *dst_partinfo,
3083  ItemPointer tupleid,
3084  HeapTuple fdw_trigtuple,
3085  TupleTableSlot *newslot,
3086  List *recheckIndexes,
3087  TransitionCaptureState *transition_capture,
3088  bool is_crosspart_update)
3089 {
3090  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3091 
 /*
  * Nothing to do unless there are AFTER ROW UPDATE triggers to queue, or
  * an old and/or new transition table that must capture this row.
  */
3092  if ((trigdesc && trigdesc->trig_update_after_row) ||
3093  (transition_capture &&
3094  (transition_capture->tcs_update_old_table ||
3095  transition_capture->tcs_update_new_table)))
3096  {
3097  /*
3098  * Note: if the UPDATE is converted into a DELETE+INSERT as part of
3099  * update-partition-key operation, then this function is also called
3100  * separately for DELETE and INSERT to capture transition table rows.
3101  * In such case, either old tuple or new tuple can be NULL.
3102  */
3103  TupleTableSlot *oldslot;
3104  ResultRelInfo *tupsrc;
3105 
3106  Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
3107  !is_crosspart_update);
3108 
 /*
  * For a cross-partition update the "old" tuple lives in the source
  * partition, so fetch the trigger old-slot for that rel instead of
  * the (root) relinfo.
  */
3109  tupsrc = src_partinfo ? src_partinfo : relinfo;
3110  oldslot = ExecGetTriggerOldSlot(estate, tupsrc);
3111 
 /*
  * Populate oldslot one of three ways: re-fetch by ctid (no EPQ state
  * is passed, so no recheck happens here), use the FDW-supplied tuple,
  * or leave it empty when there is no old tuple at all.
  * NOTE(review): the lock-mode argument line between "tupleid" and
  * "oldslot" was dropped by extraction -- confirm against upstream.
  */
3112  if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
3113  GetTupleForTrigger(estate,
3114  NULL,
3115  tupsrc,
3116  tupleid,
3118  oldslot,
3119  NULL,
3120  NULL);
3121  else if (fdw_trigtuple != NULL)
3122  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3123  else
3124  ExecClearTuple(oldslot);
3125 
 /*
  * Queue the event; AfterTriggerSaveEvent also handles transition
  * table capture.  (The event-flags argument line 3128 was dropped by
  * extraction.)
  */
3126  AfterTriggerSaveEvent(estate, relinfo,
3127  src_partinfo, dst_partinfo,
3129  true,
3130  oldslot, newslot, recheckIndexes,
3131  ExecGetAllUpdatedCols(relinfo, estate),
3132  transition_capture,
3133  is_crosspart_update);
3134  }
3135 }
3136 
/*
 * Fire INSTEAD OF ROW UPDATE triggers (row-level, TRIGGER_TYPE_INSTEAD).
 * Returns false if any trigger returned NULL ("do nothing"); returns true
 * to proceed, with newslot possibly replaced by a trigger-returned tuple.
 * NOTE(review): the function-name line was lost in extraction; in upstream
 * PostgreSQL this is ExecIRUpdateTriggers -- confirm against the original.
 */
3137 bool
3139  HeapTuple trigtuple, TupleTableSlot *newslot)
3140 {
3141  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3142  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
3143  HeapTuple newtuple = NULL;
 /* should_free is only set (and later read) after the first fetch below */
3144  bool should_free;
3145  TriggerData LocTriggerData = {0};
3146  int i;
3147 
 /*
  * Event is UPDATE; the OR'd row/instead flag line (3150-3151) was dropped
  * by extraction.
  */
3148  LocTriggerData.type = T_TriggerData;
3149  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3152  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3153 
 /* Make the caller-supplied old tuple visible to WHEN clauses / triggers */
3154  ExecForceStoreHeapTuple(trigtuple, oldslot, false);
3155 
3156  for (i = 0; i < trigdesc->numtriggers; i++)
3157  {
3158  Trigger *trigger = &trigdesc->triggers[i];
3159  HeapTuple oldtuple;
3160 
3161  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3162  TRIGGER_TYPE_ROW,
3163  TRIGGER_TYPE_INSTEAD,
3164  TRIGGER_TYPE_UPDATE))
3165  continue;
3166  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3167  NULL, oldslot, newslot))
3168  continue;
3169 
 /* Lazily materialize the new tuple the first time a trigger fires */
3170  if (!newtuple)
3171  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
3172 
3173  LocTriggerData.tg_trigslot = oldslot;
3174  LocTriggerData.tg_trigtuple = trigtuple;
3175  LocTriggerData.tg_newslot = newslot;
3176  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3177 
3178  LocTriggerData.tg_trigger = trigger;
3179  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3180  i,
3181  relinfo->ri_TrigFunctions,
3182  relinfo->ri_TrigInstrument,
3183  GetPerTupleMemoryContext(estate));
3184  if (newtuple == NULL)
3185  {
3186  return false; /* "do nothing" */
3187  }
3188  else if (newtuple != oldtuple)
3189  {
 /* Trigger substituted a different tuple: store it into newslot */
3190  ExecForceStoreHeapTuple(newtuple, newslot, false);
3191 
3192  if (should_free)
3193  heap_freetuple(oldtuple);
3194 
3195  /* signal tuple should be re-fetched if used */
3196  newtuple = NULL;
3197  }
3198  }
3199 
3200  return true;
3201 }
3202 
/*
 * Fire BEFORE STATEMENT TRUNCATE triggers on the given relation.
 * Statement-level BEFORE triggers must return NULL; a non-NULL return is
 * reported as a trigger protocol violation.
 * NOTE(review): the function-name line was lost in extraction; in upstream
 * PostgreSQL this is ExecBSTruncateTriggers -- confirm against the original.
 */
3203 void
3205 {
3206  TriggerDesc *trigdesc;
3207  int i;
3208  TriggerData LocTriggerData = {0};
3209 
3210  trigdesc = relinfo->ri_TrigDesc;
3211 
 /* Fast exits: no triggers at all, or none of the right kind */
3212  if (trigdesc == NULL)
3213  return;
3214  if (!trigdesc->trig_truncate_before_statement)
3215  return;
3216 
 /* Event-flags continuation line (3219) was dropped by extraction */
3217  LocTriggerData.type = T_TriggerData;
3218  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3220  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3221 
3222  for (i = 0; i < trigdesc->numtriggers; i++)
3223  {
3224  Trigger *trigger = &trigdesc->triggers[i];
3225  HeapTuple newtuple;
3226 
3227  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3228  TRIGGER_TYPE_STATEMENT,
3229  TRIGGER_TYPE_BEFORE,
3230  TRIGGER_TYPE_TRUNCATE))
3231  continue;
3232  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3233  NULL, NULL, NULL))
3234  continue;
3235 
3236  LocTriggerData.tg_trigger = trigger;
3237  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3238  i,
3239  relinfo->ri_TrigFunctions,
3240  relinfo->ri_TrigInstrument,
3241  GetPerTupleMemoryContext(estate));
3242 
 /* A statement-level trigger has no row to return */
3243  if (newtuple)
3244  ereport(ERROR,
3245  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3246  errmsg("BEFORE STATEMENT trigger cannot return a value")));
3247  }
3248 }
3249 
/*
 * Queue AFTER STATEMENT TRUNCATE trigger events, if the relation has any.
 * No row data is involved, so all tuple-related arguments are NULL/NIL.
 * NOTE(review): the function-name line was lost in extraction; in upstream
 * PostgreSQL this is ExecASTruncateTriggers; the event-flags argument line
 * (3258) was also dropped -- confirm against the original.
 */
3250 void
3252 {
3253  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3254 
3255  if (trigdesc && trigdesc->trig_truncate_after_statement)
3256  AfterTriggerSaveEvent(estate, relinfo,
3257  NULL, NULL,
3259  false, NULL, NULL, NIL, NULL, NULL,
3260  false);
3261 }
3262 
3263 
3264 /*
3265  * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3266  */
/*
 * (See header comment above.)  Returns true if the tuple was fetched into
 * oldslot and should be processed; false if the row is gone / should be
 * skipped.  When epqslot is non-NULL the row is locked (possibly chasing a
 * concurrent-update chain and running EvalPlanQual); when epqslot is NULL a
 * plain SnapshotAny fetch is done and is expected to succeed.
 * NOTE(review): the function-name line was lost in extraction; in upstream
 * PostgreSQL this is GetTupleForTrigger -- confirm against the original.
 */
3267 static bool
3269  EPQState *epqstate,
3270  ResultRelInfo *relinfo,
3271  ItemPointer tid,
3272  LockTupleMode lockmode,
3273  TupleTableSlot *oldslot,
3274  TupleTableSlot **epqslot,
3275  TM_FailureData *tmfdp)
3276 {
3277  Relation relation = relinfo->ri_RelationDesc;
3278 
3279  if (epqslot != NULL)
3280  {
3281  TM_Result test;
3282  TM_FailureData tmfd;
3283  int lockflags = 0;
3284 
3285  *epqslot = NULL;
3286 
3287  /* caller must pass an epqstate if EvalPlanQual is possible */
3288  Assert(epqstate != NULL);
3289 
3290  /*
3291  * lock tuple for update
3292  */
 /*
  * NOTE(review): a condition line (3293) preceding this flag-setting was
  * dropped by extraction -- presumably it gates FIND_LAST_VERSION on the
  * isolation level; confirm against upstream.
  */
3294  lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
3295  test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
3296  estate->es_output_cid,
3297  lockmode, LockWaitBlock,
3298  lockflags,
3299  &tmfd);
3300 
3301  /* Let the caller know about the status of this operation */
3302  if (tmfdp)
3303  *tmfdp = tmfd;
3304 
3305  switch (test)
3306  {
3307  case TM_SelfModified:
3308 
3309  /*
3310  * The target tuple was already updated or deleted by the
3311  * current command, or by a later command in the current
3312  * transaction. We ignore the tuple in the former case, and
3313  * throw error in the latter case, for the same reasons
3314  * enumerated in ExecUpdate and ExecDelete in
3315  * nodeModifyTable.c.
3316  */
3317  if (tmfd.cmax != estate->es_output_cid)
3318  ereport(ERROR,
3319  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3320  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3321  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3322 
3323  /* treat it as deleted; do not process */
3324  return false;
3325 
3326  case TM_Ok:
 /* traversed means we followed an update chain: recheck quals */
3327  if (tmfd.traversed)
3328  {
3329  *epqslot = EvalPlanQual(epqstate,
3330  relation,
3331  relinfo->ri_RangeTableIndex,
3332  oldslot);
3333 
3334  /*
3335  * If PlanQual failed for updated tuple - we must not
3336  * process this tuple!
3337  */
3338  if (TupIsNull(*epqslot))
3339  {
3340  *epqslot = NULL;
3341  return false;
3342  }
3343  }
3344  break;
3345 
 /*
  * NOTE(review): in the two cases below, the isolation-level condition
  * and errcode lines (3347, 3349, 3355, 3357) were dropped by
  * extraction; upstream raises a serialization failure only under
  * snapshot isolation -- confirm against the original.
  */
3346  case TM_Updated:
3348  ereport(ERROR,
3350  errmsg("could not serialize access due to concurrent update")));
3351  elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3352  break;
3353 
3354  case TM_Deleted:
3356  ereport(ERROR,
3358  errmsg("could not serialize access due to concurrent delete")));
3359  /* tuple was deleted */
3360  return false;
3361 
3362  case TM_Invisible:
3363  elog(ERROR, "attempted to lock invisible tuple");
3364  break;
3365 
3366  default:
3367  elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3368  return false; /* keep compiler quiet */
3369  }
3370  }
3371  else
3372  {
3373  /*
3374  * We expect the tuple to be present, thus very simple error handling
3375  * suffices.
3376  */
3377  if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3378  oldslot))
3379  elog(ERROR, "failed to fetch tuple for trigger");
3380  }
3381 
3382  return true;
3383 }
3384 
3385 /*
3386  * Is trigger enabled to fire?
3387  */
/*
 * (See header comment above.)  Decide whether a trigger should fire for the
 * given event: checks the replication-role-dependent enable state, the
 * column list for column-specific UPDATE triggers, and the WHEN clause.
 * Returns false as soon as any check fails.
 * NOTE(review): the function-name line was lost in extraction; in upstream
 * PostgreSQL this is TriggerEnabled -- confirm against the original.
 */
3388 static bool
3390  Trigger *trigger, TriggerEvent event,
3391  Bitmapset *modifiedCols,
3392  TupleTableSlot *oldslot, TupleTableSlot *newslot)
3393 {
3394  /* Check replication-role-dependent enable state */
 /*
  * NOTE(review): the role-test line (3395) was dropped by extraction; per
  * the "else" comment below, this first branch is taken for the REPLICA
  * session replication role -- confirm against upstream.
  */
3396  {
3397  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3398  trigger->tgenabled == TRIGGER_DISABLED)
3399  return false;
3400  }
3401  else /* ORIGIN or LOCAL role */
3402  {
3403  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3404  trigger->tgenabled == TRIGGER_DISABLED)
3405  return false;
3406  }
3407 
3408  /*
3409  * Check for column-specific trigger (only possible for UPDATE, and in
3410  * fact we *must* ignore tgattr for other event types)
3411  */
3412  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3413  {
3414  int i;
3415  bool modified;
3416 
 /* Fire only if at least one of the listed columns was modified */
3417  modified = false;
3418  for (i = 0; i < trigger->tgnattr; i++)
3419  {
 /*
  * NOTE(review): the bms_is_member() test line (3420) over
  * trigger->tgattr[i] was dropped by extraction -- confirm against
  * upstream.
  */
3421  modifiedCols))
3422  {
3423  modified = true;
3424  break;
3425  }
3426  }
3427  if (!modified)
3428  return false;
3429  }
3430 
3431  /* Check for WHEN clause */
3432  if (trigger->tgqual)
3433  {
3434  ExprState **predicate;
3435  ExprContext *econtext;
3436  MemoryContext oldContext;
3437  int i;
3438 
3439  Assert(estate != NULL);
3440 
3441  /*
3442  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3443  * matching element of relinfo->ri_TrigWhenExprs[]
3444  */
3445  i = trigger - relinfo->ri_TrigDesc->triggers;
3446  predicate = &relinfo->ri_TrigWhenExprs[i];
3447 
3448  /*
3449  * If first time through for this WHEN expression, build expression
3450  * nodetrees for it. Keep them in the per-query memory context so
3451  * they'll survive throughout the query.
3452  */
3453  if (*predicate == NULL)
3454  {
3455  Node *tgqual;
3456 
3457  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3458  tgqual = stringToNode(trigger->tgqual);
3459  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
 /*
  * NOTE(review): the ChangeVarNodes() call lines (3460-3461) were
  * dropped by extraction -- confirm against upstream.
  */
3462  /* ExecPrepareQual wants implicit-AND form */
3463  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3464  *predicate = ExecPrepareQual((List *) tgqual, estate);
3465  MemoryContextSwitchTo(oldContext);
3466  }
3467 
3468  /*
3469  * We will use the EState's per-tuple context for evaluating WHEN
3470  * expressions (creating it if it's not already there).
3471  */
3472  econtext = GetPerTupleExprContext(estate);
3473 
3474  /*
3475  * Finally evaluate the expression, making the old and/or new tuples
3476  * available as INNER_VAR/OUTER_VAR respectively.
3477  */
3478  econtext->ecxt_innertuple = oldslot;
3479  econtext->ecxt_outertuple = newslot;
3480  if (!ExecQual(*predicate, econtext))
3481  return false;
3482  }
3483 
3484  return true;
3485 }
3486 
3487 
3488 /* ----------
3489  * After-trigger stuff
3490  *
3491  * The AfterTriggersData struct holds data about pending AFTER trigger events
3492  * during the current transaction tree. (BEFORE triggers are fired
3493  * immediately so we don't need any persistent state about them.) The struct
3494  * and most of its subsidiary data are kept in TopTransactionContext; however
3495  * some data that can be discarded sooner appears in the CurTransactionContext
3496  * of the relevant subtransaction. Also, the individual event records are
3497  * kept in a separate sub-context of TopTransactionContext. This is done
3498  * mainly so that it's easy to tell from a memory context dump how much space
3499  * is being eaten by trigger events.
3500  *
3501  * Because the list of pending events can grow large, we go to some
3502  * considerable effort to minimize per-event memory consumption. The event
3503  * records are grouped into chunks and common data for similar events in the
3504  * same chunk is only stored once.
3505  *
3506  * XXX We need to be able to save the per-event data in a file if it grows too
3507  * large.
3508  * ----------
3509  */
3510 
3511 /* Per-trigger SET CONSTRAINT status */
3513 {
3517 
3519 
3520 /*
3521  * SET CONSTRAINT intra-transaction status.
3522  *
3523  * We make this a single palloc'd object so it can be copied and freed easily.
3524  *
3525  * all_isset and all_isdeferred are used to keep track
3526  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3527  *
3528  * trigstates[] stores per-trigger tgisdeferred settings.
3529  */
3531 {
3534  int numstates; /* number of trigstates[] entries in use */
3535  int numalloc; /* allocated size of trigstates[] */
3538 
3540 
3541 
3542 /*
3543  * Per-trigger-event data
3544  *
3545  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3546  * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions.
3547  * Each event record also has an associated AfterTriggerSharedData that is
3548  * shared across all instances of similar events within a "chunk".
3549  *
3550  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3551  * fields. Updates of regular tables use two; inserts and deletes of regular
3552  * tables use one; foreign tables always use zero and save the tuple(s) to a
3553  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3554  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3555  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3556  * tuple(s). This permits storing tuples once regardless of the number of
3557  * row-level triggers on a foreign table.
3558  *
3559  * When updates on partitioned tables cause rows to move between partitions,
3560  * the OIDs of both partitions are stored too, so that the tuples can be
3561  * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross-
3562  * partition update").
3563  *
3564  * Note that we need triggers on foreign tables to be fired in exactly the
3565  * order they were queued, so that the tuples come out of the tuplestore in
3566  * the right order. To ensure that, we forbid deferrable (constraint)
3567  * triggers on foreign tables. This also ensures that such triggers do not
3568  * get deferred into outer trigger query levels, meaning that it's okay to
3569  * destroy the tuplestore at the end of the query level.
3570  *
3571  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3572  * require no ctid field. We lack the flag bit space to neatly represent that
3573  * distinct case, and it seems unlikely to be worth much trouble.
3574  *
3575  * Note: ats_firing_id is initially zero and is set to something else when
3576  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3577  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3578  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3579  * because all instances of the same type of event in a given event list will
3580  * be fired at the same time, if they were queued between the same firing
3581  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3582  * a new event to an existing AfterTriggerSharedData record.
3583  */
3585 
3586 #define AFTER_TRIGGER_OFFSET 0x07FFFFFF /* must be low-order bits */
3587 #define AFTER_TRIGGER_DONE 0x80000000
3588 #define AFTER_TRIGGER_IN_PROGRESS 0x40000000
3589 /* bits describing the size and tuple sources of this event */
3590 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3591 #define AFTER_TRIGGER_FDW_FETCH 0x20000000
3592 #define AFTER_TRIGGER_1CTID 0x10000000
3593 #define AFTER_TRIGGER_2CTID 0x30000000
3594 #define AFTER_TRIGGER_CP_UPDATE 0x08000000
3595 #define AFTER_TRIGGER_TUP_BITS 0x38000000
3597 
3599 {
3600  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3601  Oid ats_tgoid; /* the trigger's ID */
3602  Oid ats_relid; /* the relation it's on */
3603  CommandId ats_firing_id; /* ID for firing cycle */
3604  struct AfterTriggersTableData *ats_table; /* transition table access */
3605  Bitmapset *ats_modifiedcols; /* modified columns */
3607 
3609 
3611 {
3612  TriggerFlags ate_flags; /* status bits and offset to shared data */
3613  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3614  ItemPointerData ate_ctid2; /* new updated tuple */
3615 
3616  /*
3617  * During a cross-partition update of a partitioned table, we also store
3618  * the OIDs of source and destination partitions that are needed to fetch
3619  * the old (ctid1) and the new tuple (ctid2) from, respectively.
3620  */
3624 
3625 /* AfterTriggerEventData, minus ate_src_part, ate_dst_part */
3627 {
3632 
3633 /* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */
3635 {
3636  TriggerFlags ate_flags; /* status bits and offset to shared data */
3637  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3639 
3640 /* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */
3642 {
3643  TriggerFlags ate_flags; /* status bits and offset to shared data */
3645 
3646 #define SizeofTriggerEvent(evt) \
3647  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \
3648  sizeof(AfterTriggerEventData) : \
3649  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3650  sizeof(AfterTriggerEventDataNoOids) : \
3651  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3652  sizeof(AfterTriggerEventDataOneCtid) : \
3653  sizeof(AfterTriggerEventDataZeroCtids))))
3654 
3655 #define GetTriggerSharedData(evt) \
3656  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3657 
3658 /*
3659  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3660  * larger chunks (a slightly more sophisticated version of an expansible
3661  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3662  * AfterTriggerEventData records; the space between endfree and endptr is
3663  * occupied by AfterTriggerSharedData records.
3664  */
3666 {
3667  struct AfterTriggerEventChunk *next; /* list link */
3668  char *freeptr; /* start of free space in chunk */
3669  char *endfree; /* end of free space in chunk */
3670  char *endptr; /* end of chunk */
3671  /* event data follows here */
3673 
3674 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3675 
3676 /* A list of events */
3678 {
3681  char *tailfree; /* freeptr of tail chunk */
3683 
3684 /* Macros to help in iterating over a list of events */
3685 #define for_each_chunk(cptr, evtlist) \
3686  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3687 #define for_each_event(eptr, cptr) \
3688  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3689  (char *) eptr < (cptr)->freeptr; \
3690  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3691 /* Use this if no special per-chunk processing is needed */
3692 #define for_each_event_chunk(eptr, cptr, evtlist) \
3693  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3694 
3695 /* Macros for iterating from a start point that might not be list start */
3696 #define for_each_chunk_from(cptr) \
3697  for (; cptr != NULL; cptr = cptr->next)
3698 #define for_each_event_from(eptr, cptr) \
3699  for (; \
3700  (char *) eptr < (cptr)->freeptr; \
3701  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3702 
3703 
3704 /*
3705  * All per-transaction data for the AFTER TRIGGERS module.
3706  *
3707  * AfterTriggersData has the following fields:
3708  *
3709  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3710  * We mark firable events with the current firing cycle's ID so that we can
3711  * tell which ones to work on. This ensures sane behavior if a trigger
3712  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3713  * only fire those events that weren't already scheduled for firing.
3714  *
3715  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3716  * This is saved and restored across failed subtransactions.
3717  *
3718  * events is the current list of deferred events. This is global across
3719  * all subtransactions of the current transaction. In a subtransaction
3720  * abort, we know that the events added by the subtransaction are at the
3721  * end of the list, so it is relatively easy to discard them. The event
3722  * list chunks themselves are stored in event_cxt.
3723  *
3724  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3725  * (-1 when the stack is empty).
3726  *
3727  * query_stack[query_depth] is the per-query-level data, including these fields:
3728  *
3729  * events is a list of AFTER trigger events queued by the current query.
3730  * None of these are valid until the matching AfterTriggerEndQuery call
3731  * occurs. At that point we fire immediate-mode triggers, and append any
3732  * deferred events to the main events list.
3733  *
3734  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3735  * needed by events queued by the current query. (Note: we use just one
3736  * tuplestore even though more than one foreign table might be involved.
3737  * This is okay because tuplestores don't really care what's in the tuples
3738  * they store; but it's possible that someday it'd break.)
3739  *
3740  * tables is a List of AfterTriggersTableData structs for target tables
3741  * of the current query (see below).
3742  *
3743  * maxquerydepth is just the allocated length of query_stack.
3744  *
3745  * trans_stack holds per-subtransaction data, including these fields:
3746  *
3747  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3748  * state data. Each subtransaction level that modifies that state first
3749  * saves a copy, which we use to restore the state if we abort.
3750  *
3751  * events is a copy of the events head/tail pointers,
3752  * which we use to restore those values during subtransaction abort.
3753  *
3754  * query_depth is the subtransaction-start-time value of query_depth,
3755  * which we similarly use to clean up at subtransaction abort.
3756  *
3757  * firing_counter is the subtransaction-start-time value of firing_counter.
3758  * We use this to recognize which deferred triggers were fired (or marked
3759  * for firing) within an aborted subtransaction.
3760  *
3761  * We use GetCurrentTransactionNestLevel() to determine the correct array
3762  * index in trans_stack. maxtransdepth is the number of allocated entries in
3763  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3764  * in cases where errors during subxact abort cause multiple invocations
3765  * of AfterTriggerEndSubXact() at the same nesting depth.)
3766  *
3767  * We create an AfterTriggersTableData struct for each target table of the
3768  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3769  * either transition tables or statement-level triggers. This is used to
3770  * hold the relevant transition tables, as well as info tracking whether
3771  * we already queued the statement triggers. (We use that info to prevent
3772  * firing the same statement triggers more than once per statement, or really
3773  * once per transition table set.) These structs, along with the transition
3774  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3775  * That's sufficient lifespan because we don't allow transition tables to be
3776  * used by deferrable triggers, so they only need to survive until
3777  * AfterTriggerEndQuery.
3778  */
3782 
3783 typedef struct AfterTriggersData
3784 {
3785  CommandId firing_counter; /* next firing ID to assign */
3786  SetConstraintState state; /* the active S C state */
3787  AfterTriggerEventList events; /* deferred-event list */
3788  MemoryContext event_cxt; /* memory context for events, if any */
3789 
3790  /* per-query-level data: */
3791  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3792  int query_depth; /* current index in above array */
3793  int maxquerydepth; /* allocated len of above array */
3794 
3795  /* per-subtransaction-level data: */
3796  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3797  int maxtransdepth; /* allocated len of above array */
3799 
3801 {
3802  AfterTriggerEventList events; /* events pending from this query */
3803  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3804  List *tables; /* list of AfterTriggersTableData, see below */
3805 };
3806 
3808 {
3809  /* these fields are just for resetting at subtrans abort: */
3810  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3811  AfterTriggerEventList events; /* saved list pointer */
3812  int query_depth; /* saved query_depth */
3813  CommandId firing_counter; /* saved firing_counter */
3814 };
3815 
3817 {
3818  /* relid + cmdType form the lookup key for these structs: */
3819  Oid relid; /* target table's OID */
3820  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3821  bool closed; /* true when no longer OK to add tuples */
3822  bool before_trig_done; /* did we already queue BS triggers? */
3823  bool after_trig_done; /* did we already queue AS triggers? */
3824  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3825 
3826  /*
3827  * We maintain separate transition tables for UPDATE/INSERT/DELETE since
3828  * MERGE can run all three actions in a single statement. Note that UPDATE
3829  * needs both old and new transition tables whereas INSERT needs only new,
3830  * and DELETE needs only old.
3831  */
3832 
3833  /* "old" transition table for UPDATE, if any */
3835  /* "new" transition table for UPDATE, if any */
3837  /* "old" transition table for DELETE, if any */
3839  /* "new" transition table for INSERT, if any */
3841 
3842  TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3843 };
3844 
3846 
3847 static void AfterTriggerExecute(EState *estate,
3848  AfterTriggerEvent event,
3849  ResultRelInfo *relInfo,
3850  ResultRelInfo *src_relInfo,
3851  ResultRelInfo *dst_relInfo,
3852  TriggerDesc *trigdesc,
3853  FmgrInfo *finfo,
3854  Instrumentation *instr,
3855  MemoryContext per_tuple_context,
3856  TupleTableSlot *trig_tuple_slot1,
3857  TupleTableSlot *trig_tuple_slot2);
3859  CmdType cmdType);
3861  TupleDesc tupdesc);
3863  TupleTableSlot *oldslot,
3864  TupleTableSlot *newslot,
3865  TransitionCaptureState *transition_capture);
3866 static void TransitionTableAddTuple(EState *estate,
3867  TransitionCaptureState *transition_capture,
3868  ResultRelInfo *relinfo,
3869  TupleTableSlot *slot,
3870  TupleTableSlot *original_insert_tuple,
3871  Tuplestorestate *tuplestore);
3873 static SetConstraintState SetConstraintStateCreate(int numalloc);
3876  Oid tgoid, bool tgisdeferred);
3877 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3878 
3879 
3880 /*
3881  * Get the FDW tuplestore for the current trigger query level, creating it
3882  * if necessary.
3883  */
/*
 * (See header comment above.)  Lazily create the per-query-level FDW
 * tuplestore, making it live in per-subtransaction memory/resource-owner
 * scope so it survives until AfterTriggerEndQuery().
 * NOTE(review): the function-name line was lost in extraction; in upstream
 * PostgreSQL this is GetCurrentFDWTuplestore.  The lines fetching/storing
 * the query-level fdw_tuplestore pointer and switching memory context /
 * resource owner (3885, 3889, 3899, 3901, 3908) were also dropped --
 * confirm against the original.
 */
3884 static Tuplestorestate *
3886 {
3887  Tuplestorestate *ret;
3888 
3890  if (ret == NULL)
3891  {
3892  MemoryContext oldcxt;
3893  ResourceOwner saveResourceOwner;
3894 
3895  /*
3896  * Make the tuplestore valid until end of subtransaction. We really
3897  * only need it until AfterTriggerEndQuery().
3898  */
3900  saveResourceOwner = CurrentResourceOwner;
3902 
3903  ret = tuplestore_begin_heap(false, false, work_mem);
3904 
 /* Restore caller's resource owner and memory context */
3905  CurrentResourceOwner = saveResourceOwner;
3906  MemoryContextSwitchTo(oldcxt);
3907 
3909  }
3910 
3911  return ret;
3912 }
3913 
3914 /* ----------
3915  * afterTriggerCheckState()
3916  *
3917  * Returns true if the trigger event is actually in state DEFERRED.
3918  * ----------
3919  */
/*
 * (See header comment above.)  Returns true if the event's trigger is
 * currently in DEFERRED state, consulting (in priority order): whether the
 * trigger is deferrable at all, any per-trigger SET CONSTRAINTS entry, a
 * SET CONSTRAINTS ALL setting, and finally the trigger's INITIALLY
 * DEFERRED/IMMEDIATE default.
 * NOTE(review): the function-name line was lost in extraction; in upstream
 * PostgreSQL this is afterTriggerCheckState; the declaration line (3924)
 * initializing the local SET CONSTRAINTS "state" variable was also
 * dropped -- confirm against the original.
 */
3920 static bool
3922 {
3923  Oid tgoid = evtshared->ats_tgoid;
3925  int i;
3926 
3927  /*
3928  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3929  * constraints declared NOT DEFERRABLE), the state is always false.
3930  */
3931  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3932  return false;
3933 
3934  /*
3935  * If constraint state exists, SET CONSTRAINTS might have been executed
3936  * either for this trigger or for all triggers.
3937  */
3938  if (state != NULL)
3939  {
3940  /* Check for SET CONSTRAINTS for this specific trigger. */
3941  for (i = 0; i < state->numstates; i++)
3942  {
3943  if (state->trigstates[i].sct_tgoid == tgoid)
3944  return state->trigstates[i].sct_tgisdeferred;
3945  }
3946 
3947  /* Check for SET CONSTRAINTS ALL. */
3948  if (state->all_isset)
3949  return state->all_isdeferred;
3950  }
3951 
3952  /*
3953  * Otherwise return the default state for the trigger.
3954  */
3955  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3956 }
3957 
3958 
3959 /* ----------
3960  * afterTriggerAddEvent()
3961  *
3962  * Add a new trigger event to the specified queue.
3963  * The passed-in event data is copied.
3964  * ----------
3965  */
3966 static void
3968  AfterTriggerEvent event, AfterTriggerShared evtshared)
3969 {
3970  Size eventsize = SizeofTriggerEvent(event);
3971  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3972  AfterTriggerEventChunk *chunk;
3973  AfterTriggerShared newshared;
3974  AfterTriggerEvent newevent;
3975 
3976  /*
3977  * If empty list or not enough room in the tail chunk, make a new chunk.
3978  * We assume here that a new shared record will always be needed.
3979  */
3980  chunk = events->tail;
3981  if (chunk == NULL ||
3982  chunk->endfree - chunk->freeptr < needed)
3983  {
3984  Size chunksize;
3985 
3986  /* Create event context if we didn't already */
3987  if (afterTriggers.event_cxt == NULL)
3990  "AfterTriggerEvents",
3992 
3993  /*
3994  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3995  * These numbers are fairly arbitrary, though there is a hard limit at
3996  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3997  * shared records using the available space in ate_flags. Another
3998  * constraint is that if the chunk size gets too huge, the search loop
3999  * below would get slow given a (not too common) usage pattern with
4000  * many distinct event types in a chunk. Therefore, we double the
4001  * preceding chunk size only if there weren't too many shared records
4002  * in the preceding chunk; otherwise we halve it. This gives us some
4003  * ability to adapt to the actual usage pattern of the current query
4004  * while still having large chunk sizes in typical usage. All chunk
4005  * sizes used should be MAXALIGN multiples, to ensure that the shared
4006  * records will be aligned safely.
4007  */
4008 #define MIN_CHUNK_SIZE 1024
4009 #define MAX_CHUNK_SIZE (1024*1024)
4010 
4011 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
4012 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
4013 #endif
4014 
4015  if (chunk == NULL)
4016  chunksize = MIN_CHUNK_SIZE;
4017  else
4018  {
4019  /* preceding chunk size... */
4020  chunksize = chunk->endptr - (char *) chunk;
4021  /* check number of shared records in preceding chunk */
4022  if ((chunk->endptr - chunk->endfree) <=
4023  (100 * sizeof(AfterTriggerSharedData)))
4024  chunksize *= 2; /* okay, double it */
4025  else
4026  chunksize /= 2; /* too many shared records */
4027  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
4028  }
4029  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
4030  chunk->next = NULL;
4031  chunk->freeptr = CHUNK_DATA_START(chunk);
4032  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
4033  Assert(chunk->endfree - chunk->freeptr >= needed);
4034 
4035  if (events->head == NULL)
4036  events->head = chunk;
4037  else
4038  events->tail->next = chunk;
4039  events->tail = chunk;
4040  /* events->tailfree is now out of sync, but we'll fix it below */
4041  }
4042 
4043  /*
4044  * Try to locate a matching shared-data record already in the chunk. If
4045  * none, make a new one.
4046  */
4047  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
4048  (char *) newshared >= chunk->endfree;
4049  newshared--)
4050  {
4051  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
4052  newshared->ats_relid == evtshared->ats_relid &&
4053  newshared->ats_event == evtshared->ats_event &&
4054  newshared->ats_table == evtshared->ats_table &&
4055  newshared->ats_firing_id == 0)
4056  break;
4057  }
4058  if ((char *) newshared < chunk->endfree)
4059  {
4060  *newshared = *evtshared;
4061  newshared->ats_firing_id = 0; /* just to be sure */
4062  chunk->endfree = (char *) newshared;
4063  }
4064 
4065  /* Insert the data */
4066  newevent = (AfterTriggerEvent) chunk->freeptr;
4067  memcpy(newevent, event, eventsize);
4068  /* ... and link the new event to its shared record */
4069  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
4070  newevent->ate_flags |= (char *) newshared - (char *) newevent;
4071 
4072  chunk->freeptr += eventsize;
4073  events->tailfree = chunk->freeptr;
4074 }
4075 
4076 /* ----------
4077  * afterTriggerFreeEventList()
4078  *
4079  * Free all the event storage in the given list.
4080  * ----------
4081  */
4082 static void
4084 {
4085  AfterTriggerEventChunk *chunk;
4086 
4087  while ((chunk = events->head) != NULL)
4088  {
4089  events->head = chunk->next;
4090  pfree(chunk);
4091  }
4092  events->tail = NULL;
4093  events->tailfree = NULL;
4094 }
4095 
4096 /* ----------
4097  * afterTriggerRestoreEventList()
4098  *
4099  * Restore an event list to its prior length, removing all the events
4100  * added since it had the value old_events.
4101  * ----------
4102  */
4103 static void
4105  const AfterTriggerEventList *old_events)
4106 {
4107  AfterTriggerEventChunk *chunk;
4108  AfterTriggerEventChunk *next_chunk;
4109 
4110  if (old_events->tail == NULL)
4111  {
4112  /* restoring to a completely empty state, so free everything */
4113  afterTriggerFreeEventList(events);
4114  }
4115  else
4116  {
4117  *events = *old_events;
4118  /* free any chunks after the last one we want to keep */
4119  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
4120  {
4121  next_chunk = chunk->next;
4122  pfree(chunk);
4123  }
4124  /* and clean up the tail chunk to be the right length */
4125  events->tail->next = NULL;
4126  events->tail->freeptr = events->tailfree;
4127 
4128  /*
4129  * We don't make any effort to remove now-unused shared data records.
4130  * They might still be useful, anyway.
4131  */
4132  }
4133 }
4134 
4135 /* ----------
4136  * afterTriggerDeleteHeadEventChunk()
4137  *
4138  * Remove the first chunk of events from the query level's event list.
4139  * Keep any event list pointers elsewhere in the query level's data
4140  * structures in sync.
4141  * ----------
4142  */
4143 static void
4145 {
4146  AfterTriggerEventChunk *target = qs->events.head;
4147  ListCell *lc;
4148 
4149  Assert(target && target->next);
4150 
4151  /*
4152  * First, update any pointers in the per-table data, so that they won't be
4153  * dangling. Resetting obsoleted pointers to NULL will make
4154  * cancel_prior_stmt_triggers start from the list head, which is fine.
4155  */
4156  foreach(lc, qs->tables)
4157  {
4159 
4160  if (table->after_trig_done &&
4161  table->after_trig_events.tail == target)
4162  {
4163  table->after_trig_events.head = NULL;
4164  table->after_trig_events.tail = NULL;
4165  table->after_trig_events.tailfree = NULL;
4166  }
4167  }
4168 
4169  /* Now we can flush the head chunk */
4170  qs->events.head = target->next;
4171  pfree(target);
4172 }
4173 
4174 
4175 /* ----------
4176  * AfterTriggerExecute()
4177  *
4178  * Fetch the required tuples back from the heap and fire one
4179  * single trigger function.
4180  *
4181  * Frequently, this will be fired many times in a row for triggers of
4182  * a single relation. Therefore, we cache the open relation and provide
4183  * fmgr lookup cache space at the caller level. (For triggers fired at
4184  * the end of a query, we can even piggyback on the executor's state.)
4185  *
4186  * When fired for a cross-partition update of a partitioned table, the old
4187  * tuple is fetched using 'src_relInfo' (the source leaf partition) and
4188  * the new tuple using 'dst_relInfo' (the destination leaf partition), though
4189  * both are converted into the root partitioned table's format before passing
4190  * to the trigger function.
4191  *
4192  * event: event currently being fired.
4193  * relInfo: result relation for event.
4194  * src_relInfo: source partition of a cross-partition update
4195  * dst_relInfo: its destination partition
4196  * trigdesc: working copy of rel's trigger info.
4197  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4198  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4199  * or NULL if no instrumentation is wanted.
4200  * per_tuple_context: memory context to call trigger function in.
4201  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4202  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
4203  * ----------
4204  */
4205 static void
4207  AfterTriggerEvent event,
4208  ResultRelInfo *relInfo,
4209  ResultRelInfo *src_relInfo,
4210  ResultRelInfo *dst_relInfo,
4211  TriggerDesc *trigdesc,
4212  FmgrInfo *finfo, Instrumentation *instr,
4213  MemoryContext per_tuple_context,
4214  TupleTableSlot *trig_tuple_slot1,
4215  TupleTableSlot *trig_tuple_slot2)
4216 {
4217  Relation rel = relInfo->ri_RelationDesc;
4218  Relation src_rel = src_relInfo->ri_RelationDesc;
4219  Relation dst_rel = dst_relInfo->ri_RelationDesc;
4220  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4221  Oid tgoid = evtshared->ats_tgoid;
4222  TriggerData LocTriggerData = {0};
4223  HeapTuple rettuple;
4224  int tgindx;
4225  bool should_free_trig = false;
4226  bool should_free_new = false;
4227 
4228  /*
4229  * Locate trigger in trigdesc.
4230  */
4231  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4232  {
4233  if (trigdesc->triggers[tgindx].tgoid == tgoid)
4234  {
4235  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4236  break;
4237  }
4238  }
4239  if (LocTriggerData.tg_trigger == NULL)
4240  elog(ERROR, "could not find trigger %u", tgoid);
4241 
4242  /*
4243  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4244  * to include time spent re-fetching tuples in the trigger cost.
4245  */
4246  if (instr)
4247  InstrStartNode(instr + tgindx);
4248 
4249  /*
4250  * Fetch the required tuple(s).
4251  */
4252  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4253  {
4255  {
4256  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4257 
4258  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4259  trig_tuple_slot1))
4260  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4261 
4262  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4264  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4265  trig_tuple_slot2))
4266  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4267  }
4268  /* fall through */
4270 
4271  /*
4272  * Store tuple in the slot so that tg_trigtuple does not reference
4273  * tuplestore memory. (It is formally possible for the trigger
4274  * function to queue trigger events that add to the same
4275  * tuplestore, which can push other tuples out of memory.) The
4276  * distinction is academic, because we start with a minimal tuple
4277  * that is stored as a heap tuple, constructed in different memory
4278  * context, in the slot anyway.
4279  */
4280  LocTriggerData.tg_trigslot = trig_tuple_slot1;
4281  LocTriggerData.tg_trigtuple =
4282  ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
4283 
4284  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4286  {
4287  LocTriggerData.tg_newslot = trig_tuple_slot2;
4288  LocTriggerData.tg_newtuple =
4289  ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
4290  }
4291  else
4292  {
4293  LocTriggerData.tg_newtuple = NULL;
4294  }
4295  break;
4296 
4297  default:
4298  if (ItemPointerIsValid(&(event->ate_ctid1)))
4299  {
4300  TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate,
4301  src_relInfo);
4302 
4303  if (!table_tuple_fetch_row_version(src_rel,
4304  &(event->ate_ctid1),
4305  SnapshotAny,
4306  src_slot))
4307  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4308 
4309  /*
4310  * Store the tuple fetched from the source partition into the
4311  * target (root partitioned) table slot, converting if needed.
4312  */
4313  if (src_relInfo != relInfo)
4314  {
4315  TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo);
4316 
4317  LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
4318  if (map)
4319  {
4321  src_slot,
4322  LocTriggerData.tg_trigslot);
4323  }
4324  else
4325  ExecCopySlot(LocTriggerData.tg_trigslot, src_slot);
4326  }
4327  else
4328  LocTriggerData.tg_trigslot = src_slot;
4329  LocTriggerData.tg_trigtuple =
4330  ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
4331  }
4332  else
4333  {
4334  LocTriggerData.tg_trigtuple = NULL;
4335  }
4336 
4337  /* don't touch ctid2 if not there */
4339  (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) &&
4340  ItemPointerIsValid(&(event->ate_ctid2)))
4341  {
4342  TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate,
4343  dst_relInfo);
4344 
4345  if (!table_tuple_fetch_row_version(dst_rel,
4346  &(event->ate_ctid2),
4347  SnapshotAny,
4348  dst_slot))
4349  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4350 
4351  /*
4352  * Store the tuple fetched from the destination partition into
4353  * the target (root partitioned) table slot, converting if
4354  * needed.
4355  */
4356  if (dst_relInfo != relInfo)
4357  {
4358  TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo);
4359 
4360  LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
4361  if (map)
4362  {
4364  dst_slot,
4365  LocTriggerData.tg_newslot);
4366  }
4367  else
4368  ExecCopySlot(LocTriggerData.tg_newslot, dst_slot);
4369  }
4370  else
4371  LocTriggerData.tg_newslot = dst_slot;
4372  LocTriggerData.tg_newtuple =
4373  ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
4374  }
4375  else
4376  {
4377  LocTriggerData.tg_newtuple = NULL;
4378  }
4379  }
4380 
4381  /*
4382  * Set up the tuplestore information to let the trigger have access to
4383  * transition tables. When we first make a transition table available to
4384  * a trigger, mark it "closed" so that it cannot change anymore. If any
4385  * additional events of the same type get queued in the current trigger
4386  * query level, they'll go into new transition tables.
4387  */
4388  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4389  if (evtshared->ats_table)
4390  {
4391  if (LocTriggerData.tg_trigger->tgoldtable)
4392  {
4393  if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event))
4394  LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore;
4395  else
4396  LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore;
4397  evtshared->ats_table->closed = true;
4398  }
4399 
4400  if (LocTriggerData.tg_trigger->tgnewtable)
4401  {
4402  if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event))
4403  LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore;
4404  else
4405  LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore;
4406  evtshared->ats_table->closed = true;
4407  }
4408  }
4409 
4410  /*
4411  * Setup the remaining trigger information
4412  */
4413  LocTriggerData.type = T_TriggerData;
4414  LocTriggerData.tg_event =
4416  LocTriggerData.tg_relation = rel;
4417  if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4418  LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4419 
4420  MemoryContextReset(per_tuple_context);
4421 
4422  /*
4423  * Call the trigger and throw away any possibly returned updated tuple.
4424  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4425  */
4426  rettuple = ExecCallTriggerFunc(&LocTriggerData,
4427  tgindx,
4428  finfo,
4429  NULL,
4430  per_tuple_context);
4431  if (rettuple != NULL &&
4432  rettuple != LocTriggerData.tg_trigtuple &&
4433  rettuple != LocTriggerData.tg_newtuple)
4434  heap_freetuple(rettuple);
4435 
4436  /*
4437  * Release resources
4438  */
4439  if (should_free_trig)
4440  heap_freetuple(LocTriggerData.tg_trigtuple);
4441  if (should_free_new)
4442  heap_freetuple(LocTriggerData.tg_newtuple);
4443 
4444  /* don't clear slots' contents if foreign table */
4445  if (trig_tuple_slot1 == NULL)
4446  {
4447  if (LocTriggerData.tg_trigslot)
4448  ExecClearTuple(LocTriggerData.tg_trigslot);
4449  if (LocTriggerData.tg_newslot)
4450  ExecClearTuple(LocTriggerData.tg_newslot);
4451  }
4452 
4453  /*
4454  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4455  * one "tuple returned" (really the number of firings).
4456  */
4457  if (instr)
4458  InstrStopNode(instr + tgindx, 1);
4459 }
4460 
4461 
4462 /*
4463  * afterTriggerMarkEvents()
4464  *
4465  * Scan the given event list for not yet invoked events. Mark the ones
4466  * that can be invoked now with the current firing ID.
4467  *
4468  * If move_list isn't NULL, events that are not to be invoked now are
4469  * transferred to move_list.
4470  *
4471  * When immediate_only is true, do not invoke currently-deferred triggers.
4472  * (This will be false only at main transaction exit.)
4473  *
4474  * Returns true if any invokable events were found.
4475  */
4476 static bool
4478  AfterTriggerEventList *move_list,
4479  bool immediate_only)
4480 {
4481  bool found = false;
4482  bool deferred_found = false;
4483  AfterTriggerEvent event;
4484  AfterTriggerEventChunk *chunk;
4485 
4486  for_each_event_chunk(event, chunk, *events)
4487  {
4488  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4489  bool defer_it = false;
4490 
4491  if (!(event->ate_flags &
4493  {
4494  /*
4495  * This trigger hasn't been called or scheduled yet. Check if we
4496  * should call it now.
4497  */
4498  if (immediate_only && afterTriggerCheckState(evtshared))
4499  {
4500  defer_it = true;
4501  }
4502  else
4503  {
4504  /*
4505  * Mark it as to be fired in this firing cycle.
4506  */
4508  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4509  found = true;
4510  }
4511  }
4512 
4513  /*
4514  * If it's deferred, move it to move_list, if requested.
4515  */
4516  if (defer_it && move_list != NULL)
4517  {
4518  deferred_found = true;
4519  /* add it to move_list */
4520  afterTriggerAddEvent(move_list, event, evtshared);
4521  /* mark original copy "done" so we don't do it again */
4522  event->ate_flags |= AFTER_TRIGGER_DONE;
4523  }
4524  }
4525 
4526  /*
4527  * We could allow deferred triggers if, before the end of the
4528  * security-restricted operation, we were to verify that a SET CONSTRAINTS
4529  * ... IMMEDIATE has fired all such triggers. For now, don't bother.
4530  */
4531  if (deferred_found && InSecurityRestrictedOperation())
4532  ereport(ERROR,
4533  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
4534  errmsg("cannot fire deferred trigger within security-restricted operation")));
4535 
4536  return found;
4537 }
4538 
4539 /*
4540  * afterTriggerInvokeEvents()
4541  *
4542  * Scan the given event list for events that are marked as to be fired
4543  * in the current firing cycle, and fire them.
4544  *
4545  * If estate isn't NULL, we use its result relation info to avoid repeated
4546  * openings and closing of trigger target relations. If it is NULL, we
4547  * make one locally to cache the info in case there are multiple trigger
4548  * events per rel.
4549  *
4550  * When delete_ok is true, it's safe to delete fully-processed events.
4551  * (We are not very tense about that: we simply reset a chunk to be empty
4552  * if all its events got fired. The objective here is just to avoid useless
4553  * rescanning of events when a trigger queues new events during transaction
4554  * end, so it's not necessary to worry much about the case where only
4555  * some events are fired.)
4556  *
4557  * Returns true if no unfired events remain in the list (this allows us
4558  * to avoid repeating afterTriggerMarkEvents).
4559  */
4560 static bool
4562  CommandId firing_id,
4563  EState *estate,
4564  bool delete_ok)
4565 {
4566  bool all_fired = true;
4567  AfterTriggerEventChunk *chunk;
4568  MemoryContext per_tuple_context;
4569  bool local_estate = false;
4570  ResultRelInfo *rInfo = NULL;
4571  Relation rel = NULL;
4572  TriggerDesc *trigdesc = NULL;
4573  FmgrInfo *finfo = NULL;
4574  Instrumentation *instr = NULL;
4575  TupleTableSlot *slot1 = NULL,
4576  *slot2 = NULL;
4577 
4578  /* Make a local EState if need be */
4579  if (estate == NULL)
4580  {
4581  estate = CreateExecutorState();
4582  local_estate = true;
4583  }
4584 
4585  /* Make a per-tuple memory context for trigger function calls */
4586  per_tuple_context =
4588  "AfterTriggerTupleContext",
4590 
4591  for_each_chunk(chunk, *events)
4592  {
4593  AfterTriggerEvent event;
4594  bool all_fired_in_chunk = true;
4595 
4596  for_each_event(event, chunk)
4597  {
4598  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4599 
4600  /*
4601  * Is it one for me to fire?
4602  */
4603  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4604  evtshared->ats_firing_id == firing_id)
4605  {
4606  ResultRelInfo *src_rInfo,
4607  *dst_rInfo;
4608 
4609  /*
4610  * So let's fire it... but first, find the correct relation if
4611  * this is not the same relation as before.
4612  */
4613  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4614  {
4615  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid,
4616  NULL);
4617  rel = rInfo->ri_RelationDesc;
4618  /* Catch calls with insufficient relcache refcounting */
4620  trigdesc = rInfo->ri_TrigDesc;
4621  finfo = rInfo->ri_TrigFunctions;
4622  instr = rInfo->ri_TrigInstrument;
4623  if (slot1 != NULL)
4624  {
4627  slot1 = slot2 = NULL;
4628  }
4629  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4630  {
4631  slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4633  slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4635  }
4636  if (trigdesc == NULL) /* should not happen */
4637  elog(ERROR, "relation %u has no triggers",
4638  evtshared->ats_relid);
4639  }
4640 
4641  /*
4642  * Look up source and destination partition result rels of a
4643  * cross-partition update event.
4644  */
4645  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
4647  {
4648  Assert(OidIsValid(event->ate_src_part) &&
4649  OidIsValid(event->ate_dst_part));
4650  src_rInfo = ExecGetTriggerResultRel(estate,
4651  event->ate_src_part,
4652  rInfo);
4653  dst_rInfo = ExecGetTriggerResultRel(estate,
4654  event->ate_dst_part,
4655  rInfo);
4656  }
4657  else
4658  src_rInfo = dst_rInfo = rInfo;
4659 
4660  /*
4661  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4662  * still set, so recursive examinations of the event list
4663  * won't try to re-fire it.
4664  */
4665  AfterTriggerExecute(estate, event, rInfo,
4666  src_rInfo, dst_rInfo,
4667  trigdesc, finfo, instr,
4668  per_tuple_context, slot1, slot2);
4669 
4670  /*
4671  * Mark the event as done.
4672  */
4673  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4674  event->ate_flags |= AFTER_TRIGGER_DONE;
4675  }
4676  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4677  {
4678  /* something remains to be done */
4679  all_fired = all_fired_in_chunk = false;
4680  }
4681  }
4682 
4683  /* Clear the chunk if delete_ok and nothing left of interest */
4684  if (delete_ok && all_fired_in_chunk)
4685  {
4686  chunk->freeptr = CHUNK_DATA_START(chunk);
4687  chunk->endfree = chunk->endptr;
4688 
4689  /*
4690  * If it's last chunk, must sync event list's tailfree too. Note
4691  * that delete_ok must NOT be passed as true if there could be
4692  * additional AfterTriggerEventList values pointing at this event
4693  * list, since we'd fail to fix their copies of tailfree.
4694  */
4695  if (chunk == events->tail)
4696  events->tailfree = chunk->freeptr;
4697  }
4698  }
4699  if (slot1 != NULL)
4700  {
4703  }
4704 
4705  /* Release working resources */
4706  MemoryContextDelete(per_tuple_context);
4707 
4708  if (local_estate)
4709  {
4710  ExecCloseResultRelations(estate);
4711  ExecResetTupleTable(estate->es_tupleTable, false);
4712  FreeExecutorState(estate);
4713  }
4714 
4715  return all_fired;
4716 }
4717 
4718 
4719 /*
4720  * GetAfterTriggersTableData
4721  *
4722  * Find or create an AfterTriggersTableData struct for the specified
4723  * trigger event (relation + operation type). Ignore existing structs
4724  * marked "closed"; we don't want to put any additional tuples into them,
4725  * nor change their stmt-triggers-fired state.
4726  *
4727  * Note: the AfterTriggersTableData list is allocated in the current
4728  * (sub)transaction's CurTransactionContext. This is OK because
4729  * we don't need it to live past AfterTriggerEndQuery.
4730  */
4731 static AfterTriggersTableData *
4733 {
4734  AfterTriggersTableData *table;
4736  MemoryContext oldcxt;
4737  ListCell *lc;
4738 
4739  /* Caller should have ensured query_depth is OK. */
4743 
4744  foreach(lc, qs->tables)
4745  {
4746  table = (AfterTriggersTableData *) lfirst(lc);
4747  if (table->relid == relid && table->cmdType == cmdType &&
4748  !table->closed)
4749  return table;
4750  }
4751 
4753 
4755  table->relid = relid;
4756  table->cmdType = cmdType;
4757  qs->tables = lappend(qs->tables, table);
4758 
4759  MemoryContextSwitchTo(oldcxt);
4760 
4761  return table;
4762 }
4763 
4764 /*
4765  * Returns a TupleTableSlot suitable for holding the tuples to be put
4766  * into AfterTriggersTableData's transition table tuplestores.
4767  */
4768 static TupleTableSlot *
4770  TupleDesc tupdesc)
4771 {
4772  /* Create it if not already done. */
4773  if (!table->storeslot)
4774  {
4775  MemoryContext oldcxt;
4776 
4777  /*
4778  * We need this slot only until AfterTriggerEndQuery, but making it
4779  * last till end-of-subxact is good enough. It'll be freed by
4780  * AfterTriggerFreeQuery(). However, the passed-in tupdesc might have
4781  * a different lifespan, so we'd better make a copy of that.
4782  */
4784  tupdesc = CreateTupleDescCopy(tupdesc);
4785  table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
4786  MemoryContextSwitchTo(oldcxt);
4787  }
4788 
4789  return table->storeslot;
4790 }
4791 
4792 /*
4793  * MakeTransitionCaptureState
4794  *
4795  * Make a TransitionCaptureState object for the given TriggerDesc, target
4796  * relation, and operation type. The TCS object holds all the state needed
4797  * to decide whether to capture tuples in transition tables.
4798  *
4799  * If there are no triggers in 'trigdesc' that request relevant transition
4800  * tables, then return NULL.
4801  *
4802  * The resulting object can be passed to the ExecAR* functions. When
4803  * dealing with child tables, the caller can set tcs_original_insert_tuple
4804  * to avoid having to reconstruct the original tuple in the root table's
4805  * format.
4806  *
4807  * Note that we copy the flags from a parent table into this struct (rather
4808  * than subsequently using the relation's TriggerDesc directly) so that we can
4809  * use it to control collection of transition tuples from child tables.
4810  *
4811  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4812  * on the same table during one query should share one transition table.
4813  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4814  * looked up using the table OID + CmdType, and are merely referenced by
4815  * the TransitionCaptureState objects we hand out to callers.
4816  */
4819 {
4821  bool need_old_upd,
4822  need_new_upd,
4823  need_old_del,
4824  need_new_ins;
4825  AfterTriggersTableData *table;
4826  MemoryContext oldcxt;
4827  ResourceOwner saveResourceOwner;
4828 
4829  if (trigdesc == NULL)
4830  return NULL;
4831 
4832  /* Detect which table(s) we need. */
4833  switch (cmdType)
4834  {
4835  case CMD_INSERT:
4836  need_old_upd = need_old_del = need_new_upd = false;
4837  need_new_ins = trigdesc->trig_insert_new_table;
4838  break;
4839  case CMD_UPDATE:
4840  need_old_upd = trigdesc->trig_update_old_table;
4841  need_new_upd = trigdesc->trig_update_new_table;
4842  need_old_del = need_new_ins = false;
4843  break;
4844  case CMD_DELETE:
4845  need_old_del = trigdesc->trig_delete_old_table;
4846  need_old_upd = need_new_upd = need_new_ins = false;
4847  break;
4848  case CMD_MERGE:
4849  need_old_upd = trigdesc->trig_update_old_table;
4850  need_new_upd = trigdesc->trig_update_new_table;
4851  need_old_del = trigdesc->trig_delete_old_table;
4852  need_new_ins = trigdesc->trig_insert_new_table;
4853  break;
4854  default:
4855  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4856  /* keep compiler quiet */
4857  need_old_upd = need_new_upd = need_old_del = need_new_ins = false;
4858  break;
4859  }
4860  if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del)
4861  return NULL;
4862 
4863  /* Check state, like AfterTriggerSaveEvent. */
4864  if (afterTriggers.query_depth < 0)
4865  elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4866 
4867  /* Be sure we have enough space to record events at this query depth. */
4870 
4871  /*
4872  * Find or create an AfterTriggersTableData struct to hold the
4873  * tuplestore(s). If there's a matching struct but it's marked closed,
4874  * ignore it; we need a newer one.
4875  *
4876  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4877  * allocated in the current (sub)transaction's CurTransactionContext, and
4878  * the tuplestores are managed by the (sub)transaction's resource owner.
4879  * This is sufficient lifespan because we do not allow triggers using
4880  * transition tables to be deferrable; they will be fired during
4881  * AfterTriggerEndQuery, after which it's okay to delete the data.
4882  */
4883  table = GetAfterTriggersTableData(relid, cmdType);
4884 
4885  /* Now create required tuplestore(s), if we don't have them already. */
4887  saveResourceOwner = CurrentResourceOwner;
4889 
4890  if (need_old_upd && table->old_upd_tuplestore == NULL)
4891  table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4892  if (need_new_upd && table->new_upd_tuplestore == NULL)
4893  table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4894  if (need_old_del && table->old_del_tuplestore == NULL)
4895  table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4896  if (need_new_ins && table->new_ins_tuplestore == NULL)
4897  table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4898 
4899  CurrentResourceOwner = saveResourceOwner;
4900  MemoryContextSwitchTo(oldcxt);
4901 
4902  /* Now build the TransitionCaptureState struct, in caller's context */
4904  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4905  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4906  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4907  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4908  state->tcs_private = table;
4909 
4910  return state;
4911 }
4912 
4913 
4914 /* ----------
4915  * AfterTriggerBeginXact()
4916  *
4917  * Called at transaction start (either BEGIN or implicit for single
4918  * statement outside of transaction block).
4919  * ----------
4920  */
4921 void
4923 {
4924  /*
4925  * Initialize after-trigger state structure to empty
4926  */
4927  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4929 
4930  /*
4931  * Verify that there is no leftover state remaining. If these assertions
4932  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4933  * up properly.
4934  */
4935  Assert(afterTriggers.state == NULL);
4936  Assert(afterTriggers.query_stack == NULL);
4938  Assert(afterTriggers.event_cxt == NULL);
4939  Assert(afterTriggers.events.head == NULL);
4940  Assert(afterTriggers.trans_stack == NULL);
4942 }
4943 
4944 
4945 /* ----------
4946  * AfterTriggerBeginQuery()
4947  *
4948  * Called just before we start processing a single query within a
4949  * transaction (or subtransaction). Most of the real work gets deferred
4950  * until somebody actually tries to queue a trigger event.
4951  * ----------
4952  */
4953 void
4955 {
4956  /* Increase the query stack depth */
4958 }
4959 
4960 
4961 /* ----------
4962  * AfterTriggerEndQuery()
4963  *
4964  * Called after one query has been completely processed. At this time
4965  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4966  * transfer deferred trigger events to the global deferred-trigger list.
4967  *
4968  * Note that this must be called BEFORE closing down the executor
4969  * with ExecutorEnd, because we make use of the EState's info about
4970  * target relations. Normally it is called from ExecutorFinish.
4971  * ----------
4972  */
4973 void
/*
 * NOTE(review): signature line dropped by the scrape (doxygen line 4974).
 * The body below references an `estate` parameter and a local `qs`
 * (query-stack entry) whose declaration/assignment lines were also dropped
 * — the gaps in the embedded doxygen numbering mark each missing line.
 */
4975 {
 /* NOTE(review): a statement/Assert dropped here (doxygen 4976 missing). */
4977 
4978  /* Must be inside a query, too */
 /* NOTE(review): the corresponding Assert dropped (doxygen 4979 missing). */
4980 
4981  /*
4982  * If we never even got as far as initializing the event stack, there
4983  * certainly won't be any events, so exit quickly.
4984  */
 /* NOTE(review): the if-condition line dropped (doxygen 4985 missing). */
4986  {
 /* NOTE(review): a cleanup statement dropped (doxygen 4987 missing). */
4988  return;
4989  }
4990 
4991  /*
4992  * Process all immediate-mode triggers queued by the query, and move the
4993  * deferred ones to the main list of deferred events.
4994  *
4995  * Notice that we decide which ones will be fired, and put the deferred
4996  * ones on the main list, before anything is actually fired. This ensures
4997  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4998  * IMMEDIATE: all events we have decided to defer will be available for it
4999  * to fire.
5000  *
5001  * We loop in case a trigger queues more events at the same query level.
5002  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
5003  * will instead fire any triggers in a dedicated query level. Foreign key
5004  * enforcement triggers do add to the current query level, thanks to their
5005  * passing fire_triggers = false to SPI_execute_snapshot(). Other
5006  * C-language triggers might do likewise.
5007  *
5008  * If we find no firable events, we don't have to increment
5009  * firing_counter.
5010  */
 /* NOTE(review): `qs` assignment dropped here (doxygen 5011 missing). */
5012 
5013  for (;;)
5014  {
 /* NOTE(review): loop condition call dropped (doxygen 5015 missing);
  * presumably afterTriggerMarkEvents per the comment above — confirm. */
5016  {
5017  CommandId firing_id = afterTriggers.firing_counter++;
5018  AfterTriggerEventChunk *oldtail = qs->events.tail;
5019 
5020  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
5021  break; /* all fired */
5022 
5023  /*
5024  * Firing a trigger could result in query_stack being repalloc'd,
5025  * so we must recalculate qs after each afterTriggerInvokeEvents
5026  * call. Furthermore, it's unsafe to pass delete_ok = true here,
5027  * because that could cause afterTriggerInvokeEvents to try to
5028  * access qs->events after the stack has been repalloc'd.
5029  */
 /* NOTE(review): the qs recalculation dropped (doxygen 5030 missing). */
5031 
5032  /*
5033  * We'll need to scan the events list again. To reduce the cost
5034  * of doing so, get rid of completely-fired chunks. We know that
5035  * all events were marked IN_PROGRESS or DONE at the conclusion of
5036  * afterTriggerMarkEvents, so any still-interesting events must
5037  * have been added after that, and so must be in the chunk that
5038  * was then the tail chunk, or in later chunks. So, zap all
5039  * chunks before oldtail. This is approximately the same set of
5040  * events we would have gotten rid of by passing delete_ok = true.
5041  */
5042  Assert(oldtail != NULL);
5043  while (qs->events.head != oldtail)
 /* NOTE(review): loop body (chunk-delete call) dropped (doxygen 5044). */
5045  }
5046  else
5047  break;
5048  }
5049 
5050  /* Release query-level-local storage, including tuplestores if any */
 /* NOTE(review): the AfterTriggerFreeQuery call and the depth decrement
  * were dropped (doxygen 5051, 5053 missing). */
5052 
5054 }
5055 
5056 
5057 /*
5058  * AfterTriggerFreeQuery
5059  * Release subsidiary storage for a trigger query level.
5060  * This includes closing down tuplestores.
5061  * Note: it's important for this to be safe if interrupted by an error
5062  * and then called again for the same query level.
5063  */
5064 static void
/*
 * NOTE(review): signature line dropped by the scrape (doxygen 5065);
 * per the comment above this is AfterTriggerFreeQuery, taking the
 * query-level struct `qs` used throughout the body.
 */
5066 {
5067  Tuplestorestate *ts;
5068  List *tables;
5069  ListCell *lc;
5070 
5071  /* Drop the trigger events */
 /* NOTE(review): the event-list free call dropped (doxygen 5072 missing). */
5073 
5074  /* Drop FDW tuplestore if any */
 /*
  * Each tuplestore pointer is cleared BEFORE tuplestore_end() so that a
  * repeat call after an error won't double-free — see the header comment
  * ("important for this to be safe if interrupted ... and called again").
  */
5075  ts = qs->fdw_tuplestore;
5076  qs->fdw_tuplestore = NULL;
5077  if (ts)
5078  tuplestore_end(ts);
5079 
5080  /* Release per-table subsidiary storage */
5081  tables = qs->tables;
5082  foreach(lc, tables)
5083  {
 /* NOTE(review): the `table` loop-variable extraction from `lc` was
  * dropped by extraction (doxygen 5084 missing). */
5085 
5086  ts = table->old_upd_tuplestore;
5087  table->old_upd_tuplestore = NULL;
5088  if (ts)
5089  tuplestore_end(ts);
5090  ts = table->new_upd_tuplestore;
5091  table->new_upd_tuplestore = NULL;
5092  if (ts)
5093  tuplestore_end(ts);
5094  ts = table->old_del_tuplestore;
5095  table->old_del_tuplestore = NULL;
5096  if (ts)
5097  tuplestore_end(ts);
5098  ts = table->new_ins_tuplestore;
5099  table->new_ins_tuplestore = NULL;
5100  if (ts)
5101  tuplestore_end(ts);
5102  if (table->storeslot)
5103  {
5104  TupleTableSlot *slot = table->storeslot;
5105 
5106  table->storeslot = NULL;
 /* NOTE(review): the slot-release call dropped (doxygen 5107 missing). */
5108  }
5109  }
5110 
5111  /*
5112  * Now free the AfterTriggersTableData structs and list cells. Reset list
5113  * pointer first; if list_free_deep somehow gets an error, better to leak
5114  * that storage than have an infinite loop.
5115  */
5116  qs->tables = NIL;
5117  list_free_deep(tables);
5118 }
5119 
5120 
5121 /* ----------
5122  * AfterTriggerFireDeferred()
5123  *
5124  * Called just before the current transaction is committed. At this
5125  * time we invoke all pending DEFERRED triggers.
5126  *
5127  * It is possible for other modules to queue additional deferred triggers
5128  * during pre-commit processing; therefore xact.c may have to call this
5129  * multiple times.
5130  * ----------
5131  */
5132 void
/*
 * NOTE(review): signature line dropped by the scrape (doxygen 5133);
 * per the banner above, this is AfterTriggerFireDeferred(void).
 */
5134 {
5135  AfterTriggerEventList *events;
5136  bool snap_pushed = false;
5137 
5138  /* Must not be inside a query */
 /* NOTE(review): the corresponding Assert dropped (doxygen 5139 missing). */
5140 
5141  /*
5142  * If there are any triggers to fire, make sure we have set a snapshot for
5143  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
5144  * can't assume ActiveSnapshot is valid on entry.)
5145  */
5146  events = &afterTriggers.events;
5147  if (events->head != NULL)
5148  {
 /* NOTE(review): the snapshot-push call dropped (doxygen 5149 missing). */
5150  snap_pushed = true;
5151  }
5152 
5153  /*
5154  * Run all the remaining triggers. Loop until they are all gone, in case
5155  * some trigger queues more for us to do.
5156  */
5157  while (afterTriggerMarkEvents(events, NULL, false))
5158  {
5159  CommandId firing_id = afterTriggers.firing_counter++;
5160 
 /* delete_ok = true here: at commit there is no outer scan to invalidate */
5161  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
5162  break; /* all fired */
5163  }
5164 
5165  /*
5166  * We don't bother freeing the event list, since it will go away anyway
5167  * (and more efficiently than via pfree) in AfterTriggerEndXact.
5168  */
5169 
5170  if (snap_pushed)
 /* NOTE(review): the snapshot-pop call dropped (doxygen 5171 missing). */
5172 }
5173 
5174 
5175 /* ----------
5176  * AfterTriggerEndXact()
5177  *
5178  * The current transaction is finishing.
5179  *
5180  * Any unfired triggers are canceled so we simply throw
5181  * away anything we know.
5182  *
5183  * Note: it is possible for this to be called repeatedly in case of
5184  * error during transaction abort; therefore, do not complain if
5185  * already closed down.
5186  * ----------
5187  */
5188 void
5189 AfterTriggerEndXact(bool isCommit)
5190 {
5191  /*
5192  * Forget the pending-events list.
5193  *
5194  * Since all the info is in TopTransactionContext or children thereof, we
5195  * don't really need to do anything to reclaim memory. However, the
5196  * pending-events list could be large, and so it's useful to discard it as
5197  * soon as possible --- especially if we are aborting because we ran out
5198  * of memory for the list!
5199  */
 /* NOTE(review): the if-condition (doxygen 5200) and the context-delete
  * call (doxygen 5202) were dropped by extraction. */
5201  {
5203  afterTriggers.event_cxt = NULL;
5204  afterTriggers.events.head = NULL;
5205  afterTriggers.events.tail = NULL;
5206  afterTriggers.events.tailfree = NULL;
5207  }
5208 
5209  /*
5210  * Forget any subtransaction state as well. Since this can't be very
5211  * large, we let the eventual reset of TopTransactionContext free the
5212  * memory instead of doing it here.
5213  */
5214  afterTriggers.trans_stack = NULL;
 /* NOTE(review): a statement dropped here (doxygen 5215 missing);
  * presumably resetting maxtransdepth — confirm against upstream. */
5216 
5217 
5218  /*
5219  * Forget the query stack and constraint-related state information. As
5220  * with the subtransaction state information, we don't bother freeing the
5221  * memory here.
5222  */
5223  afterTriggers.query_stack = NULL;
 /* NOTE(review): a statement dropped here (doxygen 5224 missing);
  * presumably resetting maxquerydepth — confirm against upstream. */
5225  afterTriggers.state = NULL;
5226 
5227  /* No more afterTriggers manipulation until next transaction starts. */
 /* NOTE(review): the final statement dropped (doxygen 5228 missing);
  * presumably setting query_depth to its "not in a query" sentinel. */
5229 }
5230 
5231 /*
5232  * AfterTriggerBeginSubXact()
5233  *
5234  * Start a subtransaction.
5235  */
5236 void
/*
 * NOTE(review): signature line dropped by the scrape (doxygen 5237);
 * per the banner above, this is AfterTriggerBeginSubXact(void).
 */
5238 {
5239  int my_level = GetCurrentTransactionNestLevel();
5240 
5241  /*
5242  * Allocate more space in the trans_stack if needed. (Note: because the
5243  * minimum nest level of a subtransaction is 2, we waste the first couple
5244  * entries of the array; not worth the notational effort to avoid it.)
5245  */
5246  while (my_level >= afterTriggers.maxtransdepth)
5247  {
5248  if (afterTriggers.maxtransdepth == 0)
5249  {
5250  /* Arbitrarily initialize for max of 8 subtransaction levels */
 /* NOTE(review): the initial allocation statement was dropped (doxygen
  * 5251-5252 missing; only the size expression survives below), as was
  * the line setting maxtransdepth = 8 (doxygen 5254). */
5253  8 * sizeof(AfterTriggersTransData));
5255  }
5256  else
5257  {
5258  /* repalloc will keep the stack in the same context */
5259  int new_alloc = afterTriggers.maxtransdepth * 2;
5260 
 /* NOTE(review): the repalloc assignment dropped (doxygen 5261-5262). */
5263  new_alloc * sizeof(AfterTriggersTransData));
5264  afterTriggers.maxtransdepth = new_alloc;
5265  }
5266  }
5267 
5268  /*
5269  * Push the current information into the stack. The SET CONSTRAINTS state
5270  * is not saved until/unless changed. Likewise, we don't make a
5271  * per-subtransaction event context until needed.
5272  */
5273  afterTriggers.trans_stack[my_level].state = NULL;
 /* NOTE(review): saves of events tail, query_depth, and firing_counter
  * into trans_stack[my_level] were dropped (doxygen 5274-5276 missing);
  * AfterTriggerEndSubXact below reads those fields back. */
5277 }
5278 
5279 /*
5280  * AfterTriggerEndSubXact()
5281  *
5282  * The current subtransaction is ending.
5283  */
5284 void
/*
 * NOTE(review): signature line dropped by the scrape (doxygen 5285);
 * the body branches on an `isCommit` parameter, matching the banner
 * "The current subtransaction is ending".
 */
5286 {
5287  int my_level = GetCurrentTransactionNestLevel();
 /* NOTE(review): the declaration of `state` (used below) was dropped
  * (doxygen 5288 missing); presumably SetConstraintState — confirm. */
5289  AfterTriggerEvent event;
5290  AfterTriggerEventChunk *chunk;
5291  CommandId subxact_firing_id;
5292 
5293  /*
5294  * Pop the prior state if needed.
5295  */
5296  if (isCommit)
5297  {
5298  Assert(my_level < afterTriggers.maxtransdepth);
5299  /* If we saved a prior state, we don't need it anymore */
5300  state = afterTriggers.trans_stack[my_level].state;
5301  if (state != NULL)
5302  pfree(state);
5303  /* this avoids double pfree if error later: */
5304  afterTriggers.trans_stack[my_level].state = NULL;
 /* NOTE(review): Asserts on query_depth/firing_counter dropped
  * (doxygen 5305-5306 missing). */
5307  }
5308  else
5309  {
5310  /*
5311  * Aborting. It is possible subxact start failed before calling
5312  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
5313  * trans_stack levels that aren't there.
5314  */
5315  if (my_level >= afterTriggers.maxtransdepth)
5316  return;
5317 
5318  /*
5319  * Release query-level storage for queries being aborted, and restore
5320  * query_depth to its pre-subxact value. This assumes that a
5321  * subtransaction will not add events to query levels started in a
5322  * earlier transaction state.
5323  */
 /* NOTE(review): the while-condition (doxygen 5324), the loop body
  * freeing each query level (5326-5328), and the depth restore
  * (5330-5331) were all dropped by extraction. */
5325  {
5329  }
5332 
5333  /*
5334  * Restore the global deferred-event list to its former length,
5335  * discarding any events queued by the subxact.
5336  */
 /* NOTE(review): the restore call's first line dropped (doxygen 5337);
  * only its second argument survives below. */
5338  &afterTriggers.trans_stack[my_level].events);
5339 
5340  /*
5341  * Restore the trigger state. If the saved state is NULL, then this
5342  * subxact didn't save it, so it doesn't need restoring.
5343  */
5344  state = afterTriggers.trans_stack[my_level].state;
5345  if (state != NULL)
5346  {
 /* NOTE(review): the actual restore statements dropped
  * (doxygen 5347-5348 missing). */
5349  }
5350  /* this avoids double pfree if error later: */
5351  afterTriggers.trans_stack[my_level].state = NULL;
5352 
5353  /*
5354  * Scan for any remaining deferred events that were marked DONE or IN
5355  * PROGRESS by this subxact or a child, and un-mark them. We can
5356  * recognize such events because they have a firing ID greater than or
5357  * equal to the firing_counter value we saved at subtransaction start.
5358  * (This essentially assumes that the current subxact includes all
5359  * subxacts started after it.)
5360  */
5361  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
 /* NOTE(review): the for_each_event_chunk loop header dropped
  * (doxygen 5362 missing). */
5363  {
5364  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5365 
 /* NOTE(review): the flag mask on the next line's condition and the
  * mask cleared below were dropped (doxygen 5367, 5371 missing);
  * presumably the DONE|IN_PROGRESS status bits — confirm. */
5366  if (event->ate_flags &
5368  {
5369  if (evtshared->ats_firing_id >= subxact_firing_id)
5370  event->ate_flags &=
5372  }
5373  }
5374  }
5375 }
5376 
5377 /*
5378  * Get the transition table for the given event and depending on whether we are
5379  * processing the old or the new tuple.
5380  */
5381 static Tuplestorestate *
/*
 * NOTE(review): the name/first-parameter line dropped by the scrape
 * (doxygen 5382); per the comment above, this selects the transition
 * table (old or new) for the given trigger event.  Returns NULL when
 * no transition capture applies to this event/slot combination.
 */
5383  TupleTableSlot *oldslot,
5384  TupleTableSlot *newslot,
5385  TransitionCaptureState *transition_capture)
5386 {
5387  Tuplestorestate *tuplestore = NULL;
5388  bool delete_old_table = transition_capture->tcs_delete_old_table;
5389  bool update_old_table = transition_capture->tcs_update_old_table;
5390  bool update_new_table = transition_capture->tcs_update_new_table;
5391  bool insert_new_table = transition_capture->tcs_insert_new_table;
5392 
5393  /*
5394  * For INSERT events NEW should be non-NULL, for DELETE events OLD should
5395  * be non-NULL, whereas for UPDATE events normally both OLD and NEW are
5396  * non-NULL. But for UPDATE events fired for capturing transition tuples
5397  * during UPDATE partition-key row movement, OLD is NULL when the event is
5398  * for a row being inserted, whereas NEW is NULL when the event is for a
5399  * row being deleted.
5400  */
5401  Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5402  TupIsNull(oldslot)));
5403  Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5404  TupIsNull(newslot)));
5405 
5406  if (!TupIsNull(oldslot))
5407  {
5408  Assert(TupIsNull(newslot));
5409  if (event == TRIGGER_EVENT_DELETE && delete_old_table)
5410  tuplestore = transition_capture->tcs_private->old_del_tuplestore;
5411  else if (event == TRIGGER_EVENT_UPDATE && update_old_table)
5412  tuplestore = transition_capture->tcs_private->old_upd_tuplestore;
5413  }
5414  else if (!TupIsNull(newslot))
5415  {
5416  Assert(TupIsNull(oldslot));
5417  if (event == TRIGGER_EVENT_INSERT && insert_new_table)
5418  tuplestore = transition_capture->tcs_private->new_ins_tuplestore;
5419  else if (event == TRIGGER_EVENT_UPDATE && update_new_table)
5420  tuplestore = transition_capture->tcs_private->new_upd_tuplestore;
5421  }
5422 
5423  return tuplestore;
5424 }
5425 
5426 /*
5427  * Add the given heap tuple to the given tuplestore, applying the conversion
5428  * map if necessary.
5429  *
5430  * If original_insert_tuple is given, we can add that tuple without conversion.
5431  */
5432 static void
/*
 * NOTE(review): the name/first-parameter line dropped by the scrape
 * (doxygen 5433).  Per the comment above: appends `slot` to `tuplestore`,
 * converting from the child partition's row layout to the root table's
 * layout when a conversion map exists.
 */
5434  TransitionCaptureState *transition_capture,
5435  ResultRelInfo *relinfo,
5436  TupleTableSlot *slot,
5437  TupleTableSlot *original_insert_tuple,
5438  Tuplestorestate *tuplestore)
5439 {
5440  TupleConversionMap *map;
5441 
5442  /*
5443  * Nothing needs to be done if we don't have a tuplestore.
5444  */
5445  if (tuplestore == NULL)
5446  return;
5447 
 /* The original tuple is already in root format: store it unconverted. */
5448  if (original_insert_tuple)
5449  tuplestore_puttupleslot(tuplestore, original_insert_tuple);
5450  else if ((map = ExecGetChildToRootMap(relinfo)) != NULL)
5451  {
5452  AfterTriggersTableData *table = transition_capture->tcs_private;
5453  TupleTableSlot *storeslot;
5454 
 /* Convert child-format tuple into a reusable root-format slot first. */
5455  storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5456  execute_attr_map_slot(map->attrMap, slot, storeslot);
5457  tuplestore_puttupleslot(tuplestore, storeslot);
5458  }
5459  else
5460  tuplestore_puttupleslot(tuplestore, slot);
5461 }
5462 
5463 /* ----------
5464  * AfterTriggerEnlargeQueryState()
5465  *
5466  * Prepare the necessary state so that we can record AFTER trigger events
5467  * queued by a query. It is allowed to have nested queries within a
5468  * (sub)transaction, so we need to have separate state for each query
5469  * nesting level.
5470  * ----------
5471  */
5472 static void
/*
 * NOTE(review): signature line dropped by the scrape (doxygen 5473);
 * per the banner above, this is AfterTriggerEnlargeQueryState(void),
 * which grows query_stack so that the current query_depth has a slot.
 */
5474 {
5475  int init_depth = afterTriggers.maxquerydepth;
5476 
 /* NOTE(review): an Assert dropped here (doxygen 5477 missing). */
5478 
5479  if (afterTriggers.maxquerydepth == 0)
5480  {
5481  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
5482 
 /* NOTE(review): the initial palloc assignment dropped (doxygen
  * 5483-5484 missing; only the size expression survives below). */
5485  new_alloc * sizeof(AfterTriggersQueryData));
5486  afterTriggers.maxquerydepth = new_alloc;
5487  }
5488  else
5489  {
5490  /* repalloc will keep the stack in the same context */
5491  int old_alloc = afterTriggers.maxquerydepth;
5492  int new_alloc = Max(afterTriggers.query_depth + 1,
5493  old_alloc * 2);
5494 
 /* NOTE(review): the repalloc assignment dropped (doxygen 5495-5496). */
5497  new_alloc * sizeof(AfterTriggersQueryData));
5498  afterTriggers.maxquerydepth = new_alloc;
5499  }
5500 
5501  /* Initialize new array entries to empty */
5502  while (init_depth < afterTriggers.maxquerydepth)
5503  {
 /* NOTE(review): the `qs` pointer assignment dropped (doxygen 5504). */
5505 
5506  qs->events.head = NULL;
5507  qs->events.tail = NULL;
5508  qs->events.tailfree = NULL;
5509  qs->fdw_tuplestore = NULL;
5510  qs->tables = NIL;
5511 
5512  ++init_depth;
5513  }
5514 }
5515 
5516 /*
5517  * Create an empty SetConstraintState with room for numalloc trigstates
5518  */
5519 static SetConstraintState
/*
 * NOTE(review): the name/parameter line dropped by the scrape (doxygen
 * 5520), as was the local `state` declaration (5522).  Per the comment
 * above: allocates an empty SetConstraintState with room for `numalloc`
 * trigstates (flexible array member sized at the end of the struct).
 */
5521 {
5523 
5524  /* Behave sanely with numalloc == 0 */
5525  if (numalloc <= 0)
5526  numalloc = 1;
5527 
5528  /*
5529  * We assume that zeroing will correctly initialize the state values.
5530  */
 /* NOTE(review): the zeroing-allocation assignment dropped (doxygen
  * 5531-5532 missing; only the size expression survives below). */
5533  offsetof(SetConstraintStateData, trigstates) +
5534  numalloc * sizeof(SetConstraintTriggerData));
5535 
5536  state->numalloc = numalloc;
5537 
5538  return state;
5539 }
5540 
5541 /*
5542  * Copy a SetConstraintState
5543  */
5544 static SetConstraintState
/*
 * NOTE(review): the name/parameter line (doxygen 5545), the local `state`
 * declaration (5547), and the allocation of the copy (5549) were dropped
 * by the scrape.  Per the comment above, this deep-copies a
 * SetConstraintState, including its trigstates array contents.
 */
5546 {
5548 
5550 
5551  state->all_isset = origstate->all_isset;
5552  state->all_isdeferred = origstate->all_isdeferred;
5553  state->numstates = origstate->numstates;
5554  memcpy(state->trigstates, origstate->trigstates,
5555  origstate->numstates * sizeof(SetConstraintTriggerData));
5556 
5557  return state;
5558 }
5559 
5560 /*
5561  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
5562  * pointer to the state object (it will change if we have to repalloc).
5563  */
5564 static SetConstraintState
5566  Oid tgoid, bool tgisdeferred)
5567 {
5568  if (state->numstates >= state->numalloc)
5569  {
5570  int newalloc = state->numalloc * 2;
5571 
5572  newalloc = Max(newalloc, 8); /* in case original has size 0 */
5574  repalloc(state,
5575  offsetof(SetConstraintStateData, trigstates) +
5576  newalloc * sizeof(SetConstraintTriggerData));
5577  state->numalloc = newalloc;
5578  Assert(state->numstates < state->numalloc);
5579  }
5580