/*
 * PostgreSQL Source Code (git master) — trigger.c
 * (doxygen export header; original page said "Go to the documentation of this file.")
 */
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/htup_details.h"
18 #include "access/relation.h"
19 #include "access/sysattr.h"
20 #include "access/table.h"
21 #include "access/tableam.h"
22 #include "access/xact.h"
23 #include "catalog/catalog.h"
24 #include "catalog/dependency.h"
25 #include "catalog/index.h"
26 #include "catalog/indexing.h"
27 #include "catalog/objectaccess.h"
28 #include "catalog/partition.h"
29 #include "catalog/pg_constraint.h"
30 #include "catalog/pg_inherits.h"
31 #include "catalog/pg_proc.h"
32 #include "catalog/pg_trigger.h"
33 #include "catalog/pg_type.h"
34 #include "commands/dbcommands.h"
35 #include "commands/defrem.h"
36 #include "commands/trigger.h"
37 #include "executor/executor.h"
38 #include "executor/execPartition.h"
39 #include "miscadmin.h"
40 #include "nodes/bitmapset.h"
41 #include "nodes/makefuncs.h"
42 #include "optimizer/optimizer.h"
43 #include "parser/parse_clause.h"
44 #include "parser/parse_collate.h"
45 #include "parser/parse_func.h"
46 #include "parser/parse_relation.h"
47 #include "parser/parsetree.h"
48 #include "partitioning/partdesc.h"
49 #include "pgstat.h"
50 #include "rewrite/rewriteManip.h"
51 #include "storage/bufmgr.h"
52 #include "storage/lmgr.h"
53 #include "tcop/utility.h"
54 #include "utils/acl.h"
55 #include "utils/builtins.h"
56 #include "utils/bytea.h"
57 #include "utils/fmgroids.h"
58 #include "utils/inval.h"
59 #include "utils/lsyscache.h"
60 #include "utils/memutils.h"
61 #include "utils/rel.h"
62 #include "utils/snapmgr.h"
63 #include "utils/syscache.h"
64 #include "utils/tuplestore.h"
65 
66 
67 /* GUC variables */
69 
70 /* How many levels deep into trigger execution are we? */
71 static int MyTriggerDepth = 0;
72 
73 /* Local function prototypes */
74 static void renametrig_internal(Relation tgrel, Relation targetrel,
75  HeapTuple trigtup, const char *newname,
76  const char *expected_name);
77 static void renametrig_partition(Relation tgrel, Oid partitionId,
78  Oid parentTriggerOid, const char *newname,
79  const char *expected_name);
80 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
81 static bool GetTupleForTrigger(EState *estate,
82  EPQState *epqstate,
83  ResultRelInfo *relinfo,
84  ItemPointer tid,
85  LockTupleMode lockmode,
86  TupleTableSlot *oldslot,
87  TupleTableSlot **newSlot);
88 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
89  Trigger *trigger, TriggerEvent event,
90  Bitmapset *modifiedCols,
91  TupleTableSlot *oldslot, TupleTableSlot *newslot);
93  int tgindx,
94  FmgrInfo *finfo,
95  Instrumentation *instr,
96  MemoryContext per_tuple_context);
97 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
98  int event, bool row_trigger,
99  TupleTableSlot *oldtup, TupleTableSlot *newtup,
100  List *recheckIndexes, Bitmapset *modifiedCols,
101  TransitionCaptureState *transition_capture);
102 static void AfterTriggerEnlargeQueryState(void);
103 static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
104 
105 
106 /*
107  * Create a trigger. Returns the address of the created trigger.
108  *
109  * queryString is the source text of the CREATE TRIGGER command.
110  * This must be supplied if a whenClause is specified, else it can be NULL.
111  *
112  * relOid, if nonzero, is the relation on which the trigger should be
113  * created. If zero, the name provided in the statement will be looked up.
114  *
115  * refRelOid, if nonzero, is the relation to which the constraint trigger
116  * refers. If zero, the constraint relation name provided in the statement
117  * will be looked up as needed.
118  *
119  * constraintOid, if nonzero, says that this trigger is being created
120  * internally to implement that constraint. A suitable pg_depend entry will
121  * be made to link the trigger to that constraint. constraintOid is zero when
122  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
123  * TRIGGER, we build a pg_constraint entry internally.)
124  *
125  * indexOid, if nonzero, is the OID of an index associated with the constraint.
126  * We do nothing with this except store it into pg_trigger.tgconstrindid;
127  * but when creating a trigger for a deferrable unique constraint on a
128  * partitioned table, its children are looked up. Note we don't cope with
129  * invalid indexes in that case.
130  *
131  * funcoid, if nonzero, is the OID of the function to invoke. When this is
132  * given, stmt->funcname is ignored.
133  *
134  * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
135  * if that trigger is dropped, this one should be too. (This is passed as
136  * Invalid by most callers; it's set here when recursing on a partition.)
137  *
138  * If whenClause is passed, it is an already-transformed expression for
139  * WHEN. In this case, we ignore any that may come in stmt->whenClause.
140  *
141  * If isInternal is true then this is an internally-generated trigger.
142  * This argument sets the tgisinternal field of the pg_trigger entry, and
143  * if true causes us to modify the given trigger name to ensure uniqueness.
144  *
145  * When isInternal is not true we require ACL_TRIGGER permissions on the
146  * relation, as well as ACL_EXECUTE on the trigger function. For internal
147  * triggers the caller must apply any required permission checks.
148  *
149  * When called on partitioned tables, this function recurses to create the
150  * trigger on all the partitions, except if isInternal is true, in which
151  * case caller is expected to execute recursion on its own. in_partition
152  * indicates such a recursive call; outside callers should pass "false"
153  * (but see CloneRowTriggersToPartition).
154  */
156 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
157  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
158  Oid funcoid, Oid parentTriggerOid, Node *whenClause,
159  bool isInternal, bool in_partition)
160 {
161  return
162  CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
163  constraintOid, indexOid, funcoid,
164  parentTriggerOid, whenClause, isInternal,
165  in_partition, TRIGGER_FIRES_ON_ORIGIN);
166 }
167 
168 /*
169  * Like the above; additionally the firing condition
170  * (always/origin/replica/disabled) can be specified.
171  */
173 CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
174  Oid relOid, Oid refRelOid, Oid constraintOid,
175  Oid indexOid, Oid funcoid, Oid parentTriggerOid,
176  Node *whenClause, bool isInternal, bool in_partition,
177  char trigger_fires_when)
178 {
179  int16 tgtype;
180  int ncolumns;
181  int16 *columns;
182  int2vector *tgattr;
183  List *whenRtable;
184  char *qual;
185  Datum values[Natts_pg_trigger];
186  bool nulls[Natts_pg_trigger];
187  Relation rel;
188  AclResult aclresult;
189  Relation tgrel;
190  Relation pgrel;
191  HeapTuple tuple = NULL;
192  Oid funcrettype;
193  Oid trigoid = InvalidOid;
194  char internaltrigname[NAMEDATALEN];
195  char *trigname;
196  Oid constrrelid = InvalidOid;
197  ObjectAddress myself,
198  referenced;
199  char *oldtablename = NULL;
200  char *newtablename = NULL;
201  bool partition_recurse;
202  bool trigger_exists = false;
203  Oid existing_constraint_oid = InvalidOid;
204  bool existing_isInternal = false;
205 
206  if (OidIsValid(relOid))
207  rel = table_open(relOid, ShareRowExclusiveLock);
208  else
210 
211  /*
212  * Triggers must be on tables or views, and there are additional
213  * relation-type-specific restrictions.
214  */
215  if (rel->rd_rel->relkind == RELKIND_RELATION)
216  {
217  /* Tables can't have INSTEAD OF triggers */
218  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
219  stmt->timing != TRIGGER_TYPE_AFTER)
220  ereport(ERROR,
221  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
222  errmsg("\"%s\" is a table",
224  errdetail("Tables cannot have INSTEAD OF triggers.")));
225  }
226  else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
227  {
228  /* Partitioned tables can't have INSTEAD OF triggers */
229  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
230  stmt->timing != TRIGGER_TYPE_AFTER)
231  ereport(ERROR,
232  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
233  errmsg("\"%s\" is a table",
235  errdetail("Tables cannot have INSTEAD OF triggers.")));
236 
237  /*
238  * FOR EACH ROW triggers have further restrictions
239  */
240  if (stmt->row)
241  {
242  /*
243  * Disallow use of transition tables.
244  *
245  * Note that we have another restriction about transition tables
246  * in partitions; search for 'has_superclass' below for an
247  * explanation. The check here is just to protect from the fact
248  * that if we allowed it here, the creation would succeed for a
249  * partitioned table with no partitions, but would be blocked by
250  * the other restriction when the first partition was created,
251  * which is very unfriendly behavior.
252  */
253  if (stmt->transitionRels != NIL)
254  ereport(ERROR,
255  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
256  errmsg("\"%s\" is a partitioned table",
258  errdetail("Triggers on partitioned tables cannot have transition tables.")));
259  }
260  }
261  else if (rel->rd_rel->relkind == RELKIND_VIEW)
262  {
263  /*
264  * Views can have INSTEAD OF triggers (which we check below are
265  * row-level), or statement-level BEFORE/AFTER triggers.
266  */
267  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
268  ereport(ERROR,
269  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
270  errmsg("\"%s\" is a view",
272  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
273  /* Disallow TRUNCATE triggers on VIEWs */
274  if (TRIGGER_FOR_TRUNCATE(stmt->events))
275  ereport(ERROR,
276  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
277  errmsg("\"%s\" is a view",
279  errdetail("Views cannot have TRUNCATE triggers.")));
280  }
281  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
282  {
283  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
284  stmt->timing != TRIGGER_TYPE_AFTER)
285  ereport(ERROR,
286  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
287  errmsg("\"%s\" is a foreign table",
289  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
290 
291  if (TRIGGER_FOR_TRUNCATE(stmt->events))
292  ereport(ERROR,
293  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
294  errmsg("\"%s\" is a foreign table",
296  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
297 
298  /*
299  * We disallow constraint triggers to protect the assumption that
300  * triggers on FKs can't be deferred. See notes with AfterTriggers
301  * data structures, below.
302  */
303  if (stmt->isconstraint)
304  ereport(ERROR,
305  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
306  errmsg("\"%s\" is a foreign table",
308  errdetail("Foreign tables cannot have constraint triggers.")));
309  }
310  else
311  ereport(ERROR,
312  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
313  errmsg("relation \"%s\" cannot have triggers",
315  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
316 
318  ereport(ERROR,
319  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
320  errmsg("permission denied: \"%s\" is a system catalog",
321  RelationGetRelationName(rel))));
322 
323  if (stmt->isconstraint)
324  {
325  /*
326  * We must take a lock on the target relation to protect against
327  * concurrent drop. It's not clear that AccessShareLock is strong
328  * enough, but we certainly need at least that much... otherwise, we
329  * might end up creating a pg_constraint entry referencing a
330  * nonexistent table.
331  */
332  if (OidIsValid(refRelOid))
333  {
334  LockRelationOid(refRelOid, AccessShareLock);
335  constrrelid = refRelOid;
336  }
337  else if (stmt->constrrel != NULL)
338  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
339  false);
340  }
341 
342  /* permission checks */
343  if (!isInternal)
344  {
345  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
346  ACL_TRIGGER);
347  if (aclresult != ACLCHECK_OK)
348  aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
350 
351  if (OidIsValid(constrrelid))
352  {
353  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
354  ACL_TRIGGER);
355  if (aclresult != ACLCHECK_OK)
356  aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
357  get_rel_name(constrrelid));
358  }
359  }
360 
361  /*
362  * When called on a partitioned table to create a FOR EACH ROW trigger
363  * that's not internal, we create one trigger for each partition, too.
364  *
365  * For that, we'd better hold lock on all of them ahead of time.
366  */
367  partition_recurse = !isInternal && stmt->row &&
368  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
369  if (partition_recurse)
371  ShareRowExclusiveLock, NULL));
372 
373  /* Compute tgtype */
374  TRIGGER_CLEAR_TYPE(tgtype);
375  if (stmt->row)
376  TRIGGER_SETT_ROW(tgtype);
377  tgtype |= stmt->timing;
378  tgtype |= stmt->events;
379 
380  /* Disallow ROW-level TRUNCATE triggers */
381  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
382  ereport(ERROR,
383  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
384  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
385 
386  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
387  if (TRIGGER_FOR_INSTEAD(tgtype))
388  {
389  if (!TRIGGER_FOR_ROW(tgtype))
390  ereport(ERROR,
391  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
392  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
393  if (stmt->whenClause)
394  ereport(ERROR,
395  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
396  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
397  if (stmt->columns != NIL)
398  ereport(ERROR,
399  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
400  errmsg("INSTEAD OF triggers cannot have column lists")));
401  }
402 
403  /*
404  * We don't yet support naming ROW transition variables, but the parser
405  * recognizes the syntax so we can give a nicer message here.
406  *
407  * Per standard, REFERENCING TABLE names are only allowed on AFTER
408  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
409  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
410  * only allowed once. Per standard, OLD may not be specified when
411  * creating a trigger only for INSERT, and NEW may not be specified when
412  * creating a trigger only for DELETE.
413  *
414  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
415  * reference both ROW and TABLE transition data.
416  */
417  if (stmt->transitionRels != NIL)
418  {
419  List *varList = stmt->transitionRels;
420  ListCell *lc;
421 
422  foreach(lc, varList)
423  {
425 
426  if (!(tt->isTable))
427  ereport(ERROR,
428  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
429  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
430  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
431 
432  /*
433  * Because of the above test, we omit further ROW-related testing
434  * below. If we later allow naming OLD and NEW ROW variables,
435  * adjustments will be needed below.
436  */
437 
438  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
439  ereport(ERROR,
440  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
441  errmsg("\"%s\" is a foreign table",
443  errdetail("Triggers on foreign tables cannot have transition tables.")));
444 
445  if (rel->rd_rel->relkind == RELKIND_VIEW)
446  ereport(ERROR,
447  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
448  errmsg("\"%s\" is a view",
450  errdetail("Triggers on views cannot have transition tables.")));
451 
452  /*
453  * We currently don't allow row-level triggers with transition
454  * tables on partition or inheritance children. Such triggers
455  * would somehow need to see tuples converted to the format of the
456  * table they're attached to, and it's not clear which subset of
457  * tuples each child should see. See also the prohibitions in
458  * ATExecAttachPartition() and ATExecAddInherit().
459  */
460  if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
461  {
462  /* Use appropriate error message. */
463  if (rel->rd_rel->relispartition)
464  ereport(ERROR,
465  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
466  errmsg("ROW triggers with transition tables are not supported on partitions")));
467  else
468  ereport(ERROR,
469  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
470  errmsg("ROW triggers with transition tables are not supported on inheritance children")));
471  }
472 
473  if (stmt->timing != TRIGGER_TYPE_AFTER)
474  ereport(ERROR,
475  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
476  errmsg("transition table name can only be specified for an AFTER trigger")));
477 
478  if (TRIGGER_FOR_TRUNCATE(tgtype))
479  ereport(ERROR,
480  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
481  errmsg("TRUNCATE triggers with transition tables are not supported")));
482 
483  /*
484  * We currently don't allow multi-event triggers ("INSERT OR
485  * UPDATE") with transition tables, because it's not clear how to
486  * handle INSERT ... ON CONFLICT statements which can fire both
487  * INSERT and UPDATE triggers. We show the inserted tuples to
488  * INSERT triggers and the updated tuples to UPDATE triggers, but
489  * it's not yet clear what INSERT OR UPDATE trigger should see.
490  * This restriction could be lifted if we can decide on the right
491  * semantics in a later release.
492  */
493  if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
494  (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
495  (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
496  ereport(ERROR,
497  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
498  errmsg("transition tables cannot be specified for triggers with more than one event")));
499 
500  /*
501  * We currently don't allow column-specific triggers with
502  * transition tables. Per spec, that seems to require
503  * accumulating separate transition tables for each combination of
504  * columns, which is a lot of work for a rather marginal feature.
505  */
506  if (stmt->columns != NIL)
507  ereport(ERROR,
508  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
509  errmsg("transition tables cannot be specified for triggers with column lists")));
510 
511  /*
512  * We disallow constraint triggers with transition tables, to
513  * protect the assumption that such triggers can't be deferred.
514  * See notes with AfterTriggers data structures, below.
515  *
516  * Currently this is enforced by the grammar, so just Assert here.
517  */
518  Assert(!stmt->isconstraint);
519 
520  if (tt->isNew)
521  {
522  if (!(TRIGGER_FOR_INSERT(tgtype) ||
523  TRIGGER_FOR_UPDATE(tgtype)))
524  ereport(ERROR,
525  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
526  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
527 
528  if (newtablename != NULL)
529  ereport(ERROR,
530  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
531  errmsg("NEW TABLE cannot be specified multiple times")));
532 
533  newtablename = tt->name;
534  }
535  else
536  {
537  if (!(TRIGGER_FOR_DELETE(tgtype) ||
538  TRIGGER_FOR_UPDATE(tgtype)))
539  ereport(ERROR,
540  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
541  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
542 
543  if (oldtablename != NULL)
544  ereport(ERROR,
545  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
546  errmsg("OLD TABLE cannot be specified multiple times")));
547 
548  oldtablename = tt->name;
549  }
550  }
551 
552  if (newtablename != NULL && oldtablename != NULL &&
553  strcmp(newtablename, oldtablename) == 0)
554  ereport(ERROR,
555  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
556  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
557  }
558 
559  /*
560  * Parse the WHEN clause, if any and we weren't passed an already
561  * transformed one.
562  *
563  * Note that as a side effect, we fill whenRtable when parsing. If we got
564  * an already parsed clause, this does not occur, which is what we want --
565  * no point in adding redundant dependencies below.
566  */
567  if (!whenClause && stmt->whenClause)
568  {
569  ParseState *pstate;
570  ParseNamespaceItem *nsitem;
571  List *varList;
572  ListCell *lc;
573 
574  /* Set up a pstate to parse with */
575  pstate = make_parsestate(NULL);
576  pstate->p_sourcetext = queryString;
577 
578  /*
579  * Set up nsitems for OLD and NEW references.
580  *
581  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
582  */
583  nsitem = addRangeTableEntryForRelation(pstate, rel,
585  makeAlias("old", NIL),
586  false, false);
587  addNSItemToQuery(pstate, nsitem, false, true, true);
588  nsitem = addRangeTableEntryForRelation(pstate, rel,
590  makeAlias("new", NIL),
591  false, false);
592  addNSItemToQuery(pstate, nsitem, false, true, true);
593 
594  /* Transform expression. Copy to be sure we don't modify original */
595  whenClause = transformWhereClause(pstate,
596  copyObject(stmt->whenClause),
598  "WHEN");
599  /* we have to fix its collations too */
600  assign_expr_collations(pstate, whenClause);
601 
602  /*
603  * Check for disallowed references to OLD/NEW.
604  *
605  * NB: pull_var_clause is okay here only because we don't allow
606  * subselects in WHEN clauses; it would fail to examine the contents
607  * of subselects.
608  */
609  varList = pull_var_clause(whenClause, 0);
610  foreach(lc, varList)
611  {
612  Var *var = (Var *) lfirst(lc);
613 
614  switch (var->varno)
615  {
616  case PRS2_OLD_VARNO:
617  if (!TRIGGER_FOR_ROW(tgtype))
618  ereport(ERROR,
619  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
620  errmsg("statement trigger's WHEN condition cannot reference column values"),
621  parser_errposition(pstate, var->location)));
622  if (TRIGGER_FOR_INSERT(tgtype))
623  ereport(ERROR,
624  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
625  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
626  parser_errposition(pstate, var->location)));
627  /* system columns are okay here */
628  break;
629  case PRS2_NEW_VARNO:
630  if (!TRIGGER_FOR_ROW(tgtype))
631  ereport(ERROR,
632  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
633  errmsg("statement trigger's WHEN condition cannot reference column values"),
634  parser_errposition(pstate, var->location)));
635  if (TRIGGER_FOR_DELETE(tgtype))
636  ereport(ERROR,
637  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
638  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
639  parser_errposition(pstate, var->location)));
640  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
641  ereport(ERROR,
642  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
643  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
644  parser_errposition(pstate, var->location)));
645  if (TRIGGER_FOR_BEFORE(tgtype) &&
646  var->varattno == 0 &&
647  RelationGetDescr(rel)->constr &&
648  RelationGetDescr(rel)->constr->has_generated_stored)
649  ereport(ERROR,
650  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
651  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
652  errdetail("A whole-row reference is used and the table contains generated columns."),
653  parser_errposition(pstate, var->location)));
654  if (TRIGGER_FOR_BEFORE(tgtype) &&
655  var->varattno > 0 &&
656  TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
657  ereport(ERROR,
658  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
659  errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
660  errdetail("Column \"%s\" is a generated column.",
661  NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
662  parser_errposition(pstate, var->location)));
663  break;
664  default:
665  /* can't happen without add_missing_from, so just elog */
666  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
667  break;
668  }
669  }
670 
671  /* we'll need the rtable for recordDependencyOnExpr */
672  whenRtable = pstate->p_rtable;
673 
674  qual = nodeToString(whenClause);
675 
676  free_parsestate(pstate);
677  }
678  else if (!whenClause)
679  {
680  whenClause = NULL;
681  whenRtable = NIL;
682  qual = NULL;
683  }
684  else
685  {
686  qual = nodeToString(whenClause);
687  whenRtable = NIL;
688  }
689 
690  /*
691  * Find and validate the trigger function.
692  */
693  if (!OidIsValid(funcoid))
694  funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
695  if (!isInternal)
696  {
697  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
698  if (aclresult != ACLCHECK_OK)
699  aclcheck_error(aclresult, OBJECT_FUNCTION,
700  NameListToString(stmt->funcname));
701  }
702  funcrettype = get_func_rettype(funcoid);
703  if (funcrettype != TRIGGEROID)
704  ereport(ERROR,
705  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
706  errmsg("function %s must return type %s",
707  NameListToString(stmt->funcname), "trigger")));
708 
709  /*
710  * Scan pg_trigger to see if there is already a trigger of the same name.
711  * Skip this for internally generated triggers, since we'll modify the
712  * name to be unique below.
713  *
714  * NOTE that this is cool only because we have ShareRowExclusiveLock on
715  * the relation, so the trigger set won't be changing underneath us.
716  */
717  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
718  if (!isInternal)
719  {
720  ScanKeyData skeys[2];
721  SysScanDesc tgscan;
722 
723  ScanKeyInit(&skeys[0],
724  Anum_pg_trigger_tgrelid,
725  BTEqualStrategyNumber, F_OIDEQ,
727 
728  ScanKeyInit(&skeys[1],
729  Anum_pg_trigger_tgname,
730  BTEqualStrategyNumber, F_NAMEEQ,
731  CStringGetDatum(stmt->trigname));
732 
733  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
734  NULL, 2, skeys);
735 
736  /* There should be at most one matching tuple */
737  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
738  {
739  Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
740 
741  trigoid = oldtrigger->oid;
742  existing_constraint_oid = oldtrigger->tgconstraint;
743  existing_isInternal = oldtrigger->tgisinternal;
744  trigger_exists = true;
745  /* copy the tuple to use in CatalogTupleUpdate() */
746  tuple = heap_copytuple(tuple);
747  }
748  systable_endscan(tgscan);
749  }
750 
751  if (!trigger_exists)
752  {
753  /* Generate the OID for the new trigger. */
754  trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
755  Anum_pg_trigger_oid);
756  }
757  else
758  {
759  /*
760  * If OR REPLACE was specified, we'll replace the old trigger;
761  * otherwise complain about the duplicate name.
762  */
763  if (!stmt->replace)
764  ereport(ERROR,
766  errmsg("trigger \"%s\" for relation \"%s\" already exists",
767  stmt->trigname, RelationGetRelationName(rel))));
768 
769  /*
770  * An internal trigger cannot be replaced by a user-defined trigger.
771  * However, skip this test when in_partition, because then we're
772  * recursing from a partitioned table and the check was made at the
773  * parent level. Child triggers will always be marked "internal" (so
774  * this test does protect us from the user trying to replace a child
775  * trigger directly).
776  */
777  if (existing_isInternal && !isInternal && !in_partition)
778  ereport(ERROR,
780  errmsg("trigger \"%s\" for relation \"%s\" is an internal trigger",
781  stmt->trigname, RelationGetRelationName(rel))));
782 
783  /*
784  * It is not allowed to replace with a constraint trigger; gram.y
785  * should have enforced this already.
786  */
787  Assert(!stmt->isconstraint);
788 
789  /*
790  * It is not allowed to replace an existing constraint trigger,
791  * either. (The reason for these restrictions is partly that it seems
792  * difficult to deal with pending trigger events in such cases, and
793  * partly that the command might imply changing the constraint's
794  * properties as well, which doesn't seem nice.)
795  */
796  if (OidIsValid(existing_constraint_oid))
797  ereport(ERROR,
799  errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
800  stmt->trigname, RelationGetRelationName(rel))));
801  }
802 
803  /*
804  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
805  * corresponding pg_constraint entry.
806  */
807  if (stmt->isconstraint && !OidIsValid(constraintOid))
808  {
809  /* Internal callers should have made their own constraints */
810  Assert(!isInternal);
811  constraintOid = CreateConstraintEntry(stmt->trigname,
813  CONSTRAINT_TRIGGER,
814  stmt->deferrable,
815  stmt->initdeferred,
816  true,
817  InvalidOid, /* no parent */
818  RelationGetRelid(rel),
819  NULL, /* no conkey */
820  0,
821  0,
822  InvalidOid, /* no domain */
823  InvalidOid, /* no index */
824  InvalidOid, /* no foreign key */
825  NULL,
826  NULL,
827  NULL,
828  NULL,
829  0,
830  ' ',
831  ' ',
832  ' ',
833  NULL, /* no exclusion */
834  NULL, /* no check constraint */
835  NULL,
836  true, /* islocal */
837  0, /* inhcount */
838  true, /* noinherit */
839  isInternal); /* is_internal */
840  }
841 
842  /*
843  * If trigger is internally generated, modify the provided trigger name to
844  * ensure uniqueness by appending the trigger OID. (Callers will usually
845  * supply a simple constant trigger name in these cases.)
846  */
847  if (isInternal)
848  {
849  snprintf(internaltrigname, sizeof(internaltrigname),
850  "%s_%u", stmt->trigname, trigoid);
851  trigname = internaltrigname;
852  }
853  else
854  {
855  /* user-defined trigger; use the specified trigger name as-is */
856  trigname = stmt->trigname;
857  }
858 
859  /*
860  * Build the new pg_trigger tuple.
861  *
862  * When we're creating a trigger in a partition, we mark it as internal,
863  * even though we don't do the isInternal magic in this function. This
864  * makes the triggers in partitions identical to the ones in the
865  * partitioned tables, except that they are marked internal.
866  */
867  memset(nulls, false, sizeof(nulls));
868 
869  values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
870  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
871  values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
872  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
873  CStringGetDatum(trigname));
874  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
875  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
876  values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
877  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal || in_partition);
878  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
879  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
880  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
881  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
882  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
883 
884  if (stmt->args)
885  {
886  ListCell *le;
887  char *args;
888  int16 nargs = list_length(stmt->args);
889  int len = 0;
890 
891  foreach(le, stmt->args)
892  {
893  char *ar = strVal(lfirst(le));
894 
895  len += strlen(ar) + 4;
896  for (; *ar; ar++)
897  {
898  if (*ar == '\\')
899  len++;
900  }
901  }
902  args = (char *) palloc(len + 1);
903  args[0] = '\0';
904  foreach(le, stmt->args)
905  {
906  char *s = strVal(lfirst(le));
907  char *d = args + strlen(args);
908 
909  while (*s)
910  {
911  if (*s == '\\')
912  *d++ = '\\';
913  *d++ = *s++;
914  }
915  strcpy(d, "\\000");
916  }
917  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
918  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
919  CStringGetDatum(args));
920  }
921  else
922  {
923  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
924  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
925  CStringGetDatum(""));
926  }
927 
928  /* build column number array if it's a column-specific trigger */
929  ncolumns = list_length(stmt->columns);
930  if (ncolumns == 0)
931  columns = NULL;
932  else
933  {
934  ListCell *cell;
935  int i = 0;
936 
937  columns = (int16 *) palloc(ncolumns * sizeof(int16));
938  foreach(cell, stmt->columns)
939  {
940  char *name = strVal(lfirst(cell));
941  int16 attnum;
942  int j;
943 
944  /* Lookup column name. System columns are not allowed */
945  attnum = attnameAttNum(rel, name, false);
946  if (attnum == InvalidAttrNumber)
947  ereport(ERROR,
948  (errcode(ERRCODE_UNDEFINED_COLUMN),
949  errmsg("column \"%s\" of relation \"%s\" does not exist",
950  name, RelationGetRelationName(rel))));
951 
952  /* Check for duplicates */
953  for (j = i - 1; j >= 0; j--)
954  {
955  if (columns[j] == attnum)
956  ereport(ERROR,
957  (errcode(ERRCODE_DUPLICATE_COLUMN),
958  errmsg("column \"%s\" specified more than once",
959  name)));
960  }
961 
962  columns[i++] = attnum;
963  }
964  }
965  tgattr = buildint2vector(columns, ncolumns);
966  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
967 
968  /* set tgqual if trigger has WHEN clause */
969  if (qual)
970  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
971  else
972  nulls[Anum_pg_trigger_tgqual - 1] = true;
973 
974  if (oldtablename)
975  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
976  CStringGetDatum(oldtablename));
977  else
978  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
979  if (newtablename)
980  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
981  CStringGetDatum(newtablename));
982  else
983  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
984 
985  /*
986  * Insert or replace tuple in pg_trigger.
987  */
988  if (!trigger_exists)
989  {
990  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
991  CatalogTupleInsert(tgrel, tuple);
992  }
993  else
994  {
995  HeapTuple newtup;
996 
997  newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
998  CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
999  heap_freetuple(newtup);
1000  }
1001 
1002  heap_freetuple(tuple); /* free either original or new tuple */
1003  table_close(tgrel, RowExclusiveLock);
1004 
1005  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
1006  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
1007  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
1008  if (oldtablename)
1009  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1010  if (newtablename)
1011  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1012 
1013  /*
1014  * Update relation's pg_class entry; if necessary; and if not, send an SI
1015  * message to make other backends (and this one) rebuild relcache entries.
1016  */
1017  pgrel = table_open(RelationRelationId, RowExclusiveLock);
1018  tuple = SearchSysCacheCopy1(RELOID,
1020  if (!HeapTupleIsValid(tuple))
1021  elog(ERROR, "cache lookup failed for relation %u",
1022  RelationGetRelid(rel));
1023  if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1024  {
1025  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1026 
1027  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1028 
1030  }
1031  else
1033 
1034  heap_freetuple(tuple);
1035  table_close(pgrel, RowExclusiveLock);
1036 
1037  /*
1038  * If we're replacing a trigger, flush all the old dependencies before
1039  * recording new ones.
1040  */
1041  if (trigger_exists)
1042  deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1043 
1044  /*
1045  * Record dependencies for trigger. Always place a normal dependency on
1046  * the function.
1047  */
1048  myself.classId = TriggerRelationId;
1049  myself.objectId = trigoid;
1050  myself.objectSubId = 0;
1051 
1052  referenced.classId = ProcedureRelationId;
1053  referenced.objectId = funcoid;
1054  referenced.objectSubId = 0;
1055  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1056 
1057  if (isInternal && OidIsValid(constraintOid))
1058  {
1059  /*
1060  * Internally-generated trigger for a constraint, so make it an
1061  * internal dependency of the constraint. We can skip depending on
1062  * the relation(s), as there'll be an indirect dependency via the
1063  * constraint.
1064  */
1065  referenced.classId = ConstraintRelationId;
1066  referenced.objectId = constraintOid;
1067  referenced.objectSubId = 0;
1068  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1069  }
1070  else
1071  {
1072  /*
1073  * User CREATE TRIGGER, so place dependencies. We make trigger be
1074  * auto-dropped if its relation is dropped or if the FK relation is
1075  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1076  */
1077  referenced.classId = RelationRelationId;
1078  referenced.objectId = RelationGetRelid(rel);
1079  referenced.objectSubId = 0;
1080  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1081 
1082  if (OidIsValid(constrrelid))
1083  {
1084  referenced.classId = RelationRelationId;
1085  referenced.objectId = constrrelid;
1086  referenced.objectSubId = 0;
1087  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1088  }
1089  /* Not possible to have an index dependency in this case */
1090  Assert(!OidIsValid(indexOid));
1091 
1092  /*
1093  * If it's a user-specified constraint trigger, make the constraint
1094  * internally dependent on the trigger instead of vice versa.
1095  */
1096  if (OidIsValid(constraintOid))
1097  {
1098  referenced.classId = ConstraintRelationId;
1099  referenced.objectId = constraintOid;
1100  referenced.objectSubId = 0;
1101  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1102  }
1103 
1104  /*
1105  * If it's a partition trigger, create the partition dependencies.
1106  */
1107  if (OidIsValid(parentTriggerOid))
1108  {
1109  ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1110  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1111  ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1112  recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1113  }
1114  }
1115 
1116  /* If column-specific trigger, add normal dependencies on columns */
1117  if (columns != NULL)
1118  {
1119  int i;
1120 
1121  referenced.classId = RelationRelationId;
1122  referenced.objectId = RelationGetRelid(rel);
1123  for (i = 0; i < ncolumns; i++)
1124  {
1125  referenced.objectSubId = columns[i];
1126  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1127  }
1128  }
1129 
1130  /*
1131  * If it has a WHEN clause, add dependencies on objects mentioned in the
1132  * expression (eg, functions, as well as any columns used).
1133  */
1134  if (whenRtable != NIL)
1135  recordDependencyOnExpr(&myself, whenClause, whenRtable,
1137 
1138  /* Post creation hook for new trigger */
1139  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1140  isInternal);
1141 
1142  /*
1143  * Lastly, create the trigger on child relations, if needed.
1144  */
1145  if (partition_recurse)
1146  {
1147  PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1148  List *idxs = NIL;
1149  List *childTbls = NIL;
1150  ListCell *l;
1151  int i;
1152  MemoryContext oldcxt,
1153  perChildCxt;
1154 
1156  "part trig clone",
1158 
1159  /*
1160  * When a trigger is being created associated with an index, we'll
1161  * need to associate the trigger in each child partition with the
1162  * corresponding index on it.
1163  */
1164  if (OidIsValid(indexOid))
1165  {
1166  ListCell *l;
1167  List *idxs = NIL;
1168 
1170  foreach(l, idxs)
1171  childTbls = lappend_oid(childTbls,
1173  false));
1174  }
1175 
1176  oldcxt = MemoryContextSwitchTo(perChildCxt);
1177 
1178  /* Iterate to create the trigger on each existing partition */
1179  for (i = 0; i < partdesc->nparts; i++)
1180  {
1181  Oid indexOnChild = InvalidOid;
1182  ListCell *l2;
1183  CreateTrigStmt *childStmt;
1184  Relation childTbl;
1185  Node *qual;
1186 
1187  childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1188 
1189  /* Find which of the child indexes is the one on this partition */
1190  if (OidIsValid(indexOid))
1191  {
1192  forboth(l, idxs, l2, childTbls)
1193  {
1194  if (lfirst_oid(l2) == partdesc->oids[i])
1195  {
1196  indexOnChild = lfirst_oid(l);
1197  break;
1198  }
1199  }
1200  if (!OidIsValid(indexOnChild))
1201  elog(ERROR, "failed to find index matching index \"%s\" in partition \"%s\"",
1202  get_rel_name(indexOid),
1203  get_rel_name(partdesc->oids[i]));
1204  }
1205 
1206  /*
1207  * Initialize our fabricated parse node by copying the original
1208  * one, then resetting fields that we pass separately.
1209  */
1210  childStmt = (CreateTrigStmt *) copyObject(stmt);
1211  childStmt->funcname = NIL;
1212  childStmt->whenClause = NULL;
1213 
1214  /* If there is a WHEN clause, create a modified copy of it */
1215  qual = copyObject(whenClause);
1216  qual = (Node *)
1218  childTbl, rel);
1219  qual = (Node *)
1221  childTbl, rel);
1222 
1223  CreateTriggerFiringOn(childStmt, queryString,
1224  partdesc->oids[i], refRelOid,
1225  InvalidOid, indexOnChild,
1226  funcoid, trigoid, qual,
1227  isInternal, true, trigger_fires_when);
1228 
1229  table_close(childTbl, NoLock);
1230 
1231  MemoryContextReset(perChildCxt);
1232  }
1233 
1234  MemoryContextSwitchTo(oldcxt);
1235  MemoryContextDelete(perChildCxt);
1236  list_free(idxs);
1237  list_free(childTbls);
1238  }
1239 
1240  /* Keep lock on target rel until end of xact */
1241  table_close(rel, NoLock);
1242 
1243  return myself;
1244 }
1245 
1246 
1247 /*
1248  * Guts of trigger deletion.
1249  */
1250 void
1252 {
1253  Relation tgrel;
1254  SysScanDesc tgscan;
1255  ScanKeyData skey[1];
1256  HeapTuple tup;
1257  Oid relid;
1258  Relation rel;
1259 
1260  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1261 
1262  /*
1263  * Find the trigger to delete.
1264  */
1265  ScanKeyInit(&skey[0],
1266  Anum_pg_trigger_oid,
1267  BTEqualStrategyNumber, F_OIDEQ,
1268  ObjectIdGetDatum(trigOid));
1269 
1270  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1271  NULL, 1, skey);
1272 
1273  tup = systable_getnext(tgscan);
1274  if (!HeapTupleIsValid(tup))
1275  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1276 
1277  /*
1278  * Open and exclusive-lock the relation the trigger belongs to.
1279  */
1280  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1281 
1282  rel = table_open(relid, AccessExclusiveLock);
1283 
1284  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1285  rel->rd_rel->relkind != RELKIND_VIEW &&
1286  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1287  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1288  ereport(ERROR,
1289  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1290  errmsg("relation \"%s\" cannot have triggers",
1292  errdetail_relkind_not_supported(rel->rd_rel->relkind)));
1293 
1295  ereport(ERROR,
1296  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1297  errmsg("permission denied: \"%s\" is a system catalog",
1298  RelationGetRelationName(rel))));
1299 
1300  /*
1301  * Delete the pg_trigger tuple.
1302  */
1303  CatalogTupleDelete(tgrel, &tup->t_self);
1304 
1305  systable_endscan(tgscan);
1306  table_close(tgrel, RowExclusiveLock);
1307 
1308  /*
1309  * We do not bother to try to determine whether any other triggers remain,
1310  * which would be needed in order to decide whether it's safe to clear the
1311  * relation's relhastriggers. (In any case, there might be a concurrent
1312  * process adding new triggers.) Instead, just force a relcache inval to
1313  * make other backends (and this one too!) rebuild their relcache entries.
1314  * There's no great harm in leaving relhastriggers true even if there are
1315  * no triggers left.
1316  */
1318 
1319  /* Keep lock on trigger's rel until end of xact */
1320  table_close(rel, NoLock);
1321 }
1322 
1323 /*
1324  * get_trigger_oid - Look up a trigger by name to find its OID.
1325  *
1326  * If missing_ok is false, throw an error if trigger not found. If
1327  * true, just return InvalidOid.
1328  */
1329 Oid
1330 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1331 {
1332  Relation tgrel;
1333  ScanKeyData skey[2];
1334  SysScanDesc tgscan;
1335  HeapTuple tup;
1336  Oid oid;
1337 
1338  /*
1339  * Find the trigger, verify permissions, set up object address
1340  */
1341  tgrel = table_open(TriggerRelationId, AccessShareLock);
1342 
1343  ScanKeyInit(&skey[0],
1344  Anum_pg_trigger_tgrelid,
1345  BTEqualStrategyNumber, F_OIDEQ,
1346  ObjectIdGetDatum(relid));
1347  ScanKeyInit(&skey[1],
1348  Anum_pg_trigger_tgname,
1349  BTEqualStrategyNumber, F_NAMEEQ,
1350  CStringGetDatum(trigname));
1351 
1352  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1353  NULL, 2, skey);
1354 
1355  tup = systable_getnext(tgscan);
1356 
1357  if (!HeapTupleIsValid(tup))
1358  {
1359  if (!missing_ok)
1360  ereport(ERROR,
1361  (errcode(ERRCODE_UNDEFINED_OBJECT),
1362  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1363  trigname, get_rel_name(relid))));
1364  oid = InvalidOid;
1365  }
1366  else
1367  {
1368  oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1369  }
1370 
1371  systable_endscan(tgscan);
1372  table_close(tgrel, AccessShareLock);
1373  return oid;
1374 }
1375 
1376 /*
1377  * Perform permissions and integrity checks before acquiring a relation lock.
1378  */
1379 static void
1381  void *arg)
1382 {
1383  HeapTuple tuple;
1384  Form_pg_class form;
1385 
1386  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1387  if (!HeapTupleIsValid(tuple))
1388  return; /* concurrently dropped */
1389  form = (Form_pg_class) GETSTRUCT(tuple);
1390 
1391  /* only tables and views can have triggers */
1392  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1393  form->relkind != RELKIND_FOREIGN_TABLE &&
1394  form->relkind != RELKIND_PARTITIONED_TABLE)
1395  ereport(ERROR,
1396  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1397  errmsg("relation \"%s\" cannot have triggers",
1398  rv->relname),
1399  errdetail_relkind_not_supported(form->relkind)));
1400 
1401  /* you must own the table to rename one of its triggers */
1402  if (!pg_class_ownercheck(relid, GetUserId()))
1404  if (!allowSystemTableMods && IsSystemClass(relid, form))
1405  ereport(ERROR,
1406  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1407  errmsg("permission denied: \"%s\" is a system catalog",
1408  rv->relname)));
1409 
1410  ReleaseSysCache(tuple);
1411 }
1412 
1413 /*
1414  * renametrig - changes the name of a trigger on a relation
1415  *
1416  * trigger name is changed in trigger catalog.
1417  * No record of the previous name is kept.
1418  *
1419  * get proper relrelation from relation catalog (if not arg)
1420  * scan trigger catalog
1421  * for name conflict (within rel)
1422  * for original trigger (if not arg)
1423  * modify tgname in trigger tuple
1424  * update row in catalog
1425  */
1428 {
1429  Oid tgoid;
1430  Relation targetrel;
1431  Relation tgrel;
1432  HeapTuple tuple;
1433  SysScanDesc tgscan;
1434  ScanKeyData key[2];
1435  Oid relid;
1436  ObjectAddress address;
1437 
1438  /*
1439  * Look up name, check permissions, and acquire lock (which we will NOT
1440  * release until end of transaction).
1441  */
1443  0,
1445  NULL);
1446 
1447  /* Have lock already, so just need to build relcache entry. */
1448  targetrel = relation_open(relid, NoLock);
1449 
1450  /*
1451  * On partitioned tables, this operation recurses to partitions. Lock all
1452  * tables upfront.
1453  */
1454  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1455  (void) find_all_inheritors(relid, AccessExclusiveLock, NULL);
1456 
1457  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1458 
1459  /*
1460  * Search for the trigger to modify.
1461  */
1462  ScanKeyInit(&key[0],
1463  Anum_pg_trigger_tgrelid,
1464  BTEqualStrategyNumber, F_OIDEQ,
1465  ObjectIdGetDatum(relid));
1466  ScanKeyInit(&key[1],
1467  Anum_pg_trigger_tgname,
1468  BTEqualStrategyNumber, F_NAMEEQ,
1469  PointerGetDatum(stmt->subname));
1470  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1471  NULL, 2, key);
1472  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1473  {
1474  Form_pg_trigger trigform;
1475 
1476  trigform = (Form_pg_trigger) GETSTRUCT(tuple);
1477  tgoid = trigform->oid;
1478 
1479  /*
1480  * If the trigger descends from a trigger on a parent partitioned
1481  * table, reject the rename. We don't allow a trigger in a partition
1482  * to differ in name from that of its parent: that would lead to an
1483  * inconsistency that pg_dump would not reproduce.
1484  */
1485  if (OidIsValid(trigform->tgparentid))
1486  ereport(ERROR,
1487  errmsg("cannot rename trigger \"%s\" on table \"%s\"",
1488  stmt->subname, RelationGetRelationName(targetrel)),
1489  errhint("Rename trigger on partitioned table \"%s\" instead.",
1490  get_rel_name(get_partition_parent(relid, false))));
1491 
1492 
1493  /* Rename the trigger on this relation ... */
1494  renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
1495  stmt->subname);
1496 
1497  /* ... and if it is partitioned, recurse to its partitions */
1498  if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1499  {
1500  PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);
1501 
1502  for (int i = 0; i < partdesc->nparts; i++)
1503  {
1504  Oid partitionId = partdesc->oids[i];
1505 
1506  renametrig_partition(tgrel, partitionId, trigform->oid,
1507  stmt->newname, stmt->subname);
1508  }
1509  }
1510  }
1511  else
1512  {
1513  ereport(ERROR,
1514  (errcode(ERRCODE_UNDEFINED_OBJECT),
1515  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1516  stmt->subname, RelationGetRelationName(targetrel))));
1517  }
1518 
1519  ObjectAddressSet(address, TriggerRelationId, tgoid);
1520 
1521  systable_endscan(tgscan);
1522 
1523  table_close(tgrel, RowExclusiveLock);
1524 
1525  /*
1526  * Close rel, but keep exclusive lock!
1527  */
1528  relation_close(targetrel, NoLock);
1529 
1530  return address;
1531 }
1532 
1533 /*
1534  * Subroutine for renametrig -- perform the actual work of renaming one
1535  * trigger on one table.
1536  *
1537  * If the trigger has a name different from the expected one, raise a
1538  * NOTICE about it.
1539  */
1540 static void
1542  const char *newname, const char *expected_name)
1543 {
1544  HeapTuple tuple;
1545  Form_pg_trigger tgform;
1546  ScanKeyData key[2];
1547  SysScanDesc tgscan;
1548 
1549  /* If the trigger already has the new name, nothing to do. */
1550  tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
1551  if (strcmp(NameStr(tgform->tgname), newname) == 0)
1552  return;
1553 
1554  /*
1555  * Before actually trying the rename, search for triggers with the same
1556  * name. The update would fail with an ugly message in that case, and it
1557  * is better to throw a nicer error.
1558  */
1559  ScanKeyInit(&key[0],
1560  Anum_pg_trigger_tgrelid,
1561  BTEqualStrategyNumber, F_OIDEQ,
1562  ObjectIdGetDatum(RelationGetRelid(targetrel)));
1563  ScanKeyInit(&key[1],
1564  Anum_pg_trigger_tgname,
1565  BTEqualStrategyNumber, F_NAMEEQ,
1566  PointerGetDatum(newname));
1567  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1568  NULL, 2, key);
1569  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1570  ereport(ERROR,
1572  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1573  newname, RelationGetRelationName(targetrel))));
1574  systable_endscan(tgscan);
1575 
1576  /*
1577  * The target name is free; update the existing pg_trigger tuple with it.
1578  */
1579  tuple = heap_copytuple(trigtup); /* need a modifiable copy */
1580  tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1581 
1582  /*
1583  * If the trigger has a name different from what we expected, let the user
1584  * know. (We can proceed anyway, since we must have reached here following
1585  * a tgparentid link.)
1586  */
1587  if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
1588  ereport(NOTICE,
1589  errmsg("renamed trigger \"%s\" on relation \"%s\"",
1590  NameStr(tgform->tgname),
1591  RelationGetRelationName(targetrel)));
1592 
1593  namestrcpy(&tgform->tgname, newname);
1594 
1595  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1596 
1597  InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);
1598 
1599  /*
1600  * Invalidate relation's relcache entry so that other backends (and this
1601  * one too!) are sent SI message to make them rebuild relcache entries.
1602  * (Ideally this should happen automatically...)
1603  */
1604  CacheInvalidateRelcache(targetrel);
1605 }
1606 
1607 /*
1608  * Subroutine for renametrig -- Helper for recursing to partitions when
1609  * renaming triggers on a partitioned table.
1610  */
1611 static void
1612 renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
1613  const char *newname, const char *expected_name)
1614 {
1615  SysScanDesc tgscan;
1616  ScanKeyData key;
1617  HeapTuple tuple;
1618 
1619  /*
1620  * Given a relation and the OID of a trigger on parent relation, find the
1621  * corresponding trigger in the child and rename that trigger to the given
1622  * name.
1623  */
1624  ScanKeyInit(&key,
1625  Anum_pg_trigger_tgrelid,
1626  BTEqualStrategyNumber, F_OIDEQ,
1627  ObjectIdGetDatum(partitionId));
1628  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1629  NULL, 1, &key);
1630  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1631  {
1632  Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1633  Relation partitionRel;
1634 
1635  if (tgform->tgparentid != parentTriggerOid)
1636  continue; /* not our trigger */
1637 
1638  partitionRel = table_open(partitionId, NoLock);
1639 
1640  /* Rename the trigger on this partition */
1641  renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);
1642 
1643  /* And if this relation is partitioned, recurse to its partitions */
1644  if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1645  {
1646  PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
1647  true);
1648 
1649  for (int i = 0; i < partdesc->nparts; i++)
1650  {
1651  Oid partitionId = partdesc->oids[i];
1652 
1653  renametrig_partition(tgrel, partitionId, tgform->oid, newname,
1654  NameStr(tgform->tgname));
1655  }
1656  }
1657  table_close(partitionRel, NoLock);
1658 
1659  /* There should be at most one matching tuple */
1660  break;
1661  }
1662  systable_endscan(tgscan);
1663 }
1664 
1665 /*
1666  * EnableDisableTrigger()
1667  *
1668  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1669  * to change 'tgenabled' field for the specified trigger(s)
1670  *
1671  * rel: relation to process (caller must hold suitable lock on it)
1672  * tgname: trigger to process, or NULL to scan all triggers
1673  * fires_when: new value for tgenabled field. In addition to generic
1674  * enablement/disablement, this also defines when the trigger
1675  * should be fired in session replication roles.
1676  * skip_system: if true, skip "system" triggers (constraint triggers)
1677  *
1678  * Caller should have checked permissions for the table; here we also
1679  * enforce that superuser privilege is required to alter the state of
1680  * system triggers
1681  */
1682 void
1683 EnableDisableTrigger(Relation rel, const char *tgname,
1684  char fires_when, bool skip_system, LOCKMODE lockmode)
1685 {
1686  Relation tgrel;
1687  int nkeys;
1688  ScanKeyData keys[2];
1689  SysScanDesc tgscan;
1690  HeapTuple tuple;
1691  bool found;
1692  bool changed;
1693 
1694  /* Scan the relevant entries in pg_triggers */
1695  tgrel = table_open(TriggerRelationId, RowExclusiveLock);
1696 
1697  ScanKeyInit(&keys[0],
1698  Anum_pg_trigger_tgrelid,
1699  BTEqualStrategyNumber, F_OIDEQ,
1701  if (tgname)
1702  {
1703  ScanKeyInit(&keys[1],
1704  Anum_pg_trigger_tgname,
1705  BTEqualStrategyNumber, F_NAMEEQ,
1706  CStringGetDatum(tgname));
1707  nkeys = 2;
1708  }
1709  else
1710  nkeys = 1;
1711 
1712  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1713  NULL, nkeys, keys);
1714 
1715  found = changed = false;
1716 
1717  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1718  {
1719  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1720 
1721  if (oldtrig->tgisinternal)
1722  {
1723  /* system trigger ... ok to process? */
1724  if (skip_system)
1725  continue;
1726  if (!superuser())
1727  ereport(ERROR,
1728  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1729  errmsg("permission denied: \"%s\" is a system trigger",
1730  NameStr(oldtrig->tgname))));
1731  }
1732 
1733  found = true;
1734 
1735  if (oldtrig->tgenabled != fires_when)
1736  {
1737  /* need to change this one ... make a copy to scribble on */
1738  HeapTuple newtup = heap_copytuple(tuple);
1739  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1740 
1741  newtrig->tgenabled = fires_when;
1742 
1743  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1744 
1745  heap_freetuple(newtup);
1746 
1747  changed = true;
1748  }
1749 
1750  InvokeObjectPostAlterHook(TriggerRelationId,
1751  oldtrig->oid, 0);
1752  }
1753 
1754  systable_endscan(tgscan);
1755 
1756  table_close(tgrel, RowExclusiveLock);
1757 
1758  if (tgname && !found)
1759  ereport(ERROR,
1760  (errcode(ERRCODE_UNDEFINED_OBJECT),
1761  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1762  tgname, RelationGetRelationName(rel))));
1763 
1764  /*
1765  * If we changed anything, broadcast a SI inval message to force each
1766  * backend (including our own!) to rebuild relation's relcache entry.
1767  * Otherwise they will fail to apply the change promptly.
1768  */
1769  if (changed)
1771 }
1772 
1773 
1774 /*
1775  * Build trigger data to attach to the given relcache entry.
1776  *
1777  * Note that trigger data attached to a relcache entry must be stored in
1778  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1779  * But we should be running in a less long-lived working context. To avoid
1780  * leaking cache memory if this routine fails partway through, we build a
1781  * temporary TriggerDesc in working memory and then copy the completed
1782  * structure into cache memory.
1783  */
1784 void
1786 {
1787  TriggerDesc *trigdesc;
1788  int numtrigs;
1789  int maxtrigs;
1790  Trigger *triggers;
1791  Relation tgrel;
1792  ScanKeyData skey;
1793  SysScanDesc tgscan;
1794  HeapTuple htup;
1795  MemoryContext oldContext;
1796  int i;
1797 
1798  /*
1799  * Allocate a working array to hold the triggers (the array is extended if
1800  * necessary)
1801  */
1802  maxtrigs = 16;
1803  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1804  numtrigs = 0;
1805 
1806  /*
1807  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1808  * be reading the triggers in name order, except possibly during
1809  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1810  * ensures that triggers will be fired in name order.
1811  */
1812  ScanKeyInit(&skey,
1813  Anum_pg_trigger_tgrelid,
1814  BTEqualStrategyNumber, F_OIDEQ,
1815  ObjectIdGetDatum(RelationGetRelid(relation)));
1816 
1817  tgrel = table_open(TriggerRelationId, AccessShareLock);
1818  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1819  NULL, 1, &skey);
1820 
1821  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1822  {
1823  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1824  Trigger *build;
1825  Datum datum;
1826  bool isnull;
1827 
1828  if (numtrigs >= maxtrigs)
1829  {
1830  maxtrigs *= 2;
1831  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1832  }
1833  build = &(triggers[numtrigs]);
1834 
1835  build->tgoid = pg_trigger->oid;
1837  NameGetDatum(&pg_trigger->tgname)));
1838  build->tgfoid = pg_trigger->tgfoid;
1839  build->tgtype = pg_trigger->tgtype;
1840  build->tgenabled = pg_trigger->tgenabled;
1841  build->tgisinternal = pg_trigger->tgisinternal;
1842  build->tgisclone = OidIsValid(pg_trigger->tgparentid);
1843  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1844  build->tgconstrindid = pg_trigger->tgconstrindid;
1845  build->tgconstraint = pg_trigger->tgconstraint;
1846  build->tgdeferrable = pg_trigger->tgdeferrable;
1847  build->tginitdeferred = pg_trigger->tginitdeferred;
1848  build->tgnargs = pg_trigger->tgnargs;
1849  /* tgattr is first var-width field, so OK to access directly */
1850  build->tgnattr = pg_trigger->tgattr.dim1;
1851  if (build->tgnattr > 0)
1852  {
1853  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1854  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1855  build->tgnattr * sizeof(int16));
1856  }
1857  else
1858  build->tgattr = NULL;
1859  if (build->tgnargs > 0)
1860  {
1861  bytea *val;
1862  char *p;
1863 
1864  val = DatumGetByteaPP(fastgetattr(htup,
1865  Anum_pg_trigger_tgargs,
1866  tgrel->rd_att, &isnull));
1867  if (isnull)
1868  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1869  RelationGetRelationName(relation));
1870  p = (char *) VARDATA_ANY(val);
1871  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1872  for (i = 0; i < build->tgnargs; i++)
1873  {
1874  build->tgargs[i] = pstrdup(p);
1875  p += strlen(p) + 1;
1876  }
1877  }
1878  else
1879  build->tgargs = NULL;
1880 
1881  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1882  tgrel->rd_att, &isnull);
1883  if (!isnull)
1884  build->tgoldtable =
1886  else
1887  build->tgoldtable = NULL;
1888 
1889  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1890  tgrel->rd_att, &isnull);
1891  if (!isnull)
1892  build->tgnewtable =
1894  else
1895  build->tgnewtable = NULL;
1896 
1897  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1898  tgrel->rd_att, &isnull);
1899  if (!isnull)
1900  build->tgqual = TextDatumGetCString(datum);
1901  else
1902  build->tgqual = NULL;
1903 
1904  numtrigs++;
1905  }
1906 
1907  systable_endscan(tgscan);
1908  table_close(tgrel, AccessShareLock);
1909 
1910  /* There might not be any triggers */
1911  if (numtrigs == 0)
1912  {
1913  pfree(triggers);
1914  return;
1915  }
1916 
1917  /* Build trigdesc */
1918  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1919  trigdesc->triggers = triggers;
1920  trigdesc->numtriggers = numtrigs;
1921  for (i = 0; i < numtrigs; i++)
1922  SetTriggerFlags(trigdesc, &(triggers[i]));
1923 
1924  /* Copy completed trigdesc into cache storage */
1926  relation->trigdesc = CopyTriggerDesc(trigdesc);
1927  MemoryContextSwitchTo(oldContext);
1928 
1929  /* Release working memory */
1930  FreeTriggerDesc(trigdesc);
1931 }
1932 
1933 /*
1934  * Update the TriggerDesc's hint flags to include the specified trigger
1935  */
/*
 * Each trig_* field in TriggerDesc is a boolean hint meaning "the relation
 * has at least one trigger of this class".  Results are OR'ed in rather than
 * assigned, so calling this once per trigger accumulates the flags over the
 * whole trigger array.
 */
1936 static void
1938 {
1939  int16 tgtype = trigger->tgtype;
1940 
/* Row-level and statement-level hints, for each event/timing combination. */
1941  trigdesc->trig_insert_before_row |=
1942  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1943  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1944  trigdesc->trig_insert_after_row |=
1945  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1946  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1947  trigdesc->trig_insert_instead_row |=
1948  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1949  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
1950  trigdesc->trig_insert_before_statement |=
1951  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1952  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1953  trigdesc->trig_insert_after_statement |=
1954  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1955  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1956  trigdesc->trig_update_before_row |=
1957  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1958  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1959  trigdesc->trig_update_after_row |=
1960  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1961  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1962  trigdesc->trig_update_instead_row |=
1963  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1964  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
1965  trigdesc->trig_update_before_statement |=
1966  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1967  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1968  trigdesc->trig_update_after_statement |=
1969  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1970  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1971  trigdesc->trig_delete_before_row |=
1972  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1973  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1974  trigdesc->trig_delete_after_row |=
1975  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1976  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1977  trigdesc->trig_delete_instead_row |=
1978  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1979  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
1980  trigdesc->trig_delete_before_statement |=
1981  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1982  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1983  trigdesc->trig_delete_after_statement |=
1984  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1985  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1986  /* there are no row-level truncate triggers */
1987  trigdesc->trig_truncate_before_statement |=
1988  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1989  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
1990  trigdesc->trig_truncate_after_statement |=
1991  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1992  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
1993 
/*
 * Transition-table hints: set only if the trigger fires on the event AND
 * declares the corresponding transition table (tgoldtable/tgnewtable).
 */
1994  trigdesc->trig_insert_new_table |=
1995  (TRIGGER_FOR_INSERT(tgtype) &&
1996  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1997  trigdesc->trig_update_old_table |=
1998  (TRIGGER_FOR_UPDATE(tgtype) &&
1999  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2000  trigdesc->trig_update_new_table |=
2001  (TRIGGER_FOR_UPDATE(tgtype) &&
2002  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2003  trigdesc->trig_delete_old_table |=
2004  (TRIGGER_FOR_DELETE(tgtype) &&
2005  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2006 }
2007 
2008 /*
2009  * Copy a TriggerDesc data structure.
2010  *
2011  * The copy is allocated in the current memory context.
2012  */
/*
 * The copy is deep: the Trigger array and every pointer field inside each
 * Trigger (name, column-number array, argument strings, WHEN qual string,
 * transition table names) is duplicated, so the result is fully independent
 * of the source descriptor's memory.  Returns NULL for an empty descriptor.
 */
2013 TriggerDesc *
2015 {
2016  TriggerDesc *newdesc;
2017  Trigger *trigger;
2018  int i;
2019 
2020  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2021  return NULL;
2022 
2023  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2024  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2025 
2026  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2027  memcpy(trigger, trigdesc->triggers,
2028  trigdesc->numtriggers * sizeof(Trigger));
2029  newdesc->triggers = trigger;
2030 
/* Walk the new array, replacing each pointer field with a fresh copy. */
2031  for (i = 0; i < trigdesc->numtriggers; i++)
2032  {
2033  trigger->tgname = pstrdup(trigger->tgname);
2034  if (trigger->tgnattr > 0)
2035  {
2036  int16 *newattr;
2037 
2038  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
2039  memcpy(newattr, trigger->tgattr,
2040  trigger->tgnattr * sizeof(int16));
2041  trigger->tgattr = newattr;
2042  }
2043  if (trigger->tgnargs > 0)
2044  {
2045  char **newargs;
2046  int16 j;
2047 
2048  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2049  for (j = 0; j < trigger->tgnargs; j++)
2050  newargs[j] = pstrdup(trigger->tgargs[j]);
2051  trigger->tgargs = newargs;
2052  }
2053  if (trigger->tgqual)
2054  trigger->tgqual = pstrdup(trigger->tgqual);
2055  if (trigger->tgoldtable)
2056  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2057  if (trigger->tgnewtable)
2058  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
2059  trigger++;
2060  }
2061 
2062  return newdesc;
2063 }
2064 
2065 /*
2066  * Free a TriggerDesc data structure.
2067  */
/*
 * Releases the Trigger array and everything it points to, then the
 * descriptor itself.  Safe to call with NULL (no-op).
 */
2068 void
2070 {
2071  Trigger *trigger;
2072  int i;
2073 
2074  if (trigdesc == NULL)
2075  return;
2076 
2077  trigger = trigdesc->triggers;
2078  for (i = 0; i < trigdesc->numtriggers; i++)
2079  {
2080  pfree(trigger->tgname);
2081  if (trigger->tgnattr > 0)
2082  pfree(trigger->tgattr);
2083  if (trigger->tgnargs > 0)
2084  {
/*
 * NB: this destructively counts tgnargs down to -1 while freeing each
 * argument string; harmless since the whole struct is being freed.
 */
2085  while (--(trigger->tgnargs) >= 0)
2086  pfree(trigger->tgargs[trigger->tgnargs]);
2087  pfree(trigger->tgargs);
2088  }
2089  if (trigger->tgqual)
2090  pfree(trigger->tgqual);
2091  if (trigger->tgoldtable)
2092  pfree(trigger->tgoldtable);
2093  if (trigger->tgnewtable)
2094  pfree(trigger->tgnewtable);
2095  trigger++;
2096  }
2097  pfree(trigdesc->triggers);
2098  pfree(trigdesc);
2099 }
2100 
/*
 * Compare two TriggerDesc structures for logical equality.
 */
#ifdef NOT_USED
/* Compare two possibly-NULL C strings; two NULLs count as equal. */
static bool
equal_trigger_strings(const char *s1, const char *s2)
{
	if (s1 == NULL || s2 == NULL)
		return s1 == s2;
	return strcmp(s1, s2) == 0;
}

bool
equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
{
	int			i;

	/*
	 * We need not examine the hint flags, just the trigger array itself; if
	 * we have the same triggers with the same types, the flags should match.
	 *
	 * As of 7.3 we assume trigger set ordering is significant in the
	 * comparison; so we just compare corresponding slots of the two sets.
	 *
	 * Note: comparing the stringToNode forms of the WHEN clauses means that
	 * parse column locations will affect the result.  This is okay as long
	 * as this function is only used for detecting exact equality, as for
	 * example in checking for staleness of a cache entry.
	 */
	if (trigdesc1 == NULL)
		return trigdesc2 == NULL;
	if (trigdesc2 == NULL)
		return false;
	if (trigdesc1->numtriggers != trigdesc2->numtriggers)
		return false;

	for (i = 0; i < trigdesc1->numtriggers; i++)
	{
		Trigger    *trig1 = trigdesc1->triggers + i;
		Trigger    *trig2 = trigdesc2->triggers + i;
		int			j;

		/* All scalar catalog-derived fields must match exactly. */
		if (trig1->tgoid != trig2->tgoid ||
			trig1->tgfoid != trig2->tgfoid ||
			trig1->tgtype != trig2->tgtype ||
			trig1->tgenabled != trig2->tgenabled ||
			trig1->tgisinternal != trig2->tgisinternal ||
			trig1->tgisclone != trig2->tgisclone ||
			trig1->tgconstrrelid != trig2->tgconstrrelid ||
			trig1->tgconstrindid != trig2->tgconstrindid ||
			trig1->tgconstraint != trig2->tgconstraint ||
			trig1->tgdeferrable != trig2->tgdeferrable ||
			trig1->tginitdeferred != trig2->tginitdeferred ||
			trig1->tgnargs != trig2->tgnargs ||
			trig1->tgnattr != trig2->tgnattr)
			return false;
		if (strcmp(trig1->tgname, trig2->tgname) != 0)
			return false;

		/* Column-number array, argument strings, WHEN qual, transition
		 * table names. */
		if (trig1->tgnattr > 0 &&
			memcmp(trig1->tgattr, trig2->tgattr,
				   trig1->tgnattr * sizeof(int16)) != 0)
			return false;
		for (j = 0; j < trig1->tgnargs; j++)
		{
			if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
				return false;
		}
		if (!equal_trigger_strings(trig1->tgqual, trig2->tgqual))
			return false;
		if (!equal_trigger_strings(trig1->tgoldtable, trig2->tgoldtable))
			return false;
		if (!equal_trigger_strings(trig1->tgnewtable, trig2->tgnewtable))
			return false;
	}
	return true;
}
#endif							/* NOT_USED */
2194 
2195 /*
2196  * Check if there is a row-level trigger with transition tables that prevents
2197  * a table from becoming an inheritance child or partition. Return the name
2198  * of the first such incompatible trigger, or NULL if there is none.
2199  */
2200 const char *
2202 {
2203  if (trigdesc != NULL)
2204  {
2205  int i;
2206 
2207  for (i = 0; i < trigdesc->numtriggers; ++i)
2208  {
2209  Trigger *trigger = &trigdesc->triggers[i];
2210 
/*
 * Any trigger declaring a transition table (REFERENCING OLD TABLE or
 * NEW TABLE) makes the relation incompatible.
 */
2211  if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2212  return trigger->tgname;
2213  }
2214  }
2215 
2216  return NULL;
2217 }
2218 
2219 /*
2220  * Call a trigger function.
2221  *
2222  * trigdata: trigger descriptor.
2223  * tgindx: trigger's index in finfo and instr arrays.
2224  * finfo: array of cached trigger function call information.
2225  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2226  * per_tuple_context: memory context to execute the function in.
2227  *
2228  * Returns the tuple (or NULL) as returned by the function.
2229  */
2230 static HeapTuple
2232  int tgindx,
2233  FmgrInfo *finfo,
2234  Instrumentation *instr,
2235  MemoryContext per_tuple_context)
2236 {
2237  LOCAL_FCINFO(fcinfo, 0);
2238  PgStat_FunctionCallUsage fcusage;
2239  Datum result;
2240  MemoryContext oldContext;
2241 
2242  /*
2243  * Protect against code paths that may fail to initialize transition table
2244  * info.
2245  */
2246  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2247  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2248  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2249  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2250  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2251  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2252  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2253 
2254  finfo += tgindx;
2255 
2256  /*
2257  * We cache fmgr lookup info, to avoid making the lookup again on each
2258  * call.
2259  */
2260  if (finfo->fn_oid == InvalidOid)
2261  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2262 
2263  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2264 
2265  /*
2266  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2267  */
2268  if (instr)
2269  InstrStartNode(instr + tgindx);
2270 
2271  /*
2272  * Do the function evaluation in the per-tuple memory context, so that
2273  * leaked memory will be reclaimed once per tuple. Note in particular that
2274  * any new tuple created by the trigger function will live till the end of
2275  * the tuple cycle.
2276  */
2277  oldContext = MemoryContextSwitchTo(per_tuple_context);
2278 
2279  /*
2280  * Call the function, passing no arguments but setting a context.
2281  */
2282  InitFunctionCallInfoData(*fcinfo, finfo, 0,
2283  InvalidOid, (Node *) trigdata, NULL);
2284 
2285  pgstat_init_function_usage(fcinfo, &fcusage);
2286 
/*
 * Track trigger nesting depth across the call (presumably surfaced by
 * pg_trigger_depth() — confirm upstream); PG_FINALLY guarantees the
 * decrement even if the trigger function errors out.
 */
2287  MyTriggerDepth++;
2288  PG_TRY();
2289  {
2290  result = FunctionCallInvoke(fcinfo);
2291  }
2292  PG_FINALLY();
2293  {
2294  MyTriggerDepth--;
2295  }
2296  PG_END_TRY();
2297 
2298  pgstat_end_function_usage(&fcusage, true);
2299 
2300  MemoryContextSwitchTo(oldContext);
2301 
2302  /*
2303  * Trigger protocol allows function to return a null pointer, but NOT to
2304  * set the isnull result flag.
2305  */
2306  if (fcinfo->isnull)
2307  ereport(ERROR,
2308  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2309  errmsg("trigger function %u returned null value",
2310  fcinfo->flinfo->fn_oid)));
2311 
2312  /*
2313  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2314  * one "tuple returned" (really the number of firings).
2315  */
2316  if (instr)
2317  InstrStopNode(instr + tgindx, 1);
2318 
2319  return (HeapTuple) DatumGetPointer(result);
2320 }
2321 
/*
 * Fire BEFORE STATEMENT triggers for INSERT on relinfo's relation.
 *
 * Quick exit if the relation has no such triggers, or (per the guard whose
 * call line is elided in this extract) if they were already fired in this
 * context.  A statement-level trigger returning a tuple is a protocol
 * violation.
 */
2322 void
2324 {
2325  TriggerDesc *trigdesc;
2326  int i;
2327  TriggerData LocTriggerData = {0};
2328 
2329  trigdesc = relinfo->ri_TrigDesc;
2330 
2331  if (trigdesc == NULL)
2332  return;
2333  if (!trigdesc->trig_insert_before_statement)
2334  return;
2335 
2336  /* no-op if we already fired BS triggers in this context */
2338  CMD_INSERT))
2339  return;
2340 
2341  LocTriggerData.type = T_TriggerData;
2342  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2344  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2345  for (i = 0; i < trigdesc->numtriggers; i++)
2346  {
2347  Trigger *trigger = &trigdesc->triggers[i];
2348  HeapTuple newtuple;
2349 
/* Fire only enabled BEFORE STATEMENT INSERT triggers. */
2350  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2351  TRIGGER_TYPE_STATEMENT,
2352  TRIGGER_TYPE_BEFORE,
2353  TRIGGER_TYPE_INSERT))
2354  continue;
2355  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2356  NULL, NULL, NULL))
2357  continue;
2358 
2359  LocTriggerData.tg_trigger = trigger;
2360  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2361  i,
2362  relinfo->ri_TrigFunctions,
2363  relinfo->ri_TrigInstrument,
2364  GetPerTupleMemoryContext(estate));
2365 
2366  if (newtuple)
2367  ereport(ERROR,
2368  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2369  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2370  }
2371 }
2372 
/*
 * Queue AFTER STATEMENT triggers for INSERT.  If the relation has such
 * triggers, an after-trigger event is recorded for later execution; the
 * call target on the elided line is presumably AfterTriggerSaveEvent —
 * confirm against upstream source.
 */
2373 void
2375  TransitionCaptureState *transition_capture)
2376 {
2377  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2378 
2379  if (trigdesc && trigdesc->trig_insert_after_statement)
2381  false, NULL, NULL, NIL, NULL, transition_capture);
2382 }
2383 
/*
 * Fire BEFORE ROW triggers for INSERT.
 *
 * Returns false if a trigger returned NULL ("do nothing"), telling the
 * caller to suppress the insert.  A trigger may return a replacement tuple,
 * which is stored back into *slot* for subsequent triggers and the insert
 * itself.
 */
2384 bool
2386  TupleTableSlot *slot)
2387 {
2388  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2389  HeapTuple newtuple = NULL;
2390  bool should_free;
2391  TriggerData LocTriggerData = {0};
2392  int i;
2393 
2394  LocTriggerData.type = T_TriggerData;
2395  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2398  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2399  for (i = 0; i < trigdesc->numtriggers; i++)
2400  {
2401  Trigger *trigger = &trigdesc->triggers[i];
2402  HeapTuple oldtuple;
2403 
2404  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2405  TRIGGER_TYPE_ROW,
2406  TRIGGER_TYPE_BEFORE,
2407  TRIGGER_TYPE_INSERT))
2408  continue;
2409  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2410  NULL, NULL, slot))
2411  continue;
2412 
/* Materialize the slot into a heap tuple lazily, only once a trigger fires. */
2413  if (!newtuple)
2414  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2415 
2416  LocTriggerData.tg_trigslot = slot;
2417  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2418  LocTriggerData.tg_trigger = trigger;
2419  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2420  i,
2421  relinfo->ri_TrigFunctions,
2422  relinfo->ri_TrigInstrument,
2423  GetPerTupleMemoryContext(estate));
2424  if (newtuple == NULL)
2425  {
2426  if (should_free)
2427  heap_freetuple(oldtuple);
2428  return false; /* "do nothing" */
2429  }
2430  else if (newtuple != oldtuple)
2431  {
2432  ExecForceStoreHeapTuple(newtuple, slot, false);
2433 
2434  /*
2435  * After a tuple in a partition goes through a trigger, the user
2436  * could have changed the partition key enough that the tuple no
2437  * longer fits the partition. Verify that.
2438  */
2439  if (trigger->tgisclone &&
2440  !ExecPartitionCheck(relinfo, slot, estate, false))
2441  ereport(ERROR,
2442  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2443  errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
2444  errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
2445  trigger->tgname,
2448 
2449  if (should_free)
2450  heap_freetuple(oldtuple);
2451 
2452  /* signal tuple should be re-fetched if used */
2453  newtuple = NULL;
2454  }
2455  }
2456 
2457  return true;
2458 }
2459 
/*
 * Queue AFTER ROW triggers for INSERT; also fires when transition_capture
 * requests capture of the new tuple into an INSERT transition table, even
 * if no after-row triggers exist.
 */
2460 void
2462  TupleTableSlot *slot, List *recheckIndexes,
2463  TransitionCaptureState *transition_capture)
2464 {
2465  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2466 
2467  if ((trigdesc && trigdesc->trig_insert_after_row) ||
2468  (transition_capture && transition_capture->tcs_insert_new_table))
2470  true, NULL, slot,
2471  recheckIndexes, NULL,
2472  transition_capture);
2473 }
2474 
/*
 * Fire INSTEAD OF ROW triggers for INSERT.
 *
 * Returns false if a trigger returned NULL ("do nothing").  A replacement
 * tuple returned by a trigger is stored back into *slot*.
 */
2475 bool
2477  TupleTableSlot *slot)
2478 {
2479  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2480  HeapTuple newtuple = NULL;
2481  bool should_free;
2482  TriggerData LocTriggerData = {0};
2483  int i;
2484 
2485  LocTriggerData.type = T_TriggerData;
2486  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2489  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2490  for (i = 0; i < trigdesc->numtriggers; i++)
2491  {
2492  Trigger *trigger = &trigdesc->triggers[i];
2493  HeapTuple oldtuple;
2494 
2495  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2496  TRIGGER_TYPE_ROW,
2497  TRIGGER_TYPE_INSTEAD,
2498  TRIGGER_TYPE_INSERT))
2499  continue;
2500  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2501  NULL, NULL, slot))
2502  continue;
2503 
/* Materialize the slot into a heap tuple lazily, only once a trigger fires. */
2504  if (!newtuple)
2505  newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2506 
2507  LocTriggerData.tg_trigslot = slot;
2508  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2509  LocTriggerData.tg_trigger = trigger;
2510  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2511  i,
2512  relinfo->ri_TrigFunctions,
2513  relinfo->ri_TrigInstrument,
2514  GetPerTupleMemoryContext(estate));
2515  if (newtuple == NULL)
2516  {
2517  if (should_free)
2518  heap_freetuple(oldtuple);
2519  return false; /* "do nothing" */
2520  }
2521  else if (newtuple != oldtuple)
2522  {
2523  ExecForceStoreHeapTuple(newtuple, slot, false);
2524 
2525  if (should_free)
2526  heap_freetuple(oldtuple);
2527 
2528  /* signal tuple should be re-fetched if used */
2529  newtuple = NULL;
2530  }
2531  }
2532 
2533  return true;
2534 }
2535 
/*
 * Fire BEFORE STATEMENT triggers for DELETE on relinfo's relation.
 *
 * Quick exit if there are no such triggers, or (per the guard whose call
 * line is elided in this extract) if they were already fired in this
 * context.  A statement-level trigger returning a tuple is a protocol
 * violation.
 */
2536 void
2538 {
2539  TriggerDesc *trigdesc;
2540  int i;
2541  TriggerData LocTriggerData = {0};
2542 
2543  trigdesc = relinfo->ri_TrigDesc;
2544 
2545  if (trigdesc == NULL)
2546  return;
2547  if (!trigdesc->trig_delete_before_statement)
2548  return;
2549 
2550  /* no-op if we already fired BS triggers in this context */
2552  CMD_DELETE))
2553  return;
2554 
2555  LocTriggerData.type = T_TriggerData;
2556  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2558  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2559  for (i = 0; i < trigdesc->numtriggers; i++)
2560  {
2561  Trigger *trigger = &trigdesc->triggers[i];
2562  HeapTuple newtuple;
2563 
2564  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2565  TRIGGER_TYPE_STATEMENT,
2566  TRIGGER_TYPE_BEFORE,
2567  TRIGGER_TYPE_DELETE))
2568  continue;
2569  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2570  NULL, NULL, NULL))
2571  continue;
2572 
2573  LocTriggerData.tg_trigger = trigger;
2574  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2575  i,
2576  relinfo->ri_TrigFunctions,
2577  relinfo->ri_TrigInstrument,
2578  GetPerTupleMemoryContext(estate));
2579 
2580  if (newtuple)
2581  ereport(ERROR,
2582  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2583  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2584  }
2585 }
2586 
/*
 * Queue AFTER STATEMENT triggers for DELETE.  If the relation has such
 * triggers, an after-trigger event is recorded for later execution; the
 * call target on the elided line is presumably AfterTriggerSaveEvent —
 * confirm against upstream source.
 */
2587 void
2589  TransitionCaptureState *transition_capture)
2590 {
2591  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2592 
2593  if (trigdesc && trigdesc->trig_delete_after_statement)
2595  false, NULL, NULL, NIL, NULL, transition_capture);
2596 }
2597 
2598 /*
2599  * Execute BEFORE ROW DELETE triggers.
2600  *
2601  * True indicates caller can proceed with the delete. False indicates caller
2602  * need to suppress the delete and additionally if requested, we need to pass
2603  * back the concurrently updated tuple if any.
2604  */
2605 bool
2607  ResultRelInfo *relinfo,
2608  ItemPointer tupleid,
2609  HeapTuple fdw_trigtuple,
2610  TupleTableSlot **epqslot)
2611 {
2612  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2613  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2614  bool result = true;
2615  TriggerData LocTriggerData = {0};
2616  HeapTuple trigtuple;
2617  bool should_free = false;
2618  int i;
2619 
/*
 * Caller must supply exactly one of tupleid (plain table) or fdw_trigtuple
 * (foreign table) — enforced by the XOR assertion.
 */
2620  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2621  if (fdw_trigtuple == NULL)
2622  {
2623  TupleTableSlot *epqslot_candidate = NULL;
2624 
2625  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2626  LockTupleExclusive, slot, &epqslot_candidate))
2627  return false;
2628 
2629  /*
2630  * If the tuple was concurrently updated and the caller of this
2631  * function requested for the updated tuple, skip the trigger
2632  * execution.
2633  */
2634  if (epqslot_candidate != NULL && epqslot != NULL)
2635  {
2636  *epqslot = epqslot_candidate;
2637  return false;
2638  }
2639 
2640  trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2641 
2642  }
2643  else
2644  {
2645  trigtuple = fdw_trigtuple;
2646  ExecForceStoreHeapTuple(trigtuple, slot, false);
2647  }
2648 
2649  LocTriggerData.type = T_TriggerData;
2650  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2653  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2654  for (i = 0; i < trigdesc->numtriggers; i++)
2655  {
2656  HeapTuple newtuple;
2657  Trigger *trigger = &trigdesc->triggers[i];
2658 
2659  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2660  TRIGGER_TYPE_ROW,
2661  TRIGGER_TYPE_BEFORE,
2662  TRIGGER_TYPE_DELETE))
2663  continue;
2664  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2665  NULL, slot, NULL))
2666  continue;
2667 
2668  LocTriggerData.tg_trigslot = slot;
2669  LocTriggerData.tg_trigtuple = trigtuple;
2670  LocTriggerData.tg_trigger = trigger;
2671  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2672  i,
2673  relinfo->ri_TrigFunctions,
2674  relinfo->ri_TrigInstrument,
2675  GetPerTupleMemoryContext(estate));
2676  if (newtuple == NULL)
2677  {
2678  result = false; /* tell caller to suppress delete */
2679  break;
2680  }
/* For DELETE, any returned tuple other than the original is discarded. */
2681  if (newtuple != trigtuple)
2682  heap_freetuple(newtuple);
2683  }
2684  if (should_free)
2685  heap_freetuple(trigtuple);
2686 
2687  return result;
2688 }
2689 
/*
 * Queue AFTER ROW triggers for DELETE; also fires when transition_capture
 * requests capture of the old tuple into a DELETE transition table.  The
 * old tuple is fetched from the heap (or taken from fdw_trigtuple) into the
 * trigger old-tuple slot before the event is recorded.
 */
2690 void
2692  ItemPointer tupleid,
2693  HeapTuple fdw_trigtuple,
2694  TransitionCaptureState *transition_capture)
2695 {
2696  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2697 
2698  if ((trigdesc && trigdesc->trig_delete_after_row) ||
2699  (transition_capture && transition_capture->tcs_delete_old_table))
2700  {
2701  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2702 
/*
 * Caller must supply exactly one of tupleid (plain table) or
 * fdw_trigtuple (foreign table).
 */
2703  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2704  if (fdw_trigtuple == NULL)
2705  GetTupleForTrigger(estate,
2706  NULL,
2707  relinfo,
2708  tupleid,
2710  slot,
2711  NULL);
2712  else
2713  ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2714 
2716  true, slot, NULL, NIL, NULL,
2717  transition_capture);
2718  }
2719 }
2720 
/*
 * Fire INSTEAD OF ROW triggers for DELETE.
 *
 * Returns false if a trigger returned NULL, meaning the delete was
 * suppressed.  Any returned tuple other than the original is discarded.
 */
2721 bool
2723  HeapTuple trigtuple)
2724 {
2725  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2726  TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2727  TriggerData LocTriggerData = {0};
2728  int i;
2729 
2730  LocTriggerData.type = T_TriggerData;
2731  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2734  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2735 
2736  ExecForceStoreHeapTuple(trigtuple, slot, false);
2737 
2738  for (i = 0; i < trigdesc->numtriggers; i++)
2739  {
2740  HeapTuple rettuple;
2741  Trigger *trigger = &trigdesc->triggers[i];
2742 
2743  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2744  TRIGGER_TYPE_ROW,
2745  TRIGGER_TYPE_INSTEAD,
2746  TRIGGER_TYPE_DELETE))
2747  continue;
2748  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2749  NULL, slot, NULL))
2750  continue;
2751 
2752  LocTriggerData.tg_trigslot = slot;
2753  LocTriggerData.tg_trigtuple = trigtuple;
2754  LocTriggerData.tg_trigger = trigger;
2755  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2756  i,
2757  relinfo->ri_TrigFunctions,
2758  relinfo->ri_TrigInstrument,
2759  GetPerTupleMemoryContext(estate));
2760  if (rettuple == NULL)
2761  return false; /* Delete was suppressed */
2762  if (rettuple != trigtuple)
2763  heap_freetuple(rettuple);
2764  }
2765  return true;
2766 }
2767 
/*
 * Fire BEFORE STATEMENT triggers for UPDATE on relinfo's relation.
 *
 * updatedCols (the full set of updated columns) is passed both to
 * TriggerEnabled for column-list filtering and to the trigger via
 * tg_updatedcols.  Statement-level triggers operate on the parent table
 * (see the Assert); a trigger returning a tuple is a protocol violation.
 */
2768 void
2770 {
2771  TriggerDesc *trigdesc;
2772  int i;
2773  TriggerData LocTriggerData = {0};
2774  Bitmapset *updatedCols;
2775 
2776  trigdesc = relinfo->ri_TrigDesc;
2777 
2778  if (trigdesc == NULL)
2779  return;
2780  if (!trigdesc->trig_update_before_statement)
2781  return;
2782 
2783  /* no-op if we already fired BS triggers in this context */
2785  CMD_UPDATE))
2786  return;
2787 
2788  /* statement-level triggers operate on the parent table */
2789  Assert(relinfo->ri_RootResultRelInfo == NULL);
2790 
2791  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2792 
2793  LocTriggerData.type = T_TriggerData;
2794  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2796  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2797  LocTriggerData.tg_updatedcols = updatedCols;
2798  for (i = 0; i < trigdesc->numtriggers; i++)
2799  {
2800  Trigger *trigger = &trigdesc->triggers[i];
2801  HeapTuple newtuple;
2802 
2803  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2804  TRIGGER_TYPE_STATEMENT,
2805  TRIGGER_TYPE_BEFORE,
2806  TRIGGER_TYPE_UPDATE))
2807  continue;
2808  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2809  updatedCols, NULL, NULL))
2810  continue;
2811 
2812  LocTriggerData.tg_trigger = trigger;
2813  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2814  i,
2815  relinfo->ri_TrigFunctions,
2816  relinfo->ri_TrigInstrument,
2817  GetPerTupleMemoryContext(estate));
2818 
2819  if (newtuple)
2820  ereport(ERROR,
2821  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2822  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2823  }
2824 }
2825 
/*
 * Queue AFTER STATEMENT triggers for UPDATE.  Statement-level triggers
 * operate on the parent table (see the Assert).  The call target on the
 * elided line is presumably AfterTriggerSaveEvent — confirm against
 * upstream source.
 */
2826 void
2828  TransitionCaptureState *transition_capture)
2829 {
2830  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2831 
2832  /* statement-level triggers operate on the parent table */
2833  Assert(relinfo->ri_RootResultRelInfo == NULL);
2834 
2835  if (trigdesc && trigdesc->trig_update_after_statement)
2837  false, NULL, NULL, NIL,
2838  ExecGetAllUpdatedCols(relinfo, estate),
2839  transition_capture);
2840 }
2841 
/*
 * Fire BEFORE ROW triggers for UPDATE.
 *
 * Returns false to cancel the update (a trigger returned NULL, or the
 * on-disk tuple could not be locked).  newslot's contents may be replaced:
 * either with a re-computed new tuple after a concurrent update (EPQ), or
 * with a replacement tuple returned by a trigger.
 */
2842 bool
2844  ResultRelInfo *relinfo,
2845  ItemPointer tupleid,
2846  HeapTuple fdw_trigtuple,
2847  TupleTableSlot *newslot)
2848 {
2849  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2850  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2851  HeapTuple newtuple = NULL;
2852  HeapTuple trigtuple;
2853  bool should_free_trig = false;
2854  bool should_free_new = false;
2855  TriggerData LocTriggerData = {0};
2856  int i;
2857  Bitmapset *updatedCols;
2858  LockTupleMode lockmode;
2859 
2860  /* Determine lock mode to use */
2861  lockmode = ExecUpdateLockMode(estate, relinfo);
2862 
/*
 * Caller must supply exactly one of tupleid (plain table) or fdw_trigtuple
 * (foreign table).
 */
2863  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2864  if (fdw_trigtuple == NULL)
2865  {
2866  TupleTableSlot *epqslot_candidate = NULL;
2867 
2868  /* get a copy of the on-disk tuple we are planning to update */
2869  if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2870  lockmode, oldslot, &epqslot_candidate))
2871  return false; /* cancel the update action */
2872 
2873  /*
2874  * In READ COMMITTED isolation level it's possible that target tuple
2875  * was changed due to concurrent update. In that case we have a raw
2876  * subplan output tuple in epqslot_candidate, and need to form a new
2877  * insertable tuple using ExecGetUpdateNewTuple to replace the one we
2878  * received in newslot. Neither we nor our callers have any further
2879  * interest in the passed-in tuple, so it's okay to overwrite newslot
2880  * with the newer data.
2881  *
2882  * (Typically, newslot was also generated by ExecGetUpdateNewTuple, so
2883  * that epqslot_clean will be that same slot and the copy step below
2884  * is not needed.)
2885  */
2886  if (epqslot_candidate != NULL)
2887  {
2888  TupleTableSlot *epqslot_clean;
2889 
2890  epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
2891  oldslot);
2892 
2893  if (newslot != epqslot_clean)
2894  ExecCopySlot(newslot, epqslot_clean);
2895  }
2896 
2897  trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
2898  }
2899  else
2900  {
2901  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
2902  trigtuple = fdw_trigtuple;
2903  }
2904 
2905  LocTriggerData.type = T_TriggerData;
2906  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2909  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2910  updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2911  LocTriggerData.tg_updatedcols = updatedCols;
2912  for (i = 0; i < trigdesc->numtriggers; i++)
2913  {
2914  Trigger *trigger = &trigdesc->triggers[i];
2915  HeapTuple oldtuple;
2916 
2917  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2918  TRIGGER_TYPE_ROW,
2919  TRIGGER_TYPE_BEFORE,
2920  TRIGGER_TYPE_UPDATE))
2921  continue;
2922  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2923  updatedCols, oldslot, newslot))
2924  continue;
2925 
/* Materialize the new slot into a heap tuple lazily, only once a trigger fires. */
2926  if (!newtuple)
2927  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);
2928 
2929  LocTriggerData.tg_trigslot = oldslot;
2930  LocTriggerData.tg_trigtuple = trigtuple;
2931  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2932  LocTriggerData.tg_newslot = newslot;
2933  LocTriggerData.tg_trigger = trigger;
2934  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2935  i,
2936  relinfo->ri_TrigFunctions,
2937  relinfo->ri_TrigInstrument,
2938  GetPerTupleMemoryContext(estate));
2939 
2940  if (newtuple == NULL)
2941  {
2942  if (should_free_trig)
2943  heap_freetuple(trigtuple);
2944  if (should_free_new)
2945  heap_freetuple(oldtuple);
2946  return false; /* "do nothing" */
2947  }
2948  else if (newtuple != oldtuple)
2949  {
2950  ExecForceStoreHeapTuple(newtuple, newslot, false);
2951 
2952  /*
2953  * If the tuple returned by the trigger / being stored, is the old
2954  * row version, and the heap tuple passed to the trigger was
2955  * allocated locally, materialize the slot. Otherwise we might
2956  * free it while still referenced by the slot.
2957  */
2958  if (should_free_trig && newtuple == trigtuple)
2959  ExecMaterializeSlot(newslot);
2960 
2961  if (should_free_new)
2962  heap_freetuple(oldtuple);
2963 
2964  /* signal tuple should be re-fetched if used */
2965  newtuple = NULL;
2966  }
2967  }
2968  if (should_free_trig)
2969  heap_freetuple(trigtuple);
2970 
2971  return true;
2972 }
2973 
/*
 * Queue AFTER ROW triggers for UPDATE; also fires when transition_capture
 * requests capture of the old and/or new tuple into UPDATE transition
 * tables.  The old tuple is fetched from the heap (or taken from
 * fdw_trigtuple) into the trigger old-tuple slot as needed.
 */
2974 void
2976  ItemPointer tupleid,
2977  HeapTuple fdw_trigtuple,
2978  TupleTableSlot *newslot,
2979  List *recheckIndexes,
2980  TransitionCaptureState *transition_capture)
2981 {
2982  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2983 
2984  if ((trigdesc && trigdesc->trig_update_after_row) ||
2985  (transition_capture &&
2986  (transition_capture->tcs_update_old_table ||
2987  transition_capture->tcs_update_new_table)))
2988  {
2989  /*
2990  * Note: if the UPDATE is converted into a DELETE+INSERT as part of
2991  * update-partition-key operation, then this function is also called
2992  * separately for DELETE and INSERT to capture transition table rows.
2993  * In such case, either old tuple or new tuple can be NULL.
2994  */
2995  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
2996 
2997  if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
2998  GetTupleForTrigger(estate,
2999  NULL,
3000  relinfo,
3001  tupleid,
3003  oldslot,
3004  NULL);
3005  else if (fdw_trigtuple != NULL)
3006  ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3007  else
3008  ExecClearTuple(oldslot);
3009 
3011  true, oldslot, newslot, recheckIndexes,
3012  ExecGetAllUpdatedCols(relinfo, estate),
3013  transition_capture);
3014  }
3015 }
3016 
/*
 * Fire INSTEAD OF ROW triggers for UPDATE.
 *
 * Returns false if a trigger returned NULL ("do nothing").  A replacement
 * tuple returned by a trigger is stored back into newslot.
 */
3017 bool
3019  HeapTuple trigtuple, TupleTableSlot *newslot)
3020 {
3021  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3022  TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
3023  HeapTuple newtuple = NULL;
3024  bool should_free;
3025  TriggerData LocTriggerData = {0};
3026  int i;
3027 
3028  LocTriggerData.type = T_TriggerData;
3029  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
3032  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3033 
3034  ExecForceStoreHeapTuple(trigtuple, oldslot, false);
3035 
3036  for (i = 0; i < trigdesc->numtriggers; i++)
3037  {
3038  Trigger *trigger = &trigdesc->triggers[i];
3039  HeapTuple oldtuple;
3040 
3041  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3042  TRIGGER_TYPE_ROW,
3043  TRIGGER_TYPE_INSTEAD,
3044  TRIGGER_TYPE_UPDATE))
3045  continue;
3046  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3047  NULL, oldslot, newslot))
3048  continue;
3049 
/* Materialize the new slot into a heap tuple lazily, only once a trigger fires. */
3050  if (!newtuple)
3051  newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);
3052 
3053  LocTriggerData.tg_trigslot = oldslot;
3054  LocTriggerData.tg_trigtuple = trigtuple;
3055  LocTriggerData.tg_newslot = newslot;
3056  LocTriggerData.tg_newtuple = oldtuple = newtuple;
3057 
3058  LocTriggerData.tg_trigger = trigger;
3059  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3060  i,
3061  relinfo->ri_TrigFunctions,
3062  relinfo->ri_TrigInstrument,
3063  GetPerTupleMemoryContext(estate));
3064  if (newtuple == NULL)
3065  {
3066  return false; /* "do nothing" */
3067  }
3068  else if (newtuple != oldtuple)
3069  {
3070  ExecForceStoreHeapTuple(newtuple, newslot, false);
3071 
3072  if (should_free)
3073  heap_freetuple(oldtuple);
3074 
3075  /* signal tuple should be re-fetched if used */
3076  newtuple = NULL;
3077  }
3078  }
3079 
3080  return true;
3081 }
3082 
3083 void
3085 {
3086  TriggerDesc *trigdesc;
3087  int i;
3088  TriggerData LocTriggerData = {0};
3089 
3090  trigdesc = relinfo->ri_TrigDesc;
3091 
3092  if (trigdesc == NULL)
3093  return;
3094  if (!trigdesc->trig_truncate_before_statement)
3095  return;
3096 
3097  LocTriggerData.type = T_TriggerData;
3098  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3100  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3101 
3102  for (i = 0; i < trigdesc->numtriggers; i++)
3103  {
3104  Trigger *trigger = &trigdesc->triggers[i];
3105  HeapTuple newtuple;
3106 
3107  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3108  TRIGGER_TYPE_STATEMENT,
3109  TRIGGER_TYPE_BEFORE,
3110  TRIGGER_TYPE_TRUNCATE))
3111  continue;
3112  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3113  NULL, NULL, NULL))
3114  continue;
3115 
3116  LocTriggerData.tg_trigger = trigger;
3117  newtuple = ExecCallTriggerFunc(&LocTriggerData,
3118  i,
3119  relinfo->ri_TrigFunctions,
3120  relinfo->ri_TrigInstrument,
3121  GetPerTupleMemoryContext(estate));
3122 
3123  if (newtuple)
3124  ereport(ERROR,
3125  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3126  errmsg("BEFORE STATEMENT trigger cannot return a value")));
3127  }
3128 }
3129 
3130 void
3132 {
3133  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3134 
3135  if (trigdesc && trigdesc->trig_truncate_after_statement)
3137  false, NULL, NULL, NIL, NULL, NULL);
3138 }
3139 
3140 
3141 /*
3142  * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3143  */
3144 static bool
3146  EPQState *epqstate,
3147  ResultRelInfo *relinfo,
3148  ItemPointer tid,
3149  LockTupleMode lockmode,
3150  TupleTableSlot *oldslot,
3151  TupleTableSlot **epqslot)
3152 {
3153  Relation relation = relinfo->ri_RelationDesc;
3154 
3155  if (epqslot != NULL)
3156  {
3157  TM_Result test;
3158  TM_FailureData tmfd;
3159  int lockflags = 0;
3160 
3161  *epqslot = NULL;
3162 
3163  /* caller must pass an epqstate if EvalPlanQual is possible */
3164  Assert(epqstate != NULL);
3165 
3166  /*
3167  * lock tuple for update
3168  */
3170  lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
3171  test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
3172  estate->es_output_cid,
3173  lockmode, LockWaitBlock,
3174  lockflags,
3175  &tmfd);
3176 
3177  switch (test)
3178  {
3179  case TM_SelfModified:
3180 
3181  /*
3182  * The target tuple was already updated or deleted by the
3183  * current command, or by a later command in the current
3184  * transaction. We ignore the tuple in the former case, and
3185  * throw error in the latter case, for the same reasons
3186  * enumerated in ExecUpdate and ExecDelete in
3187  * nodeModifyTable.c.
3188  */
3189  if (tmfd.cmax != estate->es_output_cid)
3190  ereport(ERROR,
3191  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3192  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3193  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3194 
3195  /* treat it as deleted; do not process */
3196  return false;
3197 
3198  case TM_Ok:
3199  if (tmfd.traversed)
3200  {
3201  *epqslot = EvalPlanQual(epqstate,
3202  relation,
3203  relinfo->ri_RangeTableIndex,
3204  oldslot);
3205 
3206  /*
3207  * If PlanQual failed for updated tuple - we must not
3208  * process this tuple!
3209  */
3210  if (TupIsNull(*epqslot))
3211  {
3212  *epqslot = NULL;
3213  return false;
3214  }
3215  }
3216  break;
3217 
3218  case TM_Updated:
3220  ereport(ERROR,
3221  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3222  errmsg("could not serialize access due to concurrent update")));
3223  elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3224  break;
3225 
3226  case TM_Deleted:
3228  ereport(ERROR,
3229  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3230  errmsg("could not serialize access due to concurrent delete")));
3231  /* tuple was deleted */
3232  return false;
3233 
3234  case TM_Invisible:
3235  elog(ERROR, "attempted to lock invisible tuple");
3236  break;
3237 
3238  default:
3239  elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3240  return false; /* keep compiler quiet */
3241  }
3242  }
3243  else
3244  {
3245  /*
3246  * We expect the tuple to be present, thus very simple error handling
3247  * suffices.
3248  */
3249  if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3250  oldslot))
3251  elog(ERROR, "failed to fetch tuple for trigger");
3252  }
3253 
3254  return true;
3255 }
3256 
3257 /*
3258  * Is trigger enabled to fire?
3259  */
3260 static bool
3262  Trigger *trigger, TriggerEvent event,
3263  Bitmapset *modifiedCols,
3264  TupleTableSlot *oldslot, TupleTableSlot *newslot)
3265 {
3266  /* Check replication-role-dependent enable state */
3268  {
3269  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3270  trigger->tgenabled == TRIGGER_DISABLED)
3271  return false;
3272  }
3273  else /* ORIGIN or LOCAL role */
3274  {
3275  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3276  trigger->tgenabled == TRIGGER_DISABLED)
3277  return false;
3278  }
3279 
3280  /*
3281  * Check for column-specific trigger (only possible for UPDATE, and in
3282  * fact we *must* ignore tgattr for other event types)
3283  */
3284  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3285  {
3286  int i;
3287  bool modified;
3288 
3289  modified = false;
3290  for (i = 0; i < trigger->tgnattr; i++)
3291  {
3293  modifiedCols))
3294  {
3295  modified = true;
3296  break;
3297  }
3298  }
3299  if (!modified)
3300  return false;
3301  }
3302 
3303  /* Check for WHEN clause */
3304  if (trigger->tgqual)
3305  {
3306  ExprState **predicate;
3307  ExprContext *econtext;
3308  MemoryContext oldContext;
3309  int i;
3310 
3311  Assert(estate != NULL);
3312 
3313  /*
3314  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3315  * matching element of relinfo->ri_TrigWhenExprs[]
3316  */
3317  i = trigger - relinfo->ri_TrigDesc->triggers;
3318  predicate = &relinfo->ri_TrigWhenExprs[i];
3319 
3320  /*
3321  * If first time through for this WHEN expression, build expression
3322  * nodetrees for it. Keep them in the per-query memory context so
3323  * they'll survive throughout the query.
3324  */
3325  if (*predicate == NULL)
3326  {
3327  Node *tgqual;
3328 
3329  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3330  tgqual = stringToNode(trigger->tgqual);
3331  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3334  /* ExecPrepareQual wants implicit-AND form */
3335  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3336  *predicate = ExecPrepareQual((List *) tgqual, estate);
3337  MemoryContextSwitchTo(oldContext);
3338  }
3339 
3340  /*
3341  * We will use the EState's per-tuple context for evaluating WHEN
3342  * expressions (creating it if it's not already there).
3343  */
3344  econtext = GetPerTupleExprContext(estate);
3345 
3346  /*
3347  * Finally evaluate the expression, making the old and/or new tuples
3348  * available as INNER_VAR/OUTER_VAR respectively.
3349  */
3350  econtext->ecxt_innertuple = oldslot;
3351  econtext->ecxt_outertuple = newslot;
3352  if (!ExecQual(*predicate, econtext))
3353  return false;
3354  }
3355 
3356  return true;
3357 }
3358 
3359 
3360 /* ----------
3361  * After-trigger stuff
3362  *
3363  * The AfterTriggersData struct holds data about pending AFTER trigger events
3364  * during the current transaction tree. (BEFORE triggers are fired
3365  * immediately so we don't need any persistent state about them.) The struct
3366  * and most of its subsidiary data are kept in TopTransactionContext; however
3367  * some data that can be discarded sooner appears in the CurTransactionContext
3368  * of the relevant subtransaction. Also, the individual event records are
3369  * kept in a separate sub-context of TopTransactionContext. This is done
3370  * mainly so that it's easy to tell from a memory context dump how much space
3371  * is being eaten by trigger events.
3372  *
3373  * Because the list of pending events can grow large, we go to some
3374  * considerable effort to minimize per-event memory consumption. The event
3375  * records are grouped into chunks and common data for similar events in the
3376  * same chunk is only stored once.
3377  *
3378  * XXX We need to be able to save the per-event data in a file if it grows too
3379  * large.
3380  * ----------
3381  */
3382 
3383 /* Per-trigger SET CONSTRAINT status */
3385 {
3389 
3391 
3392 /*
3393  * SET CONSTRAINT intra-transaction status.
3394  *
3395  * We make this a single palloc'd object so it can be copied and freed easily.
3396  *
3397  * all_isset and all_isdeferred are used to keep track
3398  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3399  *
3400  * trigstates[] stores per-trigger tgisdeferred settings.
3401  */
3403 {
3406  int numstates; /* number of trigstates[] entries in use */
3407  int numalloc; /* allocated size of trigstates[] */
3410 
3412 
3413 
3414 /*
3415  * Per-trigger-event data
3416  *
3417  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3418  * status bits and up to two tuple CTIDs. Each event record also has an
3419  * associated AfterTriggerSharedData that is shared across all instances of
3420  * similar events within a "chunk".
3421  *
3422  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3423  * fields. Updates of regular tables use two; inserts and deletes of regular
3424  * tables use one; foreign tables always use zero and save the tuple(s) to a
3425  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3426  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3427  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3428  * tuple(s). This permits storing tuples once regardless of the number of
3429  * row-level triggers on a foreign table.
3430  *
3431  * Note that we need triggers on foreign tables to be fired in exactly the
3432  * order they were queued, so that the tuples come out of the tuplestore in
3433  * the right order. To ensure that, we forbid deferrable (constraint)
3434  * triggers on foreign tables. This also ensures that such triggers do not
3435  * get deferred into outer trigger query levels, meaning that it's okay to
3436  * destroy the tuplestore at the end of the query level.
3437  *
3438  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3439  * require no ctid field. We lack the flag bit space to neatly represent that
3440  * distinct case, and it seems unlikely to be worth much trouble.
3441  *
3442  * Note: ats_firing_id is initially zero and is set to something else when
3443  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3444  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3445  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3446  * because all instances of the same type of event in a given event list will
3447  * be fired at the same time, if they were queued between the same firing
3448  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3449  * a new event to an existing AfterTriggerSharedData record.
3450  */
3452 
3453 #define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order bits */
3454 #define AFTER_TRIGGER_DONE 0x10000000
3455 #define AFTER_TRIGGER_IN_PROGRESS 0x20000000
3456 /* bits describing the size and tuple sources of this event */
3457 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3458 #define AFTER_TRIGGER_FDW_FETCH 0x80000000
3459 #define AFTER_TRIGGER_1CTID 0x40000000
3460 #define AFTER_TRIGGER_2CTID 0xC0000000
3461 #define AFTER_TRIGGER_TUP_BITS 0xC0000000
3462 
3464 
3466 {
3467  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3468  Oid ats_tgoid; /* the trigger's ID */
3469  Oid ats_relid; /* the relation it's on */
3470  CommandId ats_firing_id; /* ID for firing cycle */
3471  struct AfterTriggersTableData *ats_table; /* transition table access */
3472  Bitmapset *ats_modifiedcols; /* modified columns */
3474 
3476 
3478 {
3479  TriggerFlags ate_flags; /* status bits and offset to shared data */
3480  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3481  ItemPointerData ate_ctid2; /* new updated tuple */
3483 
3484 /* AfterTriggerEventData, minus ate_ctid2 */
3486 {
3487  TriggerFlags ate_flags; /* status bits and offset to shared data */
3488  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3490 
3491 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
3493 {
3494  TriggerFlags ate_flags; /* status bits and offset to shared data */
3496 
3497 #define SizeofTriggerEvent(evt) \
3498  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3499  sizeof(AfterTriggerEventData) : \
3500  ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3501  sizeof(AfterTriggerEventDataOneCtid) : \
3502  sizeof(AfterTriggerEventDataZeroCtids))
3503 
3504 #define GetTriggerSharedData(evt) \
3505  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3506 
/*
 * To avoid palloc overhead, we keep trigger events in arrays in successively-
 * larger chunks (a slightly more sophisticated version of an expansible
 * array).  The space between CHUNK_DATA_START and freeptr is occupied by
 * AfterTriggerEventData records; the space between endfree and endptr is
 * occupied by AfterTriggerSharedData records.
 */
typedef struct AfterTriggerEventChunk
{
	struct AfterTriggerEventChunk *next;	/* list link */
	char	   *freeptr;		/* start of free space in chunk */
	char	   *endfree;		/* end of free space in chunk */
	char	   *endptr;			/* end of chunk */
	/* event data follows here */
} AfterTriggerEventChunk;

#define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))

/* A list of events */
typedef struct AfterTriggerEventList
{
	AfterTriggerEventChunk *head;
	AfterTriggerEventChunk *tail;
	char	   *tailfree;		/* freeptr of tail chunk */
} AfterTriggerEventList;

/* Macros to help in iterating over a list of events */
#define for_each_chunk(cptr, evtlist) \
	for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
#define for_each_event(eptr, cptr) \
	for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
		 (char *) eptr < (cptr)->freeptr; \
		 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
/* Use this if no special per-chunk processing is needed */
#define for_each_event_chunk(eptr, cptr, evtlist) \
	for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)

/* Macros for iterating from a start point that might not be list start */
#define for_each_chunk_from(cptr) \
	for (; cptr != NULL; cptr = cptr->next)
#define for_each_event_from(eptr, cptr) \
	for (; \
		 (char *) eptr < (cptr)->freeptr; \
		 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3552 
3553 /*
3554  * All per-transaction data for the AFTER TRIGGERS module.
3555  *
3556  * AfterTriggersData has the following fields:
3557  *
3558  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3559  * We mark firable events with the current firing cycle's ID so that we can
3560  * tell which ones to work on. This ensures sane behavior if a trigger
3561  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3562  * only fire those events that weren't already scheduled for firing.
3563  *
3564  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3565  * This is saved and restored across failed subtransactions.
3566  *
3567  * events is the current list of deferred events. This is global across
3568  * all subtransactions of the current transaction. In a subtransaction
3569  * abort, we know that the events added by the subtransaction are at the
3570  * end of the list, so it is relatively easy to discard them. The event
3571  * list chunks themselves are stored in event_cxt.
3572  *
3573  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3574  * (-1 when the stack is empty).
3575  *
3576  * query_stack[query_depth] is the per-query-level data, including these fields:
3577  *
3578  * events is a list of AFTER trigger events queued by the current query.
3579  * None of these are valid until the matching AfterTriggerEndQuery call
3580  * occurs. At that point we fire immediate-mode triggers, and append any
3581  * deferred events to the main events list.
3582  *
3583  * fdw_tuplestore is a tuplestore containing the foreign-table tuples
3584  * needed by events queued by the current query. (Note: we use just one
3585  * tuplestore even though more than one foreign table might be involved.
3586  * This is okay because tuplestores don't really care what's in the tuples
3587  * they store; but it's possible that someday it'd break.)
3588  *
3589  * tables is a List of AfterTriggersTableData structs for target tables
3590  * of the current query (see below).
3591  *
3592  * maxquerydepth is just the allocated length of query_stack.
3593  *
3594  * trans_stack holds per-subtransaction data, including these fields:
3595  *
3596  * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
3597  * state data. Each subtransaction level that modifies that state first
3598  * saves a copy, which we use to restore the state if we abort.
3599  *
3600  * events is a copy of the events head/tail pointers,
3601  * which we use to restore those values during subtransaction abort.
3602  *
3603  * query_depth is the subtransaction-start-time value of query_depth,
3604  * which we similarly use to clean up at subtransaction abort.
3605  *
3606  * firing_counter is the subtransaction-start-time value of firing_counter.
3607  * We use this to recognize which deferred triggers were fired (or marked
3608  * for firing) within an aborted subtransaction.
3609  *
3610  * We use GetCurrentTransactionNestLevel() to determine the correct array
3611  * index in trans_stack. maxtransdepth is the number of allocated entries in
3612  * trans_stack. (By not keeping our own stack pointer, we can avoid trouble
3613  * in cases where errors during subxact abort cause multiple invocations
3614  * of AfterTriggerEndSubXact() at the same nesting depth.)
3615  *
3616  * We create an AfterTriggersTableData struct for each target table of the
3617  * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
3618  * either transition tables or statement-level triggers. This is used to
3619  * hold the relevant transition tables, as well as info tracking whether
3620  * we already queued the statement triggers. (We use that info to prevent
3621  * firing the same statement triggers more than once per statement, or really
3622  * once per transition table set.) These structs, along with the transition
3623  * table tuplestores, live in the (sub)transaction's CurTransactionContext.
3624  * That's sufficient lifespan because we don't allow transition tables to be
3625  * used by deferrable triggers, so they only need to survive until
3626  * AfterTriggerEndQuery.
3627  */
3631 
3632 typedef struct AfterTriggersData
3633 {
3634  CommandId firing_counter; /* next firing ID to assign */
3635  SetConstraintState state; /* the active S C state */
3636  AfterTriggerEventList events; /* deferred-event list */
3637  MemoryContext event_cxt; /* memory context for events, if any */
3638 
3639  /* per-query-level data: */
3640  AfterTriggersQueryData *query_stack; /* array of structs shown below */
3641  int query_depth; /* current index in above array */
3642  int maxquerydepth; /* allocated len of above array */
3643 
3644  /* per-subtransaction-level data: */
3645  AfterTriggersTransData *trans_stack; /* array of structs shown below */
3646  int maxtransdepth; /* allocated len of above array */
3648 
3650 {
3651  AfterTriggerEventList events; /* events pending from this query */
3652  Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */
3653  List *tables; /* list of AfterTriggersTableData, see below */
3654 };
3655 
3657 {
3658  /* these fields are just for resetting at subtrans abort: */
3659  SetConstraintState state; /* saved S C state, or NULL if not yet saved */
3660  AfterTriggerEventList events; /* saved list pointer */
3661  int query_depth; /* saved query_depth */
3662  CommandId firing_counter; /* saved firing_counter */
3663 };
3664 
3666 {
3667  /* relid + cmdType form the lookup key for these structs: */
3668  Oid relid; /* target table's OID */
3669  CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */
3670  bool closed; /* true when no longer OK to add tuples */
3671  bool before_trig_done; /* did we already queue BS triggers? */
3672  bool after_trig_done; /* did we already queue AS triggers? */
3673  AfterTriggerEventList after_trig_events; /* if so, saved list pointer */
3674  Tuplestorestate *old_tuplestore; /* "old" transition table, if any */
3675  Tuplestorestate *new_tuplestore; /* "new" transition table, if any */
3676  TupleTableSlot *storeslot; /* for converting to tuplestore's format */
3677 };
3678 
3680 
3681 static void AfterTriggerExecute(EState *estate,
3682  AfterTriggerEvent event,
3683  ResultRelInfo *relInfo,
3684  TriggerDesc *trigdesc,
3685  FmgrInfo *finfo,
3686  Instrumentation *instr,
3687  MemoryContext per_tuple_context,
3688  TupleTableSlot *trig_tuple_slot1,
3689  TupleTableSlot *trig_tuple_slot2);
3691  CmdType cmdType);
3693  TupleDesc tupdesc);
3695 static SetConstraintState SetConstraintStateCreate(int numalloc);
3698  Oid tgoid, bool tgisdeferred);
3699 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3700 
3701 
3702 /*
3703  * Get the FDW tuplestore for the current trigger query level, creating it
3704  * if necessary.
3705  */
3706 static Tuplestorestate *
3708 {
3709  Tuplestorestate *ret;
3710 
3711  ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
3712  if (ret == NULL)
3713  {
3714  MemoryContext oldcxt;
3715  ResourceOwner saveResourceOwner;
3716 
3717  /*
3718  * Make the tuplestore valid until end of subtransaction. We really
3719  * only need it until AfterTriggerEndQuery().
3720  */
3722  saveResourceOwner = CurrentResourceOwner;
3724 
3725  ret = tuplestore_begin_heap(false, false, work_mem);
3726 
3727  CurrentResourceOwner = saveResourceOwner;
3728  MemoryContextSwitchTo(oldcxt);
3729 
3730  afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
3731  }
3732 
3733  return ret;
3734 }
3735 
3736 /* ----------
3737  * afterTriggerCheckState()
3738  *
3739  * Returns true if the trigger event is actually in state DEFERRED.
3740  * ----------
3741  */
3742 static bool
3743 afterTriggerCheckState(AfterTriggerShared evtshared)
3744 {
3745  Oid tgoid = evtshared->ats_tgoid;
3746  SetConstraintState state = afterTriggers.state;
3747  int i;
3748 
3749  /*
3750  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3751  * constraints declared NOT DEFERRABLE), the state is always false.
3752  */
3753  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3754  return false;
3755 
3756  /*
3757  * If constraint state exists, SET CONSTRAINTS might have been executed
3758  * either for this trigger or for all triggers.
3759  */
3760  if (state != NULL)
3761  {
3762  /* Check for SET CONSTRAINTS for this specific trigger. */
3763  for (i = 0; i < state->numstates; i++)
3764  {
3765  if (state->trigstates[i].sct_tgoid == tgoid)
3766  return state->trigstates[i].sct_tgisdeferred;
3767  }
3768 
3769  /* Check for SET CONSTRAINTS ALL. */
3770  if (state->all_isset)
3771  return state->all_isdeferred;
3772  }
3773 
3774  /*
3775  * Otherwise return the default state for the trigger.
3776  */
3777  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3778 }
3779 
3780 
3781 /* ----------
3782  * afterTriggerAddEvent()
3783  *
3784  * Add a new trigger event to the specified queue.
3785  * The passed-in event data is copied.
3786  * ----------
3787  */
3788 static void
3790  AfterTriggerEvent event, AfterTriggerShared evtshared)
3791 {
3792  Size eventsize = SizeofTriggerEvent(event);
3793  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3794  AfterTriggerEventChunk *chunk;
3795  AfterTriggerShared newshared;
3796  AfterTriggerEvent newevent;
3797 
3798  /*
3799  * If empty list or not enough room in the tail chunk, make a new chunk.
3800  * We assume here that a new shared record will always be needed.
3801  */
3802  chunk = events->tail;
3803  if (chunk == NULL ||
3804  chunk->endfree - chunk->freeptr < needed)
3805  {
3806  Size chunksize;
3807 
3808  /* Create event context if we didn't already */
3809  if (afterTriggers.event_cxt == NULL)
3810  afterTriggers.event_cxt =
3812  "AfterTriggerEvents",
3814 
3815  /*
3816  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3817  * These numbers are fairly arbitrary, though there is a hard limit at
3818  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3819  * shared records using the available space in ate_flags. Another
3820  * constraint is that if the chunk size gets too huge, the search loop
3821  * below would get slow given a (not too common) usage pattern with
3822  * many distinct event types in a chunk. Therefore, we double the
3823  * preceding chunk size only if there weren't too many shared records
3824  * in the preceding chunk; otherwise we halve it. This gives us some
3825  * ability to adapt to the actual usage pattern of the current query
3826  * while still having large chunk sizes in typical usage. All chunk
3827  * sizes used should be MAXALIGN multiples, to ensure that the shared
3828  * records will be aligned safely.
3829  */
3830 #define MIN_CHUNK_SIZE 1024
3831 #define MAX_CHUNK_SIZE (1024*1024)
3832 
3833 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3834 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3835 #endif
3836 
3837  if (chunk == NULL)
3838  chunksize = MIN_CHUNK_SIZE;
3839  else
3840  {
3841  /* preceding chunk size... */
3842  chunksize = chunk->endptr - (char *) chunk;
3843  /* check number of shared records in preceding chunk */
3844  if ((chunk->endptr - chunk->endfree) <=
3845  (100 * sizeof(AfterTriggerSharedData)))
3846  chunksize *= 2; /* okay, double it */
3847  else
3848  chunksize /= 2; /* too many shared records */
3849  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
3850  }
3851  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
3852  chunk->next = NULL;
3853  chunk->freeptr = CHUNK_DATA_START(chunk);
3854  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
3855  Assert(chunk->endfree - chunk->freeptr >= needed);
3856 
3857  if (events->head == NULL)
3858  events->head = chunk;
3859  else
3860  events->tail->next = chunk;
3861  events->tail = chunk;
3862  /* events->tailfree is now out of sync, but we'll fix it below */
3863  }
3864 
3865  /*
3866  * Try to locate a matching shared-data record already in the chunk. If
3867  * none, make a new one.
3868  */
3869  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
3870  (char *) newshared >= chunk->endfree;
3871  newshared--)
3872  {
3873  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
3874  newshared->ats_relid == evtshared->ats_relid &&
3875  newshared->ats_event == evtshared->ats_event &&
3876  newshared->ats_table == evtshared->ats_table &&
3877  newshared->ats_firing_id == 0)
3878  break;
3879  }
3880  if ((char *) newshared < chunk->endfree)
3881  {
3882  *newshared = *evtshared;
3883  newshared->ats_firing_id = 0; /* just to be sure */
3884  chunk->endfree = (char *) newshared;
3885  }
3886 
3887  /* Insert the data */
3888  newevent = (AfterTriggerEvent) chunk->freeptr;
3889  memcpy(newevent, event, eventsize);
3890  /* ... and link the new event to its shared record */
3891  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
3892  newevent->ate_flags |= (char *) newshared - (char *) newevent;
3893 
3894  chunk->freeptr += eventsize;
3895  events->tailfree = chunk->freeptr;
3896 }
3897 
3898 /* ----------
3899  * afterTriggerFreeEventList()
3900  *
3901  * Free all the event storage in the given list.
3902  * ----------
3903  */
3904 static void
3906 {
3907  AfterTriggerEventChunk *chunk;
3908 
3909  while ((chunk = events->head) != NULL)
3910  {
3911  events->head = chunk->next;
3912  pfree(chunk);
3913  }
3914  events->tail = NULL;
3915  events->tailfree = NULL;
3916 }
3917 
3918 /* ----------
3919  * afterTriggerRestoreEventList()
3920  *
3921  * Restore an event list to its prior length, removing all the events
3922  * added since it had the value old_events.
3923  * ----------
3924  */
3925 static void
3927  const AfterTriggerEventList *old_events)
3928 {
3929  AfterTriggerEventChunk *chunk;
3930  AfterTriggerEventChunk *next_chunk;
3931 
3932  if (old_events->tail == NULL)
3933  {
3934  /* restoring to a completely empty state, so free everything */
3935  afterTriggerFreeEventList(events);
3936  }
3937  else
3938  {
3939  *events = *old_events;
3940  /* free any chunks after the last one we want to keep */
3941  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3942  {
3943  next_chunk = chunk->next;
3944  pfree(chunk);
3945  }
3946  /* and clean up the tail chunk to be the right length */
3947  events->tail->next = NULL;
3948  events->tail->freeptr = events->tailfree;
3949 
3950  /*
3951  * We don't make any effort to remove now-unused shared data records.
3952  * They might still be useful, anyway.
3953  */
3954  }
3955 }
3956 
3957 /* ----------
3958  * afterTriggerDeleteHeadEventChunk()
3959  *
3960  * Remove the first chunk of events from the query level's event list.
3961  * Keep any event list pointers elsewhere in the query level's data
3962  * structures in sync.
3963  * ----------
3964  */
3965 static void
/* NOTE(review): dropped signature line (3966); should read
 * "afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)" -- confirm. */
3967 {
3968  AfterTriggerEventChunk *target = qs->events.head;
3969  ListCell *lc;
3970 
/* Caller must guarantee at least two chunks: we delete the head and the
 * list must remain non-empty afterward. */
3971  Assert(target && target->next);
3972 
3973  /*
3974  * First, update any pointers in the per-table data, so that they won't be
3975  * dangling. Resetting obsoleted pointers to NULL will make
3976  * cancel_prior_stmt_triggers start from the list head, which is fine.
3977  */
3978  foreach(lc, qs->tables)
3979  {
/* NOTE(review): the declaration line (3980) that initializes "table" from
 * lfirst(lc) was dropped by the extraction -- confirm upstream. */
3981 
3982  if (table->after_trig_done &&
3983  table->after_trig_events.tail == target)
3984  {
3985  table->after_trig_events.head = NULL;
3986  table->after_trig_events.tail = NULL;
3987  table->after_trig_events.tailfree = NULL;
3988  }
3989  }
3990 
3991  /* Now we can flush the head chunk */
3992  qs->events.head = target->next;
3993  pfree(target);
3994 }
3995 
3996 
3997 /* ----------
3998  * AfterTriggerExecute()
3999  *
4000  * Fetch the required tuples back from the heap and fire one
4001  * single trigger function.
4002  *
4003  * Frequently, this will be fired many times in a row for triggers of
4004  * a single relation. Therefore, we cache the open relation and provide
4005  * fmgr lookup cache space at the caller level. (For triggers fired at
4006  * the end of a query, we can even piggyback on the executor's state.)
4007  *
4008  * event: event currently being fired.
4009  * rel: open relation for event.
4010  * trigdesc: working copy of rel's trigger info.
4011  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4012  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4013  * or NULL if no instrumentation is wanted.
4014  * per_tuple_context: memory context to call trigger function in.
4015  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4016  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
4017  * ----------
4018  */
4019 static void
/* NOTE(review): dropped signature line (4020); should read
 * "AfterTriggerExecute(EState *estate," -- confirm against upstream source. */
4021  AfterTriggerEvent event,
4022  ResultRelInfo *relInfo,
4023  TriggerDesc *trigdesc,
4024  FmgrInfo *finfo, Instrumentation *instr,
4025  MemoryContext per_tuple_context,
4026  TupleTableSlot *trig_tuple_slot1,
4027  TupleTableSlot *trig_tuple_slot2)
4028 {
4029  Relation rel = relInfo->ri_RelationDesc;
4030  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4031  Oid tgoid = evtshared->ats_tgoid;
4032  TriggerData LocTriggerData = {0};
4033  HeapTuple rettuple;
4034  int tgindx;
4035  bool should_free_trig = false;
4036  bool should_free_new = false;
4037 
4038  /*
4039  * Locate trigger in trigdesc.
4040  */
4041  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4042  {
4043  if (trigdesc->triggers[tgindx].tgoid == tgoid)
4044  {
4045  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4046  break;
4047  }
4048  }
4049  if (LocTriggerData.tg_trigger == NULL)
4050  elog(ERROR, "could not find trigger %u", tgoid);
4051 
4052  /*
4053  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4054  * to include time spent re-fetching tuples in the trigger cost.
4055  */
4056  if (instr)
4057  InstrStartNode(instr + tgindx);
4058 
4059  /*
4060  * Fetch the required tuple(s).
4061  */
4062  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4063  {
/* NOTE(review): the case label line (4064) was dropped by the extraction;
 * presumably "case AFTER_TRIGGER_FDW_FETCH:" -- confirm upstream. */
4065  {
4066  Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4067 
4068  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4069  trig_tuple_slot1))
4070  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4071 
4072  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
/* NOTE(review): dropped continuation line (4073); presumably
 * "TRIGGER_EVENT_UPDATE &&" -- confirm upstream. */
4074  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4075  trig_tuple_slot2))
4076  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4077  }
4078  /* fall through */
/* NOTE(review): dropped case label line (4079); presumably
 * "case AFTER_TRIGGER_FDW_REUSE:" -- confirm upstream. */
4080 
4081  /*
4082  * Store tuple in the slot so that tg_trigtuple does not reference
4083  * tuplestore memory. (It is formally possible for the trigger
4084  * function to queue trigger events that add to the same
4085  * tuplestore, which can push other tuples out of memory.) The
4086  * distinction is academic, because we start with a minimal tuple
4087  * that is stored as a heap tuple, constructed in different memory
4088  * context, in the slot anyway.
4089  */
4090  LocTriggerData.tg_trigslot = trig_tuple_slot1;
4091  LocTriggerData.tg_trigtuple =
4092  ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
4093 
4094  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
/* NOTE(review): dropped line (4095); presumably "TRIGGER_EVENT_UPDATE)"
 * -- confirm upstream. */
4096  {
4097  LocTriggerData.tg_newslot = trig_tuple_slot2;
4098  LocTriggerData.tg_newtuple =
4099  ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
4100  }
4101  else
4102  {
4103  LocTriggerData.tg_newtuple = NULL;
4104  }
4105  break;
4106 
4107  default:
/* Non-FDW case: re-fetch the tuple(s) from the heap by TID, using
 * SnapshotAny since the tuple may be invisible to normal snapshots. */
4108  if (ItemPointerIsValid(&(event->ate_ctid1)))
4109  {
4110  LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
4111 
4112  if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid1),
4113  SnapshotAny,
4114  LocTriggerData.tg_trigslot))
4115  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4116  LocTriggerData.tg_trigtuple =
4117  ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
4118  }
4119  else
4120  {
4121  LocTriggerData.tg_trigtuple = NULL;
4122  }
4123 
4124  /* don't touch ctid2 if not there */
4125  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
/* NOTE(review): dropped line (4126); presumably "AFTER_TRIGGER_2CTID &&"
 * -- confirm upstream. */
4127  ItemPointerIsValid(&(event->ate_ctid2)))
4128  {
4129  LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
4130 
4131  if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid2),
4132  SnapshotAny,
4133  LocTriggerData.tg_newslot))
4134  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4135  LocTriggerData.tg_newtuple =
4136  ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
4137  }
4138  else
4139  {
4140  LocTriggerData.tg_newtuple = NULL;
4141  }
4142  }
4143 
4144  /*
4145  * Set up the tuplestore information to let the trigger have access to
4146  * transition tables. When we first make a transition table available to
4147  * a trigger, mark it "closed" so that it cannot change anymore. If any
4148  * additional events of the same type get queued in the current trigger
4149  * query level, they'll go into new transition tables.
4150  */
4151  LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4152  if (evtshared->ats_table)
4153  {
4154  if (LocTriggerData.tg_trigger->tgoldtable)
4155  {
4156  LocTriggerData.tg_oldtable = evtshared->ats_table->old_tuplestore;
4157  evtshared->ats_table->closed = true;
4158  }
4159 
4160  if (LocTriggerData.tg_trigger->tgnewtable)
4161  {
4162  LocTriggerData.tg_newtable = evtshared->ats_table->new_tuplestore;
4163  evtshared->ats_table->closed = true;
4164  }
4165  }
4166 
4167  /*
4168  * Setup the remaining trigger information
4169  */
4170  LocTriggerData.type = T_TriggerData;
4171  LocTriggerData.tg_event =
/* NOTE(review): dropped expression line (4172); presumably
 * "evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);"
 * -- confirm upstream. */
4173  LocTriggerData.tg_relation = rel;
4174  if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4175  LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4176 
4177  MemoryContextReset(per_tuple_context);
4178 
4179  /*
4180  * Call the trigger and throw away any possibly returned updated tuple.
4181  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4182  */
4183  rettuple = ExecCallTriggerFunc(&LocTriggerData,
4184  tgindx,
4185  finfo,
4186  NULL,
4187  per_tuple_context);
/* Only free the returned tuple when it isn't one of the tuples we passed
 * in; those are freed separately below via should_free_trig/new. */
4188  if (rettuple != NULL &&
4189  rettuple != LocTriggerData.tg_trigtuple &&
4190  rettuple != LocTriggerData.tg_newtuple)
4191  heap_freetuple(rettuple);
4192 
4193  /*
4194  * Release resources
4195  */
4196  if (should_free_trig)
4197  heap_freetuple(LocTriggerData.tg_trigtuple);
4198  if (should_free_new)
4199  heap_freetuple(LocTriggerData.tg_newtuple);
4200 
4201  /* don't clear slots' contents if foreign table */
4202  if (trig_tuple_slot1 == NULL)
4203  {
4204  if (LocTriggerData.tg_trigslot)
4205  ExecClearTuple(LocTriggerData.tg_trigslot);
4206  if (LocTriggerData.tg_newslot)
4207  ExecClearTuple(LocTriggerData.tg_newslot);
4208  }
4209 
4210  /*
4211  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4212  * one "tuple returned" (really the number of firings).
4213  */
4214  if (instr)
4215  InstrStopNode(instr + tgindx, 1);
4216 }
4217 
4218 
4219 /*
4220  * afterTriggerMarkEvents()
4221  *
4222  * Scan the given event list for not yet invoked events. Mark the ones
4223  * that can be invoked now with the current firing ID.
4224  *
4225  * If move_list isn't NULL, events that are not to be invoked now are
4226  * transferred to move_list.
4227  *
4228  * When immediate_only is true, do not invoke currently-deferred triggers.
4229  * (This will be false only at main transaction exit.)
4230  *
4231  * Returns true if any invokable events were found.
4232  */
4233 static bool
/* NOTE(review): dropped signature line (4234); should read
 * "afterTriggerMarkEvents(AfterTriggerEventList *events," -- confirm. */
4235  AfterTriggerEventList *move_list,
4236  bool immediate_only)
4237 {
4238  bool found = false;
4239  bool deferred_found = false;
4240  AfterTriggerEvent event;
4241  AfterTriggerEventChunk *chunk;
4242 
4243  for_each_event_chunk(event, chunk, *events)
4244  {
4245  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4246  bool defer_it = false;
4247 
4248  if (!(event->ate_flags &
/* NOTE(review): dropped mask line (4249); presumably
 * "(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))" -- confirm. */
4250  {
4251  /*
4252  * This trigger hasn't been called or scheduled yet. Check if we
4253  * should call it now.
4254  */
4255  if (immediate_only && afterTriggerCheckState(evtshared))
4256  {
4257  defer_it = true;
4258  }
4259  else
4260  {
4261  /*
4262  * Mark it as to be fired in this firing cycle.
4263  */
4264  evtshared->ats_firing_id = afterTriggers.firing_counter;
4265  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
4266  found = true;
4267  }
4268  }
4269 
4270  /*
4271  * If it's deferred, move it to move_list, if requested.
4272  */
4273  if (defer_it && move_list != NULL)
4274  {
4275  deferred_found = true;
4276  /* add it to move_list */
4277  afterTriggerAddEvent(move_list, event, evtshared);
4278  /* mark original copy "done" so we don't do it again */
4279  event->ate_flags |= AFTER_TRIGGER_DONE;
4280  }
4281  }
4282 
4283  /*
4284  * We could allow deferred triggers if, before the end of the
4285  * security-restricted operation, we were to verify that a SET CONSTRAINTS
4286  * ... IMMEDIATE has fired all such triggers. For now, don't bother.
4287  */
4288  if (deferred_found && InSecurityRestrictedOperation())
4289  ereport(ERROR,
4290  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
4291  errmsg("cannot fire deferred trigger within security-restricted operation")));
4292 
4293  return found;
4294 }
4295 
4296 /*
4297  * afterTriggerInvokeEvents()
4298  *
4299  * Scan the given event list for events that are marked as to be fired
4300  * in the current firing cycle, and fire them.
4301  *
4302  * If estate isn't NULL, we use its result relation info to avoid repeated
4303  * openings and closing of trigger target relations. If it is NULL, we
4304  * make one locally to cache the info in case there are multiple trigger
4305  * events per rel.
4306  *
4307  * When delete_ok is true, it's safe to delete fully-processed events.
4308  * (We are not very tense about that: we simply reset a chunk to be empty
4309  * if all its events got fired. The objective here is just to avoid useless
4310  * rescanning of events when a trigger queues new events during transaction
4311  * end, so it's not necessary to worry much about the case where only
4312  * some events are fired.)
4313  *
4314  * Returns true if no unfired events remain in the list (this allows us
4315  * to avoid repeating afterTriggerMarkEvents).
4316  */
4317 static bool
/* NOTE(review): dropped signature line (4318); should read
 * "afterTriggerInvokeEvents(AfterTriggerEventList *events," -- confirm. */
4319  CommandId firing_id,
4320  EState *estate,
4321  bool delete_ok)
4322 {
4323  bool all_fired = true;
4324  AfterTriggerEventChunk *chunk;
4325  MemoryContext per_tuple_context;
4326  bool local_estate = false;
4327  ResultRelInfo *rInfo = NULL;
4328  Relation rel = NULL;
4329  TriggerDesc *trigdesc = NULL;
4330  FmgrInfo *finfo = NULL;
4331  Instrumentation *instr = NULL;
4332  TupleTableSlot *slot1 = NULL,
4333  *slot2 = NULL;
4334 
4335  /* Make a local EState if need be */
4336  if (estate == NULL)
4337  {
4338  estate = CreateExecutorState();
4339  local_estate = true;
4340  }
4341 
4342  /* Make a per-tuple memory context for trigger function calls */
4343  per_tuple_context =
/* NOTE(review): the AllocSetContextCreate() call line (4344) and its size
 * flags line (4346) were dropped by the extraction -- confirm upstream. */
4345  "AfterTriggerTupleContext",
4347 
4348  for_each_chunk(chunk, *events)
4349  {
4350  AfterTriggerEvent event;
4351  bool all_fired_in_chunk = true;
4352 
4353  for_each_event(event, chunk)
4354  {
4355  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4356 
4357  /*
4358  * Is it one for me to fire?
4359  */
4360  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4361  evtshared->ats_firing_id == firing_id)
4362  {
4363  /*
4364  * So let's fire it... but first, find the correct relation if
4365  * this is not the same relation as before.
4366  */
4367  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4368  {
4369  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
4370  rel = rInfo->ri_RelationDesc;
4371  /* Catch calls with insufficient relcache refcounting */
/* NOTE(review): dropped Assert line (4372) -- confirm upstream. */
4373  trigdesc = rInfo->ri_TrigDesc;
4374  finfo = rInfo->ri_TrigFunctions;
4375  instr = rInfo->ri_TrigInstrument;
4376  if (slot1 != NULL)
4377  {
/* NOTE(review): dropped ExecDropSingleTupleTableSlot() calls for
 * slot1/slot2 (lines 4378-4379) -- confirm upstream. */
4380  slot1 = slot2 = NULL;
4381  }
4382  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4383  {
/* NOTE(review): the TTSOps argument lines (4385, 4387) of these two
 * MakeSingleTupleTableSlot() calls were dropped -- confirm upstream. */
4384  slot1 = MakeSingleTupleTableSlot(rel->rd_att,
4386  slot2 = MakeSingleTupleTableSlot(rel->rd_att,
4388  }
4389  if (trigdesc == NULL) /* should not happen */
4390  elog(ERROR, "relation %u has no triggers",
4391  evtshared->ats_relid);
4392  }
4393 
4394  /*
4395  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4396  * still set, so recursive examinations of the event list
4397  * won't try to re-fire it.
4398  */
4399  AfterTriggerExecute(estate, event, rInfo, trigdesc, finfo, instr,
4400  per_tuple_context, slot1, slot2);
4401 
4402  /*
4403  * Mark the event as done.
4404  */
4405  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4406  event->ate_flags |= AFTER_TRIGGER_DONE;
4407  }
4408  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4409  {
4410  /* something remains to be done */
4411  all_fired = all_fired_in_chunk = false;
4412  }
4413  }
4414 
4415  /* Clear the chunk if delete_ok and nothing left of interest */
4416  if (delete_ok && all_fired_in_chunk)
4417  {
4418  chunk->freeptr = CHUNK_DATA_START(chunk);
4419  chunk->endfree = chunk->endptr;
4420 
4421  /*
4422  * If it's last chunk, must sync event list's tailfree too. Note
4423  * that delete_ok must NOT be passed as true if there could be
4424  * additional AfterTriggerEventList values pointing at this event
4425  * list, since we'd fail to fix their copies of tailfree.
4426  */
4427  if (chunk == events->tail)
4428  events->tailfree = chunk->freeptr;
4429  }
4430  }
4431  if (slot1 != NULL)
4432  {
/* NOTE(review): dropped slot-release calls (lines 4433-4434), presumably
 * ExecDropSingleTupleTableSlot(slot1/slot2) -- confirm upstream. */
4435  }
4436 
4437  /* Release working resources */
4438  MemoryContextDelete(per_tuple_context);
4439 
4440  if (local_estate)
4441  {
4442  ExecCloseResultRelations(estate);
4443  ExecResetTupleTable(estate->es_tupleTable, false);
4444  FreeExecutorState(estate);
4445  }
4446 
4447  return all_fired;
4448 }
4449 
4450 
4451 /*
4452  * GetAfterTriggersTableData
4453  *
4454  * Find or create an AfterTriggersTableData struct for the specified
4455  * trigger event (relation + operation type). Ignore existing structs
4456  * marked "closed"; we don't want to put any additional tuples into them,
4457  * nor change their stmt-triggers-fired state.
4458  *
4459  * Note: the AfterTriggersTableData list is allocated in the current
4460  * (sub)transaction's CurTransactionContext. This is OK because
4461  * we don't need it to live past AfterTriggerEndQuery.
4462  */
4463 static AfterTriggersTableData *
/* NOTE(review): dropped signature line (4464); should read
 * "GetAfterTriggersTableData(Oid relid, CmdType cmdType)" -- confirm. */
4465 {
4466  AfterTriggersTableData *table;
/* NOTE(review): dropped declaration line (4467), presumably the
 * AfterTriggersQueryData *qs declaration -- confirm upstream. */
4468  MemoryContext oldcxt;
4469  ListCell *lc;
4470 
4471  /* Caller should have ensured query_depth is OK. */
4472  Assert(afterTriggers.query_depth >= 0 &&
4473  afterTriggers.query_depth < afterTriggers.maxquerydepth);
4474  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4475 
/* Reuse an existing (open) struct for the same relation + command type. */
4476  foreach(lc, qs->tables)
4477  {
4478  table = (AfterTriggersTableData *) lfirst(lc);
4479  if (table->relid == relid && table->cmdType == cmdType &&
4480  !table->closed)
4481  return table;
4482  }
4483 
/* NOTE(review): dropped lines (4484, 4486): the switch to
 * CurTransactionContext and the palloc0 of the new struct -- confirm. */
4485 
4487  table->relid = relid;
4488  table->cmdType = cmdType;
4489  qs->tables = lappend(qs->tables, table);
4490 
4491  MemoryContextSwitchTo(oldcxt);
4492 
4493  return table;
4494 }
4495 
4496 /*
4497  * Returns a TupleTableSlot suitable for holding the tuples to be put
4498  * into AfterTriggersTableData's transition table tuplestores.
4499  */
4500 static TupleTableSlot *
/* NOTE(review): dropped signature line (4501); should read
 * "GetAfterTriggersStoreSlot(AfterTriggersTableData *table," -- confirm. */
4502  TupleDesc tupdesc)
4503 {
4504  /* Create it if not already done. */
4505  if (!table->storeslot)
4506  {
4507  MemoryContext oldcxt;
4508 
4509  /*
4510  * We only need this slot only until AfterTriggerEndQuery, but making
4511  * it last till end-of-subxact is good enough. It'll be freed by
4512  * AfterTriggerFreeQuery().
4513  */
/* NOTE(review): dropped line (4514), presumably the
 * MemoryContextSwitchTo(CurTransactionContext) assignment -- confirm. */
4515  table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
4516  MemoryContextSwitchTo(oldcxt);
4517  }
4518 
4519  return table->storeslot;
4520 }
4521 
4522 /*
4523  * MakeTransitionCaptureState
4524  *
4525  * Make a TransitionCaptureState object for the given TriggerDesc, target
4526  * relation, and operation type. The TCS object holds all the state needed
4527  * to decide whether to capture tuples in transition tables.
4528  *
4529  * If there are no triggers in 'trigdesc' that request relevant transition
4530  * tables, then return NULL.
4531  *
4532  * The resulting object can be passed to the ExecAR* functions. When
4533  * dealing with child tables, the caller can set tcs_original_insert_tuple
4534  * to avoid having to reconstruct the original tuple in the root table's
4535  * format.
4536  *
4537  * Note that we copy the flags from a parent table into this struct (rather
4538  * than subsequently using the relation's TriggerDesc directly) so that we can
4539  * use it to control collection of transition tuples from child tables.
4540  *
4541  * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4542  * on the same table during one query should share one transition table.
4543  * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4544  * looked up using the table OID + CmdType, and are merely referenced by
4545  * the TransitionCaptureState objects we hand out to callers.
4546  */
/* NOTE(review): dropped return-type/signature lines (4547-4548); should read
 * "TransitionCaptureState * / MakeTransitionCaptureState(TriggerDesc *trigdesc,
 * Oid relid, CmdType cmdType)" -- confirm against upstream source. */
4549 {
/* NOTE(review): dropped declaration line (4550), presumably
 * "TransitionCaptureState *state;" -- confirm upstream. */
4551  bool need_old,
4552  need_new;
4553  AfterTriggersTableData *table;
4554  MemoryContext oldcxt;
4555  ResourceOwner saveResourceOwner;
4556 
4557  if (trigdesc == NULL)
4558  return NULL;
4559 
4560  /* Detect which table(s) we need. */
4561  switch (cmdType)
4562  {
4563  case CMD_INSERT:
4564  need_old = false;
4565  need_new = trigdesc->trig_insert_new_table;
4566  break;
4567  case CMD_UPDATE:
4568  need_old = trigdesc->trig_update_old_table;
4569  need_new = trigdesc->trig_update_new_table;
4570  break;
4571  case CMD_DELETE:
4572  need_old = trigdesc->trig_delete_old_table;
4573  need_new = false;
4574  break;
4575  default:
4576  elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
4577  need_old = need_new = false; /* keep compiler quiet */
4578  break;
4579  }
4580  if (!need_old && !need_new)
4581  return NULL;
4582 
4583  /* Check state, like AfterTriggerSaveEvent. */
4584  if (afterTriggers.query_depth < 0)
4585  elog(ERROR, "MakeTransitionCaptureState() called outside of query");
4586 
4587  /* Be sure we have enough space to record events at this query depth. */
4588  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
/* NOTE(review): dropped call line (4589), presumably
 * "AfterTriggerEnlargeQueryState();" -- confirm upstream. */
4590 
4591  /*
4592  * Find or create an AfterTriggersTableData struct to hold the
4593  * tuplestore(s). If there's a matching struct but it's marked closed,
4594  * ignore it; we need a newer one.
4595  *
4596  * Note: the AfterTriggersTableData list, as well as the tuplestores, are
4597  * allocated in the current (sub)transaction's CurTransactionContext, and
4598  * the tuplestores are managed by the (sub)transaction's resource owner.
4599  * This is sufficient lifespan because we do not allow triggers using
4600  * transition tables to be deferrable; they will be fired during
4601  * AfterTriggerEndQuery, after which it's okay to delete the data.
4602  */
4603  table = GetAfterTriggersTableData(relid, cmdType);
4604 
4605  /* Now create required tuplestore(s), if we don't have them already. */
/* NOTE(review): dropped lines (4606, 4608): the MemoryContextSwitchTo and
 * CurrentResourceOwner assignment -- confirm upstream. */
4607  saveResourceOwner = CurrentResourceOwner;
4609 
4610  if (need_old && table->old_tuplestore == NULL)
4611  table->old_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4612  if (need_new && table->new_tuplestore == NULL)
4613  table->new_tuplestore = tuplestore_begin_heap(false, false, work_mem);
4614 
4615  CurrentResourceOwner = saveResourceOwner;
4616  MemoryContextSwitchTo(oldcxt);
4617 
4618  /* Now build the TransitionCaptureState struct, in caller's context */
/* NOTE(review): dropped allocation line (4619), presumably the palloc0 of
 * the TransitionCaptureState -- confirm upstream. */
4620  state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
4621  state->tcs_update_old_table = trigdesc->trig_update_old_table;
4622  state->tcs_update_new_table = trigdesc->trig_update_new_table;
4623  state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
4624  state->tcs_private = table;
4625 
4626  return state;
4627 }
4628 
4629 
4630 /* ----------
4631  * AfterTriggerBeginXact()
4632  *
4633  * Called at transaction start (either BEGIN or implicit for single
4634  * statement outside of transaction block).
4635  * ----------
4636  */
4637 void
/* NOTE(review): dropped signature line (4638); should read
 * "AfterTriggerBeginXact(void)" -- confirm against upstream source. */
4639 {
4640  /*
4641  * Initialize after-trigger state structure to empty
4642  */
4643  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4644  afterTriggers.query_depth = -1;
4645 
4646  /*
4647  * Verify that there is no leftover state remaining. If these assertions
4648  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4649  * up properly.
4650  */
4651  Assert(afterTriggers.state == NULL);
4652  Assert(afterTriggers.query_stack == NULL);
4653  Assert(afterTriggers.maxquerydepth == 0);
4654  Assert(afterTriggers.event_cxt == NULL);
4655  Assert(afterTriggers.events.head == NULL);
4656  Assert(afterTriggers.trans_stack == NULL);
4657  Assert(afterTriggers.maxtransdepth == 0);
4658 }
4659 
4660 
4661 /* ----------
4662  * AfterTriggerBeginQuery()
4663  *
4664  * Called just before we start processing a single query within a
4665  * transaction (or subtransaction). Most of the real work gets deferred
4666  * until somebody actually tries to queue a trigger event.
4667  * ----------
4668  */
4669 void
/* NOTE(review): dropped signature line (4670); should read
 * "AfterTriggerBeginQuery(void)" -- confirm against upstream source. */
4671 {
4672  /* Increase the query stack depth */
4673  afterTriggers.query_depth++;
4674 }
4675 
4676 
4677 /* ----------
4678  * AfterTriggerEndQuery()
4679  *
4680  * Called after one query has been completely processed. At this time
4681  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4682  * transfer deferred trigger events to the global deferred-trigger list.
4683  *
4684  * Note that this must be called BEFORE closing down the executor
4685  * with ExecutorEnd, because we make use of the EState's info about
4686  * target relations. Normally it is called from ExecutorFinish.
4687  * ----------
4688  */
4689 void
/* NOTE(review): dropped signature line (4690); should read
 * "AfterTriggerEndQuery(EState *estate)" -- confirm against upstream. */
4691 {
/* NOTE(review): dropped declaration line (4692), presumably
 * "AfterTriggersQueryData *qs;" -- confirm upstream. */
4693 
4694  /* Must be inside a query, too */
4695  Assert(afterTriggers.query_depth >= 0);
4696 
4697  /*
4698  * If we never even got as far as initializing the event stack, there
4699  * certainly won't be any events, so exit quickly.
4700  */
4701  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4702  {
4703  afterTriggers.query_depth--;
4704  return;
4705  }
4706 
4707  /*
4708  * Process all immediate-mode triggers queued by the query, and move the
4709  * deferred ones to the main list of deferred events.
4710  *
4711  * Notice that we decide which ones will be fired, and put the deferred
4712  * ones on the main list, before anything is actually fired. This ensures
4713  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4714  * IMMEDIATE: all events we have decided to defer will be available for it
4715  * to fire.
4716  *
4717  * We loop in case a trigger queues more events at the same query level.
4718  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4719  * will instead fire any triggers in a dedicated query level. Foreign key
4720  * enforcement triggers do add to the current query level, thanks to their
4721  * passing fire_triggers = false to SPI_execute_snapshot(). Other
4722  * C-language triggers might do likewise.
4723  *
4724  * If we find no firable events, we don't have to increment
4725  * firing_counter.
4726  */
4727  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4728 
4729  for (;;)
4730  {
4731  if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
4732  {
4733  CommandId firing_id = afterTriggers.firing_counter++;
4734  AfterTriggerEventChunk *oldtail = qs->events.tail;
4735 
4736  if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
4737  break; /* all fired */
4738 
4739  /*
4740  * Firing a trigger could result in query_stack being repalloc'd,
4741  * so we must recalculate qs after each afterTriggerInvokeEvents
4742  * call. Furthermore, it's unsafe to pass delete_ok = true here,
4743  * because that could cause afterTriggerInvokeEvents to try to
4744  * access qs->events after the stack has been repalloc'd.
4745  */
4746  qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4747 
4748  /*
4749  * We'll need to scan the events list again. To reduce the cost
4750  * of doing so, get rid of completely-fired chunks. We know that
4751  * all events were marked IN_PROGRESS or DONE at the conclusion of
4752  * afterTriggerMarkEvents, so any still-interesting events must
4753  * have been added after that, and so must be in the chunk that
4754  * was then the tail chunk, or in later chunks. So, zap all
4755  * chunks before oldtail. This is approximately the same set of
4756  * events we would have gotten rid of by passing delete_ok = true.
4757  */
4758  Assert(oldtail != NULL);
4759  while (qs->events.head != oldtail)
/* NOTE(review): dropped loop-body line (4760), presumably
 * "afterTriggerDeleteHeadEventChunk(qs);" -- confirm upstream. */
4761  }
4762  else
4763  break;
4764  }
4765 
4766  /* Release query-level-local storage, including tuplestores if any */
4767  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
4768 
4769  afterTriggers.query_depth--;
4770 }
4771 
4772 
4773 /*
4774  * AfterTriggerFreeQuery
4775  * Release subsidiary storage for a trigger query level.
4776  * This includes closing down tuplestores.
4777  * Note: it's important for this to be safe if interrupted by an error
4778  * and then called again for the same query level.
4779  */
4780 static void
/* NOTE(review): dropped signature line (4781); should read
 * "AfterTriggerFreeQuery(AfterTriggersQueryData *qs)" -- confirm. */
4782 {
4783  Tuplestorestate *ts;
4784  List *tables;
4785  ListCell *lc;
4786 
4787  /* Drop the trigger events */
/* NOTE(review): dropped call line (4788), presumably
 * "afterTriggerFreeEventList(&qs->events);" -- confirm upstream. */
4789 
4790  /* Drop FDW tuplestore if any */
/* Null the pointer before tuplestore_end so a repeat call after an error
 * (see header comment) won't double-free. Same pattern below. */
4791  ts = qs->fdw_tuplestore;
4792  qs->fdw_tuplestore = NULL;
4793  if (ts)
4794  tuplestore_end(ts);
4795 
4796  /* Release per-table subsidiary storage */
4797  tables = qs->tables;
4798  foreach(lc, tables)
4799  {
/* NOTE(review): dropped declaration line (4800) initializing "table" from
 * lfirst(lc) -- confirm upstream. */
4801 
4802  ts = table->old_tuplestore;
4803  table->old_tuplestore = NULL;
4804  if (ts)
4805  tuplestore_end(ts);
4806  ts = table->new_tuplestore;
4807  table->new_tuplestore = NULL;
4808  if (ts)
4809  tuplestore_end(ts);
4810  if (table->storeslot)
/* NOTE(review): dropped line (4811), presumably the
 * ExecDropSingleTupleTableSlot(table->storeslot) call -- confirm. */
4812  }
4813 
4814  /*
4815  * Now free the AfterTriggersTableData structs and list cells. Reset list
4816  * pointer first; if list_free_deep somehow gets an error, better to leak
4817  * that storage than have an infinite loop.
4818  */
4819  qs->tables = NIL;
4820  list_free_deep(tables);
4821 }
4822 
4823 
4824 /* ----------
4825  * AfterTriggerFireDeferred()
4826  *
4827  * Called just before the current transaction is committed. At this
4828  * time we invoke all pending DEFERRED triggers.
4829  *
4830  * It is possible for other modules to queue additional deferred triggers
4831  * during pre-commit processing; therefore xact.c may have to call this
4832  * multiple times.
4833  * ----------
4834  */
4835 void
/* NOTE(review): dropped signature line (4836); should read
 * "AfterTriggerFireDeferred(void)" -- confirm against upstream source. */
4837 {
4838  AfterTriggerEventList *events;
4839  bool snap_pushed = false;
4840 
4841  /* Must not be inside a query */
4842  Assert(afterTriggers.query_depth == -1);
4843 
4844  /*
4845  * If there are any triggers to fire, make sure we have set a snapshot for
4846  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
4847  * can't assume ActiveSnapshot is valid on entry.)
4848  */
4849  events = &afterTriggers.events;
4850  if (events->head != NULL)
4851  {
/* NOTE(review): dropped line (4852), presumably
 * "PushActiveSnapshot(GetTransactionSnapshot());" -- confirm upstream. */
4853  snap_pushed = true;
4854  }
4855 
4856  /*
4857  * Run all the remaining triggers. Loop until they are all gone, in case
4858  * some trigger queues more for us to do.
4859  */
4860  while (afterTriggerMarkEvents(events, NULL, false))
4861  {
4862  CommandId firing_id = afterTriggers.firing_counter++;
4863 
4864  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4865  break; /* all fired */
4866  }
4867 
4868  /*
4869  * We don't bother freeing the event list, since it will go away anyway
4870  * (and more efficiently than via pfree) in AfterTriggerEndXact.
4871  */
4872 
4873  if (snap_pushed)
/* NOTE(review): dropped line (4874), presumably "PopActiveSnapshot();"
 * -- confirm upstream. */
4875 }
4876 
4877 
4878 /* ----------
4879  * AfterTriggerEndXact()
4880  *
4881  * The current transaction is finishing.
4882  *
4883  * Any unfired triggers are canceled so we simply throw
4884  * away anything we know.
4885  *
4886  * Note: it is possible for this to be called repeatedly in case of
4887  * error during transaction abort; therefore, do not complain if
4888  * already closed down.
4889  * ----------
4890  */
4891 void
4892 AfterTriggerEndXact(bool isCommit)
4893 {
4894  /*
4895  * Forget the pending-events list.
4896  *
4897  * Since all the info is in TopTransactionContext or children thereof, we
4898  * don't really need to do anything to reclaim memory. However, the
4899  * pending-events list could be large, and so it's useful to discard it as
4900  * soon as possible --- especially if we are aborting because we ran out
4901  * of memory for the list!
4902  */
4903  if (afterTriggers.event_cxt)
4904  {
4905  MemoryContextDelete(afterTriggers.event_cxt);
4906  afterTriggers.event_cxt = NULL;
4907  afterTriggers.events.head = NULL;
4908  afterTriggers.events.tail = NULL;
4909  afterTriggers.events.tailfree = NULL;
4910  }
4911 
4912  /*
4913  * Forget any subtransaction state as well. Since this can't be very
4914  * large, we let the eventual reset of TopTransactionContext free the
4915  * memory instead of doing it here.
4916  */
4917  afterTriggers.trans_stack = NULL;
4918  afterTriggers.maxtransdepth = 0;
4919 
4920 
4921  /*
4922  * Forget the query stack and constraint-related state information. As
4923  * with the subtransaction state information, we don't bother freeing the
4924  * memory here.
4925  */
4926  afterTriggers.query_stack = NULL;
4927  afterTriggers.maxquerydepth = 0;
4928  afterTriggers.state = NULL;
4929 
4930  /* No more afterTriggers manipulation until next transaction starts. */
4931  afterTriggers.query_depth = -1;
4932 }
4933 
4934 /*
4935  * AfterTriggerBeginSubXact()
4936  *
4937  * Start a subtransaction.
4938  */
4939 void
/* NOTE(review): dropped signature line (4940); should read
 * "AfterTriggerBeginSubXact(void)" -- confirm against upstream source. */
4941 {
4942  int my_level = GetCurrentTransactionNestLevel();
4943 
4944  /*
4945  * Allocate more space in the trans_stack if needed. (Note: because the
4946  * minimum nest level of a subtransaction is 2, we waste the first couple
4947  * entries of the array; not worth the notational effort to avoid it.)
4948  */
4949  while (my_level >= afterTriggers.maxtransdepth)
4950  {
4951  if (afterTriggers.maxtransdepth == 0)
4952  {
4953  /* Arbitrarily initialize for max of 8 subtransaction levels */
4954  afterTriggers.trans_stack = (AfterTriggersTransData *)
/* NOTE(review): dropped allocation line (4955), presumably a
 * MemoryContextAlloc in TopTransactionContext -- confirm upstream. */
4956  8 * sizeof(AfterTriggersTransData));
4957  afterTriggers.maxtransdepth = 8;
4958  }
4959  else
4960  {
4961  /* repalloc will keep the stack in the same context */
4962  int new_alloc = afterTriggers.maxtransdepth * 2;
4963 
4964  afterTriggers.trans_stack = (AfterTriggersTransData *)
4965  repalloc(afterTriggers.trans_stack,
4966  new_alloc * sizeof(AfterTriggersTransData));
4967  afterTriggers.maxtransdepth = new_alloc;
4968  }
4969  }
4970 
4971  /*
4972  * Push the current information into the stack. The SET CONSTRAINTS state
4973  * is not saved until/unless changed. Likewise, we don't make a
4974  * per-subtransaction event context until needed.
4975  */
4976  afterTriggers.trans_stack[my_level].state = NULL;
4977  afterTriggers.trans_stack[my_level].events = afterTriggers.events;
4978  afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
4979  afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
4980 }
4981 
4982 /*
4983  * AfterTriggerEndSubXact()
4984  *
4985  * The current subtransaction is ending.
4986  */
4987 void
4989 {
4990  int my_level = GetCurrentTransactionNestLevel();
4992  AfterTriggerEvent event;
4993  AfterTriggerEventChunk *chunk;
4994  CommandId subxact_firing_id;
4995 
4996  /*
4997  * Pop the prior state if needed.
4998  */
4999  if (isCommit)
5000  {
5001  Assert(my_level < afterTriggers.maxtransdepth);
5002  /* If we saved a prior state, we don't need it anymore */
5003  state = afterTriggers.trans_stack[my_level].state;
5004  if (state != NULL)
5005  pfree(state);
5006  /* this avoids double pfree if error later: */
5007  afterTriggers.trans_stack[my_level].state = NULL;
5008  Assert(afterTriggers.query_depth ==
5009  afterTriggers.trans_stack[my_level].query_depth);
5010  }
5011  else
5012  {
5013  /*
5014  * Aborting. It is possible subxact start failed before calling
5015  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
5016  * trans_stack levels that aren't there.
5017  */
5018  if (my_level >= afterTriggers.maxtransdepth)
5019  return;
5020 
5021  /*
5022  * Release query-level storage for queries being aborted, and restore
5023  * query_depth to its pre-subxact value. This assumes that a
5024  * subtransaction will not add events to query levels started in a
5025  * earlier transaction state.
5026  */
5027  while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
5028  {
5029  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
5030  AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
5031  afterTriggers.query_depth--;
5032  }
5033  Assert(afterTriggers.query_depth ==
5034  afterTriggers.trans_stack[my_level].query_depth);
5035 
5036  /*
5037  * Restore the global deferred-event list to its former length,
5038  * discarding any events queued by the subxact.
5039  */
5040  afterTriggerRestoreEventList(&afterTriggers.events,
5041  &afterTriggers.trans_stack[my_level].events);
5042 
5043  /*
5044  * Restore the trigger state. If the saved state is NULL, then this
5045  * subxact didn't save it, so it doesn't need restoring.
5046  */
5047  state = afterTriggers.trans_stack[my_level].state;
5048  if (state != NULL)
5049  {
5050  pfree(afterTriggers.state);
5051  afterTriggers.state = state;
5052  }
5053  /* this avoids double pfree if error later: */
5054  afterTriggers.trans_stack[my_level].state = NULL;
5055 
5056  /*
5057  * Scan for any remaining deferred events that were marked DONE or IN
5058  * PROGRESS by this subxact or a child, and un-mark them. We can
5059  * recognize such events because they have a firing ID greater than or
5060  * equal to the firing_counter value we saved at subtransaction start.
5061  * (This essentially assumes that the current subxact includes all
5062  * subxacts started after it.)
5063  */
5064  subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
5065  for_each_event_chunk(event, chunk, afterTriggers.events)
5066  {
5067  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5068 
5069  if (event->ate_flags &
5071  {
5072  if (evtshared->ats_firing_id >= subxact_firing_id)
5073  event->ate_flags &=
5075  }
5076  }
5077  }
5078 }
5079 
5080 /* ----------
5081  * AfterTriggerEnlargeQueryState()
5082  *
5083  * Prepare the necessary state so that we can record AFTER trigger events
5084  * queued by a query. It is allowed to have nested queries within a
5085  * (sub)transaction, so we need to have separate state for each query
5086  * nesting level.
5087  * ----------
5088  */
5089 static void
5091 {
5092  int init_depth = afterTriggers.maxquerydepth;
5093 
5094  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
5095 
5096  if (afterTriggers.maxquerydepth == 0)
5097  {
5098  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
5099 
5100  afterTriggers.query_stack = (AfterTriggersQueryData *)
5102  new_alloc * sizeof(AfterTriggersQueryData));
5103  afterTriggers.maxquerydepth = new_alloc;
5104  }
5105  else
5106  {
5107  /* repalloc will keep the stack in the same context */
5108  int old_alloc = afterTriggers.maxquerydepth;
5109  int new_alloc = Max(afterTriggers.query_depth + 1,
5110  old_alloc * 2);
5111 
5112  afterTriggers.query_stack = (AfterTriggersQueryData *)
5113  repalloc(afterTriggers.query_stack,
5114  new_alloc * sizeof(AfterTriggersQueryData));
5115  afterTriggers.maxquerydepth = new_alloc;
5116  }
5117 
5118  /* Initialize new array entries to empty */
5119  while (init_depth < afterTriggers.maxquerydepth)
5120  {
5121  AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth];
5122 
5123  qs->events.head = NULL;
5124  qs->events.tail = NULL;
5125  qs->events.tailfree = NULL;
5126  qs->fdw_tuplestore = NULL;
5127  qs->tables = NIL;
5128 
5129  ++init_depth;
5130  }
5131 }
5132 
5133 /*
5134  * Create an empty SetConstraintState with room for numalloc trigstates
5135  */
5136 static SetConstraintState
5138 {
5140 
5141  /* Behave sanely with numalloc == 0 */
5142  if (numalloc <= 0)
5143  numalloc = 1;
5144 
5145  /*
5146  * We assume that zeroing will correctly initialize the state values.
5147  */
5148  state = (SetConstraintState)
5150  offsetof(SetConstraintStateData, trigstates) +
5151  numalloc * sizeof(SetConstraintTriggerData));
5152 
5153  state->numalloc = numalloc;
5154 
5155  return state;
5156 }
5157 
5158 /*
5159  * Copy a SetConstraintState
5160  */
5161 static SetConstraintState
5163 {
5165 
5166  state = SetConstraintStateCreate(origstate->numstates);
5167 
5168  state->all_isset = origstate->all_isset;
5169  state->all_isdeferred = origstate->all_isdeferred;
5170  state->numstates = origstate->numstates;
5171  memcpy(state->trigstates, origstate->trigstates,
5172  origstate->numstates * sizeof(SetConstraintTriggerData));
5173 
5174  return state;
5175 }
5176 
5177 /*
5178  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
5179  * pointer to the state object (it will change if we have to repalloc).
5180  */
5181 static SetConstraintState
5183  Oid tgoid, bool tgisdeferred)
5184 {
5185  if (state->numstates >= state->numalloc)
5186  {
5187  int newalloc = state->numalloc * 2;
5188 
5189  newalloc = Max(newalloc, 8); /* in case original has size 0 */
5190  state = (SetConstraintState)
5191  repalloc(state,
5192  offsetof(SetConstraintStateData, trigstates) +
5193  newalloc * sizeof(SetConstraintTriggerData));
5194  state->numalloc = newalloc;
5195  Assert(state->numstates < state->numalloc);
5196  }
5197 
5198  state->trigstates[state->numstates].sct_tgoid = tgoid;
5199  state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
5200  state->numstates++;
5201 
5202  return state;
5203 }
5204 
5205 /* ----------
5206  * AfterTriggerSetState()
5207  *
5208  * Execute the SET CONSTRAINTS ... utility command.
5209  * ----------
5210  */
5211 void
5213 {
5214  int my_level = GetCurrentTransactionNestLevel();
5215 
5216  /* If we haven't already done so, initialize our state. */
5217  if (afterTriggers.state == NULL)
5218  afterTriggers.state = SetConstraintStateCreate(8);
5219 
5220  /*
5221  * If in a subtransaction, and we didn't save the current state already,
5222  * save it so it can be restored if the subtransaction aborts.
5223  */
5224  if (my_level > 1 &&
5225  afterTriggers.trans_stack[my_level].state == NULL)
5226  {
5227  afterTriggers.trans_stack[my_level].state =
5228  SetConstraintStateCopy(afterTriggers.state);
5229  }
5230 
5231  /*
5232  * Handle SET CONSTRAINTS ALL ...
5233  */
5234  if (stmt->constraints == NIL)
5235  {
5236  /*
5237  * Forget any previous SET CONSTRAINTS commands in this transaction.
5238  */
5239  afterTriggers.state->numstates = 0;
5240 
5241  /*
5242  * Set the per-transaction ALL state to known.
5243  */
5244  afterTriggers.state->all_isset = true;
5245  afterTriggers.state->all_isdeferred = stmt->deferred;
5246  }
5247  else
5248  {
5249  Relation conrel;
5250  Relation tgrel;
5251  List *conoidlist = NIL;
5252  List *tgoidlist = NIL;
5253  ListCell *lc;
5254 
5255  /*
5256  * Handle SET CONSTRAINTS constraint-name [, ...]
5257  *
5258  * First, identify all the named constraints and make a list of their
5259  * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
5260  * the same name within a schema, the specifications are not
5261  * necessarily unique. Our strategy is to target all matching
5262  * constraints within the first search-path schema that has any
5263  * matches, but disregard matches in schemas beyond the first match.
5264  * (This is a bit odd but it's the historical behavior.)
5265  *
5266  * A constraint in a partitioned table may have corresponding
5267  * constraints in the partitions. Grab those too.
5268  */
5269  conrel = table_open(ConstraintRelationId, AccessShareLock);
5270 
5271  foreach(lc, stmt->constraints)
5272  {
5273  RangeVar *constraint = lfirst(lc);
5274  bool found;
5275  List *namespacelist;
5276  ListCell *nslc;
5277 
5278  if (constraint->catalogname)
5279  {
5280  if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
5281  ereport(ERROR,
5282  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5283  errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
5284  constraint->catalogname, constraint->schemaname,
5285  constraint->relname)));
5286  }
5287 
5288  /*
5289  * If we're given the schema name with the constraint, look only
5290  * in that schema. If given a bare constraint name, use the
5291  * search path to find the first matching constraint.
5292  */
5293  if (constraint->schemaname)
5294  {
5295  Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
5296  false);
5297 
5298  namespacelist = list_make1_oid(namespaceId);
5299  }
5300  else
5301  {
5302  namespacelist = fetch_search_path(true);
5303  }
5304 
5305  found = false;
5306  foreach(nslc, namespacelist)
5307  {
5308  Oid namespaceId = lfirst_oid(nslc);
5309  SysScanDesc conscan;
5310  ScanKeyData skey[2];
5311  HeapTuple tup;
5312 
5313  ScanKeyInit(&skey[0],
5314  Anum_pg_constraint_conname,
5315  BTEqualStrategyNumber, F_NAMEEQ,
5316  CStringGetDatum(constraint->relname));
5317  ScanKeyInit(&skey[1],
5318  Anum_pg_constraint_connamespace,
5319  BTEqualStrategyNumber, F_OIDEQ,
5320  ObjectIdGetDatum(namespaceId));
5321 
5322  conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
5323  true, NULL, 2, skey);
5324 
5325  while (HeapTupleIsValid(tup = systable_getnext(conscan)))
5326  {
5328 
5329  if (con->condeferrable)
5330  conoidlist = lappend_oid(conoidlist, con->oid);
5331  else if (stmt->deferred)
5332  ereport(ERROR,
5333  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
5334  errmsg("constraint \"%s\" is not deferrable",
5335  constraint->relname)));
5336  found = true;
5337  }
5338 
5339  systable_endscan(conscan);
5340 
5341  /*
5342  * Once we've found a matching constraint we do not search
5343  * later parts of the search path.
5344  */
5345  if (found)
5346  break;
5347  }
5348 
5349  list_free(namespacelist);
5350 
5351  /*
5352  * Not found ?
5353  */
5354  if (!found)
5355  ereport(ERROR,
5356  (errcode(ERRCODE_UNDEFINED_OBJECT),
5357  errmsg("constraint \"%s\" does not exist",
5358  constraint->relname)));
5359  }
5360 
5361  /*
5362  * Scan for any possible descendants of the constraints. We append
5363  * whatever we find to the same list that we're scanning; this has the
5364  * effect that we create new scans for those, too, so if there are
5365  * further descendents, we'll also catch them.
5366  */
5367  foreach(lc, conoidlist)
5368  {
5369  Oid parent = lfirst_oid(lc);
5370  ScanKeyData key;
5371  SysScanDesc scan;
5372  HeapTuple tuple;
5373 
5374  ScanKeyInit(&key,
5375  Anum_pg_constraint_conparentid,
5376  BTEqualStrategyNumber, F_OIDEQ,
5377  ObjectIdGetDatum(parent));
5378 
5379  scan = systable_beginscan(conrel, ConstraintParentIndexId, true, NULL, 1, &key);
5380 
5381  while (HeapTupleIsValid(tuple = systable_getnext(scan)))
5382  {
5384 
5385  conoidlist = lappend_oid(conoidlist, con->oid);
5386  }
5387 
5388  systable_endscan(scan);
5389  }
5390 
5391  table_close(conrel, AccessShareLock);
5392 
5393  /*
5394  * Now, locate the trigger(s) implementing each of these constraints,
5395  * and make a list of their OIDs.
5396  */
5397  tgrel = table_open(TriggerRelationId, AccessShareLock);
5398 
5399  foreach(lc, conoidlist)
5400  {
5401  Oid conoid = lfirst_oid(lc);
5402  ScanKeyData skey;
5403  SysScanDesc tgscan;
5404  HeapTuple htup;
5405 
5406  ScanKeyInit(&skey,
5407  Anum_pg_trigger_tgconstraint,
5408  BTEqualStrategyNumber, F_OIDEQ,
5409  ObjectIdGetDatum(conoid));
5410 
5411  tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
5412  NULL, 1, &skey);
5413 
5414  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
5415  {
5416  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
5417 
5418  /*
5419  * Silently skip triggers that are marked as non-deferrable in
5420  * pg_trigger. This is not an error condition, since a
5421  * deferrable RI constraint may have some non-deferrable
5422  * actions.
5423  */
5424  if (pg_trigger->tgdeferrable)
5425  tgoidlist = lappend_oid(tgoidlist, pg_trigger->oid);
5426  }
5427 
5428  systable_endscan(tgscan);
5429  }
5430 
5431  table_close(tgrel, AccessShareLock);
5432 
5433  /*
5434  * Now we can set the trigger states of individual triggers for this
5435  * xact.
5436  */
5437  foreach(lc, tgoidlist)
5438  {
5439  Oid tgoid = lfirst_oid(lc);
5440  SetConstraintState state = afterTriggers.state;
5441  bool found = false;
5442  int i;
5443 
5444  for (i = 0; i < state->numstates; i++)
5445  {
5446  if (state->trigstates[i].sct_tgoid == tgoid)
5447  {
5448  state->trigstates[i].sct_tgisdeferred = stmt->deferred;
5449  found = true;
5450  break;
5451  }
5452  }
5453  if (!found)
5454  {
5455  afterTriggers.state =
5456  SetConstraintStateAddItem(state, tgoid, stmt->deferred);
5457  }
5458  }
5459  }
5460 
5461  /*
5462  * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
5463  * checks against that constraint must be made when the SET CONSTRAINTS
5464  * command is executed -- i.e. the effects of the SET CONSTRAINTS command
5465  * apply retroactively. We've updated the constraints state, so scan the
5466  * list of previously deferred events to fire any that have now become
5467  * immediate.
5468  *
5469  * Obviously, if this was SET ... DEFERRED then it can't have converted
5470  * any unfired events to immediate, so we need do nothing in that case.
5471  */
5472  if (!stmt->deferred)
5473  {
5474  AfterTriggerEventList *events = &afterTriggers.events;
5475  bool snapshot_set = false;
5476 
5477  while (afterTriggerMarkEvents(events, NULL, true))
5478  {
5479  CommandId firing_id = afterTriggers.firing_counter++;
5480 
5481  /*
5482  * Make sure a snapshot has been established in case trigger
5483  * functions need one. Note that we avoid setting a snapshot if
5484  * we don't find at least one trigger that has to be fired now.
5485  * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
5486  * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
5487  * at the start of a transaction it's not possible for any trigger
5488  * events to be queued yet.)
5489  */
5490  if (!snapshot_set)
5491  {
5493  snapshot_set = true;
5494  }
5495 
5496  /*
5497  * We can delete fired events if we are at top transaction level,
5498  * but we'd better not if inside a subtransaction, since the
5499  * subtransaction could later get rolled back.
5500  */
5501  if (afterTriggerInvokeEvents(events, firing_id, NULL,
5502  !IsSubTransaction()))
5503  break; /* all fired */
5504  }
5505 
5506  if (snapshot_set)
5508  }
5509 }
5510 
5511 /* ----------
5512  * AfterTriggerPendingOnRel()
5513  * Test to see if there are any pending after-trigger events for rel.
5514  *
5515  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5516  * it is unsafe to perform major surgery on a relation. Note that only
5517  * local pending events are examined. We assume that having exclusive lock
5518  * on a rel guarantees there are no unserviced events in other backends ---
5519  * but having a lock does not prevent there being such events in our own.
5520  *
5521  * In some scenarios it'd be reasonable to remove pending events (more
5522  * specifically, mark them DONE by the current subxact) but without a lot
5523  * of knowledge of the trigger semantics we can't do this in general.
5524  * ----------
5525  */
5526 bool
5528 {
5529  AfterTriggerEvent event;
5530  AfterTriggerEventChunk *chunk;
5531  int depth;
5532 
5533  /* Scan queued events */
5534  for_each_event_chunk(event, chunk, afterTriggers.events)
5535  {
5536  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5537 
5538  /*
5539  * We can ignore completed events. (Even if a DONE flag is rolled
5540  * back by subxact abort, it's OK because the effects of the TRUNCATE
5541  * or whatever must get rolled back too.)
5542  */
5543  if (event->ate_flags & AFTER_TRIGGER_DONE)
5544  continue;
5545 
5546  if (evtshared->ats_relid == relid)
5547  return true;
5548  }
5549 
5550  /*
5551  * Also scan events queued by incomplete queries. This could only matter
5552  * if TRUNCATE/etc is executed by a function or trigger within an updating
5553  * query on the same relation, which is pretty perverse, but let's check.
5554  */
5555  for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
5556  {
5557  for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events)
5558  {
5559  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5560 
5561  if (event->ate_flags & AFTER_TRIGGER_DONE)
5562  continue;
5563 
5564  if (evtshared->ats_relid == relid)
5565  return true;
5566  }
5567  }
5568 
5569  return false;
5570 }
5571 
5572 
5573 /* ----------
5574  * AfterTriggerSaveEvent()
5575  *
5576  * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5577  * be fired for an event.
5578  *
5579  * NOTE: this is called whenever there are any triggers associated with
5580  * the event (even if they are disabled). This function decides which
5581  * triggers actually need to be queued. It is also called after each row,
5582  * even if there are no triggers for that event, if there are any AFTER
5583  * STATEMENT triggers for the statement which use transition tables, so that
5584  * the transition tuplestores can be built. Furthermore, if the transition
5585  * capture is happening for UPDATEd rows being moved to another partition due
5586  * to the partition-key being changed, then this function is called once when
5587  * the row is deleted (to capture OLD row), and once when the row is inserted
5588  * into another partition (to capture NEW row). This is done separately because
5589  * DELETE and INSERT happen on different tables.
5590  *
5591  * Transition tuplestores are built now, rather than when events are pulled
5592  * off of the queue because AFTER ROW triggers are allowed to select from the
5593  * transition tables for the statement.
5594  * ----------
5595  */
5596 static void
5598  int event, bool row_trigger,
5599  TupleTableSlot *oldslot, TupleTableSlot *newslot,
5600  List *recheckIndexes, Bitmapset *modifiedCols,
5601  TransitionCaptureState *transition_capture)
5602 {
5603  Relation rel = relinfo->ri_RelationDesc;
5604  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5605  AfterTriggerEventData new_event;
5606  AfterTriggerSharedData new_shared;
5607  char relkind = rel->rd_rel->relkind;
5608  int tgtype_event;
5609  int tgtype_level;
5610  int i;
5611  Tuplestorestate *fdw_tuplestore = NULL;
5612 
5613  /*
5614  * Check state. We use a normal test not Assert because it is possible to
5615  * reach here in the wrong state given misconfigured RI triggers, in
5616  * particular deferring a cascade action trigger.
5617  */
5618  if (afterTriggers.query_depth < 0)
5619  elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
5620 
5621  /* Be sure we have enough space to record events at this query depth. */
5622  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5624 
5625  /*
5626  * If the directly named relation has any triggers with transition tables,
5627  * then we need to capture transition tuples.
5628  */
5629  if (row_trigger && transition_capture != NULL)
5630  {
5631  TupleTableSlot *original_insert_tuple = transition_capture->tcs_original_insert_tuple;
5632  TupleConversionMap *map = ExecGetChildToRootMap(relinfo);
5633  bool delete_old_table = transition_capture->tcs_delete_old_table;
5634  bool update_old_table = transition_capture->tcs_update_old_table;
5635  bool update_new_table = transition_capture->tcs_update_new_table;
5636  bool insert_new_table = transition_capture->tcs_insert_new_table;
5637 
5638  /*
5639  * For INSERT events NEW should be non-NULL, for DELETE events OLD
5640  * should be non-NULL, whereas for UPDATE events normally both OLD and
5641  * NEW are non-NULL. But for UPDATE events fired for capturing
5642  * transition tuples during UPDATE partition-key row movement, OLD is
5643  * NULL when the event is for a row being inserted, whereas NEW is
5644  * NULL when the event is for a row being deleted.
5645  */
5646  Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5647  TupIsNull(oldslot)));
5648  Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5649  TupIsNull(newslot)));
5650 
5651  if (!TupIsNull(oldslot) &&
5652  ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
5653  (event == TRIGGER_EVENT_UPDATE && update_old_table)))
5654  {
5655  Tuplestorestate *old_tuplestore;
5656 
5657  old_tuplestore = transition_capture->tcs_private->old_tuplestore;
5658 
5659  if (map != NULL)
5660  {
5661  AfterTriggersTableData *table = transition_capture->tcs_private;
5662  TupleTableSlot *storeslot;
5663 
5664  storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5665  execute_attr_map_slot(map->attrMap, oldslot, storeslot);
5666  tuplestore_puttupleslot(old_tuplestore, storeslot);
5667  }
5668  else
5669  tuplestore_puttupleslot(old_tuplestore, oldslot);
5670  }
5671  if (!TupIsNull(newslot) &&
5672  ((event == TRIGGER_EVENT_INSERT && insert_new_table) ||
5673  (event == TRIGGER_EVENT_UPDATE && update_new_table)))
5674  {
5675  Tuplestorestate *new_tuplestore;
5676 
5677  new_tuplestore = transition_capture->tcs_private->new_tuplestore;
5678 
5679  if (original_insert_tuple != NULL)
5680  tuplestore_puttupleslot(new_tuplestore,
5681  original_insert_tuple);
5682  else if (map != NULL)
5683  {
5684  AfterTriggersTableData *table = transition_capture->tcs_private;
5685  TupleTableSlot *storeslot;
5686 
5687  storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5688  execute_attr_map_slot(map->attrMap, newslot, storeslot);
5689  tuplestore_puttupleslot(new_tuplestore, storeslot);
5690  }
5691  else
5692  tuplestore_puttupleslot(new_tuplestore, newslot);
5693  }
5694 
5695  /*
5696  * If transition tables are the only reason we're here, return. As
5697  * mentioned above, we can also be here during update tuple routing in
5698  * presence of transition tables, in which case this function is
5699  * called separately for oldtup and newtup, so we expect exactly one
5700  * of them to be NULL.
5701  */
5702  if (trigdesc == NULL ||
5703  (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
5704  (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
5705  (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) ||
5706  (event == TRIGGER_EVENT_UPDATE && (TupIsNull(oldslot) ^ TupIsNull(newslot))))
5707  return;
5708  }
5709 
5710  /*
5711  * Validate the event code and collect the associated tuple CTIDs.
5712  *
5713  * The event code will be used both as a bitmask and an array offset, so
5714  * validation is important to make sure we don't walk off the edge of our
5715  * arrays.
5716  *
5717  * Also, if we're considering statement-level triggers, check whether we
5718  * already queued a set of them for this event, and cancel the prior set
5719  * if so. This preserves the behavior that statement-level triggers fire
5720  * just once per statement and fire after row-level triggers.
5721  */
5722  switch (event)
5723  {
5724  case TRIGGER_EVENT_INSERT:
5725  tgtype_event = TRIGGER_TYPE_INSERT;
5726  if (row_trigger)
5727  {
5728  Assert(oldslot == NULL);
5729  Assert(newslot != NULL);
5730  ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid1));
5731  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5732  }
5733  else
5734  {
5735  Assert(oldslot == NULL);
5736  Assert(newslot == NULL);
5737  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5738  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5739