1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/heapam.h"
18 #include "access/sysattr.h"
19 #include "access/htup_details.h"
20 #include "access/xact.h"
21 #include "catalog/catalog.h"
22 #include "catalog/dependency.h"
23 #include "catalog/indexing.h"
24 #include "catalog/objectaccess.h"
25 #include "catalog/pg_constraint.h"
26 #include "catalog/pg_constraint_fn.h"
27 #include "catalog/pg_proc.h"
28 #include "catalog/pg_trigger.h"
29 #include "catalog/pg_type.h"
30 #include "commands/dbcommands.h"
31 #include "commands/defrem.h"
32 #include "commands/trigger.h"
33 #include "executor/executor.h"
34 #include "miscadmin.h"
35 #include "nodes/bitmapset.h"
36 #include "nodes/makefuncs.h"
37 #include "optimizer/clauses.h"
38 #include "optimizer/var.h"
39 #include "parser/parse_clause.h"
40 #include "parser/parse_collate.h"
41 #include "parser/parse_func.h"
42 #include "parser/parse_relation.h"
43 #include "parser/parsetree.h"
44 #include "pgstat.h"
45 #include "rewrite/rewriteManip.h"
46 #include "storage/bufmgr.h"
47 #include "storage/lmgr.h"
48 #include "tcop/utility.h"
49 #include "utils/acl.h"
50 #include "utils/builtins.h"
51 #include "utils/bytea.h"
52 #include "utils/fmgroids.h"
53 #include "utils/inval.h"
54 #include "utils/lsyscache.h"
55 #include "utils/memutils.h"
56 #include "utils/rel.h"
57 #include "utils/snapmgr.h"
58 #include "utils/syscache.h"
59 #include "utils/tqual.h"
60 #include "utils/tuplestore.h"
61 
62 
63 /* GUC variables */
64 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
65 
66 /* How many levels deep into trigger execution are we? */
67 static int MyTriggerDepth = 0;
68 
69 /*
70  * Note that similar macros also exist in executor/execMain.c. There does not
71  * appear to be any good header to put them into, given the structures that
72  * they use, so we let them be duplicated. Be sure to update all if one needs
73  * to be changed, however.
74  */
75 #define GetUpdatedColumns(relinfo, estate) \
76  (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
77 
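/*
 * Editorial note (not in the original file): trigger code typically consults
 * this macro when deciding whether a column-specific UPDATE trigger needs to
 * fire.  A hypothetical caller might look like:
 *
 *     Bitmapset *updatedCols = GetUpdatedColumns(relinfo, estate);
 *
 *     if (bms_is_member(attnum - FirstLowInvalidHeapAttributeNumber,
 *                       updatedCols))
 *         ...consider firing the trigger...
 *
 * bms_is_member() and FirstLowInvalidHeapAttributeNumber are the usual
 * bitmapset helpers; the surrounding control flow above is only a sketch.
 */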
78 /* Local function prototypes */
79 static void ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid);
80 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
81 static HeapTuple GetTupleForTrigger(EState *estate,
82  EPQState *epqstate,
83  ResultRelInfo *relinfo,
84  ItemPointer tid,
85  LockTupleMode lockmode,
86  TupleTableSlot **newSlot);
87 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
88  Trigger *trigger, TriggerEvent event,
89  Bitmapset *modifiedCols,
90  HeapTuple oldtup, HeapTuple newtup);
91 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
92  int tgindx,
93  FmgrInfo *finfo,
94  Instrumentation *instr,
95  MemoryContext per_tuple_context);
96 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
97  int event, bool row_trigger,
98  HeapTuple oldtup, HeapTuple newtup,
99  List *recheckIndexes, Bitmapset *modifiedCols);
100 static void AfterTriggerEnlargeQueryState(void);
101 
102 
103 /*
104  * Create a trigger. Returns the address of the created trigger.
105  *
106  * queryString is the source text of the CREATE TRIGGER command.
107  * This must be supplied if a whenClause is specified, else it can be NULL.
108  *
109  * relOid, if nonzero, is the relation on which the trigger should be
110  * created. If zero, the name provided in the statement will be looked up.
111  *
112  * refRelOid, if nonzero, is the relation to which the constraint trigger
113  * refers. If zero, the constraint relation name provided in the statement
114  * will be looked up as needed.
115  *
116  * constraintOid, if nonzero, says that this trigger is being created
117  * internally to implement that constraint. A suitable pg_depend entry will
118  * be made to link the trigger to that constraint. constraintOid is zero when
119  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
120  * TRIGGER, we build a pg_constraint entry internally.)
121  *
122  * indexOid, if nonzero, is the OID of an index associated with the constraint.
123  * We do nothing with this except store it into pg_trigger.tgconstrindid.
124  *
125  * If isInternal is true then this is an internally-generated trigger.
126  * This argument sets the tgisinternal field of the pg_trigger entry, and
127  * if TRUE causes us to modify the given trigger name to ensure uniqueness.
128  *
129  * When isInternal is not true we require ACL_TRIGGER permissions on the
130  * relation, as well as ACL_EXECUTE on the trigger function. For internal
131  * triggers the caller must apply any required permission checks.
132  *
133  * Note: can return InvalidObjectAddress if we decided to not create a trigger
134  * at all, but a foreign-key constraint. This is a kluge for backwards
135  * compatibility.
136  */
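/*
 * Illustrative example (editorial addition): a user-level command handled by
 * this function might look like the following; the table and function names
 * are invented.
 *
 *     CREATE TRIGGER my_audit
 *         AFTER INSERT OR UPDATE ON my_table
 *         REFERENCING NEW TABLE AS new_rows
 *         FOR EACH STATEMENT
 *         EXECUTE PROCEDURE my_audit_func();
 *
 * In that case queryString is the command's text, relOid/refRelOid are
 * typically InvalidOid (the names are looked up), and isInternal is false.
 */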
137 ObjectAddress
138 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
139  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
140  bool isInternal)
141 {
142  int16 tgtype;
143  int ncolumns;
144  int16 *columns;
145  int2vector *tgattr;
146  Node *whenClause;
147  List *whenRtable;
148  char *qual;
149  Datum values[Natts_pg_trigger];
150  bool nulls[Natts_pg_trigger];
151  Relation rel;
152  AclResult aclresult;
153  Relation tgrel;
154  SysScanDesc tgscan;
155  ScanKeyData key;
156  Relation pgrel;
157  HeapTuple tuple;
158  Oid fargtypes[1]; /* dummy */
159  Oid funcoid;
160  Oid funcrettype;
161  Oid trigoid;
162  char internaltrigname[NAMEDATALEN];
163  char *trigname;
164  Oid constrrelid = InvalidOid;
165  ObjectAddress myself,
166  referenced;
167  char *oldtablename = NULL;
168  char *newtablename = NULL;
169 
170  if (OidIsValid(relOid))
171  rel = heap_open(relOid, ShareRowExclusiveLock);
172  else
173  rel = heap_openrv(stmt->relation, ShareRowExclusiveLock);
174 
175  /*
176  * Triggers must be on tables or views, and there are additional
177  * relation-type-specific restrictions.
178  */
179  if (rel->rd_rel->relkind == RELKIND_RELATION ||
180  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
181  {
182  /* Tables can't have INSTEAD OF triggers */
183  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
184  stmt->timing != TRIGGER_TYPE_AFTER)
185  ereport(ERROR,
186  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
187  errmsg("\"%s\" is a table",
188  RelationGetRelationName(rel)),
189  errdetail("Tables cannot have INSTEAD OF triggers.")));
190  /* Disallow ROW triggers on partitioned tables */
191  if (stmt->row && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
192  ereport(ERROR,
193  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
194  errmsg("\"%s\" is a partitioned table",
195  RelationGetRelationName(rel)),
196  errdetail("Partitioned tables cannot have ROW triggers.")));
197  }
198  else if (rel->rd_rel->relkind == RELKIND_VIEW)
199  {
200  /*
201  * Views can have INSTEAD OF triggers (which we check below are
202  * row-level), or statement-level BEFORE/AFTER triggers.
203  */
204  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
205  ereport(ERROR,
206  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
207  errmsg("\"%s\" is a view",
208  RelationGetRelationName(rel)),
209  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
210  /* Disallow TRUNCATE triggers on VIEWs */
211  if (TRIGGER_FOR_TRUNCATE(stmt->events))
212  ereport(ERROR,
213  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
214  errmsg("\"%s\" is a view",
215  RelationGetRelationName(rel)),
216  errdetail("Views cannot have TRUNCATE triggers.")));
217  }
218  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
219  {
220  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
221  stmt->timing != TRIGGER_TYPE_AFTER)
222  ereport(ERROR,
223  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
224  errmsg("\"%s\" is a foreign table",
225  RelationGetRelationName(rel)),
226  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
227 
228  if (TRIGGER_FOR_TRUNCATE(stmt->events))
229  ereport(ERROR,
230  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
231  errmsg("\"%s\" is a foreign table",
232  RelationGetRelationName(rel)),
233  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
234 
235  if (stmt->isconstraint)
236  ereport(ERROR,
237  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
238  errmsg("\"%s\" is a foreign table",
239  RelationGetRelationName(rel)),
240  errdetail("Foreign tables cannot have constraint triggers.")));
241  }
242  else
243  ereport(ERROR,
244  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
245  errmsg("\"%s\" is not a table or view",
246  RelationGetRelationName(rel))));
247 
248  if (!allowSystemTableMods && IsSystemRelation(rel))
249  ereport(ERROR,
250  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
251  errmsg("permission denied: \"%s\" is a system catalog",
252  RelationGetRelationName(rel))));
253 
254  if (stmt->isconstraint)
255  {
256  /*
257  * We must take a lock on the target relation to protect against
258  * concurrent drop. It's not clear that AccessShareLock is strong
259  * enough, but we certainly need at least that much... otherwise, we
260  * might end up creating a pg_constraint entry referencing a
261  * nonexistent table.
262  */
263  if (OidIsValid(refRelOid))
264  {
265  LockRelationOid(refRelOid, AccessShareLock);
266  constrrelid = refRelOid;
267  }
268  else if (stmt->constrrel != NULL)
269  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
270  false);
271  }
272 
273  /* permission checks */
274  if (!isInternal)
275  {
276  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
277  ACL_TRIGGER);
278  if (aclresult != ACLCHECK_OK)
279  aclcheck_error(aclresult, ACL_KIND_CLASS,
280  RelationGetRelationName(rel));
281 
282  if (OidIsValid(constrrelid))
283  {
284  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
285  ACL_TRIGGER);
286  if (aclresult != ACLCHECK_OK)
287  aclcheck_error(aclresult, ACL_KIND_CLASS,
288  get_rel_name(constrrelid));
289  }
290  }
291 
292  /* Compute tgtype */
293  TRIGGER_CLEAR_TYPE(tgtype);
294  if (stmt->row)
295  TRIGGER_SETT_ROW(tgtype);
296  tgtype |= stmt->timing;
297  tgtype |= stmt->events;
298 
299  /* Disallow ROW-level TRUNCATE triggers */
300  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
301  ereport(ERROR,
302  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
303  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
304 
305  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
306  if (TRIGGER_FOR_INSTEAD(tgtype))
307  {
308  if (!TRIGGER_FOR_ROW(tgtype))
309  ereport(ERROR,
310  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
311  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
312  if (stmt->whenClause)
313  ereport(ERROR,
314  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
315  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
316  if (stmt->columns != NIL)
317  ereport(ERROR,
318  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
319  errmsg("INSTEAD OF triggers cannot have column lists")));
320  }
321 
322  /*
323  * We don't yet support naming ROW transition variables, but the parser
324  * recognizes the syntax so we can give a nicer message here.
325  *
326  * Per standard, REFERENCING TABLE names are only allowed on AFTER
327  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
328  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
329  * only allowed once. Per standard, OLD may not be specified when
330  * creating a trigger only for INSERT, and NEW may not be specified when
331  * creating a trigger only for DELETE.
332  *
333  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
334  * reference both ROW and TABLE transition data.
335  */
336  if (stmt->transitionRels != NIL)
337  {
338  List *varList = stmt->transitionRels;
339  ListCell *lc;
340 
341  foreach(lc, varList)
342  {
343  TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
344 
345  if (!(tt->isTable))
346  ereport(ERROR,
347  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
348  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
349  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
350 
351  /*
352  * Because of the above test, we omit further ROW-related testing
353  * below. If we later allow naming OLD and NEW ROW variables,
354  * adjustments will be needed below.
355  */
356 
357  if (stmt->timing != TRIGGER_TYPE_AFTER)
358  ereport(ERROR,
359  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
360  errmsg("transition table name can only be specified for an AFTER trigger")));
361 
362  if (tt->isNew)
363  {
364  if (!(TRIGGER_FOR_INSERT(tgtype) ||
365  TRIGGER_FOR_UPDATE(tgtype)))
366  ereport(ERROR,
367  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
368  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
369 
370  if (newtablename != NULL)
371  ereport(ERROR,
372  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
373  errmsg("NEW TABLE cannot be specified multiple times")));
374 
375  newtablename = tt->name;
376  }
377  else
378  {
379  if (!(TRIGGER_FOR_DELETE(tgtype) ||
380  TRIGGER_FOR_UPDATE(tgtype)))
381  ereport(ERROR,
382  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
383  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
384 
385  if (oldtablename != NULL)
386  ereport(ERROR,
387  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
388  errmsg("OLD TABLE cannot be specified multiple times")));
389 
390  oldtablename = tt->name;
391  }
392  }
393 
394  if (newtablename != NULL && oldtablename != NULL &&
395  strcmp(newtablename, oldtablename) == 0)
396  ereport(ERROR,
397  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
398  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
399  }
400 
401  /*
402  * Parse the WHEN clause, if any
403  */
404  if (stmt->whenClause)
405  {
406  ParseState *pstate;
407  RangeTblEntry *rte;
408  List *varList;
409  ListCell *lc;
410 
411  /* Set up a pstate to parse with */
412  pstate = make_parsestate(NULL);
413  pstate->p_sourcetext = queryString;
414 
415  /*
416  * Set up RTEs for OLD and NEW references.
417  *
418  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
419  */
420  rte = addRangeTableEntryForRelation(pstate, rel,
421  makeAlias("old", NIL),
422  false, false);
423  addRTEtoQuery(pstate, rte, false, true, true);
424  rte = addRangeTableEntryForRelation(pstate, rel,
425  makeAlias("new", NIL),
426  false, false);
427  addRTEtoQuery(pstate, rte, false, true, true);
428 
429  /* Transform expression. Copy to be sure we don't modify original */
430  whenClause = transformWhereClause(pstate,
431  copyObject(stmt->whenClause),
432  EXPR_KIND_TRIGGER_WHEN,
433  "WHEN");
434  /* we have to fix its collations too */
435  assign_expr_collations(pstate, whenClause);
436 
437  /*
438  * Check for disallowed references to OLD/NEW.
439  *
440  * NB: pull_var_clause is okay here only because we don't allow
441  * subselects in WHEN clauses; it would fail to examine the contents
442  * of subselects.
443  */
444  varList = pull_var_clause(whenClause, 0);
445  foreach(lc, varList)
446  {
447  Var *var = (Var *) lfirst(lc);
448 
449  switch (var->varno)
450  {
451  case PRS2_OLD_VARNO:
452  if (!TRIGGER_FOR_ROW(tgtype))
453  ereport(ERROR,
454  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
455  errmsg("statement trigger's WHEN condition cannot reference column values"),
456  parser_errposition(pstate, var->location)));
457  if (TRIGGER_FOR_INSERT(tgtype))
458  ereport(ERROR,
459  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
460  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
461  parser_errposition(pstate, var->location)));
462  /* system columns are okay here */
463  break;
464  case PRS2_NEW_VARNO:
465  if (!TRIGGER_FOR_ROW(tgtype))
466  ereport(ERROR,
467  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
468  errmsg("statement trigger's WHEN condition cannot reference column values"),
469  parser_errposition(pstate, var->location)));
470  if (TRIGGER_FOR_DELETE(tgtype))
471  ereport(ERROR,
472  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
473  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
474  parser_errposition(pstate, var->location)));
475  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
476  ereport(ERROR,
477  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
478  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
479  parser_errposition(pstate, var->location)));
480  break;
481  default:
482  /* can't happen without add_missing_from, so just elog */
483  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
484  break;
485  }
486  }
487 
488  /* we'll need the rtable for recordDependencyOnExpr */
489  whenRtable = pstate->p_rtable;
490 
491  qual = nodeToString(whenClause);
492 
493  free_parsestate(pstate);
494  }
495  else
496  {
497  whenClause = NULL;
498  whenRtable = NIL;
499  qual = NULL;
500  }
501 
502  /*
503  * Find and validate the trigger function.
504  */
505  funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false);
506  if (!isInternal)
507  {
508  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
509  if (aclresult != ACLCHECK_OK)
510  aclcheck_error(aclresult, ACL_KIND_PROC,
511  NameListToString(stmt->funcname));
512  }
513  funcrettype = get_func_rettype(funcoid);
514  if (funcrettype != TRIGGEROID)
515  {
516  /*
517  * We allow OPAQUE just so we can load old dump files. When we see a
518  * trigger function declared OPAQUE, change it to TRIGGER.
519  */
520  if (funcrettype == OPAQUEOID)
521  {
522  ereport(WARNING,
523  (errmsg("changing return type of function %s from %s to %s",
524  NameListToString(stmt->funcname),
525  "opaque", "trigger")));
526  SetFunctionReturnType(funcoid, TRIGGEROID);
527  }
528  else
529  ereport(ERROR,
530  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
531  errmsg("function %s must return type %s",
532  NameListToString(stmt->funcname), "trigger")));
533  }
534 
535  /*
536  * If the command is a user-entered CREATE CONSTRAINT TRIGGER command that
537  * references one of the built-in RI_FKey trigger functions, assume it is
538  * from a dump of a pre-7.3 foreign key constraint, and take steps to
539  * convert this legacy representation into a regular foreign key
540  * constraint. Ugly, but necessary for loading old dump files.
541  */
542  if (stmt->isconstraint && !isInternal &&
543  list_length(stmt->args) >= 6 &&
544  (list_length(stmt->args) % 2) == 0 &&
545  RI_FKey_trigger_type(funcoid) != RI_TRIGGER_NONE)
546  {
547  /* Keep lock on target rel until end of xact */
548  heap_close(rel, NoLock);
549 
550  ConvertTriggerToFK(stmt, funcoid);
551 
552  return InvalidObjectAddress;
553  }
554 
555  /*
556  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
557  * corresponding pg_constraint entry.
558  */
559  if (stmt->isconstraint && !OidIsValid(constraintOid))
560  {
561  /* Internal callers should have made their own constraints */
562  Assert(!isInternal);
563  constraintOid = CreateConstraintEntry(stmt->trigname,
564  RelationGetNamespace(rel),
565  CONSTRAINT_TRIGGER,
566  stmt->deferrable,
567  stmt->initdeferred,
568  true,
569  RelationGetRelid(rel),
570  NULL, /* no conkey */
571  0,
572  InvalidOid, /* no domain */
573  InvalidOid, /* no index */
574  InvalidOid, /* no foreign key */
575  NULL,
576  NULL,
577  NULL,
578  NULL,
579  0,
580  ' ',
581  ' ',
582  ' ',
583  NULL, /* no exclusion */
584  NULL, /* no check constraint */
585  NULL,
586  NULL,
587  true, /* islocal */
588  0, /* inhcount */
589  true, /* isnoinherit */
590  isInternal); /* is_internal */
591  }
592 
593  /*
594  * Generate the trigger's OID now, so that we can use it in the name if
595  * needed.
596  */
597  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
598 
599  trigoid = GetNewOid(tgrel);
600 
601  /*
602  * If trigger is internally generated, modify the provided trigger name to
603  * ensure uniqueness by appending the trigger OID. (Callers will usually
604  * supply a simple constant trigger name in these cases.)
605  */
606  if (isInternal)
607  {
608  snprintf(internaltrigname, sizeof(internaltrigname),
609  "%s_%u", stmt->trigname, trigoid);
610  trigname = internaltrigname;
611  }
612  else
613  {
614  /* user-defined trigger; use the specified trigger name as-is */
615  trigname = stmt->trigname;
616  }
617 
618  /*
619  * Scan pg_trigger for existing triggers on relation. We do this only to
620  * give a nice error message if there's already a trigger of the same
621  * name. (The unique index on tgrelid/tgname would complain anyway.) We
622  * can skip this for internally generated triggers, since the name
623  * modification above should be sufficient.
624  *
625  * NOTE that this is cool only because we have ShareRowExclusiveLock on
626  * the relation, so the trigger set won't be changing underneath us.
627  */
628  if (!isInternal)
629  {
630  ScanKeyInit(&key,
631  Anum_pg_trigger_tgrelid,
632  BTEqualStrategyNumber, F_OIDEQ,
633  ObjectIdGetDatum(RelationGetRelid(rel)));
634  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
635  NULL, 1, &key);
636  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
637  {
638  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
639 
640  if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
641  ereport(ERROR,
642  (errcode(ERRCODE_DUPLICATE_OBJECT),
643  errmsg("trigger \"%s\" for relation \"%s\" already exists",
644  trigname, RelationGetRelationName(rel))));
645  }
646  systable_endscan(tgscan);
647  }
648 
649  /*
650  * Build the new pg_trigger tuple.
651  */
652  memset(nulls, false, sizeof(nulls));
653 
654  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
655  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
656  CStringGetDatum(trigname));
657  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
658  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
659  values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(TRIGGER_FIRES_ON_ORIGIN);
660  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
661  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
662  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
663  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
664  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
665  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
666 
667  if (stmt->args)
668  {
669  ListCell *le;
670  char *args;
671  int16 nargs = list_length(stmt->args);
672  int len = 0;
673 
674  foreach(le, stmt->args)
675  {
676  char *ar = strVal(lfirst(le));
677 
678  len += strlen(ar) + 4;
679  for (; *ar; ar++)
680  {
681  if (*ar == '\\')
682  len++;
683  }
684  }
685  args = (char *) palloc(len + 1);
686  args[0] = '\0';
687  foreach(le, stmt->args)
688  {
689  char *s = strVal(lfirst(le));
690  char *d = args + strlen(args);
691 
692  while (*s)
693  {
694  if (*s == '\\')
695  *d++ = '\\';
696  *d++ = *s++;
697  }
698  strcpy(d, "\\000");
699  }
700  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
701  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
702  CStringGetDatum(args));
703  }
704  else
705  {
706  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
707  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
708  CStringGetDatum(""));
709  }
710 
711  /* build column number array if it's a column-specific trigger */
712  ncolumns = list_length(stmt->columns);
713  if (ncolumns == 0)
714  columns = NULL;
715  else
716  {
717  ListCell *cell;
718  int i = 0;
719 
720  columns = (int16 *) palloc(ncolumns * sizeof(int16));
721  foreach(cell, stmt->columns)
722  {
723  char *name = strVal(lfirst(cell));
724  int16 attnum;
725  int j;
726 
727  /* Lookup column name. System columns are not allowed */
728  attnum = attnameAttNum(rel, name, false);
729  if (attnum == InvalidAttrNumber)
730  ereport(ERROR,
731  (errcode(ERRCODE_UNDEFINED_COLUMN),
732  errmsg("column \"%s\" of relation \"%s\" does not exist",
733  name, RelationGetRelationName(rel))));
734 
735  /* Check for duplicates */
736  for (j = i - 1; j >= 0; j--)
737  {
738  if (columns[j] == attnum)
739  ereport(ERROR,
740  (errcode(ERRCODE_DUPLICATE_COLUMN),
741  errmsg("column \"%s\" specified more than once",
742  name)));
743  }
744 
745  columns[i++] = attnum;
746  }
747  }
748  tgattr = buildint2vector(columns, ncolumns);
749  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
750 
751  /* set tgqual if trigger has WHEN clause */
752  if (qual)
753  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
754  else
755  nulls[Anum_pg_trigger_tgqual - 1] = true;
756 
757  if (oldtablename)
758  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
759  CStringGetDatum(oldtablename));
760  else
761  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
762  if (newtablename)
763  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
764  CStringGetDatum(newtablename));
765  else
766  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
767 
768  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
769 
770  /* force tuple to have the desired OID */
771  HeapTupleSetOid(tuple, trigoid);
772 
773  /*
774  * Insert tuple into pg_trigger.
775  */
776  CatalogTupleInsert(tgrel, tuple);
777 
778  heap_freetuple(tuple);
779  heap_close(tgrel, RowExclusiveLock);
780 
781  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
782  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
783  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
784  if (oldtablename)
785  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
786  if (newtablename)
787  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
788 
789  /*
790  * Update relation's pg_class entry. Crucial side-effect: other backends
791  * (and this one too!) are sent SI message to make them rebuild relcache
792  * entries.
793  */
794  pgrel = heap_open(RelationRelationId, RowExclusiveLock);
795  tuple = SearchSysCacheCopy1(RELOID,
796  ObjectIdGetDatum(RelationGetRelid(rel)));
797  if (!HeapTupleIsValid(tuple))
798  elog(ERROR, "cache lookup failed for relation %u",
799  RelationGetRelid(rel));
800 
801  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
802 
803  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
804 
805  heap_freetuple(tuple);
806  heap_close(pgrel, RowExclusiveLock);
807 
808  /*
809  * We used to try to update the rel's relcache entry here, but that's
810  * fairly pointless since it will happen as a byproduct of the upcoming
811  * CommandCounterIncrement...
812  */
813 
814  /*
815  * Record dependencies for trigger. Always place a normal dependency on
816  * the function.
817  */
818  myself.classId = TriggerRelationId;
819  myself.objectId = trigoid;
820  myself.objectSubId = 0;
821 
822  referenced.classId = ProcedureRelationId;
823  referenced.objectId = funcoid;
824  referenced.objectSubId = 0;
825  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
826 
827  if (isInternal && OidIsValid(constraintOid))
828  {
829  /*
830  * Internally-generated trigger for a constraint, so make it an
831  * internal dependency of the constraint. We can skip depending on
832  * the relation(s), as there'll be an indirect dependency via the
833  * constraint.
834  */
835  referenced.classId = ConstraintRelationId;
836  referenced.objectId = constraintOid;
837  referenced.objectSubId = 0;
838  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
839  }
840  else
841  {
842  /*
843  * User CREATE TRIGGER, so place dependencies. We make trigger be
844  * auto-dropped if its relation is dropped or if the FK relation is
845  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
846  */
847  referenced.classId = RelationRelationId;
848  referenced.objectId = RelationGetRelid(rel);
849  referenced.objectSubId = 0;
850  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
851  if (OidIsValid(constrrelid))
852  {
853  referenced.classId = RelationRelationId;
854  referenced.objectId = constrrelid;
855  referenced.objectSubId = 0;
856  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
857  }
858  /* Not possible to have an index dependency in this case */
859  Assert(!OidIsValid(indexOid));
860 
861  /*
862  * If it's a user-specified constraint trigger, make the constraint
863  * internally dependent on the trigger instead of vice versa.
864  */
865  if (OidIsValid(constraintOid))
866  {
867  referenced.classId = ConstraintRelationId;
868  referenced.objectId = constraintOid;
869  referenced.objectSubId = 0;
870  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
871  }
872  }
873 
874  /* If column-specific trigger, add normal dependencies on columns */
875  if (columns != NULL)
876  {
877  int i;
878 
879  referenced.classId = RelationRelationId;
880  referenced.objectId = RelationGetRelid(rel);
881  for (i = 0; i < ncolumns; i++)
882  {
883  referenced.objectSubId = columns[i];
884  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
885  }
886  }
887 
888  /*
889  * If it has a WHEN clause, add dependencies on objects mentioned in the
890  * expression (eg, functions, as well as any columns used).
891  */
892  if (whenClause != NULL)
893  recordDependencyOnExpr(&myself, whenClause, whenRtable,
894  DEPENDENCY_NORMAL);
895 
896  /* Post creation hook for new trigger */
897  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
898  isInternal);
899 
900  /* Keep lock on target rel until end of xact */
901  heap_close(rel, NoLock);
902 
903  return myself;
904 }
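/*
 * Editorial sketch (not part of the original file): a rough illustration of
 * how an internal caller might drive CreateTrigger() directly.  The field
 * names follow CreateTrigStmt in nodes/parsenodes.h, but the function name,
 * the chosen trigger function, and the scenario are hypothetical, so treat
 * this as an assumption-laden example rather than canonical usage.
 */
#ifdef CREATE_TRIGGER_EXAMPLE_SKETCH
static ObjectAddress
create_example_internal_trigger(Oid relOid)
{
	CreateTrigStmt *trigstmt = makeNode(CreateTrigStmt);

	trigstmt->trigname = "example_internal";
	trigstmt->relation = NULL;	/* relation is identified by relOid below */
	trigstmt->funcname = SystemFuncName("suppress_redundant_updates_trigger");
	trigstmt->args = NIL;
	trigstmt->row = true;
	trigstmt->timing = TRIGGER_TYPE_BEFORE;
	trigstmt->events = TRIGGER_TYPE_UPDATE;
	trigstmt->columns = NIL;
	trigstmt->whenClause = NULL;	/* so queryString may be NULL */
	trigstmt->transitionRels = NIL;
	trigstmt->isconstraint = false;
	trigstmt->deferrable = false;
	trigstmt->initdeferred = false;
	trigstmt->constrrel = NULL;

	/* isInternal = true: the trigger OID is appended to the name for uniqueness */
	return CreateTrigger(trigstmt, NULL, relOid, InvalidOid, InvalidOid,
						 InvalidOid, true);
}
#endif							/* CREATE_TRIGGER_EXAMPLE_SKETCH */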
905 
906 
907 /*
908  * Convert legacy (pre-7.3) CREATE CONSTRAINT TRIGGER commands into
909  * full-fledged foreign key constraints.
910  *
911  * The conversion is complex because a pre-7.3 foreign key involved three
912  * separate triggers, which were reported separately in dumps. While the
913  * single trigger on the referencing table adds no new information, we need
914  * to know the trigger functions of both of the triggers on the referenced
915  * table to build the constraint declaration. Also, due to lack of proper
916  * dependency checking pre-7.3, it is possible that the source database had
917  * an incomplete set of triggers resulting in an only partially enforced
918  * FK constraint. (This would happen if one of the tables had been dropped
919  * and re-created, but only if the DB had been affected by a 7.0 pg_dump bug
920  * that caused loss of tgconstrrelid information.) We choose to translate to
921  * an FK constraint only when we've seen all three triggers of a set. This is
922  * implemented by storing unmatched items in a list in TopMemoryContext.
923  * We match triggers together by comparing the trigger arguments (which
924  * include constraint name, table and column names, so should be good enough).
925  */
926 typedef struct
927 {
928  List *args; /* list of (T_String) Values or NIL */
929  Oid funcoids[3]; /* OIDs of trigger functions */
930  /* The three function OIDs are stored in the order update, delete, child */
931 } OldTriggerInfo;
932 
933 static void
934 ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid)
935 {
936  static List *info_list = NIL;
937 
938  static const char *const funcdescr[3] = {
939  gettext_noop("Found referenced table's UPDATE trigger."),
940  gettext_noop("Found referenced table's DELETE trigger."),
941  gettext_noop("Found referencing table's trigger.")
942  };
943 
944  char *constr_name;
945  char *fk_table_name;
946  char *pk_table_name;
947  char fk_matchtype = FKCONSTR_MATCH_SIMPLE;
948  List *fk_attrs = NIL;
949  List *pk_attrs = NIL;
950  StringInfoData buf;
951  int funcnum;
952  OldTriggerInfo *info = NULL;
953  ListCell *l;
954  int i;
955 
956  /* Parse out the trigger arguments */
957  constr_name = strVal(linitial(stmt->args));
958  fk_table_name = strVal(lsecond(stmt->args));
959  pk_table_name = strVal(lthird(stmt->args));
960  i = 0;
961  foreach(l, stmt->args)
962  {
963  Value *arg = (Value *) lfirst(l);
964 
965  i++;
966  if (i < 4) /* skip constraint and table names */
967  continue;
968  if (i == 4) /* handle match type */
969  {
970  if (strcmp(strVal(arg), "FULL") == 0)
971  fk_matchtype = FKCONSTR_MATCH_FULL;
972  else
973  fk_matchtype = FKCONSTR_MATCH_SIMPLE;
974  continue;
975  }
976  if (i % 2)
977  fk_attrs = lappend(fk_attrs, arg);
978  else
979  pk_attrs = lappend(pk_attrs, arg);
980  }
981 
982  /* Prepare description of constraint for use in messages */
983  initStringInfo(&buf);
984  appendStringInfo(&buf, "FOREIGN KEY %s(",
985  quote_identifier(fk_table_name));
986  i = 0;
987  foreach(l, fk_attrs)
988  {
989  Value *arg = (Value *) lfirst(l);
990 
991  if (i++ > 0)
992  appendStringInfoChar(&buf, ',');
993  appendStringInfoString(&buf, quote_identifier(strVal(arg)));
994  }
995  appendStringInfo(&buf, ") REFERENCES %s(",
996  quote_identifier(pk_table_name));
997  i = 0;
998  foreach(l, pk_attrs)
999  {
1000  Value *arg = (Value *) lfirst(l);
1001 
1002  if (i++ > 0)
1003  appendStringInfoChar(&buf, ',');
1004  appendStringInfoString(&buf, quote_identifier(strVal(arg)));
1005  }
1006  appendStringInfoChar(&buf, ')');
1007 
1008  /* Identify class of trigger --- update, delete, or referencing-table */
1009  switch (funcoid)
1010  {
1011  case F_RI_FKEY_CASCADE_UPD:
1012  case F_RI_FKEY_RESTRICT_UPD:
1013  case F_RI_FKEY_SETNULL_UPD:
1014  case F_RI_FKEY_SETDEFAULT_UPD:
1015  case F_RI_FKEY_NOACTION_UPD:
1016  funcnum = 0;
1017  break;
1018 
1019  case F_RI_FKEY_CASCADE_DEL:
1020  case F_RI_FKEY_RESTRICT_DEL:
1021  case F_RI_FKEY_SETNULL_DEL:
1022  case F_RI_FKEY_SETDEFAULT_DEL:
1023  case F_RI_FKEY_NOACTION_DEL:
1024  funcnum = 1;
1025  break;
1026 
1027  default:
1028  funcnum = 2;
1029  break;
1030  }
1031 
1032  /* See if we have a match to this trigger */
1033  foreach(l, info_list)
1034  {
1035  info = (OldTriggerInfo *) lfirst(l);
1036  if (info->funcoids[funcnum] == InvalidOid &&
1037  equal(info->args, stmt->args))
1038  {
1039  info->funcoids[funcnum] = funcoid;
1040  break;
1041  }
1042  }
1043 
1044  if (l == NULL)
1045  {
1046  /* First trigger of set, so create a new list entry */
1047  MemoryContext oldContext;
1048 
1049  ereport(NOTICE,
1050  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1051  constr_name, buf.data),
1052  errdetail_internal("%s", _(funcdescr[funcnum]))));
1053  oldContext = MemoryContextSwitchTo(TopMemoryContext);
1054  info = (OldTriggerInfo *) palloc0(sizeof(OldTriggerInfo));
1055  info->args = copyObject(stmt->args);
1056  info->funcoids[funcnum] = funcoid;
1057  info_list = lappend(info_list, info);
1058  MemoryContextSwitchTo(oldContext);
1059  }
1060  else if (info->funcoids[0] == InvalidOid ||
1061  info->funcoids[1] == InvalidOid ||
1062  info->funcoids[2] == InvalidOid)
1063  {
1064  /* Second trigger of set */
1065  ereport(NOTICE,
1066  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1067  constr_name, buf.data),
1068  errdetail_internal("%s", _(funcdescr[funcnum]))));
1069  }
1070  else
1071  {
1072  /* OK, we have a set, so make the FK constraint ALTER TABLE cmd */
1073  AlterTableStmt *atstmt = makeNode(AlterTableStmt);
1074  AlterTableCmd *atcmd = makeNode(AlterTableCmd);
1075  Constraint *fkcon = makeNode(Constraint);
1076  PlannedStmt *wrapper = makeNode(PlannedStmt);
1077 
1078  ereport(NOTICE,
1079  (errmsg("converting trigger group into constraint \"%s\" %s",
1080  constr_name, buf.data),
1081  errdetail_internal("%s", _(funcdescr[funcnum]))));
1082  fkcon->contype = CONSTR_FOREIGN;
1083  fkcon->location = -1;
1084  if (funcnum == 2)
1085  {
1086  /* This trigger is on the FK table */
1087  atstmt->relation = stmt->relation;
1088  if (stmt->constrrel)
1089  fkcon->pktable = stmt->constrrel;
1090  else
1091  {
1092  /* Work around ancient pg_dump bug that omitted constrrel */
1093  fkcon->pktable = makeRangeVar(NULL, pk_table_name, -1);
1094  }
1095  }
1096  else
1097  {
1098  /* This trigger is on the PK table */
1099  fkcon->pktable = stmt->relation;
1100  if (stmt->constrrel)
1101  atstmt->relation = stmt->constrrel;
1102  else
1103  {
1104  /* Work around ancient pg_dump bug that omitted constrrel */
1105  atstmt->relation = makeRangeVar(NULL, fk_table_name, -1);
1106  }
1107  }
1108  atstmt->cmds = list_make1(atcmd);
1109  atstmt->relkind = OBJECT_TABLE;
1110  atcmd->subtype = AT_AddConstraint;
1111  atcmd->def = (Node *) fkcon;
1112  if (strcmp(constr_name, "<unnamed>") == 0)
1113  fkcon->conname = NULL;
1114  else
1115  fkcon->conname = constr_name;
1116  fkcon->fk_attrs = fk_attrs;
1117  fkcon->pk_attrs = pk_attrs;
1118  fkcon->fk_matchtype = fk_matchtype;
1119  switch (info->funcoids[0])
1120  {
1121  case F_RI_FKEY_NOACTION_UPD:
1122  fkcon->fk_upd_action = FKCONSTR_ACTION_NOACTION;
1123  break;
1124  case F_RI_FKEY_CASCADE_UPD:
1125  fkcon->fk_upd_action = FKCONSTR_ACTION_CASCADE;
1126  break;
1127  case F_RI_FKEY_RESTRICT_UPD:
1128  fkcon->fk_upd_action = FKCONSTR_ACTION_RESTRICT;
1129  break;
1130  case F_RI_FKEY_SETNULL_UPD:
1131  fkcon->fk_upd_action = FKCONSTR_ACTION_SETNULL;
1132  break;
1133  case F_RI_FKEY_SETDEFAULT_UPD:
1134  fkcon->fk_upd_action = FKCONSTR_ACTION_SETDEFAULT;
1135  break;
1136  default:
1137  /* can't get here because of earlier checks */
1138  elog(ERROR, "confused about RI update function");
1139  }
1140  switch (info->funcoids[1])
1141  {
1142  case F_RI_FKEY_NOACTION_DEL:
1143  fkcon->fk_del_action = FKCONSTR_ACTION_NOACTION;
1144  break;
1145  case F_RI_FKEY_CASCADE_DEL:
1146  fkcon->fk_del_action = FKCONSTR_ACTION_CASCADE;
1147  break;
1148  case F_RI_FKEY_RESTRICT_DEL:
1149  fkcon->fk_del_action = FKCONSTR_ACTION_RESTRICT;
1150  break;
1151  case F_RI_FKEY_SETNULL_DEL:
1152  fkcon->fk_del_action = FKCONSTR_ACTION_SETNULL;
1153  break;
1154  case F_RI_FKEY_SETDEFAULT_DEL:
1155  fkcon->fk_del_action = FKCONSTR_ACTION_SETDEFAULT;
1156  break;
1157  default:
1158  /* can't get here because of earlier checks */
1159  elog(ERROR, "confused about RI delete function");
1160  }
1161  fkcon->deferrable = stmt->deferrable;
1162  fkcon->initdeferred = stmt->initdeferred;
1163  fkcon->skip_validation = false;
1164  fkcon->initially_valid = true;
1165 
1166  /* finally, wrap it in a dummy PlannedStmt */
1167  wrapper->commandType = CMD_UTILITY;
1168  wrapper->canSetTag = false;
1169  wrapper->utilityStmt = (Node *) atstmt;
1170  wrapper->stmt_location = -1;
1171  wrapper->stmt_len = -1;
1172 
1173  /* ... and execute it */
1174  ProcessUtility(wrapper,
1175  "(generated ALTER TABLE ADD FOREIGN KEY command)",
1176  PROCESS_UTILITY_SUBCOMMAND, NULL, NULL,
1177  None_Receiver, NULL);
1178 
1179  /* Remove the matched item from the list */
1180  info_list = list_delete_ptr(info_list, info);
1181  pfree(info);
1182  /* We leak the copied args ... not worth worrying about */
1183  }
1184 }
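/*
 * Editorial illustration (not in the original file): the legacy dump format
 * recognized by ConvertTriggerToFK() consists of three CREATE CONSTRAINT
 * TRIGGER commands per foreign key, roughly of this shape (all names here
 * are invented):
 *
 *     CREATE CONSTRAINT TRIGGER "<unnamed>" AFTER INSERT OR UPDATE ON fk_tbl
 *         FROM pk_tbl NOT DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW
 *         EXECUTE PROCEDURE "RI_FKey_check_ins" ('<unnamed>', 'fk_tbl',
 *                                                'pk_tbl', 'UNSPECIFIED',
 *                                                'fk_col', 'pk_col');
 *
 * together with matching triggers on pk_tbl calling the RI_FKey_*_del and
 * RI_FKey_*_upd functions.  Once all three members of a set have been seen,
 * the code above replays them as a single
 * ALTER TABLE fk_tbl ADD FOREIGN KEY (fk_col) REFERENCES pk_tbl (pk_col).
 */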
1185 
1186 /*
1187  * Guts of trigger deletion.
1188  */
1189 void
1190 RemoveTriggerById(Oid trigOid)
1191 {
1192  Relation tgrel;
1193  SysScanDesc tgscan;
1194  ScanKeyData skey[1];
1195  HeapTuple tup;
1196  Oid relid;
1197  Relation rel;
1198 
1199  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1200 
1201  /*
1202  * Find the trigger to delete.
1203  */
1204  ScanKeyInit(&skey[0],
1205  ObjectIdAttributeNumber,
1206  BTEqualStrategyNumber, F_OIDEQ,
1207  ObjectIdGetDatum(trigOid));
1208 
1209  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1210  NULL, 1, skey);
1211 
1212  tup = systable_getnext(tgscan);
1213  if (!HeapTupleIsValid(tup))
1214  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1215 
1216  /*
1217  * Open and exclusive-lock the relation the trigger belongs to.
1218  */
1219  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1220 
1221  rel = heap_open(relid, AccessExclusiveLock);
1222 
1223  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1224  rel->rd_rel->relkind != RELKIND_VIEW &&
1225  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1226  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1227  ereport(ERROR,
1228  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1229  errmsg("\"%s\" is not a table, view, or foreign table",
1230  RelationGetRelationName(rel))));
1231 
1232  if (!allowSystemTableMods && IsSystemRelation(rel))
1233  ereport(ERROR,
1234  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1235  errmsg("permission denied: \"%s\" is a system catalog",
1236  RelationGetRelationName(rel))));
1237 
1238  /*
1239  * Delete the pg_trigger tuple.
1240  */
1241  CatalogTupleDelete(tgrel, &tup->t_self);
1242 
1243  systable_endscan(tgscan);
1244  heap_close(tgrel, RowExclusiveLock);
1245 
1246  /*
1247  * We do not bother to try to determine whether any other triggers remain,
1248  * which would be needed in order to decide whether it's safe to clear the
1249  * relation's relhastriggers. (In any case, there might be a concurrent
1250  * process adding new triggers.) Instead, just force a relcache inval to
1251  * make other backends (and this one too!) rebuild their relcache entries.
1252  * There's no great harm in leaving relhastriggers true even if there are
1253  * no triggers left.
1254  */
1255  CacheInvalidateRelcache(rel);
1256 
1257  /* Keep lock on trigger's rel until end of xact */
1258  heap_close(rel, NoLock);
1259 }
1260 
1261 /*
1262  * get_trigger_oid - Look up a trigger by name to find its OID.
1263  *
1264  * If missing_ok is false, throw an error if trigger not found. If
1265  * true, just return InvalidOid.
1266  */
1267 Oid
1268 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1269 {
1270  Relation tgrel;
1271  ScanKeyData skey[2];
1272  SysScanDesc tgscan;
1273  HeapTuple tup;
1274  Oid oid;
1275 
1276  /*
1277  * Find the trigger, verify permissions, set up object address
1278  */
1279  tgrel = heap_open(TriggerRelationId, AccessShareLock);
1280 
1281  ScanKeyInit(&skey[0],
1282  Anum_pg_trigger_tgrelid,
1283  BTEqualStrategyNumber, F_OIDEQ,
1284  ObjectIdGetDatum(relid));
1285  ScanKeyInit(&skey[1],
1286  Anum_pg_trigger_tgname,
1287  BTEqualStrategyNumber, F_NAMEEQ,
1288  CStringGetDatum(trigname));
1289 
1290  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1291  NULL, 2, skey);
1292 
1293  tup = systable_getnext(tgscan);
1294 
1295  if (!HeapTupleIsValid(tup))
1296  {
1297  if (!missing_ok)
1298  ereport(ERROR,
1299  (errcode(ERRCODE_UNDEFINED_OBJECT),
1300  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1301  trigname, get_rel_name(relid))));
1302  oid = InvalidOid;
1303  }
1304  else
1305  {
1306  oid = HeapTupleGetOid(tup);
1307  }
1308 
1309  systable_endscan(tgscan);
1310  heap_close(tgrel, AccessShareLock);
1311  return oid;
1312 }
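/*
 * Editorial usage sketch (hypothetical caller, not in the original file):
 * code that wants to tolerate a missing trigger typically passes
 * missing_ok = true and tests the result, e.g.
 *
 *     Oid trigoid = get_trigger_oid(relid, "example_trigger", true);
 *
 *     if (!OidIsValid(trigoid))
 *         ...handle the not-found case...
 */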
1313 
1314 /*
1315  * Perform permissions and integrity checks before acquiring a relation lock.
1316  */
1317 static void
1318 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1319  void *arg)
1320 {
1321  HeapTuple tuple;
1322  Form_pg_class form;
1323 
1324  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1325  if (!HeapTupleIsValid(tuple))
1326  return; /* concurrently dropped */
1327  form = (Form_pg_class) GETSTRUCT(tuple);
1328 
1329  /* only tables and views can have triggers */
1330  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1331  form->relkind != RELKIND_FOREIGN_TABLE &&
1332  form->relkind != RELKIND_PARTITIONED_TABLE)
1333  ereport(ERROR,
1334  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1335  errmsg("\"%s\" is not a table, view, or foreign table",
1336  rv->relname)));
1337 
1338  /* you must own the table to rename one of its triggers */
1339  if (!pg_class_ownercheck(relid, GetUserId()))
1340  aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname);
1341  if (!allowSystemTableMods && IsSystemClass(relid, form))
1342  ereport(ERROR,
1343  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1344  errmsg("permission denied: \"%s\" is a system catalog",
1345  rv->relname)));
1346 
1347  ReleaseSysCache(tuple);
1348 }
1349 
1350 /*
1351  * renametrig - changes the name of a trigger on a relation
1352  *
1353  * trigger name is changed in trigger catalog.
1354  * No record of the previous name is kept.
1355  *
1356  * get proper relrelation from relation catalog (if not arg)
1357  * scan trigger catalog
1358  * for name conflict (within rel)
1359  * for original trigger (if not arg)
1360  * modify tgname in trigger tuple
1361  * update row in catalog
1362  */
1363 ObjectAddress
1364 renametrig(RenameStmt *stmt)
1365 {
1366  Oid tgoid;
1367  Relation targetrel;
1368  Relation tgrel;
1369  HeapTuple tuple;
1370  SysScanDesc tgscan;
1371  ScanKeyData key[2];
1372  Oid relid;
1373  ObjectAddress address;
1374 
1375  /*
1376  * Look up name, check permissions, and acquire lock (which we will NOT
1377  * release until end of transaction).
1378  */
1379  relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1380  false, false,
1381  RangeVarCallbackForRenameTrigger,
1382  NULL);
1383 
1384  /* Have lock already, so just need to build relcache entry. */
1385  targetrel = relation_open(relid, NoLock);
1386 
1387  /*
1388  * Scan pg_trigger twice for existing triggers on relation. We do this in
1389  * order to ensure a trigger does not exist with newname (The unique index
1390  * on tgrelid/tgname would complain anyway) and to ensure a trigger does
1391  * exist with oldname.
1392  *
1393  * NOTE that this is cool only because we have AccessExclusiveLock on the
1394  * relation, so the trigger set won't be changing underneath us.
1395  */
1396  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1397 
1398  /*
1399  * First pass -- look for name conflict
1400  */
1401  ScanKeyInit(&key[0],
1402  Anum_pg_trigger_tgrelid,
1403  BTEqualStrategyNumber, F_OIDEQ,
1404  ObjectIdGetDatum(relid));
1405  ScanKeyInit(&key[1],
1406  Anum_pg_trigger_tgname,
1407  BTEqualStrategyNumber, F_NAMEEQ,
1408  PointerGetDatum(stmt->newname));
1409  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1410  NULL, 2, key);
1411  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1412  ereport(ERROR,
1413  (errcode(ERRCODE_DUPLICATE_OBJECT),
1414  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1415  stmt->newname, RelationGetRelationName(targetrel))));
1416  systable_endscan(tgscan);
1417 
1418  /*
1419  * Second pass -- look for trigger existing with oldname and update
1420  */
1421  ScanKeyInit(&key[0],
1422  Anum_pg_trigger_tgrelid,
1423  BTEqualStrategyNumber, F_OIDEQ,
1424  ObjectIdGetDatum(relid));
1425  ScanKeyInit(&key[1],
1426  Anum_pg_trigger_tgname,
1427  BTEqualStrategyNumber, F_NAMEEQ,
1428  PointerGetDatum(stmt->subname));
1429  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1430  NULL, 2, key);
1431  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1432  {
1433  tgoid = HeapTupleGetOid(tuple);
1434 
1435  /*
1436  * Update pg_trigger tuple with new tgname.
1437  */
1438  tuple = heap_copytuple(tuple); /* need a modifiable copy */
1439 
1440  namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname,
1441  stmt->newname);
1442 
1443  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1444 
1445  InvokeObjectPostAlterHook(TriggerRelationId,
1446  HeapTupleGetOid(tuple), 0);
1447 
1448  /*
1449  * Invalidate relation's relcache entry so that other backends (and
1450  * this one too!) are sent SI message to make them rebuild relcache
1451  * entries. (Ideally this should happen automatically...)
1452  */
1453  CacheInvalidateRelcache(targetrel);
1454  }
1455  else
1456  {
1457  ereport(ERROR,
1458  (errcode(ERRCODE_UNDEFINED_OBJECT),
1459  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1460  stmt->subname, RelationGetRelationName(targetrel))));
1461  }
1462 
1463  ObjectAddressSet(address, TriggerRelationId, tgoid);
1464 
1465  systable_endscan(tgscan);
1466 
1467  heap_close(tgrel, RowExclusiveLock);
1468 
1469  /*
1470  * Close rel, but keep exclusive lock!
1471  */
1472  relation_close(targetrel, NoLock);
1473 
1474  return address;
1475 }
1476 
1477 
1478 /*
1479  * EnableDisableTrigger()
1480  *
1481  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1482  * to change 'tgenabled' field for the specified trigger(s)
1483  *
1484  * rel: relation to process (caller must hold suitable lock on it)
1485  * tgname: trigger to process, or NULL to scan all triggers
1486  * fires_when: new value for tgenabled field. In addition to generic
1487  * enablement/disablement, this also defines when the trigger
1488  * should be fired in session replication roles.
1489  * skip_system: if true, skip "system" triggers (constraint triggers)
1490  *
1491  * Caller should have checked permissions for the table; here we also
1492  * enforce that superuser privilege is required to alter the state of
1493  * system triggers
1494  */
1495 void
1496 EnableDisableTrigger(Relation rel, const char *tgname,
1497  char fires_when, bool skip_system)
1498 {
1499  Relation tgrel;
1500  int nkeys;
1501  ScanKeyData keys[2];
1502  SysScanDesc tgscan;
1503  HeapTuple tuple;
1504  bool found;
1505  bool changed;
1506 
1507  /* Scan the relevant entries in pg_trigger */
1508  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1509 
1510  ScanKeyInit(&keys[0],
1511  Anum_pg_trigger_tgrelid,
1512  BTEqualStrategyNumber, F_OIDEQ,
1513  ObjectIdGetDatum(RelationGetRelid(rel)));
1514  if (tgname)
1515  {
1516  ScanKeyInit(&keys[1],
1517  Anum_pg_trigger_tgname,
1518  BTEqualStrategyNumber, F_NAMEEQ,
1519  CStringGetDatum(tgname));
1520  nkeys = 2;
1521  }
1522  else
1523  nkeys = 1;
1524 
1525  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1526  NULL, nkeys, keys);
1527 
1528  found = changed = false;
1529 
1530  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1531  {
1532  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1533 
1534  if (oldtrig->tgisinternal)
1535  {
1536  /* system trigger ... ok to process? */
1537  if (skip_system)
1538  continue;
1539  if (!superuser())
1540  ereport(ERROR,
1541  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1542  errmsg("permission denied: \"%s\" is a system trigger",
1543  NameStr(oldtrig->tgname))));
1544  }
1545 
1546  found = true;
1547 
1548  if (oldtrig->tgenabled != fires_when)
1549  {
1550  /* need to change this one ... make a copy to scribble on */
1551  HeapTuple newtup = heap_copytuple(tuple);
1552  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1553 
1554  newtrig->tgenabled = fires_when;
1555 
1556  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1557 
1558  heap_freetuple(newtup);
1559 
1560  changed = true;
1561  }
1562 
1563  InvokeObjectPostAlterHook(TriggerRelationId,
1564  HeapTupleGetOid(tuple), 0);
1565  }
1566 
1567  systable_endscan(tgscan);
1568 
1569  heap_close(tgrel, RowExclusiveLock);
1570 
1571  if (tgname && !found)
1572  ereport(ERROR,
1573  (errcode(ERRCODE_UNDEFINED_OBJECT),
1574  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1575  tgname, RelationGetRelationName(rel))));
1576 
1577  /*
1578  * If we changed anything, broadcast a SI inval message to force each
1579  * backend (including our own!) to rebuild relation's relcache entry.
1580  * Otherwise they will fail to apply the change promptly.
1581  */
1582  if (changed)
1583  CacheInvalidateRelcache(rel);
1584 }
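/*
 * Editorial note (not in the original file): fires_when carries the same
 * one-character codes stored in pg_trigger.tgenabled.  As an illustration,
 * the ALTER TABLE forms map roughly as follows:
 *
 *     ALTER TABLE t ENABLE TRIGGER trg          -> TRIGGER_FIRES_ON_ORIGIN ('O')
 *     ALTER TABLE t ENABLE REPLICA TRIGGER trg  -> TRIGGER_FIRES_ON_REPLICA ('R')
 *     ALTER TABLE t ENABLE ALWAYS TRIGGER trg   -> TRIGGER_FIRES_ALWAYS ('A')
 *     ALTER TABLE t DISABLE TRIGGER trg         -> TRIGGER_DISABLED ('D')
 *
 * The macro names and letters come from catalog/pg_trigger.h; the mapping
 * itself is performed by the ALTER TABLE code, not here.
 */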
1585 
1586 
1587 /*
1588  * Build trigger data to attach to the given relcache entry.
1589  *
1590  * Note that trigger data attached to a relcache entry must be stored in
1591  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1592  * But we should be running in a less long-lived working context. To avoid
1593  * leaking cache memory if this routine fails partway through, we build a
1594  * temporary TriggerDesc in working memory and then copy the completed
1595  * structure into cache memory.
1596  */
1597 void
1598 RelationBuildTriggers(Relation relation)
1599 {
1600  TriggerDesc *trigdesc;
1601  int numtrigs;
1602  int maxtrigs;
1603  Trigger *triggers;
1604  Relation tgrel;
1605  ScanKeyData skey;
1606  SysScanDesc tgscan;
1607  HeapTuple htup;
1608  MemoryContext oldContext;
1609  int i;
1610 
1611  /*
1612  * Allocate a working array to hold the triggers (the array is extended if
1613  * necessary)
1614  */
1615  maxtrigs = 16;
1616  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1617  numtrigs = 0;
1618 
1619  /*
1620  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1621  * be reading the triggers in name order, except possibly during
1622  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1623  * ensures that triggers will be fired in name order.
1624  */
1625  ScanKeyInit(&skey,
1626  Anum_pg_trigger_tgrelid,
1627  BTEqualStrategyNumber, F_OIDEQ,
1628  ObjectIdGetDatum(RelationGetRelid(relation)));
1629 
1630  tgrel = heap_open(TriggerRelationId, AccessShareLock);
1631  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1632  NULL, 1, &skey);
1633 
1634  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1635  {
1636  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1637  Trigger *build;
1638  Datum datum;
1639  bool isnull;
1640 
1641  if (numtrigs >= maxtrigs)
1642  {
1643  maxtrigs *= 2;
1644  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1645  }
1646  build = &(triggers[numtrigs]);
1647 
1648  build->tgoid = HeapTupleGetOid(htup);
1649  build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1650  NameGetDatum(&pg_trigger->tgname)));
1651  build->tgfoid = pg_trigger->tgfoid;
1652  build->tgtype = pg_trigger->tgtype;
1653  build->tgenabled = pg_trigger->tgenabled;
1654  build->tgisinternal = pg_trigger->tgisinternal;
1655  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1656  build->tgconstrindid = pg_trigger->tgconstrindid;
1657  build->tgconstraint = pg_trigger->tgconstraint;
1658  build->tgdeferrable = pg_trigger->tgdeferrable;
1659  build->tginitdeferred = pg_trigger->tginitdeferred;
1660  build->tgnargs = pg_trigger->tgnargs;
1661  /* tgattr is first var-width field, so OK to access directly */
1662  build->tgnattr = pg_trigger->tgattr.dim1;
1663  if (build->tgnattr > 0)
1664  {
1665  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1666  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1667  build->tgnattr * sizeof(int16));
1668  }
1669  else
1670  build->tgattr = NULL;
1671  if (build->tgnargs > 0)
1672  {
1673  bytea *val;
1674  char *p;
1675 
1676  val = DatumGetByteaPP(fastgetattr(htup,
1677  Anum_pg_trigger_tgargs,
1678  tgrel->rd_att, &isnull));
1679  if (isnull)
1680  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1681  RelationGetRelationName(relation));
1682  p = (char *) VARDATA_ANY(val);
1683  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1684  for (i = 0; i < build->tgnargs; i++)
1685  {
1686  build->tgargs[i] = pstrdup(p);
1687  p += strlen(p) + 1;
1688  }
1689  }
1690  else
1691  build->tgargs = NULL;
1692 
1693  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1694  tgrel->rd_att, &isnull);
1695  if (!isnull)
1696  build->tgoldtable =
1697  DatumGetCString(DirectFunctionCall1(nameout, datum));
1698  else
1699  build->tgoldtable = NULL;
1700 
1701  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1702  tgrel->rd_att, &isnull);
1703  if (!isnull)
1704  build->tgnewtable =
1705  DatumGetCString(DirectFunctionCall1(nameout, datum));
1706  else
1707  build->tgnewtable = NULL;
1708 
1709  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1710  tgrel->rd_att, &isnull);
1711  if (!isnull)
1712  build->tgqual = TextDatumGetCString(datum);
1713  else
1714  build->tgqual = NULL;
1715 
1716  numtrigs++;
1717  }
1718 
1719  systable_endscan(tgscan);
1720  heap_close(tgrel, AccessShareLock);
1721 
1722  /* There might not be any triggers */
1723  if (numtrigs == 0)
1724  {
1725  pfree(triggers);
1726  return;
1727  }
1728 
1729  /* Build trigdesc */
1730  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1731  trigdesc->triggers = triggers;
1732  trigdesc->numtriggers = numtrigs;
1733  for (i = 0; i < numtrigs; i++)
1734  SetTriggerFlags(trigdesc, &(triggers[i]));
1735 
1736  /* Copy completed trigdesc into cache storage */
1737  oldContext = MemoryContextSwitchTo(CacheMemoryContext);
1738  relation->trigdesc = CopyTriggerDesc(trigdesc);
1739  MemoryContextSwitchTo(oldContext);
1740 
1741  /* Release working memory */
1742  FreeTriggerDesc(trigdesc);
1743 }
1744 
1745 /*
1746  * Update the TriggerDesc's hint flags to include the specified trigger
1747  */
1748 static void
1749 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
1750 {
1751  int16 tgtype = trigger->tgtype;
1752 
1753  trigdesc->trig_insert_before_row |=
1754  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1755  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1756  trigdesc->trig_insert_after_row |=
1757  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1758  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1759  trigdesc->trig_insert_instead_row |=
1760  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1761  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
1762  trigdesc->trig_insert_before_statement |=
1763  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1764  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1765  trigdesc->trig_insert_after_statement |=
1766  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1767  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1768  trigdesc->trig_update_before_row |=
1769  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1770  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1771  trigdesc->trig_update_after_row |=
1772  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1773  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1774  trigdesc->trig_update_instead_row |=
1775  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1776  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
1777  trigdesc->trig_update_before_statement |=
1778  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1779  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1780  trigdesc->trig_update_after_statement |=
1781  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1782  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1783  trigdesc->trig_delete_before_row |=
1784  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1785  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1786  trigdesc->trig_delete_after_row |=
1787  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1788  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1789  trigdesc->trig_delete_instead_row |=
1790  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1791  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
1792  trigdesc->trig_delete_before_statement |=
1793  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1794  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1795  trigdesc->trig_delete_after_statement |=
1796  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1797  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1798  /* there are no row-level truncate triggers */
1799  trigdesc->trig_truncate_before_statement |=
1800  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1801  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
1802  trigdesc->trig_truncate_after_statement |=
1803  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1804  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
1805 
1806  trigdesc->trig_insert_new_table |=
1807  (TRIGGER_FOR_INSERT(tgtype) &&
1808  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1809  trigdesc->trig_update_old_table |=
1810  (TRIGGER_FOR_UPDATE(tgtype) &&
1811  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1812  trigdesc->trig_update_new_table |=
1813  (TRIGGER_FOR_UPDATE(tgtype) &&
1814  TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
1815  trigdesc->trig_delete_old_table |=
1816  (TRIGGER_FOR_DELETE(tgtype) &&
1817  TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
1818 }
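/*
 * Editorial note (not in the original file): these hint flags let callers
 * test for the presence of a given class of trigger without walking the
 * trigger array.  A hypothetical caller in executor code might do, e.g.:
 *
 *     if (relinfo->ri_TrigDesc &&
 *         relinfo->ri_TrigDesc->trig_insert_before_row)
 *         ...run the BEFORE ROW INSERT triggers...
 *
 * The real call sites live in the executor and elsewhere in this file; the
 * snippet above is only an illustration.
 */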
1819 
1820 /*
1821  * Copy a TriggerDesc data structure.
1822  *
1823  * The copy is allocated in the current memory context.
1824  */
1825 TriggerDesc *
1826 CopyTriggerDesc(TriggerDesc *trigdesc)
1827 {
1828  TriggerDesc *newdesc;
1829  Trigger *trigger;
1830  int i;
1831 
1832  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
1833  return NULL;
1834 
1835  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
1836  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
1837 
1838  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
1839  memcpy(trigger, trigdesc->triggers,
1840  trigdesc->numtriggers * sizeof(Trigger));
1841  newdesc->triggers = trigger;
1842 
1843  for (i = 0; i < trigdesc->numtriggers; i++)
1844  {
1845  trigger->tgname = pstrdup(trigger->tgname);
1846  if (trigger->tgnattr > 0)
1847  {
1848  int16 *newattr;
1849 
1850  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
1851  memcpy(newattr, trigger->tgattr,
1852  trigger->tgnattr * sizeof(int16));
1853  trigger->tgattr = newattr;
1854  }
1855  if (trigger->tgnargs > 0)
1856  {
1857  char **newargs;
1858  int16 j;
1859 
1860  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
1861  for (j = 0; j < trigger->tgnargs; j++)
1862  newargs[j] = pstrdup(trigger->tgargs[j]);
1863  trigger->tgargs = newargs;
1864  }
1865  if (trigger->tgqual)
1866  trigger->tgqual = pstrdup(trigger->tgqual);
1867  if (trigger->tgoldtable)
1868  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
1869  if (trigger->tgnewtable)
1870  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
1871  trigger++;
1872  }
1873 
1874  return newdesc;
1875 }
1876 
1877 /*
1878  * Free a TriggerDesc data structure.
1879  */
1880 void
1881 FreeTriggerDesc(TriggerDesc *trigdesc)
1882 {
1883  Trigger *trigger;
1884  int i;
1885 
1886  if (trigdesc == NULL)
1887  return;
1888 
1889  trigger = trigdesc->triggers;
1890  for (i = 0; i < trigdesc->numtriggers; i++)
1891  {
1892  pfree(trigger->tgname);
1893  if (trigger->tgnattr > 0)
1894  pfree(trigger->tgattr);
1895  if (trigger->tgnargs > 0)
1896  {
1897  while (--(trigger->tgnargs) >= 0)
1898  pfree(trigger->tgargs[trigger->tgnargs]);
1899  pfree(trigger->tgargs);
1900  }
1901  if (trigger->tgqual)
1902  pfree(trigger->tgqual);
1903  if (trigger->tgoldtable)
1904  pfree(trigger->tgoldtable);
1905  if (trigger->tgnewtable)
1906  pfree(trigger->tgnewtable);
1907  trigger++;
1908  }
1909  pfree(trigdesc->triggers);
1910  pfree(trigdesc);
1911 }
1912 
1913 /*
1914  * Compare two TriggerDesc structures for logical equality.
1915  */
1916 #ifdef NOT_USED
1917 bool
1918 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
1919 {
1920  int i,
1921  j;
1922 
1923  /*
1924  * We need not examine the hint flags, just the trigger array itself; if
1925  * we have the same triggers with the same types, the flags should match.
1926  *
1927  * As of 7.3 we assume trigger set ordering is significant in the
1928  * comparison; so we just compare corresponding slots of the two sets.
1929  *
1930  * Note: comparing the stringToNode forms of the WHEN clauses means that
1931  * parse column locations will affect the result. This is okay as long as
1932  * this function is only used for detecting exact equality, as for example
1933  * in checking for staleness of a cache entry.
1934  */
1935  if (trigdesc1 != NULL)
1936  {
1937  if (trigdesc2 == NULL)
1938  return false;
1939  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
1940  return false;
1941  for (i = 0; i < trigdesc1->numtriggers; i++)
1942  {
1943  Trigger *trig1 = trigdesc1->triggers + i;
1944  Trigger *trig2 = trigdesc2->triggers + i;
1945 
1946  if (trig1->tgoid != trig2->tgoid)
1947  return false;
1948  if (strcmp(trig1->tgname, trig2->tgname) != 0)
1949  return false;
1950  if (trig1->tgfoid != trig2->tgfoid)
1951  return false;
1952  if (trig1->tgtype != trig2->tgtype)
1953  return false;
1954  if (trig1->tgenabled != trig2->tgenabled)
1955  return false;
1956  if (trig1->tgisinternal != trig2->tgisinternal)
1957  return false;
1958  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
1959  return false;
1960  if (trig1->tgconstrindid != trig2->tgconstrindid)
1961  return false;
1962  if (trig1->tgconstraint != trig2->tgconstraint)
1963  return false;
1964  if (trig1->tgdeferrable != trig2->tgdeferrable)
1965  return false;
1966  if (trig1->tginitdeferred != trig2->tginitdeferred)
1967  return false;
1968  if (trig1->tgnargs != trig2->tgnargs)
1969  return false;
1970  if (trig1->tgnattr != trig2->tgnattr)
1971  return false;
1972  if (trig1->tgnattr > 0 &&
1973  memcmp(trig1->tgattr, trig2->tgattr,
1974  trig1->tgnattr * sizeof(int16)) != 0)
1975  return false;
1976  for (j = 0; j < trig1->tgnargs; j++)
1977  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
1978  return false;
1979  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
1980  /* ok */ ;
1981  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
1982  return false;
1983  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
1984  return false;
1985  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
1986  /* ok */ ;
1987  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
1988  return false;
1989  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
1990  return false;
1991  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
1992  /* ok */ ;
1993  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
1994  return false;
1995  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
1996  return false;
1997  }
1998  }
1999  else if (trigdesc2 != NULL)
2000  return false;
2001  return true;
2002 }
2003 #endif /* NOT_USED */
2004 
2005 /*
2006  * Call a trigger function.
2007  *
2008  * trigdata: trigger descriptor.
2009  * tgindx: trigger's index in finfo and instr arrays.
2010  * finfo: array of cached trigger function call information.
2011  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2012  * per_tuple_context: memory context to execute the function in.
2013  *
2014  * Returns the tuple (or NULL) as returned by the function.
2015  */
2016 static HeapTuple
2017 ExecCallTriggerFunc(TriggerData *trigdata,
2018  int tgindx,
2019  FmgrInfo *finfo,
2020  Instrumentation *instr,
2021  MemoryContext per_tuple_context)
2022 {
2023  FunctionCallInfoData fcinfo;
2024  PgStat_FunctionCallUsage fcusage;
2025  Datum result;
2026  MemoryContext oldContext;
2027 
2028  /*
2029  * Protect against code paths that may fail to initialize transition table
2030  * info.
2031  */
2032  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2033  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2034  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2035  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2036  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2037  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2038  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2039 
2040  finfo += tgindx;
2041 
2042  /*
2043  * We cache fmgr lookup info, to avoid making the lookup again on each
2044  * call.
2045  */
2046  if (finfo->fn_oid == InvalidOid)
2047  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2048 
2049  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2050 
2051  /*
2052  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2053  */
2054  if (instr)
2055  InstrStartNode(instr + tgindx);
2056 
2057  /*
2058  * Do the function evaluation in the per-tuple memory context, so that
2059  * leaked memory will be reclaimed once per tuple. Note in particular that
2060  * any new tuple created by the trigger function will live till the end of
2061  * the tuple cycle.
2062  */
2063  oldContext = MemoryContextSwitchTo(per_tuple_context);
2064 
2065  /*
2066  * Call the function, passing no arguments but setting a context.
2067  */
2068  InitFunctionCallInfoData(fcinfo, finfo, 0,
2069  InvalidOid, (Node *) trigdata, NULL);
2070 
2071  pgstat_init_function_usage(&fcinfo, &fcusage);
2072 
2073  MyTriggerDepth++;
2074  PG_TRY();
2075  {
2076  result = FunctionCallInvoke(&fcinfo);
2077  }
2078  PG_CATCH();
2079  {
2080  MyTriggerDepth--;
2081  PG_RE_THROW();
2082  }
2083  PG_END_TRY();
2084  MyTriggerDepth--;
2085 
2086  pgstat_end_function_usage(&fcusage, true);
2087 
2088  MemoryContextSwitchTo(oldContext);
2089 
2090  /*
2091  * Trigger protocol allows function to return a null pointer, but NOT to
2092  * set the isnull result flag.
2093  */
2094  if (fcinfo.isnull)
2095  ereport(ERROR,
2096  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2097  errmsg("trigger function %u returned null value",
2098  fcinfo.flinfo->fn_oid)));
2099 
2100  /*
2101  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2102  * one "tuple returned" (really the number of firings).
2103  */
2104  if (instr)
2105  InstrStopNode(instr + tgindx, 1);
2106 
2107  return (HeapTuple) DatumGetPointer(result);
2108 }
2109 
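To make the calling convention concrete, here is a minimal sketch of a C trigger function as ExecCallTriggerFunc invokes it: the TriggerData is delivered via fcinfo->context rather than as ordinary arguments, and the result comes back as a HeapTuple Datum (a NULL pointer meaning "do nothing" for BEFORE ROW triggers). The function name noop_trigger is hypothetical.

#include "postgres.h"
#include "commands/trigger.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(noop_trigger);

Datum
noop_trigger(PG_FUNCTION_ARGS)
{
    TriggerData *trigdata;

    /* The trigger manager passes its state via fcinfo->context. */
    if (!CALLED_AS_TRIGGER(fcinfo))
        elog(ERROR, "noop_trigger: not called by trigger manager");
    trigdata = (TriggerData *) fcinfo->context;

    /* For row-level UPDATE, hand back the proposed new tuple unchanged. */
    if (TRIGGER_FIRED_FOR_ROW(trigdata->tg_event) &&
        TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
        return PointerGetDatum(trigdata->tg_newtuple);

    /* Otherwise return whatever tuple fired us (NULL for statement events). */
    return PointerGetDatum(trigdata->tg_trigtuple);
}
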
2110 void
2111 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2112 {
2113  TriggerDesc *trigdesc;
2114  int i;
2115  TriggerData LocTriggerData;
2116 
2117  trigdesc = relinfo->ri_TrigDesc;
2118 
2119  if (trigdesc == NULL)
2120  return;
2121  if (!trigdesc->trig_insert_before_statement)
2122  return;
2123 
2124  LocTriggerData.type = T_TriggerData;
2125  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2127  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2128  LocTriggerData.tg_trigtuple = NULL;
2129  LocTriggerData.tg_newtuple = NULL;
2130  LocTriggerData.tg_oldtable = NULL;
2131  LocTriggerData.tg_newtable = NULL;
2132  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2133  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2134  for (i = 0; i < trigdesc->numtriggers; i++)
2135  {
2136  Trigger *trigger = &trigdesc->triggers[i];
2137  HeapTuple newtuple;
2138 
2139  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2143  continue;
2144  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2145  NULL, NULL, NULL))
2146  continue;
2147 
2148  LocTriggerData.tg_trigger = trigger;
2149  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2150  i,
2151  relinfo->ri_TrigFunctions,
2152  relinfo->ri_TrigInstrument,
2153  GetPerTupleMemoryContext(estate));
2154 
2155  if (newtuple)
2156  ereport(ERROR,
2157  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2158  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2159  }
2160 }
2161 
2162 void
2163 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2164 {
2165  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2166 
2167  if (trigdesc && trigdesc->trig_insert_after_statement)
2168  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2169  false, NULL, NULL, NIL, NULL);
2170 }
2171 
2172 TupleTableSlot *
2173 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2174  TupleTableSlot *slot)
2175 {
2176  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2177  HeapTuple slottuple = ExecMaterializeSlot(slot);
2178  HeapTuple newtuple = slottuple;
2179  HeapTuple oldtuple;
2180  TriggerData LocTriggerData;
2181  int i;
2182 
2183  LocTriggerData.type = T_TriggerData;
2184  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2187  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2188  LocTriggerData.tg_newtuple = NULL;
2189  LocTriggerData.tg_oldtable = NULL;
2190  LocTriggerData.tg_newtable = NULL;
2191  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2192  for (i = 0; i < trigdesc->numtriggers; i++)
2193  {
2194  Trigger *trigger = &trigdesc->triggers[i];
2195 
2196  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2200  continue;
2201  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2202  NULL, NULL, newtuple))
2203  continue;
2204 
2205  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2206  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2207  LocTriggerData.tg_trigger = trigger;
2208  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2209  i,
2210  relinfo->ri_TrigFunctions,
2211  relinfo->ri_TrigInstrument,
2212  GetPerTupleMemoryContext(estate));
2213  if (oldtuple != newtuple && oldtuple != slottuple)
2214  heap_freetuple(oldtuple);
2215  if (newtuple == NULL)
2216  return NULL; /* "do nothing" */
2217  }
2218 
2219  if (newtuple != slottuple)
2220  {
2221  /*
2222  * Return the modified tuple using the es_trig_tuple_slot. We assume
2223  * the tuple was allocated in per-tuple memory context, and therefore
2224  * will go away by itself. The tuple table slot should not try to
2225  * clear it.
2226  */
2227  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2228  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2229 
2230  if (newslot->tts_tupleDescriptor != tupdesc)
2231  ExecSetSlotDescriptor(newslot, tupdesc);
2232  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2233  slot = newslot;
2234  }
2235  return slot;
2236 }
2237 
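A hedged sketch of how these entry points are strung together by a caller (cf. ExecInsert in nodeModifyTable.c); insert_with_triggers is a hypothetical wrapper and the actual heap and index work is elided:

static TupleTableSlot *
insert_with_triggers(EState *estate, ResultRelInfo *resultRelInfo,
                     TupleTableSlot *slot)
{
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->trig_insert_before_row)
    {
        slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
        if (slot == NULL)       /* a BEFORE trigger said "do nothing" */
            return NULL;
    }

    /* ... perform the actual heap_insert() and index updates here ... */

    /* queue AFTER ROW events for deferred execution */
    ExecARInsertTriggers(estate, resultRelInfo,
                         ExecMaterializeSlot(slot), NIL);
    return slot;
}
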
2238 void
2239 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2240  HeapTuple trigtuple, List *recheckIndexes)
2241 {
2242  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2243 
2244  if (trigdesc &&
2245  (trigdesc->trig_insert_after_row || trigdesc->trig_insert_new_table))
2246  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2247  true, NULL, trigtuple, recheckIndexes, NULL);
2248 }
2249 
2250 TupleTableSlot *
2251 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2252  TupleTableSlot *slot)
2253 {
2254  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2255  HeapTuple slottuple = ExecMaterializeSlot(slot);
2256  HeapTuple newtuple = slottuple;
2257  HeapTuple oldtuple;
2258  TriggerData LocTriggerData;
2259  int i;
2260 
2261  LocTriggerData.type = T_TriggerData;
2262  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2265  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2266  LocTriggerData.tg_newtuple = NULL;
2267  LocTriggerData.tg_oldtable = NULL;
2268  LocTriggerData.tg_newtable = NULL;
2269  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2270  for (i = 0; i < trigdesc->numtriggers; i++)
2271  {
2272  Trigger *trigger = &trigdesc->triggers[i];
2273 
2274  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2278  continue;
2279  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2280  NULL, NULL, newtuple))
2281  continue;
2282 
2283  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2284  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2285  LocTriggerData.tg_trigger = trigger;
2286  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2287  i,
2288  relinfo->ri_TrigFunctions,
2289  relinfo->ri_TrigInstrument,
2290  GetPerTupleMemoryContext(estate));
2291  if (oldtuple != newtuple && oldtuple != slottuple)
2292  heap_freetuple(oldtuple);
2293  if (newtuple == NULL)
2294  return NULL; /* "do nothing" */
2295  }
2296 
2297  if (newtuple != slottuple)
2298  {
2299  /*
2300  * Return the modified tuple using the es_trig_tuple_slot. We assume
2301  * the tuple was allocated in per-tuple memory context, and therefore
2302  * will go away by itself. The tuple table slot should not try to
2303  * clear it.
2304  */
2305  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2306  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2307 
2308  if (newslot->tts_tupleDescriptor != tupdesc)
2309  ExecSetSlotDescriptor(newslot, tupdesc);
2310  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2311  slot = newslot;
2312  }
2313  return slot;
2314 }
2315 
2316 void
2317 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2318 {
2319  TriggerDesc *trigdesc;
2320  int i;
2321  TriggerData LocTriggerData;
2322 
2323  trigdesc = relinfo->ri_TrigDesc;
2324 
2325  if (trigdesc == NULL)
2326  return;
2327  if (!trigdesc->trig_delete_before_statement)
2328  return;
2329 
2330  LocTriggerData.type = T_TriggerData;
2331  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2333  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2334  LocTriggerData.tg_trigtuple = NULL;
2335  LocTriggerData.tg_newtuple = NULL;
2336  LocTriggerData.tg_oldtable = NULL;
2337  LocTriggerData.tg_newtable = NULL;
2338  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2339  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2340  for (i = 0; i < trigdesc->numtriggers; i++)
2341  {
2342  Trigger *trigger = &trigdesc->triggers[i];
2343  HeapTuple newtuple;
2344 
2345  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2349  continue;
2350  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2351  NULL, NULL, NULL))
2352  continue;
2353 
2354  LocTriggerData.tg_trigger = trigger;
2355  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2356  i,
2357  relinfo->ri_TrigFunctions,
2358  relinfo->ri_TrigInstrument,
2359  GetPerTupleMemoryContext(estate));
2360 
2361  if (newtuple)
2362  ereport(ERROR,
2363  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2364  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2365  }
2366 }
2367 
2368 void
2369 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2370 {
2371  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2372 
2373  if (trigdesc && trigdesc->trig_delete_after_statement)
2374  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2375  false, NULL, NULL, NIL, NULL);
2376 }
2377 
2378 bool
2379 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2380  ResultRelInfo *relinfo,
2381  ItemPointer tupleid,
2382  HeapTuple fdw_trigtuple)
2383 {
2384  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2385  bool result = true;
2386  TriggerData LocTriggerData;
2387  HeapTuple trigtuple;
2388  HeapTuple newtuple;
2389  TupleTableSlot *newSlot;
2390  int i;
2391 
2392  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2393  if (fdw_trigtuple == NULL)
2394  {
2395  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2396  LockTupleExclusive, &newSlot);
2397  if (trigtuple == NULL)
2398  return false;
2399  }
2400  else
2401  trigtuple = fdw_trigtuple;
2402 
2403  LocTriggerData.type = T_TriggerData;
2404  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2407  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2408  LocTriggerData.tg_newtuple = NULL;
2409  LocTriggerData.tg_oldtable = NULL;
2410  LocTriggerData.tg_newtable = NULL;
2411  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2412  for (i = 0; i < trigdesc->numtriggers; i++)
2413  {
2414  Trigger *trigger = &trigdesc->triggers[i];
2415 
2416  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2420  continue;
2421  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2422  NULL, trigtuple, NULL))
2423  continue;
2424 
2425  LocTriggerData.tg_trigtuple = trigtuple;
2426  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2427  LocTriggerData.tg_trigger = trigger;
2428  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2429  i,
2430  relinfo->ri_TrigFunctions,
2431  relinfo->ri_TrigInstrument,
2432  GetPerTupleMemoryContext(estate));
2433  if (newtuple == NULL)
2434  {
2435  result = false; /* tell caller to suppress delete */
2436  break;
2437  }
2438  if (newtuple != trigtuple)
2439  heap_freetuple(newtuple);
2440  }
2441  if (trigtuple != fdw_trigtuple)
2442  heap_freetuple(trigtuple);
2443 
2444  return result;
2445 }
2446 
2447 void
2448 ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2449  ItemPointer tupleid,
2450  HeapTuple fdw_trigtuple)
2451 {
2452  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2453 
2454  if (trigdesc &&
2455  (trigdesc->trig_delete_after_row || trigdesc->trig_delete_old_table))
2456  {
2457  HeapTuple trigtuple;
2458 
2459  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2460  if (fdw_trigtuple == NULL)
2461  trigtuple = GetTupleForTrigger(estate,
2462  NULL,
2463  relinfo,
2464  tupleid,
2465  LockTupleExclusive,
2466  NULL);
2467  else
2468  trigtuple = fdw_trigtuple;
2469 
2470  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2471  true, trigtuple, NULL, NIL, NULL);
2472  if (trigtuple != fdw_trigtuple)
2473  heap_freetuple(trigtuple);
2474  }
2475 }
2476 
2477 bool
2478 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2479  HeapTuple trigtuple)
2480 {
2481  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2482  TriggerData LocTriggerData;
2483  HeapTuple rettuple;
2484  int i;
2485 
2486  LocTriggerData.type = T_TriggerData;
2487  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2490  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2491  LocTriggerData.tg_newtuple = NULL;
2492  LocTriggerData.tg_oldtable = NULL;
2493  LocTriggerData.tg_newtable = NULL;
2494  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2495  for (i = 0; i < trigdesc->numtriggers; i++)
2496  {
2497  Trigger *trigger = &trigdesc->triggers[i];
2498 
2499  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2503  continue;
2504  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2505  NULL, trigtuple, NULL))
2506  continue;
2507 
2508  LocTriggerData.tg_trigtuple = trigtuple;
2509  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2510  LocTriggerData.tg_trigger = trigger;
2511  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2512  i,
2513  relinfo->ri_TrigFunctions,
2514  relinfo->ri_TrigInstrument,
2515  GetPerTupleMemoryContext(estate));
2516  if (rettuple == NULL)
2517  return false; /* Delete was suppressed */
2518  if (rettuple != trigtuple)
2519  heap_freetuple(rettuple);
2520  }
2521  return true;
2522 }
2523 
2524 void
2525 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2526 {
2527  TriggerDesc *trigdesc;
2528  int i;
2529  TriggerData LocTriggerData;
2530  Bitmapset *updatedCols;
2531 
2532  trigdesc = relinfo->ri_TrigDesc;
2533 
2534  if (trigdesc == NULL)
2535  return;
2536  if (!trigdesc->trig_update_before_statement)
2537  return;
2538 
2539  updatedCols = GetUpdatedColumns(relinfo, estate);
2540 
2541  LocTriggerData.type = T_TriggerData;
2542  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2544  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2545  LocTriggerData.tg_trigtuple = NULL;
2546  LocTriggerData.tg_newtuple = NULL;
2547  LocTriggerData.tg_oldtable = NULL;
2548  LocTriggerData.tg_newtable = NULL;
2549  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2550  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2551  for (i = 0; i < trigdesc->numtriggers; i++)
2552  {
2553  Trigger *trigger = &trigdesc->triggers[i];
2554  HeapTuple newtuple;
2555 
2556  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2560  continue;
2561  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2562  updatedCols, NULL, NULL))
2563  continue;
2564 
2565  LocTriggerData.tg_trigger = trigger;
2566  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2567  i,
2568  relinfo->ri_TrigFunctions,
2569  relinfo->ri_TrigInstrument,
2570  GetPerTupleMemoryContext(estate));
2571 
2572  if (newtuple)
2573  ereport(ERROR,
2574  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2575  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2576  }
2577 }
2578 
2579 void
2580 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2581 {
2582  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2583 
2584  if (trigdesc && trigdesc->trig_update_after_statement)
2585  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2586  false, NULL, NULL, NIL,
2587  GetUpdatedColumns(relinfo, estate));
2588 }
2589 
2590 TupleTableSlot *
2591 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2592  ResultRelInfo *relinfo,
2593  ItemPointer tupleid,
2594  HeapTuple fdw_trigtuple,
2595  TupleTableSlot *slot)
2596 {
2597  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2598  HeapTuple slottuple = ExecMaterializeSlot(slot);
2599  HeapTuple newtuple = slottuple;
2600  TriggerData LocTriggerData;
2601  HeapTuple trigtuple;
2602  HeapTuple oldtuple;
2603  TupleTableSlot *newSlot;
2604  int i;
2605  Bitmapset *updatedCols;
2606  LockTupleMode lockmode;
2607 
2608  /* Determine lock mode to use */
2609  lockmode = ExecUpdateLockMode(estate, relinfo);
2610 
2611  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2612  if (fdw_trigtuple == NULL)
2613  {
2614  /* get a copy of the on-disk tuple we are planning to update */
2615  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2616  lockmode, &newSlot);
2617  if (trigtuple == NULL)
2618  return NULL; /* cancel the update action */
2619  }
2620  else
2621  {
2622  trigtuple = fdw_trigtuple;
2623  newSlot = NULL;
2624  }
2625 
2626  /*
2627  * In READ COMMITTED isolation level it's possible that target tuple was
2628  * changed due to concurrent update. In that case we have a raw subplan
2629  * output tuple in newSlot, and need to run it through the junk filter to
2630  * produce an insertable tuple.
2631  *
2632  * Caution: more than likely, the passed-in slot is the same as the
2633  * junkfilter's output slot, so we are clobbering the original value of
2634  * slottuple by doing the filtering. This is OK since neither we nor our
2635  * caller have any more interest in the prior contents of that slot.
2636  */
2637  if (newSlot != NULL)
2638  {
2639  slot = ExecFilterJunk(relinfo->ri_junkFilter, newSlot);
2640  slottuple = ExecMaterializeSlot(slot);
2641  newtuple = slottuple;
2642  }
2643 
2644 
2645  LocTriggerData.type = T_TriggerData;
2646  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2649  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2650  LocTriggerData.tg_oldtable = NULL;
2651  LocTriggerData.tg_newtable = NULL;
2652  updatedCols = GetUpdatedColumns(relinfo, estate);
2653  for (i = 0; i < trigdesc->numtriggers; i++)
2654  {
2655  Trigger *trigger = &trigdesc->triggers[i];
2656 
2657  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2661  continue;
2662  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2663  updatedCols, trigtuple, newtuple))
2664  continue;
2665 
2666  LocTriggerData.tg_trigtuple = trigtuple;
2667  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2668  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2669  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2670  LocTriggerData.tg_trigger = trigger;
2671  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2672  i,
2673  relinfo->ri_TrigFunctions,
2674  relinfo->ri_TrigInstrument,
2675  GetPerTupleMemoryContext(estate));
2676  if (oldtuple != newtuple && oldtuple != slottuple)
2677  heap_freetuple(oldtuple);
2678  if (newtuple == NULL)
2679  {
2680  if (trigtuple != fdw_trigtuple)
2681  heap_freetuple(trigtuple);
2682  return NULL; /* "do nothing" */
2683  }
2684  }
2685  if (trigtuple != fdw_trigtuple)
2686  heap_freetuple(trigtuple);
2687 
2688  if (newtuple != slottuple)
2689  {
2690  /*
2691  * Return the modified tuple using the es_trig_tuple_slot. We assume
2692  * the tuple was allocated in per-tuple memory context, and therefore
2693  * will go away by itself. The tuple table slot should not try to
2694  * clear it.
2695  */
2696  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2697  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2698 
2699  if (newslot->tts_tupleDescriptor != tupdesc)
2700  ExecSetSlotDescriptor(newslot, tupdesc);
2701  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2702  slot = newslot;
2703  }
2704  return slot;
2705 }
2706 
2707 void
2708 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2709  ItemPointer tupleid,
2710  HeapTuple fdw_trigtuple,
2711  HeapTuple newtuple,
2712  List *recheckIndexes)
2713 {
2714  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2715 
2716  if (trigdesc && (trigdesc->trig_update_after_row ||
2717  trigdesc->trig_update_old_table || trigdesc->trig_update_new_table))
2718  {
2719  HeapTuple trigtuple;
2720 
2721  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2722  if (fdw_trigtuple == NULL)
2723  trigtuple = GetTupleForTrigger(estate,
2724  NULL,
2725  relinfo,
2726  tupleid,
2727  LockTupleExclusive,
2728  NULL);
2729  else
2730  trigtuple = fdw_trigtuple;
2731 
2732  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2733  true, trigtuple, newtuple, recheckIndexes,
2734  GetUpdatedColumns(relinfo, estate));
2735  if (trigtuple != fdw_trigtuple)
2736  heap_freetuple(trigtuple);
2737  }
2738 }
2739 
2740 TupleTableSlot *
2741 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2742  HeapTuple trigtuple, TupleTableSlot *slot)
2743 {
2744  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2745  HeapTuple slottuple = ExecMaterializeSlot(slot);
2746  HeapTuple newtuple = slottuple;
2747  TriggerData LocTriggerData;
2748  HeapTuple oldtuple;
2749  int i;
2750 
2751  LocTriggerData.type = T_TriggerData;
2752  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2755  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2756  LocTriggerData.tg_oldtable = NULL;
2757  LocTriggerData.tg_newtable = NULL;
2758  for (i = 0; i < trigdesc->numtriggers; i++)
2759  {
2760  Trigger *trigger = &trigdesc->triggers[i];
2761 
2762  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2766  continue;
2767  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2768  NULL, trigtuple, newtuple))
2769  continue;
2770 
2771  LocTriggerData.tg_trigtuple = trigtuple;
2772  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2773  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2774  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2775  LocTriggerData.tg_trigger = trigger;
2776  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2777  i,
2778  relinfo->ri_TrigFunctions,
2779  relinfo->ri_TrigInstrument,
2780  GetPerTupleMemoryContext(estate));
2781  if (oldtuple != newtuple && oldtuple != slottuple)
2782  heap_freetuple(oldtuple);
2783  if (newtuple == NULL)
2784  return NULL; /* "do nothing" */
2785  }
2786 
2787  if (newtuple != slottuple)
2788  {
2789  /*
2790  * Return the modified tuple using the es_trig_tuple_slot. We assume
2791  * the tuple was allocated in per-tuple memory context, and therefore
2792  * will go away by itself. The tuple table slot should not try to
2793  * clear it.
2794  */
2795  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2796  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2797 
2798  if (newslot->tts_tupleDescriptor != tupdesc)
2799  ExecSetSlotDescriptor(newslot, tupdesc);
2800  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2801  slot = newslot;
2802  }
2803  return slot;
2804 }
2805 
2806 void
2807 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2808 {
2809  TriggerDesc *trigdesc;
2810  int i;
2811  TriggerData LocTriggerData;
2812 
2813  trigdesc = relinfo->ri_TrigDesc;
2814 
2815  if (trigdesc == NULL)
2816  return;
2817  if (!trigdesc->trig_truncate_before_statement)
2818  return;
2819 
2820  LocTriggerData.type = T_TriggerData;
2821  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
2823  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2824  LocTriggerData.tg_trigtuple = NULL;
2825  LocTriggerData.tg_newtuple = NULL;
2826  LocTriggerData.tg_oldtable = NULL;
2827  LocTriggerData.tg_newtable = NULL;
2828  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2829  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2830  for (i = 0; i < trigdesc->numtriggers; i++)
2831  {
2832  Trigger *trigger = &trigdesc->triggers[i];
2833  HeapTuple newtuple;
2834 
2835  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2839  continue;
2840  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2841  NULL, NULL, NULL))
2842  continue;
2843 
2844  LocTriggerData.tg_trigger = trigger;
2845  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2846  i,
2847  relinfo->ri_TrigFunctions,
2848  relinfo->ri_TrigInstrument,
2849  GetPerTupleMemoryContext(estate));
2850 
2851  if (newtuple)
2852  ereport(ERROR,
2853  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2854  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2855  }
2856 }
2857 
2858 void
2859 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2860 {
2861  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2862 
2863  if (trigdesc && trigdesc->trig_truncate_after_statement)
2864  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_TRUNCATE,
2865  false, NULL, NULL, NIL, NULL);
2866 }
2867 
2868 
2869 static HeapTuple
2870 GetTupleForTrigger(EState *estate,
2871  EPQState *epqstate,
2872  ResultRelInfo *relinfo,
2873  ItemPointer tid,
2874  LockTupleMode lockmode,
2875  TupleTableSlot **newSlot)
2876 {
2877  Relation relation = relinfo->ri_RelationDesc;
2878  HeapTupleData tuple;
2879  HeapTuple result;
2880  Buffer buffer;
2881 
2882  if (newSlot != NULL)
2883  {
2884  HTSU_Result test;
2885  HeapUpdateFailureData hufd;
2886 
2887  *newSlot = NULL;
2888 
2889  /* caller must pass an epqstate if EvalPlanQual is possible */
2890  Assert(epqstate != NULL);
2891 
2892  /*
2893  * lock tuple for update
2894  */
2895 ltrmark:;
2896  tuple.t_self = *tid;
2897  test = heap_lock_tuple(relation, &tuple,
2898  estate->es_output_cid,
2899  lockmode, LockWaitBlock,
2900  false, &buffer, &hufd);
2901  switch (test)
2902  {
2903  case HeapTupleSelfUpdated:
2904 
2905  /*
2906  * The target tuple was already updated or deleted by the
2907  * current command, or by a later command in the current
2908  * transaction. We ignore the tuple in the former case, and
2909  * throw error in the latter case, for the same reasons
2910  * enumerated in ExecUpdate and ExecDelete in
2911  * nodeModifyTable.c.
2912  */
2913  if (hufd.cmax != estate->es_output_cid)
2914  ereport(ERROR,
2915  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2916  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2917  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2918 
2919  /* treat it as deleted; do not process */
2920  ReleaseBuffer(buffer);
2921  return NULL;
2922 
2923  case HeapTupleMayBeUpdated:
2924  break;
2925 
2926  case HeapTupleUpdated:
2927  ReleaseBuffer(buffer);
2928  if (IsolationUsesXactSnapshot())
2929  ereport(ERROR,
2930  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2931  errmsg("could not serialize access due to concurrent update")));
2932  if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2933  {
2934  /* it was updated, so look at the updated version */
2935  TupleTableSlot *epqslot;
2936 
2937  epqslot = EvalPlanQual(estate,
2938  epqstate,
2939  relation,
2940  relinfo->ri_RangeTableIndex,
2941  lockmode,
2942  &hufd.ctid,
2943  hufd.xmax);
2944  if (!TupIsNull(epqslot))
2945  {
2946  *tid = hufd.ctid;
2947  *newSlot = epqslot;
2948 
2949  /*
2950  * EvalPlanQual already locked the tuple, but we
2951  * re-call heap_lock_tuple anyway as an easy way of
2952  * re-fetching the correct tuple. Speed is hardly a
2953  * criterion in this path anyhow.
2954  */
2955  goto ltrmark;
2956  }
2957  }
2958 
2959  /*
2960  * if tuple was deleted or PlanQual failed for updated tuple -
2961  * we must not process this tuple!
2962  */
2963  return NULL;
2964 
2965  case HeapTupleInvisible:
2966  elog(ERROR, "attempted to lock invisible tuple");
2967 
2968  default:
2969  ReleaseBuffer(buffer);
2970  elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
2971  return NULL; /* keep compiler quiet */
2972  }
2973  }
2974  else
2975  {
2976  Page page;
2977  ItemId lp;
2978 
2979  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2980 
2981  /*
2982  * Although we already know this tuple is valid, we must lock the
2983  * buffer to ensure that no one has a buffer cleanup lock; otherwise
2984  * they might move the tuple while we try to copy it. But we can
2985  * release the lock before actually doing the heap_copytuple call,
2986  * since holding pin is sufficient to prevent anyone from getting a
2987  * cleanup lock they don't already hold.
2988  */
2989  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2990 
2991  page = BufferGetPage(buffer);
2992  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2993 
2994  Assert(ItemIdIsNormal(lp));
2995 
2996  tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2997  tuple.t_len = ItemIdGetLength(lp);
2998  tuple.t_self = *tid;
2999  tuple.t_tableOid = RelationGetRelid(relation);
3000 
3001  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3002  }
3003 
3004  result = heap_copytuple(&tuple);
3005  ReleaseBuffer(buffer);
3006 
3007  return result;
3008 }
3009 
3010 /*
3011  * Is trigger enabled to fire?
3012  */
3013 static bool
3014 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3015  Trigger *trigger, TriggerEvent event,
3016  Bitmapset *modifiedCols,
3017  HeapTuple oldtup, HeapTuple newtup)
3018 {
3019  /* Check replication-role-dependent enable state */
3020  if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3021  {
3022  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3023  trigger->tgenabled == TRIGGER_DISABLED)
3024  return false;
3025  }
3026  else /* ORIGIN or LOCAL role */
3027  {
3028  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3029  trigger->tgenabled == TRIGGER_DISABLED)
3030  return false;
3031  }
3032 
3033  /*
3034  * Check for column-specific trigger (only possible for UPDATE, and in
3035  * fact we *must* ignore tgattr for other event types)
3036  */
3037  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3038  {
3039  int i;
3040  bool modified;
3041 
3042  modified = false;
3043  for (i = 0; i < trigger->tgnattr; i++)
3044  {
3045  if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3046  modifiedCols))
3047  {
3048  modified = true;
3049  break;
3050  }
3051  }
3052  if (!modified)
3053  return false;
3054  }
3055 
3056  /* Check for WHEN clause */
3057  if (trigger->tgqual)
3058  {
3059  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
3060  List **predicate;
3061  ExprContext *econtext;
3062  TupleTableSlot *oldslot = NULL;
3063  TupleTableSlot *newslot = NULL;
3064  MemoryContext oldContext;
3065  int i;
3066 
3067  Assert(estate != NULL);
3068 
3069  /*
3070  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3071  * matching element of relinfo->ri_TrigWhenExprs[]
3072  */
3073  i = trigger - relinfo->ri_TrigDesc->triggers;
3074  predicate = &relinfo->ri_TrigWhenExprs[i];
3075 
3076  /*
3077  * If first time through for this WHEN expression, build expression
3078  * nodetrees for it. Keep them in the per-query memory context so
3079  * they'll survive throughout the query.
3080  */
3081  if (*predicate == NIL)
3082  {
3083  Node *tgqual;
3084 
3085  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3086  tgqual = stringToNode(trigger->tgqual);
3087  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3090  /* ExecQual wants implicit-AND form */
3091  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3092  *predicate = (List *) ExecPrepareExpr((Expr *) tgqual, estate);
3093  MemoryContextSwitchTo(oldContext);
3094  }
3095 
3096  /*
3097  * We will use the EState's per-tuple context for evaluating WHEN
3098  * expressions (creating it if it's not already there).
3099  */
3100  econtext = GetPerTupleExprContext(estate);
3101 
3102  /*
3103  * Put OLD and NEW tuples into tupleslots for expression evaluation.
3104  * These slots can be shared across the whole estate, but be careful
3105  * that they have the current resultrel's tupdesc.
3106  */
3107  if (HeapTupleIsValid(oldtup))
3108  {
3109  if (estate->es_trig_oldtup_slot == NULL)
3110  {
3111  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3112  estate->es_trig_oldtup_slot = ExecInitExtraTupleSlot(estate);
3113  MemoryContextSwitchTo(oldContext);
3114  }
3115  oldslot = estate->es_trig_oldtup_slot;
3116  if (oldslot->tts_tupleDescriptor != tupdesc)
3117  ExecSetSlotDescriptor(oldslot, tupdesc);
3118  ExecStoreTuple(oldtup, oldslot, InvalidBuffer, false);
3119  }
3120  if (HeapTupleIsValid(newtup))
3121  {
3122  if (estate->es_trig_newtup_slot == NULL)
3123  {
3124  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3125  estate->es_trig_newtup_slot = ExecInitExtraTupleSlot(estate);
3126  MemoryContextSwitchTo(oldContext);
3127  }
3128  newslot = estate->es_trig_newtup_slot;
3129  if (newslot->tts_tupleDescriptor != tupdesc)
3130  ExecSetSlotDescriptor(newslot, tupdesc);
3131  ExecStoreTuple(newtup, newslot, InvalidBuffer, false);
3132  }
3133 
3134  /*
3135  * Finally evaluate the expression, making the old and/or new tuples
3136  * available as INNER_VAR/OUTER_VAR respectively.
3137  */
3138  econtext->ecxt_innertuple = oldslot;
3139  econtext->ecxt_outertuple = newslot;
3140  if (!ExecQual(*predicate, econtext, false))
3141  return false;
3142  }
3143 
3144  return true;
3145 }
3146 
3147 
3148 /* ----------
3149  * After-trigger stuff
3150  *
3151  * The AfterTriggersData struct holds data about pending AFTER trigger events
3152  * during the current transaction tree. (BEFORE triggers are fired
3153  * immediately so we don't need any persistent state about them.) The struct
3154  * and most of its subsidiary data are kept in TopTransactionContext; however
3155  * the individual event records are kept in a separate sub-context. This is
3156  * done mainly so that it's easy to tell from a memory context dump how much
3157  * space is being eaten by trigger events.
3158  *
3159  * Because the list of pending events can grow large, we go to some
3160  * considerable effort to minimize per-event memory consumption. The event
3161  * records are grouped into chunks and common data for similar events in the
3162  * same chunk is only stored once.
3163  *
3164  * XXX We need to be able to save the per-event data in a file if it grows too
3165  * large.
3166  * ----------
3167  */
3168 
3169 /* Per-trigger SET CONSTRAINT status */
3170 typedef struct SetConstraintTriggerData
3171 {
3172  Oid sct_tgoid;
3173  bool sct_tgisdeferred;
3174 } SetConstraintTriggerData;
3175 
3176 typedef struct SetConstraintTriggerData *SetConstraintTrigger;
3177 
3178 /*
3179  * SET CONSTRAINT intra-transaction status.
3180  *
3181  * We make this a single palloc'd object so it can be copied and freed easily.
3182  *
3183  * all_isset and all_isdeferred are used to keep track
3184  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3185  *
3186  * trigstates[] stores per-trigger tgisdeferred settings.
3187  */
3188 typedef struct SetConstraintStateData
3189 {
3190  bool all_isset;
3191  bool all_isdeferred;
3192  int numstates; /* number of trigstates[] entries in use */
3193  int numalloc; /* allocated size of trigstates[] */
3194  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3195 } SetConstraintStateData;
3196 
3197 typedef SetConstraintStateData *SetConstraintState;
3198 
3199 
3200 /*
3201  * Per-trigger-event data
3202  *
3203  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3204  * status bits and up to two tuple CTIDs. Each event record also has an
3205  * associated AfterTriggerSharedData that is shared across all instances of
3206  * similar events within a "chunk".
3207  *
3208  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3209  * fields. Updates of regular tables use two; inserts and deletes of regular
3210  * tables use one; foreign tables always use zero and save the tuple(s) to a
3211  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3212  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3213  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3214  * tuple(s). This permits storing tuples once regardless of the number of
3215  * row-level triggers on a foreign table.
3216  *
3217  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3218  * require no ctid field. We lack the flag bit space to neatly represent that
3219  * distinct case, and it seems unlikely to be worth much trouble.
3220  *
3221  * Note: ats_firing_id is initially zero and is set to something else when
3222  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3223  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3224  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3225  * because all instances of the same type of event in a given event list will
3226  * be fired at the same time, if they were queued between the same firing
3227  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3228  * a new event to an existing AfterTriggerSharedData record.
3229  */
3230 typedef uint32 TriggerFlags;
3231 
3232 #define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order
3233  * bits */
3234 #define AFTER_TRIGGER_DONE 0x10000000
3235 #define AFTER_TRIGGER_IN_PROGRESS 0x20000000
3236 /* bits describing the size and tuple sources of this event */
3237 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3238 #define AFTER_TRIGGER_FDW_FETCH 0x80000000
3239 #define AFTER_TRIGGER_1CTID 0x40000000
3240 #define AFTER_TRIGGER_2CTID 0xC0000000
3241 #define AFTER_TRIGGER_TUP_BITS 0xC0000000
3242 
3243 typedef struct AfterTriggerSharedData *AfterTriggerShared;
3244 
3245 typedef struct AfterTriggerSharedData
3246 {
3247  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3248  Oid ats_tgoid; /* the trigger's ID */
3249  Oid ats_relid; /* the relation it's on */
3250  CommandId ats_firing_id; /* ID for firing cycle */
3251 } AfterTriggerSharedData;
3252 
3253 typedef struct AfterTriggerEventData *AfterTriggerEvent;
3254 
3255 typedef struct AfterTriggerEventData
3256 {
3257  TriggerFlags ate_flags; /* status bits and offset to shared data */
3258  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3259  ItemPointerData ate_ctid2; /* new updated tuple */
3260 } AfterTriggerEventData;
3261 
3262 /* AfterTriggerEventData, minus ate_ctid2 */
3263 typedef struct AfterTriggerEventDataOneCtid
3264 {
3265  TriggerFlags ate_flags; /* status bits and offset to shared data */
3266  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3267 } AfterTriggerEventDataOneCtid;
3268 
3269 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
3270 typedef struct AfterTriggerEventDataZeroCtids
3271 {
3272  TriggerFlags ate_flags; /* status bits and offset to shared data */
3273 } AfterTriggerEventDataZeroCtids;
3274 
3275 #define SizeofTriggerEvent(evt) \
3276  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3277  sizeof(AfterTriggerEventData) : \
3278  ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3279  sizeof(AfterTriggerEventDataOneCtid) : \
3280  sizeof(AfterTriggerEventDataZeroCtids))
3281 
3282 #define GetTriggerSharedData(evt) \
3283  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3284 
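The two macros above are the only sanctioned way to interpret ate_flags. As a small illustration (the helper name eventTriggerOid is hypothetical), an event's shared record is located by following the offset stored in the low-order bits of ate_flags, which points into the tail end of the same chunk:

static Oid
eventTriggerOid(AfterTriggerEvent event)
{
    AfterTriggerShared evtshared = GetTriggerSharedData(event);

    return evtshared->ats_tgoid;
}
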
3285 /*
3286  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3287  * larger chunks (a slightly more sophisticated version of an expansible
3288  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3289  * AfterTriggerEventData records; the space between endfree and endptr is
3290  * occupied by AfterTriggerSharedData records.
3291  */
3292 typedef struct AfterTriggerEventChunk
3293 {
3294  struct AfterTriggerEventChunk *next; /* list link */
3295  char *freeptr; /* start of free space in chunk */
3296  char *endfree; /* end of free space in chunk */
3297  char *endptr; /* end of chunk */
3298  /* event data follows here */
3299 } AfterTriggerEventChunk;
3300 
3301 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3302 
3303 /* A list of events */
3304 typedef struct AfterTriggerEventList
3305 {
3306  AfterTriggerEventChunk *head;
3307  AfterTriggerEventChunk *tail;
3308  char *tailfree; /* freeptr of tail chunk */
3309 } AfterTriggerEventList;
3310 
3311 /* Macros to help in iterating over a list of events */
3312 #define for_each_chunk(cptr, evtlist) \
3313  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3314 #define for_each_event(eptr, cptr) \
3315  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3316  (char *) eptr < (cptr)->freeptr; \
3317  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3318 /* Use this if no special per-chunk processing is needed */
3319 #define for_each_event_chunk(eptr, cptr, evtlist) \
3320  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3321 
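As a usage sketch of the iteration macros (countPendingEvents is hypothetical, but the pattern matches how the list-walking routines later in this file use them):

static int
countPendingEvents(AfterTriggerEventList *events)
{
    AfterTriggerEvent event;
    AfterTriggerEventChunk *chunk;
    int         count = 0;

    for_each_event_chunk(event, chunk, *events)
    {
        if (!(event->ate_flags & AFTER_TRIGGER_DONE))
            count++;
    }
    return count;
}
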
3322 
3323 /*
3324  * All per-transaction data for the AFTER TRIGGERS module.
3325  *
3326  * AfterTriggersData has the following fields:
3327  *
3328  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3329  * We mark firable events with the current firing cycle's ID so that we can
3330  * tell which ones to work on. This ensures sane behavior if a trigger
3331  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3332  * only fire those events that weren't already scheduled for firing.
3333  *
3334  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3335  * This is saved and restored across failed subtransactions.
3336  *
3337  * events is the current list of deferred events. This is global across
3338  * all subtransactions of the current transaction. In a subtransaction
3339  * abort, we know that the events added by the subtransaction are at the
3340  * end of the list, so it is relatively easy to discard them. The event
3341  * list chunks themselves are stored in event_cxt.
3342  *
3343  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3344  * (-1 when the stack is empty).
3345  *
3346  * query_stack[query_depth] is a list of AFTER trigger events queued by the
3347  * current query (and the query_stack entries below it are lists of trigger
3348  * events queued by calling queries). None of these are valid until the
3349  * matching AfterTriggerEndQuery call occurs. At that point we fire
3350  * immediate-mode triggers, and append any deferred events to the main events
3351  * list.
3352  *
3353  * fdw_tuplestores[query_depth] is a tuplestore containing the foreign tuples
3354  * needed for the current query.
3355  *
3356  * old_tuplestores[query_depth] and new_tuplestores[query_depth] hold the
3357  * transition relations for the current query.
3358  *
3359  * maxquerydepth is just the allocated length of query_stack and the
3360  * tuplestores.
3361  *
3362  * state_stack is a stack of pointers to saved copies of the SET CONSTRAINTS
3363  * state data; each subtransaction level that modifies that state first
3364  * saves a copy, which we use to restore the state if we abort.
3365  *
3366  * events_stack is a stack of copies of the events head/tail pointers,
3367  * which we use to restore those values during subtransaction abort.
3368  *
3369  * depth_stack is a stack of copies of subtransaction-start-time query_depth,
3370  * which we similarly use to clean up at subtransaction abort.
3371  *
3372  * firing_stack is a stack of copies of subtransaction-start-time
3373  * firing_counter. We use this to recognize which deferred triggers were
3374  * fired (or marked for firing) within an aborted subtransaction.
3375  *
3376  * We use GetCurrentTransactionNestLevel() to determine the correct array
3377  * index in these stacks. maxtransdepth is the number of allocated entries in
3378  * each stack. (By not keeping our own stack pointer, we can avoid trouble
3379  * in cases where errors during subxact abort cause multiple invocations
3380  * of AfterTriggerEndSubXact() at the same nesting depth.)
3381  */
3382 typedef struct AfterTriggersData
3383 {
3384  CommandId firing_counter; /* next firing ID to assign */
3385  SetConstraintState state; /* the active S C state */
3386  AfterTriggerEventList events; /* deferred-event list */
3387  int query_depth; /* current query list index */
3388  AfterTriggerEventList *query_stack; /* events pending from each query */
3389  Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from each query */
3390  Tuplestorestate **old_tuplestores; /* all old tuples from each query */
3391  Tuplestorestate **new_tuplestores; /* all new tuples from each query */
3392  int maxquerydepth; /* allocated len of above array */
3393  MemoryContext event_cxt; /* memory context for events, if any */
3394 
3395  /* these fields are just for resetting at subtrans abort: */
3396 
3397  SetConstraintState *state_stack; /* stacked S C states */
3398  AfterTriggerEventList *events_stack; /* stacked list pointers */
3399  int *depth_stack; /* stacked query_depths */
3400  CommandId *firing_stack; /* stacked firing_counters */
3401  int maxtransdepth; /* allocated len of above arrays */
3402 } AfterTriggersData;
3403 
3404 static AfterTriggersData afterTriggers;
3405 
3406 static void AfterTriggerExecute(AfterTriggerEvent event,
3407  Relation rel, TriggerDesc *trigdesc,
3408  FmgrInfo *finfo,
3409  Instrumentation *instr,
3410  MemoryContext per_tuple_context,
3411  TupleTableSlot *trig_tuple_slot1,
3412  TupleTableSlot *trig_tuple_slot2);
3413 static SetConstraintState SetConstraintStateCreate(int numalloc);
3414 static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
3415 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3416  Oid tgoid, bool tgisdeferred);
3417 
3418 
3419 /*
3420  * Gets a current query transition tuplestore and initializes it if necessary.
3421  * This can be holding a single transition row tuple (in the case of an FDW)
3422  * or a transition table (for an AFTER trigger).
3423  */
3424 static Tuplestorestate *
3426 {
3427  Tuplestorestate *ret;
3428 
3429  ret = tss[afterTriggers.query_depth];
3430  if (ret == NULL)
3431  {
3432  MemoryContext oldcxt;
3433  ResourceOwner saveResourceOwner;
3434 
3435  /*
3436  * Make the tuplestore valid until end of transaction. This is the
3437  * allocation lifespan of the associated events list, but we really
3438  * only need it until AfterTriggerEndQuery().
3439  */
3441  saveResourceOwner = CurrentResourceOwner;
3442  PG_TRY();
3443  {
3445  ret = tuplestore_begin_heap(false, false, work_mem);
3446  }
3447  PG_CATCH();
3448  {
3449  CurrentResourceOwner = saveResourceOwner;
3450  PG_RE_THROW();
3451  }
3452  PG_END_TRY();
3453  CurrentResourceOwner = saveResourceOwner;
3454  MemoryContextSwitchTo(oldcxt);
3455 
3456  tss[afterTriggers.query_depth] = ret;
3457  }
3458 
3459  return ret;
3460 }
3461 
3462 /* ----------
3463  * afterTriggerCheckState()
3464  *
3465  * Returns true if the trigger event is actually in state DEFERRED.
3466  * ----------
3467  */
3468 static bool
3469 afterTriggerCheckState(AfterTriggerShared evtshared)
3470 {
3471  Oid tgoid = evtshared->ats_tgoid;
3472  SetConstraintState state = afterTriggers.state;
3473  int i;
3474 
3475  /*
3476  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3477  * constraints declared NOT DEFERRABLE), the state is always false.
3478  */
3479  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3480  return false;
3481 
3482  /*
3483  * If constraint state exists, SET CONSTRAINTS might have been executed
3484  * either for this trigger or for all triggers.
3485  */
3486  if (state != NULL)
3487  {
3488  /* Check for SET CONSTRAINTS for this specific trigger. */
3489  for (i = 0; i < state->numstates; i++)
3490  {
3491  if (state->trigstates[i].sct_tgoid == tgoid)
3492  return state->trigstates[i].sct_tgisdeferred;
3493  }
3494 
3495  /* Check for SET CONSTRAINTS ALL. */
3496  if (state->all_isset)
3497  return state->all_isdeferred;
3498  }
3499 
3500  /*
3501  * Otherwise return the default state for the trigger.
3502  */
3503  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3504 }
3505 
3506 
3507 /* ----------
3508  * afterTriggerAddEvent()
3509  *
3510  * Add a new trigger event to the specified queue.
3511  * The passed-in event data is copied.
3512  * ----------
3513  */
3514 static void
3515 afterTriggerAddEvent(AfterTriggerEventList *events,
3516  AfterTriggerEvent event, AfterTriggerShared evtshared)
3517 {
3518  Size eventsize = SizeofTriggerEvent(event);
3519  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3520  AfterTriggerEventChunk *chunk;
3521  AfterTriggerShared newshared;
3522  AfterTriggerEvent newevent;
3523 
3524  /*
3525  * If empty list or not enough room in the tail chunk, make a new chunk.
3526  * We assume here that a new shared record will always be needed.
3527  */
3528  chunk = events->tail;
3529  if (chunk == NULL ||
3530  chunk->endfree - chunk->freeptr < needed)
3531  {
3532  Size chunksize;
3533 
3534  /* Create event context if we didn't already */
3535  if (afterTriggers.event_cxt == NULL)
3536  afterTriggers.event_cxt =
3538  "AfterTriggerEvents",
3540 
3541  /*
3542  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3543  * These numbers are fairly arbitrary, though there is a hard limit at
3544  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3545  * shared records using the available space in ate_flags. Another
3546  * constraint is that if the chunk size gets too huge, the search loop
3547  * below would get slow given a (not too common) usage pattern with
3548  * many distinct event types in a chunk. Therefore, we double the
3549  * preceding chunk size only if there weren't too many shared records
3550  * in the preceding chunk; otherwise we halve it. This gives us some
3551  * ability to adapt to the actual usage pattern of the current query
3552  * while still having large chunk sizes in typical usage. All chunk
3553  * sizes used should be MAXALIGN multiples, to ensure that the shared
3554  * records will be aligned safely.
3555  */
3556 #define MIN_CHUNK_SIZE 1024
3557 #define MAX_CHUNK_SIZE (1024*1024)
3558 
3559 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3560 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3561 #endif
3562 
3563  if (chunk == NULL)
3564  chunksize = MIN_CHUNK_SIZE;
3565  else
3566  {
3567  /* preceding chunk size... */
3568  chunksize = chunk->endptr - (char *) chunk;
3569  /* check number of shared records in preceding chunk */
3570  if ((chunk->endptr - chunk->endfree) <=
3571  (100 * sizeof(AfterTriggerSharedData)))
3572  chunksize *= 2; /* okay, double it */
3573  else
3574  chunksize /= 2; /* too many shared records */
3575  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
3576  }
3577  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
3578  chunk->next = NULL;
3579  chunk->freeptr = CHUNK_DATA_START(chunk);
3580  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
3581  Assert(chunk->endfree - chunk->freeptr >= needed);
3582 
3583  if (events->head == NULL)
3584  events->head = chunk;
3585  else
3586  events->tail->next = chunk;
3587  events->tail = chunk;
3588  /* events->tailfree is now out of sync, but we'll fix it below */
3589  }
3590 
3591  /*
3592  * Try to locate a matching shared-data record already in the chunk. If
3593  * none, make a new one.
3594  */
3595  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
3596  (char *) newshared >= chunk->endfree;
3597  newshared--)
3598  {
3599  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
3600  newshared->ats_relid == evtshared->ats_relid &&
3601  newshared->ats_event == evtshared->ats_event &&
3602  newshared->ats_firing_id == 0)
3603  break;
3604  }
3605  if ((char *) newshared < chunk->endfree)
3606  {
3607  *newshared = *evtshared;
3608  newshared->ats_firing_id = 0; /* just to be sure */
3609  chunk->endfree = (char *) newshared;
3610  }
3611 
3612  /* Insert the data */
3613  newevent = (AfterTriggerEvent) chunk->freeptr;
3614  memcpy(newevent, event, eventsize);
3615  /* ... and link the new event to its shared record */
3616  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
3617  newevent->ate_flags |= (char *) newshared - (char *) newevent;
3618 
3619  chunk->freeptr += eventsize;
3620  events->tailfree = chunk->freeptr;
3621 }
3622 
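To restate the chunk-sizing policy described in the comment above as standalone arithmetic (illustrative only; nextChunkSize is hypothetical and takes a count of shared records where the code above measures their total bytes):

static Size
nextChunkSize(Size prevsize, int nshared_in_prev)
{
    Size        chunksize = prevsize;   /* previous chunk's total size */

    if (nshared_in_prev <= 100)
        chunksize *= 2;         /* few shared records: keep growing */
    else
        chunksize /= 2;         /* many distinct event types: back off */

    return Min(chunksize, MAX_CHUNK_SIZE);
}
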
3623 /* ----------
3624  * afterTriggerFreeEventList()
3625  *
3626  * Free all the event storage in the given list.
3627  * ----------
3628  */
3629 static void
3630 afterTriggerFreeEventList(AfterTriggerEventList *events)
3631 {
3632  AfterTriggerEventChunk *chunk;
3633  AfterTriggerEventChunk *next_chunk;
3634 
3635  for (chunk = events->head; chunk != NULL; chunk = next_chunk)
3636  {
3637  next_chunk = chunk->next;
3638  pfree(chunk);
3639  }
3640  events->head = NULL;
3641  events->tail = NULL;
3642  events->tailfree = NULL;
3643 }
3644 
3645 /* ----------
3646  * afterTriggerRestoreEventList()
3647  *
3648  * Restore an event list to its prior length, removing all the events
3649  * added since it had the value old_events.
3650  * ----------
3651  */
3652 static void
3653 afterTriggerRestoreEventList(AfterTriggerEventList *events,
3654  const AfterTriggerEventList *old_events)
3655 {
3656  AfterTriggerEventChunk *chunk;
3657  AfterTriggerEventChunk *next_chunk;
3658 
3659  if (old_events->tail == NULL)
3660  {
3661  /* restoring to a completely empty state, so free everything */
3662  afterTriggerFreeEventList(events);
3663  }
3664  else
3665  {
3666  *events = *old_events;
3667  /* free any chunks after the last one we want to keep */
3668  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3669  {
3670  next_chunk = chunk->next;
3671  pfree(chunk);
3672  }
3673  /* and clean up the tail chunk to be the right length */
3674  events->tail->next = NULL;
3675  events->tail->freeptr = events->tailfree;
3676 
3677  /*
3678  * We don't make any effort to remove now-unused shared data records.
3679  * They might still be useful, anyway.
3680  */
3681  }
3682 }
3683 
3684 
3685 /* ----------
3686  * AfterTriggerExecute()
3687  *
3688  * Fetch the required tuples back from the heap and fire one
3689  * single trigger function.
3690  *
3691  * Frequently, this will be fired many times in a row for triggers of
3692  * a single relation. Therefore, we cache the open relation and provide
3693  * fmgr lookup cache space at the caller level. (For triggers fired at
3694  * the end of a query, we can even piggyback on the executor's state.)
3695  *
3696  * event: event currently being fired.
3697  * rel: open relation for event.
3698  * trigdesc: working copy of rel's trigger info.
3699  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
3700  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
3701  * or NULL if no instrumentation is wanted.
3702  * per_tuple_context: memory context to call trigger function in.
3703  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
3704  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
3705  * ----------
3706  */
3707 static void
3708 AfterTriggerExecute(AfterTriggerEvent event,
3709  Relation rel, TriggerDesc *trigdesc,
3710  FmgrInfo *finfo, Instrumentation *instr,
3711  MemoryContext per_tuple_context,
3712  TupleTableSlot *trig_tuple_slot1,
3713  TupleTableSlot *trig_tuple_slot2)
3714 {
3715  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3716  Oid tgoid = evtshared->ats_tgoid;
3717  TriggerData LocTriggerData;
3718  HeapTupleData tuple1;
3719  HeapTupleData tuple2;
3720  HeapTuple rettuple;
3721  Buffer buffer1 = InvalidBuffer;
3722  Buffer buffer2 = InvalidBuffer;
3723  int tgindx;
3724 
3725  /*
3726  * Locate trigger in trigdesc.
3727  */
3728  LocTriggerData.tg_trigger = NULL;
3729  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
3730  {
3731  if (trigdesc->triggers[tgindx].tgoid == tgoid)
3732  {
3733  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
3734  break;
3735  }
3736  }
3737  if (LocTriggerData.tg_trigger == NULL)
3738  elog(ERROR, "could not find trigger %u", tgoid);
3739 
3740  /*
3741  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
3742  * to include time spent re-fetching tuples in the trigger cost.
3743  */
3744  if (instr)
3745  InstrStartNode(instr + tgindx);
3746 
3747  /*
3748  * Fetch the required tuple(s).
3749  */
3750  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
3751  {
3752   case AFTER_TRIGGER_FDW_FETCH:
3753    {
3754     Tuplestorestate *fdw_tuplestore =
3755      GetTriggerTransitionTuplestore
3756      (afterTriggers.fdw_tuplestores);
3757 
3758  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
3759  trig_tuple_slot1))
3760  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3761 
3762      if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3763       TRIGGER_EVENT_UPDATE &&
3764       !tuplestore_gettupleslot(fdw_tuplestore, true, false,
3765  trig_tuple_slot2))
3766  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3767  }
3768    /* fall through */
3769   case AFTER_TRIGGER_FDW_REUSE:
3770 
3771  /*
3772  * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple()
3773  * ensures that tg_trigtuple does not reference tuplestore memory.
3774  * (It is formally possible for the trigger function to queue
3775  * trigger events that add to the same tuplestore, which can push
3776  * other tuples out of memory.) The distinction is academic,
3777  * because we start with a minimal tuple that ExecFetchSlotTuple()
3778  * must materialize anyway.
3779  */
3780  LocTriggerData.tg_trigtuple =
3781  ExecMaterializeSlot(trig_tuple_slot1);
3782  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3783 
3784  LocTriggerData.tg_newtuple =
3785     ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3786      TRIGGER_EVENT_UPDATE) ?
3787      ExecMaterializeSlot(trig_tuple_slot2) : NULL;
3788  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3789 
3790  break;
3791 
3792  default:
3793  if (ItemPointerIsValid(&(event->ate_ctid1)))
3794  {
3795  ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
3796  if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer1, false, NULL))
3797  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3798  LocTriggerData.tg_trigtuple = &tuple1;
3799  LocTriggerData.tg_trigtuplebuf = buffer1;
3800  }
3801  else
3802  {
3803  LocTriggerData.tg_trigtuple = NULL;
3804  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3805  }
3806 
3807  /* don't touch ctid2 if not there */
3808    if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
3809     AFTER_TRIGGER_2CTID &&
3810     ItemPointerIsValid(&(event->ate_ctid2)))
3811  {
3812  ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
3813  if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer2, false, NULL))
3814  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3815  LocTriggerData.tg_newtuple = &tuple2;
3816  LocTriggerData.tg_newtuplebuf = buffer2;
3817  }
3818  else
3819  {
3820  LocTriggerData.tg_newtuple = NULL;
3821  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3822  }
3823  }
3824 
3825  /*
3826  * Set up the tuplestore information.
3827  */
3828  if (LocTriggerData.tg_trigger->tgoldtable)
3829   LocTriggerData.tg_oldtable =
3830    GetTriggerTransitionTuplestore(afterTriggers.old_tuplestores);
3831  else
3832  LocTriggerData.tg_oldtable = NULL;
3833  if (LocTriggerData.tg_trigger->tgnewtable)
3834   LocTriggerData.tg_newtable =
3835    GetTriggerTransitionTuplestore(afterTriggers.new_tuplestores);
3836  else
3837  LocTriggerData.tg_newtable = NULL;
3838 
3839  /*
3840  * Setup the remaining trigger information
3841  */
3842  LocTriggerData.type = T_TriggerData;
3843  LocTriggerData.tg_event =
3844   evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
3845  LocTriggerData.tg_relation = rel;
3846 
3847  MemoryContextReset(per_tuple_context);
3848 
3849  /*
3850  * Call the trigger and throw away any possibly returned updated tuple.
3851  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
3852  */
3853  rettuple = ExecCallTriggerFunc(&LocTriggerData,
3854  tgindx,
3855  finfo,
3856  NULL,
3857  per_tuple_context);
3858  if (rettuple != NULL &&
3859  rettuple != LocTriggerData.tg_trigtuple &&
3860  rettuple != LocTriggerData.tg_newtuple)
3861  heap_freetuple(rettuple);
3862 
3863  /*
3864  * Release buffers
3865  */
3866  if (buffer1 != InvalidBuffer)
3867  ReleaseBuffer(buffer1);
3868  if (buffer2 != InvalidBuffer)
3869  ReleaseBuffer(buffer2);
3870 
3871  /*
3872  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
3873  * one "tuple returned" (really the number of firings).
3874  */
3875  if (instr)
3876  InstrStopNode(instr + tgindx, 1);
3877 }
3878 
3879 
3880 /*
3881  * afterTriggerMarkEvents()
3882  *
3883  * Scan the given event list for not yet invoked events. Mark the ones
3884  * that can be invoked now with the current firing ID.
3885  *
3886  * If move_list isn't NULL, events that are not to be invoked now are
3887  * transferred to move_list.
3888  *
3889  * When immediate_only is TRUE, do not invoke currently-deferred triggers.
3890  * (This will be FALSE only at main transaction exit.)
3891  *
3892  * Returns TRUE if any invokable events were found.
3893  */
3894 static bool
3895 afterTriggerMarkEvents(AfterTriggerEventList *events,
3896       AfterTriggerEventList *move_list,
3897  bool immediate_only)
3898 {
3899  bool found = false;
3900  AfterTriggerEvent event;
3901  AfterTriggerEventChunk *chunk;
3902 
3903  for_each_event_chunk(event, chunk, *events)
3904  {
3905  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3906  bool defer_it = false;
3907 
3908   if (!(event->ate_flags &
3909     (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
3910   {
3911  /*
3912  * This trigger hasn't been called or scheduled yet. Check if we
3913  * should call it now.
3914  */
3915  if (immediate_only && afterTriggerCheckState(evtshared))
3916  {
3917  defer_it = true;
3918  }
3919  else
3920  {
3921  /*
3922  * Mark it as to be fired in this firing cycle.
3923  */
3924  evtshared->ats_firing_id = afterTriggers.firing_counter;
3925  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
3926  found = true;
3927  }
3928  }
3929 
3930  /*
3931  * If it's deferred, move it to move_list, if requested.
3932  */
3933  if (defer_it && move_list != NULL)
3934  {
3935  /* add it to move_list */
3936  afterTriggerAddEvent(move_list, event, evtshared);
3937  /* mark original copy "done" so we don't do it again */
3938  event->ate_flags |= AFTER_TRIGGER_DONE;
3939  }
3940  }
3941 
3942  return found;
3943 }
3944 
3945 /*
3946  * afterTriggerInvokeEvents()
3947  *
3948  * Scan the given event list for events that are marked as to be fired
3949  * in the current firing cycle, and fire them.
3950  *
3951  * If estate isn't NULL, we use its result relation info to avoid repeated
3952  * openings and closing of trigger target relations. If it is NULL, we
3953  * make one locally to cache the info in case there are multiple trigger
3954  * events per rel.
3955  *
3956  * When delete_ok is TRUE, it's safe to delete fully-processed events.
3957  * (We are not very tense about that: we simply reset a chunk to be empty
3958  * if all its events got fired. The objective here is just to avoid useless
3959  * rescanning of events when a trigger queues new events during transaction
3960  * end, so it's not necessary to worry much about the case where only
3961  * some events are fired.)
3962  *
3963  * Returns TRUE if no unfired events remain in the list (this allows us
3964  * to avoid repeating afterTriggerMarkEvents).
3965  */
3966 static bool
3967 afterTriggerInvokeEvents(AfterTriggerEventList *events,
3968       CommandId firing_id,
3969  EState *estate,
3970  bool delete_ok)
3971 {
3972  bool all_fired = true;
3973  AfterTriggerEventChunk *chunk;
3974  MemoryContext per_tuple_context;
3975  bool local_estate = false;
3976  Relation rel = NULL;
3977  TriggerDesc *trigdesc = NULL;
3978  FmgrInfo *finfo = NULL;
3979  Instrumentation *instr = NULL;
3980  TupleTableSlot *slot1 = NULL,
3981  *slot2 = NULL;
3982 
3983  /* Make a local EState if need be */
3984  if (estate == NULL)
3985  {
3986  estate = CreateExecutorState();
3987  local_estate = true;
3988  }
3989 
3990  /* Make a per-tuple memory context for trigger function calls */
3991  per_tuple_context =
3992   AllocSetContextCreate(CurrentMemoryContext,
3993        "AfterTriggerTupleContext",
3994        ALLOCSET_DEFAULT_SIZES);
3995 
3996  for_each_chunk(chunk, *events)
3997  {
3998  AfterTriggerEvent event;
3999  bool all_fired_in_chunk = true;
4000 
4001  for_each_event(event, chunk)
4002  {
4003  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4004 
4005  /*
4006  * Is it one for me to fire?
4007  */
4008  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4009  evtshared->ats_firing_id == firing_id)
4010  {
4011  /*
4012  * So let's fire it... but first, find the correct relation if
4013  * this is not the same relation as before.
4014  */
4015  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4016  {
4017  ResultRelInfo *rInfo;
4018 
4019  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
4020  rel = rInfo->ri_RelationDesc;
4021  trigdesc = rInfo->ri_TrigDesc;
4022  finfo = rInfo->ri_TrigFunctions;
4023  instr = rInfo->ri_TrigInstrument;
4024  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4025  {
4026  if (slot1 != NULL)
4027      {
4028       ExecDropSingleTupleTableSlot(slot1);
4029       ExecDropSingleTupleTableSlot(slot2);
4030      }
4031  slot1 = MakeSingleTupleTableSlot(rel->rd_att);
4032  slot2 = MakeSingleTupleTableSlot(rel->rd_att);
4033  }
4034  if (trigdesc == NULL) /* should not happen */
4035  elog(ERROR, "relation %u has no triggers",
4036  evtshared->ats_relid);
4037  }
4038 
4039  /*
4040  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4041  * still set, so recursive examinations of the event list
4042  * won't try to re-fire it.
4043  */
4044  AfterTriggerExecute(event, rel, trigdesc, finfo, instr,
4045  per_tuple_context, slot1, slot2);
4046 
4047  /*
4048  * Mark the event as done.
4049  */
4050  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4051  event->ate_flags |= AFTER_TRIGGER_DONE;
4052  }
4053  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4054  {
4055  /* something remains to be done */
4056  all_fired = all_fired_in_chunk = false;
4057  }
4058  }
4059 
4060  /* Clear the chunk if delete_ok and nothing left of interest */
4061  if (delete_ok && all_fired_in_chunk)
4062  {
4063  chunk->freeptr = CHUNK_DATA_START(chunk);
4064  chunk->endfree = chunk->endptr;
4065 
4066  /*
4067  * If it's last chunk, must sync event list's tailfree too. Note
4068  * that delete_ok must NOT be passed as true if there could be
4069  * stacked AfterTriggerEventList values pointing at this event
4070  * list, since we'd fail to fix their copies of tailfree.
4071  */
4072  if (chunk == events->tail)
4073  events->tailfree = chunk->freeptr;
4074  }
4075  }
4076  if (slot1 != NULL)
4077  {
4078   ExecDropSingleTupleTableSlot(slot1);
4079   ExecDropSingleTupleTableSlot(slot2);
4080  }
4081 
4082  /* Release working resources */
4083  MemoryContextDelete(per_tuple_context);
4084 
4085  if (local_estate)
4086  {
4087  ListCell *l;
4088 
4089  foreach(l, estate->es_trig_target_relations)
4090  {
4091  ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
4092 
4093  /* Close indices and then the relation itself */
4094  ExecCloseIndices(resultRelInfo);
4095  heap_close(resultRelInfo->ri_RelationDesc, NoLock);
4096  }
4097  FreeExecutorState(estate);
4098  }
4099 
4100  return all_fired;
4101 }
4102 
4103 
4104 /* ----------
4105  * AfterTriggerBeginXact()
4106  *
4107  * Called at transaction start (either BEGIN or implicit for single
4108  * statement outside of transaction block).
4109  * ----------
4110  */
4111 void
4112 AfterTriggerBeginXact(void)
4113 {
4114  /*
4115  * Initialize after-trigger state structure to empty
4116  */
4117  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4118  afterTriggers.query_depth = -1;
4119 
4120  /*
4121  * Verify that there is no leftover state remaining. If these assertions
4122  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4123  * up properly.
4124  */
4125  Assert(afterTriggers.state == NULL);
4126  Assert(afterTriggers.query_stack == NULL);
4127  Assert(afterTriggers.fdw_tuplestores == NULL);
4128  Assert(afterTriggers.old_tuplestores == NULL);
4129  Assert(afterTriggers.new_tuplestores == NULL);
4130  Assert(afterTriggers.maxquerydepth == 0);
4131  Assert(afterTriggers.event_cxt == NULL);
4132  Assert(afterTriggers.events.head == NULL);
4133  Assert(afterTriggers.state_stack == NULL);
4134  Assert(afterTriggers.events_stack == NULL);
4135  Assert(afterTriggers.depth_stack == NULL);
4136  Assert(afterTriggers.firing_stack == NULL);
4137  Assert(afterTriggers.maxtransdepth == 0);
4138 }
4139 
4140 
4141 /* ----------
4142  * AfterTriggerBeginQuery()
4143  *
4144  * Called just before we start processing a single query within a
4145  * transaction (or subtransaction). Most of the real work gets deferred
4146  * until somebody actually tries to queue a trigger event.
4147  * ----------
4148  */
4149 void
4150 AfterTriggerBeginQuery(void)
4151 {
4152  /* Increase the query stack depth */
4153  afterTriggers.query_depth++;
4154 }
4155 
4156 
4157 /* ----------
4158  * AfterTriggerEndQuery()
4159  *
4160  * Called after one query has been completely processed. At this time
4161  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4162  * transfer deferred trigger events to the global deferred-trigger list.
4163  *
4164  * Note that this must be called BEFORE closing down the executor
4165  * with ExecutorEnd, because we make use of the EState's info about
4166  * target relations. Normally it is called from ExecutorFinish.
4167  * ----------
4168  */
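/*
 * Illustrative note (added, not part of trigger.c): this is where ordinary,
 * non-deferred AFTER triggers queued by a statement get fired.  For example,
 * assuming a hypothetical row-level trigger
 *
 *     CREATE TRIGGER log_upd AFTER UPDATE ON accounts
 *         FOR EACH ROW EXECUTE PROCEDURE log_update_fn();
 *
 *     UPDATE accounts SET balance = balance - 100 WHERE id = 1;
 *
 * the event queued for the updated row is invoked here, once the UPDATE has
 * finished but before ExecutorEnd.  Table, trigger, and function names are
 * invented for illustration.
 */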
4169 void
4170 AfterTriggerEndQuery(EState *estate)
4171 {
4172  AfterTriggerEventList *events;
4173  Tuplestorestate *fdw_tuplestore;
4174  Tuplestorestate *old_tuplestore;
4175  Tuplestorestate *new_tuplestore;
4176 
4177  /* Must be inside a query, too */
4178  Assert(afterTriggers.query_depth >= 0);
4179 
4180  /*
4181  * If we never even got as far as initializing the event stack, there
4182  * certainly won't be any events, so exit quickly.
4183  */
4184  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4185  {
4186  afterTriggers.query_depth--;
4187  return;
4188  }
4189 
4190  /*
4191  * Process all immediate-mode triggers queued by the query, and move the
4192  * deferred ones to the main list of deferred events.
4193  *
4194  * Notice that we decide which ones will be fired, and put the deferred
4195  * ones on the main list, before anything is actually fired. This ensures
4196  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4197  * IMMEDIATE: all events we have decided to defer will be available for it
4198  * to fire.
4199  *
4200  * We loop in case a trigger queues more events at the same query level.
4201  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4202  * will instead fire any triggers in a dedicated query level. Foreign key
4203  * enforcement triggers do add to the current query level, thanks to their
4204  * passing fire_triggers = false to SPI_execute_snapshot(). Other
4205  * C-language triggers might do likewise. Be careful here: firing a
4206  * trigger could result in query_stack being repalloc'd, so we can't save
4207  * its address across afterTriggerInvokeEvents calls.
4208  *
4209  * If we find no firable events, we don't have to increment
4210  * firing_counter.
4211  */
4212  for (;;)
4213  {
4214  events = &afterTriggers.query_stack[afterTriggers.query_depth];
4215  if (afterTriggerMarkEvents(events, &afterTriggers.events, true))
4216  {
4217  CommandId firing_id = afterTriggers.firing_counter++;
4218 
4219  /* OK to delete the immediate events after processing them */
4220  if (afterTriggerInvokeEvents(events, firing_id, estate, true))
4221  break; /* all fired */
4222  }
4223  else
4224  break;
4225  }
4226 
4227  /* Release query-local storage for events, including tuplestore if any */
4228  fdw_tuplestore = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4229  if (fdw_tuplestore)
4230  {
4231  tuplestore_end(fdw_tuplestore);
4232  afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4233  }
4234  old_tuplestore = afterTriggers.old_tuplestores[afterTriggers.query_depth];
4235  if (old_tuplestore)
4236  {
4237  tuplestore_end(old_tuplestore);
4238  afterTriggers.old_tuplestores[afterTriggers.query_depth] = NULL;
4239  }
4240  new_tuplestore = afterTriggers.new_tuplestores[afterTriggers.query_depth];
4241  if (new_tuplestore)
4242  {
4243  tuplestore_end(new_tuplestore);
4244  afterTriggers.new_tuplestores[afterTriggers.query_depth] = NULL;
4245  }
4246  afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4247 
4248  afterTriggers.query_depth--;
4249 }
4250 
4251 
4252 /* ----------
4253  * AfterTriggerFireDeferred()
4254  *
4255  * Called just before the current transaction is committed. At this
4256  * time we invoke all pending DEFERRED triggers.
4257  *
4258  * It is possible for other modules to queue additional deferred triggers
4259  * during pre-commit processing; therefore xact.c may have to call this
4260  * multiple times.
4261  * ----------
4262  */
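/*
 * Illustrative note (added, not part of trigger.c): a sketch of how deferred
 * events reach this function, assuming hypothetical tables parent/child with
 * a DEFERRABLE foreign key:
 *
 *     BEGIN;
 *     SET CONSTRAINTS ALL DEFERRED;
 *     INSERT INTO child VALUES (42);    -- RI check queued, not fired
 *     INSERT INTO parent VALUES (42);
 *     COMMIT;                           -- queued RI trigger fires here
 *
 * If the parent row were still missing at COMMIT, the error would be raised
 * at this point rather than at the INSERT.
 */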
4263 void
4264 AfterTriggerFireDeferred(void)
4265 {
4266  AfterTriggerEventList *events;
4267  bool snap_pushed = false;
4268 
4269  /* Must not be inside a query */
4270  Assert(afterTriggers.query_depth == -1);
4271 
4272  /*
4273  * If there are any triggers to fire, make sure we have set a snapshot for
4274  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
4275  * can't assume ActiveSnapshot is valid on entry.)
4276  */
4277  events = &afterTriggers.events;
4278  if (events->head != NULL)
4279  {
4280   PushActiveSnapshot(GetTransactionSnapshot());
4281   snap_pushed = true;
4282  }
4283 
4284  /*
4285  * Run all the remaining triggers. Loop until they are all gone, in case
4286  * some trigger queues more for us to do.
4287  */
4288  while (afterTriggerMarkEvents(events, NULL, false))
4289  {
4290  CommandId firing_id = afterTriggers.firing_counter++;
4291 
4292  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4293  break; /* all fired */
4294  }
4295 
4296  /*
4297  * We don't bother freeing the event list, since it will go away anyway
4298  * (and more efficiently than via pfree) in AfterTriggerEndXact.
4299  */
4300 
4301  if (snap_pushed)
4302   PopActiveSnapshot();
4303 }
4304 
4305 
4306 /* ----------
4307  * AfterTriggerEndXact()
4308  *
4309  * The current transaction is finishing.
4310  *
4311  * Any unfired triggers are canceled so we simply throw
4312  * away anything we know.
4313  *
4314  * Note: it is possible for this to be called repeatedly in case of
4315  * error during transaction abort; therefore, do not complain if
4316  * already closed down.
4317  * ----------
4318  */
4319 void
4320 AfterTriggerEndXact(bool isCommit)
4321 {
4322  /*
4323  * Forget the pending-events list.
4324  *
4325  * Since all the info is in TopTransactionContext or children thereof, we
4326  * don't really need to do anything to reclaim memory. However, the
4327  * pending-events list could be large, and so it's useful to discard it as
4328  * soon as possible --- especially if we are aborting because we ran out
4329  * of memory for the list!
4330  */
4331  if (afterTriggers.event_cxt)
4332  {
4333  MemoryContextDelete(afterTriggers.event_cxt);
4334  afterTriggers.event_cxt = NULL;
4335  afterTriggers.events.head = NULL;
4336  afterTriggers.events.tail = NULL;
4337  afterTriggers.events.tailfree = NULL;
4338  }
4339 
4340  /*
4341  * Forget any subtransaction state as well. Since this can't be very
4342  * large, we let the eventual reset of TopTransactionContext free the
4343  * memory instead of doing it here.
4344  */
4345  afterTriggers.state_stack = NULL;
4346  afterTriggers.events_stack = NULL;
4347  afterTriggers.depth_stack = NULL;
4348  afterTriggers.firing_stack = NULL;
4349  afterTriggers.maxtransdepth = 0;
4350 
4351 
4352  /*
4353  * Forget the query stack and constraint-related state information. As
4354  * with the subtransaction state information, we don't bother freeing the
4355  * memory here.
4356  */
4357  afterTriggers.query_stack = NULL;
4358  afterTriggers.fdw_tuplestores = NULL;
4359  afterTriggers.old_tuplestores = NULL;
4360  afterTriggers.new_tuplestores = NULL;
4361  afterTriggers.maxquerydepth = 0;
4362  afterTriggers.state = NULL;
4363 
4364  /* No more afterTriggers manipulation until next transaction starts. */
4365  afterTriggers.query_depth = -1;
4366 }
4367 
4368 /*
4369  * AfterTriggerBeginSubXact()
4370  *
4371  * Start a subtransaction.
4372  */
4373 void
4374 AfterTriggerBeginSubXact(void)
4375 {
4376  int my_level = GetCurrentTransactionNestLevel();
4377 
4378  /*
4379  * Allocate more space in the stacks if needed. (Note: because the
4380  * minimum nest level of a subtransaction is 2, we waste the first couple
4381  * entries of each array; not worth the notational effort to avoid it.)
4382  */
4383  while (my_level >= afterTriggers.maxtransdepth)
4384  {
4385  if (afterTriggers.maxtransdepth == 0)
4386  {
4387  MemoryContext old_cxt;
4388 
4390 
4391 #define DEFTRIG_INITALLOC 8
4392  afterTriggers.state_stack = (SetConstraintState *)
4393  palloc(DEFTRIG_INITALLOC * sizeof(SetConstraintState));
4394   afterTriggers.events_stack = (AfterTriggerEventList *)
4395    palloc(DEFTRIG_INITALLOC * sizeof(AfterTriggerEventList));
4396   afterTriggers.depth_stack = (int *)
4397  palloc(DEFTRIG_INITALLOC * sizeof(int));
4398  afterTriggers.firing_stack = (CommandId *)
4399  palloc(DEFTRIG_INITALLOC * sizeof(CommandId));
4400  afterTriggers.maxtransdepth = DEFTRIG_INITALLOC;
4401 
4402  MemoryContextSwitchTo(old_cxt);
4403  }
4404  else
4405  {
4406  /* repalloc will keep the stacks in the same context */
4407  int new_alloc = afterTriggers.maxtransdepth * 2;
4408 
4409  afterTriggers.state_stack = (SetConstraintState *)
4410  repalloc(afterTriggers.state_stack,
4411  new_alloc * sizeof(SetConstraintState));
4412  afterTriggers.events_stack = (AfterTriggerEventList *)
4413  repalloc(afterTriggers.events_stack,
4414  new_alloc * sizeof(AfterTriggerEventList));
4415  afterTriggers.depth_stack = (int *)
4416  repalloc(afterTriggers.depth_stack,
4417  new_alloc * sizeof(int));
4418  afterTriggers.firing_stack = (CommandId *)
4419  repalloc(afterTriggers.firing_stack,
4420  new_alloc * sizeof(CommandId));
4421  afterTriggers.maxtransdepth = new_alloc;
4422  }
4423  }
4424 
4425  /*
4426  * Push the current information into the stack. The SET CONSTRAINTS state
4427  * is not saved until/unless changed. Likewise, we don't make a
4428  * per-subtransaction event context until needed.
4429  */
4430  afterTriggers.state_stack[my_level] = NULL;
4431  afterTriggers.events_stack[my_level] = afterTriggers.events;
4432  afterTriggers.depth_stack[my_level] = afterTriggers.query_depth;
4433  afterTriggers.firing_stack[my_level] = afterTriggers.firing_counter;
4434 }
4435 
4436 /*
4437  * AfterTriggerEndSubXact()
4438  *
4439  * The current subtransaction is ending.
4440  */
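/*
 * Illustrative note (added, not part of trigger.c): on subtransaction abort
 * the events queued inside it are discarded, e.g. with a hypothetical
 * deferred constraint on table child:
 *
 *     BEGIN;
 *     SET CONSTRAINTS ALL DEFERRED;
 *     SAVEPOINT s1;
 *     INSERT INTO child VALUES (7);     -- queues a deferred trigger event
 *     ROLLBACK TO s1;                   -- event list restored; nothing fires
 *     COMMIT;
 */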
4441 void
4442 AfterTriggerEndSubXact(bool isCommit)
4443 {
4444  int my_level = GetCurrentTransactionNestLevel();
4445  SetConstraintState state;
4446  AfterTriggerEvent event;
4447  AfterTriggerEventChunk *chunk;
4448  CommandId subxact_firing_id;
4449 
4450  /*
4451  * Pop the prior state if needed.
4452  */
4453  if (isCommit)
4454  {
4455  Assert(my_level < afterTriggers.maxtransdepth);
4456  /* If we saved a prior state, we don't need it anymore */
4457  state = afterTriggers.state_stack[my_level];
4458  if (state != NULL)
4459  pfree(state);
4460  /* this avoids double pfree if error later: */
4461  afterTriggers.state_stack[my_level] = NULL;
4462  Assert(afterTriggers.query_depth ==
4463  afterTriggers.depth_stack[my_level]);
4464  }
4465  else
4466  {
4467  /*
4468  * Aborting. It is possible subxact start failed before calling
4469  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
4470  * stack levels that aren't there.
4471  */
4472  if (my_level >= afterTriggers.maxtransdepth)
4473  return;
4474 
4475  /*
4476  * Release any event lists from queries being aborted, and restore
4477  * query_depth to its pre-subxact value. This assumes that a
4478  * subtransaction will not add events to query levels started in an
4479  * earlier transaction state.
4480  */
4481  while (afterTriggers.query_depth > afterTriggers.depth_stack[my_level])
4482  {
4483  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
4484  {
4485  Tuplestorestate *ts;
4486 
4487  ts = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4488  if (ts)
4489  {
4490  tuplestore_end(ts);
4491  afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4492  }
4493  ts = afterTriggers.old_tuplestores[afterTriggers.query_depth];
4494  if (ts)
4495  {
4496  tuplestore_end(ts);
4497  afterTriggers.old_tuplestores[afterTriggers.query_depth] = NULL;
4498  }
4499  ts = afterTriggers.new_tuplestores[afterTriggers.query_depth];
4500  if (ts)
4501  {
4502  tuplestore_end(ts);
4503  afterTriggers.new_tuplestores[afterTriggers.query_depth] = NULL;
4504  }
4505 
4506  afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4507  }
4508 
4509  afterTriggers.query_depth--;
4510  }
4511  Assert(afterTriggers.query_depth ==
4512  afterTriggers.depth_stack[my_level]);
4513 
4514  /*
4515  * Restore the global deferred-event list to its former length,
4516  * discarding any events queued by the subxact.
4517  */
4518  afterTriggerRestoreEventList(&afterTriggers.events,
4519  &afterTriggers.events_stack[my_level]);
4520 
4521  /*
4522  * Restore the trigger state. If the saved state is NULL, then this
4523  * subxact didn't save it, so it doesn't need restoring.
4524  */
4525  state = afterTriggers.state_stack[my_level];
4526  if (state != NULL)
4527  {
4528  pfree(afterTriggers.state);
4529  afterTriggers.state = state;
4530  }
4531  /* this avoids double pfree if error later: */
4532  afterTriggers.state_stack[my_level] = NULL;
4533 
4534  /*
4535  * Scan for any remaining deferred events that were marked DONE or IN
4536  * PROGRESS by this subxact or a child, and un-mark them. We can
4537  * recognize such events because they have a firing ID greater than or
4538  * equal to the firing_counter value we saved at subtransaction start.
4539  * (This essentially assumes that the current subxact includes all
4540  * subxacts started after it.)
4541  */
4542  subxact_firing_id = afterTriggers.firing_stack[my_level];
4543  for_each_event_chunk(event, chunk, afterTriggers.events)
4544  {
4545  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4546 
4547    if (event->ate_flags &
4548     (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
4549    {
4550     if (evtshared->ats_firing_id >= subxact_firing_id)
4551      event->ate_flags &=
4552       ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
4553    }
4554  }
4555  }
4556 }
4557 
4558 /* ----------
4559  * AfterTriggerEnlargeQueryState()
4560  *
4561  * Prepare the necessary state so that we can record AFTER trigger events
4562  * queued by a query. It is allowed to have nested queries within a
4563  * (sub)transaction, so we need to have separate state for each query
4564  * nesting level.
4565  * ----------
4566  */
4567 static void
4568 AfterTriggerEnlargeQueryState(void)
4569 {
4570  int init_depth = afterTriggers.maxquerydepth;
4571 
4572  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
4573 
4574  if (afterTriggers.maxquerydepth == 0)
4575  {
4576  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
4577 
4578   afterTriggers.query_stack = (AfterTriggerEventList *)
4579    MemoryContextAlloc(TopTransactionContext,
4580        new_alloc * sizeof(AfterTriggerEventList));
4581   afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4582    MemoryContextAllocZero(TopTransactionContext,
4583        new_alloc * sizeof(Tuplestorestate *));
4584   afterTriggers.old_tuplestores = (Tuplestorestate **)
4585    MemoryContextAllocZero(TopTransactionContext,
4586        new_alloc * sizeof(Tuplestorestate *));
4587   afterTriggers.new_tuplestores = (Tuplestorestate **)
4588    MemoryContextAllocZero(TopTransactionContext,
4589        new_alloc * sizeof(Tuplestorestate *));
4590  afterTriggers.maxquerydepth = new_alloc;
4591  }
4592  else
4593  {
4594  /* repalloc will keep the stack in the same context */
4595  int old_alloc = afterTriggers.maxquerydepth;
4596  int new_alloc = Max(afterTriggers.query_depth + 1,
4597  old_alloc * 2);
4598 
4599  afterTriggers.query_stack = (AfterTriggerEventList *)
4600  repalloc(afterTriggers.query_stack,
4601  new_alloc * sizeof(AfterTriggerEventList));
4602  afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4603  repalloc(afterTriggers.fdw_tuplestores,
4604  new_alloc * sizeof(Tuplestorestate *));
4605  afterTriggers.old_tuplestores = (Tuplestorestate **)
4606  repalloc(afterTriggers.old_tuplestores,
4607  new_alloc * sizeof(Tuplestorestate *));
4608  afterTriggers.new_tuplestores = (Tuplestorestate **)
4609  repalloc(afterTriggers.new_tuplestores,
4610  new_alloc * sizeof(Tuplestorestate *));
4611  /* Clear newly-allocated slots for subsequent lazy initialization. */
4612  memset(afterTriggers.fdw_tuplestores + old_alloc,
4613  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4614  memset(afterTriggers.old_tuplestores + old_alloc,
4615  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4616  memset(afterTriggers.new_tuplestores + old_alloc,
4617  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4618  afterTriggers.maxquerydepth = new_alloc;
4619  }
4620 
4621  /* Initialize new query lists to empty */
4622  while (init_depth < afterTriggers.maxquerydepth)
4623  {
4624  AfterTriggerEventList *events;
4625 
4626  events = &afterTriggers.query_stack[init_depth];
4627  events->head = NULL;
4628  events->tail = NULL;
4629  events->tailfree = NULL;
4630 
4631  ++init_depth;
4632  }
4633 }
4634 
4635 /*
4636  * Create an empty SetConstraintState with room for numalloc trigstates
4637  */
4638 static SetConstraintState
4639 SetConstraintStateCreate(int numalloc)
4640 {
4641  SetConstraintState state;
4642 
4643  /* Behave sanely with numalloc == 0 */
4644  if (numalloc <= 0)
4645  numalloc = 1;
4646 
4647  /*
4648  * We assume that zeroing will correctly initialize the state values.
4649  */
4650  state = (SetConstraintState)
4651   MemoryContextAllocZero(TopTransactionContext,
4652        offsetof(SetConstraintStateData, trigstates) +
4653  numalloc * sizeof(SetConstraintTriggerData));
4654 
4655  state->numalloc = numalloc;
4656 
4657  return state;
4658 }
4659 
4660 /*
4661  * Copy a SetConstraintState
4662  */
4663 static SetConstraintState
4664 SetConstraintStateCopy(SetConstraintState origstate)
4665 {
4666  SetConstraintState state;
4667 
4668  state = SetConstraintStateCreate(origstate->numstates);
4669 
4670  state->all_isset = origstate->all_isset;
4671  state->all_isdeferred = origstate->all_isdeferred;
4672  state->numstates = origstate->numstates;
4673  memcpy(state->trigstates, origstate->trigstates,
4674  origstate->numstates * sizeof(SetConstraintTriggerData));
4675 
4676  return state;
4677 }
4678 
4679 /*
4680  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
4681  * pointer to the state object (it will change if we have to repalloc).
4682  */
4683 static SetConstraintState
4684 SetConstraintStateAddItem(SetConstraintState state,
4685        Oid tgoid, bool tgisdeferred)
4686 {
4687  if (state->numstates >= state->numalloc)
4688  {
4689  int newalloc = state->numalloc * 2;
4690 
4691  newalloc = Max(newalloc, 8); /* in case original has size 0 */
4692  state = (SetConstraintState)
4693  repalloc(state,
4694  offsetof(SetConstraintStateData, trigstates) +
4695  newalloc * sizeof(SetConstraintTriggerData));
4696  state->numalloc = newalloc;
4697  Assert(state->numstates < state->numalloc);
4698  }
4699 
4700  state->trigstates[state->numstates].sct_tgoid = tgoid;
4701  state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
4702  state->numstates++;
4703 
4704  return state;
4705 }
4706 
4707 /* ----------
4708  * AfterTriggerSetState()
4709  *
4710  * Execute the SET CONSTRAINTS ... utility command.
4711  * ----------
4712  */
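/*
 * Illustrative note (added, not part of trigger.c): SET CONSTRAINTS changes
 * the firing mode of deferrable constraint triggers for the remainder of the
 * transaction, e.g. with a hypothetical deferrable constraint my_fk:
 *
 *     BEGIN;
 *     SET CONSTRAINTS my_fk DEFERRED;    -- checks wait until commit
 *     UPDATE parent SET id = id + 1;
 *     UPDATE child  SET parent_id = parent_id + 1;
 *     SET CONSTRAINTS my_fk IMMEDIATE;   -- retroactively fires queued checks
 *     COMMIT;
 *
 * The retroactive firing on switching back to IMMEDIATE is handled at the
 * bottom of this function.
 */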
4713 void
4714 AfterTriggerSetState(ConstraintsSetStmt *stmt)
4715 {
4716  int my_level = GetCurrentTransactionNestLevel();
4717 
4718  /* If we haven't already done so, initialize our state. */
4719  if (afterTriggers.state == NULL)
4720  afterTriggers.state = SetConstraintStateCreate(8);
4721 
4722  /*
4723  * If in a subtransaction, and we didn't save the current state already,
4724  * save it so it can be restored if the subtransaction aborts.
4725  */
4726  if (my_level > 1 &&
4727  afterTriggers.state_stack[my_level] == NULL)
4728  {
4729  afterTriggers.state_stack[my_level] =
4730  SetConstraintStateCopy(afterTriggers.state);
4731  }
4732 
4733  /*
4734  * Handle SET CONSTRAINTS ALL ...
4735  */
4736  if (stmt->constraints == NIL)
4737  {
4738  /*
4739  * Forget any previous SET CONSTRAINTS commands in this transaction.
4740  */
4741  afterTriggers.state->numstates = 0;
4742 
4743  /*
4744  * Set the per-transaction ALL state to known.
4745  */
4746  afterTriggers.state->all_isset = true;
4747  afterTriggers.state->all_isdeferred = stmt->deferred;
4748  }
4749  else
4750  {
4751  Relation conrel;
4752  Relation tgrel;
4753  List *conoidlist = NIL;
4754  List *tgoidlist = NIL;
4755  ListCell *lc;
4756 
4757  /*
4758  * Handle SET CONSTRAINTS constraint-name [, ...]
4759  *
4760  * First, identify all the named constraints and make a list of their
4761  * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
4762  * the same name within a schema, the specifications are not
4763  * necessarily unique. Our strategy is to target all matching
4764  * constraints within the first search-path schema that has any
4765  * matches, but disregard matches in schemas beyond the first match.
4766  * (This is a bit odd but it's the historical behavior.)
4767   */
4768   conrel = heap_open(ConstraintRelationId, AccessShareLock);
4769 
4770  foreach(lc, stmt->constraints)
4771  {
4772  RangeVar *constraint = lfirst(lc);
4773  bool found;
4774  List *namespacelist;
4775  ListCell *nslc;
4776 
4777  if (constraint->catalogname)
4778  {
4779  if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
4780  ereport(ERROR,
4781  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4782  errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
4783  constraint->catalogname, constraint->schemaname,
4784  constraint->relname)));
4785  }
4786 
4787  /*
4788  * If we're given the schema name with the constraint, look only
4789  * in that schema. If given a bare constraint name, use the
4790  * search path to find the first matching constraint.
4791  */
4792  if (constraint->schemaname)
4793  {
4794  Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
4795  false);
4796 
4797  namespacelist = list_make1_oid(namespaceId);
4798  }
4799  else
4800  {
4801  namespacelist = fetch_search_path(true);
4802  }
4803 
4804  found = false;
4805  foreach(nslc, namespacelist)
4806  {
4807  Oid namespaceId = lfirst_oid(nslc);
4808  SysScanDesc conscan;
4809  ScanKeyData skey[2];
4810  HeapTuple tup;
4811 
4812     ScanKeyInit(&skey[0],
4813        Anum_pg_constraint_conname,
4814        BTEqualStrategyNumber, F_NAMEEQ,
4815  CStringGetDatum(constraint->relname));
4816     ScanKeyInit(&skey[1],
4817        Anum_pg_constraint_connamespace,
4818        BTEqualStrategyNumber, F_OIDEQ,
4819  ObjectIdGetDatum(namespaceId));
4820 
4821  conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
4822  true, NULL, 2, skey);
4823 
4824  while (HeapTupleIsValid(tup = systable_getnext(conscan)))
4825  {
4827 
4828  if (con->condeferrable)
4829  conoidlist = lappend_oid(conoidlist,
4830  HeapTupleGetOid(tup));
4831  else if (stmt->deferred)
4832  ereport(ERROR,
4833  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
4834  errmsg("constraint \"%s\" is not deferrable",
4835  constraint->relname)));
4836  found = true;
4837  }
4838 
4839  systable_endscan(conscan);
4840 
4841  /*
4842  * Once we've found a matching constraint we do not search
4843  * later parts of the search path.
4844  */
4845  if (found)
4846  break;
4847  }
4848 
4849  list_free(namespacelist);
4850 
4851  /*
4852  * Not found ?
4853  */
4854  if (!found)
4855  ereport(ERROR,
4856  (errcode(ERRCODE_UNDEFINED_OBJECT),
4857  errmsg("constraint \"%s\" does not exist",
4858  constraint->relname)));
4859  }
4860 
4861  heap_close(conrel, AccessShareLock);
4862 
4863  /*
4864  * Now, locate the trigger(s) implementing each of these constraints,
4865  * and make a list of their OIDs.
4866   */
4867   tgrel = heap_open(TriggerRelationId, AccessShareLock);
4868 
4869  foreach(lc, conoidlist)
4870  {
4871  Oid conoid = lfirst_oid(lc);
4872  bool found;
4873  ScanKeyData skey;
4874  SysScanDesc tgscan;
4875  HeapTuple htup;
4876 
4877  found = false;
4878 
4879    ScanKeyInit(&skey,
4880       Anum_pg_trigger_tgconstraint,
4881       BTEqualStrategyNumber, F_OIDEQ,
4882  ObjectIdGetDatum(conoid));
4883 
4884  tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
4885  NULL, 1, &skey);
4886 
4887  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
4888  {
4889  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
4890 
4891  /*
4892  * Silently skip triggers that are marked as non-deferrable in
4893  * pg_trigger. This is not an error condition, since a
4894  * deferrable RI constraint may have some non-deferrable
4895  * actions.
4896  */
4897  if (pg_trigger->tgdeferrable)
4898  tgoidlist = lappend_oid(tgoidlist,
4899  HeapTupleGetOid(htup));
4900 
4901  found = true;
4902  }
4903 
4904  systable_endscan(tgscan);
4905 
4906  /* Safety check: a deferrable constraint should have triggers */
4907  if (!found)
4908  elog(ERROR, "no triggers found for constraint with OID %u",
4909  conoid);
4910  }
4911 
4912  heap_close(tgrel, AccessShareLock);
4913 
4914  /*
4915  * Now we can set the trigger states of individual triggers for this
4916  * xact.
4917  */
4918  foreach(lc, tgoidlist)
4919  {
4920  Oid tgoid = lfirst_oid(lc);
4921  SetConstraintState state = afterTriggers.state;
4922  bool found = false;
4923  int i;
4924 
4925  for (i = 0; i < state->numstates; i++)
4926  {
4927  if (state->trigstates[i].sct_tgoid == tgoid)
4928  {
4929  state->trigstates[i].sct_tgisdeferred = stmt->deferred;
4930  found = true;
4931  break;
4932  }
4933  }
4934  if (!found)
4935  {
4936  afterTriggers.state =
4937  SetConstraintStateAddItem(state, tgoid, stmt->deferred);
4938  }
4939  }
4940  }
4941 
4942  /*
4943  * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
4944  * checks against that constraint must be made when the SET CONSTRAINTS
4945  * command is executed -- i.e. the effects of the SET CONSTRAINTS command
4946  * apply retroactively. We've updated the constraints state, so scan the
4947  * list of previously deferred events to fire any that have now become
4948  * immediate.
4949  *
4950  * Obviously, if this was SET ... DEFERRED then it can't have converted
4951  * any unfired events to immediate, so we need do nothing in that case.
4952  */
4953  if (!stmt->deferred)
4954  {
4955  AfterTriggerEventList *events = &afterTriggers.events;
4956  bool snapshot_set = false;
4957 
4958  while (afterTriggerMarkEvents(events, NULL, true))
4959  {
4960  CommandId firing_id = afterTriggers.firing_counter++;
4961 
4962  /*
4963  * Make sure a snapshot has been established in case trigger
4964  * functions need one. Note that we avoid setting a snapshot if
4965  * we don't find at least one trigger that has to be fired now.
4966  * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
4967  * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
4968  * at the start of a transaction it's not possible for any trigger
4969  * events to be queued yet.)
4970  */
4971  if (!snapshot_set)
4972    {
4973     PushActiveSnapshot(GetTransactionSnapshot());
4974     snapshot_set = true;
4975  }
4976 
4977  /*
4978  * We can delete fired events if we are at top transaction level,
4979  * but we'd better not if inside a subtransaction, since the
4980  * subtransaction could later get rolled back.
4981  */
4982  if (afterTriggerInvokeEvents(events, firing_id, NULL,
4983  !IsSubTransaction()))
4984  break; /* all fired */
4985  }
4986 
4987   if (snapshot_set)
4988    PopActiveSnapshot();
4989  }
4990 }
4991 
4992 /* ----------
4993  * AfterTriggerPendingOnRel()
4994  * Test to see if there are any pending after-trigger events for rel.
4995  *
4996  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
4997  * it is unsafe to perform major surgery on a relation. Note that only
4998  * local pending events are examined. We assume that having exclusive lock
4999  * on a rel guarantees there are no unserviced events in other backends ---
5000  * but having a lock does not prevent there being such events in our own.
5001  *
5002  * In some scenarios it'd be reasonable to remove pending events (more
5003  * specifically, mark them DONE by the current subxact) but without a lot
5004  * of knowledge of the trigger semantics we can't do this in general.
5005  * ----------
5006  */
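/*
 * Illustrative note (added, not part of trigger.c): one way to hit this
 * check, assuming a hypothetical table child whose foreign key is
 * DEFERRABLE INITIALLY DEFERRED:
 *
 *     BEGIN;
 *     INSERT INTO child VALUES (1);   -- queues a deferred RI trigger event
 *     TRUNCATE child;                 -- rejected: pending trigger events
 *
 * The TRUNCATE is refused rather than silently discarding the queued event;
 * the exact error text comes from the calling code, not from this file.
 */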
5007 bool
5008 AfterTriggerPendingOnRel(Oid relid)
5009 {
5010  AfterTriggerEvent event;
5011  AfterTriggerEventChunk *chunk;
5012  int depth;
5013 
5014  /* Scan queued events */
5015  for_each_event_chunk(event, chunk, afterTriggers.events)
5016  {
5017  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5018 
5019  /*
5020  * We can ignore completed events. (Even if a DONE flag is rolled
5021  * back by subxact abort, it's OK because the effects of the TRUNCATE
5022  * or whatever must get rolled back too.)
5023  */
5024  if (event->ate_flags & AFTER_TRIGGER_DONE)
5025  continue;
5026 
5027  if (evtshared->ats_relid == relid)
5028  return true;
5029  }
5030 
5031  /*
5032  * Also scan events queued by incomplete queries. This could only matter
5033  * if TRUNCATE/etc is executed by a function or trigger within an updating
5034  * query on the same relation, which is pretty perverse, but let's check.
5035  */
5036  for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
5037  {
5038  for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth])
5039  {
5040  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5041 
5042  if (event->ate_flags & AFTER_TRIGGER_DONE)
5043  continue;
5044 
5045  if (evtshared->ats_relid == relid)
5046  return true;
5047  }
5048  }
5049 
5050  return false;
5051 }
5052 
5053 
5054 /* ----------
5055  * AfterTriggerSaveEvent()
5056  *
5057  * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5058  * be fired for an event.
5059  *
5060  * NOTE: this is called whenever there are any triggers associated with
5061  * the event (even if they are disabled). This function decides which
5062  * triggers actually need to be queued. It is also called after each row,
5063  * even if there are no triggers for that event, if there are any AFTER
5064  * STATEMENT triggers for the statement which use transition tables, so that
5065  * the transition tuplestores can be built.
5066  *
5067  * Transition tuplestores are built now, rather than when events are pulled
5068  * off of the queue because AFTER ROW triggers are allowed to select from the
5069  * transition tables for the statement.
5070  * ----------
5071  */
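/*
 * Illustrative note (added, not part of trigger.c): the transition
 * tuplestores filled here are what a statement-level trigger declared with
 * transition tables reads, e.g. the hypothetical
 *
 *     CREATE TRIGGER audit_ins
 *         AFTER INSERT ON orders
 *         REFERENCING NEW TABLE AS new_rows
 *         FOR EACH STATEMENT
 *         EXECUTE PROCEDURE audit_insert_fn();
 *
 * where the trigger function can SELECT FROM new_rows to see all rows
 * inserted by the statement.
 */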
5072 static void
5073 AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
5074       int event, bool row_trigger,
5075  HeapTuple oldtup, HeapTuple newtup,
5076  List *recheckIndexes, Bitmapset *modifiedCols)
5077 {
5078  Relation rel = relinfo->ri_RelationDesc;
5079  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5080  AfterTriggerEventData new_event;
5081  AfterTriggerSharedData new_shared;
5082  char relkind = relinfo->ri_RelationDesc->rd_rel->relkind;
5083  int tgtype_event;
5084  int tgtype_level;
5085  int i;
5086  Tuplestorestate *fdw_tuplestore = NULL;
5087 
5088  /*
5089  * Check state. We use a normal test not Assert because it is possible to
5090  * reach here in the wrong state given misconfigured RI triggers, in
5091  * particular deferring a cascade action trigger.
5092  */
5093  if (afterTriggers.query_depth < 0)
5094  elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
5095 
5096  /* Be sure we have enough space to record events at this query depth. */
5097  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5098   AfterTriggerEnlargeQueryState();
5099 
5100  /*
5101  * If the relation has AFTER ... FOR EACH ROW triggers, capture rows into
5102  * transition tuplestores for this depth.
5103  */
5104  if (row_trigger)
5105  {
5106  if ((event == TRIGGER_EVENT_DELETE &&
5107  trigdesc->trig_delete_old_table) ||
5108  (event == TRIGGER_EVENT_UPDATE &&
5109  trigdesc->trig_update_old_table))
5110  {
5111  Tuplestorestate *old_tuplestore;
5112 
5113  Assert(oldtup != NULL);
5114    old_tuplestore =
5115     GetTriggerTransitionTuplestore
5116     (afterTriggers.old_tuplestores);
5117  tuplestore_puttuple(old_tuplestore, oldtup);
5118  }
5119  if ((event == TRIGGER_EVENT_INSERT &&
5120  trigdesc->trig_insert_new_table) ||
5121  (event == TRIGGER_EVENT_UPDATE &&
5122  trigdesc->trig_update_new_table))
5123  {
5124  Tuplestorestate *new_tuplestore;
5125 
5126  Assert(newtup != NULL);
5127    new_tuplestore =
5128     GetTriggerTransitionTuplestore
5129     (afterTriggers.new_tuplestores);
5130  tuplestore_puttuple(new_tuplestore, newtup);
5131  }
5132 
5133  /* If transition tables are the only reason we're here, return. */
5134  if ((event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
5135  (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
5136  (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
5137  return;
5138  }
5139 
5140  /*
5141  * Validate the event code and collect the associated tuple CTIDs.
5142  *
5143  * The event code will be used both as a bitmask and an array offset, so
5144  * validation is important to make sure we don't walk off the edge of our
5145  * arrays.
5146  */
5147  switch (event)
5148  {
5149  case TRIGGER_EVENT_INSERT:
5150  tgtype_event = TRIGGER_TYPE_INSERT;
5151  if (row_trigger)
5152  {
5153  Assert(oldtup == NULL);
5154  Assert(newtup != NULL);
5155  ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid1));
5156  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5157  }
5158  else
5159  {
5160  Assert(oldtup == NULL);
5161  Assert(newtup == NULL);
5162  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5163  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5164  }
5165  break;
5166  case TRIGGER_EVENT_DELETE:
5167  tgtype_event = TRIGGER_TYPE_DELETE;
5168  if (row_trigger)
5169  {
5170  Assert(oldtup != NULL);
5171  Assert(newtup == NULL);
5172  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5173  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5174  }
5175  else
5176  {
5177  Assert(oldtup == NULL);
5178  Assert(newtup == NULL);
5179  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5180  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5181  }
5182  break;
5183  case TRIGGER_EVENT_UPDATE:
5184  tgtype_event = TRIGGER_TYPE_UPDATE;
5185  if (row_trigger)
5186  {
5187  Assert(oldtup != NULL);
5188  Assert(newtup != NULL);
5189  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5190  ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid2));
5191  }
5192  else
5193  {
5194  Assert(oldtup == NULL);
5195  Assert(newtup == NULL);
5196  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5197  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5198  }
5199    break;
5200   case TRIGGER_EVENT_TRUNCATE:
5201    tgtype_event = TRIGGER_TYPE_TRUNCATE;
5202  Assert(oldtup == NULL);
5203  Assert(newtup == NULL);
5204  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5205  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5206  break;
5207  default:
5208  elog(ERROR, "invalid after-trigger event code: %d", event);
5209  tgtype_event = 0; /* keep compiler quiet */
5210  break;
5211  }
5212 
5213  if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
5214   new_event.ate_flags = (row_trigger && event == TRIGGER_EVENT_UPDATE) ?
5215    AFTER_TRIGGER_2CTID : AFTER_TRIGGER_1CTID;
5216  /* else, we'll initialize ate_flags for each trigger */
5217 
5218  tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
5219 
5220  for (i = 0; i < trigdesc->numtriggers; i++)
5221  {
5222  Trigger *trigger = &trigdesc->triggers[i];
5223 
5224  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
5225        tgtype_level,
5226        TRIGGER_TYPE_AFTER,
5227        tgtype_event))
5228  continue;
5229  if (!TriggerEnabled(estate, relinfo, trigger, event,
5230  modifiedCols, oldtup, newtup))
5231  continue;
5232 
5233  if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
5234  {
5235  if (fdw_tuplestore == NULL)
5236  {
5237     fdw_tuplestore =
5238      GetTriggerTransitionTuplestore
5239      (afterTriggers.fdw_tuplestores);
5240  new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
5241  }
5242  else
5243  /* subsequent event for the same tuple */
5244  new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
5245  }
5246 
5247  /*
5248  * If the trigger is a foreign key enforcement trigger, there are
5249  * certain cases where we can skip queueing the event because we can
5250  * tell by inspection that the FK constraint will still pass.
5251  */
5252  if (TRIGGER_FIRED_BY_UPDATE(event))
5253  {
5254  switch (RI_FKey_trigger_type(trigger->tgfoid))
5255  {
5256  case RI_TRIGGER_PK:
5257  /* Update on trigger's PK table */
5258  if (!RI_FKey_pk_upd_check_required(trigger, rel,
5259  oldtup, newtup))
5260  {
5261  /* skip queuing this event */
5262  continue;
5263  }
5264  break;
5265 
5266  case RI_TRIGGER_FK:
5267  /* Update on trigger's FK table */
5268  if (!RI_FKey_fk_upd_check_required(trigger, rel,
5269  oldtup, newtup))
5270  {
5271  /* skip queuing this event */
5272  continue;
5273  }
5274  break;
5275 
5276  case RI_TRIGGER_NONE:
5277  /* Not an FK trigger */
5278  break;
5279  }
5280  }
5281 
5282  /*
5283  * If the trigger is a deferred unique constraint check trigger, only
5284  * queue it if the unique constraint was potentially violated, which
5285  * we know from index insertion time.
5286  */
5287  if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
5288  {
5289  if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
5290  continue; /* Uniqueness definitely not violated */
5291  }
5292 
5293  /*
5294  * Fill in event structure and add it to the current query's queue.
5295  */
5296  new_shared.ats_event =
5297  (event & TRIGGER_EVENT_OPMASK) |
5298  (row_trigger ? TRIGGER_EVENT_ROW : 0) |
5299  (trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
5300  (trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
5301  new_shared.ats_tgoid = trigger->tgoid;
5302  new_shared.ats_relid = RelationGetRelid(rel);
5303  new_shared.ats_firing_id = 0;
5304 
5305  afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth],
5306  &new_event, &new_shared);
5307  }
5308 
5309  /*
5310  * Finally, spool any foreign tuple(s). The tuplestore squashes them to
5311  * minimal tuples, so this loses any system columns. The executor lost
5312  * those columns before us, for an unrelated reason, so this is fine.
5313  */
5314  if (fdw_tuplestore)
5315  {
5316  if (oldtup != NULL)
5317  tuplestore_puttuple(fdw_tuplestore, oldtup);
5318  if (newtup != NULL)
5319  tuplestore_puttuple(fdw_tuplestore, newtup);
5320  }
5321 }
5322 
5323 Datum
5324 pg_trigger_depth(PG_FUNCTION_ARGS)
5325 {
5326  PG_RETURN_INT32(MyTriggerDepth);
5327 }