trigger.c
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/heapam.h"
18 #include "access/sysattr.h"
19 #include "access/htup_details.h"
20 #include "access/xact.h"
21 #include "catalog/catalog.h"
22 #include "catalog/dependency.h"
23 #include "catalog/indexing.h"
24 #include "catalog/objectaccess.h"
25 #include "catalog/pg_constraint.h"
26 #include "catalog/pg_constraint_fn.h"
27 #include "catalog/pg_proc.h"
28 #include "catalog/pg_trigger.h"
29 #include "catalog/pg_type.h"
30 #include "commands/dbcommands.h"
31 #include "commands/defrem.h"
32 #include "commands/trigger.h"
33 #include "executor/executor.h"
34 #include "miscadmin.h"
35 #include "nodes/bitmapset.h"
36 #include "nodes/makefuncs.h"
37 #include "optimizer/clauses.h"
38 #include "optimizer/var.h"
39 #include "parser/parse_clause.h"
40 #include "parser/parse_collate.h"
41 #include "parser/parse_func.h"
42 #include "parser/parse_relation.h"
43 #include "parser/parsetree.h"
44 #include "pgstat.h"
45 #include "rewrite/rewriteManip.h"
46 #include "storage/bufmgr.h"
47 #include "storage/lmgr.h"
48 #include "tcop/utility.h"
49 #include "utils/acl.h"
50 #include "utils/builtins.h"
51 #include "utils/bytea.h"
52 #include "utils/fmgroids.h"
53 #include "utils/inval.h"
54 #include "utils/lsyscache.h"
55 #include "utils/memutils.h"
56 #include "utils/rel.h"
57 #include "utils/snapmgr.h"
58 #include "utils/syscache.h"
59 #include "utils/tqual.h"
60 #include "utils/tuplestore.h"
61 
62 
63 /* GUC variables */
64 int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
65 
66 /* How many levels deep into trigger execution are we? */
67 static int MyTriggerDepth = 0;
68 
69 /*
70  * Note that similar macros also exist in executor/execMain.c. There does not
71  * appear to be any good header to put them into, given the structures that
72  * they use, so we let them be duplicated. Be sure to update all if one needs
73  * to be changed, however.
74  */
75 #define GetUpdatedColumns(relinfo, estate) \
76  (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
77 
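/*
 * [Editor's illustrative sketch, not part of trigger.c] The helper below is
 * hypothetical; it only shows how code that has the macro above in scope can
 * consume the updated-column set.  The members of that Bitmapset are
 * attribute numbers offset by FirstLowInvalidHeapAttributeNumber (see
 * access/sysattr.h).
 */
#ifdef TRIGGER_EXAMPLES
static bool
example_column_was_targeted(ResultRelInfo *relinfo, EState *estate,
							AttrNumber attnum)
{
	Bitmapset  *updatedCols = GetUpdatedColumns(relinfo, estate);

	/* shift the attribute number into the bitmapset's offset space */
	return bms_is_member(attnum - FirstLowInvalidHeapAttributeNumber,
						 updatedCols);
}
#endif							/* TRIGGER_EXAMPLES */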
78 /* Local function prototypes */
79 static void ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid);
80 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
81 static HeapTuple GetTupleForTrigger(EState *estate,
82  EPQState *epqstate,
83  ResultRelInfo *relinfo,
84  ItemPointer tid,
85  LockTupleMode lockmode,
86  TupleTableSlot **newSlot);
87 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
88  Trigger *trigger, TriggerEvent event,
89  Bitmapset *modifiedCols,
90  HeapTuple oldtup, HeapTuple newtup);
91 static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
92  int tgindx,
93  FmgrInfo *finfo,
94  Instrumentation *instr,
95  MemoryContext per_tuple_context);
96 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
97  int event, bool row_trigger,
98  HeapTuple oldtup, HeapTuple newtup,
99  List *recheckIndexes, Bitmapset *modifiedCols);
100 static void AfterTriggerEnlargeQueryState(void);
101 
102 
103 /*
104  * Create a trigger. Returns the address of the created trigger.
105  *
106  * queryString is the source text of the CREATE TRIGGER command.
107  * This must be supplied if a whenClause is specified, else it can be NULL.
108  *
109  * relOid, if nonzero, is the relation on which the trigger should be
110  * created. If zero, the name provided in the statement will be looked up.
111  *
112  * refRelOid, if nonzero, is the relation to which the constraint trigger
113  * refers. If zero, the constraint relation name provided in the statement
114  * will be looked up as needed.
115  *
116  * constraintOid, if nonzero, says that this trigger is being created
117  * internally to implement that constraint. A suitable pg_depend entry will
118  * be made to link the trigger to that constraint. constraintOid is zero when
119  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
120  * TRIGGER, we build a pg_constraint entry internally.)
121  *
122  * indexOid, if nonzero, is the OID of an index associated with the constraint.
123  * We do nothing with this except store it into pg_trigger.tgconstrindid.
124  *
125  * If isInternal is true then this is an internally-generated trigger.
126  * This argument sets the tgisinternal field of the pg_trigger entry, and
127  * if TRUE causes us to modify the given trigger name to ensure uniqueness.
128  *
129  * When isInternal is not true we require ACL_TRIGGER permissions on the
130  * relation, as well as ACL_EXECUTE on the trigger function. For internal
131  * triggers the caller must apply any required permission checks.
132  *
133  * Note: can return InvalidObjectAddress if we decided to not create a trigger
134  * at all, but a foreign-key constraint. This is a kluge for backwards
135  * compatibility.
136  */
137 ObjectAddress
138 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
139  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
140  bool isInternal)
141 {
142  int16 tgtype;
143  int ncolumns;
144  int16 *columns;
145  int2vector *tgattr;
146  Node *whenClause;
147  List *whenRtable;
148  char *qual;
149  Datum values[Natts_pg_trigger];
150  bool nulls[Natts_pg_trigger];
151  Relation rel;
152  AclResult aclresult;
153  Relation tgrel;
154  SysScanDesc tgscan;
155  ScanKeyData key;
156  Relation pgrel;
157  HeapTuple tuple;
158  Oid fargtypes[1]; /* dummy */
159  Oid funcoid;
160  Oid funcrettype;
161  Oid trigoid;
162  char internaltrigname[NAMEDATALEN];
163  char *trigname;
164  Oid constrrelid = InvalidOid;
165  ObjectAddress myself,
166  referenced;
167  char *oldtablename = NULL;
168  char *newtablename = NULL;
169 
170  if (OidIsValid(relOid))
171  rel = heap_open(relOid, ShareRowExclusiveLock);
172  else
173  rel = heap_openrv(stmt->relation, ShareRowExclusiveLock);
174 
175  /*
176  * Triggers must be on tables or views, and there are additional
177  * relation-type-specific restrictions.
178  */
179  if (rel->rd_rel->relkind == RELKIND_RELATION ||
180  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
181  {
182  /* Tables can't have INSTEAD OF triggers */
183  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
184  stmt->timing != TRIGGER_TYPE_AFTER)
185  ereport(ERROR,
186  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
187  errmsg("\"%s\" is a table",
188  RelationGetRelationName(rel)),
189  errdetail("Tables cannot have INSTEAD OF triggers.")));
190  /* Disallow ROW triggers on partitioned tables */
191  if (stmt->row && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
192  ereport(ERROR,
193  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
194  errmsg("\"%s\" is a partitioned table",
195  RelationGetRelationName(rel)),
196  errdetail("Partitioned tables cannot have ROW triggers.")));
197  }
198  else if (rel->rd_rel->relkind == RELKIND_VIEW)
199  {
200  /*
201  * Views can have INSTEAD OF triggers (which we check below are
202  * row-level), or statement-level BEFORE/AFTER triggers.
203  */
204  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
205  ereport(ERROR,
206  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
207  errmsg("\"%s\" is a view",
208  RelationGetRelationName(rel)),
209  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
210  /* Disallow TRUNCATE triggers on VIEWs */
211  if (TRIGGER_FOR_TRUNCATE(stmt->events))
212  ereport(ERROR,
213  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
214  errmsg("\"%s\" is a view",
215  RelationGetRelationName(rel)),
216  errdetail("Views cannot have TRUNCATE triggers.")));
217  }
218  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
219  {
220  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
221  stmt->timing != TRIGGER_TYPE_AFTER)
222  ereport(ERROR,
223  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
224  errmsg("\"%s\" is a foreign table",
225  RelationGetRelationName(rel)),
226  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
227 
228  if (TRIGGER_FOR_TRUNCATE(stmt->events))
229  ereport(ERROR,
230  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
231  errmsg("\"%s\" is a foreign table",
232  RelationGetRelationName(rel)),
233  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
234 
235  if (stmt->isconstraint)
236  ereport(ERROR,
237  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
238  errmsg("\"%s\" is a foreign table",
239  RelationGetRelationName(rel)),
240  errdetail("Foreign tables cannot have constraint triggers.")));
241  }
242  else
243  ereport(ERROR,
244  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
245  errmsg("\"%s\" is not a table or view",
246  RelationGetRelationName(rel))));
247 
248  if (!allowSystemTableMods && IsSystemRelation(rel))
249  ereport(ERROR,
250  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
251  errmsg("permission denied: \"%s\" is a system catalog",
252  RelationGetRelationName(rel))));
253 
254  if (stmt->isconstraint)
255  {
256  /*
257  * We must take a lock on the target relation to protect against
258  * concurrent drop. It's not clear that AccessShareLock is strong
259  * enough, but we certainly need at least that much... otherwise, we
260  * might end up creating a pg_constraint entry referencing a
261  * nonexistent table.
262  */
263  if (OidIsValid(refRelOid))
264  {
265  LockRelationOid(refRelOid, AccessShareLock);
266  constrrelid = refRelOid;
267  }
268  else if (stmt->constrrel != NULL)
269  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
270  false);
271  }
272 
273  /* permission checks */
274  if (!isInternal)
275  {
276  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
277  ACL_TRIGGER);
278  if (aclresult != ACLCHECK_OK)
279  aclcheck_error(aclresult, ACL_KIND_CLASS,
280  RelationGetRelationName(rel));
281 
282  if (OidIsValid(constrrelid))
283  {
284  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
285  ACL_TRIGGER);
286  if (aclresult != ACLCHECK_OK)
287  aclcheck_error(aclresult, ACL_KIND_CLASS,
288  get_rel_name(constrrelid));
289  }
290  }
291 
292  /* Compute tgtype */
293  TRIGGER_CLEAR_TYPE(tgtype);
294  if (stmt->row)
295  TRIGGER_SETT_ROW(tgtype);
296  tgtype |= stmt->timing;
297  tgtype |= stmt->events;
298 
299  /* Disallow ROW-level TRUNCATE triggers */
300  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
301  ereport(ERROR,
302  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
303  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
304 
305  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
306  if (TRIGGER_FOR_INSTEAD(tgtype))
307  {
308  if (!TRIGGER_FOR_ROW(tgtype))
309  ereport(ERROR,
310  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
311  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
312  if (stmt->whenClause)
313  ereport(ERROR,
314  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
315  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
316  if (stmt->columns != NIL)
317  ereport(ERROR,
318  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
319  errmsg("INSTEAD OF triggers cannot have column lists")));
320  }
321 
322  /*
323  * We don't yet support naming ROW transition variables, but the parser
324  * recognizes the syntax so we can give a nicer message here.
325  *
326  * Per standard, REFERENCING TABLE names are only allowed on AFTER
327  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
328  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
329  * only allowed once. Per standard, OLD may not be specified when
330  * creating a trigger only for INSERT, and NEW may not be specified when
331  * creating a trigger only for DELETE.
332  *
333  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
334  * reference both ROW and TABLE transition data.
335  */
336  if (stmt->transitionRels != NIL)
337  {
338  List *varList = stmt->transitionRels;
339  ListCell *lc;
340 
341  foreach(lc, varList)
342  {
343  TriggerTransition *tt = (TriggerTransition *) lfirst(lc);
344 
345  if (!(tt->isTable))
346  ereport(ERROR,
347  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
348  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
349  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
350 
351  /*
352  * Because of the above test, we omit further ROW-related testing
353  * below. If we later allow naming OLD and NEW ROW variables,
354  * adjustments will be needed below.
355  */
356 
357  if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
358  ereport(ERROR,
359  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
360  errmsg("\"%s\" is a partitioned table",
361  RelationGetRelationName(rel)),
362  errdetail("Triggers on partitioned tables cannot have transition tables.")));
363 
364  if (stmt->timing != TRIGGER_TYPE_AFTER)
365  ereport(ERROR,
366  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
367  errmsg("transition table name can only be specified for an AFTER trigger")));
368 
369  if (tt->isNew)
370  {
371  if (!(TRIGGER_FOR_INSERT(tgtype) ||
372  TRIGGER_FOR_UPDATE(tgtype)))
373  ereport(ERROR,
374  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
375  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
376 
377  if (newtablename != NULL)
378  ereport(ERROR,
379  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
380  errmsg("NEW TABLE cannot be specified multiple times")));
381 
382  newtablename = tt->name;
383  }
384  else
385  {
386  if (!(TRIGGER_FOR_DELETE(tgtype) ||
387  TRIGGER_FOR_UPDATE(tgtype)))
388  ereport(ERROR,
389  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
390  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
391 
392  if (oldtablename != NULL)
393  ereport(ERROR,
394  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
395  errmsg("OLD TABLE cannot be specified multiple times")));
396 
397  oldtablename = tt->name;
398  }
399  }
400 
401  if (newtablename != NULL && oldtablename != NULL &&
402  strcmp(newtablename, oldtablename) == 0)
403  ereport(ERROR,
404  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
405  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
406  }
407 
408  /*
409  * Parse the WHEN clause, if any
410  */
411  if (stmt->whenClause)
412  {
413  ParseState *pstate;
414  RangeTblEntry *rte;
415  List *varList;
416  ListCell *lc;
417 
418  /* Set up a pstate to parse with */
419  pstate = make_parsestate(NULL);
420  pstate->p_sourcetext = queryString;
421 
422  /*
423  * Set up RTEs for OLD and NEW references.
424  *
425  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
426  */
427  rte = addRangeTableEntryForRelation(pstate, rel,
428  makeAlias("old", NIL),
429  false, false);
430  addRTEtoQuery(pstate, rte, false, true, true);
431  rte = addRangeTableEntryForRelation(pstate, rel,
432  makeAlias("new", NIL),
433  false, false);
434  addRTEtoQuery(pstate, rte, false, true, true);
435 
436  /* Transform expression. Copy to be sure we don't modify original */
437  whenClause = transformWhereClause(pstate,
438  copyObject(stmt->whenClause),
440  "WHEN");
441  /* we have to fix its collations too */
442  assign_expr_collations(pstate, whenClause);
443 
444  /*
445  * Check for disallowed references to OLD/NEW.
446  *
447  * NB: pull_var_clause is okay here only because we don't allow
448  * subselects in WHEN clauses; it would fail to examine the contents
449  * of subselects.
450  */
451  varList = pull_var_clause(whenClause, 0);
452  foreach(lc, varList)
453  {
454  Var *var = (Var *) lfirst(lc);
455 
456  switch (var->varno)
457  {
458  case PRS2_OLD_VARNO:
459  if (!TRIGGER_FOR_ROW(tgtype))
460  ereport(ERROR,
461  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
462  errmsg("statement trigger's WHEN condition cannot reference column values"),
463  parser_errposition(pstate, var->location)));
464  if (TRIGGER_FOR_INSERT(tgtype))
465  ereport(ERROR,
466  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
467  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
468  parser_errposition(pstate, var->location)));
469  /* system columns are okay here */
470  break;
471  case PRS2_NEW_VARNO:
472  if (!TRIGGER_FOR_ROW(tgtype))
473  ereport(ERROR,
474  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
475  errmsg("statement trigger's WHEN condition cannot reference column values"),
476  parser_errposition(pstate, var->location)));
477  if (TRIGGER_FOR_DELETE(tgtype))
478  ereport(ERROR,
479  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
480  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
481  parser_errposition(pstate, var->location)));
482  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
483  ereport(ERROR,
484  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
485  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
486  parser_errposition(pstate, var->location)));
487  break;
488  default:
489  /* can't happen without add_missing_from, so just elog */
490  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
491  break;
492  }
493  }
494 
495  /* we'll need the rtable for recordDependencyOnExpr */
496  whenRtable = pstate->p_rtable;
497 
498  qual = nodeToString(whenClause);
499 
500  free_parsestate(pstate);
501  }
502  else
503  {
504  whenClause = NULL;
505  whenRtable = NIL;
506  qual = NULL;
507  }
508 
509  /*
510  * Find and validate the trigger function.
511  */
512  funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false);
513  if (!isInternal)
514  {
515  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
516  if (aclresult != ACLCHECK_OK)
517  aclcheck_error(aclresult, ACL_KIND_PROC,
518  NameListToString(stmt->funcname));
519  }
520  funcrettype = get_func_rettype(funcoid);
521  if (funcrettype != TRIGGEROID)
522  {
523  /*
524  * We allow OPAQUE just so we can load old dump files. When we see a
525  * trigger function declared OPAQUE, change it to TRIGGER.
526  */
527  if (funcrettype == OPAQUEOID)
528  {
529  ereport(WARNING,
530  (errmsg("changing return type of function %s from %s to %s",
531  NameListToString(stmt->funcname),
532  "opaque", "trigger")));
533  SetFunctionReturnType(funcoid, TRIGGEROID);
534  }
535  else
536  ereport(ERROR,
537  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
538  errmsg("function %s must return type %s",
539  NameListToString(stmt->funcname), "trigger")));
540  }
541 
542  /*
543  * If the command is a user-entered CREATE CONSTRAINT TRIGGER command that
544  * references one of the built-in RI_FKey trigger functions, assume it is
545  * from a dump of a pre-7.3 foreign key constraint, and take steps to
546  * convert this legacy representation into a regular foreign key
547  * constraint. Ugly, but necessary for loading old dump files.
548  */
549  if (stmt->isconstraint && !isInternal &&
550  list_length(stmt->args) >= 6 &&
551  (list_length(stmt->args) % 2) == 0 &&
552  RI_FKey_trigger_type(funcoid) != RI_TRIGGER_NONE)
553  {
554  /* Keep lock on target rel until end of xact */
555  heap_close(rel, NoLock);
556 
557  ConvertTriggerToFK(stmt, funcoid);
558 
559  return InvalidObjectAddress;
560  }
561 
562  /*
563  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
564  * corresponding pg_constraint entry.
565  */
566  if (stmt->isconstraint && !OidIsValid(constraintOid))
567  {
568  /* Internal callers should have made their own constraints */
569  Assert(!isInternal);
570  constraintOid = CreateConstraintEntry(stmt->trigname,
571  RelationGetNamespace(rel),
572  CONSTRAINT_TRIGGER,
573  stmt->deferrable,
574  stmt->initdeferred,
575  true,
576  RelationGetRelid(rel),
577  NULL, /* no conkey */
578  0,
579  InvalidOid, /* no domain */
580  InvalidOid, /* no index */
581  InvalidOid, /* no foreign key */
582  NULL,
583  NULL,
584  NULL,
585  NULL,
586  0,
587  ' ',
588  ' ',
589  ' ',
590  NULL, /* no exclusion */
591  NULL, /* no check constraint */
592  NULL,
593  NULL,
594  true, /* islocal */
595  0, /* inhcount */
596  true, /* isnoinherit */
597  isInternal); /* is_internal */
598  }
599 
600  /*
601  * Generate the trigger's OID now, so that we can use it in the name if
602  * needed.
603  */
604  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
605 
606  trigoid = GetNewOid(tgrel);
607 
608  /*
609  * If trigger is internally generated, modify the provided trigger name to
610  * ensure uniqueness by appending the trigger OID. (Callers will usually
611  * supply a simple constant trigger name in these cases.)
612  */
613  if (isInternal)
614  {
615  snprintf(internaltrigname, sizeof(internaltrigname),
616  "%s_%u", stmt->trigname, trigoid);
617  trigname = internaltrigname;
618  }
619  else
620  {
621  /* user-defined trigger; use the specified trigger name as-is */
622  trigname = stmt->trigname;
623  }
624 
625  /*
626  * Scan pg_trigger for existing triggers on relation. We do this only to
627  * give a nice error message if there's already a trigger of the same
628  * name. (The unique index on tgrelid/tgname would complain anyway.) We
629  * can skip this for internally generated triggers, since the name
630  * modification above should be sufficient.
631  *
632  * NOTE that this is cool only because we have ShareRowExclusiveLock on
633  * the relation, so the trigger set won't be changing underneath us.
634  */
635  if (!isInternal)
636  {
637  ScanKeyInit(&key,
638  Anum_pg_trigger_tgrelid,
639  BTEqualStrategyNumber, F_OIDEQ,
640  ObjectIdGetDatum(RelationGetRelid(rel)));
641  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
642  NULL, 1, &key);
643  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
644  {
645  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
646 
647  if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
648  ereport(ERROR,
649  (errcode(ERRCODE_DUPLICATE_OBJECT),
650  errmsg("trigger \"%s\" for relation \"%s\" already exists",
651  trigname, RelationGetRelationName(rel))));
652  }
653  systable_endscan(tgscan);
654  }
655 
656  /*
657  * Build the new pg_trigger tuple.
658  */
659  memset(nulls, false, sizeof(nulls));
660 
661  values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
662  values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
663  CStringGetDatum(trigname));
664  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
665  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
666  values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(TRIGGER_FIRES_ON_ORIGIN);
667  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
668  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
669  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
670  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
671  values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
672  values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
673 
674  if (stmt->args)
675  {
676  ListCell *le;
677  char *args;
678  int16 nargs = list_length(stmt->args);
679  int len = 0;
680 
681  foreach(le, stmt->args)
682  {
683  char *ar = strVal(lfirst(le));
684 
685  len += strlen(ar) + 4;
686  for (; *ar; ar++)
687  {
688  if (*ar == '\\')
689  len++;
690  }
691  }
692  args = (char *) palloc(len + 1);
693  args[0] = '\0';
694  foreach(le, stmt->args)
695  {
696  char *s = strVal(lfirst(le));
697  char *d = args + strlen(args);
698 
699  while (*s)
700  {
701  if (*s == '\\')
702  *d++ = '\\';
703  *d++ = *s++;
704  }
705  strcpy(d, "\\000");
706  }
707  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
708  values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
709  CStringGetDatum(args));
710  }
711  else
712  {
713  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
715  CStringGetDatum(""));
716  }
717 
718  /* build column number array if it's a column-specific trigger */
719  ncolumns = list_length(stmt->columns);
720  if (ncolumns == 0)
721  columns = NULL;
722  else
723  {
724  ListCell *cell;
725  int i = 0;
726 
727  columns = (int16 *) palloc(ncolumns * sizeof(int16));
728  foreach(cell, stmt->columns)
729  {
730  char *name = strVal(lfirst(cell));
731  int16 attnum;
732  int j;
733 
734  /* Lookup column name. System columns are not allowed */
735  attnum = attnameAttNum(rel, name, false);
736  if (attnum == InvalidAttrNumber)
737  ereport(ERROR,
738  (errcode(ERRCODE_UNDEFINED_COLUMN),
739  errmsg("column \"%s\" of relation \"%s\" does not exist",
740  name, RelationGetRelationName(rel))));
741 
742  /* Check for duplicates */
743  for (j = i - 1; j >= 0; j--)
744  {
745  if (columns[j] == attnum)
746  ereport(ERROR,
747  (errcode(ERRCODE_DUPLICATE_COLUMN),
748  errmsg("column \"%s\" specified more than once",
749  name)));
750  }
751 
752  columns[i++] = attnum;
753  }
754  }
755  tgattr = buildint2vector(columns, ncolumns);
756  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
757 
758  /* set tgqual if trigger has WHEN clause */
759  if (qual)
760  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
761  else
762  nulls[Anum_pg_trigger_tgqual - 1] = true;
763 
764  if (oldtablename)
765  values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
766  CStringGetDatum(oldtablename));
767  else
768  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
769  if (newtablename)
770  values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
771  CStringGetDatum(newtablename));
772  else
773  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
774 
775  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
776 
777  /* force tuple to have the desired OID */
778  HeapTupleSetOid(tuple, trigoid);
779 
780  /*
781  * Insert tuple into pg_trigger.
782  */
783  CatalogTupleInsert(tgrel, tuple);
784 
785  heap_freetuple(tuple);
786  heap_close(tgrel, RowExclusiveLock);
787 
788  pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
789  pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
790  pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
791  if (oldtablename)
792  pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
793  if (newtablename)
794  pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
795 
796  /*
797  * Update relation's pg_class entry. Crucial side-effect: other backends
798  * (and this one too!) are sent SI message to make them rebuild relcache
799  * entries.
800  */
801  pgrel = heap_open(RelationRelationId, RowExclusiveLock);
802  tuple = SearchSysCacheCopy1(RELOID,
803  ObjectIdGetDatum(RelationGetRelid(rel)));
804  if (!HeapTupleIsValid(tuple))
805  elog(ERROR, "cache lookup failed for relation %u",
806  RelationGetRelid(rel));
807 
808  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
809 
810  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
811 
812  heap_freetuple(tuple);
813  heap_close(pgrel, RowExclusiveLock);
814 
815  /*
816  * We used to try to update the rel's relcache entry here, but that's
817  * fairly pointless since it will happen as a byproduct of the upcoming
818  * CommandCounterIncrement...
819  */
820 
821  /*
822  * Record dependencies for trigger. Always place a normal dependency on
823  * the function.
824  */
825  myself.classId = TriggerRelationId;
826  myself.objectId = trigoid;
827  myself.objectSubId = 0;
828 
829  referenced.classId = ProcedureRelationId;
830  referenced.objectId = funcoid;
831  referenced.objectSubId = 0;
832  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
833 
834  if (isInternal && OidIsValid(constraintOid))
835  {
836  /*
837  * Internally-generated trigger for a constraint, so make it an
838  * internal dependency of the constraint. We can skip depending on
839  * the relation(s), as there'll be an indirect dependency via the
840  * constraint.
841  */
842  referenced.classId = ConstraintRelationId;
843  referenced.objectId = constraintOid;
844  referenced.objectSubId = 0;
845  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
846  }
847  else
848  {
849  /*
850  * User CREATE TRIGGER, so place dependencies. We make trigger be
851  * auto-dropped if its relation is dropped or if the FK relation is
852  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
853  */
854  referenced.classId = RelationRelationId;
855  referenced.objectId = RelationGetRelid(rel);
856  referenced.objectSubId = 0;
857  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
858  if (OidIsValid(constrrelid))
859  {
860  referenced.classId = RelationRelationId;
861  referenced.objectId = constrrelid;
862  referenced.objectSubId = 0;
863  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
864  }
865  /* Not possible to have an index dependency in this case */
866  Assert(!OidIsValid(indexOid));
867 
868  /*
869  * If it's a user-specified constraint trigger, make the constraint
870  * internally dependent on the trigger instead of vice versa.
871  */
872  if (OidIsValid(constraintOid))
873  {
874  referenced.classId = ConstraintRelationId;
875  referenced.objectId = constraintOid;
876  referenced.objectSubId = 0;
877  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
878  }
879  }
880 
881  /* If column-specific trigger, add normal dependencies on columns */
882  if (columns != NULL)
883  {
884  int i;
885 
886  referenced.classId = RelationRelationId;
887  referenced.objectId = RelationGetRelid(rel);
888  for (i = 0; i < ncolumns; i++)
889  {
890  referenced.objectSubId = columns[i];
891  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
892  }
893  }
894 
895  /*
896  * If it has a WHEN clause, add dependencies on objects mentioned in the
897  * expression (eg, functions, as well as any columns used).
898  */
899  if (whenClause != NULL)
900  recordDependencyOnExpr(&myself, whenClause, whenRtable,
901  DEPENDENCY_NORMAL);
902 
903  /* Post creation hook for new trigger */
904  InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
905  isInternal);
906 
907  /* Keep lock on target rel until end of xact */
908  heap_close(rel, NoLock);
909 
910  return myself;
911 }
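/*
 * [Editor's illustrative sketch, not part of trigger.c] A minimal example of
 * how an internal caller might drive CreateTrigger() directly, under the
 * assumptions stated in the header comment above (relOid supplied, no WHEN
 * clause, not a constraint trigger).  The function name
 * example_create_audit_trigger and the trigger function my_audit_func() are
 * hypothetical; my_audit_func is assumed to be an existing function declared
 * RETURNS trigger.
 */
#ifdef TRIGGER_EXAMPLES
static ObjectAddress
example_create_audit_trigger(Oid relOid)
{
	CreateTrigStmt *stmt = makeNode(CreateTrigStmt);

	stmt->trigname = "example_audit";
	stmt->relation = NULL;		/* relOid is passed explicitly below */
	stmt->funcname = list_make1(makeString("my_audit_func"));
	stmt->args = NIL;
	stmt->row = true;			/* FOR EACH ROW */
	stmt->timing = TRIGGER_TYPE_AFTER;
	stmt->events = TRIGGER_TYPE_INSERT;
	stmt->columns = NIL;
	stmt->whenClause = NULL;
	stmt->transitionRels = NIL;
	stmt->isconstraint = false;
	stmt->deferrable = false;
	stmt->initdeferred = false;
	stmt->constrrel = NULL;

	/* no WHEN clause, so queryString may be NULL; ordinary user trigger */
	return CreateTrigger(stmt, NULL, relOid, InvalidOid, InvalidOid,
						 InvalidOid, false);
}
#endif							/* TRIGGER_EXAMPLES */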
912 
913 
914 /*
915  * Convert legacy (pre-7.3) CREATE CONSTRAINT TRIGGER commands into
916  * full-fledged foreign key constraints.
917  *
918  * The conversion is complex because a pre-7.3 foreign key involved three
919  * separate triggers, which were reported separately in dumps. While the
920  * single trigger on the referencing table adds no new information, we need
921  * to know the trigger functions of both of the triggers on the referenced
922  * table to build the constraint declaration. Also, due to lack of proper
923  * dependency checking pre-7.3, it is possible that the source database had
924  * an incomplete set of triggers resulting in an only partially enforced
925  * FK constraint. (This would happen if one of the tables had been dropped
926  * and re-created, but only if the DB had been affected by a 7.0 pg_dump bug
927  * that caused loss of tgconstrrelid information.) We choose to translate to
928  * an FK constraint only when we've seen all three triggers of a set. This is
929  * implemented by storing unmatched items in a list in TopMemoryContext.
930  * We match triggers together by comparing the trigger arguments (which
931  * include constraint name, table and column names, so should be good enough).
932  */
933 typedef struct
934 {
935  List *args; /* list of (T_String) Values or NIL */
936  Oid funcoids[3]; /* OIDs of trigger functions */
937  /* The three function OIDs are stored in the order update, delete, child */
938 } OldTriggerInfo;
939 
940 static void
941 ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid)
942 {
943  static List *info_list = NIL;
944 
945  static const char *const funcdescr[3] = {
946  gettext_noop("Found referenced table's UPDATE trigger."),
947  gettext_noop("Found referenced table's DELETE trigger."),
948  gettext_noop("Found referencing table's trigger.")
949  };
950 
951  char *constr_name;
952  char *fk_table_name;
953  char *pk_table_name;
954  char fk_matchtype = FKCONSTR_MATCH_SIMPLE;
955  List *fk_attrs = NIL;
956  List *pk_attrs = NIL;
957  StringInfoData buf;
958  int funcnum;
959  OldTriggerInfo *info = NULL;
960  ListCell *l;
961  int i;
962 
963  /* Parse out the trigger arguments */
964  constr_name = strVal(linitial(stmt->args));
965  fk_table_name = strVal(lsecond(stmt->args));
966  pk_table_name = strVal(lthird(stmt->args));
967  i = 0;
968  foreach(l, stmt->args)
969  {
970  Value *arg = (Value *) lfirst(l);
971 
972  i++;
973  if (i < 4) /* skip constraint and table names */
974  continue;
975  if (i == 4) /* handle match type */
976  {
977  if (strcmp(strVal(arg), "FULL") == 0)
978  fk_matchtype = FKCONSTR_MATCH_FULL;
979  else
980  fk_matchtype = FKCONSTR_MATCH_SIMPLE;
981  continue;
982  }
983  if (i % 2)
984  fk_attrs = lappend(fk_attrs, arg);
985  else
986  pk_attrs = lappend(pk_attrs, arg);
987  }
988 
989  /* Prepare description of constraint for use in messages */
990  initStringInfo(&buf);
991  appendStringInfo(&buf, "FOREIGN KEY %s(",
992  quote_identifier(fk_table_name));
993  i = 0;
994  foreach(l, fk_attrs)
995  {
996  Value *arg = (Value *) lfirst(l);
997 
998  if (i++ > 0)
999  appendStringInfoChar(&buf, ',');
1000  appendStringInfoString(&buf, quote_identifier(strVal(arg)));
1001  }
1002  appendStringInfo(&buf, ") REFERENCES %s(",
1003  quote_identifier(pk_table_name));
1004  i = 0;
1005  foreach(l, pk_attrs)
1006  {
1007  Value *arg = (Value *) lfirst(l);
1008 
1009  if (i++ > 0)
1010  appendStringInfoChar(&buf, ',');
1011  appendStringInfoString(&buf, quote_identifier(strVal(arg)));
1012  }
1013  appendStringInfoChar(&buf, ')');
1014 
1015  /* Identify class of trigger --- update, delete, or referencing-table */
1016  switch (funcoid)
1017  {
1018  case F_RI_FKEY_CASCADE_UPD:
1019  case F_RI_FKEY_RESTRICT_UPD:
1020  case F_RI_FKEY_SETNULL_UPD:
1021  case F_RI_FKEY_SETDEFAULT_UPD:
1022  case F_RI_FKEY_NOACTION_UPD:
1023  funcnum = 0;
1024  break;
1025 
1026  case F_RI_FKEY_CASCADE_DEL:
1027  case F_RI_FKEY_RESTRICT_DEL:
1028  case F_RI_FKEY_SETNULL_DEL:
1029  case F_RI_FKEY_SETDEFAULT_DEL:
1030  case F_RI_FKEY_NOACTION_DEL:
1031  funcnum = 1;
1032  break;
1033 
1034  default:
1035  funcnum = 2;
1036  break;
1037  }
1038 
1039  /* See if we have a match to this trigger */
1040  foreach(l, info_list)
1041  {
1042  info = (OldTriggerInfo *) lfirst(l);
1043  if (info->funcoids[funcnum] == InvalidOid &&
1044  equal(info->args, stmt->args))
1045  {
1046  info->funcoids[funcnum] = funcoid;
1047  break;
1048  }
1049  }
1050 
1051  if (l == NULL)
1052  {
1053  /* First trigger of set, so create a new list entry */
1054  MemoryContext oldContext;
1055 
1056  ereport(NOTICE,
1057  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1058  constr_name, buf.data),
1059  errdetail_internal("%s", _(funcdescr[funcnum]))));
1060  oldContext = MemoryContextSwitchTo(TopMemoryContext);
1061  info = (OldTriggerInfo *) palloc0(sizeof(OldTriggerInfo));
1062  info->args = copyObject(stmt->args);
1063  info->funcoids[funcnum] = funcoid;
1064  info_list = lappend(info_list, info);
1065  MemoryContextSwitchTo(oldContext);
1066  }
1067  else if (info->funcoids[0] == InvalidOid ||
1068  info->funcoids[1] == InvalidOid ||
1069  info->funcoids[2] == InvalidOid)
1070  {
1071  /* Second trigger of set */
1072  ereport(NOTICE,
1073  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1074  constr_name, buf.data),
1075  errdetail_internal("%s", _(funcdescr[funcnum]))));
1076  }
1077  else
1078  {
1079  /* OK, we have a set, so make the FK constraint ALTER TABLE cmd */
1080  AlterTableStmt *atstmt = makeNode(AlterTableStmt);
1081  AlterTableCmd *atcmd = makeNode(AlterTableCmd);
1082  Constraint *fkcon = makeNode(Constraint);
1083  PlannedStmt *wrapper = makeNode(PlannedStmt);
1084 
1085  ereport(NOTICE,
1086  (errmsg("converting trigger group into constraint \"%s\" %s",
1087  constr_name, buf.data),
1088  errdetail_internal("%s", _(funcdescr[funcnum]))));
1089  fkcon->contype = CONSTR_FOREIGN;
1090  fkcon->location = -1;
1091  if (funcnum == 2)
1092  {
1093  /* This trigger is on the FK table */
1094  atstmt->relation = stmt->relation;
1095  if (stmt->constrrel)
1096  fkcon->pktable = stmt->constrrel;
1097  else
1098  {
1099  /* Work around ancient pg_dump bug that omitted constrrel */
1100  fkcon->pktable = makeRangeVar(NULL, pk_table_name, -1);
1101  }
1102  }
1103  else
1104  {
1105  /* This trigger is on the PK table */
1106  fkcon->pktable = stmt->relation;
1107  if (stmt->constrrel)
1108  atstmt->relation = stmt->constrrel;
1109  else
1110  {
1111  /* Work around ancient pg_dump bug that omitted constrrel */
1112  atstmt->relation = makeRangeVar(NULL, fk_table_name, -1);
1113  }
1114  }
1115  atstmt->cmds = list_make1(atcmd);
1116  atstmt->relkind = OBJECT_TABLE;
1117  atcmd->subtype = AT_AddConstraint;
1118  atcmd->def = (Node *) fkcon;
1119  if (strcmp(constr_name, "<unnamed>") == 0)
1120  fkcon->conname = NULL;
1121  else
1122  fkcon->conname = constr_name;
1123  fkcon->fk_attrs = fk_attrs;
1124  fkcon->pk_attrs = pk_attrs;
1125  fkcon->fk_matchtype = fk_matchtype;
1126  switch (info->funcoids[0])
1127  {
1128  case F_RI_FKEY_NOACTION_UPD:
1129  fkcon->fk_upd_action = FKCONSTR_ACTION_NOACTION;
1130  break;
1131  case F_RI_FKEY_CASCADE_UPD:
1132  fkcon->fk_upd_action = FKCONSTR_ACTION_CASCADE;
1133  break;
1134  case F_RI_FKEY_RESTRICT_UPD:
1135  fkcon->fk_upd_action = FKCONSTR_ACTION_RESTRICT;
1136  break;
1137  case F_RI_FKEY_SETNULL_UPD:
1138  fkcon->fk_upd_action = FKCONSTR_ACTION_SETNULL;
1139  break;
1140  case F_RI_FKEY_SETDEFAULT_UPD:
1141  fkcon->fk_upd_action = FKCONSTR_ACTION_SETDEFAULT;
1142  break;
1143  default:
1144  /* can't get here because of earlier checks */
1145  elog(ERROR, "confused about RI update function");
1146  }
1147  switch (info->funcoids[1])
1148  {
1149  case F_RI_FKEY_NOACTION_DEL:
1150  fkcon->fk_del_action = FKCONSTR_ACTION_NOACTION;
1151  break;
1152  case F_RI_FKEY_CASCADE_DEL:
1153  fkcon->fk_del_action = FKCONSTR_ACTION_CASCADE;
1154  break;
1155  case F_RI_FKEY_RESTRICT_DEL:
1156  fkcon->fk_del_action = FKCONSTR_ACTION_RESTRICT;
1157  break;
1158  case F_RI_FKEY_SETNULL_DEL:
1159  fkcon->fk_del_action = FKCONSTR_ACTION_SETNULL;
1160  break;
1161  case F_RI_FKEY_SETDEFAULT_DEL:
1162  fkcon->fk_del_action = FKCONSTR_ACTION_SETDEFAULT;
1163  break;
1164  default:
1165  /* can't get here because of earlier checks */
1166  elog(ERROR, "confused about RI delete function");
1167  }
1168  fkcon->deferrable = stmt->deferrable;
1169  fkcon->initdeferred = stmt->initdeferred;
1170  fkcon->skip_validation = false;
1171  fkcon->initially_valid = true;
1172 
1173  /* finally, wrap it in a dummy PlannedStmt */
1174  wrapper->commandType = CMD_UTILITY;
1175  wrapper->canSetTag = false;
1176  wrapper->utilityStmt = (Node *) atstmt;
1177  wrapper->stmt_location = -1;
1178  wrapper->stmt_len = -1;
1179 
1180  /* ... and execute it */
1181  ProcessUtility(wrapper,
1182  "(generated ALTER TABLE ADD FOREIGN KEY command)",
1183  PROCESS_UTILITY_SUBCOMMAND, NULL, NULL,
1184  None_Receiver, NULL);
1185 
1186  /* Remove the matched item from the list */
1187  info_list = list_delete_ptr(info_list, info);
1188  pfree(info);
1189  /* We leak the copied args ... not worth worrying about */
1190  }
1191 }
1192 
1193 /*
1194  * Guts of trigger deletion.
1195  */
1196 void
1197 RemoveTriggerById(Oid trigOid)
1198 {
1199  Relation tgrel;
1200  SysScanDesc tgscan;
1201  ScanKeyData skey[1];
1202  HeapTuple tup;
1203  Oid relid;
1204  Relation rel;
1205 
1206  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1207 
1208  /*
1209  * Find the trigger to delete.
1210  */
1211  ScanKeyInit(&skey[0],
1212  ObjectIdAttributeNumber,
1213  BTEqualStrategyNumber, F_OIDEQ,
1214  ObjectIdGetDatum(trigOid));
1215 
1216  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1217  NULL, 1, skey);
1218 
1219  tup = systable_getnext(tgscan);
1220  if (!HeapTupleIsValid(tup))
1221  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1222 
1223  /*
1224  * Open and exclusive-lock the relation the trigger belongs to.
1225  */
1226  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1227 
1228  rel = heap_open(relid, AccessExclusiveLock);
1229 
1230  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1231  rel->rd_rel->relkind != RELKIND_VIEW &&
1232  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1233  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1234  ereport(ERROR,
1235  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1236  errmsg("\"%s\" is not a table, view, or foreign table",
1237  RelationGetRelationName(rel))));
1238 
1239  if (!allowSystemTableMods && IsSystemRelation(rel))
1240  ereport(ERROR,
1241  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1242  errmsg("permission denied: \"%s\" is a system catalog",
1243  RelationGetRelationName(rel))));
1244 
1245  /*
1246  * Delete the pg_trigger tuple.
1247  */
1248  CatalogTupleDelete(tgrel, &tup->t_self);
1249 
1250  systable_endscan(tgscan);
1251  heap_close(tgrel, RowExclusiveLock);
1252 
1253  /*
1254  * We do not bother to try to determine whether any other triggers remain,
1255  * which would be needed in order to decide whether it's safe to clear the
1256  * relation's relhastriggers. (In any case, there might be a concurrent
1257  * process adding new triggers.) Instead, just force a relcache inval to
1258  * make other backends (and this one too!) rebuild their relcache entries.
1259  * There's no great harm in leaving relhastriggers true even if there are
1260  * no triggers left.
1261  */
1262  CacheInvalidateRelcache(rel);
1263 
1264  /* Keep lock on trigger's rel until end of xact */
1265  heap_close(rel, NoLock);
1266 }
1267 
1268 /*
1269  * get_trigger_oid - Look up a trigger by name to find its OID.
1270  *
1271  * If missing_ok is false, throw an error if trigger not found. If
1272  * true, just return InvalidOid.
1273  */
1274 Oid
1275 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1276 {
1277  Relation tgrel;
1278  ScanKeyData skey[2];
1279  SysScanDesc tgscan;
1280  HeapTuple tup;
1281  Oid oid;
1282 
1283  /*
1284  * Find the trigger, verify permissions, set up object address
1285  */
1286  tgrel = heap_open(TriggerRelationId, AccessShareLock);
1287 
1288  ScanKeyInit(&skey[0],
1289  Anum_pg_trigger_tgrelid,
1290  BTEqualStrategyNumber, F_OIDEQ,
1291  ObjectIdGetDatum(relid));
1292  ScanKeyInit(&skey[1],
1293  Anum_pg_trigger_tgname,
1294  BTEqualStrategyNumber, F_NAMEEQ,
1295  CStringGetDatum(trigname));
1296 
1297  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1298  NULL, 2, skey);
1299 
1300  tup = systable_getnext(tgscan);
1301 
1302  if (!HeapTupleIsValid(tup))
1303  {
1304  if (!missing_ok)
1305  ereport(ERROR,
1306  (errcode(ERRCODE_UNDEFINED_OBJECT),
1307  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1308  trigname, get_rel_name(relid))));
1309  oid = InvalidOid;
1310  }
1311  else
1312  {
1313  oid = HeapTupleGetOid(tup);
1314  }
1315 
1316  systable_endscan(tgscan);
1317  heap_close(tgrel, AccessShareLock);
1318  return oid;
1319 }
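/*
 * [Editor's illustrative sketch, not part of trigger.c] A hypothetical use of
 * get_trigger_oid() with missing_ok = true, so the lookup degrades to
 * InvalidOid instead of raising an error.  example_report_trigger is made up.
 */
#ifdef TRIGGER_EXAMPLES
static void
example_report_trigger(Oid relid, const char *trigname)
{
	Oid			trigoid = get_trigger_oid(relid, trigname, true);

	if (OidIsValid(trigoid))
		elog(DEBUG1, "trigger \"%s\" on relation %u has OID %u",
			 trigname, relid, trigoid);
	else
		elog(DEBUG1, "relation %u has no trigger \"%s\"", relid, trigname);
}
#endif							/* TRIGGER_EXAMPLES */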
1320 
1321 /*
1322  * Perform permissions and integrity checks before acquiring a relation lock.
1323  */
1324 static void
1325 RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1326  void *arg)
1327 {
1328  HeapTuple tuple;
1329  Form_pg_class form;
1330 
1331  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1332  if (!HeapTupleIsValid(tuple))
1333  return; /* concurrently dropped */
1334  form = (Form_pg_class) GETSTRUCT(tuple);
1335 
1336  /* only tables and views can have triggers */
1337  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1338  form->relkind != RELKIND_FOREIGN_TABLE &&
1339  form->relkind != RELKIND_PARTITIONED_TABLE)
1340  ereport(ERROR,
1341  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1342  errmsg("\"%s\" is not a table, view, or foreign table",
1343  rv->relname)));
1344 
1345  /* you must own the table to rename one of its triggers */
1346  if (!pg_class_ownercheck(relid, GetUserId()))
1347  aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname);
1348  if (!allowSystemTableMods && IsSystemClass(relid, form))
1349  ereport(ERROR,
1350  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1351  errmsg("permission denied: \"%s\" is a system catalog",
1352  rv->relname)));
1353 
1354  ReleaseSysCache(tuple);
1355 }
1356 
1357 /*
1358  * renametrig - changes the name of a trigger on a relation
1359  *
1360  * trigger name is changed in trigger catalog.
1361  * No record of the previous name is kept.
1362  *
1363  * get proper relrelation from relation catalog (if not arg)
1364  * scan trigger catalog
1365  * for name conflict (within rel)
1366  * for original trigger (if not arg)
1367  * modify tgname in trigger tuple
1368  * update row in catalog
1369  */
1370 ObjectAddress
1371 renametrig(RenameStmt *stmt)
1372 {
1373  Oid tgoid;
1374  Relation targetrel;
1375  Relation tgrel;
1376  HeapTuple tuple;
1377  SysScanDesc tgscan;
1378  ScanKeyData key[2];
1379  Oid relid;
1380  ObjectAddress address;
1381 
1382  /*
1383  * Look up name, check permissions, and acquire lock (which we will NOT
1384  * release until end of transaction).
1385  */
1386  relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
1387  false, false,
1388  RangeVarCallbackForRenameTrigger,
1389  NULL);
1390 
1391  /* Have lock already, so just need to build relcache entry. */
1392  targetrel = relation_open(relid, NoLock);
1393 
1394  /*
1395  * Scan pg_trigger twice for existing triggers on relation. We do this in
1396  * order to ensure a trigger does not exist with newname (The unique index
1397  * on tgrelid/tgname would complain anyway) and to ensure a trigger does
1398  * exist with oldname.
1399  *
1400  * NOTE that this is cool only because we have AccessExclusiveLock on the
1401  * relation, so the trigger set won't be changing underneath us.
1402  */
1403  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1404 
1405  /*
1406  * First pass -- look for name conflict
1407  */
1408  ScanKeyInit(&key[0],
1409  Anum_pg_trigger_tgrelid,
1410  BTEqualStrategyNumber, F_OIDEQ,
1411  ObjectIdGetDatum(relid));
1412  ScanKeyInit(&key[1],
1413  Anum_pg_trigger_tgname,
1414  BTEqualStrategyNumber, F_NAMEEQ,
1415  PointerGetDatum(stmt->newname));
1416  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1417  NULL, 2, key);
1418  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1419  ereport(ERROR,
1420  (errcode(ERRCODE_DUPLICATE_OBJECT),
1421  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1422  stmt->newname, RelationGetRelationName(targetrel))));
1423  systable_endscan(tgscan);
1424 
1425  /*
1426  * Second pass -- look for trigger existing with oldname and update
1427  */
1428  ScanKeyInit(&key[0],
1429  Anum_pg_trigger_tgrelid,
1430  BTEqualStrategyNumber, F_OIDEQ,
1431  ObjectIdGetDatum(relid));
1432  ScanKeyInit(&key[1],
1433  Anum_pg_trigger_tgname,
1434  BTEqualStrategyNumber, F_NAMEEQ,
1435  PointerGetDatum(stmt->subname));
1436  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1437  NULL, 2, key);
1438  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1439  {
1440  tgoid = HeapTupleGetOid(tuple);
1441 
1442  /*
1443  * Update pg_trigger tuple with new tgname.
1444  */
1445  tuple = heap_copytuple(tuple); /* need a modifiable copy */
1446 
1447  namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname,
1448  stmt->newname);
1449 
1450  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1451 
1452  InvokeObjectPostAlterHook(TriggerRelationId,
1453  HeapTupleGetOid(tuple), 0);
1454 
1455  /*
1456  * Invalidate relation's relcache entry so that other backends (and
1457  * this one too!) are sent SI message to make them rebuild relcache
1458  * entries. (Ideally this should happen automatically...)
1459  */
1460  CacheInvalidateRelcache(targetrel);
1461  }
1462  else
1463  {
1464  ereport(ERROR,
1465  (errcode(ERRCODE_UNDEFINED_OBJECT),
1466  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1467  stmt->subname, RelationGetRelationName(targetrel))));
1468  }
1469 
1470  ObjectAddressSet(address, TriggerRelationId, tgoid);
1471 
1472  systable_endscan(tgscan);
1473 
1474  heap_close(tgrel, RowExclusiveLock);
1475 
1476  /*
1477  * Close rel, but keep exclusive lock!
1478  */
1479  relation_close(targetrel, NoLock);
1480 
1481  return address;
1482 }
1483 
1484 
1485 /*
1486  * EnableDisableTrigger()
1487  *
1488  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1489  * to change 'tgenabled' field for the specified trigger(s)
1490  *
1491  * rel: relation to process (caller must hold suitable lock on it)
1492  * tgname: trigger to process, or NULL to scan all triggers
1493  * fires_when: new value for tgenabled field. In addition to generic
1494  * enablement/disablement, this also defines when the trigger
1495  * should be fired in session replication roles.
1496  * skip_system: if true, skip "system" triggers (constraint triggers)
1497  *
1498  * Caller should have checked permissions for the table; here we also
1499  * enforce that superuser privilege is required to alter the state of
1500  * system triggers
1501  */
1502 void
1503 EnableDisableTrigger(Relation rel, const char *tgname,
1504  char fires_when, bool skip_system)
1505 {
1506  Relation tgrel;
1507  int nkeys;
1508  ScanKeyData keys[2];
1509  SysScanDesc tgscan;
1510  HeapTuple tuple;
1511  bool found;
1512  bool changed;
1513 
1514  /* Scan the relevant entries in pg_triggers */
1515  tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
1516 
1517  ScanKeyInit(&keys[0],
1518  Anum_pg_trigger_tgrelid,
1519  BTEqualStrategyNumber, F_OIDEQ,
1520  ObjectIdGetDatum(RelationGetRelid(rel)));
1521  if (tgname)
1522  {
1523  ScanKeyInit(&keys[1],
1524  Anum_pg_trigger_tgname,
1525  BTEqualStrategyNumber, F_NAMEEQ,
1526  CStringGetDatum(tgname));
1527  nkeys = 2;
1528  }
1529  else
1530  nkeys = 1;
1531 
1532  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1533  NULL, nkeys, keys);
1534 
1535  found = changed = false;
1536 
1537  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1538  {
1539  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1540 
1541  if (oldtrig->tgisinternal)
1542  {
1543  /* system trigger ... ok to process? */
1544  if (skip_system)
1545  continue;
1546  if (!superuser())
1547  ereport(ERROR,
1548  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1549  errmsg("permission denied: \"%s\" is a system trigger",
1550  NameStr(oldtrig->tgname))));
1551  }
1552 
1553  found = true;
1554 
1555  if (oldtrig->tgenabled != fires_when)
1556  {
1557  /* need to change this one ... make a copy to scribble on */
1558  HeapTuple newtup = heap_copytuple(tuple);
1559  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1560 
1561  newtrig->tgenabled = fires_when;
1562 
1563  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1564 
1565  heap_freetuple(newtup);
1566 
1567  changed = true;
1568  }
1569 
1570  InvokeObjectPostAlterHook(TriggerRelationId,
1571  HeapTupleGetOid(tuple), 0);
1572  }
1573 
1574  systable_endscan(tgscan);
1575 
1576  heap_close(tgrel, RowExclusiveLock);
1577 
1578  if (tgname && !found)
1579  ereport(ERROR,
1580  (errcode(ERRCODE_UNDEFINED_OBJECT),
1581  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1582  tgname, RelationGetRelationName(rel))));
1583 
1584  /*
1585  * If we changed anything, broadcast a SI inval message to force each
1586  * backend (including our own!) to rebuild relation's relcache entry.
1587  * Otherwise they will fail to apply the change promptly.
1588  */
1589  if (changed)
1590  CacheInvalidateRelcache(rel);
1591 }
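/*
 * [Editor's illustrative sketch, not part of trigger.c] A hypothetical caller
 * disabling a single user trigger by name.  The caller is assumed to hold an
 * adequate lock on the relation and to have done the table-level permission
 * checks, as the header comment above requires; example_disable_trigger is
 * made up.
 */
#ifdef TRIGGER_EXAMPLES
static void
example_disable_trigger(Relation rel, const char *tgname)
{
	/* TRIGGER_DISABLED is the tgenabled state 'D' from catalog/pg_trigger.h */
	EnableDisableTrigger(rel, tgname, TRIGGER_DISABLED, false);
}
#endif							/* TRIGGER_EXAMPLES */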
1592 
1593 
1594 /*
1595  * Build trigger data to attach to the given relcache entry.
1596  *
1597  * Note that trigger data attached to a relcache entry must be stored in
1598  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1599  * But we should be running in a less long-lived working context. To avoid
1600  * leaking cache memory if this routine fails partway through, we build a
1601  * temporary TriggerDesc in working memory and then copy the completed
1602  * structure into cache memory.
1603  */
1604 void
1605 RelationBuildTriggers(Relation relation)
1606 {
1607  TriggerDesc *trigdesc;
1608  int numtrigs;
1609  int maxtrigs;
1610  Trigger *triggers;
1611  Relation tgrel;
1612  ScanKeyData skey;
1613  SysScanDesc tgscan;
1614  HeapTuple htup;
1615  MemoryContext oldContext;
1616  int i;
1617 
1618  /*
1619  * Allocate a working array to hold the triggers (the array is extended if
1620  * necessary)
1621  */
1622  maxtrigs = 16;
1623  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1624  numtrigs = 0;
1625 
1626  /*
1627  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1628  * be reading the triggers in name order, except possibly during
1629  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1630  * ensures that triggers will be fired in name order.
1631  */
1632  ScanKeyInit(&skey,
1633  Anum_pg_trigger_tgrelid,
1634  BTEqualStrategyNumber, F_OIDEQ,
1635  ObjectIdGetDatum(RelationGetRelid(relation)));
1636 
1637  tgrel = heap_open(TriggerRelationId, AccessShareLock);
1638  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1639  NULL, 1, &skey);
1640 
1641  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1642  {
1643  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1644  Trigger *build;
1645  Datum datum;
1646  bool isnull;
1647 
1648  if (numtrigs >= maxtrigs)
1649  {
1650  maxtrigs *= 2;
1651  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1652  }
1653  build = &(triggers[numtrigs]);
1654 
1655  build->tgoid = HeapTupleGetOid(htup);
1656  build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
1657  NameGetDatum(&pg_trigger->tgname)));
1658  build->tgfoid = pg_trigger->tgfoid;
1659  build->tgtype = pg_trigger->tgtype;
1660  build->tgenabled = pg_trigger->tgenabled;
1661  build->tgisinternal = pg_trigger->tgisinternal;
1662  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1663  build->tgconstrindid = pg_trigger->tgconstrindid;
1664  build->tgconstraint = pg_trigger->tgconstraint;
1665  build->tgdeferrable = pg_trigger->tgdeferrable;
1666  build->tginitdeferred = pg_trigger->tginitdeferred;
1667  build->tgnargs = pg_trigger->tgnargs;
1668  /* tgattr is first var-width field, so OK to access directly */
1669  build->tgnattr = pg_trigger->tgattr.dim1;
1670  if (build->tgnattr > 0)
1671  {
1672  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1673  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1674  build->tgnattr * sizeof(int16));
1675  }
1676  else
1677  build->tgattr = NULL;
1678  if (build->tgnargs > 0)
1679  {
1680  bytea *val;
1681  char *p;
1682 
1683  val = DatumGetByteaPP(fastgetattr(htup,
1684  Anum_pg_trigger_tgargs,
1685  tgrel->rd_att, &isnull));
1686  if (isnull)
1687  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1688  RelationGetRelationName(relation));
1689  p = (char *) VARDATA_ANY(val);
1690  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1691  for (i = 0; i < build->tgnargs; i++)
1692  {
1693  build->tgargs[i] = pstrdup(p);
1694  p += strlen(p) + 1;
1695  }
1696  }
1697  else
1698  build->tgargs = NULL;
1699 
1700  datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
1701  tgrel->rd_att, &isnull);
1702  if (!isnull)
1703  build->tgoldtable =
1704  DatumGetCString(DirectFunctionCall1(nameout, datum));
1705  else
1706  build->tgoldtable = NULL;
1707 
1708  datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
1709  tgrel->rd_att, &isnull);
1710  if (!isnull)
1711  build->tgnewtable =
1712  DatumGetCString(DirectFunctionCall1(nameout, datum));
1713  else
1714  build->tgnewtable = NULL;
1715 
1716  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1717  tgrel->rd_att, &isnull);
1718  if (!isnull)
1719  build->tgqual = TextDatumGetCString(datum);
1720  else
1721  build->tgqual = NULL;
1722 
1723  numtrigs++;
1724  }
1725 
1726  systable_endscan(tgscan);
1727  heap_close(tgrel, AccessShareLock);
1728 
1729  /* There might not be any triggers */
1730  if (numtrigs == 0)
1731  {
1732  pfree(triggers);
1733  return;
1734  }
1735 
1736  /* Build trigdesc */
1737  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1738  trigdesc->triggers = triggers;
1739  trigdesc->numtriggers = numtrigs;
1740  for (i = 0; i < numtrigs; i++)
1741  SetTriggerFlags(trigdesc, &(triggers[i]));
1742 
1743  /* Copy completed trigdesc into cache storage */
1744  oldContext = MemoryContextSwitchTo(CacheMemoryContext);
1745  relation->trigdesc = CopyTriggerDesc(trigdesc);
1746  MemoryContextSwitchTo(oldContext);
1747 
1748  /* Release working memory */
1749  FreeTriggerDesc(trigdesc);
1750 }
1751 
1752 /*
1753  * Update the TriggerDesc's hint flags to include the specified trigger
1754  */
1755 static void
1756 SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
1757 {
1758  int16 tgtype = trigger->tgtype;
1759 
1760  trigdesc->trig_insert_before_row |=
1761  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1762  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1763  trigdesc->trig_insert_after_row |=
1764  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1765  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1766  trigdesc->trig_insert_instead_row |=
1767  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1768  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
1769  trigdesc->trig_insert_before_statement |=
1770  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1771  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
1772  trigdesc->trig_insert_after_statement |=
1773  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1774  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
1775  trigdesc->trig_update_before_row |=
1776  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1777  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1778  trigdesc->trig_update_after_row |=
1779  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1780  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1781  trigdesc->trig_update_instead_row |=
1782  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1783  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
1784  trigdesc->trig_update_before_statement |=
1785  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1786  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
1787  trigdesc->trig_update_after_statement |=
1788  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1789  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
1790  trigdesc->trig_delete_before_row |=
1791  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1792  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1793  trigdesc->trig_delete_after_row |=
1794  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1795  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1796  trigdesc->trig_delete_instead_row |=
1797  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
1798  TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
1799  trigdesc->trig_delete_before_statement |=
1800  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1801  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
1802  trigdesc->trig_delete_after_statement |=
1803  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1804  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
1805  /* there are no row-level truncate triggers */
1806  trigdesc->trig_truncate_before_statement |=
1807  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1808  TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
1809  trigdesc->trig_truncate_after_statement |=
1810  TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
1811  TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
1812 
1813  trigdesc->trig_insert_new_table |=
1814  (TRIGGER_FOR_INSERT(tgtype) &&
1815  trigger->tgnewtable != NULL);
1816  trigdesc->trig_update_old_table |=
1817  (TRIGGER_FOR_UPDATE(tgtype) &&
1818  trigger->tgoldtable != NULL);
1819  trigdesc->trig_update_new_table |=
1820  (TRIGGER_FOR_UPDATE(tgtype) &&
1821  trigger->tgnewtable != NULL);
1822  trigdesc->trig_delete_old_table |=
1823  (TRIGGER_FOR_DELETE(tgtype) &&
1824  trigger->tgoldtable != NULL);
1825 }
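/*
 * [Editor's illustrative sketch, not part of trigger.c] These hint flags are
 * checked before any per-trigger work is attempted; the hypothetical helper
 * below mirrors that pattern for BEFORE INSERT row triggers.
 */
#ifdef TRIGGER_EXAMPLES
static bool
example_has_before_insert_row_triggers(ResultRelInfo *relinfo)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

	return trigdesc != NULL && trigdesc->trig_insert_before_row;
}
#endif							/* TRIGGER_EXAMPLES */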
1826 
1827 /*
1828  * Copy a TriggerDesc data structure.
1829  *
1830  * The copy is allocated in the current memory context.
1831  */
1832 TriggerDesc *
1833 CopyTriggerDesc(TriggerDesc *trigdesc)
1834 {
1835  TriggerDesc *newdesc;
1836  Trigger *trigger;
1837  int i;
1838 
1839  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
1840  return NULL;
1841 
1842  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
1843  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
1844 
1845  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
1846  memcpy(trigger, trigdesc->triggers,
1847  trigdesc->numtriggers * sizeof(Trigger));
1848  newdesc->triggers = trigger;
1849 
1850  for (i = 0; i < trigdesc->numtriggers; i++)
1851  {
1852  trigger->tgname = pstrdup(trigger->tgname);
1853  if (trigger->tgnattr > 0)
1854  {
1855  int16 *newattr;
1856 
1857  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
1858  memcpy(newattr, trigger->tgattr,
1859  trigger->tgnattr * sizeof(int16));
1860  trigger->tgattr = newattr;
1861  }
1862  if (trigger->tgnargs > 0)
1863  {
1864  char **newargs;
1865  int16 j;
1866 
1867  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
1868  for (j = 0; j < trigger->tgnargs; j++)
1869  newargs[j] = pstrdup(trigger->tgargs[j]);
1870  trigger->tgargs = newargs;
1871  }
1872  if (trigger->tgqual)
1873  trigger->tgqual = pstrdup(trigger->tgqual);
1874  if (trigger->tgoldtable)
1875  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
1876  if (trigger->tgnewtable)
1877  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
1878  trigger++;
1879  }
1880 
1881  return newdesc;
1882 }
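/*
 * [Editor's illustrative sketch, not part of trigger.c] A hypothetical caller
 * taking a private copy of a relation's trigger descriptor in the current
 * memory context and releasing it with FreeTriggerDesc(); CopyTriggerDesc()
 * returns NULL when the relation has no triggers.
 */
#ifdef TRIGGER_EXAMPLES
static void
example_copy_and_free_trigdesc(Relation rel)
{
	TriggerDesc *copy = CopyTriggerDesc(rel->trigdesc);

	if (copy != NULL)
	{
		/* ... use the copy independently of relcache invalidations ... */
		FreeTriggerDesc(copy);
	}
}
#endif							/* TRIGGER_EXAMPLES */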
1883 
1884 /*
1885  * Free a TriggerDesc data structure.
1886  */
1887 void
1888 FreeTriggerDesc(TriggerDesc *trigdesc)
1889 {
1890  Trigger *trigger;
1891  int i;
1892 
1893  if (trigdesc == NULL)
1894  return;
1895 
1896  trigger = trigdesc->triggers;
1897  for (i = 0; i < trigdesc->numtriggers; i++)
1898  {
1899  pfree(trigger->tgname);
1900  if (trigger->tgnattr > 0)
1901  pfree(trigger->tgattr);
1902  if (trigger->tgnargs > 0)
1903  {
1904  while (--(trigger->tgnargs) >= 0)
1905  pfree(trigger->tgargs[trigger->tgnargs]);
1906  pfree(trigger->tgargs);
1907  }
1908  if (trigger->tgqual)
1909  pfree(trigger->tgqual);
1910  if (trigger->tgoldtable)
1911  pfree(trigger->tgoldtable);
1912  if (trigger->tgnewtable)
1913  pfree(trigger->tgnewtable);
1914  trigger++;
1915  }
1916  pfree(trigdesc->triggers);
1917  pfree(trigdesc);
1918 }
1919 
1920 /*
1921  * Compare two TriggerDesc structures for logical equality.
1922  */
1923 #ifdef NOT_USED
1924 bool
1925 equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
1926 {
1927  int i,
1928  j;
1929 
1930  /*
1931  * We need not examine the hint flags, just the trigger array itself; if
1932  * we have the same triggers with the same types, the flags should match.
1933  *
1934  * As of 7.3 we assume trigger set ordering is significant in the
1935  * comparison; so we just compare corresponding slots of the two sets.
1936  *
1937  * Note: comparing the stringToNode forms of the WHEN clauses means that
1938  * parse column locations will affect the result. This is okay as long as
1939  * this function is only used for detecting exact equality, as for example
1940  * in checking for staleness of a cache entry.
1941  */
1942  if (trigdesc1 != NULL)
1943  {
1944  if (trigdesc2 == NULL)
1945  return false;
1946  if (trigdesc1->numtriggers != trigdesc2->numtriggers)
1947  return false;
1948  for (i = 0; i < trigdesc1->numtriggers; i++)
1949  {
1950  Trigger *trig1 = trigdesc1->triggers + i;
1951  Trigger *trig2 = trigdesc2->triggers + i;
1952 
1953  if (trig1->tgoid != trig2->tgoid)
1954  return false;
1955  if (strcmp(trig1->tgname, trig2->tgname) != 0)
1956  return false;
1957  if (trig1->tgfoid != trig2->tgfoid)
1958  return false;
1959  if (trig1->tgtype != trig2->tgtype)
1960  return false;
1961  if (trig1->tgenabled != trig2->tgenabled)
1962  return false;
1963  if (trig1->tgisinternal != trig2->tgisinternal)
1964  return false;
1965  if (trig1->tgconstrrelid != trig2->tgconstrrelid)
1966  return false;
1967  if (trig1->tgconstrindid != trig2->tgconstrindid)
1968  return false;
1969  if (trig1->tgconstraint != trig2->tgconstraint)
1970  return false;
1971  if (trig1->tgdeferrable != trig2->tgdeferrable)
1972  return false;
1973  if (trig1->tginitdeferred != trig2->tginitdeferred)
1974  return false;
1975  if (trig1->tgnargs != trig2->tgnargs)
1976  return false;
1977  if (trig1->tgnattr != trig2->tgnattr)
1978  return false;
1979  if (trig1->tgnattr > 0 &&
1980  memcmp(trig1->tgattr, trig2->tgattr,
1981  trig1->tgnattr * sizeof(int16)) != 0)
1982  return false;
1983  for (j = 0; j < trig1->tgnargs; j++)
1984  if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
1985  return false;
1986  if (trig1->tgqual == NULL && trig2->tgqual == NULL)
1987  /* ok */ ;
1988  else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
1989  return false;
1990  else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
1991  return false;
1992  if (trig1->tgoldtable == NULL && trig2->tgoldtable == NULL)
1993  /* ok */ ;
1994  else if (trig1->tgoldtable == NULL || trig2->tgoldtable == NULL)
1995  return false;
1996  else if (strcmp(trig1->tgoldtable, trig2->tgoldtable) != 0)
1997  return false;
1998  if (trig1->tgnewtable == NULL && trig2->tgnewtable == NULL)
1999  /* ok */ ;
2000  else if (trig1->tgnewtable == NULL || trig2->tgnewtable == NULL)
2001  return false;
2002  else if (strcmp(trig1->tgnewtable, trig2->tgnewtable) != 0)
2003  return false;
2004  }
2005  }
2006  else if (trigdesc2 != NULL)
2007  return false;
2008  return true;
2009 }
2010 #endif /* NOT_USED */
2011 
2012 /*
2013  * Call a trigger function.
2014  *
2015  * trigdata: trigger descriptor.
2016  * tgindx: trigger's index in finfo and instr arrays.
2017  * finfo: array of cached trigger function call information.
2018  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2019  * per_tuple_context: memory context to execute the function in.
2020  *
2021  * Returns the tuple (or NULL) as returned by the function.
2022  */
2023 static HeapTuple
2024 ExecCallTriggerFunc(TriggerData *trigdata,
2025  int tgindx,
2026  FmgrInfo *finfo,
2027  Instrumentation *instr,
2028  MemoryContext per_tuple_context)
2029 {
2030  FunctionCallInfoData fcinfo;
2031  PgStat_FunctionCallUsage fcusage;
2032  Datum result;
2033  MemoryContext oldContext;
2034 
2035  /*
2036  * Protect against code paths that may fail to initialize transition table
2037  * info.
2038  */
2039  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2040  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2041  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2042  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2043  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2044  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2045  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2046 
2047  finfo += tgindx;
2048 
2049  /*
2050  * We cache fmgr lookup info, to avoid making the lookup again on each
2051  * call.
2052  */
2053  if (finfo->fn_oid == InvalidOid)
2054  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2055 
2056  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2057 
2058  /*
2059  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2060  */
2061  if (instr)
2062  InstrStartNode(instr + tgindx);
2063 
2064  /*
2065  * Do the function evaluation in the per-tuple memory context, so that
2066  * leaked memory will be reclaimed once per tuple. Note in particular that
2067  * any new tuple created by the trigger function will live till the end of
2068  * the tuple cycle.
2069  */
2070  oldContext = MemoryContextSwitchTo(per_tuple_context);
2071 
2072  /*
2073  * Call the function, passing no arguments but setting a context.
2074  */
2075  InitFunctionCallInfoData(fcinfo, finfo, 0,
2076  InvalidOid, (Node *) trigdata, NULL);
2077 
2078  pgstat_init_function_usage(&fcinfo, &fcusage);
2079 
2080  MyTriggerDepth++;
2081  PG_TRY();
2082  {
2083  result = FunctionCallInvoke(&fcinfo);
2084  }
2085  PG_CATCH();
2086  {
2087  MyTriggerDepth--;
2088  PG_RE_THROW();
2089  }
2090  PG_END_TRY();
2091  MyTriggerDepth--;
2092 
2093  pgstat_end_function_usage(&fcusage, true);
2094 
2095  MemoryContextSwitchTo(oldContext);
2096 
2097  /*
2098  * Trigger protocol allows function to return a null pointer, but NOT to
2099  * set the isnull result flag.
2100  */
2101  if (fcinfo.isnull)
2102  ereport(ERROR,
2103  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2104  errmsg("trigger function %u returned null value",
2105  fcinfo.flinfo->fn_oid)));
2106 
2107  /*
2108  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2109  * one "tuple returned" (really the number of firings).
2110  */
2111  if (instr)
2112  InstrStopNode(instr + tgindx, 1);
2113 
2114  return (HeapTuple) DatumGetPointer(result);
2115 }
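/*
 * Protocol sketch for the called function (a hedged example of a user-written
 * C trigger function, not code from this file): the function receives the
 * TriggerData built above through fcinfo->context and returns a HeapTuple
 * as a pointer Datum, where NULL means "do nothing" for BEFORE ROW triggers:
 *
 *     Datum
 *     my_trigger(PG_FUNCTION_ARGS)
 *     {
 *         TriggerData *trigdata = (TriggerData *) fcinfo->context;
 *
 *         if (!CALLED_AS_TRIGGER(fcinfo))
 *             elog(ERROR, "my_trigger: not called by trigger manager");
 *         return PointerGetDatum(trigdata->tg_trigtuple);
 *     }
 *
 * Returning a (possibly NULL) pointer Datum rather than setting
 * fcinfo->isnull is exactly what the isnull check in ExecCallTriggerFunc
 * enforces.
 */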
2116 
2117 void
2118 ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2119 {
2120  TriggerDesc *trigdesc;
2121  int i;
2122  TriggerData LocTriggerData;
2123 
2124  trigdesc = relinfo->ri_TrigDesc;
2125 
2126  if (trigdesc == NULL)
2127  return;
2128  if (!trigdesc->trig_insert_before_statement)
2129  return;
2130 
2131  LocTriggerData.type = T_TriggerData;
2132  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2133  TRIGGER_EVENT_BEFORE;
2134  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2135  LocTriggerData.tg_trigtuple = NULL;
2136  LocTriggerData.tg_newtuple = NULL;
2137  LocTriggerData.tg_oldtable = NULL;
2138  LocTriggerData.tg_newtable = NULL;
2139  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2140  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2141  for (i = 0; i < trigdesc->numtriggers; i++)
2142  {
2143  Trigger *trigger = &trigdesc->triggers[i];
2144  HeapTuple newtuple;
2145 
2146  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2147  TRIGGER_TYPE_STATEMENT,
2148  TRIGGER_TYPE_BEFORE,
2149  TRIGGER_TYPE_INSERT))
2150  continue;
2151  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2152  NULL, NULL, NULL))
2153  continue;
2154 
2155  LocTriggerData.tg_trigger = trigger;
2156  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2157  i,
2158  relinfo->ri_TrigFunctions,
2159  relinfo->ri_TrigInstrument,
2160  GetPerTupleMemoryContext(estate));
2161 
2162  if (newtuple)
2163  ereport(ERROR,
2164  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2165  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2166  }
2167 }
2168 
2169 void
2170 ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2171 {
2172  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2173 
2174  if (trigdesc && trigdesc->trig_insert_after_statement)
2175  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2176  false, NULL, NULL, NIL, NULL);
2177 }
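/*
 * Note on the Exec*Triggers naming used from here on (summary, not original
 * source commentary): BS/AS = BEFORE/AFTER STATEMENT, BR/AR = BEFORE/AFTER
 * ROW, IR = INSTEAD OF ROW, combined with the operation (Insert, Delete,
 * Update, Truncate). BEFORE and INSTEAD OF row triggers are fired inline and
 * may replace or suppress the tuple; the AFTER variants only queue events
 * via AfterTriggerSaveEvent() for execution at end of query or transaction.
 */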
2178 
2179 TupleTableSlot *
2180 ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2181  TupleTableSlot *slot)
2182 {
2183  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2184  HeapTuple slottuple = ExecMaterializeSlot(slot);
2185  HeapTuple newtuple = slottuple;
2186  HeapTuple oldtuple;
2187  TriggerData LocTriggerData;
2188  int i;
2189 
2190  LocTriggerData.type = T_TriggerData;
2191  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2192  TRIGGER_EVENT_ROW |
2193  TRIGGER_EVENT_BEFORE;
2194  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2195  LocTriggerData.tg_newtuple = NULL;
2196  LocTriggerData.tg_oldtable = NULL;
2197  LocTriggerData.tg_newtable = NULL;
2198  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2199  for (i = 0; i < trigdesc->numtriggers; i++)
2200  {
2201  Trigger *trigger = &trigdesc->triggers[i];
2202 
2203  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2204  TRIGGER_TYPE_ROW,
2205  TRIGGER_TYPE_BEFORE,
2206  TRIGGER_TYPE_INSERT))
2207  continue;
2208  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2209  NULL, NULL, newtuple))
2210  continue;
2211 
2212  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2213  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2214  LocTriggerData.tg_trigger = trigger;
2215  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2216  i,
2217  relinfo->ri_TrigFunctions,
2218  relinfo->ri_TrigInstrument,
2219  GetPerTupleMemoryContext(estate));
2220  if (oldtuple != newtuple && oldtuple != slottuple)
2221  heap_freetuple(oldtuple);
2222  if (newtuple == NULL)
2223  return NULL; /* "do nothing" */
2224  }
2225 
2226  if (newtuple != slottuple)
2227  {
2228  /*
2229  * Return the modified tuple using the es_trig_tuple_slot. We assume
2230  * the tuple was allocated in per-tuple memory context, and therefore
2231  * will go away by itself. The tuple table slot should not try to
2232  * clear it.
2233  */
2234  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2235  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2236 
2237  if (newslot->tts_tupleDescriptor != tupdesc)
2238  ExecSetSlotDescriptor(newslot, tupdesc);
2239  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2240  slot = newslot;
2241  }
2242  return slot;
2243 }
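/*
 * Behavior recap for the function above (descriptive note, no new logic):
 * each BEFORE ROW INSERT trigger may return the same tuple, a replacement
 * tuple, or NULL. NULL cancels the insert for this row; a replacement is fed
 * to the next trigger and finally handed back to the executor through
 * es_trig_tuple_slot. For example, a trigger created with
 *
 *     CREATE TRIGGER t BEFORE INSERT ON tab
 *         FOR EACH ROW EXECUTE PROCEDURE f();
 *
 * (table and function names hypothetical) can rewrite NEW before it is
 * stored.
 */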
2244 
2245 void
2246 ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2247  HeapTuple trigtuple, List *recheckIndexes)
2248 {
2249  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2250 
2251  if (trigdesc &&
2252  (trigdesc->trig_insert_after_row || trigdesc->trig_insert_new_table))
2253  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
2254  true, NULL, trigtuple, recheckIndexes, NULL);
2255 }
2256 
2257 TupleTableSlot *
2258 ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2259  TupleTableSlot *slot)
2260 {
2261  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2262  HeapTuple slottuple = ExecMaterializeSlot(slot);
2263  HeapTuple newtuple = slottuple;
2264  HeapTuple oldtuple;
2265  TriggerData LocTriggerData;
2266  int i;
2267 
2268  LocTriggerData.type = T_TriggerData;
2269  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2270  TRIGGER_EVENT_ROW |
2271  TRIGGER_EVENT_INSTEAD;
2272  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2273  LocTriggerData.tg_newtuple = NULL;
2274  LocTriggerData.tg_oldtable = NULL;
2275  LocTriggerData.tg_newtable = NULL;
2276  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2277  for (i = 0; i < trigdesc->numtriggers; i++)
2278  {
2279  Trigger *trigger = &trigdesc->triggers[i];
2280 
2281  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2282  TRIGGER_TYPE_ROW,
2283  TRIGGER_TYPE_INSTEAD,
2284  TRIGGER_TYPE_INSERT))
2285  continue;
2286  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2287  NULL, NULL, newtuple))
2288  continue;
2289 
2290  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2291  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2292  LocTriggerData.tg_trigger = trigger;
2293  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2294  i,
2295  relinfo->ri_TrigFunctions,
2296  relinfo->ri_TrigInstrument,
2297  GetPerTupleMemoryContext(estate));
2298  if (oldtuple != newtuple && oldtuple != slottuple)
2299  heap_freetuple(oldtuple);
2300  if (newtuple == NULL)
2301  return NULL; /* "do nothing" */
2302  }
2303 
2304  if (newtuple != slottuple)
2305  {
2306  /*
2307  * Return the modified tuple using the es_trig_tuple_slot. We assume
2308  * the tuple was allocated in per-tuple memory context, and therefore
2309  * will go away by itself. The tuple table slot should not try to
2310  * clear it.
2311  */
2312  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2313  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2314 
2315  if (newslot->tts_tupleDescriptor != tupdesc)
2316  ExecSetSlotDescriptor(newslot, tupdesc);
2317  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2318  slot = newslot;
2319  }
2320  return slot;
2321 }
2322 
2323 void
2324 ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2325 {
2326  TriggerDesc *trigdesc;
2327  int i;
2328  TriggerData LocTriggerData;
2329 
2330  trigdesc = relinfo->ri_TrigDesc;
2331 
2332  if (trigdesc == NULL)
2333  return;
2334  if (!trigdesc->trig_delete_before_statement)
2335  return;
2336 
2337  LocTriggerData.type = T_TriggerData;
2338  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2339  TRIGGER_EVENT_BEFORE;
2340  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2341  LocTriggerData.tg_trigtuple = NULL;
2342  LocTriggerData.tg_newtuple = NULL;
2343  LocTriggerData.tg_oldtable = NULL;
2344  LocTriggerData.tg_newtable = NULL;
2345  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2346  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2347  for (i = 0; i < trigdesc->numtriggers; i++)
2348  {
2349  Trigger *trigger = &trigdesc->triggers[i];
2350  HeapTuple newtuple;
2351 
2352  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2353  TRIGGER_TYPE_STATEMENT,
2354  TRIGGER_TYPE_BEFORE,
2355  TRIGGER_TYPE_DELETE))
2356  continue;
2357  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2358  NULL, NULL, NULL))
2359  continue;
2360 
2361  LocTriggerData.tg_trigger = trigger;
2362  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2363  i,
2364  relinfo->ri_TrigFunctions,
2365  relinfo->ri_TrigInstrument,
2366  GetPerTupleMemoryContext(estate));
2367 
2368  if (newtuple)
2369  ereport(ERROR,
2370  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2371  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2372  }
2373 }
2374 
2375 void
2376 ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2377 {
2378  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2379 
2380  if (trigdesc && trigdesc->trig_delete_after_statement)
2381  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2382  false, NULL, NULL, NIL, NULL);
2383 }
2384 
2385 bool
2386 ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2387  ResultRelInfo *relinfo,
2388  ItemPointer tupleid,
2389  HeapTuple fdw_trigtuple)
2390 {
2391  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2392  bool result = true;
2393  TriggerData LocTriggerData;
2394  HeapTuple trigtuple;
2395  HeapTuple newtuple;
2396  TupleTableSlot *newSlot;
2397  int i;
2398 
2399  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2400  if (fdw_trigtuple == NULL)
2401  {
2402  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2403  LockTupleExclusive, &newSlot);
2404  if (trigtuple == NULL)
2405  return false;
2406  }
2407  else
2408  trigtuple = fdw_trigtuple;
2409 
2410  LocTriggerData.type = T_TriggerData;
2411  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2412  TRIGGER_EVENT_ROW |
2413  TRIGGER_EVENT_BEFORE;
2414  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2415  LocTriggerData.tg_newtuple = NULL;
2416  LocTriggerData.tg_oldtable = NULL;
2417  LocTriggerData.tg_newtable = NULL;
2418  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2419  for (i = 0; i < trigdesc->numtriggers; i++)
2420  {
2421  Trigger *trigger = &trigdesc->triggers[i];
2422 
2423  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2424  TRIGGER_TYPE_ROW,
2425  TRIGGER_TYPE_BEFORE,
2426  TRIGGER_TYPE_DELETE))
2427  continue;
2428  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2429  NULL, trigtuple, NULL))
2430  continue;
2431 
2432  LocTriggerData.tg_trigtuple = trigtuple;
2433  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2434  LocTriggerData.tg_trigger = trigger;
2435  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2436  i,
2437  relinfo->ri_TrigFunctions,
2438  relinfo->ri_TrigInstrument,
2439  GetPerTupleMemoryContext(estate));
2440  if (newtuple == NULL)
2441  {
2442  result = false; /* tell caller to suppress delete */
2443  break;
2444  }
2445  if (newtuple != trigtuple)
2446  heap_freetuple(newtuple);
2447  }
2448  if (trigtuple != fdw_trigtuple)
2449  heap_freetuple(trigtuple);
2450 
2451  return result;
2452 }
2453 
2454 void
2455 ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2456  ItemPointer tupleid,
2457  HeapTuple fdw_trigtuple)
2458 {
2459  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2460 
2461  if (trigdesc &&
2462  (trigdesc->trig_delete_after_row || trigdesc->trig_delete_old_table))
2463  {
2464  HeapTuple trigtuple;
2465 
2466  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2467  if (fdw_trigtuple == NULL)
2468  trigtuple = GetTupleForTrigger(estate,
2469  NULL,
2470  relinfo,
2471  tupleid,
2472  LockTupleExclusive,
2473  NULL);
2474  else
2475  trigtuple = fdw_trigtuple;
2476 
2477  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
2478  true, trigtuple, NULL, NIL, NULL);
2479  if (trigtuple != fdw_trigtuple)
2480  heap_freetuple(trigtuple);
2481  }
2482 }
2483 
2484 bool
2485 ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2486  HeapTuple trigtuple)
2487 {
2488  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2489  TriggerData LocTriggerData;
2490  HeapTuple rettuple;
2491  int i;
2492 
2493  LocTriggerData.type = T_TriggerData;
2494  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2495  TRIGGER_EVENT_ROW |
2496  TRIGGER_EVENT_INSTEAD;
2497  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2498  LocTriggerData.tg_newtuple = NULL;
2499  LocTriggerData.tg_oldtable = NULL;
2500  LocTriggerData.tg_newtable = NULL;
2501  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2502  for (i = 0; i < trigdesc->numtriggers; i++)
2503  {
2504  Trigger *trigger = &trigdesc->triggers[i];
2505 
2506  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2507  TRIGGER_TYPE_ROW,
2508  TRIGGER_TYPE_INSTEAD,
2509  TRIGGER_TYPE_DELETE))
2510  continue;
2511  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2512  NULL, trigtuple, NULL))
2513  continue;
2514 
2515  LocTriggerData.tg_trigtuple = trigtuple;
2516  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2517  LocTriggerData.tg_trigger = trigger;
2518  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2519  i,
2520  relinfo->ri_TrigFunctions,
2521  relinfo->ri_TrigInstrument,
2522  GetPerTupleMemoryContext(estate));
2523  if (rettuple == NULL)
2524  return false; /* Delete was suppressed */
2525  if (rettuple != trigtuple)
2526  heap_freetuple(rettuple);
2527  }
2528  return true;
2529 }
2530 
2531 void
2532 ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2533 {
2534  TriggerDesc *trigdesc;
2535  int i;
2536  TriggerData LocTriggerData;
2537  Bitmapset *updatedCols;
2538 
2539  trigdesc = relinfo->ri_TrigDesc;
2540 
2541  if (trigdesc == NULL)
2542  return;
2543  if (!trigdesc->trig_update_before_statement)
2544  return;
2545 
2546  updatedCols = GetUpdatedColumns(relinfo, estate);
2547 
2548  LocTriggerData.type = T_TriggerData;
2549  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2550  TRIGGER_EVENT_BEFORE;
2551  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2552  LocTriggerData.tg_trigtuple = NULL;
2553  LocTriggerData.tg_newtuple = NULL;
2554  LocTriggerData.tg_oldtable = NULL;
2555  LocTriggerData.tg_newtable = NULL;
2556  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2557  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2558  for (i = 0; i < trigdesc->numtriggers; i++)
2559  {
2560  Trigger *trigger = &trigdesc->triggers[i];
2561  HeapTuple newtuple;
2562 
2563  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2564  TRIGGER_TYPE_STATEMENT,
2565  TRIGGER_TYPE_BEFORE,
2566  TRIGGER_TYPE_UPDATE))
2567  continue;
2568  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2569  updatedCols, NULL, NULL))
2570  continue;
2571 
2572  LocTriggerData.tg_trigger = trigger;
2573  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2574  i,
2575  relinfo->ri_TrigFunctions,
2576  relinfo->ri_TrigInstrument,
2577  GetPerTupleMemoryContext(estate));
2578 
2579  if (newtuple)
2580  ereport(ERROR,
2581  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2582  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2583  }
2584 }
2585 
2586 void
2587 ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2588 {
2589  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2590 
2591  if (trigdesc && trigdesc->trig_update_after_statement)
2592  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2593  false, NULL, NULL, NIL,
2594  GetUpdatedColumns(relinfo, estate));
2595 }
2596 
2597 TupleTableSlot *
2598 ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
2599  ResultRelInfo *relinfo,
2600  ItemPointer tupleid,
2601  HeapTuple fdw_trigtuple,
2602  TupleTableSlot *slot)
2603 {
2604  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2605  HeapTuple slottuple = ExecMaterializeSlot(slot);
2606  HeapTuple newtuple = slottuple;
2607  TriggerData LocTriggerData;
2608  HeapTuple trigtuple;
2609  HeapTuple oldtuple;
2610  TupleTableSlot *newSlot;
2611  int i;
2612  Bitmapset *updatedCols;
2613  LockTupleMode lockmode;
2614 
2615  /* Determine lock mode to use */
2616  lockmode = ExecUpdateLockMode(estate, relinfo);
2617 
2618  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2619  if (fdw_trigtuple == NULL)
2620  {
2621  /* get a copy of the on-disk tuple we are planning to update */
2622  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2623  lockmode, &newSlot);
2624  if (trigtuple == NULL)
2625  return NULL; /* cancel the update action */
2626  }
2627  else
2628  {
2629  trigtuple = fdw_trigtuple;
2630  newSlot = NULL;
2631  }
2632 
2633  /*
2634  * In READ COMMITTED isolation level it's possible that target tuple was
2635  * changed due to concurrent update. In that case we have a raw subplan
2636  * output tuple in newSlot, and need to run it through the junk filter to
2637  * produce an insertable tuple.
2638  *
2639  * Caution: more than likely, the passed-in slot is the same as the
2640  * junkfilter's output slot, so we are clobbering the original value of
2641  * slottuple by doing the filtering. This is OK since neither we nor our
2642  * caller have any more interest in the prior contents of that slot.
2643  */
2644  if (newSlot != NULL)
2645  {
2646  slot = ExecFilterJunk(relinfo->ri_junkFilter, newSlot);
2647  slottuple = ExecMaterializeSlot(slot);
2648  newtuple = slottuple;
2649  }
2650 
2651 
2652  LocTriggerData.type = T_TriggerData;
2653  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2654  TRIGGER_EVENT_ROW |
2655  TRIGGER_EVENT_BEFORE;
2656  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2657  LocTriggerData.tg_oldtable = NULL;
2658  LocTriggerData.tg_newtable = NULL;
2659  updatedCols = GetUpdatedColumns(relinfo, estate);
2660  for (i = 0; i < trigdesc->numtriggers; i++)
2661  {
2662  Trigger *trigger = &trigdesc->triggers[i];
2663 
2664  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2665  TRIGGER_TYPE_ROW,
2666  TRIGGER_TYPE_BEFORE,
2667  TRIGGER_TYPE_UPDATE))
2668  continue;
2669  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2670  updatedCols, trigtuple, newtuple))
2671  continue;
2672 
2673  LocTriggerData.tg_trigtuple = trigtuple;
2674  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2675  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2676  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2677  LocTriggerData.tg_trigger = trigger;
2678  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2679  i,
2680  relinfo->ri_TrigFunctions,
2681  relinfo->ri_TrigInstrument,
2682  GetPerTupleMemoryContext(estate));
2683  if (oldtuple != newtuple && oldtuple != slottuple)
2684  heap_freetuple(oldtuple);
2685  if (newtuple == NULL)
2686  {
2687  if (trigtuple != fdw_trigtuple)
2688  heap_freetuple(trigtuple);
2689  return NULL; /* "do nothing" */
2690  }
2691  }
2692  if (trigtuple != fdw_trigtuple)
2693  heap_freetuple(trigtuple);
2694 
2695  if (newtuple != slottuple)
2696  {
2697  /*
2698  * Return the modified tuple using the es_trig_tuple_slot. We assume
2699  * the tuple was allocated in per-tuple memory context, and therefore
2700  * will go away by itself. The tuple table slot should not try to
2701  * clear it.
2702  */
2703  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2704  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2705 
2706  if (newslot->tts_tupleDescriptor != tupdesc)
2707  ExecSetSlotDescriptor(newslot, tupdesc);
2708  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2709  slot = newslot;
2710  }
2711  return slot;
2712 }
2713 
2714 void
2715 ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2716  ItemPointer tupleid,
2717  HeapTuple fdw_trigtuple,
2718  HeapTuple newtuple,
2719  List *recheckIndexes)
2720 {
2721  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2722 
2723  if (trigdesc && (trigdesc->trig_update_after_row ||
2724  trigdesc->trig_update_old_table || trigdesc->trig_update_new_table))
2725  {
2726  HeapTuple trigtuple;
2727 
2728  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2729  if (fdw_trigtuple == NULL)
2730  trigtuple = GetTupleForTrigger(estate,
2731  NULL,
2732  relinfo,
2733  tupleid,
2734  LockTupleExclusive,
2735  NULL);
2736  else
2737  trigtuple = fdw_trigtuple;
2738 
2739  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
2740  true, trigtuple, newtuple, recheckIndexes,
2741  GetUpdatedColumns(relinfo, estate));
2742  if (trigtuple != fdw_trigtuple)
2743  heap_freetuple(trigtuple);
2744  }
2745 }
2746 
2747 TupleTableSlot *
2748 ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2749  HeapTuple trigtuple, TupleTableSlot *slot)
2750 {
2751  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2752  HeapTuple slottuple = ExecMaterializeSlot(slot);
2753  HeapTuple newtuple = slottuple;
2754  TriggerData LocTriggerData;
2755  HeapTuple oldtuple;
2756  int i;
2757 
2758  LocTriggerData.type = T_TriggerData;
2759  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2760  TRIGGER_EVENT_ROW |
2761  TRIGGER_EVENT_INSTEAD;
2762  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2763  LocTriggerData.tg_oldtable = NULL;
2764  LocTriggerData.tg_newtable = NULL;
2765  for (i = 0; i < trigdesc->numtriggers; i++)
2766  {
2767  Trigger *trigger = &trigdesc->triggers[i];
2768 
2769  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2770  TRIGGER_TYPE_ROW,
2771  TRIGGER_TYPE_INSTEAD,
2772  TRIGGER_TYPE_UPDATE))
2773  continue;
2774  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2775  NULL, trigtuple, newtuple))
2776  continue;
2777 
2778  LocTriggerData.tg_trigtuple = trigtuple;
2779  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2780  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2781  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2782  LocTriggerData.tg_trigger = trigger;
2783  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2784  i,
2785  relinfo->ri_TrigFunctions,
2786  relinfo->ri_TrigInstrument,
2787  GetPerTupleMemoryContext(estate));
2788  if (oldtuple != newtuple && oldtuple != slottuple)
2789  heap_freetuple(oldtuple);
2790  if (newtuple == NULL)
2791  return NULL; /* "do nothing" */
2792  }
2793 
2794  if (newtuple != slottuple)
2795  {
2796  /*
2797  * Return the modified tuple using the es_trig_tuple_slot. We assume
2798  * the tuple was allocated in per-tuple memory context, and therefore
2799  * will go away by itself. The tuple table slot should not try to
2800  * clear it.
2801  */
2802  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2803  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2804 
2805  if (newslot->tts_tupleDescriptor != tupdesc)
2806  ExecSetSlotDescriptor(newslot, tupdesc);
2807  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2808  slot = newslot;
2809  }
2810  return slot;
2811 }
2812 
2813 void
2814 ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2815 {
2816  TriggerDesc *trigdesc;
2817  int i;
2818  TriggerData LocTriggerData;
2819 
2820  trigdesc = relinfo->ri_TrigDesc;
2821 
2822  if (trigdesc == NULL)
2823  return;
2824  if (!trigdesc->trig_truncate_before_statement)
2825  return;
2826 
2827  LocTriggerData.type = T_TriggerData;
2828  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
2829  TRIGGER_EVENT_BEFORE;
2830  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2831  LocTriggerData.tg_trigtuple = NULL;
2832  LocTriggerData.tg_newtuple = NULL;
2833  LocTriggerData.tg_oldtable = NULL;
2834  LocTriggerData.tg_newtable = NULL;
2835  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2836  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2837  for (i = 0; i < trigdesc->numtriggers; i++)
2838  {
2839  Trigger *trigger = &trigdesc->triggers[i];
2840  HeapTuple newtuple;
2841 
2842  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2843  TRIGGER_TYPE_STATEMENT,
2844  TRIGGER_TYPE_BEFORE,
2845  TRIGGER_TYPE_TRUNCATE))
2846  continue;
2847  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2848  NULL, NULL, NULL))
2849  continue;
2850 
2851  LocTriggerData.tg_trigger = trigger;
2852  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2853  i,
2854  relinfo->ri_TrigFunctions,
2855  relinfo->ri_TrigInstrument,
2856  GetPerTupleMemoryContext(estate));
2857 
2858  if (newtuple)
2859  ereport(ERROR,
2860  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2861  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2862  }
2863 }
2864 
2865 void
2866 ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
2867 {
2868  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2869 
2870  if (trigdesc && trigdesc->trig_truncate_after_statement)
2871  AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_TRUNCATE,
2872  false, NULL, NULL, NIL, NULL);
2873 }
2874 
2875 
2876 static HeapTuple
2877 GetTupleForTrigger(EState *estate,
2878  EPQState *epqstate,
2879  ResultRelInfo *relinfo,
2880  ItemPointer tid,
2881  LockTupleMode lockmode,
2882  TupleTableSlot **newSlot)
2883 {
2884  Relation relation = relinfo->ri_RelationDesc;
2885  HeapTupleData tuple;
2886  HeapTuple result;
2887  Buffer buffer;
2888 
2889  if (newSlot != NULL)
2890  {
2891  HTSU_Result test;
2892  HeapUpdateFailureData hufd;
2893 
2894  *newSlot = NULL;
2895 
2896  /* caller must pass an epqstate if EvalPlanQual is possible */
2897  Assert(epqstate != NULL);
2898 
2899  /*
2900  * lock tuple for update
2901  */
2902 ltrmark:;
2903  tuple.t_self = *tid;
2904  test = heap_lock_tuple(relation, &tuple,
2905  estate->es_output_cid,
2906  lockmode, LockWaitBlock,
2907  false, &buffer, &hufd);
2908  switch (test)
2909  {
2910  case HeapTupleSelfUpdated:
2911 
2912  /*
2913  * The target tuple was already updated or deleted by the
2914  * current command, or by a later command in the current
2915  * transaction. We ignore the tuple in the former case, and
2916  * throw error in the latter case, for the same reasons
2917  * enumerated in ExecUpdate and ExecDelete in
2918  * nodeModifyTable.c.
2919  */
2920  if (hufd.cmax != estate->es_output_cid)
2921  ereport(ERROR,
2922  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2923  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2924  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2925 
2926  /* treat it as deleted; do not process */
2927  ReleaseBuffer(buffer);
2928  return NULL;
2929 
2930  case HeapTupleMayBeUpdated:
2931  break;
2932 
2933  case HeapTupleUpdated:
2934  ReleaseBuffer(buffer);
2935  if (IsolationUsesXactSnapshot())
2936  ereport(ERROR,
2937  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2938  errmsg("could not serialize access due to concurrent update")));
2939  if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2940  {
2941  /* it was updated, so look at the updated version */
2942  TupleTableSlot *epqslot;
2943 
2944  epqslot = EvalPlanQual(estate,
2945  epqstate,
2946  relation,
2947  relinfo->ri_RangeTableIndex,
2948  lockmode,
2949  &hufd.ctid,
2950  hufd.xmax);
2951  if (!TupIsNull(epqslot))
2952  {
2953  *tid = hufd.ctid;
2954  *newSlot = epqslot;
2955 
2956  /*
2957  * EvalPlanQual already locked the tuple, but we
2958  * re-call heap_lock_tuple anyway as an easy way of
2959  * re-fetching the correct tuple. Speed is hardly a
2960  * criterion in this path anyhow.
2961  */
2962  goto ltrmark;
2963  }
2964  }
2965 
2966  /*
2967  * if tuple was deleted or PlanQual failed for updated tuple -
2968  * we must not process this tuple!
2969  */
2970  return NULL;
2971 
2972  case HeapTupleInvisible:
2973  elog(ERROR, "attempted to lock invisible tuple");
2974 
2975  default:
2976  ReleaseBuffer(buffer);
2977  elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
2978  return NULL; /* keep compiler quiet */
2979  }
2980  }
2981  else
2982  {
2983  Page page;
2984  ItemId lp;
2985 
2986  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2987 
2988  /*
2989  * Although we already know this tuple is valid, we must lock the
2990  * buffer to ensure that no one has a buffer cleanup lock; otherwise
2991  * they might move the tuple while we try to copy it. But we can
2992  * release the lock before actually doing the heap_copytuple call,
2993  * since holding pin is sufficient to prevent anyone from getting a
2994  * cleanup lock they don't already hold.
2995  */
2996  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2997 
2998  page = BufferGetPage(buffer);
2999  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3000 
3001  Assert(ItemIdIsNormal(lp));
3002 
3003  tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3004  tuple.t_len = ItemIdGetLength(lp);
3005  tuple.t_self = *tid;
3006  tuple.t_tableOid = RelationGetRelid(relation);
3007 
3008  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3009  }
3010 
3011  result = heap_copytuple(&tuple);
3012  ReleaseBuffer(buffer);
3013 
3014  return result;
3015 }
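/*
 * Control-flow summary for the locking path above (descriptive note): under
 * READ COMMITTED, a concurrent update routes us through EvalPlanQual(); if
 * the recheck yields a tuple we jump back to ltrmark and lock the updated
 * version, otherwise the row is skipped by returning NULL. Under
 * serializable-style isolation the same situation raises a serialization
 * failure instead of rechecking.
 */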
3016 
3017 /*
3018  * Is trigger enabled to fire?
3019  */
3020 static bool
3021 TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3022  Trigger *trigger, TriggerEvent event,
3023  Bitmapset *modifiedCols,
3024  HeapTuple oldtup, HeapTuple newtup)
3025 {
3026  /* Check replication-role-dependent enable state */
3027  if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3028  {
3029  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3030  trigger->tgenabled == TRIGGER_DISABLED)
3031  return false;
3032  }
3033  else /* ORIGIN or LOCAL role */
3034  {
3035  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3036  trigger->tgenabled == TRIGGER_DISABLED)
3037  return false;
3038  }
3039 
3040  /*
3041  * Check for column-specific trigger (only possible for UPDATE, and in
3042  * fact we *must* ignore tgattr for other event types)
3043  */
3044  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3045  {
3046  int i;
3047  bool modified;
3048 
3049  modified = false;
3050  for (i = 0; i < trigger->tgnattr; i++)
3051  {
3052  if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3053  modifiedCols))
3054  {
3055  modified = true;
3056  break;
3057  }
3058  }
3059  if (!modified)
3060  return false;
3061  }
3062 
3063  /* Check for WHEN clause */
3064  if (trigger->tgqual)
3065  {
3066  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
3067  ExprState **predicate;
3068  ExprContext *econtext;
3069  TupleTableSlot *oldslot = NULL;
3070  TupleTableSlot *newslot = NULL;
3071  MemoryContext oldContext;
3072  int i;
3073 
3074  Assert(estate != NULL);
3075 
3076  /*
3077  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3078  * matching element of relinfo->ri_TrigWhenExprs[]
3079  */
3080  i = trigger - relinfo->ri_TrigDesc->triggers;
3081  predicate = &relinfo->ri_TrigWhenExprs[i];
3082 
3083  /*
3084  * If first time through for this WHEN expression, build expression
3085  * nodetrees for it. Keep them in the per-query memory context so
3086  * they'll survive throughout the query.
3087  */
3088  if (*predicate == NULL)
3089  {
3090  Node *tgqual;
3091 
3092  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3093  tgqual = stringToNode(trigger->tgqual);
3094  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3095  ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3096  ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3097  /* ExecPrepareQual wants implicit-AND form */
3098  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3099  *predicate = ExecPrepareQual((List *) tgqual, estate);
3100  MemoryContextSwitchTo(oldContext);
3101  }
3102 
3103  /*
3104  * We will use the EState's per-tuple context for evaluating WHEN
3105  * expressions (creating it if it's not already there).
3106  */
3107  econtext = GetPerTupleExprContext(estate);
3108 
3109  /*
3110  * Put OLD and NEW tuples into tupleslots for expression evaluation.
3111  * These slots can be shared across the whole estate, but be careful
3112  * that they have the current resultrel's tupdesc.
3113  */
3114  if (HeapTupleIsValid(oldtup))
3115  {
3116  if (estate->es_trig_oldtup_slot == NULL)
3117  {
3118  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3119  estate->es_trig_oldtup_slot = ExecInitExtraTupleSlot(estate);
3120  MemoryContextSwitchTo(oldContext);
3121  }
3122  oldslot = estate->es_trig_oldtup_slot;
3123  if (oldslot->tts_tupleDescriptor != tupdesc)
3124  ExecSetSlotDescriptor(oldslot, tupdesc);
3125  ExecStoreTuple(oldtup, oldslot, InvalidBuffer, false);
3126  }
3127  if (HeapTupleIsValid(newtup))
3128  {
3129  if (estate->es_trig_newtup_slot == NULL)
3130  {
3131  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3132  estate->es_trig_newtup_slot = ExecInitExtraTupleSlot(estate);
3133  MemoryContextSwitchTo(oldContext);
3134  }
3135  newslot = estate->es_trig_newtup_slot;
3136  if (newslot->tts_tupleDescriptor != tupdesc)
3137  ExecSetSlotDescriptor(newslot, tupdesc);
3138  ExecStoreTuple(newtup, newslot, InvalidBuffer, false);
3139  }
3140 
3141  /*
3142  * Finally evaluate the expression, making the old and/or new tuples
3143  * available as INNER_VAR/OUTER_VAR respectively.
3144  */
3145  econtext->ecxt_innertuple = oldslot;
3146  econtext->ecxt_outertuple = newslot;
3147  if (!ExecQual(*predicate, econtext))
3148  return false;
3149  }
3150 
3151  return true;
3152 }
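/*
 * Mapping sketch (hypothetical SQL, not from this file): a trigger defined as
 *
 *     CREATE TRIGGER t AFTER UPDATE OF price ON items
 *         FOR EACH ROW
 *         WHEN (OLD.price IS DISTINCT FROM NEW.price)
 *         EXECUTE PROCEDURE log_price();
 *
 * is filtered in TriggerEnabled() in two steps: the "OF price" column list is
 * checked against tgattr/modifiedCols, and the WHEN clause stored in tgqual
 * is evaluated with OLD and NEW exposed as INNER_VAR and OUTER_VAR. Table,
 * column, and function names above are illustrative only.
 */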
3153 
3154 
3155 /* ----------
3156  * After-trigger stuff
3157  *
3158  * The AfterTriggersData struct holds data about pending AFTER trigger events
3159  * during the current transaction tree. (BEFORE triggers are fired
3160  * immediately so we don't need any persistent state about them.) The struct
3161  * and most of its subsidiary data are kept in TopTransactionContext; however
3162  * the individual event records are kept in a separate sub-context. This is
3163  * done mainly so that it's easy to tell from a memory context dump how much
3164  * space is being eaten by trigger events.
3165  *
3166  * Because the list of pending events can grow large, we go to some
3167  * considerable effort to minimize per-event memory consumption. The event
3168  * records are grouped into chunks and common data for similar events in the
3169  * same chunk is only stored once.
3170  *
3171  * XXX We need to be able to save the per-event data in a file if it grows too
3172  * large.
3173  * ----------
3174  */
3175 
3176 /* Per-trigger SET CONSTRAINT status */
3177 typedef struct SetConstraintTriggerData
3178 {
3179  Oid sct_tgoid;
3180  bool sct_tgisdeferred;
3181 } SetConstraintTriggerData;
3182 
3183 typedef SetConstraintTriggerData *SetConstraintTrigger;
3184 
3185 /*
3186  * SET CONSTRAINT intra-transaction status.
3187  *
3188  * We make this a single palloc'd object so it can be copied and freed easily.
3189  *
3190  * all_isset and all_isdeferred are used to keep track
3191  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3192  *
3193  * trigstates[] stores per-trigger tgisdeferred settings.
3194  */
3195 typedef struct SetConstraintStateData
3196 {
3197  bool all_isset;
3198  bool all_isdeferred;
3199  int numstates; /* number of trigstates[] entries in use */
3200  int numalloc; /* allocated size of trigstates[] */
3201  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3202 } SetConstraintStateData;
3203 
3204 typedef SetConstraintStateData *SetConstraintState;
3205 
3206 
3207 /*
3208  * Per-trigger-event data
3209  *
3210  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3211  * status bits and up to two tuple CTIDs. Each event record also has an
3212  * associated AfterTriggerSharedData that is shared across all instances of
3213  * similar events within a "chunk".
3214  *
3215  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3216  * fields. Updates of regular tables use two; inserts and deletes of regular
3217  * tables use one; foreign tables always use zero and save the tuple(s) to a
3218  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3219  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3220  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3221  * tuple(s). This permits storing tuples once regardless of the number of
3222  * row-level triggers on a foreign table.
3223  *
3224  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3225  * require no ctid field. We lack the flag bit space to neatly represent that
3226  * distinct case, and it seems unlikely to be worth much trouble.
3227  *
3228  * Note: ats_firing_id is initially zero and is set to something else when
3229  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3230  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3231  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3232  * because all instances of the same type of event in a given event list will
3233  * be fired at the same time, if they were queued between the same firing
3234  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3235  * a new event to an existing AfterTriggerSharedData record.
3236  */
3237 typedef uint32 TriggerFlags;
3238 
3239 #define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order
3240  * bits */
3241 #define AFTER_TRIGGER_DONE 0x10000000
3242 #define AFTER_TRIGGER_IN_PROGRESS 0x20000000
3243 /* bits describing the size and tuple sources of this event */
3244 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3245 #define AFTER_TRIGGER_FDW_FETCH 0x80000000
3246 #define AFTER_TRIGGER_1CTID 0x40000000
3247 #define AFTER_TRIGGER_2CTID 0xC0000000
3248 #define AFTER_TRIGGER_TUP_BITS 0xC0000000
3249 
3250 typedef struct AfterTriggerSharedData *AfterTriggerShared;
3251 
3252 typedef struct AfterTriggerSharedData
3253 {
3254  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3255  Oid ats_tgoid; /* the trigger's ID */
3256  Oid ats_relid; /* the relation it's on */
3257  CommandId ats_firing_id; /* ID for firing cycle */
3258 } AfterTriggerSharedData;
3259 
3260 typedef struct AfterTriggerEventData *AfterTriggerEvent;
3261 
3262 typedef struct AfterTriggerEventData
3263 {
3264  TriggerFlags ate_flags; /* status bits and offset to shared data */
3265  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3266  ItemPointerData ate_ctid2; /* new updated tuple */
3267 } AfterTriggerEventData;
3268 
3269 /* AfterTriggerEventData, minus ate_ctid2 */
3270 typedef struct AfterTriggerEventDataOneCtid
3271 {
3272  TriggerFlags ate_flags; /* status bits and offset to shared data */
3273  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3274 } AfterTriggerEventDataOneCtid;
3275 
3276 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
3277 typedef struct AfterTriggerEventDataZeroCtids
3278 {
3279  TriggerFlags ate_flags; /* status bits and offset to shared data */
3280 } AfterTriggerEventDataZeroCtids;
3281 
3282 #define SizeofTriggerEvent(evt) \
3283  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3284  sizeof(AfterTriggerEventData) : \
3285  ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3286  sizeof(AfterTriggerEventDataOneCtid) : \
3287  sizeof(AfterTriggerEventDataZeroCtids))
3288 
3289 #define GetTriggerSharedData(evt) \
3290  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
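/*
 * Worked example of the flag layout (derived from the two macros above): the
 * low 28 bits of ate_flags (AFTER_TRIGGER_OFFSET) hold the byte distance from
 * the event record forward to its AfterTriggerSharedData near the end of the
 * chunk, so GetTriggerSharedData(evt) is just pointer-plus-offset. The high
 * bits select the record size: 2CTID events are sizeof(AfterTriggerEventData),
 * 1CTID events omit ate_ctid2, and FDW events omit both ctids.
 */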
3291 
3292 /*
3293  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3294  * larger chunks (a slightly more sophisticated version of an expansible
3295  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3296  * AfterTriggerEventData records; the space between endfree and endptr is
3297  * occupied by AfterTriggerSharedData records.
3298  */
3299 typedef struct AfterTriggerEventChunk
3300 {
3301  struct AfterTriggerEventChunk *next; /* list link */
3302  char *freeptr; /* start of free space in chunk */
3303  char *endfree; /* end of free space in chunk */
3304  char *endptr; /* end of chunk */
3305  /* event data follows here */
3306 } AfterTriggerEventChunk;
3307 
3308 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3309 
3310 /* A list of events */
3311 typedef struct AfterTriggerEventList
3312 {
3313  AfterTriggerEventChunk *head;
3314  AfterTriggerEventChunk *tail;
3315  char *tailfree; /* freeptr of tail chunk */
3316 } AfterTriggerEventList;
3317 
3318 /* Macros to help in iterating over a list of events */
3319 #define for_each_chunk(cptr, evtlist) \
3320  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3321 #define for_each_event(eptr, cptr) \
3322  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3323  (char *) eptr < (cptr)->freeptr; \
3324  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3325 /* Use this if no special per-chunk processing is needed */
3326 #define for_each_event_chunk(eptr, cptr, evtlist) \
3327  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
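/*
 * Iteration sketch (illustrative use of the macros above, following the
 * pattern used elsewhere in this file): a typical scan over queued events
 * might look like
 *
 *     AfterTriggerEvent event;
 *     AfterTriggerEventChunk *chunk;
 *
 *     for_each_event_chunk(event, chunk, afterTriggers.events)
 *     {
 *         AfterTriggerShared evtshared = GetTriggerSharedData(event);
 *         ... examine evtshared->ats_tgoid, event->ate_flags, etc. ...
 *     }
 */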
3328 
3329 
3330 /*
3331  * All per-transaction data for the AFTER TRIGGERS module.
3332  *
3333  * AfterTriggersData has the following fields:
3334  *
3335  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3336  * We mark firable events with the current firing cycle's ID so that we can
3337  * tell which ones to work on. This ensures sane behavior if a trigger
3338  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3339  * only fire those events that weren't already scheduled for firing.
3340  *
3341  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3342  * This is saved and restored across failed subtransactions.
3343  *
3344  * events is the current list of deferred events. This is global across
3345  * all subtransactions of the current transaction. In a subtransaction
3346  * abort, we know that the events added by the subtransaction are at the
3347  * end of the list, so it is relatively easy to discard them. The event
3348  * list chunks themselves are stored in event_cxt.
3349  *
3350  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3351  * (-1 when the stack is empty).
3352  *
3353  * query_stack[query_depth] is a list of AFTER trigger events queued by the
3354  * current query (and the query_stack entries below it are lists of trigger
3355  * events queued by calling queries). None of these are valid until the
3356  * matching AfterTriggerEndQuery call occurs. At that point we fire
3357  * immediate-mode triggers, and append any deferred events to the main events
3358  * list.
3359  *
3360  * fdw_tuplestores[query_depth] is a tuplestore containing the foreign tuples
3361  * needed for the current query.
3362  *
3363  * old_tuplestores[query_depth] and new_tuplestores[query_depth] hold the
3364  * transition relations for the current query.
3365  *
3366  * maxquerydepth is just the allocated length of query_stack and the
3367  * tuplestores.
3368  *
3369  * state_stack is a stack of pointers to saved copies of the SET CONSTRAINTS
3370  * state data; each subtransaction level that modifies that state first
3371  * saves a copy, which we use to restore the state if we abort.
3372  *
3373  * events_stack is a stack of copies of the events head/tail pointers,
3374  * which we use to restore those values during subtransaction abort.
3375  *
3376  * depth_stack is a stack of copies of subtransaction-start-time query_depth,
3377  * which we similarly use to clean up at subtransaction abort.
3378  *
3379  * firing_stack is a stack of copies of subtransaction-start-time
3380  * firing_counter. We use this to recognize which deferred triggers were
3381  * fired (or marked for firing) within an aborted subtransaction.
3382  *
3383  * We use GetCurrentTransactionNestLevel() to determine the correct array
3384  * index in these stacks. maxtransdepth is the number of allocated entries in
3385  * each stack. (By not keeping our own stack pointer, we can avoid trouble
3386  * in cases where errors during subxact abort cause multiple invocations
3387  * of AfterTriggerEndSubXact() at the same nesting depth.)
3388  */
3389 typedef struct AfterTriggersData
3390 {
3391  CommandId firing_counter; /* next firing ID to assign */
3392  SetConstraintState state; /* the active S C state */
3393  AfterTriggerEventList events; /* deferred-event list */
3394  int query_depth; /* current query list index */
3395  AfterTriggerEventList *query_stack; /* events pending from each query */
3396  Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from each query */
3397  Tuplestorestate **old_tuplestores; /* all old tuples from each query */
3398  Tuplestorestate **new_tuplestores; /* all new tuples from each query */
3399  int maxquerydepth; /* allocated len of above array */
3400  MemoryContext event_cxt; /* memory context for events, if any */
3401 
3402  /* these fields are just for resetting at subtrans abort: */
3403 
3404  SetConstraintState *state_stack; /* stacked S C states */
3405  AfterTriggerEventList *events_stack; /* stacked list pointers */
3406  int *depth_stack; /* stacked query_depths */
3407  CommandId *firing_stack; /* stacked firing_counters */
3408  int maxtransdepth; /* allocated len of above arrays */
3409 } AfterTriggersData;
3410 
3411 static AfterTriggersData afterTriggers;
3412 
3413 static void AfterTriggerExecute(AfterTriggerEvent event,
3414  Relation rel, TriggerDesc *trigdesc,
3415  FmgrInfo *finfo,
3416  Instrumentation *instr,
3417  MemoryContext per_tuple_context,
3418  TupleTableSlot *trig_tuple_slot1,
3419  TupleTableSlot *trig_tuple_slot2);
3420 static SetConstraintState SetConstraintStateCreate(int numalloc);
3421 static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
3422 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3423  Oid tgoid, bool tgisdeferred);
3424 
3425 
3426 /*
3427  * Gets a current query transition tuplestore and initializes it if necessary.
3428  * This can be holding a single transition row tuple (in the case of an FDW)
3429  * or a transition table (for an AFTER trigger).
3430  */
3431 static Tuplestorestate *
3432 GetTriggerTransitionTuplestore(Tuplestorestate **tss)
3433 {
3434  Tuplestorestate *ret;
3435 
3436  ret = tss[afterTriggers.query_depth];
3437  if (ret == NULL)
3438  {
3439  MemoryContext oldcxt;
3440  ResourceOwner saveResourceOwner;
3441 
3442  /*
3443  * Make the tuplestore valid until end of transaction. This is the
3444  * allocation lifespan of the associated events list, but we really
3445  * only need it until AfterTriggerEndQuery().
3446  */
3447  oldcxt = MemoryContextSwitchTo(TopTransactionContext);
3448  saveResourceOwner = CurrentResourceOwner;
3449  PG_TRY();
3450  {
3451  CurrentResourceOwner = TopTransactionResourceOwner;
3452  ret = tuplestore_begin_heap(false, false, work_mem);
3453  }
3454  PG_CATCH();
3455  {
3456  CurrentResourceOwner = saveResourceOwner;
3457  PG_RE_THROW();
3458  }
3459  PG_END_TRY();
3460  CurrentResourceOwner = saveResourceOwner;
3461  MemoryContextSwitchTo(oldcxt);
3462 
3463  tss[afterTriggers.query_depth] = ret;
3464  }
3465 
3466  return ret;
3467 }
3468 
3469 /* ----------
3470  * afterTriggerCheckState()
3471  *
3472  * Returns true if the trigger event is actually in state DEFERRED.
3473  * ----------
3474  */
3475 static bool
3476 afterTriggerCheckState(AfterTriggerShared evtshared)
3477 {
3478  Oid tgoid = evtshared->ats_tgoid;
3479  SetConstraintState state = afterTriggers.state;
3480  int i;
3481 
3482  /*
3483  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3484  * constraints declared NOT DEFERRABLE), the state is always false.
3485  */
3486  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3487  return false;
3488 
3489  /*
3490  * If constraint state exists, SET CONSTRAINTS might have been executed
3491  * either for this trigger or for all triggers.
3492  */
3493  if (state != NULL)
3494  {
3495  /* Check for SET CONSTRAINTS for this specific trigger. */
3496  for (i = 0; i < state->numstates; i++)
3497  {
3498  if (state->trigstates[i].sct_tgoid == tgoid)
3499  return state->trigstates[i].sct_tgisdeferred;
3500  }
3501 
3502  /* Check for SET CONSTRAINTS ALL. */
3503  if (state->all_isset)
3504  return state->all_isdeferred;
3505  }
3506 
3507  /*
3508  * Otherwise return the default state for the trigger.
3509  */
3510  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3511 }
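/*
 * Resolution-order recap for the function above (descriptive note): an
 * explicit SET CONSTRAINTS entry naming this trigger wins, then a
 * SET CONSTRAINTS ALL setting, and only then the trigger's own INITIALLY
 * DEFERRED/IMMEDIATE property; non-deferrable triggers never wait.
 */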
3512 
3513 
3514 /* ----------
3515  * afterTriggerAddEvent()
3516  *
3517  * Add a new trigger event to the specified queue.
3518  * The passed-in event data is copied.
3519  * ----------
3520  */
3521 static void
3523  AfterTriggerEvent event, AfterTriggerShared evtshared)
3524 {
3525  Size eventsize = SizeofTriggerEvent(event);
3526  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3527  AfterTriggerEventChunk *chunk;
3528  AfterTriggerShared newshared;
3529  AfterTriggerEvent newevent;
3530 
3531  /*
3532  * If empty list or not enough room in the tail chunk, make a new chunk.
3533  * We assume here that a new shared record will always be needed.
3534  */
3535  chunk = events->tail;
3536  if (chunk == NULL ||
3537  chunk->endfree - chunk->freeptr < needed)
3538  {
3539  Size chunksize;
3540 
3541  /* Create event context if we didn't already */
3542  if (afterTriggers.event_cxt == NULL)
3543  afterTriggers.event_cxt =
3544  AllocSetContextCreate(TopTransactionContext,
3545  "AfterTriggerEvents",
3546  ALLOCSET_DEFAULT_SIZES);
3547 
3548  /*
3549  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3550  * These numbers are fairly arbitrary, though there is a hard limit at
3551  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3552  * shared records using the available space in ate_flags. Another
3553  * constraint is that if the chunk size gets too huge, the search loop
3554  * below would get slow given a (not too common) usage pattern with
3555  * many distinct event types in a chunk. Therefore, we double the
3556  * preceding chunk size only if there weren't too many shared records
3557  * in the preceding chunk; otherwise we halve it. This gives us some
3558  * ability to adapt to the actual usage pattern of the current query
3559  * while still having large chunk sizes in typical usage. All chunk
3560  * sizes used should be MAXALIGN multiples, to ensure that the shared
3561  * records will be aligned safely.
3562  */
3563 #define MIN_CHUNK_SIZE 1024
3564 #define MAX_CHUNK_SIZE (1024*1024)
3565 
3566 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3567 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3568 #endif
3569 
3570  if (chunk == NULL)
3571  chunksize = MIN_CHUNK_SIZE;
3572  else
3573  {
3574  /* preceding chunk size... */
3575  chunksize = chunk->endptr - (char *) chunk;
3576  /* check number of shared records in preceding chunk */
3577  if ((chunk->endptr - chunk->endfree) <=
3578  (100 * sizeof(AfterTriggerSharedData)))
3579  chunksize *= 2; /* okay, double it */
3580  else
3581  chunksize /= 2; /* too many shared records */
3582  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
3583  }
3584  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
3585  chunk->next = NULL;
3586  chunk->freeptr = CHUNK_DATA_START(chunk);
3587  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
3588  Assert(chunk->endfree - chunk->freeptr >= needed);
3589 
3590  if (events->head == NULL)
3591  events->head = chunk;
3592  else
3593  events->tail->next = chunk;
3594  events->tail = chunk;
3595  /* events->tailfree is now out of sync, but we'll fix it below */
3596  }
3597 
3598  /*
3599  * Try to locate a matching shared-data record already in the chunk. If
3600  * none, make a new one.
3601  */
3602  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
3603  (char *) newshared >= chunk->endfree;
3604  newshared--)
3605  {
3606  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
3607  newshared->ats_relid == evtshared->ats_relid &&
3608  newshared->ats_event == evtshared->ats_event &&
3609  newshared->ats_firing_id == 0)
3610  break;
3611  }
3612  if ((char *) newshared < chunk->endfree)
3613  {
3614  *newshared = *evtshared;
3615  newshared->ats_firing_id = 0; /* just to be sure */
3616  chunk->endfree = (char *) newshared;
3617  }
3618 
3619  /* Insert the data */
3620  newevent = (AfterTriggerEvent) chunk->freeptr;
3621  memcpy(newevent, event, eventsize);
3622  /* ... and link the new event to its shared record */
3623  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
3624  newevent->ate_flags |= (char *) newshared - (char *) newevent;
3625 
3626  chunk->freeptr += eventsize;
3627  events->tailfree = chunk->freeptr;
3628 }
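/*
 * Sizing example (numbers follow directly from the constants and logic
 * above): chunks start at 1KB; when a full chunk ended up with no more than
 * about 100 shared records, the next chunk doubles (1KB -> 2KB -> 4KB ...),
 * capped at 1MB, while a chunk dominated by distinct shared records halves
 * the next allocation so the shared-record matching loop stays cheap.
 */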
3629 
3630 /* ----------
3631  * afterTriggerFreeEventList()
3632  *
3633  * Free all the event storage in the given list.
3634  * ----------
3635  */
3636 static void
3638 {
3639  AfterTriggerEventChunk *chunk;
3640  AfterTriggerEventChunk *next_chunk;
3641 
3642  for (chunk = events->head; chunk != NULL; chunk = next_chunk)
3643  {
3644  next_chunk = chunk->next;
3645  pfree(chunk);
3646  }
3647  events->head = NULL;
3648  events->tail = NULL;
3649  events->tailfree = NULL;
3650 }
3651 
3652 /* ----------
3653  * afterTriggerRestoreEventList()
3654  *
3655  * Restore an event list to its prior length, removing all the events
3656  * added since it had the value old_events.
3657  * ----------
3658  */
3659 static void
3661  const AfterTriggerEventList *old_events)
3662 {
3663  AfterTriggerEventChunk *chunk;
3664  AfterTriggerEventChunk *next_chunk;
3665 
3666  if (old_events->tail == NULL)
3667  {
3668  /* restoring to a completely empty state, so free everything */
3669  afterTriggerFreeEventList(events);
3670  }
3671  else
3672  {
3673  *events = *old_events;
3674  /* free any chunks after the last one we want to keep */
3675  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3676  {
3677  next_chunk = chunk->next;
3678  pfree(chunk);
3679  }
3680  /* and clean up the tail chunk to be the right length */
3681  events->tail->next = NULL;
3682  events->tail->freeptr = events->tailfree;
3683 
3684  /*
3685  * We don't make any effort to remove now-unused shared data records.
3686  * They might still be useful, anyway.
3687  */
3688  }
3689 }
3690 
3691 
3692 /* ----------
3693  * AfterTriggerExecute()
3694  *
3695  * Fetch the required tuples back from the heap and fire one
3696  * single trigger function.
3697  *
3698  * Frequently, this will be fired many times in a row for triggers of
3699  * a single relation. Therefore, we cache the open relation and provide
3700  * fmgr lookup cache space at the caller level. (For triggers fired at
3701  * the end of a query, we can even piggyback on the executor's state.)
3702  *
3703  * event: event currently being fired.
3704  * rel: open relation for event.
3705  * trigdesc: working copy of rel's trigger info.
3706  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
3707  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
3708  * or NULL if no instrumentation is wanted.
3709  * per_tuple_context: memory context to call trigger function in.
3710  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
3711  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
3712  * ----------
3713  */
3714 static void
3715 AfterTriggerExecute(AfterTriggerEvent event,
3716  Relation rel, TriggerDesc *trigdesc,
3717  FmgrInfo *finfo, Instrumentation *instr,
3718  MemoryContext per_tuple_context,
3719  TupleTableSlot *trig_tuple_slot1,
3720  TupleTableSlot *trig_tuple_slot2)
3721 {
3722  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3723  Oid tgoid = evtshared->ats_tgoid;
3724  TriggerData LocTriggerData;
3725  HeapTupleData tuple1;
3726  HeapTupleData tuple2;
3727  HeapTuple rettuple;
3728  Buffer buffer1 = InvalidBuffer;
3729  Buffer buffer2 = InvalidBuffer;
3730  int tgindx;
3731 
3732  /*
3733  * Locate trigger in trigdesc.
3734  */
3735  LocTriggerData.tg_trigger = NULL;
3736  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
3737  {
3738  if (trigdesc->triggers[tgindx].tgoid == tgoid)
3739  {
3740  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
3741  break;
3742  }
3743  }
3744  if (LocTriggerData.tg_trigger == NULL)
3745  elog(ERROR, "could not find trigger %u", tgoid);
3746 
3747  /*
3748  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
3749  * to include time spent re-fetching tuples in the trigger cost.
3750  */
3751  if (instr)
3752  InstrStartNode(instr + tgindx);
3753 
3754  /*
3755  * Fetch the required tuple(s).
3756  */
3757  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
3758  {
3759  case AFTER_TRIGGER_FDW_FETCH:
3760  {
3761  Tuplestorestate *fdw_tuplestore =
3762  GetTriggerTransitionTuplestore
3763  (afterTriggers.fdw_tuplestores);
3764 
3765  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
3766  trig_tuple_slot1))
3767  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3768 
3769  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3770  TRIGGER_EVENT_UPDATE &&
3771  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
3772  trig_tuple_slot2))
3773  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3774  }
3775  /* fall through */
3776  case AFTER_TRIGGER_FDW_REUSE:
3777 
3778  /*
3779  * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple()
3780  * ensures that tg_trigtuple does not reference tuplestore memory.
3781  * (It is formally possible for the trigger function to queue
3782  * trigger events that add to the same tuplestore, which can push
3783  * other tuples out of memory.) The distinction is academic,
3784  * because we start with a minimal tuple that ExecFetchSlotTuple()
3785  * must materialize anyway.
3786  */
3787  LocTriggerData.tg_trigtuple =
3788  ExecMaterializeSlot(trig_tuple_slot1);
3789  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3790 
3791  LocTriggerData.tg_newtuple =
3792  ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3793  TRIGGER_EVENT_UPDATE) ?
3794  ExecMaterializeSlot(trig_tuple_slot2) : NULL;
3795  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3796 
3797  break;
3798 
3799  default:
3800  if (ItemPointerIsValid(&(event->ate_ctid1)))
3801  {
3802  ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
3803  if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer1, false, NULL))
3804  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3805  LocTriggerData.tg_trigtuple = &tuple1;
3806  LocTriggerData.tg_trigtuplebuf = buffer1;
3807  }
3808  else
3809  {
3810  LocTriggerData.tg_trigtuple = NULL;
3811  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3812  }
3813 
3814  /* don't touch ctid2 if not there */
3815  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
3816  AFTER_TRIGGER_2CTID &&
3817  ItemPointerIsValid(&(event->ate_ctid2)))
3818  {
3819  ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
3820  if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer2, false, NULL))
3821  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3822  LocTriggerData.tg_newtuple = &tuple2;
3823  LocTriggerData.tg_newtuplebuf = buffer2;
3824  }
3825  else
3826  {
3827  LocTriggerData.tg_newtuple = NULL;
3828  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3829  }
3830  }
3831 
3832  /*
3833  * Set up the tuplestore information.
3834  */
3835  if (LocTriggerData.tg_trigger->tgoldtable)
3836  LocTriggerData.tg_oldtable =
3837  GetTriggerTransitionTuplestore(afterTriggers.old_tuplestores);
3838  else
3839  LocTriggerData.tg_oldtable = NULL;
3840  if (LocTriggerData.tg_trigger->tgnewtable)
3841  LocTriggerData.tg_newtable =
3842  GetTriggerTransitionTuplestore(afterTriggers.new_tuplestores);
3843  else
3844  LocTriggerData.tg_newtable = NULL;
3845 
3846  /*
3847  * Setup the remaining trigger information
3848  */
3849  LocTriggerData.type = T_TriggerData;
3850  LocTriggerData.tg_event =
3851  evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
3852  LocTriggerData.tg_relation = rel;
3853 
3854  MemoryContextReset(per_tuple_context);
3855 
3856  /*
3857  * Call the trigger and throw away any possibly returned updated tuple.
3858  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
3859  */
3860  rettuple = ExecCallTriggerFunc(&LocTriggerData,
3861  tgindx,
3862  finfo,
3863  NULL,
3864  per_tuple_context);
3865  if (rettuple != NULL &&
3866  rettuple != LocTriggerData.tg_trigtuple &&
3867  rettuple != LocTriggerData.tg_newtuple)
3868  heap_freetuple(rettuple);
3869 
3870  /*
3871  * Release buffers
3872  */
3873  if (buffer1 != InvalidBuffer)
3874  ReleaseBuffer(buffer1);
3875  if (buffer2 != InvalidBuffer)
3876  ReleaseBuffer(buffer2);
3877 
3878  /*
3879  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
3880  * one "tuple returned" (really the number of firings).
3881  */
3882  if (instr)
3883  InstrStopNode(instr + tgindx, 1);
3884 }
3885 
3886 
3887 /*
3888  * afterTriggerMarkEvents()
3889  *
3890  * Scan the given event list for not yet invoked events. Mark the ones
3891  * that can be invoked now with the current firing ID.
3892  *
3893  * If move_list isn't NULL, events that are not to be invoked now are
3894  * transferred to move_list.
3895  *
3896  * When immediate_only is TRUE, do not invoke currently-deferred triggers.
3897  * (This will be FALSE only at main transaction exit.)
3898  *
3899  * Returns TRUE if any invokable events were found.
3900  */
3901 static bool
3902 afterTriggerMarkEvents(AfterTriggerEventList *events,
3903  AfterTriggerEventList *move_list,
3904  bool immediate_only)
3905 {
3906  bool found = false;
3907  AfterTriggerEvent event;
3908  AfterTriggerEventChunk *chunk;
3909 
3910  for_each_event_chunk(event, chunk, *events)
3911  {
3912  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3913  bool defer_it = false;
3914 
3915  if (!(event->ate_flags &
3916  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
3917  {
3918  /*
3919  * This trigger hasn't been called or scheduled yet. Check if we
3920  * should call it now.
3921  */
3922  if (immediate_only && afterTriggerCheckState(evtshared))
3923  {
3924  defer_it = true;
3925  }
3926  else
3927  {
3928  /*
3929  * Mark it as to be fired in this firing cycle.
3930  */
3931  evtshared->ats_firing_id = afterTriggers.firing_counter;
3932  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
3933  found = true;
3934  }
3935  }
3936 
3937  /*
3938  * If it's deferred, move it to move_list, if requested.
3939  */
3940  if (defer_it && move_list != NULL)
3941  {
3942  /* add it to move_list */
3943  afterTriggerAddEvent(move_list, event, evtshared);
3944  /* mark original copy "done" so we don't do it again */
3945  event->ate_flags |= AFTER_TRIGGER_DONE;
3946  }
3947  }
3948 
3949  return found;
3950 }
3951 
3952 /*
3953  * afterTriggerInvokeEvents()
3954  *
3955  * Scan the given event list for events that are marked as to be fired
3956  * in the current firing cycle, and fire them.
3957  *
3958  * If estate isn't NULL, we use its result relation info to avoid repeated
3959  * openings and closing of trigger target relations. If it is NULL, we
3960  * make one locally to cache the info in case there are multiple trigger
3961  * events per rel.
3962  *
3963  * When delete_ok is TRUE, it's safe to delete fully-processed events.
3964  * (We are not very tense about that: we simply reset a chunk to be empty
3965  * if all its events got fired. The objective here is just to avoid useless
3966  * rescanning of events when a trigger queues new events during transaction
3967  * end, so it's not necessary to worry much about the case where only
3968  * some events are fired.)
3969  *
3970  * Returns TRUE if no unfired events remain in the list (this allows us
3971  * to avoid repeating afterTriggerMarkEvents).
3972  */
3973 static bool
3974 afterTriggerInvokeEvents(AfterTriggerEventList *events,
3975  CommandId firing_id,
3976  EState *estate,
3977  bool delete_ok)
3978 {
3979  bool all_fired = true;
3980  AfterTriggerEventChunk *chunk;
3981  MemoryContext per_tuple_context;
3982  bool local_estate = false;
3983  Relation rel = NULL;
3984  TriggerDesc *trigdesc = NULL;
3985  FmgrInfo *finfo = NULL;
3986  Instrumentation *instr = NULL;
3987  TupleTableSlot *slot1 = NULL,
3988  *slot2 = NULL;
3989 
3990  /* Make a local EState if need be */
3991  if (estate == NULL)
3992  {
3993  estate = CreateExecutorState();
3994  local_estate = true;
3995  }
3996 
3997  /* Make a per-tuple memory context for trigger function calls */
3998  per_tuple_context =
3999  AllocSetContextCreate(CurrentMemoryContext,
4000  "AfterTriggerTupleContext",
4001  ALLOCSET_DEFAULT_SIZES);
4002 
4003  for_each_chunk(chunk, *events)
4004  {
4005  AfterTriggerEvent event;
4006  bool all_fired_in_chunk = true;
4007 
4008  for_each_event(event, chunk)
4009  {
4010  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4011 
4012  /*
4013  * Is it one for me to fire?
4014  */
4015  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4016  evtshared->ats_firing_id == firing_id)
4017  {
4018  /*
4019  * So let's fire it... but first, find the correct relation if
4020  * this is not the same relation as before.
4021  */
4022  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4023  {
4024  ResultRelInfo *rInfo;
4025 
4026  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
4027  rel = rInfo->ri_RelationDesc;
4028  trigdesc = rInfo->ri_TrigDesc;
4029  finfo = rInfo->ri_TrigFunctions;
4030  instr = rInfo->ri_TrigInstrument;
4031  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4032  {
4033  if (slot1 != NULL)
4034  {
4035  ExecDropSingleTupleTableSlot(slot1);
4036  ExecDropSingleTupleTableSlot(slot2);
4037  }
4038  slot1 = MakeSingleTupleTableSlot(rel->rd_att);
4039  slot2 = MakeSingleTupleTableSlot(rel->rd_att);
4040  }
4041  if (trigdesc == NULL) /* should not happen */
4042  elog(ERROR, "relation %u has no triggers",
4043  evtshared->ats_relid);
4044  }
4045 
4046  /*
4047  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4048  * still set, so recursive examinations of the event list
4049  * won't try to re-fire it.
4050  */
4051  AfterTriggerExecute(event, rel, trigdesc, finfo, instr,
4052  per_tuple_context, slot1, slot2);
4053 
4054  /*
4055  * Mark the event as done.
4056  */
4057  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4058  event->ate_flags |= AFTER_TRIGGER_DONE;
4059  }
4060  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4061  {
4062  /* something remains to be done */
4063  all_fired = all_fired_in_chunk = false;
4064  }
4065  }
4066 
4067  /* Clear the chunk if delete_ok and nothing left of interest */
4068  if (delete_ok && all_fired_in_chunk)
4069  {
4070  chunk->freeptr = CHUNK_DATA_START(chunk);
4071  chunk->endfree = chunk->endptr;
4072 
4073  /*
4074  * If it's last chunk, must sync event list's tailfree too. Note
4075  * that delete_ok must NOT be passed as true if there could be
4076  * stacked AfterTriggerEventList values pointing at this event
4077  * list, since we'd fail to fix their copies of tailfree.
4078  */
4079  if (chunk == events->tail)
4080  events->tailfree = chunk->freeptr;
4081  }
4082  }
4083  if (slot1 != NULL)
4084  {
4085  ExecDropSingleTupleTableSlot(slot1);
4086  ExecDropSingleTupleTableSlot(slot2);
4087  }
4088 
4089  /* Release working resources */
4090  MemoryContextDelete(per_tuple_context);
4091 
4092  if (local_estate)
4093  {
4094  ListCell *l;
4095 
4096  foreach(l, estate->es_trig_target_relations)
4097  {
4098  ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
4099 
4100  /* Close indices and then the relation itself */
4101  ExecCloseIndices(resultRelInfo);
4102  heap_close(resultRelInfo->ri_RelationDesc, NoLock);
4103  }
4104  FreeExecutorState(estate);
4105  }
4106 
4107  return all_fired;
4108 }
4109 
4110 
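/*
 * Editor's note: the sketch below is a self-contained toy model (not part of
 * trigger.c) of the two-phase protocol used by afterTriggerMarkEvents() and
 * afterTriggerInvokeEvents(): first stamp every not-yet-done event with the
 * current firing id, then fire exactly the events carrying that id, looping
 * in case a fired "trigger" queues more events.  All names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_EVENTS 32

#define EVT_IN_PROGRESS 0x01
#define EVT_DONE		0x02

typedef struct ToyEvent
{
	int			flags;
	int			firing_id;
} ToyEvent;

static ToyEvent events[MAX_EVENTS];
static int	nevents = 0;
static int	firing_counter = 1;

static void
queue_event(void)
{
	if (nevents < MAX_EVENTS)
		events[nevents++] = (ToyEvent) {0, 0};
}

/* stamp unprocessed events with the given firing id; return true if any */
static bool
mark_events(int firing_id)
{
	bool		found = false;
	int			i;

	for (i = 0; i < nevents; i++)
	{
		if (!(events[i].flags & (EVT_DONE | EVT_IN_PROGRESS)))
		{
			events[i].flags |= EVT_IN_PROGRESS;
			events[i].firing_id = firing_id;
			found = true;
		}
	}
	return found;
}

/* fire only the events stamped with firing_id; return true if none remain */
static bool
invoke_events(int firing_id)
{
	bool		all_fired = true;
	int			i;

	for (i = 0; i < nevents; i++)
	{
		if ((events[i].flags & EVT_IN_PROGRESS) &&
			events[i].firing_id == firing_id)
		{
			/* "fire" the trigger; pretend the first firing queues one more */
			if (i == 0)
				queue_event();
			events[i].flags &= ~EVT_IN_PROGRESS;
			events[i].flags |= EVT_DONE;
		}
		else if (!(events[i].flags & EVT_DONE))
			all_fired = false;
	}
	return all_fired;
}

int
main(void)
{
	int			cycles = 0;

	queue_event();
	queue_event();

	/* same loop shape as AfterTriggerEndQuery()/AfterTriggerFireDeferred() */
	while (mark_events(firing_counter))
	{
		int			firing_id = firing_counter++;

		cycles++;
		if (invoke_events(firing_id))
			break;				/* all fired */
	}
	printf("fired %d events in %d firing cycles\n", nevents, cycles);
	return 0;
}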
4111 /* ----------
4112  * AfterTriggerBeginXact()
4113  *
4114  * Called at transaction start (either BEGIN or implicit for single
4115  * statement outside of transaction block).
4116  * ----------
4117  */
4118 void
4119 AfterTriggerBeginXact(void)
4120 {
4121  /*
4122  * Initialize after-trigger state structure to empty
4123  */
4124  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4125  afterTriggers.query_depth = -1;
4126 
4127  /*
4128  * Verify that there is no leftover state remaining. If these assertions
4129  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4130  * up properly.
4131  */
4132  Assert(afterTriggers.state == NULL);
4133  Assert(afterTriggers.query_stack == NULL);
4134  Assert(afterTriggers.fdw_tuplestores == NULL);
4135  Assert(afterTriggers.old_tuplestores == NULL);
4136  Assert(afterTriggers.new_tuplestores == NULL);
4137  Assert(afterTriggers.maxquerydepth == 0);
4138  Assert(afterTriggers.event_cxt == NULL);
4139  Assert(afterTriggers.events.head == NULL);
4140  Assert(afterTriggers.state_stack == NULL);
4141  Assert(afterTriggers.events_stack == NULL);
4142  Assert(afterTriggers.depth_stack == NULL);
4143  Assert(afterTriggers.firing_stack == NULL);
4144  Assert(afterTriggers.maxtransdepth == 0);
4145 }
4146 
4147 
4148 /* ----------
4149  * AfterTriggerBeginQuery()
4150  *
4151  * Called just before we start processing a single query within a
4152  * transaction (or subtransaction). Most of the real work gets deferred
4153  * until somebody actually tries to queue a trigger event.
4154  * ----------
4155  */
4156 void
4157 AfterTriggerBeginQuery(void)
4158 {
4159  /* Increase the query stack depth */
4160  afterTriggers.query_depth++;
4161 }
4162 
4163 
4164 /* ----------
4165  * AfterTriggerEndQuery()
4166  *
4167  * Called after one query has been completely processed. At this time
4168  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4169  * transfer deferred trigger events to the global deferred-trigger list.
4170  *
4171  * Note that this must be called BEFORE closing down the executor
4172  * with ExecutorEnd, because we make use of the EState's info about
4173  * target relations. Normally it is called from ExecutorFinish.
4174  * ----------
4175  */
4176 void
4177 AfterTriggerEndQuery(EState *estate)
4178 {
4179  AfterTriggerEventList *events;
4180  Tuplestorestate *fdw_tuplestore;
4181  Tuplestorestate *old_tuplestore;
4182  Tuplestorestate *new_tuplestore;
4183 
4184  /* Must be inside a query, too */
4185  Assert(afterTriggers.query_depth >= 0);
4186 
4187  /*
4188  * If we never even got as far as initializing the event stack, there
4189  * certainly won't be any events, so exit quickly.
4190  */
4191  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4192  {
4193  afterTriggers.query_depth--;
4194  return;
4195  }
4196 
4197  /*
4198  * Process all immediate-mode triggers queued by the query, and move the
4199  * deferred ones to the main list of deferred events.
4200  *
4201  * Notice that we decide which ones will be fired, and put the deferred
4202  * ones on the main list, before anything is actually fired. This ensures
4203  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4204  * IMMEDIATE: all events we have decided to defer will be available for it
4205  * to fire.
4206  *
4207  * We loop in case a trigger queues more events at the same query level.
4208  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4209  * will instead fire any triggers in a dedicated query level. Foreign key
4210  * enforcement triggers do add to the current query level, thanks to their
4211  * passing fire_triggers = false to SPI_execute_snapshot(). Other
4212  * C-language triggers might do likewise. Be careful here: firing a
4213  * trigger could result in query_stack being repalloc'd, so we can't save
4214  * its address across afterTriggerInvokeEvents calls.
4215  *
4216  * If we find no firable events, we don't have to increment
4217  * firing_counter.
4218  */
4219  for (;;)
4220  {
4221  events = &afterTriggers.query_stack[afterTriggers.query_depth];
4222  if (afterTriggerMarkEvents(events, &afterTriggers.events, true))
4223  {
4224  CommandId firing_id = afterTriggers.firing_counter++;
4225 
4226  /* OK to delete the immediate events after processing them */
4227  if (afterTriggerInvokeEvents(events, firing_id, estate, true))
4228  break; /* all fired */
4229  }
4230  else
4231  break;
4232  }
4233 
4234  /* Release query-local storage for events, including tuplestore if any */
4235  fdw_tuplestore = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4236  if (fdw_tuplestore)
4237  {
4238  tuplestore_end(fdw_tuplestore);
4239  afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4240  }
4241  old_tuplestore = afterTriggers.old_tuplestores[afterTriggers.query_depth];
4242  if (old_tuplestore)
4243  {
4244  tuplestore_end(old_tuplestore);
4245  afterTriggers.old_tuplestores[afterTriggers.query_depth] = NULL;
4246  }
4247  new_tuplestore = afterTriggers.new_tuplestores[afterTriggers.query_depth];
4248  if (new_tuplestore)
4249  {
4250  tuplestore_end(new_tuplestore);
4251  afterTriggers.new_tuplestores[afterTriggers.query_depth] = NULL;
4252  }
4253  afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4254 
4255  afterTriggers.query_depth--;
4256 }
4257 
4258 
4259 /* ----------
4260  * AfterTriggerFireDeferred()
4261  *
4262  * Called just before the current transaction is committed. At this
4263  * time we invoke all pending DEFERRED triggers.
4264  *
4265  * It is possible for other modules to queue additional deferred triggers
4266  * during pre-commit processing; therefore xact.c may have to call this
4267  * multiple times.
4268  * ----------
4269  */
4270 void
4271 AfterTriggerFireDeferred(void)
4272 {
4273  AfterTriggerEventList *events;
4274  bool snap_pushed = false;
4275 
4276  /* Must not be inside a query */
4277  Assert(afterTriggers.query_depth == -1);
4278 
4279  /*
4280  * If there are any triggers to fire, make sure we have set a snapshot for
4281  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
4282  * can't assume ActiveSnapshot is valid on entry.)
4283  */
4284  events = &afterTriggers.events;
4285  if (events->head != NULL)
4286  {
4287  PushActiveSnapshot(GetTransactionSnapshot());
4288  snap_pushed = true;
4289  }
4290 
4291  /*
4292  * Run all the remaining triggers. Loop until they are all gone, in case
4293  * some trigger queues more for us to do.
4294  */
4295  while (afterTriggerMarkEvents(events, NULL, false))
4296  {
4297  CommandId firing_id = afterTriggers.firing_counter++;
4298 
4299  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4300  break; /* all fired */
4301  }
4302 
4303  /*
4304  * We don't bother freeing the event list, since it will go away anyway
4305  * (and more efficiently than via pfree) in AfterTriggerEndXact.
4306  */
4307 
4308  if (snap_pushed)
4309  PopActiveSnapshot();
4310 }
4311 
4312 
4313 /* ----------
4314  * AfterTriggerEndXact()
4315  *
4316  * The current transaction is finishing.
4317  *
4318  * Any unfired triggers are canceled so we simply throw
4319  * away anything we know.
4320  *
4321  * Note: it is possible for this to be called repeatedly in case of
4322  * error during transaction abort; therefore, do not complain if
4323  * already closed down.
4324  * ----------
4325  */
4326 void
4327 AfterTriggerEndXact(bool isCommit)
4328 {
4329  /*
4330  * Forget the pending-events list.
4331  *
4332  * Since all the info is in TopTransactionContext or children thereof, we
4333  * don't really need to do anything to reclaim memory. However, the
4334  * pending-events list could be large, and so it's useful to discard it as
4335  * soon as possible --- especially if we are aborting because we ran out
4336  * of memory for the list!
4337  */
4338  if (afterTriggers.event_cxt)
4339  {
4340  MemoryContextDelete(afterTriggers.event_cxt);
4341  afterTriggers.event_cxt = NULL;
4342  afterTriggers.events.head = NULL;
4343  afterTriggers.events.tail = NULL;
4344  afterTriggers.events.tailfree = NULL;
4345  }
4346 
4347  /*
4348  * Forget any subtransaction state as well. Since this can't be very
4349  * large, we let the eventual reset of TopTransactionContext free the
4350  * memory instead of doing it here.
4351  */
4352  afterTriggers.state_stack = NULL;
4353  afterTriggers.events_stack = NULL;
4354  afterTriggers.depth_stack = NULL;
4355  afterTriggers.firing_stack = NULL;
4356  afterTriggers.maxtransdepth = 0;
4357 
4358 
4359  /*
4360  * Forget the query stack and constraint-related state information. As
4361  * with the subtransaction state information, we don't bother freeing the
4362  * memory here.
4363  */
4364  afterTriggers.query_stack = NULL;
4365  afterTriggers.fdw_tuplestores = NULL;
4366  afterTriggers.old_tuplestores = NULL;
4367  afterTriggers.new_tuplestores = NULL;
4368  afterTriggers.maxquerydepth = 0;
4369  afterTriggers.state = NULL;
4370 
4371  /* No more afterTriggers manipulation until next transaction starts. */
4372  afterTriggers.query_depth = -1;
4373 }
4374 
4375 /*
4376  * AfterTriggerBeginSubXact()
4377  *
4378  * Start a subtransaction.
4379  */
4380 void
4381 AfterTriggerBeginSubXact(void)
4382 {
4383  int my_level = GetCurrentTransactionNestLevel();
4384 
4385  /*
4386  * Allocate more space in the stacks if needed. (Note: because the
4387  * minimum nest level of a subtransaction is 2, we waste the first couple
4388  * entries of each array; not worth the notational effort to avoid it.)
4389  */
4390  while (my_level >= afterTriggers.maxtransdepth)
4391  {
4392  if (afterTriggers.maxtransdepth == 0)
4393  {
4394  MemoryContext old_cxt;
4395 
4396  old_cxt = MemoryContextSwitchTo(TopTransactionContext);
4397 
4398 #define DEFTRIG_INITALLOC 8
4399  afterTriggers.state_stack = (SetConstraintState *)
4400  palloc(DEFTRIG_INITALLOC * sizeof(SetConstraintState));
4401  afterTriggers.events_stack = (AfterTriggerEventList *)
4402  palloc(DEFTRIG_INITALLOC * sizeof(AfterTriggerEventList));
4403  afterTriggers.depth_stack = (int *)
4404  palloc(DEFTRIG_INITALLOC * sizeof(int));
4405  afterTriggers.firing_stack = (CommandId *)
4406  palloc(DEFTRIG_INITALLOC * sizeof(CommandId));
4407  afterTriggers.maxtransdepth = DEFTRIG_INITALLOC;
4408 
4409  MemoryContextSwitchTo(old_cxt);
4410  }
4411  else
4412  {
4413  /* repalloc will keep the stacks in the same context */
4414  int new_alloc = afterTriggers.maxtransdepth * 2;
4415 
4416  afterTriggers.state_stack = (SetConstraintState *)
4417  repalloc(afterTriggers.state_stack,
4418  new_alloc * sizeof(SetConstraintState));
4419  afterTriggers.events_stack = (AfterTriggerEventList *)
4420  repalloc(afterTriggers.events_stack,
4421  new_alloc * sizeof(AfterTriggerEventList));
4422  afterTriggers.depth_stack = (int *)
4423  repalloc(afterTriggers.depth_stack,
4424  new_alloc * sizeof(int));
4425  afterTriggers.firing_stack = (CommandId *)
4426  repalloc(afterTriggers.firing_stack,
4427  new_alloc * sizeof(CommandId));
4428  afterTriggers.maxtransdepth = new_alloc;
4429  }
4430  }
4431 
4432  /*
4433  * Push the current information into the stack. The SET CONSTRAINTS state
4434  * is not saved until/unless changed. Likewise, we don't make a
4435  * per-subtransaction event context until needed.
4436  */
4437  afterTriggers.state_stack[my_level] = NULL;
4438  afterTriggers.events_stack[my_level] = afterTriggers.events;
4439  afterTriggers.depth_stack[my_level] = afterTriggers.query_depth;
4440  afterTriggers.firing_stack[my_level] = afterTriggers.firing_counter;
4441 }
4442 
4443 /*
4444  * AfterTriggerEndSubXact()
4445  *
4446  * The current subtransaction is ending.
4447  */
4448 void
4449 AfterTriggerEndSubXact(bool isCommit)
4450 {
4451  int my_level = GetCurrentTransactionNestLevel();
4452  SetConstraintState state;
4453  AfterTriggerEvent event;
4454  AfterTriggerEventChunk *chunk;
4455  CommandId subxact_firing_id;
4456 
4457  /*
4458  * Pop the prior state if needed.
4459  */
4460  if (isCommit)
4461  {
4462  Assert(my_level < afterTriggers.maxtransdepth);
4463  /* If we saved a prior state, we don't need it anymore */
4464  state = afterTriggers.state_stack[my_level];
4465  if (state != NULL)
4466  pfree(state);
4467  /* this avoids double pfree if error later: */
4468  afterTriggers.state_stack[my_level] = NULL;
4469  Assert(afterTriggers.query_depth ==
4470  afterTriggers.depth_stack[my_level]);
4471  }
4472  else
4473  {
4474  /*
4475  * Aborting. It is possible subxact start failed before calling
4476  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
4477  * stack levels that aren't there.
4478  */
4479  if (my_level >= afterTriggers.maxtransdepth)
4480  return;
4481 
4482  /*
4483  * Release any event lists from queries being aborted, and restore
4484  * query_depth to its pre-subxact value. This assumes that a
4485  * subtransaction will not add events to query levels started in an
4486  * earlier transaction state.
4487  */
4488  while (afterTriggers.query_depth > afterTriggers.depth_stack[my_level])
4489  {
4490  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
4491  {
4492  Tuplestorestate *ts;
4493 
4494  ts = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4495  if (ts)
4496  {
4497  tuplestore_end(ts);
4498  afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4499  }
4500  ts = afterTriggers.old_tuplestores[afterTriggers.query_depth];
4501  if (ts)
4502  {
4503  tuplestore_end(ts);
4504  afterTriggers.old_tuplestores[afterTriggers.query_depth] = NULL;
4505  }
4506  ts = afterTriggers.new_tuplestores[afterTriggers.query_depth];
4507  if (ts)
4508  {
4509  tuplestore_end(ts);
4510  afterTriggers.new_tuplestores[afterTriggers.query_depth] = NULL;
4511  }
4512 
4513  afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4514  }
4515 
4516  afterTriggers.query_depth--;
4517  }
4518  Assert(afterTriggers.query_depth ==
4519  afterTriggers.depth_stack[my_level]);
4520 
4521  /*
4522  * Restore the global deferred-event list to its former length,
4523  * discarding any events queued by the subxact.
4524  */
4525  afterTriggerRestoreEventList(&afterTriggers.events,
4526  &afterTriggers.events_stack[my_level]);
4527 
4528  /*
4529  * Restore the trigger state. If the saved state is NULL, then this
4530  * subxact didn't save it, so it doesn't need restoring.
4531  */
4532  state = afterTriggers.state_stack[my_level];
4533  if (state != NULL)
4534  {
4535  pfree(afterTriggers.state);
4536  afterTriggers.state = state;
4537  }
4538  /* this avoids double pfree if error later: */
4539  afterTriggers.state_stack[my_level] = NULL;
4540 
4541  /*
4542  * Scan for any remaining deferred events that were marked DONE or IN
4543  * PROGRESS by this subxact or a child, and un-mark them. We can
4544  * recognize such events because they have a firing ID greater than or
4545  * equal to the firing_counter value we saved at subtransaction start.
4546  * (This essentially assumes that the current subxact includes all
4547  * subxacts started after it.)
4548  */
4549  subxact_firing_id = afterTriggers.firing_stack[my_level];
4550  for_each_event_chunk(event, chunk, afterTriggers.events)
4551  {
4552  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4553 
4554  if (event->ate_flags &
4555  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
4556  {
4557  if (evtshared->ats_firing_id >= subxact_firing_id)
4558  event->ate_flags &=
4559  ~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
4560  }
4561  }
4562  }
4563 }
4564 
4565 /* ----------
4566  * AfterTriggerEnlargeQueryState()
4567  *
4568  * Prepare the necessary state so that we can record AFTER trigger events
4569  * queued by a query. It is allowed to have nested queries within a
4570  * (sub)transaction, so we need to have separate state for each query
4571  * nesting level.
4572  * ----------
4573  */
4574 static void
4575 AfterTriggerEnlargeQueryState(void)
4576 {
4577  int init_depth = afterTriggers.maxquerydepth;
4578 
4579  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
4580 
4581  if (afterTriggers.maxquerydepth == 0)
4582  {
4583  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
4584 
4585  afterTriggers.query_stack = (AfterTriggerEventList *)
4586  MemoryContextAlloc(TopTransactionContext,
4587  new_alloc * sizeof(AfterTriggerEventList));
4588  afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4589  MemoryContextAllocZero(TopTransactionContext,
4590  new_alloc * sizeof(Tuplestorestate *));
4591  afterTriggers.old_tuplestores = (Tuplestorestate **)
4592  MemoryContextAllocZero(TopTransactionContext,
4593  new_alloc * sizeof(Tuplestorestate *));
4594  afterTriggers.new_tuplestores = (Tuplestorestate **)
4595  MemoryContextAllocZero(TopTransactionContext,
4596  new_alloc * sizeof(Tuplestorestate *));
4597  afterTriggers.maxquerydepth = new_alloc;
4598  }
4599  else
4600  {
4601  /* repalloc will keep the stack in the same context */
4602  int old_alloc = afterTriggers.maxquerydepth;
4603  int new_alloc = Max(afterTriggers.query_depth + 1,
4604  old_alloc * 2);
4605 
4606  afterTriggers.query_stack = (AfterTriggerEventList *)
4607  repalloc(afterTriggers.query_stack,
4608  new_alloc * sizeof(AfterTriggerEventList));
4609  afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4610  repalloc(afterTriggers.fdw_tuplestores,
4611  new_alloc * sizeof(Tuplestorestate *));
4612  afterTriggers.old_tuplestores = (Tuplestorestate **)
4613  repalloc(afterTriggers.old_tuplestores,
4614  new_alloc * sizeof(Tuplestorestate *));
4615  afterTriggers.new_tuplestores = (Tuplestorestate **)
4616  repalloc(afterTriggers.new_tuplestores,
4617  new_alloc * sizeof(Tuplestorestate *));
4618  /* Clear newly-allocated slots for subsequent lazy initialization. */
4619  memset(afterTriggers.fdw_tuplestores + old_alloc,
4620  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4621  memset(afterTriggers.old_tuplestores + old_alloc,
4622  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4623  memset(afterTriggers.new_tuplestores + old_alloc,
4624  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4625  afterTriggers.maxquerydepth = new_alloc;
4626  }
4627 
4628  /* Initialize new query lists to empty */
4629  while (init_depth < afterTriggers.maxquerydepth)
4630  {
4631  AfterTriggerEventList *events;
4632 
4633  events = &afterTriggers.query_stack[init_depth];
4634  events->head = NULL;
4635  events->tail = NULL;
4636  events->tailfree = NULL;
4637 
4638  ++init_depth;
4639  }
4640 }
4641 
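/*
 * Editor's note: a minimal standalone sketch (not PostgreSQL code) of the
 * growth pattern used by AfterTriggerEnlargeQueryState(): grow the array to
 * Max(needed, old * 2) and zero only the newly added slots so they can be
 * initialized lazily later.  realloc/memset stand in for the backend's
 * MemoryContext allocators; all names are invented.
 */
#include <stdlib.h>
#include <string.h>

typedef struct QueryLevelState
{
	void	  **slots;			/* e.g. one tuplestore pointer per level */
	int			maxdepth;
} QueryLevelState;

static int
max_int(int a, int b)
{
	return (a > b) ? a : b;
}

static void
enlarge(QueryLevelState *st, int needed_depth)
{
	int			old_alloc = st->maxdepth;
	int			new_alloc = max_int(needed_depth + 1,
									old_alloc ? old_alloc * 2 : 8);

	st->slots = realloc(st->slots, new_alloc * sizeof(void *));
	/* clear only the newly-allocated tail for later lazy initialization */
	memset(st->slots + old_alloc, 0,
		   (new_alloc - old_alloc) * sizeof(void *));
	st->maxdepth = new_alloc;
}

int
main(void)
{
	QueryLevelState st = {NULL, 0};

	enlarge(&st, 0);			/* first call: allocate 8 zeroed slots */
	enlarge(&st, 20);			/* deeper nesting: grow to 21, zero slots 8..20 */
	free(st.slots);
	return 0;
}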
4642 /*
4643  * Create an empty SetConstraintState with room for numalloc trigstates
4644  */
4645 static SetConstraintState
4646 SetConstraintStateCreate(int numalloc)
4647 {
4648  SetConstraintState state;
4649 
4650  /* Behave sanely with numalloc == 0 */
4651  if (numalloc <= 0)
4652  numalloc = 1;
4653 
4654  /*
4655  * We assume that zeroing will correctly initialize the state values.
4656  */
4657  state = (SetConstraintState)
4658  MemoryContextAllocZero(TopTransactionContext,
4659  offsetof(SetConstraintStateData, trigstates) +
4660  numalloc * sizeof(SetConstraintTriggerData));
4661 
4662  state->numalloc = numalloc;
4663 
4664  return state;
4665 }
4666 
4667 /*
4668  * Copy a SetConstraintState
4669  */
4670 static SetConstraintState
4671 SetConstraintStateCopy(SetConstraintState origstate)
4672 {
4673  SetConstraintState state;
4674 
4675  state = SetConstraintStateCreate(origstate->numstates);
4676 
4677  state->all_isset = origstate->all_isset;
4678  state->all_isdeferred = origstate->all_isdeferred;
4679  state->numstates = origstate->numstates;
4680  memcpy(state->trigstates, origstate->trigstates,
4681  origstate->numstates * sizeof(SetConstraintTriggerData));
4682 
4683  return state;
4684 }
4685 
4686 /*
4687  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
4688  * pointer to the state object (it will change if we have to repalloc).
4689  */
4690 static SetConstraintState
4691 SetConstraintStateAddItem(SetConstraintState state,
4692  Oid tgoid, bool tgisdeferred)
4693 {
4694  if (state->numstates >= state->numalloc)
4695  {
4696  int newalloc = state->numalloc * 2;
4697 
4698  newalloc = Max(newalloc, 8); /* in case original has size 0 */
4699  state = (SetConstraintState)
4700  repalloc(state,
4701  offsetof(SetConstraintStateData, trigstates) +
4702  newalloc * sizeof(SetConstraintTriggerData));
4703  state->numalloc = newalloc;
4704  Assert(state->numstates < state->numalloc);
4705  }
4706 
4707  state->trigstates[state->numstates].sct_tgoid = tgoid;
4708  state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
4709  state->numstates++;
4710 
4711  return state;
4712 }
4713 
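/*
 * Editor's note: self-contained sketch (not part of trigger.c) of the idiom
 * used by SetConstraintStateAddItem(): a struct with a flexible array member
 * is grown by doubling (repalloc in the backend, realloc here), and the
 * possibly-moved pointer is returned so the caller must store it back.
 * All names are invented.
 */
#include <stddef.h>
#include <stdlib.h>

typedef struct ItemArray
{
	int			numalloc;
	int			numstates;
	int			items[];		/* flexible array member */
} ItemArray;

static ItemArray *
item_array_create(int numalloc)
{
	ItemArray  *a;

	if (numalloc <= 0)
		numalloc = 1;
	a = calloc(1, offsetof(ItemArray, items) + numalloc * sizeof(int));
	a->numalloc = numalloc;
	return a;
}

/* returns a possibly-changed pointer, like SetConstraintStateAddItem() */
static ItemArray *
item_array_add(ItemArray *a, int value)
{
	if (a->numstates >= a->numalloc)
	{
		int			newalloc = a->numalloc * 2;

		a = realloc(a, offsetof(ItemArray, items) + newalloc * sizeof(int));
		a->numalloc = newalloc;
	}
	a->items[a->numstates++] = value;
	return a;
}

int
main(void)
{
	ItemArray  *a = item_array_create(2);
	int			i;

	for (i = 0; i < 10; i++)
		a = item_array_add(a, i);	/* always assign the result back */
	free(a);
	return 0;
}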
4714 /* ----------
4715  * AfterTriggerSetState()
4716  *
4717  * Execute the SET CONSTRAINTS ... utility command.
4718  * ----------
4719  */
4720 void
4721 AfterTriggerSetState(ConstraintsSetStmt *stmt)
4722 {
4723  int my_level = GetCurrentTransactionNestLevel();
4724 
4725  /* If we haven't already done so, initialize our state. */
4726  if (afterTriggers.state == NULL)
4727  afterTriggers.state = SetConstraintStateCreate(8);
4728 
4729  /*
4730  * If in a subtransaction, and we didn't save the current state already,
4731  * save it so it can be restored if the subtransaction aborts.
4732  */
4733  if (my_level > 1 &&
4734  afterTriggers.state_stack[my_level] == NULL)
4735  {
4736  afterTriggers.state_stack[my_level] =
4737  SetConstraintStateCopy(afterTriggers.state);
4738  }
4739 
4740  /*
4741  * Handle SET CONSTRAINTS ALL ...
4742  */
4743  if (stmt->constraints == NIL)
4744  {
4745  /*
4746  * Forget any previous SET CONSTRAINTS commands in this transaction.
4747  */
4748  afterTriggers.state->numstates = 0;
4749 
4750  /*
4751  * Set the per-transaction ALL state to known.
4752  */
4753  afterTriggers.state->all_isset = true;
4754  afterTriggers.state->all_isdeferred = stmt->deferred;
4755  }
4756  else
4757  {
4758  Relation conrel;
4759  Relation tgrel;
4760  List *conoidlist = NIL;
4761  List *tgoidlist = NIL;
4762  ListCell *lc;
4763 
4764  /*
4765  * Handle SET CONSTRAINTS constraint-name [, ...]
4766  *
4767  * First, identify all the named constraints and make a list of their
4768  * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
4769  * the same name within a schema, the specifications are not
4770  * necessarily unique. Our strategy is to target all matching
4771  * constraints within the first search-path schema that has any
4772  * matches, but disregard matches in schemas beyond the first match.
4773  * (This is a bit odd but it's the historical behavior.)
4774  */
4775  conrel = heap_open(ConstraintRelationId, AccessShareLock);
4776 
4777  foreach(lc, stmt->constraints)
4778  {
4779  RangeVar *constraint = lfirst(lc);
4780  bool found;
4781  List *namespacelist;
4782  ListCell *nslc;
4783 
4784  if (constraint->catalogname)
4785  {
4786  if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
4787  ereport(ERROR,
4788  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4789  errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
4790  constraint->catalogname, constraint->schemaname,
4791  constraint->relname)));
4792  }
4793 
4794  /*
4795  * If we're given the schema name with the constraint, look only
4796  * in that schema. If given a bare constraint name, use the
4797  * search path to find the first matching constraint.
4798  */
4799  if (constraint->schemaname)
4800  {
4801  Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
4802  false);
4803 
4804  namespacelist = list_make1_oid(namespaceId);
4805  }
4806  else
4807  {
4808  namespacelist = fetch_search_path(true);
4809  }
4810 
4811  found = false;
4812  foreach(nslc, namespacelist)
4813  {
4814  Oid namespaceId = lfirst_oid(nslc);
4815  SysScanDesc conscan;
4816  ScanKeyData skey[2];
4817  HeapTuple tup;
4818 
4819  ScanKeyInit(&skey[0],
4820  Anum_pg_constraint_conname,
4821  BTEqualStrategyNumber, F_NAMEEQ,
4822  CStringGetDatum(constraint->relname));
4823  ScanKeyInit(&skey[1],
4824  Anum_pg_constraint_connamespace,
4825  BTEqualStrategyNumber, F_OIDEQ,
4826  ObjectIdGetDatum(namespaceId));
4827 
4828  conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
4829  true, NULL, 2, skey);
4830 
4831  while (HeapTupleIsValid(tup = systable_getnext(conscan)))
4832  {
4833  Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);
4834 
4835  if (con->condeferrable)
4836  conoidlist = lappend_oid(conoidlist,
4837  HeapTupleGetOid(tup));
4838  else if (stmt->deferred)
4839  ereport(ERROR,
4840  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
4841  errmsg("constraint \"%s\" is not deferrable",
4842  constraint->relname)));
4843  found = true;
4844  }
4845 
4846  systable_endscan(conscan);
4847 
4848  /*
4849  * Once we've found a matching constraint we do not search
4850  * later parts of the search path.
4851  */
4852  if (found)
4853  break;
4854  }
4855 
4856  list_free(namespacelist);
4857 
4858  /*
4859  * Not found?
4860  */
4861  if (!found)
4862  ereport(ERROR,
4863  (errcode(ERRCODE_UNDEFINED_OBJECT),
4864  errmsg("constraint \"%s\" does not exist",
4865  constraint->relname)));
4866  }
4867 
4868  heap_close(conrel, AccessShareLock);
4869 
4870  /*
4871  * Now, locate the trigger(s) implementing each of these constraints,
4872  * and make a list of their OIDs.
4873  */
4874  tgrel = heap_open(TriggerRelationId, AccessShareLock);
4875 
4876  foreach(lc, conoidlist)
4877  {
4878  Oid conoid = lfirst_oid(lc);
4879  bool found;
4880  ScanKeyData skey;
4881  SysScanDesc tgscan;
4882  HeapTuple htup;
4883 
4884  found = false;
4885 
4886  ScanKeyInit(&skey,
4887  Anum_pg_trigger_tgconstraint,
4888  BTEqualStrategyNumber, F_OIDEQ,
4889  ObjectIdGetDatum(conoid));
4890 
4891  tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
4892  NULL, 1, &skey);
4893 
4894  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
4895  {
4896  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
4897 
4898  /*
4899  * Silently skip triggers that are marked as non-deferrable in
4900  * pg_trigger. This is not an error condition, since a
4901  * deferrable RI constraint may have some non-deferrable
4902  * actions.
4903  */
4904  if (pg_trigger->tgdeferrable)
4905  tgoidlist = lappend_oid(tgoidlist,
4906  HeapTupleGetOid(htup));
4907 
4908  found = true;
4909  }
4910 
4911  systable_endscan(tgscan);
4912 
4913  /* Safety check: a deferrable constraint should have triggers */
4914  if (!found)
4915  elog(ERROR, "no triggers found for constraint with OID %u",
4916  conoid);
4917  }
4918 
4919  heap_close(tgrel, AccessShareLock);
4920 
4921  /*
4922  * Now we can set the trigger states of individual triggers for this
4923  * xact.
4924  */
4925  foreach(lc, tgoidlist)
4926  {
4927  Oid tgoid = lfirst_oid(lc);
4928  SetConstraintState state = afterTriggers.state;
4929  bool found = false;
4930  int i;
4931 
4932  for (i = 0; i < state->numstates; i++)
4933  {
4934  if (state->trigstates[i].sct_tgoid == tgoid)
4935  {
4936  state->trigstates[i].sct_tgisdeferred = stmt->deferred;
4937  found = true;
4938  break;
4939  }
4940  }
4941  if (!found)
4942  {
4943  afterTriggers.state =
4944  SetConstraintStateAddItem(state, tgoid, stmt->deferred);
4945  }
4946  }
4947  }
4948 
4949  /*
4950  * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
4951  * checks against that constraint must be made when the SET CONSTRAINTS
4952  * command is executed -- i.e. the effects of the SET CONSTRAINTS command
4953  * apply retroactively. We've updated the constraints state, so scan the
4954  * list of previously deferred events to fire any that have now become
4955  * immediate.
4956  *
4957  * Obviously, if this was SET ... DEFERRED then it can't have converted
4958  * any unfired events to immediate, so we need do nothing in that case.
4959  */
4960  if (!stmt->deferred)
4961  {
4962  AfterTriggerEventList *events = &afterTriggers.events;
4963  bool snapshot_set = false;
4964 
4965  while (afterTriggerMarkEvents(events, NULL, true))
4966  {
4967  CommandId firing_id = afterTriggers.firing_counter++;
4968 
4969  /*
4970  * Make sure a snapshot has been established in case trigger
4971  * functions need one. Note that we avoid setting a snapshot if
4972  * we don't find at least one trigger that has to be fired now.
4973  * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
4974  * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
4975  * at the start of a transaction it's not possible for any trigger
4976  * events to be queued yet.)
4977  */
4978  if (!snapshot_set)
4979  {
4980  PushActiveSnapshot(GetTransactionSnapshot());
4981  snapshot_set = true;
4982  }
4983 
4984  /*
4985  * We can delete fired events if we are at top transaction level,
4986  * but we'd better not if inside a subtransaction, since the
4987  * subtransaction could later get rolled back.
4988  */
4989  if (afterTriggerInvokeEvents(events, firing_id, NULL,
4990  !IsSubTransaction()))
4991  break; /* all fired */
4992  }
4993 
4994  if (snapshot_set)
4995  PopActiveSnapshot();
4996  }
4997 }
4998 
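/*
 * Editor's note: an illustrative libpq client (not part of trigger.c) showing
 * the behavior implemented by AfterTriggerSetState(): with a DEFERRABLE
 * foreign key, SET CONSTRAINTS ALL DEFERRED leaves the RI check queued as a
 * deferred AFTER trigger event, and SET CONSTRAINTS ALL IMMEDIATE fires the
 * already-queued event retroactively.  Table and column names are invented;
 * connection parameters come from the usual PG* environment variables.
 */
#include <stdio.h>
#include <libpq-fe.h>

static void
run(PGconn *conn, const char *sql)
{
	PGresult   *res = PQexec(conn, sql);

	if (PQresultStatus(res) == PGRES_COMMAND_OK)
		printf("OK:    %s\n", sql);
	else
		printf("ERROR: %s -> %s", sql, PQresultErrorMessage(res));
	PQclear(res);
}

int
main(void)
{
	PGconn	   *conn = PQconnectdb("");

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		return 1;
	}

	run(conn, "CREATE TABLE demo_parent (id int PRIMARY KEY)");
	run(conn, "CREATE TABLE demo_child (parent_id int REFERENCES demo_parent "
		"DEFERRABLE INITIALLY IMMEDIATE)");

	run(conn, "BEGIN");
	run(conn, "SET CONSTRAINTS ALL DEFERRED");
	/* accepted: the RI check is queued as a deferred event, not fired yet */
	run(conn, "INSERT INTO demo_child VALUES (1)");
	/* fails here: the queued check is fired by SET CONSTRAINTS IMMEDIATE */
	run(conn, "SET CONSTRAINTS ALL IMMEDIATE");
	run(conn, "ROLLBACK");

	run(conn, "DROP TABLE demo_child, demo_parent");
	PQfinish(conn);
	return 0;
}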
4999 /* ----------
5000  * AfterTriggerPendingOnRel()
5001  * Test to see if there are any pending after-trigger events for rel.
5002  *
5003  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5004  * it is unsafe to perform major surgery on a relation. Note that only
5005  * local pending events are examined. We assume that having exclusive lock
5006  * on a rel guarantees there are no unserviced events in other backends ---
5007  * but having a lock does not prevent there being such events in our own.
5008  *
5009  * In some scenarios it'd be reasonable to remove pending events (more
5010  * specifically, mark them DONE by the current subxact) but without a lot
5011  * of knowledge of the trigger semantics we can't do this in general.
5012  * ----------
5013  */
5014 bool
5015 AfterTriggerPendingOnRel(Oid relid)
5016 {
5017  AfterTriggerEvent event;
5018  AfterTriggerEventChunk *chunk;
5019  int depth;
5020 
5021  /* Scan queued events */
5022  for_each_event_chunk(event, chunk, afterTriggers.events)
5023  {
5024  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5025 
5026  /*
5027  * We can ignore completed events. (Even if a DONE flag is rolled
5028  * back by subxact abort, it's OK because the effects of the TRUNCATE
5029  * or whatever must get rolled back too.)
5030  */
5031  if (event->ate_flags & AFTER_TRIGGER_DONE)
5032  continue;
5033 
5034  if (evtshared->ats_relid == relid)
5035  return true;
5036  }
5037 
5038  /*
5039  * Also scan events queued by incomplete queries. This could only matter
5040  * if TRUNCATE/etc is executed by a function or trigger within an updating
5041  * query on the same relation, which is pretty perverse, but let's check.
5042  */
5043  for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
5044  {
5045  for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth])
5046  {
5047  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5048 
5049  if (event->ate_flags & AFTER_TRIGGER_DONE)
5050  continue;
5051 
5052  if (evtshared->ats_relid == relid)
5053  return true;
5054  }
5055  }
5056 
5057  return false;
5058 }
5059 
5060 
5061 /* ----------
5062  * AfterTriggerSaveEvent()
5063  *
5064  * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5065  * be fired for an event.
5066  *
5067  * NOTE: this is called whenever there are any triggers associated with
5068  * the event (even if they are disabled). This function decides which
5069  * triggers actually need to be queued. It is also called after each row,
5070  * even if there are no triggers for that event, if there are any AFTER
5071  * STATEMENT triggers for the statement which use transition tables, so that
5072  * the transition tuplestores can be built.
5073  *
5074  * Transition tuplestores are built now, rather than when events are pulled
5075  * off of the queue because AFTER ROW triggers are allowed to select from the
5076  * transition tables for the statement.
5077  * ----------
5078  */
5079 static void
5080 AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
5081  int event, bool row_trigger,
5082  HeapTuple oldtup, HeapTuple newtup,
5083  List *recheckIndexes, Bitmapset *modifiedCols)
5084 {
5085  Relation rel = relinfo->ri_RelationDesc;
5086  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5087  AfterTriggerEventData new_event;
5088  AfterTriggerSharedData new_shared;
5089  char relkind = relinfo->ri_RelationDesc->rd_rel->relkind;
5090  int tgtype_event;
5091  int tgtype_level;
5092  int i;
5093  Tuplestorestate *fdw_tuplestore = NULL;
5094 
5095  /*
5096  * Check state. We use a normal test not Assert because it is possible to
5097  * reach here in the wrong state given misconfigured RI triggers, in
5098  * particular deferring a cascade action trigger.
5099  */
5100  if (afterTriggers.query_depth < 0)
5101  elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
5102 
5103  /* Be sure we have enough space to record events at this query depth. */
5104  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5105  AfterTriggerEnlargeQueryState();
5106 
5107  /*
5108  * If the relation has AFTER ... FOR EACH ROW triggers, capture rows into
5109  * transition tuplestores for this depth.
5110  */
5111  if (row_trigger)
5112  {
5113  if ((event == TRIGGER_EVENT_DELETE &&
5114  trigdesc->trig_delete_old_table) ||
5115  (event == TRIGGER_EVENT_UPDATE &&
5116  trigdesc->trig_update_old_table))
5117  {
5118  Tuplestorestate *old_tuplestore;
5119 
5120  Assert(oldtup != NULL);
5121  old_tuplestore =
5122  GetTriggerTransitionTuplestore
5123  (afterTriggers.old_tuplestores);
5124  tuplestore_puttuple(old_tuplestore, oldtup);
5125  }
5126  if ((event == TRIGGER_EVENT_INSERT &&
5127  trigdesc->trig_insert_new_table) ||
5128  (event == TRIGGER_EVENT_UPDATE &&
5129  trigdesc->trig_update_new_table))
5130  {
5131  Tuplestorestate *new_tuplestore;
5132 
5133  Assert(newtup != NULL);
5134  new_tuplestore =
5135  GetTriggerTransitionTuplestore
5136  (afterTriggers.new_tuplestores);
5137  tuplestore_puttuple(new_tuplestore, newtup);
5138  }
5139 
5140  /* If transition tables are the only reason we're here, return. */
5141  if ((event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
5142  (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
5143  (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
5144  return;
5145  }
5146 
5147  /*
5148  * Validate the event code and collect the associated tuple CTIDs.
5149  *
5150  * The event code will be used both as a bitmask and an array offset, so
5151  * validation is important to make sure we don't walk off the edge of our
5152  * arrays.
5153  */
5154  switch (event)
5155  {
5156  case TRIGGER_EVENT_INSERT:
5157  tgtype_event = TRIGGER_TYPE_INSERT;
5158  if (row_trigger)
5159  {
5160  Assert(oldtup == NULL);
5161  Assert(newtup != NULL);
5162  ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid1));
5163  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5164  }
5165  else
5166  {
5167  Assert(oldtup == NULL);
5168  Assert(newtup == NULL);
5169  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5170  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5171  }
5172  break;
5173  case TRIGGER_EVENT_DELETE:
5174  tgtype_event = TRIGGER_TYPE_DELETE;
5175  if (row_trigger)
5176  {
5177  Assert(oldtup != NULL);
5178  Assert(newtup == NULL);
5179  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5180  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5181  }
5182  else
5183  {
5184  Assert(oldtup == NULL);
5185  Assert(newtup == NULL);
5186  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5187  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5188  }
5189  break;
5190  case TRIGGER_EVENT_UPDATE:
5191  tgtype_event = TRIGGER_TYPE_UPDATE;
5192  if (row_trigger)
5193  {
5194  Assert(oldtup != NULL);
5195  Assert(newtup != NULL);
5196  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5197  ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid2));
5198  }
5199  else
5200  {
5201  Assert(oldtup == NULL);
5202  Assert(newtup == NULL);
5203  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5204  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5205  }
5206  break;
5207  case TRIGGER_EVENT_TRUNCATE:
5208  tgtype_event = TRIGGER_TYPE_TRUNCATE;
5209  Assert(oldtup == NULL);
5210  Assert(newtup == NULL);
5211  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5212  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5213  break;
5214  default:
5215  elog(ERROR, "invalid after-trigger event code: %d", event);
5216  tgtype_event = 0; /* keep compiler quiet */
5217  break;
5218  }
5219 
5220  if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
5221  new_event.ate_flags = (row_trigger && event == TRIGGER_EVENT_UPDATE) ?
5222  AFTER_TRIGGER_2CTID : AFTER_TRIGGER_1CTID;
5223  /* else, we'll initialize ate_flags for each trigger */
5224 
5225  tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
5226 
5227  for (i = 0; i < trigdesc->numtriggers; i++)
5228  {
5229  Trigger *trigger = &trigdesc->triggers[i];
5230 
5231  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
5232  tgtype_level,
5233  TRIGGER_TYPE_AFTER,
5234  tgtype_event))
5235  continue;
5236  if (!TriggerEnabled(estate, relinfo, trigger, event,
5237  modifiedCols, oldtup, newtup))
5238  continue;
5239 
5240  if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
5241  {
5242  if (fdw_tuplestore == NULL)
5243  {
5244  fdw_tuplestore =
5245  GetTriggerTransitionTuplestore
5246  (afterTriggers.fdw_tuplestores);
5247  new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
5248  }
5249  else
5250  /* subsequent event for the same tuple */
5251  new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
5252  }
5253 
5254  /*
5255  * If the trigger is a foreign key enforcement trigger, there are
5256  * certain cases where we can skip queueing the event because we can
5257  * tell by inspection that the FK constraint will still pass.
5258  */
5259  if (TRIGGER_FIRED_BY_UPDATE(event))
5260  {
5261  switch (RI_FKey_trigger_type(trigger->tgfoid))
5262  {
5263  case RI_TRIGGER_PK:
5264  /* Update on trigger's PK table */
5265  if (!RI_FKey_pk_upd_check_required(trigger, rel,
5266  oldtup, newtup))
5267  {
5268  /* skip queuing this event */
5269  continue;
5270  }
5271  break;
5272 
5273  case RI_TRIGGER_FK:
5274  /* Update on trigger's FK table */
5275  if (!RI_FKey_fk_upd_check_required(trigger, rel,
5276  oldtup, newtup))
5277  {
5278  /* skip queuing this event */
5279  continue;
5280  }
5281  break;
5282 
5283  case RI_TRIGGER_NONE:
5284  /* Not an FK trigger */
5285  break;
5286  }
5287  }
5288 
5289  /*
5290  * If the trigger is a deferred unique constraint check trigger, only
5291  * queue it if the unique constraint was potentially violated, which
5292  * we know from index insertion time.
5293  */
5294  if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
5295  {
5296  if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
5297  continue; /* Uniqueness definitely not violated */
5298  }
5299 
5300  /*
5301  * Fill in event structure and add it to the current query's queue.
5302  */
5303  new_shared.ats_event =
5304  (event & TRIGGER_EVENT_OPMASK) |
5305  (row_trigger ? TRIGGER_EVENT_ROW : 0) |
5306  (trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
5307  (trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
5308  new_shared.ats_tgoid = trigger->tgoid;
5309  new_shared.ats_relid = RelationGetRelid(rel);
5310  new_shared.ats_firing_id = 0;
5311 
5312  afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth],
5313  &new_event, &new_shared);
5314  }
5315 
5316  /*
5317  * Finally, spool any foreign tuple(s). The tuplestore squashes them to
5318  * minimal tuples, so this loses any system columns. The executor lost
5319  * those columns before us, for an unrelated reason, so this is fine.
5320  */
5321  if (fdw_tuplestore)
5322  {
5323  if (oldtup != NULL)
5324  tuplestore_puttuple(fdw_tuplestore, oldtup);
5325  if (newtup != NULL)
5326  tuplestore_puttuple(fdw_tuplestore, newtup);
5327  }
5328 }
5329 
5330 Datum
5331 pg_trigger_depth(PG_FUNCTION_ARGS)
5332 {
5333  PG_RETURN_INT32(MyTriggerDepth);
5334 }
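/*
 * Editor's note: an illustrative libpq snippet (not part of trigger.c; object
 * names invented) showing a common use of the SQL-callable function above,
 * which reports the current trigger nesting depth: guarding a trigger with
 * WHEN (pg_trigger_depth() < 1) so that statements issued from inside the
 * trigger do not fire it again recursively.
 */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("");
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;

	res = PQexec(conn,
				 "CREATE TABLE doc (id int PRIMARY KEY, body text, updated_at timestamptz);"
				 "CREATE FUNCTION doc_touch() RETURNS trigger LANGUAGE plpgsql AS $$"
				 "BEGIN"
				 "  UPDATE doc SET updated_at = now() WHERE id = NEW.id;"
				 "  RETURN NEW;"
				 "END $$;"
				 "CREATE TRIGGER doc_touch_trg AFTER UPDATE ON doc FOR EACH ROW"
				 "  WHEN (pg_trigger_depth() < 1)"
				 "  EXECUTE PROCEDURE doc_touch();");
	printf("%s\n", PQresultStatus(res) == PGRES_COMMAND_OK ?
		   "objects created" : PQresultErrorMessage(res));
	PQclear(res);
	PQfinish(conn);
	return 0;
}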