/*
 * NOTE: The text below was captured from the Doxygen rendering of
 * trigger.c in the PostgreSQL source tree (git master branch).
 * The number at the start of each line is the original source line
 * number from that rendering; gaps in the numbering indicate lines
 * lost during extraction — this capture is NOT a complete copy of
 * the file.
 */
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/heapam.h"
18 #include "access/sysattr.h"
19 #include "access/htup_details.h"
20 #include "access/xact.h"
21 #include "catalog/catalog.h"
22 #include "catalog/dependency.h"
23 #include "catalog/indexing.h"
24 #include "catalog/objectaccess.h"
25 #include "catalog/pg_constraint.h"
27 #include "catalog/pg_proc.h"
28 #include "catalog/pg_trigger.h"
29 #include "catalog/pg_type.h"
30 #include "commands/dbcommands.h"
31 #include "commands/defrem.h"
32 #include "commands/trigger.h"
33 #include "executor/executor.h"
34 #include "miscadmin.h"
35 #include "nodes/bitmapset.h"
36 #include "nodes/makefuncs.h"
37 #include "optimizer/clauses.h"
38 #include "optimizer/var.h"
39 #include "parser/parse_clause.h"
40 #include "parser/parse_collate.h"
41 #include "parser/parse_func.h"
42 #include "parser/parse_relation.h"
43 #include "parser/parsetree.h"
44 #include "pgstat.h"
45 #include "rewrite/rewriteManip.h"
46 #include "storage/bufmgr.h"
47 #include "storage/lmgr.h"
48 #include "tcop/utility.h"
49 #include "utils/acl.h"
50 #include "utils/builtins.h"
51 #include "utils/bytea.h"
52 #include "utils/fmgroids.h"
53 #include "utils/inval.h"
54 #include "utils/lsyscache.h"
55 #include "utils/memutils.h"
56 #include "utils/rel.h"
57 #include "utils/snapmgr.h"
58 #include "utils/syscache.h"
59 #include "utils/tqual.h"
60 #include "utils/tuplestore.h"
61 
62 
63 /* GUC variables */
65 
66 /* How many levels deep into trigger execution are we? */
67 static int MyTriggerDepth = 0;
68 
69 /*
70  * Note that similar macros also exist in executor/execMain.c. There does not
71  * appear to be any good header to put them into, given the structures that
72  * they use, so we let them be duplicated. Be sure to update all if one needs
73  * to be changed, however.
74  */
/*
 * Fetch the updatedCols bitmapset for this result relation: the set of
 * columns assigned by the current UPDATE, read from the relation's
 * range-table entry in the executor state.  Duplicated in
 * executor/execMain.c (see the note above) — keep both in sync.
 */
75 #define GetUpdatedColumns(relinfo, estate) \
76  (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
77 
78 /* Local function prototypes */
79 static void ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid);
80 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
81 static HeapTuple GetTupleForTrigger(EState *estate,
82  EPQState *epqstate,
83  ResultRelInfo *relinfo,
84  ItemPointer tid,
85  LockTupleMode lockmode,
86  TupleTableSlot **newSlot);
87 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
88  Trigger *trigger, TriggerEvent event,
89  Bitmapset *modifiedCols,
90  HeapTuple oldtup, HeapTuple newtup);
92  int tgindx,
93  FmgrInfo *finfo,
94  Instrumentation *instr,
95  MemoryContext per_tuple_context);
96 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
97  int event, bool row_trigger,
98  HeapTuple oldtup, HeapTuple newtup,
99  List *recheckIndexes, Bitmapset *modifiedCols);
100 static void AfterTriggerEnlargeQueryState(void);
101 
102 
103 /*
104  * Create a trigger. Returns the address of the created trigger.
105  *
106  * queryString is the source text of the CREATE TRIGGER command.
107  * This must be supplied if a whenClause is specified, else it can be NULL.
108  *
109  * relOid, if nonzero, is the relation on which the trigger should be
110  * created. If zero, the name provided in the statement will be looked up.
111  *
112  * refRelOid, if nonzero, is the relation to which the constraint trigger
113  * refers. If zero, the constraint relation name provided in the statement
114  * will be looked up as needed.
115  *
116  * constraintOid, if nonzero, says that this trigger is being created
117  * internally to implement that constraint. A suitable pg_depend entry will
118  * be made to link the trigger to that constraint. constraintOid is zero when
119  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
120  * TRIGGER, we build a pg_constraint entry internally.)
121  *
122  * indexOid, if nonzero, is the OID of an index associated with the constraint.
123  * We do nothing with this except store it into pg_trigger.tgconstrindid.
124  *
125  * If isInternal is true then this is an internally-generated trigger.
126  * This argument sets the tgisinternal field of the pg_trigger entry, and
127  * if TRUE causes us to modify the given trigger name to ensure uniqueness.
128  *
129  * When isInternal is not true we require ACL_TRIGGER permissions on the
130  * relation, as well as ACL_EXECUTE on the trigger function. For internal
131  * triggers the caller must apply any required permission checks.
132  *
133  * Note: can return InvalidObjectAddress if we decided to not create a trigger
134  * at all, but a foreign-key constraint. This is a kluge for backwards
135  * compatibility.
136  */
138 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
139  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
140  bool isInternal)
141 {
142  int16 tgtype;
143  int ncolumns;
144  int16 *columns;
145  int2vector *tgattr;
146  Node *whenClause;
147  List *whenRtable;
148  char *qual;
150  bool nulls[Natts_pg_trigger];
151  Relation rel;
152  AclResult aclresult;
153  Relation tgrel;
154  SysScanDesc tgscan;
155  ScanKeyData key;
156  Relation pgrel;
157  HeapTuple tuple;
158  Oid fargtypes[1]; /* dummy */
159  Oid funcoid;
160  Oid funcrettype;
161  Oid trigoid;
162  char internaltrigname[NAMEDATALEN];
163  char *trigname;
164  Oid constrrelid = InvalidOid;
165  ObjectAddress myself,
166  referenced;
167  char *oldtablename = NULL;
168  char *newtablename = NULL;
169 
170  if (OidIsValid(relOid))
171  rel = heap_open(relOid, ShareRowExclusiveLock);
172  else
174 
175  /*
176  * Triggers must be on tables or views, and there are additional
177  * relation-type-specific restrictions.
178  */
179  if (rel->rd_rel->relkind == RELKIND_RELATION ||
180  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
181  {
182  /* Tables can't have INSTEAD OF triggers */
183  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
184  stmt->timing != TRIGGER_TYPE_AFTER)
185  ereport(ERROR,
186  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
187  errmsg("\"%s\" is a table",
189  errdetail("Tables cannot have INSTEAD OF triggers.")));
190  /* Disallow ROW triggers on partitioned tables */
191  if (stmt->row && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
192  ereport(ERROR,
193  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
194  errmsg("\"%s\" is a partitioned table",
196  errdetail("Partitioned tables cannot have ROW triggers.")));
197  }
198  else if (rel->rd_rel->relkind == RELKIND_VIEW)
199  {
200  /*
201  * Views can have INSTEAD OF triggers (which we check below are
202  * row-level), or statement-level BEFORE/AFTER triggers.
203  */
204  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
205  ereport(ERROR,
206  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
207  errmsg("\"%s\" is a view",
209  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
210  /* Disallow TRUNCATE triggers on VIEWs */
211  if (TRIGGER_FOR_TRUNCATE(stmt->events))
212  ereport(ERROR,
213  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
214  errmsg("\"%s\" is a view",
216  errdetail("Views cannot have TRUNCATE triggers.")));
217  }
218  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
219  {
220  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
221  stmt->timing != TRIGGER_TYPE_AFTER)
222  ereport(ERROR,
223  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
224  errmsg("\"%s\" is a foreign table",
226  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
227 
228  if (TRIGGER_FOR_TRUNCATE(stmt->events))
229  ereport(ERROR,
230  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
231  errmsg("\"%s\" is a foreign table",
233  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
234 
235  if (stmt->isconstraint)
236  ereport(ERROR,
237  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
238  errmsg("\"%s\" is a foreign table",
240  errdetail("Foreign tables cannot have constraint triggers.")));
241  }
242  else
243  ereport(ERROR,
244  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
245  errmsg("\"%s\" is not a table or view",
246  RelationGetRelationName(rel))));
247 
249  ereport(ERROR,
250  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
251  errmsg("permission denied: \"%s\" is a system catalog",
252  RelationGetRelationName(rel))));
253 
254  if (stmt->isconstraint)
255  {
256  /*
257  * We must take a lock on the target relation to protect against
258  * concurrent drop. It's not clear that AccessShareLock is strong
259  * enough, but we certainly need at least that much... otherwise, we
260  * might end up creating a pg_constraint entry referencing a
261  * nonexistent table.
262  */
263  if (OidIsValid(refRelOid))
264  {
265  LockRelationOid(refRelOid, AccessShareLock);
266  constrrelid = refRelOid;
267  }
268  else if (stmt->constrrel != NULL)
269  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
270  false);
271  }
272 
273  /* permission checks */
274  if (!isInternal)
275  {
276  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
277  ACL_TRIGGER);
278  if (aclresult != ACLCHECK_OK)
279  aclcheck_error(aclresult, ACL_KIND_CLASS,
281 
282  if (OidIsValid(constrrelid))
283  {
284  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
285  ACL_TRIGGER);
286  if (aclresult != ACLCHECK_OK)
287  aclcheck_error(aclresult, ACL_KIND_CLASS,
288  get_rel_name(constrrelid));
289  }
290  }
291 
292  /* Compute tgtype */
293  TRIGGER_CLEAR_TYPE(tgtype);
294  if (stmt->row)
295  TRIGGER_SETT_ROW(tgtype);
296  tgtype |= stmt->timing;
297  tgtype |= stmt->events;
298 
299  /* Disallow ROW-level TRUNCATE triggers */
300  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
301  ereport(ERROR,
302  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
303  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
304 
305  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
306  if (TRIGGER_FOR_INSTEAD(tgtype))
307  {
308  if (!TRIGGER_FOR_ROW(tgtype))
309  ereport(ERROR,
310  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
311  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
312  if (stmt->whenClause)
313  ereport(ERROR,
314  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
315  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
316  if (stmt->columns != NIL)
317  ereport(ERROR,
318  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
319  errmsg("INSTEAD OF triggers cannot have column lists")));
320  }
321 
322  /*
323  * We don't yet support naming ROW transition variables, but the parser
324  * recognizes the syntax so we can give a nicer message here.
325  *
326  * Per standard, REFERENCING TABLE names are only allowed on AFTER
327  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
328  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
329  * only allowed once. Per standard, OLD may not be specified when
330  * creating a trigger only for INSERT, and NEW may not be specified when
331  * creating a trigger only for DELETE.
332  *
333  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
334  * reference both ROW and TABLE transition data.
335  */
336  if (stmt->transitionRels != NIL)
337  {
338  List *varList = stmt->transitionRels;
339  ListCell *lc;
340 
341  foreach(lc, varList)
342  {
344 
345  if (!(tt->isTable))
346  ereport(ERROR,
347  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
348  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
349  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
350 
351  /*
352  * Because of the above test, we omit further ROW-related testing
353  * below. If we later allow naming OLD and NEW ROW variables,
354  * adjustments will be needed below.
355  */
356 
357  if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
358  ereport(ERROR,
359  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
360  errmsg("\"%s\" is a partitioned table",
362  errdetail("Triggers on partitioned tables cannot have transition tables.")));
363 
364  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
365  ereport(ERROR,
366  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
367  errmsg("\"%s\" is a foreign table",
369  errdetail("Triggers on foreign tables cannot have transition tables.")));
370 
371  if (rel->rd_rel->relkind == RELKIND_VIEW)
372  ereport(ERROR,
373  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
374  errmsg("\"%s\" is a view",
376  errdetail("Triggers on views cannot have transition tables.")));
377 
378  if (stmt->timing != TRIGGER_TYPE_AFTER)
379  ereport(ERROR,
380  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
381  errmsg("transition table name can only be specified for an AFTER trigger")));
382 
383  if (TRIGGER_FOR_TRUNCATE(tgtype))
384  ereport(ERROR,
385  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
386  errmsg("TRUNCATE triggers with transition tables are not supported")));
387 
388  if (tt->isNew)
389  {
390  if (!(TRIGGER_FOR_INSERT(tgtype) ||
391  TRIGGER_FOR_UPDATE(tgtype)))
392  ereport(ERROR,
393  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
394  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
395 
396  if (newtablename != NULL)
397  ereport(ERROR,
398  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
399  errmsg("NEW TABLE cannot be specified multiple times")));
400 
401  newtablename = tt->name;
402  }
403  else
404  {
405  if (!(TRIGGER_FOR_DELETE(tgtype) ||
406  TRIGGER_FOR_UPDATE(tgtype)))
407  ereport(ERROR,
408  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
409  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
410 
411  if (oldtablename != NULL)
412  ereport(ERROR,
413  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
414  errmsg("OLD TABLE cannot be specified multiple times")));
415 
416  oldtablename = tt->name;
417  }
418  }
419 
420  if (newtablename != NULL && oldtablename != NULL &&
421  strcmp(newtablename, oldtablename) == 0)
422  ereport(ERROR,
423  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
424  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
425  }
426 
427  /*
428  * Parse the WHEN clause, if any
429  */
430  if (stmt->whenClause)
431  {
432  ParseState *pstate;
433  RangeTblEntry *rte;
434  List *varList;
435  ListCell *lc;
436 
437  /* Set up a pstate to parse with */
438  pstate = make_parsestate(NULL);
439  pstate->p_sourcetext = queryString;
440 
441  /*
442  * Set up RTEs for OLD and NEW references.
443  *
444  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
445  */
446  rte = addRangeTableEntryForRelation(pstate, rel,
447  makeAlias("old", NIL),
448  false, false);
449  addRTEtoQuery(pstate, rte, false, true, true);
450  rte = addRangeTableEntryForRelation(pstate, rel,
451  makeAlias("new", NIL),
452  false, false);
453  addRTEtoQuery(pstate, rte, false, true, true);
454 
455  /* Transform expression. Copy to be sure we don't modify original */
456  whenClause = transformWhereClause(pstate,
457  copyObject(stmt->whenClause),
459  "WHEN");
460  /* we have to fix its collations too */
461  assign_expr_collations(pstate, whenClause);
462 
463  /*
464  * Check for disallowed references to OLD/NEW.
465  *
466  * NB: pull_var_clause is okay here only because we don't allow
467  * subselects in WHEN clauses; it would fail to examine the contents
468  * of subselects.
469  */
470  varList = pull_var_clause(whenClause, 0);
471  foreach(lc, varList)
472  {
473  Var *var = (Var *) lfirst(lc);
474 
475  switch (var->varno)
476  {
477  case PRS2_OLD_VARNO:
478  if (!TRIGGER_FOR_ROW(tgtype))
479  ereport(ERROR,
480  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
481  errmsg("statement trigger's WHEN condition cannot reference column values"),
482  parser_errposition(pstate, var->location)));
483  if (TRIGGER_FOR_INSERT(tgtype))
484  ereport(ERROR,
485  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
486  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
487  parser_errposition(pstate, var->location)));
488  /* system columns are okay here */
489  break;
490  case PRS2_NEW_VARNO:
491  if (!TRIGGER_FOR_ROW(tgtype))
492  ereport(ERROR,
493  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
494  errmsg("statement trigger's WHEN condition cannot reference column values"),
495  parser_errposition(pstate, var->location)));
496  if (TRIGGER_FOR_DELETE(tgtype))
497  ereport(ERROR,
498  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
499  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
500  parser_errposition(pstate, var->location)));
501  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
502  ereport(ERROR,
503  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
504  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
505  parser_errposition(pstate, var->location)));
506  break;
507  default:
508  /* can't happen without add_missing_from, so just elog */
509  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
510  break;
511  }
512  }
513 
514  /* we'll need the rtable for recordDependencyOnExpr */
515  whenRtable = pstate->p_rtable;
516 
517  qual = nodeToString(whenClause);
518 
519  free_parsestate(pstate);
520  }
521  else
522  {
523  whenClause = NULL;
524  whenRtable = NIL;
525  qual = NULL;
526  }
527 
528  /*
529  * Find and validate the trigger function.
530  */
531  funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false);
532  if (!isInternal)
533  {
534  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
535  if (aclresult != ACLCHECK_OK)
536  aclcheck_error(aclresult, ACL_KIND_PROC,
537  NameListToString(stmt->funcname));
538  }
539  funcrettype = get_func_rettype(funcoid);
540  if (funcrettype != TRIGGEROID)
541  {
542  /*
543  * We allow OPAQUE just so we can load old dump files. When we see a
544  * trigger function declared OPAQUE, change it to TRIGGER.
545  */
546  if (funcrettype == OPAQUEOID)
547  {
549  (errmsg("changing return type of function %s from %s to %s",
550  NameListToString(stmt->funcname),
551  "opaque", "trigger")));
553  }
554  else
555  ereport(ERROR,
556  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
557  errmsg("function %s must return type %s",
558  NameListToString(stmt->funcname), "trigger")));
559  }
560 
561  /*
562  * If the command is a user-entered CREATE CONSTRAINT TRIGGER command that
563  * references one of the built-in RI_FKey trigger functions, assume it is
564  * from a dump of a pre-7.3 foreign key constraint, and take steps to
565  * convert this legacy representation into a regular foreign key
566  * constraint. Ugly, but necessary for loading old dump files.
567  */
568  if (stmt->isconstraint && !isInternal &&
569  list_length(stmt->args) >= 6 &&
570  (list_length(stmt->args) % 2) == 0 &&
572  {
573  /* Keep lock on target rel until end of xact */
574  heap_close(rel, NoLock);
575 
576  ConvertTriggerToFK(stmt, funcoid);
577 
578  return InvalidObjectAddress;
579  }
580 
581  /*
582  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
583  * corresponding pg_constraint entry.
584  */
585  if (stmt->isconstraint && !OidIsValid(constraintOid))
586  {
587  /* Internal callers should have made their own constraints */
588  Assert(!isInternal);
589  constraintOid = CreateConstraintEntry(stmt->trigname,
592  stmt->deferrable,
593  stmt->initdeferred,
594  true,
595  RelationGetRelid(rel),
596  NULL, /* no conkey */
597  0,
598  InvalidOid, /* no domain */
599  InvalidOid, /* no index */
600  InvalidOid, /* no foreign key */
601  NULL,
602  NULL,
603  NULL,
604  NULL,
605  0,
606  ' ',
607  ' ',
608  ' ',
609  NULL, /* no exclusion */
610  NULL, /* no check constraint */
611  NULL,
612  NULL,
613  true, /* islocal */
614  0, /* inhcount */
615  true, /* isnoinherit */
616  isInternal); /* is_internal */
617  }
618 
619  /*
620  * Generate the trigger's OID now, so that we can use it in the name if
621  * needed.
622  */
624 
625  trigoid = GetNewOid(tgrel);
626 
627  /*
628  * If trigger is internally generated, modify the provided trigger name to
629  * ensure uniqueness by appending the trigger OID. (Callers will usually
630  * supply a simple constant trigger name in these cases.)
631  */
632  if (isInternal)
633  {
634  snprintf(internaltrigname, sizeof(internaltrigname),
635  "%s_%u", stmt->trigname, trigoid);
636  trigname = internaltrigname;
637  }
638  else
639  {
640  /* user-defined trigger; use the specified trigger name as-is */
641  trigname = stmt->trigname;
642  }
643 
644  /*
645  * Scan pg_trigger for existing triggers on relation. We do this only to
646  * give a nice error message if there's already a trigger of the same
647  * name. (The unique index on tgrelid/tgname would complain anyway.) We
648  * can skip this for internally generated triggers, since the name
649  * modification above should be sufficient.
650  *
651  * NOTE that this is cool only because we have ShareRowExclusiveLock on
652  * the relation, so the trigger set won't be changing underneath us.
653  */
654  if (!isInternal)
655  {
656  ScanKeyInit(&key,
658  BTEqualStrategyNumber, F_OIDEQ,
660  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
661  NULL, 1, &key);
662  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
663  {
664  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
665 
666  if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
667  ereport(ERROR,
669  errmsg("trigger \"%s\" for relation \"%s\" already exists",
670  trigname, RelationGetRelationName(rel))));
671  }
672  systable_endscan(tgscan);
673  }
674 
675  /*
676  * Build the new pg_trigger tuple.
677  */
678  memset(nulls, false, sizeof(nulls));
679 
682  CStringGetDatum(trigname));
683  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
684  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
686  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
687  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
688  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
689  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
692 
693  if (stmt->args)
694  {
695  ListCell *le;
696  char *args;
697  int16 nargs = list_length(stmt->args);
698  int len = 0;
699 
700  foreach(le, stmt->args)
701  {
702  char *ar = strVal(lfirst(le));
703 
704  len += strlen(ar) + 4;
705  for (; *ar; ar++)
706  {
707  if (*ar == '\\')
708  len++;
709  }
710  }
711  args = (char *) palloc(len + 1);
712  args[0] = '\0';
713  foreach(le, stmt->args)
714  {
715  char *s = strVal(lfirst(le));
716  char *d = args + strlen(args);
717 
718  while (*s)
719  {
720  if (*s == '\\')
721  *d++ = '\\';
722  *d++ = *s++;
723  }
724  strcpy(d, "\\000");
725  }
726  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
728  CStringGetDatum(args));
729  }
730  else
731  {
732  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
734  CStringGetDatum(""));
735  }
736 
737  /* build column number array if it's a column-specific trigger */
738  ncolumns = list_length(stmt->columns);
739  if (ncolumns == 0)
740  columns = NULL;
741  else
742  {
743  ListCell *cell;
744  int i = 0;
745 
746  columns = (int16 *) palloc(ncolumns * sizeof(int16));
747  foreach(cell, stmt->columns)
748  {
749  char *name = strVal(lfirst(cell));
750  int16 attnum;
751  int j;
752 
753  /* Lookup column name. System columns are not allowed */
754  attnum = attnameAttNum(rel, name, false);
755  if (attnum == InvalidAttrNumber)
756  ereport(ERROR,
757  (errcode(ERRCODE_UNDEFINED_COLUMN),
758  errmsg("column \"%s\" of relation \"%s\" does not exist",
759  name, RelationGetRelationName(rel))));
760 
761  /* Check for duplicates */
762  for (j = i - 1; j >= 0; j--)
763  {
764  if (columns[j] == attnum)
765  ereport(ERROR,
766  (errcode(ERRCODE_DUPLICATE_COLUMN),
767  errmsg("column \"%s\" specified more than once",
768  name)));
769  }
770 
771  columns[i++] = attnum;
772  }
773  }
774  tgattr = buildint2vector(columns, ncolumns);
775  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
776 
777  /* set tgqual if trigger has WHEN clause */
778  if (qual)
779  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
780  else
781  nulls[Anum_pg_trigger_tgqual - 1] = true;
782 
783  if (oldtablename)
785  CStringGetDatum(oldtablename));
786  else
787  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
788  if (newtablename)
790  CStringGetDatum(newtablename));
791  else
792  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
793 
794  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
795 
796  /* force tuple to have the desired OID */
797  HeapTupleSetOid(tuple, trigoid);
798 
799  /*
800  * Insert tuple into pg_trigger.
801  */
802  CatalogTupleInsert(tgrel, tuple);
803 
804  heap_freetuple(tuple);
806 
810  if (oldtablename)
812  if (newtablename)
814 
815  /*
816  * Update relation's pg_class entry. Crucial side-effect: other backends
817  * (and this one too!) are sent SI message to make them rebuild relcache
818  * entries.
819  */
821  tuple = SearchSysCacheCopy1(RELOID,
823  if (!HeapTupleIsValid(tuple))
824  elog(ERROR, "cache lookup failed for relation %u",
825  RelationGetRelid(rel));
826 
827  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
828 
829  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
830 
831  heap_freetuple(tuple);
833 
834  /*
835  * We used to try to update the rel's relcache entry here, but that's
836  * fairly pointless since it will happen as a byproduct of the upcoming
837  * CommandCounterIncrement...
838  */
839 
840  /*
841  * Record dependencies for trigger. Always place a normal dependency on
842  * the function.
843  */
844  myself.classId = TriggerRelationId;
845  myself.objectId = trigoid;
846  myself.objectSubId = 0;
847 
848  referenced.classId = ProcedureRelationId;
849  referenced.objectId = funcoid;
850  referenced.objectSubId = 0;
851  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
852 
853  if (isInternal && OidIsValid(constraintOid))
854  {
855  /*
856  * Internally-generated trigger for a constraint, so make it an
857  * internal dependency of the constraint. We can skip depending on
858  * the relation(s), as there'll be an indirect dependency via the
859  * constraint.
860  */
861  referenced.classId = ConstraintRelationId;
862  referenced.objectId = constraintOid;
863  referenced.objectSubId = 0;
864  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
865  }
866  else
867  {
868  /*
869  * User CREATE TRIGGER, so place dependencies. We make trigger be
870  * auto-dropped if its relation is dropped or if the FK relation is
871  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
872  */
873  referenced.classId = RelationRelationId;
874  referenced.objectId = RelationGetRelid(rel);
875  referenced.objectSubId = 0;
876  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
877  if (OidIsValid(constrrelid))
878  {
879  referenced.classId = RelationRelationId;
880  referenced.objectId = constrrelid;
881  referenced.objectSubId = 0;
882  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
883  }
884  /* Not possible to have an index dependency in this case */
885  Assert(!OidIsValid(indexOid));
886 
887  /*
888  * If it's a user-specified constraint trigger, make the constraint
889  * internally dependent on the trigger instead of vice versa.
890  */
891  if (OidIsValid(constraintOid))
892  {
893  referenced.classId = ConstraintRelationId;
894  referenced.objectId = constraintOid;
895  referenced.objectSubId = 0;
896  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
897  }
898  }
899 
900  /* If column-specific trigger, add normal dependencies on columns */
901  if (columns != NULL)
902  {
903  int i;
904 
905  referenced.classId = RelationRelationId;
906  referenced.objectId = RelationGetRelid(rel);
907  for (i = 0; i < ncolumns; i++)
908  {
909  referenced.objectSubId = columns[i];
910  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
911  }
912  }
913 
914  /*
915  * If it has a WHEN clause, add dependencies on objects mentioned in the
916  * expression (eg, functions, as well as any columns used).
917  */
918  if (whenClause != NULL)
919  recordDependencyOnExpr(&myself, whenClause, whenRtable,
921 
922  /* Post creation hook for new trigger */
924  isInternal);
925 
926  /* Keep lock on target rel until end of xact */
927  heap_close(rel, NoLock);
928 
929  return myself;
930 }
931 
932 
933 /*
934  * Convert legacy (pre-7.3) CREATE CONSTRAINT TRIGGER commands into
935  * full-fledged foreign key constraints.
936  *
937  * The conversion is complex because a pre-7.3 foreign key involved three
938  * separate triggers, which were reported separately in dumps. While the
939  * single trigger on the referencing table adds no new information, we need
940  * to know the trigger functions of both of the triggers on the referenced
941  * table to build the constraint declaration. Also, due to lack of proper
942  * dependency checking pre-7.3, it is possible that the source database had
943  * an incomplete set of triggers resulting in an only partially enforced
944  * FK constraint. (This would happen if one of the tables had been dropped
945  * and re-created, but only if the DB had been affected by a 7.0 pg_dump bug
946  * that caused loss of tgconstrrelid information.) We choose to translate to
947  * an FK constraint only when we've seen all three triggers of a set. This is
948  * implemented by storing unmatched items in a list in TopMemoryContext.
949  * We match triggers together by comparing the trigger arguments (which
950  * include constraint name, table and column names, so should be good enough).
951  */
952 typedef struct
953 {
954  List *args; /* list of (T_String) Values or NIL */
955  Oid funcoids[3]; /* OIDs of trigger functions */
956  /* The three function OIDs are stored in the order update, delete, child */
958 
959 static void
961 {
962  static List *info_list = NIL;
963 
964  static const char *const funcdescr[3] = {
965  gettext_noop("Found referenced table's UPDATE trigger."),
966  gettext_noop("Found referenced table's DELETE trigger."),
967  gettext_noop("Found referencing table's trigger.")
968  };
969 
970  char *constr_name;
971  char *fk_table_name;
972  char *pk_table_name;
973  char fk_matchtype = FKCONSTR_MATCH_SIMPLE;
974  List *fk_attrs = NIL;
975  List *pk_attrs = NIL;
977  int funcnum;
978  OldTriggerInfo *info = NULL;
979  ListCell *l;
980  int i;
981 
982  /* Parse out the trigger arguments */
983  constr_name = strVal(linitial(stmt->args));
984  fk_table_name = strVal(lsecond(stmt->args));
985  pk_table_name = strVal(lthird(stmt->args));
986  i = 0;
987  foreach(l, stmt->args)
988  {
989  Value *arg = (Value *) lfirst(l);
990 
991  i++;
992  if (i < 4) /* skip constraint and table names */
993  continue;
994  if (i == 4) /* handle match type */
995  {
996  if (strcmp(strVal(arg), "FULL") == 0)
997  fk_matchtype = FKCONSTR_MATCH_FULL;
998  else
999  fk_matchtype = FKCONSTR_MATCH_SIMPLE;
1000  continue;
1001  }
1002  if (i % 2)
1003  fk_attrs = lappend(fk_attrs, arg);
1004  else
1005  pk_attrs = lappend(pk_attrs, arg);
1006  }
1007 
1008  /* Prepare description of constraint for use in messages */
1009  initStringInfo(&buf);
1010  appendStringInfo(&buf, "FOREIGN KEY %s(",
1011  quote_identifier(fk_table_name));
1012  i = 0;
1013  foreach(l, fk_attrs)
1014  {
1015  Value *arg = (Value *) lfirst(l);
1016 
1017  if (i++ > 0)
1018  appendStringInfoChar(&buf, ',');
1020  }
1021  appendStringInfo(&buf, ") REFERENCES %s(",
1022  quote_identifier(pk_table_name));
1023  i = 0;
1024  foreach(l, pk_attrs)
1025  {
1026  Value *arg = (Value *) lfirst(l);
1027 
1028  if (i++ > 0)
1029  appendStringInfoChar(&buf, ',');
1031  }
1032  appendStringInfoChar(&buf, ')');
1033 
1034  /* Identify class of trigger --- update, delete, or referencing-table */
1035  switch (funcoid)
1036  {
1037  case F_RI_FKEY_CASCADE_UPD:
1038  case F_RI_FKEY_RESTRICT_UPD:
1039  case F_RI_FKEY_SETNULL_UPD:
1040  case F_RI_FKEY_SETDEFAULT_UPD:
1041  case F_RI_FKEY_NOACTION_UPD:
1042  funcnum = 0;
1043  break;
1044 
1045  case F_RI_FKEY_CASCADE_DEL:
1046  case F_RI_FKEY_RESTRICT_DEL:
1047  case F_RI_FKEY_SETNULL_DEL:
1048  case F_RI_FKEY_SETDEFAULT_DEL:
1049  case F_RI_FKEY_NOACTION_DEL:
1050  funcnum = 1;
1051  break;
1052 
1053  default:
1054  funcnum = 2;
1055  break;
1056  }
1057 
1058  /* See if we have a match to this trigger */
1059  foreach(l, info_list)
1060  {
1061  info = (OldTriggerInfo *) lfirst(l);
1062  if (info->funcoids[funcnum] == InvalidOid &&
1063  equal(info->args, stmt->args))
1064  {
1065  info->funcoids[funcnum] = funcoid;
1066  break;
1067  }
1068  }
1069 
1070  if (l == NULL)
1071  {
1072  /* First trigger of set, so create a new list entry */
1073  MemoryContext oldContext;
1074 
1075  ereport(NOTICE,
1076  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1077  constr_name, buf.data),
1078  errdetail_internal("%s", _(funcdescr[funcnum]))));
1080  info = (OldTriggerInfo *) palloc0(sizeof(OldTriggerInfo));
1081  info->args = copyObject(stmt->args);
1082  info->funcoids[funcnum] = funcoid;
1083  info_list = lappend(info_list, info);
1084  MemoryContextSwitchTo(oldContext);
1085  }
1086  else if (info->funcoids[0] == InvalidOid ||
1087  info->funcoids[1] == InvalidOid ||
1088  info->funcoids[2] == InvalidOid)
1089  {
1090  /* Second trigger of set */
1091  ereport(NOTICE,
1092  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
1093  constr_name, buf.data),
1094  errdetail_internal("%s", _(funcdescr[funcnum]))));
1095  }
1096  else
1097  {
1098  /* OK, we have a set, so make the FK constraint ALTER TABLE cmd */
1101  Constraint *fkcon = makeNode(Constraint);
1102  PlannedStmt *wrapper = makeNode(PlannedStmt);
1103 
1104  ereport(NOTICE,
1105  (errmsg("converting trigger group into constraint \"%s\" %s",
1106  constr_name, buf.data),
1107  errdetail_internal("%s", _(funcdescr[funcnum]))));
1108  fkcon->contype = CONSTR_FOREIGN;
1109  fkcon->location = -1;
1110  if (funcnum == 2)
1111  {
1112  /* This trigger is on the FK table */
1113  atstmt->relation = stmt->relation;
1114  if (stmt->constrrel)
1115  fkcon->pktable = stmt->constrrel;
1116  else
1117  {
1118  /* Work around ancient pg_dump bug that omitted constrrel */
1119  fkcon->pktable = makeRangeVar(NULL, pk_table_name, -1);
1120  }
1121  }
1122  else
1123  {
1124  /* This trigger is on the PK table */
1125  fkcon->pktable = stmt->relation;
1126  if (stmt->constrrel)
1127  atstmt->relation = stmt->constrrel;
1128  else
1129  {
1130  /* Work around ancient pg_dump bug that omitted constrrel */
1131  atstmt->relation = makeRangeVar(NULL, fk_table_name, -1);
1132  }
1133  }
1134  atstmt->cmds = list_make1(atcmd);
1135  atstmt->relkind = OBJECT_TABLE;
1136  atcmd->subtype = AT_AddConstraint;
1137  atcmd->def = (Node *) fkcon;
1138  if (strcmp(constr_name, "<unnamed>") == 0)
1139  fkcon->conname = NULL;
1140  else
1141  fkcon->conname = constr_name;
1142  fkcon->fk_attrs = fk_attrs;
1143  fkcon->pk_attrs = pk_attrs;
1144  fkcon->fk_matchtype = fk_matchtype;
1145  switch (info->funcoids[0])
1146  {
1147  case F_RI_FKEY_NOACTION_UPD:
1149  break;
1150  case F_RI_FKEY_CASCADE_UPD:
1152  break;
1153  case F_RI_FKEY_RESTRICT_UPD:
1155  break;
1156  case F_RI_FKEY_SETNULL_UPD:
1158  break;
1159  case F_RI_FKEY_SETDEFAULT_UPD:
1161  break;
1162  default:
1163  /* can't get here because of earlier checks */
1164  elog(ERROR, "confused about RI update function");
1165  }
1166  switch (info->funcoids[1])
1167  {
1168  case F_RI_FKEY_NOACTION_DEL:
1170  break;
1171  case F_RI_FKEY_CASCADE_DEL:
1173  break;
1174  case F_RI_FKEY_RESTRICT_DEL:
1176  break;
1177  case F_RI_FKEY_SETNULL_DEL:
1179  break;
1180  case F_RI_FKEY_SETDEFAULT_DEL:
1182  break;
1183  default:
1184  /* can't get here because of earlier checks */
1185  elog(ERROR, "confused about RI delete function");
1186  }
1187  fkcon->deferrable = stmt->deferrable;
1188  fkcon->initdeferred = stmt->initdeferred;
1189  fkcon->skip_validation = false;
1190  fkcon->initially_valid = true;
1191 
1192  /* finally, wrap it in a dummy PlannedStmt */
1193  wrapper->commandType = CMD_UTILITY;
1194  wrapper->canSetTag = false;
1195  wrapper->utilityStmt = (Node *) atstmt;
1196  wrapper->stmt_location = -1;
1197  wrapper->stmt_len = -1;
1198 
1199  /* ... and execute it */
1200  ProcessUtility(wrapper,
1201  "(generated ALTER TABLE ADD FOREIGN KEY command)",
1203  None_Receiver, NULL);
1204 
1205  /* Remove the matched item from the list */
1206  info_list = list_delete_ptr(info_list, info);
1207  pfree(info);
1208  /* We leak the copied args ... not worth worrying about */
1209  }
1210 }
1211 
1212 /*
1213  * Guts of trigger deletion.
1214  */
1215 void
1217 {
1218  Relation tgrel;
1219  SysScanDesc tgscan;
1220  ScanKeyData skey[1];
1221  HeapTuple tup;
1222  Oid relid;
1223  Relation rel;
1224 
1226 
1227  /*
1228  * Find the trigger to delete.
1229  */
1230  ScanKeyInit(&skey[0],
1232  BTEqualStrategyNumber, F_OIDEQ,
1233  ObjectIdGetDatum(trigOid));
1234 
1235  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
1236  NULL, 1, skey);
1237 
1238  tup = systable_getnext(tgscan);
1239  if (!HeapTupleIsValid(tup))
1240  elog(ERROR, "could not find tuple for trigger %u", trigOid);
1241 
1242  /*
1243  * Open and exclusive-lock the relation the trigger belongs to.
1244  */
1245  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
1246 
1247  rel = heap_open(relid, AccessExclusiveLock);
1248 
1249  if (rel->rd_rel->relkind != RELKIND_RELATION &&
1250  rel->rd_rel->relkind != RELKIND_VIEW &&
1251  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
1252  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
1253  ereport(ERROR,
1254  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1255  errmsg("\"%s\" is not a table, view, or foreign table",
1256  RelationGetRelationName(rel))));
1257 
1259  ereport(ERROR,
1260  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1261  errmsg("permission denied: \"%s\" is a system catalog",
1262  RelationGetRelationName(rel))));
1263 
1264  /*
1265  * Delete the pg_trigger tuple.
1266  */
1267  CatalogTupleDelete(tgrel, &tup->t_self);
1268 
1269  systable_endscan(tgscan);
1270  heap_close(tgrel, RowExclusiveLock);
1271 
1272  /*
1273  * We do not bother to try to determine whether any other triggers remain,
1274  * which would be needed in order to decide whether it's safe to clear the
1275  * relation's relhastriggers. (In any case, there might be a concurrent
1276  * process adding new triggers.) Instead, just force a relcache inval to
1277  * make other backends (and this one too!) rebuild their relcache entries.
1278  * There's no great harm in leaving relhastriggers true even if there are
1279  * no triggers left.
1280  */
1282 
1283  /* Keep lock on trigger's rel until end of xact */
1284  heap_close(rel, NoLock);
1285 }
1286 
1287 /*
1288  * get_trigger_oid - Look up a trigger by name to find its OID.
1289  *
1290  * If missing_ok is false, throw an error if trigger not found. If
1291  * true, just return InvalidOid.
1292  */
1293 Oid
1294 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1295 {
1296  Relation tgrel;
1297  ScanKeyData skey[2];
1298  SysScanDesc tgscan;
1299  HeapTuple tup;
1300  Oid oid;
1301 
1302  /*
1303  * Find the trigger, verify permissions, set up object address
1304  */
1306 
1307  ScanKeyInit(&skey[0],
1309  BTEqualStrategyNumber, F_OIDEQ,
1310  ObjectIdGetDatum(relid));
1311  ScanKeyInit(&skey[1],
1313  BTEqualStrategyNumber, F_NAMEEQ,
1314  CStringGetDatum(trigname));
1315 
1316  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1317  NULL, 2, skey);
1318 
1319  tup = systable_getnext(tgscan);
1320 
1321  if (!HeapTupleIsValid(tup))
1322  {
1323  if (!missing_ok)
1324  ereport(ERROR,
1325  (errcode(ERRCODE_UNDEFINED_OBJECT),
1326  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1327  trigname, get_rel_name(relid))));
1328  oid = InvalidOid;
1329  }
1330  else
1331  {
1332  oid = HeapTupleGetOid(tup);
1333  }
1334 
1335  systable_endscan(tgscan);
1336  heap_close(tgrel, AccessShareLock);
1337  return oid;
1338 }
1339 
1340 /*
1341  * Perform permissions and integrity checks before acquiring a relation lock.
1342  */
1343 static void
1345  void *arg)
1346 {
1347  HeapTuple tuple;
1348  Form_pg_class form;
1349 
1350  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1351  if (!HeapTupleIsValid(tuple))
1352  return; /* concurrently dropped */
1353  form = (Form_pg_class) GETSTRUCT(tuple);
1354 
1355  /* only tables and views can have triggers */
1356  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1357  form->relkind != RELKIND_FOREIGN_TABLE &&
1358  form->relkind != RELKIND_PARTITIONED_TABLE)
1359  ereport(ERROR,
1360  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1361  errmsg("\"%s\" is not a table, view, or foreign table",
1362  rv->relname)));
1363 
1364  /* you must own the table to rename one of its triggers */
1365  if (!pg_class_ownercheck(relid, GetUserId()))
1367  if (!allowSystemTableMods && IsSystemClass(relid, form))
1368  ereport(ERROR,
1369  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1370  errmsg("permission denied: \"%s\" is a system catalog",
1371  rv->relname)));
1372 
1373  ReleaseSysCache(tuple);
1374 }
1375 
1376 /*
1377  * renametrig - changes the name of a trigger on a relation
1378  *
1379  * trigger name is changed in trigger catalog.
1380  * No record of the previous name is kept.
1381  *
1382  * get proper relrelation from relation catalog (if not arg)
1383  * scan trigger catalog
1384  * for name conflict (within rel)
1385  * for original trigger (if not arg)
1386  * modify tgname in trigger tuple
1387  * update row in catalog
1388  */
1391 {
1392  Oid tgoid;
1393  Relation targetrel;
1394  Relation tgrel;
1395  HeapTuple tuple;
1396  SysScanDesc tgscan;
1397  ScanKeyData key[2];
1398  Oid relid;
1399  ObjectAddress address;
1400 
1401  /*
1402  * Look up name, check permissions, and acquire lock (which we will NOT
1403  * release until end of transaction).
1404  */
1406  false, false,
1408  NULL);
1409 
1410  /* Have lock already, so just need to build relcache entry. */
1411  targetrel = relation_open(relid, NoLock);
1412 
1413  /*
1414  * Scan pg_trigger twice for existing triggers on relation. We do this in
1415  * order to ensure a trigger does not exist with newname (The unique index
1416  * on tgrelid/tgname would complain anyway) and to ensure a trigger does
1417  * exist with oldname.
1418  *
1419  * NOTE that this is cool only because we have AccessExclusiveLock on the
1420  * relation, so the trigger set won't be changing underneath us.
1421  */
1423 
1424  /*
1425  * First pass -- look for name conflict
1426  */
1427  ScanKeyInit(&key[0],
1429  BTEqualStrategyNumber, F_OIDEQ,
1430  ObjectIdGetDatum(relid));
1431  ScanKeyInit(&key[1],
1433  BTEqualStrategyNumber, F_NAMEEQ,
1434  PointerGetDatum(stmt->newname));
1435  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1436  NULL, 2, key);
1437  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1438  ereport(ERROR,
1440  errmsg("trigger \"%s\" for relation \"%s\" already exists",
1441  stmt->newname, RelationGetRelationName(targetrel))));
1442  systable_endscan(tgscan);
1443 
1444  /*
1445  * Second pass -- look for trigger existing with oldname and update
1446  */
1447  ScanKeyInit(&key[0],
1449  BTEqualStrategyNumber, F_OIDEQ,
1450  ObjectIdGetDatum(relid));
1451  ScanKeyInit(&key[1],
1453  BTEqualStrategyNumber, F_NAMEEQ,
1454  PointerGetDatum(stmt->subname));
1455  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1456  NULL, 2, key);
1457  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1458  {
1459  tgoid = HeapTupleGetOid(tuple);
1460 
1461  /*
1462  * Update pg_trigger tuple with new tgname.
1463  */
1464  tuple = heap_copytuple(tuple); /* need a modifiable copy */
1465 
1466  namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname,
1467  stmt->newname);
1468 
1469  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
1470 
1472  HeapTupleGetOid(tuple), 0);
1473 
1474  /*
1475  * Invalidate relation's relcache entry so that other backends (and
1476  * this one too!) are sent SI message to make them rebuild relcache
1477  * entries. (Ideally this should happen automatically...)
1478  */
1479  CacheInvalidateRelcache(targetrel);
1480  }
1481  else
1482  {
1483  ereport(ERROR,
1484  (errcode(ERRCODE_UNDEFINED_OBJECT),
1485  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1486  stmt->subname, RelationGetRelationName(targetrel))));
1487  }
1488 
1489  ObjectAddressSet(address, TriggerRelationId, tgoid);
1490 
1491  systable_endscan(tgscan);
1492 
1493  heap_close(tgrel, RowExclusiveLock);
1494 
1495  /*
1496  * Close rel, but keep exclusive lock!
1497  */
1498  relation_close(targetrel, NoLock);
1499 
1500  return address;
1501 }
1502 
1503 
1504 /*
1505  * EnableDisableTrigger()
1506  *
1507  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1508  * to change 'tgenabled' field for the specified trigger(s)
1509  *
1510  * rel: relation to process (caller must hold suitable lock on it)
1511  * tgname: trigger to process, or NULL to scan all triggers
1512  * fires_when: new value for tgenabled field. In addition to generic
1513  * enablement/disablement, this also defines when the trigger
1514  * should be fired in session replication roles.
1515  * skip_system: if true, skip "system" triggers (constraint triggers)
1516  *
1517  * Caller should have checked permissions for the table; here we also
1518  * enforce that superuser privilege is required to alter the state of
1519  * system triggers
1520  */
1521 void
1522 EnableDisableTrigger(Relation rel, const char *tgname,
1523  char fires_when, bool skip_system)
1524 {
1525  Relation tgrel;
1526  int nkeys;
1527  ScanKeyData keys[2];
1528  SysScanDesc tgscan;
1529  HeapTuple tuple;
1530  bool found;
1531  bool changed;
1532 
1533  /* Scan the relevant entries in pg_triggers */
1535 
1536  ScanKeyInit(&keys[0],
1538  BTEqualStrategyNumber, F_OIDEQ,
1540  if (tgname)
1541  {
1542  ScanKeyInit(&keys[1],
1544  BTEqualStrategyNumber, F_NAMEEQ,
1545  CStringGetDatum(tgname));
1546  nkeys = 2;
1547  }
1548  else
1549  nkeys = 1;
1550 
1551  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1552  NULL, nkeys, keys);
1553 
1554  found = changed = false;
1555 
1556  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1557  {
1558  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
1559 
1560  if (oldtrig->tgisinternal)
1561  {
1562  /* system trigger ... ok to process? */
1563  if (skip_system)
1564  continue;
1565  if (!superuser())
1566  ereport(ERROR,
1567  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1568  errmsg("permission denied: \"%s\" is a system trigger",
1569  NameStr(oldtrig->tgname))));
1570  }
1571 
1572  found = true;
1573 
1574  if (oldtrig->tgenabled != fires_when)
1575  {
1576  /* need to change this one ... make a copy to scribble on */
1577  HeapTuple newtup = heap_copytuple(tuple);
1578  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
1579 
1580  newtrig->tgenabled = fires_when;
1581 
1582  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
1583 
1584  heap_freetuple(newtup);
1585 
1586  changed = true;
1587  }
1588 
1590  HeapTupleGetOid(tuple), 0);
1591  }
1592 
1593  systable_endscan(tgscan);
1594 
1595  heap_close(tgrel, RowExclusiveLock);
1596 
1597  if (tgname && !found)
1598  ereport(ERROR,
1599  (errcode(ERRCODE_UNDEFINED_OBJECT),
1600  errmsg("trigger \"%s\" for table \"%s\" does not exist",
1601  tgname, RelationGetRelationName(rel))));
1602 
1603  /*
1604  * If we changed anything, broadcast a SI inval message to force each
1605  * backend (including our own!) to rebuild relation's relcache entry.
1606  * Otherwise they will fail to apply the change promptly.
1607  */
1608  if (changed)
1610 }
1611 
1612 
1613 /*
1614  * Build trigger data to attach to the given relcache entry.
1615  *
1616  * Note that trigger data attached to a relcache entry must be stored in
1617  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1618  * But we should be running in a less long-lived working context. To avoid
1619  * leaking cache memory if this routine fails partway through, we build a
1620  * temporary TriggerDesc in working memory and then copy the completed
1621  * structure into cache memory.
1622  */
1623 void
1625 {
1626  TriggerDesc *trigdesc;
1627  int numtrigs;
1628  int maxtrigs;
1629  Trigger *triggers;
1630  Relation tgrel;
1631  ScanKeyData skey;
1632  SysScanDesc tgscan;
1633  HeapTuple htup;
1634  MemoryContext oldContext;
1635  int i;
1636 
1637  /*
1638  * Allocate a working array to hold the triggers (the array is extended if
1639  * necessary)
1640  */
1641  maxtrigs = 16;
1642  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
1643  numtrigs = 0;
1644 
1645  /*
1646  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
1647  * be reading the triggers in name order, except possibly during
1648  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
1649  * ensures that triggers will be fired in name order.
1650  */
1651  ScanKeyInit(&skey,
1653  BTEqualStrategyNumber, F_OIDEQ,
1654  ObjectIdGetDatum(RelationGetRelid(relation)));
1655 
1657  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1658  NULL, 1, &skey);
1659 
1660  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
1661  {
1662  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
1663  Trigger *build;
1664  Datum datum;
1665  bool isnull;
1666 
1667  if (numtrigs >= maxtrigs)
1668  {
1669  maxtrigs *= 2;
1670  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
1671  }
1672  build = &(triggers[numtrigs]);
1673 
1674  build->tgoid = HeapTupleGetOid(htup);
1676  NameGetDatum(&pg_trigger->tgname)));
1677  build->tgfoid = pg_trigger->tgfoid;
1678  build->tgtype = pg_trigger->tgtype;
1679  build->tgenabled = pg_trigger->tgenabled;
1680  build->tgisinternal = pg_trigger->tgisinternal;
1681  build->tgconstrrelid = pg_trigger->tgconstrrelid;
1682  build->tgconstrindid = pg_trigger->tgconstrindid;
1683  build->tgconstraint = pg_trigger->tgconstraint;
1684  build->tgdeferrable = pg_trigger->tgdeferrable;
1685  build->tginitdeferred = pg_trigger->tginitdeferred;
1686  build->tgnargs = pg_trigger->tgnargs;
1687  /* tgattr is first var-width field, so OK to access directly */
1688  build->tgnattr = pg_trigger->tgattr.dim1;
1689  if (build->tgnattr > 0)
1690  {
1691  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
1692  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
1693  build->tgnattr * sizeof(int16));
1694  }
1695  else
1696  build->tgattr = NULL;
1697  if (build->tgnargs > 0)
1698  {
1699  bytea *val;
1700  char *p;
1701 
1702  val = DatumGetByteaPP(fastgetattr(htup,
1704  tgrel->rd_att, &isnull));
1705  if (isnull)
1706  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
1707  RelationGetRelationName(relation));
1708  p = (char *) VARDATA_ANY(val);
1709  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
1710  for (i = 0; i < build->tgnargs; i++)
1711  {
1712  build->tgargs[i] = pstrdup(p);
1713  p += strlen(p) + 1;
1714  }
1715  }
1716  else
1717  build->tgargs = NULL;
1718 
1720  tgrel->rd_att, &isnull);
1721  if (!isnull)
1722  build->tgoldtable =
1724  else
1725  build->tgoldtable = NULL;
1726 
1728  tgrel->rd_att, &isnull);
1729  if (!isnull)
1730  build->tgnewtable =
1732  else
1733  build->tgnewtable = NULL;
1734 
1735  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
1736  tgrel->rd_att, &isnull);
1737  if (!isnull)
1738  build->tgqual = TextDatumGetCString(datum);
1739  else
1740  build->tgqual = NULL;
1741 
1742  numtrigs++;
1743  }
1744 
1745  systable_endscan(tgscan);
1746  heap_close(tgrel, AccessShareLock);
1747 
1748  /* There might not be any triggers */
1749  if (numtrigs == 0)
1750  {
1751  pfree(triggers);
1752  return;
1753  }
1754 
1755  /* Build trigdesc */
1756  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
1757  trigdesc->triggers = triggers;
1758  trigdesc->numtriggers = numtrigs;
1759  for (i = 0; i < numtrigs; i++)
1760  SetTriggerFlags(trigdesc, &(triggers[i]));
1761 
1762  /* Copy completed trigdesc into cache storage */
1764  relation->trigdesc = CopyTriggerDesc(trigdesc);
1765  MemoryContextSwitchTo(oldContext);
1766 
1767  /* Release working memory */
1768  FreeTriggerDesc(trigdesc);
1769 }
1770 
1771 /*
1772  * Update the TriggerDesc's hint flags to include the specified trigger
1773  */
1774 static void
1776 {
1777  int16 tgtype = trigger->tgtype;
1778 
1779  trigdesc->trig_insert_before_row |=
1782  trigdesc->trig_insert_after_row |=
1785  trigdesc->trig_insert_instead_row |=
1788  trigdesc->trig_insert_before_statement |=
1791  trigdesc->trig_insert_after_statement |=
1794  trigdesc->trig_update_before_row |=
1797  trigdesc->trig_update_after_row |=
1800  trigdesc->trig_update_instead_row |=
1803  trigdesc->trig_update_before_statement |=
1806  trigdesc->trig_update_after_statement |=
1809  trigdesc->trig_delete_before_row |=
1812  trigdesc->trig_delete_after_row |=
1815  trigdesc->trig_delete_instead_row |=
1818  trigdesc->trig_delete_before_statement |=
1821  trigdesc->trig_delete_after_statement |=
1824  /* there are no row-level truncate triggers */
1825  trigdesc->trig_truncate_before_statement |=
1828  trigdesc->trig_truncate_after_statement |=
1831 
1832  trigdesc->trig_insert_new_table |=
1833  (TRIGGER_FOR_INSERT(tgtype) &&
1835  trigdesc->trig_update_old_table |=
1836  (TRIGGER_FOR_UPDATE(tgtype) &&
1838  trigdesc->trig_update_new_table |=
1839  (TRIGGER_FOR_UPDATE(tgtype) &&
1841  trigdesc->trig_delete_old_table |=
1842  (TRIGGER_FOR_DELETE(tgtype) &&
1844 }
1845 
1846 /*
1847  * Copy a TriggerDesc data structure.
1848  *
1849  * The copy is allocated in the current memory context.
1850  */
1851 TriggerDesc *
1853 {
1854  TriggerDesc *newdesc;
1855  Trigger *trigger;
1856  int i;
1857 
1858  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
1859  return NULL;
1860 
1861  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
1862  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
1863 
1864  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
1865  memcpy(trigger, trigdesc->triggers,
1866  trigdesc->numtriggers * sizeof(Trigger));
1867  newdesc->triggers = trigger;
1868 
1869  for (i = 0; i < trigdesc->numtriggers; i++)
1870  {
1871  trigger->tgname = pstrdup(trigger->tgname);
1872  if (trigger->tgnattr > 0)
1873  {
1874  int16 *newattr;
1875 
1876  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
1877  memcpy(newattr, trigger->tgattr,
1878  trigger->tgnattr * sizeof(int16));
1879  trigger->tgattr = newattr;
1880  }
1881  if (trigger->tgnargs > 0)
1882  {
1883  char **newargs;
1884  int16 j;
1885 
1886  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
1887  for (j = 0; j < trigger->tgnargs; j++)
1888  newargs[j] = pstrdup(trigger->tgargs[j]);
1889  trigger->tgargs = newargs;
1890  }
1891  if (trigger->tgqual)
1892  trigger->tgqual = pstrdup(trigger->tgqual);
1893  if (trigger->tgoldtable)
1894  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
1895  if (trigger->tgnewtable)
1896  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
1897  trigger++;
1898  }
1899 
1900  return newdesc;
1901 }
1902 
1903 /*
1904  * Free a TriggerDesc data structure.
1905  */
1906 void
1908 {
1909  Trigger *trigger;
1910  int i;
1911 
1912  if (trigdesc == NULL)
1913  return;
1914 
1915  trigger = trigdesc->triggers;
1916  for (i = 0; i < trigdesc->numtriggers; i++)
1917  {
1918  pfree(trigger->tgname);
1919  if (trigger->tgnattr > 0)
1920  pfree(trigger->tgattr);
1921  if (trigger->tgnargs > 0)
1922  {
1923  while (--(trigger->tgnargs) >= 0)
1924  pfree(trigger->tgargs[trigger->tgnargs]);
1925  pfree(trigger->tgargs);
1926  }
1927  if (trigger->tgqual)
1928  pfree(trigger->tgqual);
1929  if (trigger->tgoldtable)
1930  pfree(trigger->tgoldtable);
1931  if (trigger->tgnewtable)
1932  pfree(trigger->tgnewtable);
1933  trigger++;
1934  }
1935  pfree(trigdesc->triggers);
1936  pfree(trigdesc);
1937 }
1938 
/*
 * Compare two TriggerDesc structures for logical equality.
 */
#ifdef NOT_USED
/* Null-safe string equality: equal if both NULL, or both set and identical */
#define TRIGGER_STR_EQ(a, b) \
	(((a) == NULL && (b) == NULL) || \
	 ((a) != NULL && (b) != NULL && strcmp((a), (b)) == 0))

bool
equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
{
	int			tno;

	/*
	 * We need not examine the hint flags, just the trigger array itself; if
	 * we have the same triggers with the same types, the flags should match.
	 *
	 * As of 7.3 we assume trigger set ordering is significant in the
	 * comparison; so we just compare corresponding slots of the two sets.
	 *
	 * Note: comparing the stringToNode forms of the WHEN clauses means that
	 * parse column locations will affect the result.  This is okay as long as
	 * this function is only used for detecting exact equality, as for example
	 * in checking for staleness of a cache entry.
	 */
	if (trigdesc1 == NULL)
		return trigdesc2 == NULL;
	if (trigdesc2 == NULL)
		return false;
	if (trigdesc1->numtriggers != trigdesc2->numtriggers)
		return false;

	for (tno = 0; tno < trigdesc1->numtriggers; tno++)
	{
		Trigger    *trig1 = trigdesc1->triggers + tno;
		Trigger    *trig2 = trigdesc2->triggers + tno;
		int			argno;

		/* scalar fields: all must match exactly */
		if (trig1->tgoid != trig2->tgoid ||
			trig1->tgfoid != trig2->tgfoid ||
			trig1->tgtype != trig2->tgtype ||
			trig1->tgenabled != trig2->tgenabled ||
			trig1->tgisinternal != trig2->tgisinternal ||
			trig1->tgconstrrelid != trig2->tgconstrrelid ||
			trig1->tgconstrindid != trig2->tgconstrindid ||
			trig1->tgconstraint != trig2->tgconstraint ||
			trig1->tgdeferrable != trig2->tgdeferrable ||
			trig1->tginitdeferred != trig2->tginitdeferred ||
			trig1->tgnargs != trig2->tgnargs ||
			trig1->tgnattr != trig2->tgnattr)
			return false;
		if (strcmp(trig1->tgname, trig2->tgname) != 0)
			return false;
		/* column list, if any */
		if (trig1->tgnattr > 0 &&
			memcmp(trig1->tgattr, trig2->tgattr,
				   trig1->tgnattr * sizeof(int16)) != 0)
			return false;
		/* argument strings (counts already known equal) */
		for (argno = 0; argno < trig1->tgnargs; argno++)
		{
			if (strcmp(trig1->tgargs[argno], trig2->tgargs[argno]) != 0)
				return false;
		}
		/* nullable string fields */
		if (!TRIGGER_STR_EQ(trig1->tgqual, trig2->tgqual))
			return false;
		if (!TRIGGER_STR_EQ(trig1->tgoldtable, trig2->tgoldtable))
			return false;
		if (!TRIGGER_STR_EQ(trig1->tgnewtable, trig2->tgnewtable))
			return false;
	}

	return true;
}

#undef TRIGGER_STR_EQ
#endif							/* NOT_USED */
2030 
2031 /*
2032  * Call a trigger function.
2033  *
2034  * trigdata: trigger descriptor.
2035  * tgindx: trigger's index in finfo and instr arrays.
2036  * finfo: array of cached trigger function call information.
2037  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2038  * per_tuple_context: memory context to execute the function in.
2039  *
2040  * Returns the tuple (or NULL) as returned by the function.
2041  */
2042 static HeapTuple
2044  int tgindx,
2045  FmgrInfo *finfo,
2046  Instrumentation *instr,
2047  MemoryContext per_tuple_context)
2048 {
2049  FunctionCallInfoData fcinfo;
2050  PgStat_FunctionCallUsage fcusage;
2051  Datum result;
2052  MemoryContext oldContext;
2053 
2054  /*
2055  * Protect against code paths that may fail to initialize transition table
2056  * info.
2057  */
2058  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
2059  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
2060  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
2061  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
2062  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
2063  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
2064  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
2065 
2066  finfo += tgindx;
2067 
2068  /*
2069  * We cache fmgr lookup info, to avoid making the lookup again on each
2070  * call.
2071  */
2072  if (finfo->fn_oid == InvalidOid)
2073  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
2074 
2075  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
2076 
2077  /*
2078  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
2079  */
2080  if (instr)
2081  InstrStartNode(instr + tgindx);
2082 
2083  /*
2084  * Do the function evaluation in the per-tuple memory context, so that
2085  * leaked memory will be reclaimed once per tuple. Note in particular that
2086  * any new tuple created by the trigger function will live till the end of
2087  * the tuple cycle.
2088  */
2089  oldContext = MemoryContextSwitchTo(per_tuple_context);
2090 
2091  /*
2092  * Call the function, passing no arguments but setting a context.
2093  */
2094  InitFunctionCallInfoData(fcinfo, finfo, 0,
2095  InvalidOid, (Node *) trigdata, NULL);
2096 
2097  pgstat_init_function_usage(&fcinfo, &fcusage);
2098 
2099  MyTriggerDepth++;
2100  PG_TRY();
2101  {
2102  result = FunctionCallInvoke(&fcinfo);
2103  }
2104  PG_CATCH();
2105  {
2106  MyTriggerDepth--;
2107  PG_RE_THROW();
2108  }
2109  PG_END_TRY();
2110  MyTriggerDepth--;
2111 
2112  pgstat_end_function_usage(&fcusage, true);
2113 
2114  MemoryContextSwitchTo(oldContext);
2115 
2116  /*
2117  * Trigger protocol allows function to return a null pointer, but NOT to
2118  * set the isnull result flag.
2119  */
2120  if (fcinfo.isnull)
2121  ereport(ERROR,
2122  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2123  errmsg("trigger function %u returned null value",
2124  fcinfo.flinfo->fn_oid)));
2125 
2126  /*
2127  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
2128  * one "tuple returned" (really the number of firings).
2129  */
2130  if (instr)
2131  InstrStopNode(instr + tgindx, 1);
2132 
2133  return (HeapTuple) DatumGetPointer(result);
2134 }
2135 
2136 void
2138 {
2139  TriggerDesc *trigdesc;
2140  int i;
2141  TriggerData LocTriggerData;
2142 
2143  trigdesc = relinfo->ri_TrigDesc;
2144 
2145  if (trigdesc == NULL)
2146  return;
2147  if (!trigdesc->trig_insert_before_statement)
2148  return;
2149 
2150  LocTriggerData.type = T_TriggerData;
2151  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2153  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2154  LocTriggerData.tg_trigtuple = NULL;
2155  LocTriggerData.tg_newtuple = NULL;
2156  LocTriggerData.tg_oldtable = NULL;
2157  LocTriggerData.tg_newtable = NULL;
2158  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2159  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2160  for (i = 0; i < trigdesc->numtriggers; i++)
2161  {
2162  Trigger *trigger = &trigdesc->triggers[i];
2163  HeapTuple newtuple;
2164 
2165  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2169  continue;
2170  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2171  NULL, NULL, NULL))
2172  continue;
2173 
2174  LocTriggerData.tg_trigger = trigger;
2175  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2176  i,
2177  relinfo->ri_TrigFunctions,
2178  relinfo->ri_TrigInstrument,
2179  GetPerTupleMemoryContext(estate));
2180 
2181  if (newtuple)
2182  ereport(ERROR,
2183  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2184  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2185  }
2186 }
2187 
2188 void
2190 {
2191  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2192 
2193  if (trigdesc && trigdesc->trig_insert_after_statement)
2195  false, NULL, NULL, NIL, NULL);
2196 }
2197 
2200  TupleTableSlot *slot)
2201 {
2202  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2203  HeapTuple slottuple = ExecMaterializeSlot(slot);
2204  HeapTuple newtuple = slottuple;
2205  HeapTuple oldtuple;
2206  TriggerData LocTriggerData;
2207  int i;
2208 
2209  LocTriggerData.type = T_TriggerData;
2210  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2213  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2214  LocTriggerData.tg_newtuple = NULL;
2215  LocTriggerData.tg_oldtable = NULL;
2216  LocTriggerData.tg_newtable = NULL;
2217  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2218  for (i = 0; i < trigdesc->numtriggers; i++)
2219  {
2220  Trigger *trigger = &trigdesc->triggers[i];
2221 
2222  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2226  continue;
2227  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2228  NULL, NULL, newtuple))
2229  continue;
2230 
2231  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2232  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2233  LocTriggerData.tg_trigger = trigger;
2234  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2235  i,
2236  relinfo->ri_TrigFunctions,
2237  relinfo->ri_TrigInstrument,
2238  GetPerTupleMemoryContext(estate));
2239  if (oldtuple != newtuple && oldtuple != slottuple)
2240  heap_freetuple(oldtuple);
2241  if (newtuple == NULL)
2242  return NULL; /* "do nothing" */
2243  }
2244 
2245  if (newtuple != slottuple)
2246  {
2247  /*
2248  * Return the modified tuple using the es_trig_tuple_slot. We assume
2249  * the tuple was allocated in per-tuple memory context, and therefore
2250  * will go away by itself. The tuple table slot should not try to
2251  * clear it.
2252  */
2253  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2254  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2255 
2256  if (newslot->tts_tupleDescriptor != tupdesc)
2257  ExecSetSlotDescriptor(newslot, tupdesc);
2258  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2259  slot = newslot;
2260  }
2261  return slot;
2262 }
2263 
2264 void
2266  HeapTuple trigtuple, List *recheckIndexes)
2267 {
2268  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2269 
2270  if (trigdesc &&
2271  (trigdesc->trig_insert_after_row || trigdesc->trig_insert_new_table))
2273  true, NULL, trigtuple, recheckIndexes, NULL);
2274 }
2275 
2278  TupleTableSlot *slot)
2279 {
2280  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2281  HeapTuple slottuple = ExecMaterializeSlot(slot);
2282  HeapTuple newtuple = slottuple;
2283  HeapTuple oldtuple;
2284  TriggerData LocTriggerData;
2285  int i;
2286 
2287  LocTriggerData.type = T_TriggerData;
2288  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2291  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2292  LocTriggerData.tg_newtuple = NULL;
2293  LocTriggerData.tg_oldtable = NULL;
2294  LocTriggerData.tg_newtable = NULL;
2295  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2296  for (i = 0; i < trigdesc->numtriggers; i++)
2297  {
2298  Trigger *trigger = &trigdesc->triggers[i];
2299 
2300  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2304  continue;
2305  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2306  NULL, NULL, newtuple))
2307  continue;
2308 
2309  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2310  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2311  LocTriggerData.tg_trigger = trigger;
2312  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2313  i,
2314  relinfo->ri_TrigFunctions,
2315  relinfo->ri_TrigInstrument,
2316  GetPerTupleMemoryContext(estate));
2317  if (oldtuple != newtuple && oldtuple != slottuple)
2318  heap_freetuple(oldtuple);
2319  if (newtuple == NULL)
2320  return NULL; /* "do nothing" */
2321  }
2322 
2323  if (newtuple != slottuple)
2324  {
2325  /*
2326  * Return the modified tuple using the es_trig_tuple_slot. We assume
2327  * the tuple was allocated in per-tuple memory context, and therefore
2328  * will go away by itself. The tuple table slot should not try to
2329  * clear it.
2330  */
2331  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2332  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2333 
2334  if (newslot->tts_tupleDescriptor != tupdesc)
2335  ExecSetSlotDescriptor(newslot, tupdesc);
2336  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2337  slot = newslot;
2338  }
2339  return slot;
2340 }
2341 
2342 void
2344 {
2345  TriggerDesc *trigdesc;
2346  int i;
2347  TriggerData LocTriggerData;
2348 
2349  trigdesc = relinfo->ri_TrigDesc;
2350 
2351  if (trigdesc == NULL)
2352  return;
2353  if (!trigdesc->trig_delete_before_statement)
2354  return;
2355 
2356  LocTriggerData.type = T_TriggerData;
2357  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2359  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2360  LocTriggerData.tg_trigtuple = NULL;
2361  LocTriggerData.tg_newtuple = NULL;
2362  LocTriggerData.tg_oldtable = NULL;
2363  LocTriggerData.tg_newtable = NULL;
2364  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2365  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2366  for (i = 0; i < trigdesc->numtriggers; i++)
2367  {
2368  Trigger *trigger = &trigdesc->triggers[i];
2369  HeapTuple newtuple;
2370 
2371  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2375  continue;
2376  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2377  NULL, NULL, NULL))
2378  continue;
2379 
2380  LocTriggerData.tg_trigger = trigger;
2381  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2382  i,
2383  relinfo->ri_TrigFunctions,
2384  relinfo->ri_TrigInstrument,
2385  GetPerTupleMemoryContext(estate));
2386 
2387  if (newtuple)
2388  ereport(ERROR,
2389  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2390  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2391  }
2392 }
2393 
2394 void
2396 {
2397  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2398 
2399  if (trigdesc && trigdesc->trig_delete_after_statement)
2401  false, NULL, NULL, NIL, NULL);
2402 }
2403 
2404 bool
2406  ResultRelInfo *relinfo,
2407  ItemPointer tupleid,
2408  HeapTuple fdw_trigtuple)
2409 {
2410  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2411  bool result = true;
2412  TriggerData LocTriggerData;
2413  HeapTuple trigtuple;
2414  HeapTuple newtuple;
2415  TupleTableSlot *newSlot;
2416  int i;
2417 
2418  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2419  if (fdw_trigtuple == NULL)
2420  {
2421  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2422  LockTupleExclusive, &newSlot);
2423  if (trigtuple == NULL)
2424  return false;
2425  }
2426  else
2427  trigtuple = fdw_trigtuple;
2428 
2429  LocTriggerData.type = T_TriggerData;
2430  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2433  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2434  LocTriggerData.tg_newtuple = NULL;
2435  LocTriggerData.tg_oldtable = NULL;
2436  LocTriggerData.tg_newtable = NULL;
2437  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2438  for (i = 0; i < trigdesc->numtriggers; i++)
2439  {
2440  Trigger *trigger = &trigdesc->triggers[i];
2441 
2442  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2446  continue;
2447  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2448  NULL, trigtuple, NULL))
2449  continue;
2450 
2451  LocTriggerData.tg_trigtuple = trigtuple;
2452  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2453  LocTriggerData.tg_trigger = trigger;
2454  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2455  i,
2456  relinfo->ri_TrigFunctions,
2457  relinfo->ri_TrigInstrument,
2458  GetPerTupleMemoryContext(estate));
2459  if (newtuple == NULL)
2460  {
2461  result = false; /* tell caller to suppress delete */
2462  break;
2463  }
2464  if (newtuple != trigtuple)
2465  heap_freetuple(newtuple);
2466  }
2467  if (trigtuple != fdw_trigtuple)
2468  heap_freetuple(trigtuple);
2469 
2470  return result;
2471 }
2472 
2473 void
2475  ItemPointer tupleid,
2476  HeapTuple fdw_trigtuple)
2477 {
2478  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2479 
2480  if (trigdesc &&
2481  (trigdesc->trig_delete_after_row || trigdesc->trig_delete_old_table))
2482  {
2483  HeapTuple trigtuple;
2484 
2485  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2486  if (fdw_trigtuple == NULL)
2487  trigtuple = GetTupleForTrigger(estate,
2488  NULL,
2489  relinfo,
2490  tupleid,
2492  NULL);
2493  else
2494  trigtuple = fdw_trigtuple;
2495 
2497  true, trigtuple, NULL, NIL, NULL);
2498  if (trigtuple != fdw_trigtuple)
2499  heap_freetuple(trigtuple);
2500  }
2501 }
2502 
2503 bool
2505  HeapTuple trigtuple)
2506 {
2507  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2508  TriggerData LocTriggerData;
2509  HeapTuple rettuple;
2510  int i;
2511 
2512  LocTriggerData.type = T_TriggerData;
2513  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2516  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2517  LocTriggerData.tg_newtuple = NULL;
2518  LocTriggerData.tg_oldtable = NULL;
2519  LocTriggerData.tg_newtable = NULL;
2520  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2521  for (i = 0; i < trigdesc->numtriggers; i++)
2522  {
2523  Trigger *trigger = &trigdesc->triggers[i];
2524 
2525  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2529  continue;
2530  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2531  NULL, trigtuple, NULL))
2532  continue;
2533 
2534  LocTriggerData.tg_trigtuple = trigtuple;
2535  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2536  LocTriggerData.tg_trigger = trigger;
2537  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2538  i,
2539  relinfo->ri_TrigFunctions,
2540  relinfo->ri_TrigInstrument,
2541  GetPerTupleMemoryContext(estate));
2542  if (rettuple == NULL)
2543  return false; /* Delete was suppressed */
2544  if (rettuple != trigtuple)
2545  heap_freetuple(rettuple);
2546  }
2547  return true;
2548 }
2549 
2550 void
2552 {
2553  TriggerDesc *trigdesc;
2554  int i;
2555  TriggerData LocTriggerData;
2556  Bitmapset *updatedCols;
2557 
2558  trigdesc = relinfo->ri_TrigDesc;
2559 
2560  if (trigdesc == NULL)
2561  return;
2562  if (!trigdesc->trig_update_before_statement)
2563  return;
2564 
2565  updatedCols = GetUpdatedColumns(relinfo, estate);
2566 
2567  LocTriggerData.type = T_TriggerData;
2568  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2570  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2571  LocTriggerData.tg_trigtuple = NULL;
2572  LocTriggerData.tg_newtuple = NULL;
2573  LocTriggerData.tg_oldtable = NULL;
2574  LocTriggerData.tg_newtable = NULL;
2575  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2576  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2577  for (i = 0; i < trigdesc->numtriggers; i++)
2578  {
2579  Trigger *trigger = &trigdesc->triggers[i];
2580  HeapTuple newtuple;
2581 
2582  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2586  continue;
2587  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2588  updatedCols, NULL, NULL))
2589  continue;
2590 
2591  LocTriggerData.tg_trigger = trigger;
2592  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2593  i,
2594  relinfo->ri_TrigFunctions,
2595  relinfo->ri_TrigInstrument,
2596  GetPerTupleMemoryContext(estate));
2597 
2598  if (newtuple)
2599  ereport(ERROR,
2600  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2601  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2602  }
2603 }
2604 
2605 void
2607 {
2608  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2609 
2610  if (trigdesc && trigdesc->trig_update_after_statement)
2612  false, NULL, NULL, NIL,
2613  GetUpdatedColumns(relinfo, estate));
2614 }
2615 
2618  ResultRelInfo *relinfo,
2619  ItemPointer tupleid,
2620  HeapTuple fdw_trigtuple,
2621  TupleTableSlot *slot)
2622 {
2623  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2624  HeapTuple slottuple = ExecMaterializeSlot(slot);
2625  HeapTuple newtuple = slottuple;
2626  TriggerData LocTriggerData;
2627  HeapTuple trigtuple;
2628  HeapTuple oldtuple;
2629  TupleTableSlot *newSlot;
2630  int i;
2631  Bitmapset *updatedCols;
2632  LockTupleMode lockmode;
2633 
2634  /* Determine lock mode to use */
2635  lockmode = ExecUpdateLockMode(estate, relinfo);
2636 
2637  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2638  if (fdw_trigtuple == NULL)
2639  {
2640  /* get a copy of the on-disk tuple we are planning to update */
2641  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2642  lockmode, &newSlot);
2643  if (trigtuple == NULL)
2644  return NULL; /* cancel the update action */
2645  }
2646  else
2647  {
2648  trigtuple = fdw_trigtuple;
2649  newSlot = NULL;
2650  }
2651 
2652  /*
2653  * In READ COMMITTED isolation level it's possible that target tuple was
2654  * changed due to concurrent update. In that case we have a raw subplan
2655  * output tuple in newSlot, and need to run it through the junk filter to
2656  * produce an insertable tuple.
2657  *
2658  * Caution: more than likely, the passed-in slot is the same as the
2659  * junkfilter's output slot, so we are clobbering the original value of
2660  * slottuple by doing the filtering. This is OK since neither we nor our
2661  * caller have any more interest in the prior contents of that slot.
2662  */
2663  if (newSlot != NULL)
2664  {
2665  slot = ExecFilterJunk(relinfo->ri_junkFilter, newSlot);
2666  slottuple = ExecMaterializeSlot(slot);
2667  newtuple = slottuple;
2668  }
2669 
2670 
2671  LocTriggerData.type = T_TriggerData;
2672  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2675  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2676  LocTriggerData.tg_oldtable = NULL;
2677  LocTriggerData.tg_newtable = NULL;
2678  updatedCols = GetUpdatedColumns(relinfo, estate);
2679  for (i = 0; i < trigdesc->numtriggers; i++)
2680  {
2681  Trigger *trigger = &trigdesc->triggers[i];
2682 
2683  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2687  continue;
2688  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2689  updatedCols, trigtuple, newtuple))
2690  continue;
2691 
2692  LocTriggerData.tg_trigtuple = trigtuple;
2693  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2694  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2695  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2696  LocTriggerData.tg_trigger = trigger;
2697  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2698  i,
2699  relinfo->ri_TrigFunctions,
2700  relinfo->ri_TrigInstrument,
2701  GetPerTupleMemoryContext(estate));
2702  if (oldtuple != newtuple && oldtuple != slottuple)
2703  heap_freetuple(oldtuple);
2704  if (newtuple == NULL)
2705  {
2706  if (trigtuple != fdw_trigtuple)
2707  heap_freetuple(trigtuple);
2708  return NULL; /* "do nothing" */
2709  }
2710  }
2711  if (trigtuple != fdw_trigtuple)
2712  heap_freetuple(trigtuple);
2713 
2714  if (newtuple != slottuple)
2715  {
2716  /*
2717  * Return the modified tuple using the es_trig_tuple_slot. We assume
2718  * the tuple was allocated in per-tuple memory context, and therefore
2719  * will go away by itself. The tuple table slot should not try to
2720  * clear it.
2721  */
2722  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2723  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2724 
2725  if (newslot->tts_tupleDescriptor != tupdesc)
2726  ExecSetSlotDescriptor(newslot, tupdesc);
2727  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2728  slot = newslot;
2729  }
2730  return slot;
2731 }
2732 
2733 void
2735  ItemPointer tupleid,
2736  HeapTuple fdw_trigtuple,
2737  HeapTuple newtuple,
2738  List *recheckIndexes)
2739 {
2740  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2741 
2742  if (trigdesc && (trigdesc->trig_update_after_row ||
2743  trigdesc->trig_update_old_table || trigdesc->trig_update_new_table))
2744  {
2745  HeapTuple trigtuple;
2746 
2747  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2748  if (fdw_trigtuple == NULL)
2749  trigtuple = GetTupleForTrigger(estate,
2750  NULL,
2751  relinfo,
2752  tupleid,
2754  NULL);
2755  else
2756  trigtuple = fdw_trigtuple;
2757 
2759  true, trigtuple, newtuple, recheckIndexes,
2760  GetUpdatedColumns(relinfo, estate));
2761  if (trigtuple != fdw_trigtuple)
2762  heap_freetuple(trigtuple);
2763  }
2764 }
2765 
2768  HeapTuple trigtuple, TupleTableSlot *slot)
2769 {
2770  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2771  HeapTuple slottuple = ExecMaterializeSlot(slot);
2772  HeapTuple newtuple = slottuple;
2773  TriggerData LocTriggerData;
2774  HeapTuple oldtuple;
2775  int i;
2776 
2777  LocTriggerData.type = T_TriggerData;
2778  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2781  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2782  LocTriggerData.tg_oldtable = NULL;
2783  LocTriggerData.tg_newtable = NULL;
2784  for (i = 0; i < trigdesc->numtriggers; i++)
2785  {
2786  Trigger *trigger = &trigdesc->triggers[i];
2787 
2788  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2792  continue;
2793  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2794  NULL, trigtuple, newtuple))
2795  continue;
2796 
2797  LocTriggerData.tg_trigtuple = trigtuple;
2798  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2799  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2800  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2801  LocTriggerData.tg_trigger = trigger;
2802  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2803  i,
2804  relinfo->ri_TrigFunctions,
2805  relinfo->ri_TrigInstrument,
2806  GetPerTupleMemoryContext(estate));
2807  if (oldtuple != newtuple && oldtuple != slottuple)
2808  heap_freetuple(oldtuple);
2809  if (newtuple == NULL)
2810  return NULL; /* "do nothing" */
2811  }
2812 
2813  if (newtuple != slottuple)
2814  {
2815  /*
2816  * Return the modified tuple using the es_trig_tuple_slot. We assume
2817  * the tuple was allocated in per-tuple memory context, and therefore
2818  * will go away by itself. The tuple table slot should not try to
2819  * clear it.
2820  */
2821  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2822  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2823 
2824  if (newslot->tts_tupleDescriptor != tupdesc)
2825  ExecSetSlotDescriptor(newslot, tupdesc);
2826  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2827  slot = newslot;
2828  }
2829  return slot;
2830 }
2831 
2832 void
2834 {
2835  TriggerDesc *trigdesc;
2836  int i;
2837  TriggerData LocTriggerData;
2838 
2839  trigdesc = relinfo->ri_TrigDesc;
2840 
2841  if (trigdesc == NULL)
2842  return;
2843  if (!trigdesc->trig_truncate_before_statement)
2844  return;
2845 
2846  LocTriggerData.type = T_TriggerData;
2847  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
2849  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2850  LocTriggerData.tg_trigtuple = NULL;
2851  LocTriggerData.tg_newtuple = NULL;
2852  LocTriggerData.tg_oldtable = NULL;
2853  LocTriggerData.tg_newtable = NULL;
2854  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2855  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2856  for (i = 0; i < trigdesc->numtriggers; i++)
2857  {
2858  Trigger *trigger = &trigdesc->triggers[i];
2859  HeapTuple newtuple;
2860 
2861  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2865  continue;
2866  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2867  NULL, NULL, NULL))
2868  continue;
2869 
2870  LocTriggerData.tg_trigger = trigger;
2871  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2872  i,
2873  relinfo->ri_TrigFunctions,
2874  relinfo->ri_TrigInstrument,
2875  GetPerTupleMemoryContext(estate));
2876 
2877  if (newtuple)
2878  ereport(ERROR,
2879  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2880  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2881  }
2882 }
2883 
2884 void
2886 {
2887  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2888 
2889  if (trigdesc && trigdesc->trig_truncate_after_statement)
2891  false, NULL, NULL, NIL, NULL);
2892 }
2893 
2894 
2895 static HeapTuple
2897  EPQState *epqstate,
2898  ResultRelInfo *relinfo,
2899  ItemPointer tid,
2900  LockTupleMode lockmode,
2901  TupleTableSlot **newSlot)
2902 {
2903  Relation relation = relinfo->ri_RelationDesc;
2904  HeapTupleData tuple;
2905  HeapTuple result;
2906  Buffer buffer;
2907 
2908  if (newSlot != NULL)
2909  {
2910  HTSU_Result test;
2911  HeapUpdateFailureData hufd;
2912 
2913  *newSlot = NULL;
2914 
2915  /* caller must pass an epqstate if EvalPlanQual is possible */
2916  Assert(epqstate != NULL);
2917 
2918  /*
2919  * lock tuple for update
2920  */
2921 ltrmark:;
2922  tuple.t_self = *tid;
2923  test = heap_lock_tuple(relation, &tuple,
2924  estate->es_output_cid,
2925  lockmode, LockWaitBlock,
2926  false, &buffer, &hufd);
2927  switch (test)
2928  {
2929  case HeapTupleSelfUpdated:
2930 
2931  /*
2932  * The target tuple was already updated or deleted by the
2933  * current command, or by a later command in the current
2934  * transaction. We ignore the tuple in the former case, and
2935  * throw error in the latter case, for the same reasons
2936  * enumerated in ExecUpdate and ExecDelete in
2937  * nodeModifyTable.c.
2938  */
2939  if (hufd.cmax != estate->es_output_cid)
2940  ereport(ERROR,
2941  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2942  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2943  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2944 
2945  /* treat it as deleted; do not process */
2946  ReleaseBuffer(buffer);
2947  return NULL;
2948 
2949  case HeapTupleMayBeUpdated:
2950  break;
2951 
2952  case HeapTupleUpdated:
2953  ReleaseBuffer(buffer);
2955  ereport(ERROR,
2956  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2957  errmsg("could not serialize access due to concurrent update")));
2958  if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2959  {
2960  /* it was updated, so look at the updated version */
2961  TupleTableSlot *epqslot;
2962 
2963  epqslot = EvalPlanQual(estate,
2964  epqstate,
2965  relation,
2966  relinfo->ri_RangeTableIndex,
2967  lockmode,
2968  &hufd.ctid,
2969  hufd.xmax);
2970  if (!TupIsNull(epqslot))
2971  {
2972  *tid = hufd.ctid;
2973  *newSlot = epqslot;
2974 
2975  /*
2976  * EvalPlanQual already locked the tuple, but we
2977  * re-call heap_lock_tuple anyway as an easy way of
2978  * re-fetching the correct tuple. Speed is hardly a
2979  * criterion in this path anyhow.
2980  */
2981  goto ltrmark;
2982  }
2983  }
2984 
2985  /*
2986  * if tuple was deleted or PlanQual failed for updated tuple -
2987  * we must not process this tuple!
2988  */
2989  return NULL;
2990 
2991  case HeapTupleInvisible:
2992  elog(ERROR, "attempted to lock invisible tuple");
2993 
2994  default:
2995  ReleaseBuffer(buffer);
2996  elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
2997  return NULL; /* keep compiler quiet */
2998  }
2999  }
3000  else
3001  {
3002  Page page;
3003  ItemId lp;
3004 
3005  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3006 
3007  /*
3008  * Although we already know this tuple is valid, we must lock the
3009  * buffer to ensure that no one has a buffer cleanup lock; otherwise
3010  * they might move the tuple while we try to copy it. But we can
3011  * release the lock before actually doing the heap_copytuple call,
3012  * since holding pin is sufficient to prevent anyone from getting a
3013  * cleanup lock they don't already hold.
3014  */
3015  LockBuffer(buffer, BUFFER_LOCK_SHARE);
3016 
3017  page = BufferGetPage(buffer);
3018  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3019 
3020  Assert(ItemIdIsNormal(lp));
3021 
3022  tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3023  tuple.t_len = ItemIdGetLength(lp);
3024  tuple.t_self = *tid;
3025  tuple.t_tableOid = RelationGetRelid(relation);
3026 
3027  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3028  }
3029 
3030  result = heap_copytuple(&tuple);
3031  ReleaseBuffer(buffer);
3032 
3033  return result;
3034 }
3035 
3036 /*
3037  * Is trigger enabled to fire?
3038  */
3039 static bool
3041  Trigger *trigger, TriggerEvent event,
3042  Bitmapset *modifiedCols,
3043  HeapTuple oldtup, HeapTuple newtup)
3044 {
3045  /* Check replication-role-dependent enable state */
3047  {
3048  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3049  trigger->tgenabled == TRIGGER_DISABLED)
3050  return false;
3051  }
3052  else /* ORIGIN or LOCAL role */
3053  {
3054  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3055  trigger->tgenabled == TRIGGER_DISABLED)
3056  return false;
3057  }
3058 
3059  /*
3060  * Check for column-specific trigger (only possible for UPDATE, and in
3061  * fact we *must* ignore tgattr for other event types)
3062  */
3063  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3064  {
3065  int i;
3066  bool modified;
3067 
3068  modified = false;
3069  for (i = 0; i < trigger->tgnattr; i++)
3070  {
3072  modifiedCols))
3073  {
3074  modified = true;
3075  break;
3076  }
3077  }
3078  if (!modified)
3079  return false;
3080  }
3081 
3082  /* Check for WHEN clause */
3083  if (trigger->tgqual)
3084  {
3085  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
3086  ExprState **predicate;
3087  ExprContext *econtext;
3088  TupleTableSlot *oldslot = NULL;
3089  TupleTableSlot *newslot = NULL;
3090  MemoryContext oldContext;
3091  int i;
3092 
3093  Assert(estate != NULL);
3094 
3095  /*
3096  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3097  * matching element of relinfo->ri_TrigWhenExprs[]
3098  */
3099  i = trigger - relinfo->ri_TrigDesc->triggers;
3100  predicate = &relinfo->ri_TrigWhenExprs[i];
3101 
3102  /*
3103  * If first time through for this WHEN expression, build expression
3104  * nodetrees for it. Keep them in the per-query memory context so
3105  * they'll survive throughout the query.
3106  */
3107  if (*predicate == NULL)
3108  {
3109  Node *tgqual;
3110 
3111  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3112  tgqual = stringToNode(trigger->tgqual);
3113  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3116  /* ExecPrepareQual wants implicit-AND form */
3117  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3118  *predicate = ExecPrepareQual((List *) tgqual, estate);
3119  MemoryContextSwitchTo(oldContext);
3120  }
3121 
3122  /*
3123  * We will use the EState's per-tuple context for evaluating WHEN
3124  * expressions (creating it if it's not already there).
3125  */
3126  econtext = GetPerTupleExprContext(estate);
3127 
3128  /*
3129  * Put OLD and NEW tuples into tupleslots for expression evaluation.
3130  * These slots can be shared across the whole estate, but be careful
3131  * that they have the current resultrel's tupdesc.
3132  */
3133  if (HeapTupleIsValid(oldtup))
3134  {
3135  if (estate->es_trig_oldtup_slot == NULL)
3136  {
3137  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3138  estate->es_trig_oldtup_slot = ExecInitExtraTupleSlot(estate);
3139  MemoryContextSwitchTo(oldContext);
3140  }
3141  oldslot = estate->es_trig_oldtup_slot;
3142  if (oldslot->tts_tupleDescriptor != tupdesc)
3143  ExecSetSlotDescriptor(oldslot, tupdesc);
3144  ExecStoreTuple(oldtup, oldslot, InvalidBuffer, false);
3145  }
3146  if (HeapTupleIsValid(newtup))
3147  {
3148  if (estate->es_trig_newtup_slot == NULL)
3149  {
3150  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3151  estate->es_trig_newtup_slot = ExecInitExtraTupleSlot(estate);
3152  MemoryContextSwitchTo(oldContext);
3153  }
3154  newslot = estate->es_trig_newtup_slot;
3155  if (newslot->tts_tupleDescriptor != tupdesc)
3156  ExecSetSlotDescriptor(newslot, tupdesc);
3157  ExecStoreTuple(newtup, newslot, InvalidBuffer, false);
3158  }
3159 
3160  /*
3161  * Finally evaluate the expression, making the old and/or new tuples
3162  * available as INNER_VAR/OUTER_VAR respectively.
3163  */
3164  econtext->ecxt_innertuple = oldslot;
3165  econtext->ecxt_outertuple = newslot;
3166  if (!ExecQual(*predicate, econtext))
3167  return false;
3168  }
3169 
3170  return true;
3171 }
3172 
3173 
3174 /* ----------
3175  * After-trigger stuff
3176  *
3177  * The AfterTriggersData struct holds data about pending AFTER trigger events
3178  * during the current transaction tree. (BEFORE triggers are fired
3179  * immediately so we don't need any persistent state about them.) The struct
3180  * and most of its subsidiary data are kept in TopTransactionContext; however
3181  * the individual event records are kept in a separate sub-context. This is
3182  * done mainly so that it's easy to tell from a memory context dump how much
3183  * space is being eaten by trigger events.
3184  *
3185  * Because the list of pending events can grow large, we go to some
3186  * considerable effort to minimize per-event memory consumption. The event
3187  * records are grouped into chunks and common data for similar events in the
3188  * same chunk is only stored once.
3189  *
3190  * XXX We need to be able to save the per-event data in a file if it grows too
3191  * large.
3192  * ----------
3193  */
3194 
3195 /* Per-trigger SET CONSTRAINT status */
3197 {
3201 
3203 
3204 /*
3205  * SET CONSTRAINT intra-transaction status.
3206  *
3207  * We make this a single palloc'd object so it can be copied and freed easily.
3208  *
3209  * all_isset and all_isdeferred are used to keep track
3210  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3211  *
3212  * trigstates[] stores per-trigger tgisdeferred settings.
3213  */
3215 {
3218  int numstates; /* number of trigstates[] entries in use */
3219  int numalloc; /* allocated size of trigstates[] */
3220  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3222 
3224 
3225 
3226 /*
3227  * Per-trigger-event data
3228  *
3229  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3230  * status bits and up to two tuple CTIDs. Each event record also has an
3231  * associated AfterTriggerSharedData that is shared across all instances of
3232  * similar events within a "chunk".
3233  *
3234  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3235  * fields. Updates of regular tables use two; inserts and deletes of regular
3236  * tables use one; foreign tables always use zero and save the tuple(s) to a
3237  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3238  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3239  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3240  * tuple(s). This permits storing tuples once regardless of the number of
3241  * row-level triggers on a foreign table.
3242  *
3243  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3244  * require no ctid field. We lack the flag bit space to neatly represent that
3245  * distinct case, and it seems unlikely to be worth much trouble.
3246  *
3247  * Note: ats_firing_id is initially zero and is set to something else when
3248  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3249  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3250  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3251  * because all instances of the same type of event in a given event list will
3252  * be fired at the same time, if they were queued between the same firing
3253  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3254  * a new event to an existing AfterTriggerSharedData record.
3255  */
3257 
/*
 * ate_flags layout: the low 28 bits (AFTER_TRIGGER_OFFSET) hold the byte
 * offset from the event record to its AfterTriggerSharedData within the same
 * chunk (see GetTriggerSharedData below and afterTriggerAddEvent); the high
 * 4 bits hold DONE/IN_PROGRESS status plus the two TUP_BITS size/source bits.
 */
3258 #define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order
3259  * bits */
3260 #define AFTER_TRIGGER_DONE 0x10000000
3261 #define AFTER_TRIGGER_IN_PROGRESS 0x20000000
3262 /* bits describing the size and tuple sources of this event */
3263 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3264 #define AFTER_TRIGGER_FDW_FETCH 0x80000000
3265 #define AFTER_TRIGGER_1CTID 0x40000000
3266 #define AFTER_TRIGGER_2CTID 0xC0000000
3267 #define AFTER_TRIGGER_TUP_BITS 0xC0000000
3268 
3270 
/* NOTE(review): typedef headers and closing lines for the four structs below
 * (AfterTriggerSharedData, AfterTriggerEventData, ...OneCtid, ...ZeroCtids)
 * were dropped by the extraction (orig 3269, 3271, 3277, 3279, 3281, 3286,
 * 3289, 3293, 3296, 3299) -- verify against upstream trigger.c. */
3272 {
3273  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3274  Oid ats_tgoid; /* the trigger's ID */
3275  Oid ats_relid; /* the relation it's on */
3276  CommandId ats_firing_id; /* ID for firing cycle */
3278 
3280 
3282 {
3283  TriggerFlags ate_flags; /* status bits and offset to shared data */
3284  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3285  ItemPointerData ate_ctid2; /* new updated tuple */
3287 
3288 /* AfterTriggerEventData, minus ate_ctid2 */
3290 {
3291  TriggerFlags ate_flags; /* status bits and offset to shared data */
3292  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3294 
3295 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
3297 {
3298  TriggerFlags ate_flags; /* status bits and offset to shared data */
3300 
/* Actual storage size of an event record, decided by its TUP_BITS. */
3301 #define SizeofTriggerEvent(evt) \
3302  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3303  sizeof(AfterTriggerEventData) : \
3304  ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3305  sizeof(AfterTriggerEventDataOneCtid) : \
3306  sizeof(AfterTriggerEventDataZeroCtids))
3307 
/* Follow the stored offset to the event's shared-data record. */
3308 #define GetTriggerSharedData(evt) \
3309  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3310 
3311 /*
3312  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3313  * larger chunks (a slightly more sophisticated version of an expansible
3314  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3315  * AfterTriggerEventData records; the space between endfree and endptr is
3316  * occupied by AfterTriggerSharedData records.
3317  */
/* NOTE(review): the typedef header for AfterTriggerEventChunk (orig 3318)
 * was dropped by the extraction -- verify against upstream. */
3319 {
3320  struct AfterTriggerEventChunk *next; /* list link */
3321  char *freeptr; /* start of free space in chunk */
3322  char *endfree; /* end of free space in chunk */
3323  char *endptr; /* end of chunk */
3324  /* event data follows here */
3326 
3327 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3328 
3329 /* A list of events */
/* NOTE(review): typedef header and the head/tail chunk-pointer members
 * (orig 3330, 3332-3333) are missing here; both are dereferenced throughout
 * the code below (events->head, events->tail). */
3331 {
3334  char *tailfree; /* freeptr of tail chunk */
3336 
3337 /* Macros to help in iterating over a list of events */
3338 #define for_each_chunk(cptr, evtlist) \
3339  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
/* Walk the events in one chunk; step size varies per event (SizeofTriggerEvent) */
3340 #define for_each_event(eptr, cptr) \
3341  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3342  (char *) eptr < (cptr)->freeptr; \
3343  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3344 /* Use this if no special per-chunk processing is needed */
3345 #define for_each_event_chunk(eptr, cptr, evtlist) \
3346  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3347 
3348 
3349 /*
3350  * All per-transaction data for the AFTER TRIGGERS module.
3351  *
3352  * AfterTriggersData has the following fields:
3353  *
3354  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3355  * We mark firable events with the current firing cycle's ID so that we can
3356  * tell which ones to work on. This ensures sane behavior if a trigger
3357  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3358  * only fire those events that weren't already scheduled for firing.
3359  *
3360  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3361  * This is saved and restored across failed subtransactions.
3362  *
3363  * events is the current list of deferred events. This is global across
3364  * all subtransactions of the current transaction. In a subtransaction
3365  * abort, we know that the events added by the subtransaction are at the
3366  * end of the list, so it is relatively easy to discard them. The event
3367  * list chunks themselves are stored in event_cxt.
3368  *
3369  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3370  * (-1 when the stack is empty).
3371  *
3372  * query_stack[query_depth] is a list of AFTER trigger events queued by the
3373  * current query (and the query_stack entries below it are lists of trigger
3374  * events queued by calling queries). None of these are valid until the
3375  * matching AfterTriggerEndQuery call occurs. At that point we fire
3376  * immediate-mode triggers, and append any deferred events to the main events
3377  * list.
3378  *
3379  * fdw_tuplestores[query_depth] is a tuplestore containing the foreign tuples
3380  * needed for the current query.
3381  *
3382  * old_tuplestores[query_depth] and new_tuplestores[query_depth] hold the
3383  * transition relations for the current query.
3384  *
3385  * maxquerydepth is just the allocated length of query_stack and the
3386  * tuplestores.
3387  *
3388  * state_stack is a stack of pointers to saved copies of the SET CONSTRAINTS
3389  * state data; each subtransaction level that modifies that state first
3390  * saves a copy, which we use to restore the state if we abort.
3391  *
3392  * events_stack is a stack of copies of the events head/tail pointers,
3393  * which we use to restore those values during subtransaction abort.
3394  *
3395  * depth_stack is a stack of copies of subtransaction-start-time query_depth,
3396  * which we similarly use to clean up at subtransaction abort.
3397  *
3398  * firing_stack is a stack of copies of subtransaction-start-time
3399  * firing_counter. We use this to recognize which deferred triggers were
3400  * fired (or marked for firing) within an aborted subtransaction.
3401  *
3402  * We use GetCurrentTransactionNestLevel() to determine the correct array
3403  * index in these stacks. maxtransdepth is the number of allocated entries in
3404  * each stack. (By not keeping our own stack pointer, we can avoid trouble
3405  * in cases where errors during subxact abort cause multiple invocations
3406  * of AfterTriggerEndSubXact() at the same nesting depth.)
3407  */
3408 typedef struct AfterTriggersData
3409 {
3410  CommandId firing_counter; /* next firing ID to assign */
3411  SetConstraintState state; /* the active S C state */
3412  AfterTriggerEventList events; /* deferred-event list */
3413  int query_depth; /* current query list index */
3414  AfterTriggerEventList *query_stack; /* events pending from each query */
3415  Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from
3416  * each query */
3417  Tuplestorestate **old_tuplestores; /* all old tuples from each query */
3418  Tuplestorestate **new_tuplestores; /* all new tuples from each query */
3419  int maxquerydepth; /* allocated len of above array */
3420  MemoryContext event_cxt; /* memory context for events, if any */
3421 
3422  /* these fields are just for resetting at subtrans abort: */
3423 
3424  SetConstraintState *state_stack; /* stacked S C states */
3425  AfterTriggerEventList *events_stack; /* stacked list pointers */
3426  int *depth_stack; /* stacked query_depths */
3427  CommandId *firing_stack; /* stacked firing_counters */
3428  int maxtransdepth; /* allocated len of above arrays */
/* NOTE(review): the closing "} AfterTriggersData;" (orig 3429) and the
 * static variable declaration (orig 3431) were dropped by the extraction;
 * a file-scope "afterTriggers" instance clearly exists, since every function
 * below reads and writes afterTriggers.*. */
3430 
3432 
/* Forward declarations for the static helpers defined later in this file. */
3433 static void AfterTriggerExecute(AfterTriggerEvent event,
3434  Relation rel, TriggerDesc *trigdesc,
3435  FmgrInfo *finfo,
3436  Instrumentation *instr,
3437  MemoryContext per_tuple_context,
3438  TupleTableSlot *trig_tuple_slot1,
3439  TupleTableSlot *trig_tuple_slot2);
3440 static SetConstraintState SetConstraintStateCreate(int numalloc);
3441 static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
3442 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3443  Oid tgoid, bool tgisdeferred);
3444 
3445 
3446 /*
3447  * Gets a current query transition tuplestore and initializes it if necessary.
3448  * This can be holding a single transition row tuple (in the case of an FDW)
3449  * or a transition table (for an AFTER trigger).
3450  */
3451 static Tuplestorestate *
/* NOTE(review): the function-name line (orig 3452) was dropped by the
 * extraction.  From the callers (which pass afterTriggers.fdw_tuplestores
 * etc.) this is GetTriggerTransitionTuplestore(Tuplestorestate **tss);
 * it lazily creates the tuplestore for the current query depth. */
3453 {
3454  Tuplestorestate *ret;
3455 
/* Reuse the existing tuplestore for this query level, if any. */
3456  ret = tss[afterTriggers.query_depth];
3457  if (ret == NULL)
3458  {
3459  MemoryContext oldcxt;
3460  ResourceOwner saveResourceOwner;
3461 
3462  /*
3463  * Make the tuplestore valid until end of transaction. This is the
3464  * allocation lifespan of the associated events list, but we really
3465  * only need it until AfterTriggerEndQuery().
3466  */
/* NOTE(review): the statements switching to the transaction-lifetime memory
 * context and resource owner (orig 3467, 3470-3471) are missing here --
 * oldcxt is otherwise never assigned before MemoryContextSwitchTo(oldcxt)
 * below.  Verify against upstream trigger.c. */
3468  saveResourceOwner = CurrentResourceOwner;
3469  PG_TRY();
3470  {
3472  ret = tuplestore_begin_heap(false, false, work_mem);
3473  }
3474  PG_CATCH();
3475  {
/* Restore the saved owner even on error before re-throwing. */
3476  CurrentResourceOwner = saveResourceOwner;
3477  PG_RE_THROW();
3478  }
3479  PG_END_TRY();
3480  CurrentResourceOwner = saveResourceOwner;
3481  MemoryContextSwitchTo(oldcxt);
3482 
3483  tss[afterTriggers.query_depth] = ret;
3484  }
3485 
3486  return ret;
3487 }
3488 
3489 /* ----------
3490  * afterTriggerCheckState()
3491  *
3492  * Returns true if the trigger event is actually in state DEFERRED.
3493  * ----------
3494  */
3495 static bool
3496 afterTriggerCheckState(AfterTriggerShared evtshared)
3497 {
3498  Oid tgoid = evtshared->ats_tgoid;
3499  SetConstraintState state = afterTriggers.state;
3500  int i;
3501 
3502  /*
3503  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3504  * constraints declared NOT DEFERRABLE), the state is always false.
3505  */
3506  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3507  return false;
3508 
3509  /*
3510  * If constraint state exists, SET CONSTRAINTS might have been executed
3511  * either for this trigger or for all triggers.
3512  */
3513  if (state != NULL)
3514  {
3515  /* Check for SET CONSTRAINTS for this specific trigger. */
3516  for (i = 0; i < state->numstates; i++)
3517  {
3518  if (state->trigstates[i].sct_tgoid == tgoid)
3519  return state->trigstates[i].sct_tgisdeferred;
3520  }
3521 
3522  /* Check for SET CONSTRAINTS ALL. */
3523  if (state->all_isset)
3524  return state->all_isdeferred;
3525  }
3526 
3527  /*
3528  * Otherwise return the default state for the trigger.
3529  */
3530  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3531 }
3532 
3533 
3534 /* ----------
3535  * afterTriggerAddEvent()
3536  *
3537  * Add a new trigger event to the specified queue.
3538  * The passed-in event data is copied.
3539  * ----------
3540  */
3541 static void
/* NOTE(review): the name line (orig 3542) was dropped by the extraction;
 * per the header comment above this is
 * afterTriggerAddEvent(AfterTriggerEventList *events, ...). */
3543  AfterTriggerEvent event, AfterTriggerShared evtshared)
3544 {
3545  Size eventsize = SizeofTriggerEvent(event);
3546  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3547  AfterTriggerEventChunk *chunk;
3548  AfterTriggerShared newshared;
3549  AfterTriggerEvent newevent;
3550 
3551  /*
3552  * If empty list or not enough room in the tail chunk, make a new chunk.
3553  * We assume here that a new shared record will always be needed.
3554  */
3555  chunk = events->tail;
3556  if (chunk == NULL ||
3557  chunk->endfree - chunk->freeptr < needed)
3558  {
3559  Size chunksize;
3560 
3561  /* Create event context if we didn't already */
3562  if (afterTriggers.event_cxt == NULL)
3563  afterTriggers.event_cxt =
/* NOTE(review): the AllocSetContextCreate() call line and its size-args
 * line (orig 3564, 3566) are missing here -- upstream allocates the event
 * context in TopTransactionContext.  Verify against upstream. */
3565  "AfterTriggerEvents",
3567 
3568  /*
3569  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3570  * These numbers are fairly arbitrary, though there is a hard limit at
3571  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3572  * shared records using the available space in ate_flags. Another
3573  * constraint is that if the chunk size gets too huge, the search loop
3574  * below would get slow given a (not too common) usage pattern with
3575  * many distinct event types in a chunk. Therefore, we double the
3576  * preceding chunk size only if there weren't too many shared records
3577  * in the preceding chunk; otherwise we halve it. This gives us some
3578  * ability to adapt to the actual usage pattern of the current query
3579  * while still having large chunk sizes in typical usage. All chunk
3580  * sizes used should be MAXALIGN multiples, to ensure that the shared
3581  * records will be aligned safely.
3582  */
3583 #define MIN_CHUNK_SIZE 1024
3584 #define MAX_CHUNK_SIZE (1024*1024)
3585 
3586 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3587 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3588 #endif
3589 
3590  if (chunk == NULL)
3591  chunksize = MIN_CHUNK_SIZE;
3592  else
3593  {
3594  /* preceding chunk size... */
3595  chunksize = chunk->endptr - (char *) chunk;
3596  /* check number of shared records in preceding chunk */
3597  if ((chunk->endptr - chunk->endfree) <=
3598  (100 * sizeof(AfterTriggerSharedData)))
3599  chunksize *= 2; /* okay, double it */
3600  else
3601  chunksize /= 2; /* too many shared records */
3602  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
3603  }
3604  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
3605  chunk->next = NULL;
3606  chunk->freeptr = CHUNK_DATA_START(chunk);
3607  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
3608  Assert(chunk->endfree - chunk->freeptr >= needed);
3609 
3610  if (events->head == NULL)
3611  events->head = chunk;
3612  else
3613  events->tail->next = chunk;
3614  events->tail = chunk;
3615  /* events->tailfree is now out of sync, but we'll fix it below */
3616  }
3617 
3618  /*
3619  * Try to locate a matching shared-data record already in the chunk. If
3620  * none, make a new one.
3621  */
/* Shared records grow downward from endptr; scan newest-first. */
3622  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
3623  (char *) newshared >= chunk->endfree;
3624  newshared--)
3625  {
3626  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
3627  newshared->ats_relid == evtshared->ats_relid &&
3628  newshared->ats_event == evtshared->ats_event &&
3629  newshared->ats_firing_id == 0)
3630  break;
3631  }
/* Loop fell through without a match: carve out a new shared record. */
3632  if ((char *) newshared < chunk->endfree)
3633  {
3634  *newshared = *evtshared;
3635  newshared->ats_firing_id = 0; /* just to be sure */
3636  chunk->endfree = (char *) newshared;
3637  }
3638 
3639  /* Insert the data */
3640  newevent = (AfterTriggerEvent) chunk->freeptr;
3641  memcpy(newevent, event, eventsize);
3642  /* ... and link the new event to its shared record */
3643  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
3644  newevent->ate_flags |= (char *) newshared - (char *) newevent;
3645 
3646  chunk->freeptr += eventsize;
3647  events->tailfree = chunk->freeptr;
3648 }
3649 
3650 /* ----------
3651  * afterTriggerFreeEventList()
3652  *
3653  * Free all the event storage in the given list.
3654  * ----------
3655  */
3656 static void
/* NOTE(review): the signature line (orig 3657) was dropped; per the header
 * comment this is afterTriggerFreeEventList(AfterTriggerEventList *events). */
3658 {
3659  AfterTriggerEventChunk *chunk;
3660  AfterTriggerEventChunk *next_chunk;
3661 
/* Save each chunk's successor before pfree'ing it. */
3662  for (chunk = events->head; chunk != NULL; chunk = next_chunk)
3663  {
3664  next_chunk = chunk->next;
3665  pfree(chunk);
3666  }
/* Leave the list header in a valid empty state. */
3667  events->head = NULL;
3668  events->tail = NULL;
3669  events->tailfree = NULL;
3670 }
3671 
3672 /* ----------
3673  * afterTriggerRestoreEventList()
3674  *
3675  * Restore an event list to its prior length, removing all the events
3676  * added since it had the value old_events.
3677  * ----------
3678  */
3679 static void
/* NOTE(review): the signature line (orig 3680) was dropped; per the header
 * comment this is afterTriggerRestoreEventList(AfterTriggerEventList *events, */
3681  const AfterTriggerEventList *old_events)
3682 {
3683  AfterTriggerEventChunk *chunk;
3684  AfterTriggerEventChunk *next_chunk;
3685 
3686  if (old_events->tail == NULL)
3687  {
3688  /* restoring to a completely empty state, so free everything */
3689  afterTriggerFreeEventList(events);
3690  }
3691  else
3692  {
/* Copy back head/tail/tailfree as they were at the save point. */
3693  *events = *old_events;
3694  /* free any chunks after the last one we want to keep */
3695  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3696  {
3697  next_chunk = chunk->next;
3698  pfree(chunk);
3699  }
3700  /* and clean up the tail chunk to be the right length */
3701  events->tail->next = NULL;
3702  events->tail->freeptr = events->tailfree;
3703 
3704  /*
3705  * We don't make any effort to remove now-unused shared data records.
3706  * They might still be useful, anyway.
3707  */
3708  }
3709 }
3710 
3711 
3712 /* ----------
3713  * AfterTriggerExecute()
3714  *
3715  * Fetch the required tuples back from the heap and fire one
3716  * single trigger function.
3717  *
3718  * Frequently, this will be fired many times in a row for triggers of
3719  * a single relation. Therefore, we cache the open relation and provide
3720  * fmgr lookup cache space at the caller level. (For triggers fired at
3721  * the end of a query, we can even piggyback on the executor's state.)
3722  *
3723  * event: event currently being fired.
3724  * rel: open relation for event.
3725  * trigdesc: working copy of rel's trigger info.
3726  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
3727  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
3728  * or NULL if no instrumentation is wanted.
3729  * per_tuple_context: memory context to call trigger function in.
3730  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
3731  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
3732  * ----------
3733  */
3734 static void
3735 AfterTriggerExecute(AfterTriggerEvent event,
3736  Relation rel, TriggerDesc *trigdesc,
3737  FmgrInfo *finfo, Instrumentation *instr,
3738  MemoryContext per_tuple_context,
3739  TupleTableSlot *trig_tuple_slot1,
3740  TupleTableSlot *trig_tuple_slot2)
3741 {
3742  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3743  Oid tgoid = evtshared->ats_tgoid;
3744  TriggerData LocTriggerData;
3745  HeapTupleData tuple1;
3746  HeapTupleData tuple2;
3747  HeapTuple rettuple;
3748  Buffer buffer1 = InvalidBuffer;
3749  Buffer buffer2 = InvalidBuffer;
3750  int tgindx;
3751 
3752  /*
3753  * Locate trigger in trigdesc.
3754  */
3755  LocTriggerData.tg_trigger = NULL;
3756  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
3757  {
3758  if (trigdesc->triggers[tgindx].tgoid == tgoid)
3759  {
3760  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
3761  break;
3762  }
3763  }
3764  if (LocTriggerData.tg_trigger == NULL)
3765  elog(ERROR, "could not find trigger %u", tgoid);
3766 
3767  /*
3768  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
3769  * to include time spent re-fetching tuples in the trigger cost.
3770  */
3771  if (instr)
3772  InstrStartNode(instr + tgindx);
3773 
3774  /*
3775  * Fetch the required tuple(s).
3776  */
3777  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
3778  {
/* NOTE(review): the "case AFTER_TRIGGER_FDW_FETCH:" label (orig 3779) and
 * the GetTriggerTransitionTuplestore call line (orig 3782) were dropped by
 * the extraction -- verify against upstream trigger.c. */
3780  {
3781  Tuplestorestate *fdw_tuplestore =
3783  (afterTriggers.fdw_tuplestores);
3784 
3785  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
3786  trig_tuple_slot1))
3787  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
/* NOTE(review): the line above originally ends without a stray token; the
 * "TRIGGER_EVENT_UPDATE &&" half of the condition below (orig 3790) was
 * dropped by the extraction. */
3789  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3791  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
3792  trig_tuple_slot2))
3793  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3794  }
3795  /* fall through */
/* NOTE(review): the "case AFTER_TRIGGER_FDW_REUSE:" label (orig 3796) was
 * dropped by the extraction. */
3797 
3798  /*
3799  * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple()
3800  * ensures that tg_trigtuple does not reference tuplestore memory.
3801  * (It is formally possible for the trigger function to queue
3802  * trigger events that add to the same tuplestore, which can push
3803  * other tuples out of memory.) The distinction is academic,
3804  * because we start with a minimal tuple that ExecFetchSlotTuple()
3805  * must materialize anyway.
3806  */
3807  LocTriggerData.tg_trigtuple =
3808  ExecMaterializeSlot(trig_tuple_slot1);
3809  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3810 
3811  LocTriggerData.tg_newtuple =
3812  ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3814  ExecMaterializeSlot(trig_tuple_slot2) : NULL;
3815  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3816 
3817  break;
3818 
3819  default:
3820  if (ItemPointerIsValid(&(event->ate_ctid1)))
3821  {
3822  ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
3823  if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer1, false, NULL))
3824  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3825  LocTriggerData.tg_trigtuple = &tuple1;
3826  LocTriggerData.tg_trigtuplebuf = buffer1;
3827  }
3828  else
3829  {
3830  LocTriggerData.tg_trigtuple = NULL;
3831  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3832  }
3833 
3834  /* don't touch ctid2 if not there */
3835  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
3837  ItemPointerIsValid(&(event->ate_ctid2)))
3838  {
3839  ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
3840  if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer2, false, NULL))
3841  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3842  LocTriggerData.tg_newtuple = &tuple2;
3843  LocTriggerData.tg_newtuplebuf = buffer2;
3844  }
3845  else
3846  {
3847  LocTriggerData.tg_newtuple = NULL;
3848  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3849  }
3850  }
3851 
3852  /*
3853  * Set up the tuplestore information.
3854  */
/* NOTE(review): the GetTriggerTransitionTuplestore(...) call lines for the
 * old/new transition tables (orig 3857, 3862) were dropped here. */
3855  if (LocTriggerData.tg_trigger->tgoldtable)
3856  LocTriggerData.tg_oldtable =
3858  else
3859  LocTriggerData.tg_oldtable = NULL;
3860  if (LocTriggerData.tg_trigger->tgnewtable)
3861  LocTriggerData.tg_newtable =
3863  else
3864  LocTriggerData.tg_newtable = NULL;
3865 
3866  /*
3867  * Setup the remaining trigger information
3868  */
3869  LocTriggerData.type = T_TriggerData;
3870  LocTriggerData.tg_event =
3872  LocTriggerData.tg_relation = rel;
3873 
3874  MemoryContextReset(per_tuple_context);
3875 
3876  /*
3877  * Call the trigger and throw away any possibly returned updated tuple.
3878  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
3879  */
3880  rettuple = ExecCallTriggerFunc(&LocTriggerData,
3881  tgindx,
3882  finfo,
3883  NULL,
3884  per_tuple_context);
3885  if (rettuple != NULL &&
3886  rettuple != LocTriggerData.tg_trigtuple &&
3887  rettuple != LocTriggerData.tg_newtuple)
3888  heap_freetuple(rettuple);
3889 
3890  /*
3891  * Release buffers
3892  */
3893  if (buffer1 != InvalidBuffer)
3894  ReleaseBuffer(buffer1);
3895  if (buffer2 != InvalidBuffer)
3896  ReleaseBuffer(buffer2);
3897 
3898  /*
3899  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
3900  * one "tuple returned" (really the number of firings).
3901  */
3902  if (instr)
3903  InstrStopNode(instr + tgindx, 1);
3904 }
3905 
3906 
3907 /*
3908  * afterTriggerMarkEvents()
3909  *
3910  * Scan the given event list for not yet invoked events. Mark the ones
3911  * that can be invoked now with the current firing ID.
3912  *
3913  * If move_list isn't NULL, events that are not to be invoked now are
3914  * transferred to move_list.
3915  *
3916  * When immediate_only is TRUE, do not invoke currently-deferred triggers.
3917  * (This will be FALSE only at main transaction exit.)
3918  *
3919  * Returns TRUE if any invokable events were found.
3920  */
3921 static bool
/* NOTE(review): the name line (orig 3922) was dropped; per the header
 * comment this is afterTriggerMarkEvents(AfterTriggerEventList *events, */
3923  AfterTriggerEventList *move_list,
3924  bool immediate_only)
3925 {
3926  bool found = false;
3927  AfterTriggerEvent event;
3928  AfterTriggerEventChunk *chunk;
3929 
3930  for_each_event_chunk(event, chunk, *events)
3931  {
3932  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3933  bool defer_it = false;
3934 
/* NOTE(review): the mask operand of this test (orig 3936, upstream
 * "(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))") was dropped by the
 * extraction -- verify against upstream. */
3935  if (!(event->ate_flags &
3937  {
3938  /*
3939  * This trigger hasn't been called or scheduled yet. Check if we
3940  * should call it now.
3941  */
3942  if (immediate_only && afterTriggerCheckState(evtshared))
3943  {
3944  defer_it = true;
3945  }
3946  else
3947  {
3948  /*
3949  * Mark it as to be fired in this firing cycle.
3950  */
3951  evtshared->ats_firing_id = afterTriggers.firing_counter;
3952  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
3953  found = true;
3954  }
3955  }
3956 
3957  /*
3958  * If it's deferred, move it to move_list, if requested.
3959  */
3960  if (defer_it && move_list != NULL)
3961  {
3962  /* add it to move_list */
3963  afterTriggerAddEvent(move_list, event, evtshared);
3964  /* mark original copy "done" so we don't do it again */
3965  event->ate_flags |= AFTER_TRIGGER_DONE;
3966  }
3967  }
3968 
3969  return found;
3970 }
3971 
3972 /*
3973  * afterTriggerInvokeEvents()
3974  *
3975  * Scan the given event list for events that are marked as to be fired
3976  * in the current firing cycle, and fire them.
3977  *
3978  * If estate isn't NULL, we use its result relation info to avoid repeated
3979  * openings and closing of trigger target relations. If it is NULL, we
3980  * make one locally to cache the info in case there are multiple trigger
3981  * events per rel.
3982  *
3983  * When delete_ok is TRUE, it's safe to delete fully-processed events.
3984  * (We are not very tense about that: we simply reset a chunk to be empty
3985  * if all its events got fired. The objective here is just to avoid useless
3986  * rescanning of events when a trigger queues new events during transaction
3987  * end, so it's not necessary to worry much about the case where only
3988  * some events are fired.)
3989  *
3990  * Returns TRUE if no unfired events remain in the list (this allows us
3991  * to avoid repeating afterTriggerMarkEvents).
3992  */
3993 static bool
/* NOTE(review): the name line (orig 3994) was dropped; per the header
 * comment this is afterTriggerInvokeEvents(AfterTriggerEventList *events, */
3995  CommandId firing_id,
3996  EState *estate,
3997  bool delete_ok)
3998 {
3999  bool all_fired = true;
4000  AfterTriggerEventChunk *chunk;
4001  MemoryContext per_tuple_context;
4002  bool local_estate = false;
4003  Relation rel = NULL;
4004  TriggerDesc *trigdesc = NULL;
4005  FmgrInfo *finfo = NULL;
4006  Instrumentation *instr = NULL;
4007  TupleTableSlot *slot1 = NULL,
4008  *slot2 = NULL;
4009 
4010  /* Make a local EState if need be */
4011  if (estate == NULL)
4012  {
4013  estate = CreateExecutorState();
4014  local_estate = true;
4015  }
4016 
4017  /* Make a per-tuple memory context for trigger function calls */
/* NOTE(review): the AllocSetContextCreate call line and its size-args line
 * (orig 4019, 4021) were dropped by the extraction. */
4018  per_tuple_context =
4020  "AfterTriggerTupleContext",
4022 
4023  for_each_chunk(chunk, *events)
4024  {
4025  AfterTriggerEvent event;
4026  bool all_fired_in_chunk = true;
4027 
4028  for_each_event(event, chunk)
4029  {
4030  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4031 
4032  /*
4033  * Is it one for me to fire?
4034  */
4035  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4036  evtshared->ats_firing_id == firing_id)
4037  {
4038  /*
4039  * So let's fire it... but first, find the correct relation if
4040  * this is not the same relation as before.
4041  */
4042  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4043  {
4044  ResultRelInfo *rInfo;
4045 
4046  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
4047  rel = rInfo->ri_RelationDesc;
4048  trigdesc = rInfo->ri_TrigDesc;
4049  finfo = rInfo->ri_TrigFunctions;
4050  instr = rInfo->ri_TrigInstrument;
4051  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4052  {
4053  if (slot1 != NULL)
4054  {
/* NOTE(review): the ExecDropSingleTupleTableSlot(slot1/slot2) calls
 * (orig 4055-4056) were dropped by the extraction. */
4057  }
4058  slot1 = MakeSingleTupleTableSlot(rel->rd_att);
4059  slot2 = MakeSingleTupleTableSlot(rel->rd_att);
4060  }
4061  if (trigdesc == NULL) /* should not happen */
4062  elog(ERROR, "relation %u has no triggers",
4063  evtshared->ats_relid);
4064  }
4065 
4066  /*
4067  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4068  * still set, so recursive examinations of the event list
4069  * won't try to re-fire it.
4070  */
4071  AfterTriggerExecute(event, rel, trigdesc, finfo, instr,
4072  per_tuple_context, slot1, slot2);
4073 
4074  /*
4075  * Mark the event as done.
4076  */
4077  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4078  event->ate_flags |= AFTER_TRIGGER_DONE;
4079  }
4080  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4081  {
4082  /* something remains to be done */
4083  all_fired = all_fired_in_chunk = false;
4084  }
4085  }
4086 
4087  /* Clear the chunk if delete_ok and nothing left of interest */
4088  if (delete_ok && all_fired_in_chunk)
4089  {
4090  chunk->freeptr = CHUNK_DATA_START(chunk);
4091  chunk->endfree = chunk->endptr;
4092 
4093  /*
4094  * If it's last chunk, must sync event list's tailfree too. Note
4095  * that delete_ok must NOT be passed as true if there could be
4096  * stacked AfterTriggerEventList values pointing at this event
4097  * list, since we'd fail to fix their copies of tailfree.
4098  */
4099  if (chunk == events->tail)
4100  events->tailfree = chunk->freeptr;
4101  }
4102  }
4103  if (slot1 != NULL)
4104  {
/* NOTE(review): the final ExecDropSingleTupleTableSlot(slot1/slot2) calls
 * (orig 4105-4106) were dropped by the extraction. */
4107  }
4108 
4109  /* Release working resources */
4110  MemoryContextDelete(per_tuple_context);
4111 
4112  if (local_estate)
4113  {
4114  ExecCleanUpTriggerState(estate);
4115  FreeExecutorState(estate);
4116  }
4117 
4118  return all_fired;
4119 }
4120 
4121 
4122 /* ----------
4123  * AfterTriggerBeginXact()
4124  *
4125  * Called at transaction start (either BEGIN or implicit for single
4126  * statement outside of transaction block).
4127  * ----------
4128  */
4129 void
/* NOTE(review): the name line (orig 4130, "AfterTriggerBeginXact(void)")
 * was dropped by the extraction; see the header comment above. */
4131 {
4132  /*
4133  * Initialize after-trigger state structure to empty
4134  */
4135  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4136  afterTriggers.query_depth = -1;
4137 
4138  /*
4139  * Verify that there is no leftover state remaining. If these assertions
4140  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4141  * up properly.
4142  */
4143  Assert(afterTriggers.state == NULL);
4144  Assert(afterTriggers.query_stack == NULL);
4145  Assert(afterTriggers.fdw_tuplestores == NULL);
4146  Assert(afterTriggers.old_tuplestores == NULL);
4147  Assert(afterTriggers.new_tuplestores == NULL);
4148  Assert(afterTriggers.maxquerydepth == 0);
4149  Assert(afterTriggers.event_cxt == NULL);
4150  Assert(afterTriggers.events.head == NULL);
4151  Assert(afterTriggers.state_stack == NULL);
4152  Assert(afterTriggers.events_stack == NULL);
4153  Assert(afterTriggers.depth_stack == NULL);
4154  Assert(afterTriggers.firing_stack == NULL);
4155  Assert(afterTriggers.maxtransdepth == 0);
4156 }
4157 
4158 
4159 /* ----------
4160  * AfterTriggerBeginQuery()
4161  *
4162  * Called just before we start processing a single query within a
4163  * transaction (or subtransaction). Most of the real work gets deferred
4164  * until somebody actually tries to queue a trigger event.
4165  * ----------
4166  */
4167 void
/* NOTE(review): the name line (orig 4168, "AfterTriggerBeginQuery(void)")
 * was dropped by the extraction; see the header comment above. */
4169 {
4170  /* Increase the query stack depth */
4171  afterTriggers.query_depth++;
4172 }
4173 
4174 
4175 /* ----------
4176  * AfterTriggerEndQuery()
4177  *
4178  * Called after one query has been completely processed. At this time
4179  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4180  * transfer deferred trigger events to the global deferred-trigger list.
4181  *
4182  * Note that this must be called BEFORE closing down the executor
4183  * with ExecutorEnd, because we make use of the EState's info about
4184  * target relations. Normally it is called from ExecutorFinish.
4185  * ----------
4186  */
4187 void
/* NOTE(review): the signature line (doc line 4188) is missing from this
 * extraction; the body references "estate", so presumably it reads
 * AfterTriggerEndQuery(EState *estate) -- confirm against upstream. */
4189 {
4190  AfterTriggerEventList *events;
4191  Tuplestorestate *fdw_tuplestore;
4192  Tuplestorestate *old_tuplestore;
4193  Tuplestorestate *new_tuplestore;
4194 
4195  /* Must be inside a query, too */
4196  Assert(afterTriggers.query_depth >= 0);
4197 
4198  /*
4199  * If we never even got as far as initializing the event stack, there
4200  * certainly won't be any events, so exit quickly.
4201  */
4202  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4203  {
4204  afterTriggers.query_depth--;
4205  return;
4206  }
4207 
4208  /*
4209  * Process all immediate-mode triggers queued by the query, and move the
4210  * deferred ones to the main list of deferred events.
4211  *
4212  * Notice that we decide which ones will be fired, and put the deferred
4213  * ones on the main list, before anything is actually fired. This ensures
4214  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4215  * IMMEDIATE: all events we have decided to defer will be available for it
4216  * to fire.
4217  *
4218  * We loop in case a trigger queues more events at the same query level.
4219  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4220  * will instead fire any triggers in a dedicated query level. Foreign key
4221  * enforcement triggers do add to the current query level, thanks to their
4222  * passing fire_triggers = false to SPI_execute_snapshot(). Other
4223  * C-language triggers might do likewise. Be careful here: firing a
4224  * trigger could result in query_stack being repalloc'd, so we can't save
4225  * its address across afterTriggerInvokeEvents calls.
4226  *
4227  * If we find no firable events, we don't have to increment
4228  * firing_counter.
4229  */
4230  for (;;)
4231  {
/* Re-fetch the list pointer each iteration: query_stack may have been
 * repalloc'd by a fired trigger (see comment above). */
4232  events = &afterTriggers.query_stack[afterTriggers.query_depth];
4233  if (afterTriggerMarkEvents(events, &afterTriggers.events, true))
4234  {
4235  CommandId firing_id = afterTriggers.firing_counter++;
4236 
4237  /* OK to delete the immediate events after processing them */
4238  if (afterTriggerInvokeEvents(events, firing_id, estate, true))
4239  break; /* all fired */
4240  }
4241  else
4242  break;
4243  }
4244 
4245  /* Release query-local storage for events, including tuplestore if any */
4246  fdw_tuplestore = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4247  if (fdw_tuplestore)
4248  {
4249  tuplestore_end(fdw_tuplestore);
4250  afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4251  }
4252  old_tuplestore = afterTriggers.old_tuplestores[afterTriggers.query_depth];
4253  if (old_tuplestore)
4254  {
4255  tuplestore_end(old_tuplestore);
4256  afterTriggers.old_tuplestores[afterTriggers.query_depth] = NULL;
4257  }
4258  new_tuplestore = afterTriggers.new_tuplestores[afterTriggers.query_depth];
4259  if (new_tuplestore)
4260  {
4261  tuplestore_end(new_tuplestore);
4262  afterTriggers.new_tuplestores[afterTriggers.query_depth] = NULL;
4263  }
4264  afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4265 
/* Pop this query level now that its events and tuplestores are released. */
4266  afterTriggers.query_depth--;
4267 }
4268 
4269 
4270 /* ----------
4271  * AfterTriggerFireDeferred()
4272  *
4273  * Called just before the current transaction is committed. At this
4274  * time we invoke all pending DEFERRED triggers.
4275  *
4276  * It is possible for other modules to queue additional deferred triggers
4277  * during pre-commit processing; therefore xact.c may have to call this
4278  * multiple times.
4279  * ----------
4280  */
4281 void
/* NOTE(review): the signature line (doc line 4282) is missing from this
 * extraction; presumably AfterTriggerFireDeferred(void) -- confirm. */
4283 {
4284  AfterTriggerEventList *events;
4285  bool snap_pushed = false;
4286 
4287  /* Must not be inside a query */
4288  Assert(afterTriggers.query_depth == -1);
4289 
4290  /*
4291  * If there are any triggers to fire, make sure we have set a snapshot for
4292  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
4293  * can't assume ActiveSnapshot is valid on entry.)
4294  */
4295  events = &afterTriggers.events;
4296  if (events->head != NULL)
4297  {
/* NOTE(review): doc line 4298 is missing here; per the comment above and
 * snap_pushed it presumably pushes an active snapshot (e.g.
 * PushActiveSnapshot(GetTransactionSnapshot())) -- confirm upstream. */
4299  snap_pushed = true;
4300  }
4301 
4302  /*
4303  * Run all the remaining triggers. Loop until they are all gone, in case
4304  * some trigger queues more for us to do.
4305  */
4306  while (afterTriggerMarkEvents(events, NULL, false))
4307  {
4308  CommandId firing_id = afterTriggers.firing_counter++;
4309 
4310  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4311  break; /* all fired */
4312  }
4313 
4314  /*
4315  * We don't bother freeing the event list, since it will go away anyway
4316  * (and more efficiently than via pfree) in AfterTriggerEndXact.
4317  */
4318 
4319  if (snap_pushed)
/* NOTE(review): doc line 4320 is missing; it presumably pops the snapshot
 * pushed above (PopActiveSnapshot()) -- confirm upstream. */
4321 }
4322 
4323 
4324 /* ----------
4325  * AfterTriggerEndXact()
4326  *
4327  * The current transaction is finishing.
4328  *
4329  * Any unfired triggers are canceled so we simply throw
4330  * away anything we know.
4331  *
4332  * Note: it is possible for this to be called repeatedly in case of
4333  * error during transaction abort; therefore, do not complain if
4334  * already closed down.
4335  * ----------
4336  */
4337 void
4338 AfterTriggerEndXact(bool isCommit)
4339 {
4340  /*
4341  * Forget the pending-events list.
4342  *
4343  * Since all the info is in TopTransactionContext or children thereof, we
4344  * don't really need to do anything to reclaim memory. However, the
4345  * pending-events list could be large, and so it's useful to discard it as
4346  * soon as possible --- especially if we are aborting because we ran out
4347  * of memory for the list!
4348  */
4349  if (afterTriggers.event_cxt)
4350  {
4351  MemoryContextDelete(afterTriggers.event_cxt);
4352  afterTriggers.event_cxt = NULL;
4353  afterTriggers.events.head = NULL;
4354  afterTriggers.events.tail = NULL;
4355  afterTriggers.events.tailfree = NULL;
4356  }
4357 
4358  /*
4359  * Forget any subtransaction state as well. Since this can't be very
4360  * large, we let the eventual reset of TopTransactionContext free the
4361  * memory instead of doing it here.
4362  */
4363  afterTriggers.state_stack = NULL;
4364  afterTriggers.events_stack = NULL;
4365  afterTriggers.depth_stack = NULL;
4366  afterTriggers.firing_stack = NULL;
4367  afterTriggers.maxtransdepth = 0;
4368 
4369 
4370  /*
4371  * Forget the query stack and constraint-related state information. As
4372  * with the subtransaction state information, we don't bother freeing the
4373  * memory here.
4374  */
4375  afterTriggers.query_stack = NULL;
4376  afterTriggers.fdw_tuplestores = NULL;
4377  afterTriggers.old_tuplestores = NULL;
4378  afterTriggers.new_tuplestores = NULL;
4379  afterTriggers.maxquerydepth = 0;
4380  afterTriggers.state = NULL;
4381 
4382  /* No more afterTriggers manipulation until next transaction starts. */
4383  afterTriggers.query_depth = -1;
4384 }
4385 
4386 /*
4387  * AfterTriggerBeginSubXact()
4388  *
4389  * Start a subtransaction.
4390  */
4391 void
/* NOTE(review): the signature line (doc line 4392) is missing from this
 * extraction; presumably AfterTriggerBeginSubXact(void) -- confirm. */
4393 {
4394  int my_level = GetCurrentTransactionNestLevel();
4395 
4396  /*
4397  * Allocate more space in the stacks if needed. (Note: because the
4398  * minimum nest level of a subtransaction is 2, we waste the first couple
4399  * entries of each array; not worth the notational effort to avoid it.)
4400  */
4401  while (my_level >= afterTriggers.maxtransdepth)
4402  {
4403  if (afterTriggers.maxtransdepth == 0)
4404  {
4405  MemoryContext old_cxt;
4406 
/* NOTE(review): doc line 4407 is missing; old_cxt is restored at line 4420
 * below, so this presumably switches into the transaction-lifetime context
 * (e.g. old_cxt = MemoryContextSwitchTo(TopTransactionContext)) -- confirm. */
4408 
4409 #define DEFTRIG_INITALLOC 8
4410  afterTriggers.state_stack = (SetConstraintState *)
4411  palloc(DEFTRIG_INITALLOC * sizeof(SetConstraintState));
4412  afterTriggers.events_stack = (AfterTriggerEventList *)
/* NOTE(review): doc line 4413 is missing; by symmetry with the neighboring
 * allocations it is presumably
 * palloc(DEFTRIG_INITALLOC * sizeof(AfterTriggerEventList)) -- confirm. */
4414  afterTriggers.depth_stack = (int *)
4415  palloc(DEFTRIG_INITALLOC * sizeof(int));
4416  afterTriggers.firing_stack = (CommandId *)
4417  palloc(DEFTRIG_INITALLOC * sizeof(CommandId));
4418  afterTriggers.maxtransdepth = DEFTRIG_INITALLOC;
4419 
4420  MemoryContextSwitchTo(old_cxt);
4421  }
4422  else
4423  {
4424  /* repalloc will keep the stacks in the same context */
4425  int new_alloc = afterTriggers.maxtransdepth * 2;
4426 
4427  afterTriggers.state_stack = (SetConstraintState *)
4428  repalloc(afterTriggers.state_stack,
4429  new_alloc * sizeof(SetConstraintState));
4430  afterTriggers.events_stack = (AfterTriggerEventList *)
4431  repalloc(afterTriggers.events_stack,
4432  new_alloc * sizeof(AfterTriggerEventList));
4433  afterTriggers.depth_stack = (int *)
4434  repalloc(afterTriggers.depth_stack,
4435  new_alloc * sizeof(int));
4436  afterTriggers.firing_stack = (CommandId *)
4437  repalloc(afterTriggers.firing_stack,
4438  new_alloc * sizeof(CommandId));
4439  afterTriggers.maxtransdepth = new_alloc;
4440  }
4441  }
4442 
4443  /*
4444  * Push the current information into the stack. The SET CONSTRAINTS state
4445  * is not saved until/unless changed. Likewise, we don't make a
4446  * per-subtransaction event context until needed.
4447  */
4448  afterTriggers.state_stack[my_level] = NULL;
4449  afterTriggers.events_stack[my_level] = afterTriggers.events;
4450  afterTriggers.depth_stack[my_level] = afterTriggers.query_depth;
4451  afterTriggers.firing_stack[my_level] = afterTriggers.firing_counter;
4452 }
4453 
4454 /*
4455  * AfterTriggerEndSubXact()
4456  *
4457  * The current subtransaction is ending.
4458  */
4459 void
/* NOTE(review): the signature line (doc line 4460) is missing from this
 * extraction; the body tests "isCommit", so presumably it reads
 * AfterTriggerEndSubXact(bool isCommit) -- confirm upstream. */
4461 {
4462  int my_level = GetCurrentTransactionNestLevel();
4463  SetConstraintState state;
4464  AfterTriggerEvent event;
4465  AfterTriggerEventChunk *chunk;
4466  CommandId subxact_firing_id;
4467 
4468  /*
4469  * Pop the prior state if needed.
4470  */
4471  if (isCommit)
4472  {
4473  Assert(my_level < afterTriggers.maxtransdepth);
4474  /* If we saved a prior state, we don't need it anymore */
4475  state = afterTriggers.state_stack[my_level];
4476  if (state != NULL)
4477  pfree(state);
4478  /* this avoids double pfree if error later: */
4479  afterTriggers.state_stack[my_level] = NULL;
4480  Assert(afterTriggers.query_depth ==
4481  afterTriggers.depth_stack[my_level]);
4482  }
4483  else
4484  {
4485  /*
4486  * Aborting. It is possible subxact start failed before calling
4487  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
4488  * stack levels that aren't there.
4489  */
4490  if (my_level >= afterTriggers.maxtransdepth)
4491  return;
4492 
4493  /*
4494  * Release any event lists from queries being aborted, and restore
4495  * query_depth to its pre-subxact value. This assumes that a
4496  * subtransaction will not add events to query levels started in a
4497  * earlier transaction state.
4498  */
4499  while (afterTriggers.query_depth > afterTriggers.depth_stack[my_level])
4500  {
/* Levels at or above maxquerydepth were never initialized, so there is
 * nothing to release for them. */
4501  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
4502  {
4503  Tuplestorestate *ts;
4504 
4505  ts = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4506  if (ts)
4507  {
4508  tuplestore_end(ts);
4509  afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4510  }
4511  ts = afterTriggers.old_tuplestores[afterTriggers.query_depth];
4512  if (ts)
4513  {
4514  tuplestore_end(ts);
4515  afterTriggers.old_tuplestores[afterTriggers.query_depth] = NULL;
4516  }
4517  ts = afterTriggers.new_tuplestores[afterTriggers.query_depth];
4518  if (ts)
4519  {
4520  tuplestore_end(ts);
4521  afterTriggers.new_tuplestores[afterTriggers.query_depth] = NULL;
4522  }
4523 
4524  afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4525  }
4526 
4527  afterTriggers.query_depth--;
4528  }
4529  Assert(afterTriggers.query_depth ==
4530  afterTriggers.depth_stack[my_level]);
4531 
4532  /*
4533  * Restore the global deferred-event list to its former length,
4534  * discarding any events queued by the subxact.
4535  */
4536  afterTriggerRestoreEventList(&afterTriggers.events,
4537  &afterTriggers.events_stack[my_level]);
4538 
4539  /*
4540  * Restore the trigger state. If the saved state is NULL, then this
4541  * subxact didn't save it, so it doesn't need restoring.
4542  */
4543  state = afterTriggers.state_stack[my_level];
4544  if (state != NULL)
4545  {
4546  pfree(afterTriggers.state);
4547  afterTriggers.state = state;
4548  }
4549  /* this avoids double pfree if error later: */
4550  afterTriggers.state_stack[my_level] = NULL;
4551 
4552  /*
4553  * Scan for any remaining deferred events that were marked DONE or IN
4554  * PROGRESS by this subxact or a child, and un-mark them. We can
4555  * recognize such events because they have a firing ID greater than or
4556  * equal to the firing_counter value we saved at subtransaction start.
4557  * (This essentially assumes that the current subxact includes all
4558  * subxacts started after it.)
4559  */
4560  subxact_firing_id = afterTriggers.firing_stack[my_level];
4561  for_each_event_chunk(event, chunk, afterTriggers.events)
4562  {
4563  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4564 
4565  if (event->ate_flags &
/* NOTE(review): doc line 4566 (the mask operand) is missing from this
 * extraction; per the comment above it is presumably the DONE/IN-PROGRESS
 * flag bits -- confirm upstream. */
4567  {
4568  if (evtshared->ats_firing_id >= subxact_firing_id)
4569  event->ate_flags &=
/* NOTE(review): doc line 4570 (the complemented mask operand) is missing;
 * presumably it clears the same DONE/IN-PROGRESS bits -- confirm. */
4571  }
4572  }
4573  }
4574 }
4575 
4576 /* ----------
4577  * AfterTriggerEnlargeQueryState()
4578  *
4579  * Prepare the necessary state so that we can record AFTER trigger events
4580  * queued by a query. It is allowed to have nested queries within a
4581  * (sub)transaction, so we need to have separate state for each query
4582  * nesting level.
4583  * ----------
4584  */
4585 static void
/* NOTE(review): the signature line (doc line 4586) is missing from this
 * extraction; presumably AfterTriggerEnlargeQueryState(void) -- confirm. */
4587 {
4588  int init_depth = afterTriggers.maxquerydepth;
4589 
4590  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
4591 
4592  if (afterTriggers.maxquerydepth == 0)
4593  {
4594  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
4595 
/* NOTE(review): doc lines 4597, 4600, 4603 and 4606 (the allocator calls for
 * the four arrays below) are missing from this extraction.  The else-branch
 * comment ("repalloc will keep the stack in the same context") suggests these
 * allocate in a transaction-lifetime context, and the lazy-initialization
 * comment below suggests the tuplestore arrays are zero-initialized --
 * confirm against the upstream source. */
4596  afterTriggers.query_stack = (AfterTriggerEventList *)
4598  new_alloc * sizeof(AfterTriggerEventList));
4599  afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4601  new_alloc * sizeof(Tuplestorestate *));
4602  afterTriggers.old_tuplestores = (Tuplestorestate **)
4604  new_alloc * sizeof(Tuplestorestate *));
4605  afterTriggers.new_tuplestores = (Tuplestorestate **)
4607  new_alloc * sizeof(Tuplestorestate *));
4608  afterTriggers.maxquerydepth = new_alloc;
4609  }
4610  else
4611  {
4612  /* repalloc will keep the stack in the same context */
4613  int old_alloc = afterTriggers.maxquerydepth;
4614  int new_alloc = Max(afterTriggers.query_depth + 1,
4615  old_alloc * 2);
4616 
4617  afterTriggers.query_stack = (AfterTriggerEventList *)
4618  repalloc(afterTriggers.query_stack,
4619  new_alloc * sizeof(AfterTriggerEventList));
4620  afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4621  repalloc(afterTriggers.fdw_tuplestores,
4622  new_alloc * sizeof(Tuplestorestate *));
4623  afterTriggers.old_tuplestores = (Tuplestorestate **)
4624  repalloc(afterTriggers.old_tuplestores,
4625  new_alloc * sizeof(Tuplestorestate *));
4626  afterTriggers.new_tuplestores = (Tuplestorestate **)
4627  repalloc(afterTriggers.new_tuplestores,
4628  new_alloc * sizeof(Tuplestorestate *));
4629  /* Clear newly-allocated slots for subsequent lazy initialization. */
4630  memset(afterTriggers.fdw_tuplestores + old_alloc,
4631  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4632  memset(afterTriggers.old_tuplestores + old_alloc,
4633  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4634  memset(afterTriggers.new_tuplestores + old_alloc,
4635  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4636  afterTriggers.maxquerydepth = new_alloc;
4637  }
4638 
4639  /* Initialize new query lists to empty */
4640  while (init_depth < afterTriggers.maxquerydepth)
4641  {
4642  AfterTriggerEventList *events;
4643 
4644  events = &afterTriggers.query_stack[init_depth];
4645  events->head = NULL;
4646  events->tail = NULL;
4647  events->tailfree = NULL;
4648 
4649  ++init_depth;
4650  }
4651 }
4652 
4653 /*
4654  * Create an empty SetConstraintState with room for numalloc trigstates
4655  */
4656 static SetConstraintState
/* NOTE(review): the signature line (doc line 4657) is missing from this
 * extraction; per the header comment and body it is presumably
 * SetConstraintStateCreate(int numalloc) -- confirm upstream. */
4658 {
4659  SetConstraintState state;
4660 
4661  /* Behave sanely with numalloc == 0 */
4662  if (numalloc <= 0)
4663  numalloc = 1;
4664 
4665  /*
4666  * We assume that zeroing will correctly initialize the state values.
4667  */
4668  state = (SetConstraintState)
/* NOTE(review): doc line 4669 (the allocator call) is missing; given the
 * zeroing comment above it is presumably a zero-filling allocation such as
 * palloc0(...) of the size computed below -- confirm upstream. */
4670  offsetof(SetConstraintStateData, trigstates) +
4671  numalloc * sizeof(SetConstraintTriggerData));
4672 
4673  state->numalloc = numalloc;
4674 
4675  return state;
4676 }
4677 
4678 /*
4679  * Copy a SetConstraintState
4680  */
4681 static SetConstraintState
4682 SetConstraintStateCopy(SetConstraintState origstate)
4683 {
4684  SetConstraintState state;
4685 
4686  state = SetConstraintStateCreate(origstate->numstates);
4687 
4688  state->all_isset = origstate->all_isset;
4689  state->all_isdeferred = origstate->all_isdeferred;
4690  state->numstates = origstate->numstates;
4691  memcpy(state->trigstates, origstate->trigstates,
4692  origstate->numstates * sizeof(SetConstraintTriggerData));
4693 
4694  return state;
4695 }
4696 
4697 /*
4698  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
4699  * pointer to the state object (it will change if we have to repalloc).
4700  */
4701 static SetConstraintState
/* NOTE(review): the first signature line (doc line 4702) is missing from this
 * extraction; given the continuation line below and the body it presumably
 * reads SetConstraintStateAddItem(SetConstraintState state, -- confirm. */
4703  Oid tgoid, bool tgisdeferred)
4704 {
/* Grow the trigstates array (doubling) before appending, if it is full. */
4705  if (state->numstates >= state->numalloc)
4706  {
4707  int newalloc = state->numalloc * 2;
4708 
4709  newalloc = Max(newalloc, 8); /* in case original has size 0 */
4710  state = (SetConstraintState)
4711  repalloc(state,
4712  offsetof(SetConstraintStateData, trigstates) +
4713  newalloc * sizeof(SetConstraintTriggerData));
4714  state->numalloc = newalloc;
4715  Assert(state->numstates < state->numalloc);
4716  }
4717 
4718  state->trigstates[state->numstates].sct_tgoid = tgoid;
4719  state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
4720  state->numstates++;
4721 
4722  return state;
4723 }
4724 
4725 /* ----------
4726  * AfterTriggerSetState()
4727  *
4728  * Execute the SET CONSTRAINTS ... utility command.
4729  * ----------
4730  */
4731 void
/* NOTE(review): the signature line (doc line 4732) is missing from this
 * extraction; the body references "stmt" with fields constraints/deferred,
 * so presumably it reads AfterTriggerSetState(ConstraintsSetStmt *stmt) --
 * confirm against the upstream source. */
4733 {
4734  int my_level = GetCurrentTransactionNestLevel();
4735 
4736  /* If we haven't already done so, initialize our state. */
4737  if (afterTriggers.state == NULL)
4738  afterTriggers.state = SetConstraintStateCreate(8);
4739 
4740  /*
4741  * If in a subtransaction, and we didn't save the current state already,
4742  * save it so it can be restored if the subtransaction aborts.
4743  */
4744  if (my_level > 1 &&
4745  afterTriggers.state_stack[my_level] == NULL)
4746  {
4747  afterTriggers.state_stack[my_level] =
4748  SetConstraintStateCopy(afterTriggers.state);
4749  }
4750 
4751  /*
4752  * Handle SET CONSTRAINTS ALL ...
4753  */
4754  if (stmt->constraints == NIL)
4755  {
4756  /*
4757  * Forget any previous SET CONSTRAINTS commands in this transaction.
4758  */
4759  afterTriggers.state->numstates = 0;
4760 
4761  /*
4762  * Set the per-transaction ALL state to known.
4763  */
4764  afterTriggers.state->all_isset = true;
4765  afterTriggers.state->all_isdeferred = stmt->deferred;
4766  }
4767  else
4768  {
4769  Relation conrel;
4770  Relation tgrel;
4771  List *conoidlist = NIL;
4772  List *tgoidlist = NIL;
4773  ListCell *lc;
4774 
4775  /*
4776  * Handle SET CONSTRAINTS constraint-name [, ...]
4777  *
4778  * First, identify all the named constraints and make a list of their
4779  * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
4780  * the same name within a schema, the specifications are not
4781  * necessarily unique. Our strategy is to target all matching
4782  * constraints within the first search-path schema that has any
4783  * matches, but disregard matches in schemas beyond the first match.
4784  * (This is a bit odd but it's the historical behavior.)
4785  */
/* NOTE(review): doc line 4786 is missing; conrel is scanned and closed with
 * AccessShareLock below, so this presumably opens pg_constraint (e.g.
 * conrel = heap_open(ConstraintRelationId, AccessShareLock)) -- confirm. */
4787 
4788  foreach(lc, stmt->constraints)
4789  {
4790  RangeVar *constraint = lfirst(lc);
4791  bool found;
4792  List *namespacelist;
4793  ListCell *nslc;
4794 
4795  if (constraint->catalogname)
4796  {
4797  if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
4798  ereport(ERROR,
4799  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4800  errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
4801  constraint->catalogname, constraint->schemaname,
4802  constraint->relname)));
4803  }
4804 
4805  /*
4806  * If we're given the schema name with the constraint, look only
4807  * in that schema. If given a bare constraint name, use the
4808  * search path to find the first matching constraint.
4809  */
4810  if (constraint->schemaname)
4811  {
4812  Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
4813  false);
4814 
4815  namespacelist = list_make1_oid(namespaceId);
4816  }
4817  else
4818  {
4819  namespacelist = fetch_search_path(true);
4820  }
4821 
4822  found = false;
4823  foreach(nslc, namespacelist)
4824  {
4825  Oid namespaceId = lfirst_oid(nslc);
4826  SysScanDesc conscan;
4827  ScanKeyData skey[2];
4828  HeapTuple tup;
4829 
4830  ScanKeyInit(&skey[0],
/* NOTE(review): doc line 4831 (the attribute-number argument) is missing;
 * given the name comparison it is presumably the pg_constraint conname
 * attribute -- confirm. */
4832  BTEqualStrategyNumber, F_NAMEEQ,
4833  CStringGetDatum(constraint->relname));
4834  ScanKeyInit(&skey[1],
/* NOTE(review): doc line 4835 (the attribute-number argument) is missing;
 * given the namespace comparison it is presumably the pg_constraint
 * connamespace attribute -- confirm. */
4836  BTEqualStrategyNumber, F_OIDEQ,
4837  ObjectIdGetDatum(namespaceId));
4838 
4839  conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
4840  true, NULL, 2, skey);
4841 
4842  while (HeapTupleIsValid(tup = systable_getnext(conscan)))
4843  {
/* NOTE(review): doc line 4844 is missing; "con" is used just below, so this
 * presumably declares it, e.g.
 * Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup); -- confirm. */
4845 
4846  if (con->condeferrable)
4847  conoidlist = lappend_oid(conoidlist,
4848  HeapTupleGetOid(tup));
4849  else if (stmt->deferred)
4850  ereport(ERROR,
4851  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
4852  errmsg("constraint \"%s\" is not deferrable",
4853  constraint->relname)));
4854  found = true;
4855  }
4856 
4857  systable_endscan(conscan);
4858 
4859  /*
4860  * Once we've found a matching constraint we do not search
4861  * later parts of the search path.
4862  */
4863  if (found)
4864  break;
4865  }
4866 
4867  list_free(namespacelist);
4868 
4869  /*
4870  * Not found ?
4871  */
4872  if (!found)
4873  ereport(ERROR,
4874  (errcode(ERRCODE_UNDEFINED_OBJECT),
4875  errmsg("constraint \"%s\" does not exist",
4876  constraint->relname)));
4877  }
4878 
4879  heap_close(conrel, AccessShareLock);
4880 
4881  /*
4882  * Now, locate the trigger(s) implementing each of these constraints,
4883  * and make a list of their OIDs.
4884  */
/* NOTE(review): doc line 4885 is missing; tgrel is scanned and closed with
 * AccessShareLock below, so this presumably opens pg_trigger (e.g.
 * tgrel = heap_open(TriggerRelationId, AccessShareLock)) -- confirm. */
4886 
4887  foreach(lc, conoidlist)
4888  {
4889  Oid conoid = lfirst_oid(lc);
4890  bool found;
4891  ScanKeyData skey;
4892  SysScanDesc tgscan;
4893  HeapTuple htup;
4894 
4895  found = false;
4896 
4897  ScanKeyInit(&skey,
/* NOTE(review): doc line 4898 (the attribute-number argument) is missing;
 * given the constraint-OID comparison it is presumably the pg_trigger
 * tgconstraint attribute -- confirm. */
4899  BTEqualStrategyNumber, F_OIDEQ,
4900  ObjectIdGetDatum(conoid));
4901 
4902  tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
4903  NULL, 1, &skey);
4904 
4905  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
4906  {
4907  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
4908 
4909  /*
4910  * Silently skip triggers that are marked as non-deferrable in
4911  * pg_trigger. This is not an error condition, since a
4912  * deferrable RI constraint may have some non-deferrable
4913  * actions.
4914  */
4915  if (pg_trigger->tgdeferrable)
4916  tgoidlist = lappend_oid(tgoidlist,
4917  HeapTupleGetOid(htup));
4918 
4919  found = true;
4920  }
4921 
4922  systable_endscan(tgscan);
4923 
4924  /* Safety check: a deferrable constraint should have triggers */
4925  if (!found)
4926  elog(ERROR, "no triggers found for constraint with OID %u",
4927  conoid);
4928  }
4929 
4930  heap_close(tgrel, AccessShareLock);
4931 
4932  /*
4933  * Now we can set the trigger states of individual triggers for this
4934  * xact.
4935  */
4936  foreach(lc, tgoidlist)
4937  {
4938  Oid tgoid = lfirst_oid(lc);
4939  SetConstraintState state = afterTriggers.state;
4940  bool found = false;
4941  int i;
4942 
4943  for (i = 0; i < state->numstates; i++)
4944  {
4945  if (state->trigstates[i].sct_tgoid == tgoid)
4946  {
4947  state->trigstates[i].sct_tgisdeferred = stmt->deferred;
4948  found = true;
4949  break;
4950  }
4951  }
4952  if (!found)
4953  {
/* SetConstraintStateAddItem may repalloc, so reassign the global pointer. */
4954  afterTriggers.state =
4955  SetConstraintStateAddItem(state, tgoid, stmt->deferred);
4956  }
4957  }
4958  }
4959 
4960  /*
4961  * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
4962  * checks against that constraint must be made when the SET CONSTRAINTS
4963  * command is executed -- i.e. the effects of the SET CONSTRAINTS command
4964  * apply retroactively. We've updated the constraints state, so scan the
4965  * list of previously deferred events to fire any that have now become
4966  * immediate.
4967  *
4968  * Obviously, if this was SET ... DEFERRED then it can't have converted
4969  * any unfired events to immediate, so we need do nothing in that case.
4970  */
4971  if (!stmt->deferred)
4972  {
4973  AfterTriggerEventList *events = &afterTriggers.events;
4974  bool snapshot_set = false;
4975 
4976  while (afterTriggerMarkEvents(events, NULL, true))
4977  {
4978  CommandId firing_id = afterTriggers.firing_counter++;
4979 
4980  /*
4981  * Make sure a snapshot has been established in case trigger
4982  * functions need one. Note that we avoid setting a snapshot if
4983  * we don't find at least one trigger that has to be fired now.
4984  * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
4985  * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
4986  * at the start of a transaction it's not possible for any trigger
4987  * events to be queued yet.)
4988  */
4989  if (!snapshot_set)
4990  {
/* NOTE(review): doc line 4991 is missing; per the comment above and
 * snapshot_set, it presumably pushes an active snapshot (e.g.
 * PushActiveSnapshot(GetTransactionSnapshot())) -- confirm. */
4992  snapshot_set = true;
4993  }
4994 
4995  /*
4996  * We can delete fired events if we are at top transaction level,
4997  * but we'd better not if inside a subtransaction, since the
4998  * subtransaction could later get rolled back.
4999  */
5000  if (afterTriggerInvokeEvents(events, firing_id, NULL,
5001  !IsSubTransaction()))
5002  break; /* all fired */
5003  }
5004 
5005  if (snapshot_set)
/* NOTE(review): doc line 5006 is missing; it presumably pops the snapshot
 * pushed above (PopActiveSnapshot()) -- confirm upstream. */
5007  }
5008 }
5009 
5010 /* ----------
5011  * AfterTriggerPendingOnRel()
5012  * Test to see if there are any pending after-trigger events for rel.
5013  *
5014  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5015  * it is unsafe to perform major surgery on a relation. Note that only
5016  * local pending events are examined. We assume that having exclusive lock
5017  * on a rel guarantees there are no unserviced events in other backends ---
5018  * but having a lock does not prevent there being such events in our own.
5019  *
5020  * In some scenarios it'd be reasonable to remove pending events (more
5021  * specifically, mark them DONE by the current subxact) but without a lot
5022  * of knowledge of the trigger semantics we can't do this in general.
5023  * ----------
5024  */
5025 bool
/* NOTE(review): the signature line (doc line 5026) is missing from this
 * extraction; the body compares against "relid", so presumably it reads
 * AfterTriggerPendingOnRel(Oid relid) -- confirm upstream. */
5027 {
5028  AfterTriggerEvent event;
5029  AfterTriggerEventChunk *chunk;
5030  int depth;
5031 
5032  /* Scan queued events */
5033  for_each_event_chunk(event, chunk, afterTriggers.events)
5034  {
5035  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5036 
5037  /*
5038  * We can ignore completed events. (Even if a DONE flag is rolled
5039  * back by subxact abort, it's OK because the effects of the TRUNCATE
5040  * or whatever must get rolled back too.)
5041  */
5042  if (event->ate_flags & AFTER_TRIGGER_DONE)
5043  continue;
5044 
5045  if (evtshared->ats_relid == relid)
5046  return true;
5047  }
5048 
5049  /*
5050  * Also scan events queued by incomplete queries. This could only matter
5051  * if TRUNCATE/etc is executed by a function or trigger within an updating
5052  * query on the same relation, which is pretty perverse, but let's check.
5053  */
/* Guard with maxquerydepth too: levels past it were never initialized. */
5054  for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
5055  {
5056  for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth])
5057  {
5058  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5059 
5060  if (event->ate_flags & AFTER_TRIGGER_DONE)
5061  continue;
5062 
5063  if (evtshared->ats_relid == relid)
5064  return true;
5065  }
5066  }
5067 
5068  return false;
5069 }
5070 
5071 
5072 /* ----------
5073  * AfterTriggerSaveEvent()
5074  *
5075  * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5076  * be fired for an event.
5077  *
5078  * NOTE: this is called whenever there are any triggers associated with
5079  * the event (even if they are disabled). This function decides which
5080  * triggers actually need to be queued. It is also called after each row,
5081  * even if there are no triggers for that event, if there are any AFTER
5082  * STATEMENT triggers for the statement which use transition tables, so that
5083  * the transition tuplestores can be built.
5084  *
5085  * Transition tuplestores are built now, rather than when events are pulled
5086  * off of the queue because AFTER ROW triggers are allowed to select from the
5087  * transition tables for the statement.
5088  * ----------
5089  */
5090 static void
5092  int event, bool row_trigger,
5093  HeapTuple oldtup, HeapTuple newtup,
5094  List *recheckIndexes, Bitmapset *modifiedCols)
5095 {
5096  Relation rel = relinfo->ri_RelationDesc;
5097  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5098  AfterTriggerEventData new_event;
5099  AfterTriggerSharedData new_shared;
5100  char relkind = relinfo->ri_RelationDesc->rd_rel->relkind;
5101  int tgtype_event;
5102  int tgtype_level;
5103  int i;
5104  Tuplestorestate *fdw_tuplestore = NULL;
5105 
5106  /*
5107  * Check state. We use a normal test not Assert because it is possible to
5108  * reach here in the wrong state given misconfigured RI triggers, in
5109  * particular deferring a cascade action trigger.
5110  */
5111  if (afterTriggers.query_depth < 0)
5112  elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
5113 
5114  /* Be sure we have enough space to record events at this query depth. */
5115  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5117 
5118  /*
5119  * If the relation has AFTER ... FOR EACH ROW triggers, capture rows into
5120  * transition tuplestores for this depth.
5121  */
5122  if (row_trigger)
5123  {
5124  if ((event == TRIGGER_EVENT_DELETE &&
5125  trigdesc->trig_delete_old_table) ||
5126  (event == TRIGGER_EVENT_UPDATE &&
5127  trigdesc->trig_update_old_table))
5128  {
5129  Tuplestorestate *old_tuplestore;
5130 
5131  Assert(oldtup != NULL);
5132  old_tuplestore =
5134  (afterTriggers.old_tuplestores);
5135  tuplestore_puttuple(old_tuplestore, oldtup);
5136  }
5137  if ((event == TRIGGER_EVENT_INSERT &&
5138  trigdesc->trig_insert_new_table) ||
5139  (event == TRIGGER_EVENT_UPDATE &&
5140  trigdesc->trig_update_new_table))
5141  {
5142  Tuplestorestate *new_tuplestore;
5143 
5144  Assert(newtup != NULL);
5145  new_tuplestore =
5147  (afterTriggers.new_tuplestores);
5148  tuplestore_puttuple(new_tuplestore, newtup);
5149  }
5150 
5151  /* If transition tables are the only reason we're here, return. */
5152  if ((event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
5153  (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
5154  (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
5155  return;
5156  }
5157 
5158  /*
5159  * Validate the event code and collect the associated tuple CTIDs.
5160  *
5161  * The event code will be used both as a bitmask and an array offset, so
5162  * validation is important to make sure we don't walk off the edge of our
5163  * arrays.
5164  */
5165  switch (event)
5166  {
5167  case TRIGGER_EVENT_INSERT:
5168  tgtype_event = TRIGGER_TYPE_INSERT;
5169  if (row_trigger)
5170  {
5171  Assert(oldtup == NULL);
5172  Assert(newtup != NULL);
5173  ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid1));
5174  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5175  }
5176  else
5177  {
5178  Assert(oldtup == NULL);
5179  Assert(newtup == NULL);
5180  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5181  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5182  }
5183  break;
5184  case TRIGGER_EVENT_DELETE:
5185  tgtype_event = TRIGGER_TYPE_DELETE;
5186  if (row_trigger)
5187  {
5188  Assert(oldtup != NULL);
5189  Assert(newtup == NULL);
5190  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5191  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5192  }
5193  else
5194  {
5195  Assert(oldtup == NULL);
5196  Assert(newtup == NULL);
5197  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5198  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5199  }
5200  break;
5201  case TRIGGER_EVENT_UPDATE:
5202  tgtype_event = TRIGGER_TYPE_UPDATE;
5203  if (row_trigger)
5204  {
5205  Assert(oldtup != NULL);
5206  Assert(newtup != NULL);
5207  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5208  ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid2));
5209  }
5210  else
5211  {
5212  Assert(oldtup == NULL);
5213  Assert(newtup == NULL);
5214  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5215  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5216  }
5217  break;
5219  tgtype_event = TRIGGER_TYPE_TRUNCATE;
5220  Assert(oldtup == NULL);
5221  Assert(newtup == NULL);
5222  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5223  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5224  break;
5225  default:
5226  elog(ERROR, "invalid after-trigger event code: %d", event);
5227  tgtype_event = 0; /* keep compiler quiet */
5228  break;
5229  }
5230 
5231  if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
5232  new_event.ate_flags = (row_trigger && event == TRIGGER_EVENT_UPDATE) ?
5234  /* else, we'll initialize ate_flags for each trigger */
5235 
5236  tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
5237 
5238  for (i = 0; i < trigdesc->numtriggers; i++)
5239  {
5240  Trigger *trigger = &trigdesc->triggers[i];
5241 
5242  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
5243  tgtype_level,
5245  tgtype_event))
5246  continue;
5247  if (!TriggerEnabled(estate, relinfo, trigger, event,
5248  modifiedCols, oldtup, newtup))
5249  continue;
5250 
5251  if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
5252  {
5253  if (fdw_tuplestore == NULL)
5254  {
5255  fdw_tuplestore =
5257  (afterTriggers.fdw_tuplestores);
5258  new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
5259  }
5260  else
5261  /* subsequent event for the same tuple */
5262  new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
5263  }
5264 
5265  /*
5266  * If the trigger is a foreign key enforcement trigger, there are
5267  * certain cases where we can skip queueing the event because we can
5268  * tell by inspection that the FK constraint will still pass.
5269  */
5270  if (TRIGGER_FIRED_BY_UPDATE(event))
5271  {
5272  switch (RI_FKey_trigger_type(trigger->tgfoid))
5273  {
5274  case RI_TRIGGER_PK:
5275  /* Update on trigger's PK table */
5276  if (!RI_FKey_pk_upd_check_required(trigger, rel,
5277  oldtup, newtup))
5278  {
5279  /* skip queuing this event */
5280  continue;
5281  }
5282  break;
5283 
5284  case RI_TRIGGER_FK:
5285  /* Update on trigger's FK table */
5286  if (!RI_FKey_fk_upd_check_required(trigger, rel,
5287  oldtup, newtup))
5288  {
5289  /* skip queuing this event */
5290  continue;
5291  }
5292  break;
5293 
5294  case RI_TRIGGER_NONE:
5295  /* Not an FK trigger */
5296  break;
5297  }
5298  }
5299 
5300  /*
5301  * If the trigger is a deferred unique constraint check trigger, only
5302  * queue it if the unique constraint was potentially violated, which
5303  * we know from index insertion time.
5304  */
5305  if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
5306  {
5307  if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
5308  continue; /* Uniqueness definitely not violated */
5309  }
5310 
5311  /*
5312  * Fill in event structure and add it to the current query's queue.
5313  */
5314  new_shared.ats_event =
5315  (event & TRIGGER_EVENT_OPMASK) |
5316  (row_trigger ? TRIGGER_EVENT_ROW : 0) |
5317  (trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
5318  (trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
5319  new_shared.ats_tgoid = trigger->tgoid;
5320  new_shared.ats_relid = RelationGetRelid(rel);
5321  new_shared.ats_firing_id = 0;
5322 
5323  afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth],
5324  &new_event, &new_shared);
5325  }
5326 
5327  /*
5328  * Finally, spool any foreign tuple(s). The tuplestore squashes them to
5329  * minimal tuples, so this loses any system columns. The executor lost
5330  * those columns before us, for an unrelated reason, so this is fine.
5331  */
5332  if (fdw_tuplestore)
5333  {
5334  if (oldtup != NULL)
5335  tuplestore_puttuple(fdw_tuplestore, oldtup);
5336  if (newtup != NULL)
5337  tuplestore_puttuple(fdw_tuplestore, newtup);
5338  }
5339 }
5340 
5341 Datum
5343 {
5345 }
void RemoveTriggerById(Oid trigOid)
Definition: trigger.c:1216
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:59
signed short int16
Definition: c.h:255
#define TRIGGER_EVENT_ROW
Definition: trigger.h:58
HeapTuple heap_copytuple(HeapTuple tuple)
Definition: heaptuple.c:608
#define NIL
Definition: pg_list.h:69
void ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
Definition: trigger.c:2395
uint32 CommandId
Definition: c.h:411
TriggerEvent ats_event
Definition: trigger.c:3273
#define Anum_pg_trigger_tgdeferrable
Definition: pg_trigger.h:88
void InstrStopNode(Instrumentation *instr, double nTuples)
Definition: instrument.c:80
Tuplestorestate ** old_tuplestores
Definition: trigger.c:3417
TupleTableSlot * ExecStoreTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree)
Definition: execTuples.c:320
#define FKCONSTR_MATCH_SIMPLE
Definition: parsenodes.h:2041
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
JunkFilter * ri_junkFilter
Definition: execnodes.h:388
Definition: fmgr.h:56
void * stringToNode(char *str)
Definition: read.c:38
Relation ri_RelationDesc
Definition: execnodes.h:374
#define TRIGGER_FOR_DELETE(type)
Definition: pg_trigger.h:135
struct AfterTriggerEventDataOneCtid AfterTriggerEventDataOneCtid
bool ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo, HeapTuple trigtuple)
Definition: trigger.c:2504
#define NameGetDatum(X)
Definition: postgres.h:601
int RI_FKey_trigger_type(Oid tgfoid)
Definition: ri_triggers.c:3704
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:200
Datum namein(PG_FUNCTION_ARGS)
Definition: name.c:46
TupleTableSlot * ExecInitExtraTupleSlot(EState *estate)
Definition: execTuples.c:852
#define AFTER_TRIGGER_FDW_REUSE
Definition: trigger.c:3263
#define TriggerOidIndexId
Definition: indexing.h:251
#define AFTER_TRIGGER_INITDEFERRED
Definition: trigger.h:68
Oid LookupExplicitNamespace(const char *nspname, bool missing_ok)
Definition: namespace.c:2810
int errhint(const char *fmt,...)
Definition: elog.c:987
#define VARDATA_ANY(PTR)
Definition: postgres.h:347
void ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
Definition: trigger.c:2343
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:499
#define GETSTRUCT(TUP)
Definition: htup_details.h:656
#define fastgetattr(tup, attnum, tupleDesc, isnull)
Definition: htup_details.h:719
MemoryContext TopTransactionContext
Definition: mcxt.c:48
CommandId es_output_cid
Definition: execnodes.h:418
static void test(void)
bool IsSystemRelation(Relation relation)
Definition: catalog.c:62
char * subname
Definition: parsenodes.h:2770
const char * quote_identifier(const char *ident)
Definition: ruleutils.c:10284
ItemPointerData ate_ctid2
Definition: trigger.c:3285
#define TRIGGER_TYPE_DELETE
Definition: pg_trigger.h:101
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:2962
#define RelationGetDescr(relation)
Definition: rel.h:429
#define TRIGGER_EVENT_DELETE
Definition: trigger.h:53
Oid GetUserId(void)
Definition: miscinit.c:283
SetConstraintStateData * SetConstraintState
Definition: trigger.c:3223
TupleTableSlot * es_trig_newtup_slot
Definition: execnodes.h:439
#define ObjectIdAttributeNumber
Definition: sysattr.h:22
Oid tgfoid
Definition: reltrigger.h:28
#define MIN_CHUNK_SIZE
TriggerFlags ate_flags
Definition: trigger.c:3283
HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, HeapUpdateFailureData *hufd)
Definition: heapam.c:4540
Oid RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, bool missing_ok, bool nowait, RangeVarGetRelidCallback callback, void *callback_arg)
Definition: namespace.c:218
#define AFTER_TRIGGER_DEFERRABLE
Definition: trigger.h:67
ResourceOwner TopTransactionResourceOwner
Definition: resowner.c:140
void ExecASUpdateTriggers(EState *estat