PostgreSQL Source Code (git master) — src/backend/commands/trigger.c
NOTE: This is a text extraction of the Doxygen-rendered page for trigger.c.
Each code line below is prefixed with its original source line number, and a
number of lines were lost during extraction (gaps in the line numbering).
Refer to the actual file in the PostgreSQL repository for the authoritative text.
1 /*-------------------------------------------------------------------------
2  *
3  * trigger.c
4  * PostgreSQL TRIGGERs support code.
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  * src/backend/commands/trigger.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "access/genam.h"
17 #include "access/heapam.h"
18 #include "access/sysattr.h"
19 #include "access/htup_details.h"
20 #include "access/xact.h"
21 #include "catalog/catalog.h"
22 #include "catalog/dependency.h"
23 #include "catalog/indexing.h"
24 #include "catalog/objectaccess.h"
25 #include "catalog/pg_constraint.h"
27 #include "catalog/pg_proc.h"
28 #include "catalog/pg_trigger.h"
29 #include "catalog/pg_type.h"
30 #include "commands/dbcommands.h"
31 #include "commands/defrem.h"
32 #include "commands/trigger.h"
33 #include "executor/executor.h"
34 #include "miscadmin.h"
35 #include "nodes/bitmapset.h"
36 #include "nodes/makefuncs.h"
37 #include "optimizer/clauses.h"
38 #include "optimizer/var.h"
39 #include "parser/parse_clause.h"
40 #include "parser/parse_collate.h"
41 #include "parser/parse_func.h"
42 #include "parser/parse_relation.h"
43 #include "parser/parsetree.h"
44 #include "pgstat.h"
45 #include "rewrite/rewriteManip.h"
46 #include "storage/bufmgr.h"
47 #include "storage/lmgr.h"
48 #include "tcop/utility.h"
49 #include "utils/acl.h"
50 #include "utils/builtins.h"
51 #include "utils/bytea.h"
52 #include "utils/fmgroids.h"
53 #include "utils/inval.h"
54 #include "utils/lsyscache.h"
55 #include "utils/memutils.h"
56 #include "utils/rel.h"
57 #include "utils/snapmgr.h"
58 #include "utils/syscache.h"
59 #include "utils/tqual.h"
60 #include "utils/tuplestore.h"
61 
62 
63 /* GUC variables */
65 
66 /* How many levels deep into trigger execution are we? */
67 static int MyTriggerDepth = 0;
68 
69 /*
70  * Note that similar macros also exist in executor/execMain.c. There does not
71  * appear to be any good header to put them into, given the structures that
72  * they use, so we let them be duplicated. Be sure to update all if one needs
73  * to be changed, however.
74  */
75 #define GetUpdatedColumns(relinfo, estate) \
76  (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
77 
78 /* Local function prototypes */
79 static void ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid);
80 static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
81 static HeapTuple GetTupleForTrigger(EState *estate,
82  EPQState *epqstate,
83  ResultRelInfo *relinfo,
84  ItemPointer tid,
85  LockTupleMode lockmode,
86  TupleTableSlot **newSlot);
87 static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
88  Trigger *trigger, TriggerEvent event,
89  Bitmapset *modifiedCols,
90  HeapTuple oldtup, HeapTuple newtup);
92  int tgindx,
93  FmgrInfo *finfo,
94  Instrumentation *instr,
95  MemoryContext per_tuple_context);
96 static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
97  int event, bool row_trigger,
98  HeapTuple oldtup, HeapTuple newtup,
99  List *recheckIndexes, Bitmapset *modifiedCols);
100 static void AfterTriggerEnlargeQueryState(void);
101 
102 
103 /*
104  * Create a trigger. Returns the address of the created trigger.
105  *
106  * queryString is the source text of the CREATE TRIGGER command.
107  * This must be supplied if a whenClause is specified, else it can be NULL.
108  *
109  * relOid, if nonzero, is the relation on which the trigger should be
110  * created. If zero, the name provided in the statement will be looked up.
111  *
112  * refRelOid, if nonzero, is the relation to which the constraint trigger
113  * refers. If zero, the constraint relation name provided in the statement
114  * will be looked up as needed.
115  *
116  * constraintOid, if nonzero, says that this trigger is being created
117  * internally to implement that constraint. A suitable pg_depend entry will
118  * be made to link the trigger to that constraint. constraintOid is zero when
119  * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
120  * TRIGGER, we build a pg_constraint entry internally.)
121  *
122  * indexOid, if nonzero, is the OID of an index associated with the constraint.
123  * We do nothing with this except store it into pg_trigger.tgconstrindid.
124  *
125  * If isInternal is true then this is an internally-generated trigger.
126  * This argument sets the tgisinternal field of the pg_trigger entry, and
127  * if TRUE causes us to modify the given trigger name to ensure uniqueness.
128  *
129  * When isInternal is not true we require ACL_TRIGGER permissions on the
130  * relation, as well as ACL_EXECUTE on the trigger function. For internal
131  * triggers the caller must apply any required permission checks.
132  *
133  * Note: can return InvalidObjectAddress if we decided to not create a trigger
134  * at all, but a foreign-key constraint. This is a kluge for backwards
135  * compatibility.
136  */
138 CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
139  Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
140  bool isInternal)
141 {
142  int16 tgtype;
143  int ncolumns;
144  int16 *columns;
145  int2vector *tgattr;
146  Node *whenClause;
147  List *whenRtable;
148  char *qual;
150  bool nulls[Natts_pg_trigger];
151  Relation rel;
152  AclResult aclresult;
153  Relation tgrel;
154  SysScanDesc tgscan;
155  ScanKeyData key;
156  Relation pgrel;
157  HeapTuple tuple;
158  Oid fargtypes[1]; /* dummy */
159  Oid funcoid;
160  Oid funcrettype;
161  Oid trigoid;
162  char internaltrigname[NAMEDATALEN];
163  char *trigname;
164  Oid constrrelid = InvalidOid;
165  ObjectAddress myself,
166  referenced;
167  char *oldtablename = NULL;
168  char *newtablename = NULL;
169 
170  if (OidIsValid(relOid))
171  rel = heap_open(relOid, ShareRowExclusiveLock);
172  else
174 
175  /*
176  * Triggers must be on tables or views, and there are additional
177  * relation-type-specific restrictions.
178  */
179  if (rel->rd_rel->relkind == RELKIND_RELATION ||
180  rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
181  {
182  /* Tables can't have INSTEAD OF triggers */
183  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
184  stmt->timing != TRIGGER_TYPE_AFTER)
185  ereport(ERROR,
186  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
187  errmsg("\"%s\" is a table",
189  errdetail("Tables cannot have INSTEAD OF triggers.")));
190  /* Disallow ROW triggers on partitioned tables */
191  if (stmt->row && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
192  ereport(ERROR,
193  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
194  errmsg("\"%s\" is a partitioned table",
196  errdetail("Partitioned tables cannot have ROW triggers.")));
197  }
198  else if (rel->rd_rel->relkind == RELKIND_VIEW)
199  {
200  /*
201  * Views can have INSTEAD OF triggers (which we check below are
202  * row-level), or statement-level BEFORE/AFTER triggers.
203  */
204  if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
205  ereport(ERROR,
206  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
207  errmsg("\"%s\" is a view",
209  errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
210  /* Disallow TRUNCATE triggers on VIEWs */
211  if (TRIGGER_FOR_TRUNCATE(stmt->events))
212  ereport(ERROR,
213  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
214  errmsg("\"%s\" is a view",
216  errdetail("Views cannot have TRUNCATE triggers.")));
217  }
218  else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
219  {
220  if (stmt->timing != TRIGGER_TYPE_BEFORE &&
221  stmt->timing != TRIGGER_TYPE_AFTER)
222  ereport(ERROR,
223  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
224  errmsg("\"%s\" is a foreign table",
226  errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
227 
228  if (TRIGGER_FOR_TRUNCATE(stmt->events))
229  ereport(ERROR,
230  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
231  errmsg("\"%s\" is a foreign table",
233  errdetail("Foreign tables cannot have TRUNCATE triggers.")));
234 
235  if (stmt->isconstraint)
236  ereport(ERROR,
237  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
238  errmsg("\"%s\" is a foreign table",
240  errdetail("Foreign tables cannot have constraint triggers.")));
241  }
242  else
243  ereport(ERROR,
244  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
245  errmsg("\"%s\" is not a table or view",
246  RelationGetRelationName(rel))));
247 
249  ereport(ERROR,
250  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
251  errmsg("permission denied: \"%s\" is a system catalog",
252  RelationGetRelationName(rel))));
253 
254  if (stmt->isconstraint)
255  {
256  /*
257  * We must take a lock on the target relation to protect against
258  * concurrent drop. It's not clear that AccessShareLock is strong
259  * enough, but we certainly need at least that much... otherwise, we
260  * might end up creating a pg_constraint entry referencing a
261  * nonexistent table.
262  */
263  if (OidIsValid(refRelOid))
264  {
265  LockRelationOid(refRelOid, AccessShareLock);
266  constrrelid = refRelOid;
267  }
268  else if (stmt->constrrel != NULL)
269  constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
270  false);
271  }
272 
273  /* permission checks */
274  if (!isInternal)
275  {
276  aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
277  ACL_TRIGGER);
278  if (aclresult != ACLCHECK_OK)
279  aclcheck_error(aclresult, ACL_KIND_CLASS,
281 
282  if (OidIsValid(constrrelid))
283  {
284  aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
285  ACL_TRIGGER);
286  if (aclresult != ACLCHECK_OK)
287  aclcheck_error(aclresult, ACL_KIND_CLASS,
288  get_rel_name(constrrelid));
289  }
290  }
291 
292  /* Compute tgtype */
293  TRIGGER_CLEAR_TYPE(tgtype);
294  if (stmt->row)
295  TRIGGER_SETT_ROW(tgtype);
296  tgtype |= stmt->timing;
297  tgtype |= stmt->events;
298 
299  /* Disallow ROW-level TRUNCATE triggers */
300  if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
301  ereport(ERROR,
302  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
303  errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
304 
305  /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
306  if (TRIGGER_FOR_INSTEAD(tgtype))
307  {
308  if (!TRIGGER_FOR_ROW(tgtype))
309  ereport(ERROR,
310  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
311  errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
312  if (stmt->whenClause)
313  ereport(ERROR,
314  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
315  errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
316  if (stmt->columns != NIL)
317  ereport(ERROR,
318  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
319  errmsg("INSTEAD OF triggers cannot have column lists")));
320  }
321 
322  /*
323  * We don't yet support naming ROW transition variables, but the parser
324  * recognizes the syntax so we can give a nicer message here.
325  *
326  * Per standard, REFERENCING TABLE names are only allowed on AFTER
327  * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
328  * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
329  * only allowed once. Per standard, OLD may not be specified when
330  * creating a trigger only for INSERT, and NEW may not be specified when
331  * creating a trigger only for DELETE.
332  *
333  * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
334  * reference both ROW and TABLE transition data.
335  */
336  if (stmt->transitionRels != NIL)
337  {
338  List *varList = stmt->transitionRels;
339  ListCell *lc;
340 
341  foreach(lc, varList)
342  {
344 
345  if (!(tt->isTable))
346  ereport(ERROR,
347  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
348  errmsg("ROW variable naming in the REFERENCING clause is not supported"),
349  errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
350 
351  /*
352  * Because of the above test, we omit further ROW-related testing
353  * below. If we later allow naming OLD and NEW ROW variables,
354  * adjustments will be needed below.
355  */
356 
357  if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
358  ereport(ERROR,
359  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
360  errmsg("\"%s\" is a partitioned table",
362  errdetail("Triggers on partitioned tables cannot have transition tables.")));
363 
364  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
365  ereport(ERROR,
366  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
367  errmsg("\"%s\" is a foreign table",
369  errdetail("Triggers on foreign tables cannot have transition tables.")));
370 
371  if (rel->rd_rel->relkind == RELKIND_VIEW)
372  ereport(ERROR,
373  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
374  errmsg("\"%s\" is a view",
376  errdetail("Triggers on views cannot have transition tables.")));
377 
378  if (stmt->timing != TRIGGER_TYPE_AFTER)
379  ereport(ERROR,
380  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
381  errmsg("transition table name can only be specified for an AFTER trigger")));
382 
383  if (TRIGGER_FOR_TRUNCATE(tgtype))
384  ereport(ERROR,
385  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
386  errmsg("TRUNCATE triggers with transition tables are not supported")));
387 
388  if (tt->isNew)
389  {
390  if (!(TRIGGER_FOR_INSERT(tgtype) ||
391  TRIGGER_FOR_UPDATE(tgtype)))
392  ereport(ERROR,
393  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
394  errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
395 
396  if (newtablename != NULL)
397  ereport(ERROR,
398  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
399  errmsg("NEW TABLE cannot be specified multiple times")));
400 
401  newtablename = tt->name;
402  }
403  else
404  {
405  if (!(TRIGGER_FOR_DELETE(tgtype) ||
406  TRIGGER_FOR_UPDATE(tgtype)))
407  ereport(ERROR,
408  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
409  errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
410 
411  if (oldtablename != NULL)
412  ereport(ERROR,
413  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
414  errmsg("OLD TABLE cannot be specified multiple times")));
415 
416  oldtablename = tt->name;
417  }
418  }
419 
420  if (newtablename != NULL && oldtablename != NULL &&
421  strcmp(newtablename, oldtablename) == 0)
422  ereport(ERROR,
423  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
424  errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
425  }
426 
427  /*
428  * Parse the WHEN clause, if any
429  */
430  if (stmt->whenClause)
431  {
432  ParseState *pstate;
433  RangeTblEntry *rte;
434  List *varList;
435  ListCell *lc;
436 
437  /* Set up a pstate to parse with */
438  pstate = make_parsestate(NULL);
439  pstate->p_sourcetext = queryString;
440 
441  /*
442  * Set up RTEs for OLD and NEW references.
443  *
444  * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
445  */
446  rte = addRangeTableEntryForRelation(pstate, rel,
447  makeAlias("old", NIL),
448  false, false);
449  addRTEtoQuery(pstate, rte, false, true, true);
450  rte = addRangeTableEntryForRelation(pstate, rel,
451  makeAlias("new", NIL),
452  false, false);
453  addRTEtoQuery(pstate, rte, false, true, true);
454 
455  /* Transform expression. Copy to be sure we don't modify original */
456  whenClause = transformWhereClause(pstate,
457  copyObject(stmt->whenClause),
459  "WHEN");
460  /* we have to fix its collations too */
461  assign_expr_collations(pstate, whenClause);
462 
463  /*
464  * Check for disallowed references to OLD/NEW.
465  *
466  * NB: pull_var_clause is okay here only because we don't allow
467  * subselects in WHEN clauses; it would fail to examine the contents
468  * of subselects.
469  */
470  varList = pull_var_clause(whenClause, 0);
471  foreach(lc, varList)
472  {
473  Var *var = (Var *) lfirst(lc);
474 
475  switch (var->varno)
476  {
477  case PRS2_OLD_VARNO:
478  if (!TRIGGER_FOR_ROW(tgtype))
479  ereport(ERROR,
480  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
481  errmsg("statement trigger's WHEN condition cannot reference column values"),
482  parser_errposition(pstate, var->location)));
483  if (TRIGGER_FOR_INSERT(tgtype))
484  ereport(ERROR,
485  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
486  errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
487  parser_errposition(pstate, var->location)));
488  /* system columns are okay here */
489  break;
490  case PRS2_NEW_VARNO:
491  if (!TRIGGER_FOR_ROW(tgtype))
492  ereport(ERROR,
493  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
494  errmsg("statement trigger's WHEN condition cannot reference column values"),
495  parser_errposition(pstate, var->location)));
496  if (TRIGGER_FOR_DELETE(tgtype))
497  ereport(ERROR,
498  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
499  errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
500  parser_errposition(pstate, var->location)));
501  if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
502  ereport(ERROR,
503  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
504  errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
505  parser_errposition(pstate, var->location)));
506  break;
507  default:
508  /* can't happen without add_missing_from, so just elog */
509  elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
510  break;
511  }
512  }
513 
514  /* we'll need the rtable for recordDependencyOnExpr */
515  whenRtable = pstate->p_rtable;
516 
517  qual = nodeToString(whenClause);
518 
519  free_parsestate(pstate);
520  }
521  else
522  {
523  whenClause = NULL;
524  whenRtable = NIL;
525  qual = NULL;
526  }
527 
528  /*
529  * Find and validate the trigger function.
530  */
531  funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false);
532  if (!isInternal)
533  {
534  aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
535  if (aclresult != ACLCHECK_OK)
536  aclcheck_error(aclresult, ACL_KIND_PROC,
537  NameListToString(stmt->funcname));
538  }
539  funcrettype = get_func_rettype(funcoid);
540  if (funcrettype != TRIGGEROID)
541  {
542  /*
543  * We allow OPAQUE just so we can load old dump files. When we see a
544  * trigger function declared OPAQUE, change it to TRIGGER.
545  */
546  if (funcrettype == OPAQUEOID)
547  {
549  (errmsg("changing return type of function %s from %s to %s",
550  NameListToString(stmt->funcname),
551  "opaque", "trigger")));
553  }
554  else
555  ereport(ERROR,
556  (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
557  errmsg("function %s must return type %s",
558  NameListToString(stmt->funcname), "trigger")));
559  }
560 
561  /*
562  * If the command is a user-entered CREATE CONSTRAINT TRIGGER command that
563  * references one of the built-in RI_FKey trigger functions, assume it is
564  * from a dump of a pre-7.3 foreign key constraint, and take steps to
565  * convert this legacy representation into a regular foreign key
566  * constraint. Ugly, but necessary for loading old dump files.
567  */
568  if (stmt->isconstraint && !isInternal &&
569  list_length(stmt->args) >= 6 &&
570  (list_length(stmt->args) % 2) == 0 &&
572  {
573  /* Keep lock on target rel until end of xact */
574  heap_close(rel, NoLock);
575 
576  ConvertTriggerToFK(stmt, funcoid);
577 
578  return InvalidObjectAddress;
579  }
580 
581  /*
582  * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
583  * corresponding pg_constraint entry.
584  */
585  if (stmt->isconstraint && !OidIsValid(constraintOid))
586  {
587  /* Internal callers should have made their own constraints */
588  Assert(!isInternal);
589  constraintOid = CreateConstraintEntry(stmt->trigname,
592  stmt->deferrable,
593  stmt->initdeferred,
594  true,
595  RelationGetRelid(rel),
596  NULL, /* no conkey */
597  0,
598  InvalidOid, /* no domain */
599  InvalidOid, /* no index */
600  InvalidOid, /* no foreign key */
601  NULL,
602  NULL,
603  NULL,
604  NULL,
605  0,
606  ' ',
607  ' ',
608  ' ',
609  NULL, /* no exclusion */
610  NULL, /* no check constraint */
611  NULL,
612  NULL,
613  true, /* islocal */
614  0, /* inhcount */
615  true, /* isnoinherit */
616  isInternal); /* is_internal */
617  }
618 
619  /*
620  * Generate the trigger's OID now, so that we can use it in the name if
621  * needed.
622  */
624 
625  trigoid = GetNewOid(tgrel);
626 
627  /*
628  * If trigger is internally generated, modify the provided trigger name to
629  * ensure uniqueness by appending the trigger OID. (Callers will usually
630  * supply a simple constant trigger name in these cases.)
631  */
632  if (isInternal)
633  {
634  snprintf(internaltrigname, sizeof(internaltrigname),
635  "%s_%u", stmt->trigname, trigoid);
636  trigname = internaltrigname;
637  }
638  else
639  {
640  /* user-defined trigger; use the specified trigger name as-is */
641  trigname = stmt->trigname;
642  }
643 
644  /*
645  * Scan pg_trigger for existing triggers on relation. We do this only to
646  * give a nice error message if there's already a trigger of the same
647  * name. (The unique index on tgrelid/tgname would complain anyway.) We
648  * can skip this for internally generated triggers, since the name
649  * modification above should be sufficient.
650  *
651  * NOTE that this is cool only because we have ShareRowExclusiveLock on
652  * the relation, so the trigger set won't be changing underneath us.
653  */
654  if (!isInternal)
655  {
656  ScanKeyInit(&key,
658  BTEqualStrategyNumber, F_OIDEQ,
660  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
661  NULL, 1, &key);
662  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
663  {
664  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
665 
666  if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
667  ereport(ERROR,
669  errmsg("trigger \"%s\" for relation \"%s\" already exists",
670  trigname, RelationGetRelationName(rel))));
671  }
672  systable_endscan(tgscan);
673  }
674 
675  /*
676  * Build the new pg_trigger tuple.
677  */
678  memset(nulls, false, sizeof(nulls));
679 
682  CStringGetDatum(trigname));
683  values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
684  values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
686  values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
687  values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
688  values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
689  values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
692 
693  if (stmt->args)
694  {
695  ListCell *le;
696  char *args;
697  int16 nargs = list_length(stmt->args);
698  int len = 0;
699 
700  foreach(le, stmt->args)
701  {
702  char *ar = strVal(lfirst(le));
703 
704  len += strlen(ar) + 4;
705  for (; *ar; ar++)
706  {
707  if (*ar == '\\')
708  len++;
709  }
710  }
711  args = (char *) palloc(len + 1);
712  args[0] = '\0';
713  foreach(le, stmt->args)
714  {
715  char *s = strVal(lfirst(le));
716  char *d = args + strlen(args);
717 
718  while (*s)
719  {
720  if (*s == '\\')
721  *d++ = '\\';
722  *d++ = *s++;
723  }
724  strcpy(d, "\\000");
725  }
726  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
728  CStringGetDatum(args));
729  }
730  else
731  {
732  values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
734  CStringGetDatum(""));
735  }
736 
737  /* build column number array if it's a column-specific trigger */
738  ncolumns = list_length(stmt->columns);
739  if (ncolumns == 0)
740  columns = NULL;
741  else
742  {
743  ListCell *cell;
744  int i = 0;
745 
746  columns = (int16 *) palloc(ncolumns * sizeof(int16));
747  foreach(cell, stmt->columns)
748  {
749  char *name = strVal(lfirst(cell));
750  int16 attnum;
751  int j;
752 
753  /* Lookup column name. System columns are not allowed */
754  attnum = attnameAttNum(rel, name, false);
755  if (attnum == InvalidAttrNumber)
756  ereport(ERROR,
757  (errcode(ERRCODE_UNDEFINED_COLUMN),
758  errmsg("column \"%s\" of relation \"%s\" does not exist",
759  name, RelationGetRelationName(rel))));
760 
761  /* Check for duplicates */
762  for (j = i - 1; j >= 0; j--)
763  {
764  if (columns[j] == attnum)
765  ereport(ERROR,
766  (errcode(ERRCODE_DUPLICATE_COLUMN),
767  errmsg("column \"%s\" specified more than once",
768  name)));
769  }
770 
771  columns[i++] = attnum;
772  }
773  }
774  tgattr = buildint2vector(columns, ncolumns);
775  values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
776 
777  /* set tgqual if trigger has WHEN clause */
778  if (qual)
779  values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
780  else
781  nulls[Anum_pg_trigger_tgqual - 1] = true;
782 
783  if (oldtablename)
785  CStringGetDatum(oldtablename));
786  else
787  nulls[Anum_pg_trigger_tgoldtable - 1] = true;
788  if (newtablename)
790  CStringGetDatum(newtablename));
791  else
792  nulls[Anum_pg_trigger_tgnewtable - 1] = true;
793 
794  tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
795 
796  /* force tuple to have the desired OID */
797  HeapTupleSetOid(tuple, trigoid);
798 
799  /*
800  * Insert tuple into pg_trigger.
801  */
802  CatalogTupleInsert(tgrel, tuple);
803 
804  heap_freetuple(tuple);
806 
810  if (oldtablename)
812  if (newtablename)
814 
815  /*
816  * Update relation's pg_class entry. Crucial side-effect: other backends
817  * (and this one too!) are sent SI message to make them rebuild relcache
818  * entries.
819  */
821  tuple = SearchSysCacheCopy1(RELOID,
823  if (!HeapTupleIsValid(tuple))
824  elog(ERROR, "cache lookup failed for relation %u",
825  RelationGetRelid(rel));
826 
827  ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
828 
829  CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
830 
831  heap_freetuple(tuple);
833 
834  /*
835  * We used to try to update the rel's relcache entry here, but that's
836  * fairly pointless since it will happen as a byproduct of the upcoming
837  * CommandCounterIncrement...
838  */
839 
840  /*
841  * Record dependencies for trigger. Always place a normal dependency on
842  * the function.
843  */
844  myself.classId = TriggerRelationId;
845  myself.objectId = trigoid;
846  myself.objectSubId = 0;
847 
848  referenced.classId = ProcedureRelationId;
849  referenced.objectId = funcoid;
850  referenced.objectSubId = 0;
851  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
852 
853  if (isInternal && OidIsValid(constraintOid))
854  {
855  /*
856  * Internally-generated trigger for a constraint, so make it an
857  * internal dependency of the constraint. We can skip depending on
858  * the relation(s), as there'll be an indirect dependency via the
859  * constraint.
860  */
861  referenced.classId = ConstraintRelationId;
862  referenced.objectId = constraintOid;
863  referenced.objectSubId = 0;
864  recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
865  }
866  else
867  {
868  /*
869  * User CREATE TRIGGER, so place dependencies. We make trigger be
870  * auto-dropped if its relation is dropped or if the FK relation is
871  * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
872  */
873  referenced.classId = RelationRelationId;
874  referenced.objectId = RelationGetRelid(rel);
875  referenced.objectSubId = 0;
876  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
877  if (OidIsValid(constrrelid))
878  {
879  referenced.classId = RelationRelationId;
880  referenced.objectId = constrrelid;
881  referenced.objectSubId = 0;
882  recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
883  }
884  /* Not possible to have an index dependency in this case */
885  Assert(!OidIsValid(indexOid));
886 
887  /*
888  * If it's a user-specified constraint trigger, make the constraint
889  * internally dependent on the trigger instead of vice versa.
890  */
891  if (OidIsValid(constraintOid))
892  {
893  referenced.classId = ConstraintRelationId;
894  referenced.objectId = constraintOid;
895  referenced.objectSubId = 0;
896  recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
897  }
898  }
899 
900  /* If column-specific trigger, add normal dependencies on columns */
901  if (columns != NULL)
902  {
903  int i;
904 
905  referenced.classId = RelationRelationId;
906  referenced.objectId = RelationGetRelid(rel);
907  for (i = 0; i < ncolumns; i++)
908  {
909  referenced.objectSubId = columns[i];
910  recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
911  }
912  }
913 
914  /*
915  * If it has a WHEN clause, add dependencies on objects mentioned in the
916  * expression (eg, functions, as well as any columns used).
917  */
918  if (whenClause != NULL)
919  recordDependencyOnExpr(&myself, whenClause, whenRtable,
921 
922  /* Post creation hook for new trigger */
924  isInternal);
925 
926  /* Keep lock on target rel until end of xact */
927  heap_close(rel, NoLock);
928 
929  return myself;
930 }
931 
932 
933 /*
934  * Convert legacy (pre-7.3) CREATE CONSTRAINT TRIGGER commands into
935  * full-fledged foreign key constraints.
936  *
937  * The conversion is complex because a pre-7.3 foreign key involved three
938  * separate triggers, which were reported separately in dumps. While the
939  * single trigger on the referencing table adds no new information, we need
940  * to know the trigger functions of both of the triggers on the referenced
941  * table to build the constraint declaration. Also, due to lack of proper
942  * dependency checking pre-7.3, it is possible that the source database had
943  * an incomplete set of triggers resulting in an only partially enforced
944  * FK constraint. (This would happen if one of the tables had been dropped
945  * and re-created, but only if the DB had been affected by a 7.0 pg_dump bug
946  * that caused loss of tgconstrrelid information.) We choose to translate to
947  * an FK constraint only when we've seen all three triggers of a set. This is
948  * implemented by storing unmatched items in a list in TopMemoryContext.
949  * We match triggers together by comparing the trigger arguments (which
950  * include constraint name, table and column names, so should be good enough).
951  */
952 typedef struct
953 {
954  List *args; /* list of (T_String) Values or NIL */
955  Oid funcoids[3]; /* OIDs of trigger functions */
956  /* The three function OIDs are stored in the order update, delete, child */
958 
/*
 * Convert a legacy (pre-7.3 pg_dump style) set of CREATE CONSTRAINT TRIGGER
 * commands into a real FOREIGN KEY constraint.  Each call handles one
 * trigger of a three-trigger set; when all three have been seen, an
 * equivalent ALTER TABLE ... ADD FOREIGN KEY command is built and executed
 * via ProcessUtility.  Incomplete sets are remembered in a static list and
 * reported with NOTICEs.
 *
 * NOTE(review): extraction dropped the signature line (original line 960,
 * presumably "ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid)") as
 * well as several interior lines: the StringInfoData buf declaration
 * (line 976), the appendStringInfoString calls that emit each column name
 * (lines 1019/1030), the MemoryContextSwitchTo(TopMemoryContext) call
 * (line 1079), the makeNode calls for atstmt/atcmd (lines 1099-1100), and
 * the fk_upd_action / fk_del_action assignments inside the two switches
 * (lines 1148-1182, odd lines).  Verify against the full file before
 * modifying.
 */
959 static void
 961 {
 962  static List *info_list = NIL;
 963 
 964  static const char *const funcdescr[3] = {
 965  gettext_noop("Found referenced table's UPDATE trigger."),
 966  gettext_noop("Found referenced table's DELETE trigger."),
 967  gettext_noop("Found referencing table's trigger.")
 968  };
 969 
 970  char *constr_name;
 971  char *fk_table_name;
 972  char *pk_table_name;
 973  char fk_matchtype = FKCONSTR_MATCH_SIMPLE;
 974  List *fk_attrs = NIL;
 975  List *pk_attrs = NIL;
 977  int funcnum;
 978  OldTriggerInfo *info = NULL;
 979  ListCell *l;
 980  int i;
 981 
 982  /* Parse out the trigger arguments */
 983  constr_name = strVal(linitial(stmt->args));
 984  fk_table_name = strVal(lsecond(stmt->args));
 985  pk_table_name = strVal(lthird(stmt->args));
 986  i = 0;
 987  foreach(l, stmt->args)
 988  {
 989  Value *arg = (Value *) lfirst(l);
 990 
 991  i++;
 992  if (i < 4) /* skip constraint and table names */
 993  continue;
 994  if (i == 4) /* handle match type */
 995  {
 996  if (strcmp(strVal(arg), "FULL") == 0)
 997  fk_matchtype = FKCONSTR_MATCH_FULL;
 998  else
 999  fk_matchtype = FKCONSTR_MATCH_SIMPLE;
 1000  continue;
 1001  }
 /* Remaining args alternate FK column, PK column (odd i = FK side). */
 1002  if (i % 2)
 1003  fk_attrs = lappend(fk_attrs, arg);
 1004  else
 1005  pk_attrs = lappend(pk_attrs, arg);
 1006  }
 1007 
 1008  /* Prepare description of constraint for use in messages */
 1009  initStringInfo(&buf);
 1010  appendStringInfo(&buf, "FOREIGN KEY %s(",
 1011  quote_identifier(fk_table_name));
 1012  i = 0;
 1013  foreach(l, fk_attrs)
 1014  {
 1015  Value *arg = (Value *) lfirst(l);
 1016 
 1017  if (i++ > 0)
 1018  appendStringInfoChar(&buf, ',');
 1020  }
 1021  appendStringInfo(&buf, ") REFERENCES %s(",
 1022  quote_identifier(pk_table_name));
 1023  i = 0;
 1024  foreach(l, pk_attrs)
 1025  {
 1026  Value *arg = (Value *) lfirst(l);
 1027 
 1028  if (i++ > 0)
 1029  appendStringInfoChar(&buf, ',');
 1031  }
 1032  appendStringInfoChar(&buf, ')');
 1033 
 1034  /* Identify class of trigger --- update, delete, or referencing-table */
 1035  switch (funcoid)
 1036  {
 1037  case F_RI_FKEY_CASCADE_UPD:
 1038  case F_RI_FKEY_RESTRICT_UPD:
 1039  case F_RI_FKEY_SETNULL_UPD:
 1040  case F_RI_FKEY_SETDEFAULT_UPD:
 1041  case F_RI_FKEY_NOACTION_UPD:
 1042  funcnum = 0;
 1043  break;
 1044 
 1045  case F_RI_FKEY_CASCADE_DEL:
 1046  case F_RI_FKEY_RESTRICT_DEL:
 1047  case F_RI_FKEY_SETNULL_DEL:
 1048  case F_RI_FKEY_SETDEFAULT_DEL:
 1049  case F_RI_FKEY_NOACTION_DEL:
 1050  funcnum = 1;
 1051  break;
 1052 
 1053  default:
 1054  funcnum = 2;
 1055  break;
 1056  }
 1057 
 1058  /* See if we have a match to this trigger */
 1059  foreach(l, info_list)
 1060  {
 1061  info = (OldTriggerInfo *) lfirst(l);
 /* Match on identical argument lists; slot must not be filled already. */
 1062  if (info->funcoids[funcnum] == InvalidOid &&
 1063  equal(info->args, stmt->args))
 1064  {
 1065  info->funcoids[funcnum] = funcoid;
 1066  break;
 1067  }
 1068  }
 1069 
 1070  if (l == NULL)
 1071  {
 1072  /* First trigger of set, so create a new list entry */
 1073  MemoryContext oldContext;
 1074 
 1075  ereport(NOTICE,
 1076  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
 1077  constr_name, buf.data),
 1078  errdetail_internal("%s", _(funcdescr[funcnum]))));
 /* Entry must outlive this command; stored in TopMemoryContext. */
 1080  info = (OldTriggerInfo *) palloc0(sizeof(OldTriggerInfo));
 1081  info->args = copyObject(stmt->args);
 1082  info->funcoids[funcnum] = funcoid;
 1083  info_list = lappend(info_list, info);
 1084  MemoryContextSwitchTo(oldContext);
 1085  }
 1086  else if (info->funcoids[0] == InvalidOid ||
 1087  info->funcoids[1] == InvalidOid ||
 1088  info->funcoids[2] == InvalidOid)
 1089  {
 1090  /* Second trigger of set */
 1091  ereport(NOTICE,
 1092  (errmsg("ignoring incomplete trigger group for constraint \"%s\" %s",
 1093  constr_name, buf.data),
 1094  errdetail_internal("%s", _(funcdescr[funcnum]))));
 1095  }
 1096  else
 1097  {
 1098  /* OK, we have a set, so make the FK constraint ALTER TABLE cmd */
 1101  Constraint *fkcon = makeNode(Constraint);
 1102  PlannedStmt *wrapper = makeNode(PlannedStmt);
 1103 
 1104  ereport(NOTICE,
 1105  (errmsg("converting trigger group into constraint \"%s\" %s",
 1106  constr_name, buf.data),
 1107  errdetail_internal("%s", _(funcdescr[funcnum]))));
 1108  fkcon->contype = CONSTR_FOREIGN;
 1109  fkcon->location = -1;
 1110  if (funcnum == 2)
 1111  {
 1112  /* This trigger is on the FK table */
 1113  atstmt->relation = stmt->relation;
 1114  if (stmt->constrrel)
 1115  fkcon->pktable = stmt->constrrel;
 1116  else
 1117  {
 1118  /* Work around ancient pg_dump bug that omitted constrrel */
 1119  fkcon->pktable = makeRangeVar(NULL, pk_table_name, -1);
 1120  }
 1121  }
 1122  else
 1123  {
 1124  /* This trigger is on the PK table */
 1125  fkcon->pktable = stmt->relation;
 1126  if (stmt->constrrel)
 1127  atstmt->relation = stmt->constrrel;
 1128  else
 1129  {
 1130  /* Work around ancient pg_dump bug that omitted constrrel */
 1131  atstmt->relation = makeRangeVar(NULL, fk_table_name, -1);
 1132  }
 1133  }
 1134  atstmt->cmds = list_make1(atcmd);
 1135  atstmt->relkind = OBJECT_TABLE;
 1136  atcmd->subtype = AT_AddConstraint;
 1137  atcmd->def = (Node *) fkcon;
 /* "<unnamed>" is the placeholder old pg_dump used for nameless FKs. */
 1138  if (strcmp(constr_name, "<unnamed>") == 0)
 1139  fkcon->conname = NULL;
 1140  else
 1141  fkcon->conname = constr_name;
 1142  fkcon->fk_attrs = fk_attrs;
 1143  fkcon->pk_attrs = pk_attrs;
 1144  fkcon->fk_matchtype = fk_matchtype;
 /* Map the recorded UPDATE-trigger function onto an ON UPDATE action. */
 1145  switch (info->funcoids[0])
 1146  {
 1147  case F_RI_FKEY_NOACTION_UPD:
 1149  break;
 1150  case F_RI_FKEY_CASCADE_UPD:
 1152  break;
 1153  case F_RI_FKEY_RESTRICT_UPD:
 1155  break;
 1156  case F_RI_FKEY_SETNULL_UPD:
 1158  break;
 1159  case F_RI_FKEY_SETDEFAULT_UPD:
 1161  break;
 1162  default:
 1163  /* can't get here because of earlier checks */
 1164  elog(ERROR, "confused about RI update function");
 1165  }
 /* Likewise map the DELETE-trigger function onto an ON DELETE action. */
 1166  switch (info->funcoids[1])
 1167  {
 1168  case F_RI_FKEY_NOACTION_DEL:
 1170  break;
 1171  case F_RI_FKEY_CASCADE_DEL:
 1173  break;
 1174  case F_RI_FKEY_RESTRICT_DEL:
 1176  break;
 1177  case F_RI_FKEY_SETNULL_DEL:
 1179  break;
 1180  case F_RI_FKEY_SETDEFAULT_DEL:
 1182  break;
 1183  default:
 1184  /* can't get here because of earlier checks */
 1185  elog(ERROR, "confused about RI delete function");
 1186  }
 1187  fkcon->deferrable = stmt->deferrable;
 1188  fkcon->initdeferred = stmt->initdeferred;
 1189  fkcon->skip_validation = false;
 1190  fkcon->initially_valid = true;
 1191 
 1192  /* finally, wrap it in a dummy PlannedStmt */
 1193  wrapper->commandType = CMD_UTILITY;
 1194  wrapper->canSetTag = false;
 1195  wrapper->utilityStmt = (Node *) atstmt;
 1196  wrapper->stmt_location = -1;
 1197  wrapper->stmt_len = -1;
 1198 
 1199  /* ... and execute it */
 1200  ProcessUtility(wrapper,
 1201  "(generated ALTER TABLE ADD FOREIGN KEY command)",
 1203  None_Receiver, NULL);
 1204 
 1205  /* Remove the matched item from the list */
 1206  info_list = list_delete_ptr(info_list, info);
 1207  pfree(info);
 1208  /* We leak the copied args ... not worth worrying about */
 1209  }
 1210 }
1211 
1212 /*
1213  * Guts of trigger deletion.
1214  */
/*
 * RemoveTriggerById - guts of trigger deletion: remove the pg_trigger row
 * for the given trigger OID, after exclusive-locking the owning relation,
 * then force a relcache inval so all backends see the change.
 * NOTE(review): extraction dropped the signature line (original line 1216,
 * presumably "RemoveTriggerById(Oid trigOid)"), the heap_open of pg_trigger
 * (line 1225), the ScanKey attribute argument (line 1231), the
 * allowSystemTableMods/IsSystemClass check condition (line 1258), and the
 * CacheInvalidateRelcache call (line 1281).
 */
1215 void
 1217 {
 1218  Relation tgrel;
 1219  SysScanDesc tgscan;
 1220  ScanKeyData skey[1];
 1221  HeapTuple tup;
 1222  Oid relid;
 1223  Relation rel;
 1224 
 1226 
 1227  /*
 1228  * Find the trigger to delete.
 1229  */
 1230  ScanKeyInit(&skey[0],
 1232  BTEqualStrategyNumber, F_OIDEQ,
 1233  ObjectIdGetDatum(trigOid));
 1234 
 1235  tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
 1236  NULL, 1, skey);
 1237 
 1238  tup = systable_getnext(tgscan);
 1239  if (!HeapTupleIsValid(tup))
 1240  elog(ERROR, "could not find tuple for trigger %u", trigOid);
 1241 
 1242  /*
 1243  * Open and exclusive-lock the relation the trigger belongs to.
 1244  */
 1245  relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
 1246 
 1247  rel = heap_open(relid, AccessExclusiveLock);
 1248 
 /* Triggers are only allowed on these four relkinds. */
 1249  if (rel->rd_rel->relkind != RELKIND_RELATION &&
 1250  rel->rd_rel->relkind != RELKIND_VIEW &&
 1251  rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
 1252  rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
 1253  ereport(ERROR,
 1254  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
 1255  errmsg("\"%s\" is not a table, view, or foreign table",
 1256  RelationGetRelationName(rel))));
 1257 
 /* NOTE(review): the system-catalog guard condition line was dropped here. */
 1259  ereport(ERROR,
 1260  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
 1261  errmsg("permission denied: \"%s\" is a system catalog",
 1262  RelationGetRelationName(rel))));
 1263 
 1264  /*
 1265  * Delete the pg_trigger tuple.
 1266  */
 1267  CatalogTupleDelete(tgrel, &tup->t_self);
 1268 
 1269  systable_endscan(tgscan);
 1270  heap_close(tgrel, RowExclusiveLock);
 1271 
 1272  /*
 1273  * We do not bother to try to determine whether any other triggers remain,
 1274  * which would be needed in order to decide whether it's safe to clear the
 1275  * relation's relhastriggers. (In any case, there might be a concurrent
 1276  * process adding new triggers.) Instead, just force a relcache inval to
 1277  * make other backends (and this one too!) rebuild their relcache entries.
 1278  * There's no great harm in leaving relhastriggers true even if there are
 1279  * no triggers left.
 1280  */
 1282 
 1283  /* Keep lock on trigger's rel until end of xact */
 1284  heap_close(rel, NoLock);
 1285 }
1286 
1287 /*
1288  * get_trigger_oid - Look up a trigger by name to find its OID.
1289  *
1290  * If missing_ok is false, throw an error if trigger not found. If
1291  * true, just return InvalidOid.
1292  */
/*
 * Look up the pg_trigger row (relid, trigname) via the unique
 * TriggerRelidNameIndexId index and return its OID; InvalidOid (or error)
 * when absent, per missing_ok.
 * NOTE(review): extraction dropped the heap_open of pg_trigger (original
 * line 1305) and the ScanKey attribute-number arguments (lines 1308, 1312,
 * presumably Anum_pg_trigger_tgrelid / Anum_pg_trigger_tgname).
 */
1293 Oid
 1294 get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
 1295 {
 1296  Relation tgrel;
 1297  ScanKeyData skey[2];
 1298  SysScanDesc tgscan;
 1299  HeapTuple tup;
 1300  Oid oid;
 1301 
 1302  /*
 1303  * Find the trigger, verify permissions, set up object address
 1304  */
 1306 
 1307  ScanKeyInit(&skey[0],
 1309  BTEqualStrategyNumber, F_OIDEQ,
 1310  ObjectIdGetDatum(relid));
 1311  ScanKeyInit(&skey[1],
 1313  BTEqualStrategyNumber, F_NAMEEQ,
 1314  CStringGetDatum(trigname));
 1315 
 1316  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
 1317  NULL, 2, skey);
 1318 
 /* (relid, tgname) is unique, so at most one match exists. */
 1319  tup = systable_getnext(tgscan);
 1320 
 1321  if (!HeapTupleIsValid(tup))
 1322  {
 1323  if (!missing_ok)
 1324  ereport(ERROR,
 1325  (errcode(ERRCODE_UNDEFINED_OBJECT),
 1326  errmsg("trigger \"%s\" for table \"%s\" does not exist",
 1327  trigname, get_rel_name(relid))));
 1328  oid = InvalidOid;
 1329  }
 1330  else
 1331  {
 1332  oid = HeapTupleGetOid(tup);
 1333  }
 1334 
 1335  systable_endscan(tgscan);
 1336  heap_close(tgrel, AccessShareLock);
 1337  return oid;
 1338 }
1339 
1340 /*
1341  * Perform permissions and integrity checks before acquiring a relation lock.
1342  */
/*
 * RangeVar lookup callback for renametrig(): validates relkind and
 * ownership before the relation lock is taken.  Returning silently when
 * the syscache lookup fails lets a concurrent DROP win the race.
 * NOTE(review): extraction dropped the signature line (original line 1344)
 * and the aclcheck_error call that should follow the ownership check
 * (line 1366).
 */
1343 static void
 1345  void *arg)
 1346 {
 1347  HeapTuple tuple;
 1348  Form_pg_class form;
 1349 
 1350  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
 1351  if (!HeapTupleIsValid(tuple))
 1352  return; /* concurrently dropped */
 1353  form = (Form_pg_class) GETSTRUCT(tuple);
 1354 
 1355  /* only tables and views can have triggers */
 1356  if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
 1357  form->relkind != RELKIND_FOREIGN_TABLE &&
 1358  form->relkind != RELKIND_PARTITIONED_TABLE)
 1359  ereport(ERROR,
 1360  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
 1361  errmsg("\"%s\" is not a table, view, or foreign table",
 1362  rv->relname)));
 1363 
 1364  /* you must own the table to rename one of its triggers */
 1365  if (!pg_class_ownercheck(relid, GetUserId()))
 /* NOTE(review): the permission-failure call for the line above was dropped. */
 1367  if (!allowSystemTableMods && IsSystemClass(relid, form))
 1368  ereport(ERROR,
 1369  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
 1370  errmsg("permission denied: \"%s\" is a system catalog",
 1371  rv->relname)));
 1372 
 1373  ReleaseSysCache(tuple);
 1374 }
1375 
1376 /*
1377  * renametrig - changes the name of a trigger on a relation
1378  *
1379  * trigger name is changed in trigger catalog.
1380  * No record of the previous name is kept.
1381  *
1382  * get proper relrelation from relation catalog (if not arg)
1383  * scan trigger catalog
1384  * for name conflict (within rel)
1385  * for original trigger (if not arg)
1386  * modify tgname in trigger tuple
1387  * update row in catalog
1388  */
/*
 * Body of renametrig() -- changes the name of a trigger on a relation.
 * Two index scans over pg_trigger: first to reject a conflicting newname,
 * then to find the oldname row and update its tgname in place.  Safe only
 * because we hold AccessExclusiveLock on the relation throughout.
 * NOTE(review): extraction dropped the signature lines (original lines
 * 1389-1390, presumably "ObjectAddress renametrig(RenameStmt *stmt)"), the
 * RangeVarGetRelidExtended call head (line 1405) and its callback argument
 * (line 1407), the heap_open of pg_trigger (line 1422), the ScanKey
 * attribute arguments (lines 1428/1432/1448/1452), the duplicate-name
 * errcode line (1439), and the InvokeObjectPostAlterHook head (line 1471).
 */
1391 {
 1392  Oid tgoid;
 1393  Relation targetrel;
 1394  Relation tgrel;
 1395  HeapTuple tuple;
 1396  SysScanDesc tgscan;
 1397  ScanKeyData key[2];
 1398  Oid relid;
 1399  ObjectAddress address;
 1400 
 1401  /*
 1402  * Look up name, check permissions, and acquire lock (which we will NOT
 1403  * release until end of transaction).
 1404  */
 1406  false, false,
 1408  NULL);
 1409 
 1410  /* Have lock already, so just need to build relcache entry. */
 1411  targetrel = relation_open(relid, NoLock);
 1412 
 1413  /*
 1414  * Scan pg_trigger twice for existing triggers on relation. We do this in
 1415  * order to ensure a trigger does not exist with newname (The unique index
 1416  * on tgrelid/tgname would complain anyway) and to ensure a trigger does
 1417  * exist with oldname.
 1418  *
 1419  * NOTE that this is cool only because we have AccessExclusiveLock on the
 1420  * relation, so the trigger set won't be changing underneath us.
 1421  */
 1423 
 1424  /*
 1425  * First pass -- look for name conflict
 1426  */
 1427  ScanKeyInit(&key[0],
 1429  BTEqualStrategyNumber, F_OIDEQ,
 1430  ObjectIdGetDatum(relid));
 1431  ScanKeyInit(&key[1],
 1433  BTEqualStrategyNumber, F_NAMEEQ,
 1434  PointerGetDatum(stmt->newname));
 1435  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
 1436  NULL, 2, key);
 1437  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
 1438  ereport(ERROR,
 1440  errmsg("trigger \"%s\" for relation \"%s\" already exists",
 1441  stmt->newname, RelationGetRelationName(targetrel))));
 1442  systable_endscan(tgscan);
 1443 
 1444  /*
 1445  * Second pass -- look for trigger existing with oldname and update
 1446  */
 1447  ScanKeyInit(&key[0],
 1449  BTEqualStrategyNumber, F_OIDEQ,
 1450  ObjectIdGetDatum(relid));
 1451  ScanKeyInit(&key[1],
 1453  BTEqualStrategyNumber, F_NAMEEQ,
 1454  PointerGetDatum(stmt->subname));
 1455  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
 1456  NULL, 2, key);
 1457  if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
 1458  {
 1459  tgoid = HeapTupleGetOid(tuple);
 1460 
 1461  /*
 1462  * Update pg_trigger tuple with new tgname.
 1463  */
 1464  tuple = heap_copytuple(tuple); /* need a modifiable copy */
 1465 
 1466  namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname,
 1467  stmt->newname);
 1468 
 1469  CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);
 1470 
 1472  HeapTupleGetOid(tuple), 0);
 1473 
 1474  /*
 1475  * Invalidate relation's relcache entry so that other backends (and
 1476  * this one too!) are sent SI message to make them rebuild relcache
 1477  * entries. (Ideally this should happen automatically...)
 1478  */
 1479  CacheInvalidateRelcache(targetrel);
 1480  }
 1481  else
 1482  {
 1483  ereport(ERROR,
 1484  (errcode(ERRCODE_UNDEFINED_OBJECT),
 1485  errmsg("trigger \"%s\" for table \"%s\" does not exist",
 1486  stmt->subname, RelationGetRelationName(targetrel))));
 1487  }
 1488 
 1489  ObjectAddressSet(address, TriggerRelationId, tgoid);
 1490 
 1491  systable_endscan(tgscan);
 1492 
 1493  heap_close(tgrel, RowExclusiveLock);
 1494 
 1495  /*
 1496  * Close rel, but keep exclusive lock!
 1497  */
 1498  relation_close(targetrel, NoLock);
 1499 
 1500  return address;
 1501 }
1502 
1503 
1504 /*
1505  * EnableDisableTrigger()
1506  *
1507  * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
1508  * to change 'tgenabled' field for the specified trigger(s)
1509  *
1510  * rel: relation to process (caller must hold suitable lock on it)
1511  * tgname: trigger to process, or NULL to scan all triggers
1512  * fires_when: new value for tgenabled field. In addition to generic
1513  * enablement/disablement, this also defines when the trigger
1514  * should be fired in session replication roles.
1515  * skip_system: if true, skip "system" triggers (constraint triggers)
1516  *
1517  * Caller should have checked permissions for the table; here we also
1518  * enforce that superuser privilege is required to alter the state of
1519  * system triggers
1520  */
/*
 * EnableDisableTrigger - set tgenabled for one named trigger (tgname != NULL)
 * or all triggers of the relation.  See the long header comment just above
 * for the parameter contract.
 * NOTE(review): extraction dropped the heap_open of pg_trigger (original
 * line 1534), the ScanKey attribute arguments (lines 1537/1539/1543), the
 * per-tuple InvokeObjectPostAlterHook head (line 1589), and the
 * CacheInvalidateRelcache call under "if (changed)" (line 1609).
 */
1521 void
 1522 EnableDisableTrigger(Relation rel, const char *tgname,
 1523  char fires_when, bool skip_system)
 1524 {
 1525  Relation tgrel;
 1526  int nkeys;
 1527  ScanKeyData keys[2];
 1528  SysScanDesc tgscan;
 1529  HeapTuple tuple;
 1530  bool found;
 1531  bool changed;
 1532 
 1533  /* Scan the relevant entries in pg_triggers */
 1535 
 1536  ScanKeyInit(&keys[0],
 1538  BTEqualStrategyNumber, F_OIDEQ,
 1540  if (tgname)
 1541  {
 1542  ScanKeyInit(&keys[1],
 1544  BTEqualStrategyNumber, F_NAMEEQ,
 1545  CStringGetDatum(tgname));
 1546  nkeys = 2;
 1547  }
 1548  else
 1549  nkeys = 1;
 1550 
 1551  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
 1552  NULL, nkeys, keys);
 1553 
 1554  found = changed = false;
 1555 
 1556  while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
 1557  {
 1558  Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);
 1559 
 1560  if (oldtrig->tgisinternal)
 1561  {
 1562  /* system trigger ... ok to process? */
 1563  if (skip_system)
 1564  continue;
 1565  if (!superuser())
 1566  ereport(ERROR,
 1567  (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
 1568  errmsg("permission denied: \"%s\" is a system trigger",
 1569  NameStr(oldtrig->tgname))));
 1570  }
 1571 
 1572  found = true;
 1573 
 /* Skip the catalog update when the trigger is already in the target state. */
 1574  if (oldtrig->tgenabled != fires_when)
 1575  {
 1576  /* need to change this one ... make a copy to scribble on */
 1577  HeapTuple newtup = heap_copytuple(tuple);
 1578  Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
 1579 
 1580  newtrig->tgenabled = fires_when;
 1581 
 1582  CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);
 1583 
 1584  heap_freetuple(newtup);
 1585 
 1586  changed = true;
 1587  }
 1588 
 1590  HeapTupleGetOid(tuple), 0);
 1591  }
 1592 
 1593  systable_endscan(tgscan);
 1594 
 1595  heap_close(tgrel, RowExclusiveLock);
 1596 
 /* A named trigger that was never seen is a user error. */
 1597  if (tgname && !found)
 1598  ereport(ERROR,
 1599  (errcode(ERRCODE_UNDEFINED_OBJECT),
 1600  errmsg("trigger \"%s\" for table \"%s\" does not exist",
 1601  tgname, RelationGetRelationName(rel))));
 1602 
 1603  /*
 1604  * If we changed anything, broadcast a SI inval message to force each
 1605  * backend (including our own!) to rebuild relation's relcache entry.
 1606  * Otherwise they will fail to apply the change promptly.
 1607  */
 1608  if (changed)
 1610 }
1611 
1612 
1613 /*
1614  * Build trigger data to attach to the given relcache entry.
1615  *
1616  * Note that trigger data attached to a relcache entry must be stored in
1617  * CacheMemoryContext to ensure it survives as long as the relcache entry.
1618  * But we should be running in a less long-lived working context. To avoid
1619  * leaking cache memory if this routine fails partway through, we build a
1620  * temporary TriggerDesc in working memory and then copy the completed
1621  * structure into cache memory.
1622  */
/*
 * RelationBuildTriggers - scan pg_trigger for the given relation and attach
 * a TriggerDesc to its relcache entry.  Built first in the caller's working
 * context, then copied into CacheMemoryContext (see header comment above).
 * NOTE(review): extraction dropped the signature line (original line 1624,
 * presumably "RelationBuildTriggers(Relation relation)"), the ScanKey
 * attribute argument (line 1652), the heap_open of pg_trigger (line 1656),
 * several fastgetattr attribute-number arguments and DirectFunctionCall1
 * heads (lines 1675, 1703, 1719, 1723, 1727, 1731), and the
 * MemoryContextSwitchTo(CacheMemoryContext) call (line 1763).
 */
1623 void
 1625 {
 1626  TriggerDesc *trigdesc;
 1627  int numtrigs;
 1628  int maxtrigs;
 1629  Trigger *triggers;
 1630  Relation tgrel;
 1631  ScanKeyData skey;
 1632  SysScanDesc tgscan;
 1633  HeapTuple htup;
 1634  MemoryContext oldContext;
 1635  int i;
 1636 
 1637  /*
 1638  * Allocate a working array to hold the triggers (the array is extended if
 1639  * necessary)
 1640  */
 1641  maxtrigs = 16;
 1642  triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
 1643  numtrigs = 0;
 1644 
 1645  /*
 1646  * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
 1647  * be reading the triggers in name order, except possibly during
 1648  * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
 1649  * ensures that triggers will be fired in name order.
 1650  */
 1651  ScanKeyInit(&skey,
 1653  BTEqualStrategyNumber, F_OIDEQ,
 1654  ObjectIdGetDatum(RelationGetRelid(relation)));
 1655 
 1657  tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
 1658  NULL, 1, &skey);
 1659 
 1660  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
 1661  {
 1662  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
 1663  Trigger *build;
 1664  Datum datum;
 1665  bool isnull;
 1666 
 /* Double the working array when it fills up. */
 1667  if (numtrigs >= maxtrigs)
 1668  {
 1669  maxtrigs *= 2;
 1670  triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
 1671  }
 1672  build = &(triggers[numtrigs]);
 1673 
 /* Copy the fixed-width catalog columns into the in-memory Trigger. */
 1674  build->tgoid = HeapTupleGetOid(htup);
 1676  NameGetDatum(&pg_trigger->tgname)));
 1677  build->tgfoid = pg_trigger->tgfoid;
 1678  build->tgtype = pg_trigger->tgtype;
 1679  build->tgenabled = pg_trigger->tgenabled;
 1680  build->tgisinternal = pg_trigger->tgisinternal;
 1681  build->tgconstrrelid = pg_trigger->tgconstrrelid;
 1682  build->tgconstrindid = pg_trigger->tgconstrindid;
 1683  build->tgconstraint = pg_trigger->tgconstraint;
 1684  build->tgdeferrable = pg_trigger->tgdeferrable;
 1685  build->tginitdeferred = pg_trigger->tginitdeferred;
 1686  build->tgnargs = pg_trigger->tgnargs;
 1687  /* tgattr is first var-width field, so OK to access directly */
 1688  build->tgnattr = pg_trigger->tgattr.dim1;
 1689  if (build->tgnattr > 0)
 1690  {
 1691  build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
 1692  memcpy(build->tgattr, &(pg_trigger->tgattr.values),
 1693  build->tgnattr * sizeof(int16));
 1694  }
 1695  else
 1696  build->tgattr = NULL;
 1697  if (build->tgnargs > 0)
 1698  {
 1699  bytea *val;
 1700  char *p;
 1701 
 1702  val = DatumGetByteaPP(fastgetattr(htup,
 1704  tgrel->rd_att, &isnull));
 1705  if (isnull)
 1706  elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
 1707  RelationGetRelationName(relation));
 /* tgargs is a sequence of NUL-terminated strings packed end to end. */
 1708  p = (char *) VARDATA_ANY(val);
 1709  build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
 1710  for (i = 0; i < build->tgnargs; i++)
 1711  {
 1712  build->tgargs[i] = pstrdup(p);
 1713  p += strlen(p) + 1;
 1714  }
 1715  }
 1716  else
 1717  build->tgargs = NULL;
 1718 
 1720  tgrel->rd_att, &isnull);
 1721  if (!isnull)
 1722  build->tgoldtable =
 1724  else
 1725  build->tgoldtable = NULL;
 1726 
 1728  tgrel->rd_att, &isnull);
 1729  if (!isnull)
 1730  build->tgnewtable =
 1732  else
 1733  build->tgnewtable = NULL;
 1734 
 1735  datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
 1736  tgrel->rd_att, &isnull);
 1737  if (!isnull)
 1738  build->tgqual = TextDatumGetCString(datum);
 1739  else
 1740  build->tgqual = NULL;
 1741 
 1742  numtrigs++;
 1743  }
 1744 
 1745  systable_endscan(tgscan);
 1746  heap_close(tgrel, AccessShareLock);
 1747 
 1748  /* There might not be any triggers */
 1749  if (numtrigs == 0)
 1750  {
 1751  pfree(triggers);
 1752  return;
 1753  }
 1754 
 1755  /* Build trigdesc */
 1756  trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
 1757  trigdesc->triggers = triggers;
 1758  trigdesc->numtriggers = numtrigs;
 1759  for (i = 0; i < numtrigs; i++)
 1760  SetTriggerFlags(trigdesc, &(triggers[i]));
 1761 
 1762  /* Copy completed trigdesc into cache storage */
 1764  relation->trigdesc = CopyTriggerDesc(trigdesc);
 1765  MemoryContextSwitchTo(oldContext);
 1766 
 1767  /* Release working memory */
 1768  FreeTriggerDesc(trigdesc);
 1769 }
1770 
1771 /*
1772  * Update the TriggerDesc's hint flags to include the specified trigger
1773  */
/*
 * SetTriggerFlags - OR the given trigger's event/timing classification into
 * the TriggerDesc's boolean hint flags, so executor paths can cheaply test
 * "does this rel have any BEFORE ROW INSERT trigger?" etc.
 * NOTE(review): extraction dropped the signature line (original line 1775,
 * presumably "SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)")
 * and nearly every TRIGGER_TYPE_MATCHES(...) argument line (the
 * even-numbered originals between 1780 and 1843), leaving only the
 * left-hand sides of the |= assignments visible here.
 */
1774 static void
 1776 {
 1777  int16 tgtype = trigger->tgtype;
 1778 
 /* Row- and statement-level flags for each of INSERT/UPDATE/DELETE. */
 1779  trigdesc->trig_insert_before_row |=
 1782  trigdesc->trig_insert_after_row |=
 1785  trigdesc->trig_insert_instead_row |=
 1788  trigdesc->trig_insert_before_statement |=
 1791  trigdesc->trig_insert_after_statement |=
 1794  trigdesc->trig_update_before_row |=
 1797  trigdesc->trig_update_after_row |=
 1800  trigdesc->trig_update_instead_row |=
 1803  trigdesc->trig_update_before_statement |=
 1806  trigdesc->trig_update_after_statement |=
 1809  trigdesc->trig_delete_before_row |=
 1812  trigdesc->trig_delete_after_row |=
 1815  trigdesc->trig_delete_instead_row |=
 1818  trigdesc->trig_delete_before_statement |=
 1821  trigdesc->trig_delete_after_statement |=
 1824  /* there are no row-level truncate triggers */
 1825  trigdesc->trig_truncate_before_statement |=
 1828  trigdesc->trig_truncate_after_statement |=
 1831 
 /* Transition-table flags: set when the trigger names an OLD/NEW table. */
 1832  trigdesc->trig_insert_new_table |=
 1833  (TRIGGER_FOR_INSERT(tgtype) &&
 1835  trigdesc->trig_update_old_table |=
 1836  (TRIGGER_FOR_UPDATE(tgtype) &&
 1838  trigdesc->trig_update_new_table |=
 1839  (TRIGGER_FOR_UPDATE(tgtype) &&
 1841  trigdesc->trig_delete_old_table |=
 1842  (TRIGGER_FOR_DELETE(tgtype) &&
 1844 }
1845 
1846 /*
1847  * Copy a TriggerDesc data structure.
1848  *
1849  * The copy is allocated in the current memory context.
1850  */
/*
 * Deep-copy a TriggerDesc (and its Trigger array, name/arg/qual strings)
 * into the current memory context; returns NULL for an empty descriptor.
 * NOTE(review): extraction dropped the signature line (original line 1852,
 * presumably "CopyTriggerDesc(TriggerDesc *trigdesc)").
 */
1851 TriggerDesc *
 1853 {
 1854  TriggerDesc *newdesc;
 1855  Trigger *trigger;
 1856  int i;
 1857 
 1858  if (trigdesc == NULL || trigdesc->numtriggers <= 0)
 1859  return NULL;
 1860 
 /* Shallow-copy the descriptor and trigger array first... */
 1861  newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
 1862  memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
 1863 
 1864  trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
 1865  memcpy(trigger, trigdesc->triggers,
 1866  trigdesc->numtriggers * sizeof(Trigger));
 1867  newdesc->triggers = trigger;
 1868 
 /* ...then replace each pointer member with a freshly allocated copy. */
 1869  for (i = 0; i < trigdesc->numtriggers; i++)
 1870  {
 1871  trigger->tgname = pstrdup(trigger->tgname);
 1872  if (trigger->tgnattr > 0)
 1873  {
 1874  int16 *newattr;
 1875 
 1876  newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
 1877  memcpy(newattr, trigger->tgattr,
 1878  trigger->tgnattr * sizeof(int16));
 1879  trigger->tgattr = newattr;
 1880  }
 1881  if (trigger->tgnargs > 0)
 1882  {
 1883  char **newargs;
 1884  int16 j;
 1885 
 1886  newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
 1887  for (j = 0; j < trigger->tgnargs; j++)
 1888  newargs[j] = pstrdup(trigger->tgargs[j]);
 1889  trigger->tgargs = newargs;
 1890  }
 1891  if (trigger->tgqual)
 1892  trigger->tgqual = pstrdup(trigger->tgqual);
 1893  if (trigger->tgoldtable)
 1894  trigger->tgoldtable = pstrdup(trigger->tgoldtable);
 1895  if (trigger->tgnewtable)
 1896  trigger->tgnewtable = pstrdup(trigger->tgnewtable);
 1897  trigger++;
 1898  }
 1899 
 1900  return newdesc;
 1901 }
1902 
1903 /*
1904  * Free a TriggerDesc data structure.
1905  */
/*
 * Free a TriggerDesc and everything it owns; safe to call with NULL.
 * Mirror image of CopyTriggerDesc's allocations.
 * NOTE(review): extraction dropped the signature line (original line 1907,
 * presumably "FreeTriggerDesc(TriggerDesc *trigdesc)").
 */
1906 void
 1908 {
 1909  Trigger *trigger;
 1910  int i;
 1911 
 1912  if (trigdesc == NULL)
 1913  return;
 1914 
 1915  trigger = trigdesc->triggers;
 1916  for (i = 0; i < trigdesc->numtriggers; i++)
 1917  {
 1918  pfree(trigger->tgname);
 1919  if (trigger->tgnattr > 0)
 1920  pfree(trigger->tgattr);
 1921  if (trigger->tgnargs > 0)
 1922  {
 /* Frees each arg string; note this counts tgnargs down to -1. */
 1923  while (--(trigger->tgnargs) >= 0)
 1924  pfree(trigger->tgargs[trigger->tgnargs]);
 1925  pfree(trigger->tgargs);
 1926  }
 1927  if (trigger->tgqual)
 1928  pfree(trigger->tgqual);
 1929  if (trigger->tgoldtable)
 1930  pfree(trigger->tgoldtable);
 1931  if (trigger->tgnewtable)
 1932  pfree(trigger->tgnewtable);
 1933  trigger++;
 1934  }
 1935  pfree(trigdesc->triggers);
 1936  pfree(trigdesc);
 1937 }
1938 
1939 /*
1940  * Compare two TriggerDesc structures for logical equality.
1941  */
#ifdef NOT_USED

/*
 * Compare two possibly-NULL C strings for equality; two NULLs match,
 * a NULL never matches a non-NULL.
 */
static bool
tgdesc_strings_match(const char *s1, const char *s2)
{
	if (s1 == NULL || s2 == NULL)
		return s1 == s2;
	return strcmp(s1, s2) == 0;
}

/*
 * Compare two TriggerDesc structures for logical equality.
 *
 * We need not examine the hint flags, just the trigger array itself; if
 * we have the same triggers with the same types, the flags should match.
 *
 * As of 7.3 we assume trigger set ordering is significant in the
 * comparison; so we just compare corresponding slots of the two sets.
 *
 * Note: comparing the stringToNode forms of the WHEN clauses means that
 * parse column locations will affect the result.  This is okay as long as
 * this function is only used for detecting exact equality, as for example
 * in checking for staleness of a cache entry.
 */
bool
equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
{
	int			i;

	/* Handle the NULL cases up front: equal only when both are NULL. */
	if (trigdesc1 == NULL)
		return trigdesc2 == NULL;
	if (trigdesc2 == NULL)
		return false;
	if (trigdesc1->numtriggers != trigdesc2->numtriggers)
		return false;

	for (i = 0; i < trigdesc1->numtriggers; i++)
	{
		Trigger    *trig1 = trigdesc1->triggers + i;
		Trigger    *trig2 = trigdesc2->triggers + i;
		int			j;

		/* All scalar and name fields must agree exactly. */
		if (trig1->tgoid != trig2->tgoid ||
			strcmp(trig1->tgname, trig2->tgname) != 0 ||
			trig1->tgfoid != trig2->tgfoid ||
			trig1->tgtype != trig2->tgtype ||
			trig1->tgenabled != trig2->tgenabled ||
			trig1->tgisinternal != trig2->tgisinternal ||
			trig1->tgconstrrelid != trig2->tgconstrrelid ||
			trig1->tgconstrindid != trig2->tgconstrindid ||
			trig1->tgconstraint != trig2->tgconstraint ||
			trig1->tgdeferrable != trig2->tgdeferrable ||
			trig1->tginitdeferred != trig2->tginitdeferred ||
			trig1->tgnargs != trig2->tgnargs ||
			trig1->tgnattr != trig2->tgnattr)
			return false;

		/* Column-number arrays (lengths already known equal). */
		if (trig1->tgnattr > 0 &&
			memcmp(trig1->tgattr, trig2->tgattr,
				   trig1->tgnattr * sizeof(int16)) != 0)
			return false;

		/* Argument strings, pairwise. */
		for (j = 0; j < trig1->tgnargs; j++)
			if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
				return false;

		/* Nullable string fields: WHEN qual and transition-table names. */
		if (!tgdesc_strings_match(trig1->tgqual, trig2->tgqual))
			return false;
		if (!tgdesc_strings_match(trig1->tgoldtable, trig2->tgoldtable))
			return false;
		if (!tgdesc_strings_match(trig1->tgnewtable, trig2->tgnewtable))
			return false;
	}
	return true;
}
#endif							/* NOT_USED */
2030 
2031 /*
2032  * Call a trigger function.
2033  *
2034  * trigdata: trigger descriptor.
2035  * tgindx: trigger's index in finfo and instr arrays.
2036  * finfo: array of cached trigger function call information.
2037  * instr: optional array of EXPLAIN ANALYZE instrumentation state.
2038  * per_tuple_context: memory context to execute the function in.
2039  *
2040  * Returns the tuple (or NULL) as returned by the function.
2041  */
/*
 * ExecCallTriggerFunc - invoke one trigger function and return its result
 * tuple (or NULL).  See the header comment above for the parameter contract.
 * NOTE(review): extraction dropped the signature name line (original line
 * 2043, presumably "ExecCallTriggerFunc(TriggerData *trigdata,").
 */
2042 static HeapTuple
 2044  int tgindx,
 2045  FmgrInfo *finfo,
 2046  Instrumentation *instr,
 2047  MemoryContext per_tuple_context)
 2048 {
 2049  FunctionCallInfoData fcinfo;
 2050  PgStat_FunctionCallUsage fcusage;
 2051  Datum result;
 2052  MemoryContext oldContext;
 2053 
 2054  /*
 2055  * Protect against code paths that may fail to initialize transition table
 2056  * info.
 2057  */
 2058  Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
 2059  TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
 2060  TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
 2061  TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
 2062  !(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
 2063  !(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
 2064  (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));
 2065 
 /* Index into the caller's per-trigger FmgrInfo cache. */
 2066  finfo += tgindx;
 2067 
 2068  /*
 2069  * We cache fmgr lookup info, to avoid making the lookup again on each
 2070  * call.
 2071  */
 2072  if (finfo->fn_oid == InvalidOid)
 2073  fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
 2074 
 2075  Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
 2076 
 2077  /*
 2078  * If doing EXPLAIN ANALYZE, start charging time to this trigger.
 2079  */
 2080  if (instr)
 2081  InstrStartNode(instr + tgindx);
 2082 
 2083  /*
 2084  * Do the function evaluation in the per-tuple memory context, so that
 2085  * leaked memory will be reclaimed once per tuple. Note in particular that
 2086  * any new tuple created by the trigger function will live till the end of
 2087  * the tuple cycle.
 2088  */
 2089  oldContext = MemoryContextSwitchTo(per_tuple_context);
 2090 
 2091  /*
 2092  * Call the function, passing no arguments but setting a context.
 2093  */
 2094  InitFunctionCallInfoData(fcinfo, finfo, 0,
 2095  InvalidOid, (Node *) trigdata, NULL);
 2096 
 2097  pgstat_init_function_usage(&fcinfo, &fcusage);
 2098 
 /* Track nesting depth; decrement on both normal and error exit paths. */
 2099  MyTriggerDepth++;
 2100  PG_TRY();
 2101  {
 2102  result = FunctionCallInvoke(&fcinfo);
 2103  }
 2104  PG_CATCH();
 2105  {
 2106  MyTriggerDepth--;
 2107  PG_RE_THROW();
 2108  }
 2109  PG_END_TRY();
 2110  MyTriggerDepth--;
 2111 
 2112  pgstat_end_function_usage(&fcusage, true);
 2113 
 2114  MemoryContextSwitchTo(oldContext);
 2115 
 2116  /*
 2117  * Trigger protocol allows function to return a null pointer, but NOT to
 2118  * set the isnull result flag.
 2119  */
 2120  if (fcinfo.isnull)
 2121  ereport(ERROR,
 2122  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
 2123  errmsg("trigger function %u returned null value",
 2124  fcinfo.flinfo->fn_oid)));
 2125 
 2126  /*
 2127  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
 2128  * one "tuple returned" (really the number of firings).
 2129  */
 2130  if (instr)
 2131  InstrStopNode(instr + tgindx, 1);
 2132 
 2133  return (HeapTuple) DatumGetPointer(result);
 2134 }
2135 
/*
 * ExecBSInsertTriggers - fire BEFORE STATEMENT triggers for INSERT.
 * Quick exit when the relation has no such triggers (per the hint flag);
 * otherwise iterate all triggers, filter by type and enabled-state, and
 * error out if any returns a tuple (statement triggers must return NULL).
 * NOTE(review): extraction dropped the signature line (original line 2137,
 * presumably "ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)"),
 * the second half of the tg_event initializer (line 2152), and the
 * TRIGGER_TYPE_MATCHES argument lines (2166-2168).
 */
2136 void
 2138 {
 2139  TriggerDesc *trigdesc;
 2140  int i;
 2141  TriggerData LocTriggerData;
 2142 
 2143  trigdesc = relinfo->ri_TrigDesc;
 2144 
 2145  if (trigdesc == NULL)
 2146  return;
 2147  if (!trigdesc->trig_insert_before_statement)
 2148  return;
 2149 
 /* Statement-level event: no tuples or transition tables are supplied. */
 2150  LocTriggerData.type = T_TriggerData;
 2151  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
 2153  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
 2154  LocTriggerData.tg_trigtuple = NULL;
 2155  LocTriggerData.tg_newtuple = NULL;
 2156  LocTriggerData.tg_oldtable = NULL;
 2157  LocTriggerData.tg_newtable = NULL;
 2158  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
 2159  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
 2160  for (i = 0; i < trigdesc->numtriggers; i++)
 2161  {
 2162  Trigger *trigger = &trigdesc->triggers[i];
 2163  HeapTuple newtuple;
 2164 
 2165  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
 2169  continue;
 2170  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
 2171  NULL, NULL, NULL))
 2172  continue;
 2173 
 2174  LocTriggerData.tg_trigger = trigger;
 2175  newtuple = ExecCallTriggerFunc(&LocTriggerData,
 2176  i,
 2177  relinfo->ri_TrigFunctions,
 2178  relinfo->ri_TrigInstrument,
 2179  GetPerTupleMemoryContext(estate));
 2180 
 2181  if (newtuple)
 2182  ereport(ERROR,
 2183  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
 2184  errmsg("BEFORE STATEMENT trigger cannot return a value")));
 2185  }
 2186 }
2187 
2188 void
2190 {
2191  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2192 
2193  if (trigdesc && trigdesc->trig_insert_after_statement)
2195  false, NULL, NULL, NIL, NULL);
2196 }
2197 
2200  TupleTableSlot *slot)
2201 {
2202  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2203  HeapTuple slottuple = ExecMaterializeSlot(slot);
2204  HeapTuple newtuple = slottuple;
2205  HeapTuple oldtuple;
2206  TriggerData LocTriggerData;
2207  int i;
2208 
2209  LocTriggerData.type = T_TriggerData;
2210  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2213  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2214  LocTriggerData.tg_newtuple = NULL;
2215  LocTriggerData.tg_oldtable = NULL;
2216  LocTriggerData.tg_newtable = NULL;
2217  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2218  for (i = 0; i < trigdesc->numtriggers; i++)
2219  {
2220  Trigger *trigger = &trigdesc->triggers[i];
2221 
2222  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2226  continue;
2227  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2228  NULL, NULL, newtuple))
2229  continue;
2230 
2231  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2232  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2233  LocTriggerData.tg_trigger = trigger;
2234  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2235  i,
2236  relinfo->ri_TrigFunctions,
2237  relinfo->ri_TrigInstrument,
2238  GetPerTupleMemoryContext(estate));
2239  if (oldtuple != newtuple && oldtuple != slottuple)
2240  heap_freetuple(oldtuple);
2241  if (newtuple == NULL)
2242  return NULL; /* "do nothing" */
2243  }
2244 
2245  if (newtuple != slottuple)
2246  {
2247  /*
2248  * Return the modified tuple using the es_trig_tuple_slot. We assume
2249  * the tuple was allocated in per-tuple memory context, and therefore
2250  * will go away by itself. The tuple table slot should not try to
2251  * clear it.
2252  */
2253  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2254  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2255 
2256  if (newslot->tts_tupleDescriptor != tupdesc)
2257  ExecSetSlotDescriptor(newslot, tupdesc);
2258  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2259  slot = newslot;
2260  }
2261  return slot;
2262 }
2263 
2264 void
2266  HeapTuple trigtuple, List *recheckIndexes)
2267 {
2268  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2269 
2270  if (trigdesc &&
2271  (trigdesc->trig_insert_after_row || trigdesc->trig_insert_new_table))
2273  true, NULL, trigtuple, recheckIndexes, NULL);
2274 }
2275 
2278  TupleTableSlot *slot)
2279 {
2280  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2281  HeapTuple slottuple = ExecMaterializeSlot(slot);
2282  HeapTuple newtuple = slottuple;
2283  HeapTuple oldtuple;
2284  TriggerData LocTriggerData;
2285  int i;
2286 
2287  LocTriggerData.type = T_TriggerData;
2288  LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2291  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2292  LocTriggerData.tg_newtuple = NULL;
2293  LocTriggerData.tg_oldtable = NULL;
2294  LocTriggerData.tg_newtable = NULL;
2295  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2296  for (i = 0; i < trigdesc->numtriggers; i++)
2297  {
2298  Trigger *trigger = &trigdesc->triggers[i];
2299 
2300  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2304  continue;
2305  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2306  NULL, NULL, newtuple))
2307  continue;
2308 
2309  LocTriggerData.tg_trigtuple = oldtuple = newtuple;
2310  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2311  LocTriggerData.tg_trigger = trigger;
2312  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2313  i,
2314  relinfo->ri_TrigFunctions,
2315  relinfo->ri_TrigInstrument,
2316  GetPerTupleMemoryContext(estate));
2317  if (oldtuple != newtuple && oldtuple != slottuple)
2318  heap_freetuple(oldtuple);
2319  if (newtuple == NULL)
2320  return NULL; /* "do nothing" */
2321  }
2322 
2323  if (newtuple != slottuple)
2324  {
2325  /*
2326  * Return the modified tuple using the es_trig_tuple_slot. We assume
2327  * the tuple was allocated in per-tuple memory context, and therefore
2328  * will go away by itself. The tuple table slot should not try to
2329  * clear it.
2330  */
2331  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2332  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2333 
2334  if (newslot->tts_tupleDescriptor != tupdesc)
2335  ExecSetSlotDescriptor(newslot, tupdesc);
2336  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2337  slot = newslot;
2338  }
2339  return slot;
2340 }
2341 
2342 void
2344 {
2345  TriggerDesc *trigdesc;
2346  int i;
2347  TriggerData LocTriggerData;
2348 
2349  trigdesc = relinfo->ri_TrigDesc;
2350 
2351  if (trigdesc == NULL)
2352  return;
2353  if (!trigdesc->trig_delete_before_statement)
2354  return;
2355 
2356  LocTriggerData.type = T_TriggerData;
2357  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2359  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2360  LocTriggerData.tg_trigtuple = NULL;
2361  LocTriggerData.tg_newtuple = NULL;
2362  LocTriggerData.tg_oldtable = NULL;
2363  LocTriggerData.tg_newtable = NULL;
2364  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2365  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2366  for (i = 0; i < trigdesc->numtriggers; i++)
2367  {
2368  Trigger *trigger = &trigdesc->triggers[i];
2369  HeapTuple newtuple;
2370 
2371  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2375  continue;
2376  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2377  NULL, NULL, NULL))
2378  continue;
2379 
2380  LocTriggerData.tg_trigger = trigger;
2381  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2382  i,
2383  relinfo->ri_TrigFunctions,
2384  relinfo->ri_TrigInstrument,
2385  GetPerTupleMemoryContext(estate));
2386 
2387  if (newtuple)
2388  ereport(ERROR,
2389  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2390  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2391  }
2392 }
2393 
2394 void
2396 {
2397  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2398 
2399  if (trigdesc && trigdesc->trig_delete_after_statement)
2401  false, NULL, NULL, NIL, NULL);
2402 }
2403 
2404 bool
2406  ResultRelInfo *relinfo,
2407  ItemPointer tupleid,
2408  HeapTuple fdw_trigtuple)
2409 {
2410  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2411  bool result = true;
2412  TriggerData LocTriggerData;
2413  HeapTuple trigtuple;
2414  HeapTuple newtuple;
2415  TupleTableSlot *newSlot;
2416  int i;
2417 
2418  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2419  if (fdw_trigtuple == NULL)
2420  {
2421  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2422  LockTupleExclusive, &newSlot);
2423  if (trigtuple == NULL)
2424  return false;
2425  }
2426  else
2427  trigtuple = fdw_trigtuple;
2428 
2429  LocTriggerData.type = T_TriggerData;
2430  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2433  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2434  LocTriggerData.tg_newtuple = NULL;
2435  LocTriggerData.tg_oldtable = NULL;
2436  LocTriggerData.tg_newtable = NULL;
2437  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2438  for (i = 0; i < trigdesc->numtriggers; i++)
2439  {
2440  Trigger *trigger = &trigdesc->triggers[i];
2441 
2442  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2446  continue;
2447  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2448  NULL, trigtuple, NULL))
2449  continue;
2450 
2451  LocTriggerData.tg_trigtuple = trigtuple;
2452  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2453  LocTriggerData.tg_trigger = trigger;
2454  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2455  i,
2456  relinfo->ri_TrigFunctions,
2457  relinfo->ri_TrigInstrument,
2458  GetPerTupleMemoryContext(estate));
2459  if (newtuple == NULL)
2460  {
2461  result = false; /* tell caller to suppress delete */
2462  break;
2463  }
2464  if (newtuple != trigtuple)
2465  heap_freetuple(newtuple);
2466  }
2467  if (trigtuple != fdw_trigtuple)
2468  heap_freetuple(trigtuple);
2469 
2470  return result;
2471 }
2472 
2473 void
2475  ItemPointer tupleid,
2476  HeapTuple fdw_trigtuple)
2477 {
2478  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2479 
2480  if (trigdesc &&
2481  (trigdesc->trig_delete_after_row || trigdesc->trig_delete_old_table))
2482  {
2483  HeapTuple trigtuple;
2484 
2485  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2486  if (fdw_trigtuple == NULL)
2487  trigtuple = GetTupleForTrigger(estate,
2488  NULL,
2489  relinfo,
2490  tupleid,
2492  NULL);
2493  else
2494  trigtuple = fdw_trigtuple;
2495 
2497  true, trigtuple, NULL, NIL, NULL);
2498  if (trigtuple != fdw_trigtuple)
2499  heap_freetuple(trigtuple);
2500  }
2501 }
2502 
2503 bool
2505  HeapTuple trigtuple)
2506 {
2507  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2508  TriggerData LocTriggerData;
2509  HeapTuple rettuple;
2510  int i;
2511 
2512  LocTriggerData.type = T_TriggerData;
2513  LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2516  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2517  LocTriggerData.tg_newtuple = NULL;
2518  LocTriggerData.tg_oldtable = NULL;
2519  LocTriggerData.tg_newtable = NULL;
2520  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2521  for (i = 0; i < trigdesc->numtriggers; i++)
2522  {
2523  Trigger *trigger = &trigdesc->triggers[i];
2524 
2525  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2529  continue;
2530  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2531  NULL, trigtuple, NULL))
2532  continue;
2533 
2534  LocTriggerData.tg_trigtuple = trigtuple;
2535  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2536  LocTriggerData.tg_trigger = trigger;
2537  rettuple = ExecCallTriggerFunc(&LocTriggerData,
2538  i,
2539  relinfo->ri_TrigFunctions,
2540  relinfo->ri_TrigInstrument,
2541  GetPerTupleMemoryContext(estate));
2542  if (rettuple == NULL)
2543  return false; /* Delete was suppressed */
2544  if (rettuple != trigtuple)
2545  heap_freetuple(rettuple);
2546  }
2547  return true;
2548 }
2549 
2550 void
2552 {
2553  TriggerDesc *trigdesc;
2554  int i;
2555  TriggerData LocTriggerData;
2556  Bitmapset *updatedCols;
2557 
2558  trigdesc = relinfo->ri_TrigDesc;
2559 
2560  if (trigdesc == NULL)
2561  return;
2562  if (!trigdesc->trig_update_before_statement)
2563  return;
2564 
2565  updatedCols = GetUpdatedColumns(relinfo, estate);
2566 
2567  LocTriggerData.type = T_TriggerData;
2568  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2570  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2571  LocTriggerData.tg_trigtuple = NULL;
2572  LocTriggerData.tg_newtuple = NULL;
2573  LocTriggerData.tg_oldtable = NULL;
2574  LocTriggerData.tg_newtable = NULL;
2575  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2576  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2577  for (i = 0; i < trigdesc->numtriggers; i++)
2578  {
2579  Trigger *trigger = &trigdesc->triggers[i];
2580  HeapTuple newtuple;
2581 
2582  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2586  continue;
2587  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2588  updatedCols, NULL, NULL))
2589  continue;
2590 
2591  LocTriggerData.tg_trigger = trigger;
2592  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2593  i,
2594  relinfo->ri_TrigFunctions,
2595  relinfo->ri_TrigInstrument,
2596  GetPerTupleMemoryContext(estate));
2597 
2598  if (newtuple)
2599  ereport(ERROR,
2600  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2601  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2602  }
2603 }
2604 
2605 void
2607 {
2608  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2609 
2610  if (trigdesc && trigdesc->trig_update_after_statement)
2612  false, NULL, NULL, NIL,
2613  GetUpdatedColumns(relinfo, estate));
2614 }
2615 
2618  ResultRelInfo *relinfo,
2619  ItemPointer tupleid,
2620  HeapTuple fdw_trigtuple,
2621  TupleTableSlot *slot)
2622 {
2623  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2624  HeapTuple slottuple = ExecMaterializeSlot(slot);
2625  HeapTuple newtuple = slottuple;
2626  TriggerData LocTriggerData;
2627  HeapTuple trigtuple;
2628  HeapTuple oldtuple;
2629  TupleTableSlot *newSlot;
2630  int i;
2631  Bitmapset *updatedCols;
2632  LockTupleMode lockmode;
2633 
2634  /* Determine lock mode to use */
2635  lockmode = ExecUpdateLockMode(estate, relinfo);
2636 
2637  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2638  if (fdw_trigtuple == NULL)
2639  {
2640  /* get a copy of the on-disk tuple we are planning to update */
2641  trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2642  lockmode, &newSlot);
2643  if (trigtuple == NULL)
2644  return NULL; /* cancel the update action */
2645  }
2646  else
2647  {
2648  trigtuple = fdw_trigtuple;
2649  newSlot = NULL;
2650  }
2651 
2652  /*
2653  * In READ COMMITTED isolation level it's possible that target tuple was
2654  * changed due to concurrent update. In that case we have a raw subplan
2655  * output tuple in newSlot, and need to run it through the junk filter to
2656  * produce an insertable tuple.
2657  *
2658  * Caution: more than likely, the passed-in slot is the same as the
2659  * junkfilter's output slot, so we are clobbering the original value of
2660  * slottuple by doing the filtering. This is OK since neither we nor our
2661  * caller have any more interest in the prior contents of that slot.
2662  */
2663  if (newSlot != NULL)
2664  {
2665  slot = ExecFilterJunk(relinfo->ri_junkFilter, newSlot);
2666  slottuple = ExecMaterializeSlot(slot);
2667  newtuple = slottuple;
2668  }
2669 
2670 
2671  LocTriggerData.type = T_TriggerData;
2672  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2675  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2676  LocTriggerData.tg_oldtable = NULL;
2677  LocTriggerData.tg_newtable = NULL;
2678  updatedCols = GetUpdatedColumns(relinfo, estate);
2679  for (i = 0; i < trigdesc->numtriggers; i++)
2680  {
2681  Trigger *trigger = &trigdesc->triggers[i];
2682 
2683  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2687  continue;
2688  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2689  updatedCols, trigtuple, newtuple))
2690  continue;
2691 
2692  LocTriggerData.tg_trigtuple = trigtuple;
2693  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2694  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2695  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2696  LocTriggerData.tg_trigger = trigger;
2697  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2698  i,
2699  relinfo->ri_TrigFunctions,
2700  relinfo->ri_TrigInstrument,
2701  GetPerTupleMemoryContext(estate));
2702  if (oldtuple != newtuple && oldtuple != slottuple)
2703  heap_freetuple(oldtuple);
2704  if (newtuple == NULL)
2705  {
2706  if (trigtuple != fdw_trigtuple)
2707  heap_freetuple(trigtuple);
2708  return NULL; /* "do nothing" */
2709  }
2710  }
2711  if (trigtuple != fdw_trigtuple)
2712  heap_freetuple(trigtuple);
2713 
2714  if (newtuple != slottuple)
2715  {
2716  /*
2717  * Return the modified tuple using the es_trig_tuple_slot. We assume
2718  * the tuple was allocated in per-tuple memory context, and therefore
2719  * will go away by itself. The tuple table slot should not try to
2720  * clear it.
2721  */
2722  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2723  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2724 
2725  if (newslot->tts_tupleDescriptor != tupdesc)
2726  ExecSetSlotDescriptor(newslot, tupdesc);
2727  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2728  slot = newslot;
2729  }
2730  return slot;
2731 }
2732 
2733 void
2735  ItemPointer tupleid,
2736  HeapTuple fdw_trigtuple,
2737  HeapTuple newtuple,
2738  List *recheckIndexes)
2739 {
2740  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2741 
2742  if (trigdesc && (trigdesc->trig_update_after_row ||
2743  trigdesc->trig_update_old_table || trigdesc->trig_update_new_table))
2744  {
2745  HeapTuple trigtuple;
2746 
2747  Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2748  if (fdw_trigtuple == NULL)
2749  trigtuple = GetTupleForTrigger(estate,
2750  NULL,
2751  relinfo,
2752  tupleid,
2754  NULL);
2755  else
2756  trigtuple = fdw_trigtuple;
2757 
2759  true, trigtuple, newtuple, recheckIndexes,
2760  GetUpdatedColumns(relinfo, estate));
2761  if (trigtuple != fdw_trigtuple)
2762  heap_freetuple(trigtuple);
2763  }
2764 }
2765 
2768  HeapTuple trigtuple, TupleTableSlot *slot)
2769 {
2770  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2771  HeapTuple slottuple = ExecMaterializeSlot(slot);
2772  HeapTuple newtuple = slottuple;
2773  TriggerData LocTriggerData;
2774  HeapTuple oldtuple;
2775  int i;
2776 
2777  LocTriggerData.type = T_TriggerData;
2778  LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2781  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2782  LocTriggerData.tg_oldtable = NULL;
2783  LocTriggerData.tg_newtable = NULL;
2784  for (i = 0; i < trigdesc->numtriggers; i++)
2785  {
2786  Trigger *trigger = &trigdesc->triggers[i];
2787 
2788  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2792  continue;
2793  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2794  NULL, trigtuple, newtuple))
2795  continue;
2796 
2797  LocTriggerData.tg_trigtuple = trigtuple;
2798  LocTriggerData.tg_newtuple = oldtuple = newtuple;
2799  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2800  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2801  LocTriggerData.tg_trigger = trigger;
2802  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2803  i,
2804  relinfo->ri_TrigFunctions,
2805  relinfo->ri_TrigInstrument,
2806  GetPerTupleMemoryContext(estate));
2807  if (oldtuple != newtuple && oldtuple != slottuple)
2808  heap_freetuple(oldtuple);
2809  if (newtuple == NULL)
2810  return NULL; /* "do nothing" */
2811  }
2812 
2813  if (newtuple != slottuple)
2814  {
2815  /*
2816  * Return the modified tuple using the es_trig_tuple_slot. We assume
2817  * the tuple was allocated in per-tuple memory context, and therefore
2818  * will go away by itself. The tuple table slot should not try to
2819  * clear it.
2820  */
2821  TupleTableSlot *newslot = estate->es_trig_tuple_slot;
2822  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
2823 
2824  if (newslot->tts_tupleDescriptor != tupdesc)
2825  ExecSetSlotDescriptor(newslot, tupdesc);
2826  ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
2827  slot = newslot;
2828  }
2829  return slot;
2830 }
2831 
2832 void
2834 {
2835  TriggerDesc *trigdesc;
2836  int i;
2837  TriggerData LocTriggerData;
2838 
2839  trigdesc = relinfo->ri_TrigDesc;
2840 
2841  if (trigdesc == NULL)
2842  return;
2843  if (!trigdesc->trig_truncate_before_statement)
2844  return;
2845 
2846  LocTriggerData.type = T_TriggerData;
2847  LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
2849  LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2850  LocTriggerData.tg_trigtuple = NULL;
2851  LocTriggerData.tg_newtuple = NULL;
2852  LocTriggerData.tg_oldtable = NULL;
2853  LocTriggerData.tg_newtable = NULL;
2854  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
2855  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
2856  for (i = 0; i < trigdesc->numtriggers; i++)
2857  {
2858  Trigger *trigger = &trigdesc->triggers[i];
2859  HeapTuple newtuple;
2860 
2861  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2865  continue;
2866  if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2867  NULL, NULL, NULL))
2868  continue;
2869 
2870  LocTriggerData.tg_trigger = trigger;
2871  newtuple = ExecCallTriggerFunc(&LocTriggerData,
2872  i,
2873  relinfo->ri_TrigFunctions,
2874  relinfo->ri_TrigInstrument,
2875  GetPerTupleMemoryContext(estate));
2876 
2877  if (newtuple)
2878  ereport(ERROR,
2879  (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2880  errmsg("BEFORE STATEMENT trigger cannot return a value")));
2881  }
2882 }
2883 
2884 void
2886 {
2887  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2888 
2889  if (trigdesc && trigdesc->trig_truncate_after_statement)
2891  false, NULL, NULL, NIL, NULL);
2892 }
2893 
2894 
2895 static HeapTuple
2897  EPQState *epqstate,
2898  ResultRelInfo *relinfo,
2899  ItemPointer tid,
2900  LockTupleMode lockmode,
2901  TupleTableSlot **newSlot)
2902 {
2903  Relation relation = relinfo->ri_RelationDesc;
2904  HeapTupleData tuple;
2905  HeapTuple result;
2906  Buffer buffer;
2907 
2908  if (newSlot != NULL)
2909  {
2910  HTSU_Result test;
2911  HeapUpdateFailureData hufd;
2912 
2913  *newSlot = NULL;
2914 
2915  /* caller must pass an epqstate if EvalPlanQual is possible */
2916  Assert(epqstate != NULL);
2917 
2918  /*
2919  * lock tuple for update
2920  */
2921 ltrmark:;
2922  tuple.t_self = *tid;
2923  test = heap_lock_tuple(relation, &tuple,
2924  estate->es_output_cid,
2925  lockmode, LockWaitBlock,
2926  false, &buffer, &hufd);
2927  switch (test)
2928  {
2929  case HeapTupleSelfUpdated:
2930 
2931  /*
2932  * The target tuple was already updated or deleted by the
2933  * current command, or by a later command in the current
2934  * transaction. We ignore the tuple in the former case, and
2935  * throw error in the latter case, for the same reasons
2936  * enumerated in ExecUpdate and ExecDelete in
2937  * nodeModifyTable.c.
2938  */
2939  if (hufd.cmax != estate->es_output_cid)
2940  ereport(ERROR,
2941  (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2942  errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2943  errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2944 
2945  /* treat it as deleted; do not process */
2946  ReleaseBuffer(buffer);
2947  return NULL;
2948 
2949  case HeapTupleMayBeUpdated:
2950  break;
2951 
2952  case HeapTupleUpdated:
2953  ReleaseBuffer(buffer);
2955  ereport(ERROR,
2956  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2957  errmsg("could not serialize access due to concurrent update")));
2958  if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2959  {
2960  /* it was updated, so look at the updated version */
2961  TupleTableSlot *epqslot;
2962 
2963  epqslot = EvalPlanQual(estate,
2964  epqstate,
2965  relation,
2966  relinfo->ri_RangeTableIndex,
2967  lockmode,
2968  &hufd.ctid,
2969  hufd.xmax);
2970  if (!TupIsNull(epqslot))
2971  {
2972  *tid = hufd.ctid;
2973  *newSlot = epqslot;
2974 
2975  /*
2976  * EvalPlanQual already locked the tuple, but we
2977  * re-call heap_lock_tuple anyway as an easy way of
2978  * re-fetching the correct tuple. Speed is hardly a
2979  * criterion in this path anyhow.
2980  */
2981  goto ltrmark;
2982  }
2983  }
2984 
2985  /*
2986  * if tuple was deleted or PlanQual failed for updated tuple -
2987  * we must not process this tuple!
2988  */
2989  return NULL;
2990 
2991  case HeapTupleInvisible:
2992  elog(ERROR, "attempted to lock invisible tuple");
2993 
2994  default:
2995  ReleaseBuffer(buffer);
2996  elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
2997  return NULL; /* keep compiler quiet */
2998  }
2999  }
3000  else
3001  {
3002  Page page;
3003  ItemId lp;
3004 
3005  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3006 
3007  /*
3008  * Although we already know this tuple is valid, we must lock the
3009  * buffer to ensure that no one has a buffer cleanup lock; otherwise
3010  * they might move the tuple while we try to copy it. But we can
3011  * release the lock before actually doing the heap_copytuple call,
3012  * since holding pin is sufficient to prevent anyone from getting a
3013  * cleanup lock they don't already hold.
3014  */
3015  LockBuffer(buffer, BUFFER_LOCK_SHARE);
3016 
3017  page = BufferGetPage(buffer);
3018  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3019 
3020  Assert(ItemIdIsNormal(lp));
3021 
3022  tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3023  tuple.t_len = ItemIdGetLength(lp);
3024  tuple.t_self = *tid;
3025  tuple.t_tableOid = RelationGetRelid(relation);
3026 
3027  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3028  }
3029 
3030  result = heap_copytuple(&tuple);
3031  ReleaseBuffer(buffer);
3032 
3033  return result;
3034 }
3035 
3036 /*
3037  * Is trigger enabled to fire?
3038  */
3039 static bool
3041  Trigger *trigger, TriggerEvent event,
3042  Bitmapset *modifiedCols,
3043  HeapTuple oldtup, HeapTuple newtup)
3044 {
3045  /* Check replication-role-dependent enable state */
3047  {
3048  if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3049  trigger->tgenabled == TRIGGER_DISABLED)
3050  return false;
3051  }
3052  else /* ORIGIN or LOCAL role */
3053  {
3054  if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3055  trigger->tgenabled == TRIGGER_DISABLED)
3056  return false;
3057  }
3058 
3059  /*
3060  * Check for column-specific trigger (only possible for UPDATE, and in
3061  * fact we *must* ignore tgattr for other event types)
3062  */
3063  if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3064  {
3065  int i;
3066  bool modified;
3067 
3068  modified = false;
3069  for (i = 0; i < trigger->tgnattr; i++)
3070  {
3072  modifiedCols))
3073  {
3074  modified = true;
3075  break;
3076  }
3077  }
3078  if (!modified)
3079  return false;
3080  }
3081 
3082  /* Check for WHEN clause */
3083  if (trigger->tgqual)
3084  {
3085  TupleDesc tupdesc = RelationGetDescr(relinfo->ri_RelationDesc);
3086  ExprState **predicate;
3087  ExprContext *econtext;
3088  TupleTableSlot *oldslot = NULL;
3089  TupleTableSlot *newslot = NULL;
3090  MemoryContext oldContext;
3091  int i;
3092 
3093  Assert(estate != NULL);
3094 
3095  /*
3096  * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3097  * matching element of relinfo->ri_TrigWhenExprs[]
3098  */
3099  i = trigger - relinfo->ri_TrigDesc->triggers;
3100  predicate = &relinfo->ri_TrigWhenExprs[i];
3101 
3102  /*
3103  * If first time through for this WHEN expression, build expression
3104  * nodetrees for it. Keep them in the per-query memory context so
3105  * they'll survive throughout the query.
3106  */
3107  if (*predicate == NULL)
3108  {
3109  Node *tgqual;
3110 
3111  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3112  tgqual = stringToNode(trigger->tgqual);
3113  /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3116  /* ExecPrepareQual wants implicit-AND form */
3117  tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3118  *predicate = ExecPrepareQual((List *) tgqual, estate);
3119  MemoryContextSwitchTo(oldContext);
3120  }
3121 
3122  /*
3123  * We will use the EState's per-tuple context for evaluating WHEN
3124  * expressions (creating it if it's not already there).
3125  */
3126  econtext = GetPerTupleExprContext(estate);
3127 
3128  /*
3129  * Put OLD and NEW tuples into tupleslots for expression evaluation.
3130  * These slots can be shared across the whole estate, but be careful
3131  * that they have the current resultrel's tupdesc.
3132  */
3133  if (HeapTupleIsValid(oldtup))
3134  {
3135  if (estate->es_trig_oldtup_slot == NULL)
3136  {
3137  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3138  estate->es_trig_oldtup_slot = ExecInitExtraTupleSlot(estate);
3139  MemoryContextSwitchTo(oldContext);
3140  }
3141  oldslot = estate->es_trig_oldtup_slot;
3142  if (oldslot->tts_tupleDescriptor != tupdesc)
3143  ExecSetSlotDescriptor(oldslot, tupdesc);
3144  ExecStoreTuple(oldtup, oldslot, InvalidBuffer, false);
3145  }
3146  if (HeapTupleIsValid(newtup))
3147  {
3148  if (estate->es_trig_newtup_slot == NULL)
3149  {
3150  oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3151  estate->es_trig_newtup_slot = ExecInitExtraTupleSlot(estate);
3152  MemoryContextSwitchTo(oldContext);
3153  }
3154  newslot = estate->es_trig_newtup_slot;
3155  if (newslot->tts_tupleDescriptor != tupdesc)
3156  ExecSetSlotDescriptor(newslot, tupdesc);
3157  ExecStoreTuple(newtup, newslot, InvalidBuffer, false);
3158  }
3159 
3160  /*
3161  * Finally evaluate the expression, making the old and/or new tuples
3162  * available as INNER_VAR/OUTER_VAR respectively.
3163  */
3164  econtext->ecxt_innertuple = oldslot;
3165  econtext->ecxt_outertuple = newslot;
3166  if (!ExecQual(*predicate, econtext))
3167  return false;
3168  }
3169 
3170  return true;
3171 }
3172 
3173 
3174 /* ----------
3175  * After-trigger stuff
3176  *
3177  * The AfterTriggersData struct holds data about pending AFTER trigger events
3178  * during the current transaction tree. (BEFORE triggers are fired
3179  * immediately so we don't need any persistent state about them.) The struct
3180  * and most of its subsidiary data are kept in TopTransactionContext; however
3181  * the individual event records are kept in a separate sub-context. This is
3182  * done mainly so that it's easy to tell from a memory context dump how much
3183  * space is being eaten by trigger events.
3184  *
3185  * Because the list of pending events can grow large, we go to some
3186  * considerable effort to minimize per-event memory consumption. The event
3187  * records are grouped into chunks and common data for similar events in the
3188  * same chunk is only stored once.
3189  *
3190  * XXX We need to be able to save the per-event data in a file if it grows too
3191  * large.
3192  * ----------
3193  */
3194 
3195 /* Per-trigger SET CONSTRAINT status */
3197 {
3201 
3203 
3204 /*
3205  * SET CONSTRAINT intra-transaction status.
3206  *
3207  * We make this a single palloc'd object so it can be copied and freed easily.
3208  *
3209  * all_isset and all_isdeferred are used to keep track
3210  * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3211  *
3212  * trigstates[] stores per-trigger tgisdeferred settings.
3213  */
3215 {
3218  int numstates; /* number of trigstates[] entries in use */
3219  int numalloc; /* allocated size of trigstates[] */
3220  SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3222 
3224 
3225 
3226 /*
3227  * Per-trigger-event data
3228  *
3229  * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
3230  * status bits and up to two tuple CTIDs. Each event record also has an
3231  * associated AfterTriggerSharedData that is shared across all instances of
3232  * similar events within a "chunk".
3233  *
3234  * For row-level triggers, we arrange not to waste storage on unneeded ctid
3235  * fields. Updates of regular tables use two; inserts and deletes of regular
3236  * tables use one; foreign tables always use zero and save the tuple(s) to a
3237  * tuplestore. AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
3238  * retrieve a fresh tuple or pair of tuples from that tuplestore, while
3239  * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
3240  * tuple(s). This permits storing tuples once regardless of the number of
3241  * row-level triggers on a foreign table.
3242  *
3243  * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
3244  * require no ctid field. We lack the flag bit space to neatly represent that
3245  * distinct case, and it seems unlikely to be worth much trouble.
3246  *
3247  * Note: ats_firing_id is initially zero and is set to something else when
3248  * AFTER_TRIGGER_IN_PROGRESS is set. It indicates which trigger firing
3249  * cycle the trigger will be fired in (or was fired in, if DONE is set).
3250  * Although this is mutable state, we can keep it in AfterTriggerSharedData
3251  * because all instances of the same type of event in a given event list will
3252  * be fired at the same time, if they were queued between the same firing
3253  * cycles. So we need only ensure that ats_firing_id is zero when attaching
3254  * a new event to an existing AfterTriggerSharedData record.
3255  */
3257 
/* Low 28 bits of ate_flags are the byte offset to the shared record. */
3258 #define AFTER_TRIGGER_OFFSET 0x0FFFFFFF /* must be low-order bits */
3259 #define AFTER_TRIGGER_DONE 0x10000000
3260 #define AFTER_TRIGGER_IN_PROGRESS 0x20000000
3261 /* bits describing the size and tuple sources of this event */
3262 #define AFTER_TRIGGER_FDW_REUSE 0x00000000
3263 #define AFTER_TRIGGER_FDW_FETCH 0x80000000
3264 #define AFTER_TRIGGER_1CTID 0x40000000
3265 #define AFTER_TRIGGER_2CTID 0xC0000000
3266 #define AFTER_TRIGGER_TUP_BITS 0xC0000000
3267 
3269 
/* NOTE(review): "typedef struct AfterTriggerSharedData" header line was
 * dropped during extraction. */
3271 {
3272  TriggerEvent ats_event; /* event type indicator, see trigger.h */
3273  Oid ats_tgoid; /* the trigger's ID */
3274  Oid ats_relid; /* the relation it's on */
3275  CommandId ats_firing_id; /* ID for firing cycle */
3277 
3279 
/* NOTE(review): "typedef struct AfterTriggerEventData" header line was
 * dropped during extraction. */
3281 {
3282  TriggerFlags ate_flags; /* status bits and offset to shared data */
3283  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3284  ItemPointerData ate_ctid2; /* new updated tuple */
3286 
3287 /* AfterTriggerEventData, minus ate_ctid2 */
/* NOTE(review): "typedef struct AfterTriggerEventDataOneCtid" header line was
 * dropped during extraction. */
3289 {
3290  TriggerFlags ate_flags; /* status bits and offset to shared data */
3291  ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
3293 
3294 /* AfterTriggerEventData, minus ate_ctid1 and ate_ctid2 */
/* NOTE(review): "typedef struct AfterTriggerEventDataZeroCtids" header line
 * was dropped during extraction. */
3296 {
3297  TriggerFlags ate_flags; /* status bits and offset to shared data */
3299 
/* Actual storage size of an event record, per its TUP_BITS flag setting. */
3300 #define SizeofTriggerEvent(evt) \
3301  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
3302  sizeof(AfterTriggerEventData) : \
3303  ((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
3304  sizeof(AfterTriggerEventDataOneCtid) : \
3305  sizeof(AfterTriggerEventDataZeroCtids))
3306 
/* Follow the offset stored in ate_flags to the event's shared record. */
3307 #define GetTriggerSharedData(evt) \
3308  ((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3309 
3310 /*
3311  * To avoid palloc overhead, we keep trigger events in arrays in successively-
3312  * larger chunks (a slightly more sophisticated version of an expansible
3313  * array). The space between CHUNK_DATA_START and freeptr is occupied by
3314  * AfterTriggerEventData records; the space between endfree and endptr is
3315  * occupied by AfterTriggerSharedData records.
3316  */
/* NOTE(review): "typedef struct AfterTriggerEventChunk" header line was
 * dropped during extraction. */
3318 {
3319  struct AfterTriggerEventChunk *next; /* list link */
3320  char *freeptr; /* start of free space in chunk */
3321  char *endfree; /* end of free space in chunk */
3322  char *endptr; /* end of chunk */
3323  /* event data follows here */
3325 
3326 #define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))
3327 
3328 /* A list of events */
/* NOTE(review): "typedef struct AfterTriggerEventList" header line plus the
 * head/tail chunk-pointer members were dropped during extraction; the code
 * below references events->head and events->tail. */
3330 {
3333  char *tailfree; /* freeptr of tail chunk */
3335 
3336 /* Macros to help in iterating over a list of events */
3337 #define for_each_chunk(cptr, evtlist) \
3338  for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
3339 #define for_each_event(eptr, cptr) \
3340  for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
3341  (char *) eptr < (cptr)->freeptr; \
3342  eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3343 /* Use this if no special per-chunk processing is needed */
3344 #define for_each_event_chunk(eptr, cptr, evtlist) \
3345  for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)
3346 
3347 
3348 /*
3349  * All per-transaction data for the AFTER TRIGGERS module.
3350  *
3351  * AfterTriggersData has the following fields:
3352  *
3353  * firing_counter is incremented for each call of afterTriggerInvokeEvents.
3354  * We mark firable events with the current firing cycle's ID so that we can
3355  * tell which ones to work on. This ensures sane behavior if a trigger
3356  * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
3357  * only fire those events that weren't already scheduled for firing.
3358  *
3359  * state keeps track of the transaction-local effects of SET CONSTRAINTS.
3360  * This is saved and restored across failed subtransactions.
3361  *
3362  * events is the current list of deferred events. This is global across
3363  * all subtransactions of the current transaction. In a subtransaction
3364  * abort, we know that the events added by the subtransaction are at the
3365  * end of the list, so it is relatively easy to discard them. The event
3366  * list chunks themselves are stored in event_cxt.
3367  *
3368  * query_depth is the current depth of nested AfterTriggerBeginQuery calls
3369  * (-1 when the stack is empty).
3370  *
3371  * query_stack[query_depth] is a list of AFTER trigger events queued by the
3372  * current query (and the query_stack entries below it are lists of trigger
3373  * events queued by calling queries). None of these are valid until the
3374  * matching AfterTriggerEndQuery call occurs. At that point we fire
3375  * immediate-mode triggers, and append any deferred events to the main events
3376  * list.
3377  *
3378  * fdw_tuplestores[query_depth] is a tuplestore containing the foreign tuples
3379  * needed for the current query.
3380  *
3381  * old_tuplestores[query_depth] and new_tuplestores[query_depth] hold the
3382  * transition relations for the current query.
3383  *
3384  * maxquerydepth is just the allocated length of query_stack and the
3385  * tuplestores.
3386  *
3387  * state_stack is a stack of pointers to saved copies of the SET CONSTRAINTS
3388  * state data; each subtransaction level that modifies that state first
3389  * saves a copy, which we use to restore the state if we abort.
3390  *
3391  * events_stack is a stack of copies of the events head/tail pointers,
3392  * which we use to restore those values during subtransaction abort.
3393  *
3394  * depth_stack is a stack of copies of subtransaction-start-time query_depth,
3395  * which we similarly use to clean up at subtransaction abort.
3396  *
3397  * firing_stack is a stack of copies of subtransaction-start-time
3398  * firing_counter. We use this to recognize which deferred triggers were
3399  * fired (or marked for firing) within an aborted subtransaction.
3400  *
3401  * We use GetCurrentTransactionNestLevel() to determine the correct array
3402  * index in these stacks. maxtransdepth is the number of allocated entries in
3403  * each stack. (By not keeping our own stack pointer, we can avoid trouble
3404  * in cases where errors during subxact abort cause multiple invocations
3405  * of AfterTriggerEndSubXact() at the same nesting depth.)
3406  */
3407 typedef struct AfterTriggersData
3408 {
3409  CommandId firing_counter; /* next firing ID to assign */
3410  SetConstraintState state; /* the active S C state */
3411  AfterTriggerEventList events; /* deferred-event list */
3412  int query_depth; /* current query list index */
3413  AfterTriggerEventList *query_stack; /* events pending from each query */
3414  Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from
3415  * each query */
3416  Tuplestorestate **old_tuplestores; /* all old tuples from each query */
3417  Tuplestorestate **new_tuplestores; /* all new tuples from each query */
3418  int maxquerydepth; /* allocated len of above array */
3419  MemoryContext event_cxt; /* memory context for events, if any */
3420 
3421  /* these fields are just for resetting at subtrans abort: */
3422 
3423  SetConstraintState *state_stack; /* stacked S C states */
3424  AfterTriggerEventList *events_stack; /* stacked list pointers */
3425  int *depth_stack; /* stacked query_depths */
3426  CommandId *firing_stack; /* stacked firing_counters */
3427  int maxtransdepth; /* allocated len of above arrays */
/*
 * NOTE(review): the closing "} AfterTriggersData;" line and the definition of
 * the file-static instance were dropped during extraction; the functions
 * below all reference a static variable named "afterTriggers" of this type.
 */
3429 
3431 
/* Forward declarations of functions local to this section of the file. */
3432 static void AfterTriggerExecute(AfterTriggerEvent event,
3433  Relation rel, TriggerDesc *trigdesc,
3434  FmgrInfo *finfo,
3435  Instrumentation *instr,
3436  MemoryContext per_tuple_context,
3437  TupleTableSlot *trig_tuple_slot1,
3438  TupleTableSlot *trig_tuple_slot2);
3439 static SetConstraintState SetConstraintStateCreate(int numalloc);
3440 static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
3441 static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
3442  Oid tgoid, bool tgisdeferred);
3443 
3444 
3444 
3445 /*
3446  * Gets a current query transition tuplestore and initializes it if necessary.
3447  * This can be holding a single transition row tuple (in the case of an FDW)
3448  * or a transition table (for an AFTER trigger).
3449  */
3450 static Tuplestorestate *
3452 {
3453  Tuplestorestate *ret;
3454 
3455  ret = tss[afterTriggers.query_depth];
3456  if (ret == NULL)
3457  {
3458  MemoryContext oldcxt;
3459  ResourceOwner saveResourceOwner;
3460 
3461  /*
3462  * Make the tuplestore valid until end of transaction. This is the
3463  * allocation lifespan of the associated events list, but we really
3464  * only need it until AfterTriggerEndQuery().
3465  */
3467  saveResourceOwner = CurrentResourceOwner;
3468  PG_TRY();
3469  {
3471  ret = tuplestore_begin_heap(false, false, work_mem);
3472  }
3473  PG_CATCH();
3474  {
3475  CurrentResourceOwner = saveResourceOwner;
3476  PG_RE_THROW();
3477  }
3478  PG_END_TRY();
3479  CurrentResourceOwner = saveResourceOwner;
3480  MemoryContextSwitchTo(oldcxt);
3481 
3482  tss[afterTriggers.query_depth] = ret;
3483  }
3484 
3485  return ret;
3486 }
3487 
3488 /* ----------
3489  * afterTriggerCheckState()
3490  *
3491  * Returns true if the trigger event is actually in state DEFERRED.
3492  * ----------
3493  */
3494 static bool
3495 afterTriggerCheckState(AfterTriggerShared evtshared)
3496 {
3497  Oid tgoid = evtshared->ats_tgoid;
3498  SetConstraintState state = afterTriggers.state;
3499  int i;
3500 
3501  /*
3502  * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3503  * constraints declared NOT DEFERRABLE), the state is always false.
3504  */
3505  if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3506  return false;
3507 
3508  /*
3509  * If constraint state exists, SET CONSTRAINTS might have been executed
3510  * either for this trigger or for all triggers.
3511  */
3512  if (state != NULL)
3513  {
3514  /* Check for SET CONSTRAINTS for this specific trigger. */
3515  for (i = 0; i < state->numstates; i++)
3516  {
3517  if (state->trigstates[i].sct_tgoid == tgoid)
3518  return state->trigstates[i].sct_tgisdeferred;
3519  }
3520 
3521  /* Check for SET CONSTRAINTS ALL. */
3522  if (state->all_isset)
3523  return state->all_isdeferred;
3524  }
3525 
3526  /*
3527  * Otherwise return the default state for the trigger.
3528  */
3529  return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3530 }
3531 
3532 
3533 /* ----------
3534  * afterTriggerAddEvent()
3535  *
3536  * Add a new trigger event to the specified queue.
3537  * The passed-in event data is copied.
3538  * ----------
3539  */
3540 static void
3542  AfterTriggerEvent event, AfterTriggerShared evtshared)
3543 {
3544  Size eventsize = SizeofTriggerEvent(event);
3545  Size needed = eventsize + sizeof(AfterTriggerSharedData);
3546  AfterTriggerEventChunk *chunk;
3547  AfterTriggerShared newshared;
3548  AfterTriggerEvent newevent;
3549 
3550  /*
3551  * If empty list or not enough room in the tail chunk, make a new chunk.
3552  * We assume here that a new shared record will always be needed.
3553  */
3554  chunk = events->tail;
3555  if (chunk == NULL ||
3556  chunk->endfree - chunk->freeptr < needed)
3557  {
3558  Size chunksize;
3559 
3560  /* Create event context if we didn't already */
3561  if (afterTriggers.event_cxt == NULL)
3562  afterTriggers.event_cxt =
3564  "AfterTriggerEvents",
3566 
3567  /*
3568  * Chunk size starts at 1KB and is allowed to increase up to 1MB.
3569  * These numbers are fairly arbitrary, though there is a hard limit at
3570  * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
3571  * shared records using the available space in ate_flags. Another
3572  * constraint is that if the chunk size gets too huge, the search loop
3573  * below would get slow given a (not too common) usage pattern with
3574  * many distinct event types in a chunk. Therefore, we double the
3575  * preceding chunk size only if there weren't too many shared records
3576  * in the preceding chunk; otherwise we halve it. This gives us some
3577  * ability to adapt to the actual usage pattern of the current query
3578  * while still having large chunk sizes in typical usage. All chunk
3579  * sizes used should be MAXALIGN multiples, to ensure that the shared
3580  * records will be aligned safely.
3581  */
3582 #define MIN_CHUNK_SIZE 1024
3583 #define MAX_CHUNK_SIZE (1024*1024)
3584 
3585 #if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
3586 #error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
3587 #endif
3588 
3589  if (chunk == NULL)
3590  chunksize = MIN_CHUNK_SIZE;
3591  else
3592  {
3593  /* preceding chunk size... */
3594  chunksize = chunk->endptr - (char *) chunk;
3595  /* check number of shared records in preceding chunk */
3596  if ((chunk->endptr - chunk->endfree) <=
3597  (100 * sizeof(AfterTriggerSharedData)))
3598  chunksize *= 2; /* okay, double it */
3599  else
3600  chunksize /= 2; /* too many shared records */
3601  chunksize = Min(chunksize, MAX_CHUNK_SIZE);
3602  }
3603  chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
3604  chunk->next = NULL;
3605  chunk->freeptr = CHUNK_DATA_START(chunk);
3606  chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
3607  Assert(chunk->endfree - chunk->freeptr >= needed);
3608 
3609  if (events->head == NULL)
3610  events->head = chunk;
3611  else
3612  events->tail->next = chunk;
3613  events->tail = chunk;
3614  /* events->tailfree is now out of sync, but we'll fix it below */
3615  }
3616 
3617  /*
3618  * Try to locate a matching shared-data record already in the chunk. If
3619  * none, make a new one.
3620  */
3621  for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
3622  (char *) newshared >= chunk->endfree;
3623  newshared--)
3624  {
3625  if (newshared->ats_tgoid == evtshared->ats_tgoid &&
3626  newshared->ats_relid == evtshared->ats_relid &&
3627  newshared->ats_event == evtshared->ats_event &&
3628  newshared->ats_firing_id == 0)
3629  break;
3630  }
3631  if ((char *) newshared < chunk->endfree)
3632  {
3633  *newshared = *evtshared;
3634  newshared->ats_firing_id = 0; /* just to be sure */
3635  chunk->endfree = (char *) newshared;
3636  }
3637 
3638  /* Insert the data */
3639  newevent = (AfterTriggerEvent) chunk->freeptr;
3640  memcpy(newevent, event, eventsize);
3641  /* ... and link the new event to its shared record */
3642  newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
3643  newevent->ate_flags |= (char *) newshared - (char *) newevent;
3644 
3645  chunk->freeptr += eventsize;
3646  events->tailfree = chunk->freeptr;
3647 }
3648 
3649 /* ----------
3650  * afterTriggerFreeEventList()
3651  *
3652  * Free all the event storage in the given list.
3653  * ----------
3654  */
3655 static void
3657 {
3658  AfterTriggerEventChunk *chunk;
3659  AfterTriggerEventChunk *next_chunk;
3660 
3661  for (chunk = events->head; chunk != NULL; chunk = next_chunk)
3662  {
3663  next_chunk = chunk->next;
3664  pfree(chunk);
3665  }
3666  events->head = NULL;
3667  events->tail = NULL;
3668  events->tailfree = NULL;
3669 }
3670 
3671 /* ----------
3672  * afterTriggerRestoreEventList()
3673  *
3674  * Restore an event list to its prior length, removing all the events
3675  * added since it had the value old_events.
3676  * ----------
3677  */
3678 static void
3680  const AfterTriggerEventList *old_events)
3681 {
3682  AfterTriggerEventChunk *chunk;
3683  AfterTriggerEventChunk *next_chunk;
3684 
3685  if (old_events->tail == NULL)
3686  {
3687  /* restoring to a completely empty state, so free everything */
3688  afterTriggerFreeEventList(events);
3689  }
3690  else
3691  {
3692  *events = *old_events;
3693  /* free any chunks after the last one we want to keep */
3694  for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
3695  {
3696  next_chunk = chunk->next;
3697  pfree(chunk);
3698  }
3699  /* and clean up the tail chunk to be the right length */
3700  events->tail->next = NULL;
3701  events->tail->freeptr = events->tailfree;
3702 
3703  /*
3704  * We don't make any effort to remove now-unused shared data records.
3705  * They might still be useful, anyway.
3706  */
3707  }
3708 }
3709 
3710 
3711 /* ----------
3712  * AfterTriggerExecute()
3713  *
3714  * Fetch the required tuples back from the heap and fire one
3715  * single trigger function.
3716  *
3717  * Frequently, this will be fired many times in a row for triggers of
3718  * a single relation. Therefore, we cache the open relation and provide
3719  * fmgr lookup cache space at the caller level. (For triggers fired at
3720  * the end of a query, we can even piggyback on the executor's state.)
3721  *
3722  * event: event currently being fired.
3723  * rel: open relation for event.
3724  * trigdesc: working copy of rel's trigger info.
3725  * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
3726  * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
3727  * or NULL if no instrumentation is wanted.
3728  * per_tuple_context: memory context to call trigger function in.
3729  * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
3730  * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
3731  * ----------
3732  */
3733 static void
3734 AfterTriggerExecute(AfterTriggerEvent event,
3735  Relation rel, TriggerDesc *trigdesc,
3736  FmgrInfo *finfo, Instrumentation *instr,
3737  MemoryContext per_tuple_context,
3738  TupleTableSlot *trig_tuple_slot1,
3739  TupleTableSlot *trig_tuple_slot2)
3740 {
3741  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3742  Oid tgoid = evtshared->ats_tgoid;
3743  TriggerData LocTriggerData;
3744  HeapTupleData tuple1;
3745  HeapTupleData tuple2;
3746  HeapTuple rettuple;
3747  Buffer buffer1 = InvalidBuffer;
3748  Buffer buffer2 = InvalidBuffer;
3749  int tgindx;
3750 
3751  /*
3752  * Locate trigger in trigdesc.
3753  */
3754  LocTriggerData.tg_trigger = NULL;
3755  for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
3756  {
3757  if (trigdesc->triggers[tgindx].tgoid == tgoid)
3758  {
3759  LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
3760  break;
3761  }
3762  }
3763  if (LocTriggerData.tg_trigger == NULL)
3764  elog(ERROR, "could not find trigger %u", tgoid);
3765 
3766  /*
3767  * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
3768  * to include time spent re-fetching tuples in the trigger cost.
3769  */
3770  if (instr)
3771  InstrStartNode(instr + tgindx);
3772 
3773  /*
3774  * Fetch the required tuple(s).
3775  */
3776  switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
3777  {
3779  {
3780  Tuplestorestate *fdw_tuplestore =
3782  (afterTriggers.fdw_tuplestores);
3783 
3784  if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
3785  trig_tuple_slot1))
3786  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3787 
3788  if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3790  !tuplestore_gettupleslot(fdw_tuplestore, true, false,
3791  trig_tuple_slot2))
3792  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3793  }
3794  /* fall through */
3796 
3797  /*
3798  * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple()
3799  * ensures that tg_trigtuple does not reference tuplestore memory.
3800  * (It is formally possible for the trigger function to queue
3801  * trigger events that add to the same tuplestore, which can push
3802  * other tuples out of memory.) The distinction is academic,
3803  * because we start with a minimal tuple that ExecFetchSlotTuple()
3804  * must materialize anyway.
3805  */
3806  LocTriggerData.tg_trigtuple =
3807  ExecMaterializeSlot(trig_tuple_slot1);
3808  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3809 
3810  LocTriggerData.tg_newtuple =
3811  ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
3813  ExecMaterializeSlot(trig_tuple_slot2) : NULL;
3814  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3815 
3816  break;
3817 
3818  default:
3819  if (ItemPointerIsValid(&(event->ate_ctid1)))
3820  {
3821  ItemPointerCopy(&(event->ate_ctid1), &(tuple1.t_self));
3822  if (!heap_fetch(rel, SnapshotAny, &tuple1, &buffer1, false, NULL))
3823  elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
3824  LocTriggerData.tg_trigtuple = &tuple1;
3825  LocTriggerData.tg_trigtuplebuf = buffer1;
3826  }
3827  else
3828  {
3829  LocTriggerData.tg_trigtuple = NULL;
3830  LocTriggerData.tg_trigtuplebuf = InvalidBuffer;
3831  }
3832 
3833  /* don't touch ctid2 if not there */
3834  if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
3836  ItemPointerIsValid(&(event->ate_ctid2)))
3837  {
3838  ItemPointerCopy(&(event->ate_ctid2), &(tuple2.t_self));
3839  if (!heap_fetch(rel, SnapshotAny, &tuple2, &buffer2, false, NULL))
3840  elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
3841  LocTriggerData.tg_newtuple = &tuple2;
3842  LocTriggerData.tg_newtuplebuf = buffer2;
3843  }
3844  else
3845  {
3846  LocTriggerData.tg_newtuple = NULL;
3847  LocTriggerData.tg_newtuplebuf = InvalidBuffer;
3848  }
3849  }
3850 
3851  /*
3852  * Set up the tuplestore information.
3853  */
3854  if (LocTriggerData.tg_trigger->tgoldtable)
3855  LocTriggerData.tg_oldtable =
3857  else
3858  LocTriggerData.tg_oldtable = NULL;
3859  if (LocTriggerData.tg_trigger->tgnewtable)
3860  LocTriggerData.tg_newtable =
3862  else
3863  LocTriggerData.tg_newtable = NULL;
3864 
3865  /*
3866  * Setup the remaining trigger information
3867  */
3868  LocTriggerData.type = T_TriggerData;
3869  LocTriggerData.tg_event =
3871  LocTriggerData.tg_relation = rel;
3872 
3873  MemoryContextReset(per_tuple_context);
3874 
3875  /*
3876  * Call the trigger and throw away any possibly returned updated tuple.
3877  * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
3878  */
3879  rettuple = ExecCallTriggerFunc(&LocTriggerData,
3880  tgindx,
3881  finfo,
3882  NULL,
3883  per_tuple_context);
3884  if (rettuple != NULL &&
3885  rettuple != LocTriggerData.tg_trigtuple &&
3886  rettuple != LocTriggerData.tg_newtuple)
3887  heap_freetuple(rettuple);
3888 
3889  /*
3890  * Release buffers
3891  */
3892  if (buffer1 != InvalidBuffer)
3893  ReleaseBuffer(buffer1);
3894  if (buffer2 != InvalidBuffer)
3895  ReleaseBuffer(buffer2);
3896 
3897  /*
3898  * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
3899  * one "tuple returned" (really the number of firings).
3900  */
3901  if (instr)
3902  InstrStopNode(instr + tgindx, 1);
3903 }
3904 
3905 
3906 /*
3907  * afterTriggerMarkEvents()
3908  *
3909  * Scan the given event list for not yet invoked events. Mark the ones
3910  * that can be invoked now with the current firing ID.
3911  *
3912  * If move_list isn't NULL, events that are not to be invoked now are
3913  * transferred to move_list.
3914  *
3915  * When immediate_only is TRUE, do not invoke currently-deferred triggers.
3916  * (This will be FALSE only at main transaction exit.)
3917  *
3918  * Returns TRUE if any invokable events were found.
3919  */
3920 static bool
3922  AfterTriggerEventList *move_list,
3923  bool immediate_only)
3924 {
3925  bool found = false;
3926  AfterTriggerEvent event;
3927  AfterTriggerEventChunk *chunk;
3928 
3929  for_each_event_chunk(event, chunk, *events)
3930  {
3931  AfterTriggerShared evtshared = GetTriggerSharedData(event);
3932  bool defer_it = false;
3933 
3934  if (!(event->ate_flags &
3936  {
3937  /*
3938  * This trigger hasn't been called or scheduled yet. Check if we
3939  * should call it now.
3940  */
3941  if (immediate_only && afterTriggerCheckState(evtshared))
3942  {
3943  defer_it = true;
3944  }
3945  else
3946  {
3947  /*
3948  * Mark it as to be fired in this firing cycle.
3949  */
3950  evtshared->ats_firing_id = afterTriggers.firing_counter;
3951  event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
3952  found = true;
3953  }
3954  }
3955 
3956  /*
3957  * If it's deferred, move it to move_list, if requested.
3958  */
3959  if (defer_it && move_list != NULL)
3960  {
3961  /* add it to move_list */
3962  afterTriggerAddEvent(move_list, event, evtshared);
3963  /* mark original copy "done" so we don't do it again */
3964  event->ate_flags |= AFTER_TRIGGER_DONE;
3965  }
3966  }
3967 
3968  return found;
3969 }
3970 
3971 /*
3972  * afterTriggerInvokeEvents()
3973  *
3974  * Scan the given event list for events that are marked as to be fired
3975  * in the current firing cycle, and fire them.
3976  *
3977  * If estate isn't NULL, we use its result relation info to avoid repeated
3978  * openings and closing of trigger target relations. If it is NULL, we
3979  * make one locally to cache the info in case there are multiple trigger
3980  * events per rel.
3981  *
3982  * When delete_ok is TRUE, it's safe to delete fully-processed events.
3983  * (We are not very tense about that: we simply reset a chunk to be empty
3984  * if all its events got fired. The objective here is just to avoid useless
3985  * rescanning of events when a trigger queues new events during transaction
3986  * end, so it's not necessary to worry much about the case where only
3987  * some events are fired.)
3988  *
3989  * Returns TRUE if no unfired events remain in the list (this allows us
3990  * to avoid repeating afterTriggerMarkEvents).
3991  */
3992 static bool
3994  CommandId firing_id,
3995  EState *estate,
3996  bool delete_ok)
3997 {
3998  bool all_fired = true;
3999  AfterTriggerEventChunk *chunk;
4000  MemoryContext per_tuple_context;
4001  bool local_estate = false;
4002  Relation rel = NULL;
4003  TriggerDesc *trigdesc = NULL;
4004  FmgrInfo *finfo = NULL;
4005  Instrumentation *instr = NULL;
4006  TupleTableSlot *slot1 = NULL,
4007  *slot2 = NULL;
4008 
4009  /* Make a local EState if need be */
4010  if (estate == NULL)
4011  {
4012  estate = CreateExecutorState();
4013  local_estate = true;
4014  }
4015 
4016  /* Make a per-tuple memory context for trigger function calls */
4017  per_tuple_context =
4019  "AfterTriggerTupleContext",
4021 
4022  for_each_chunk(chunk, *events)
4023  {
4024  AfterTriggerEvent event;
4025  bool all_fired_in_chunk = true;
4026 
4027  for_each_event(event, chunk)
4028  {
4029  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4030 
4031  /*
4032  * Is it one for me to fire?
4033  */
4034  if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
4035  evtshared->ats_firing_id == firing_id)
4036  {
4037  /*
4038  * So let's fire it... but first, find the correct relation if
4039  * this is not the same relation as before.
4040  */
4041  if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
4042  {
4043  ResultRelInfo *rInfo;
4044 
4045  rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid);
4046  rel = rInfo->ri_RelationDesc;
4047  trigdesc = rInfo->ri_TrigDesc;
4048  finfo = rInfo->ri_TrigFunctions;
4049  instr = rInfo->ri_TrigInstrument;
4050  if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
4051  {
4052  if (slot1 != NULL)
4053  {
4056  }
4057  slot1 = MakeSingleTupleTableSlot(rel->rd_att);
4058  slot2 = MakeSingleTupleTableSlot(rel->rd_att);
4059  }
4060  if (trigdesc == NULL) /* should not happen */
4061  elog(ERROR, "relation %u has no triggers",
4062  evtshared->ats_relid);
4063  }
4064 
4065  /*
4066  * Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is
4067  * still set, so recursive examinations of the event list
4068  * won't try to re-fire it.
4069  */
4070  AfterTriggerExecute(event, rel, trigdesc, finfo, instr,
4071  per_tuple_context, slot1, slot2);
4072 
4073  /*
4074  * Mark the event as done.
4075  */
4076  event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
4077  event->ate_flags |= AFTER_TRIGGER_DONE;
4078  }
4079  else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
4080  {
4081  /* something remains to be done */
4082  all_fired = all_fired_in_chunk = false;
4083  }
4084  }
4085 
4086  /* Clear the chunk if delete_ok and nothing left of interest */
4087  if (delete_ok && all_fired_in_chunk)
4088  {
4089  chunk->freeptr = CHUNK_DATA_START(chunk);
4090  chunk->endfree = chunk->endptr;
4091 
4092  /*
4093  * If it's last chunk, must sync event list's tailfree too. Note
4094  * that delete_ok must NOT be passed as true if there could be
4095  * stacked AfterTriggerEventList values pointing at this event
4096  * list, since we'd fail to fix their copies of tailfree.
4097  */
4098  if (chunk == events->tail)
4099  events->tailfree = chunk->freeptr;
4100  }
4101  }
4102  if (slot1 != NULL)
4103  {
4106  }
4107 
4108  /* Release working resources */
4109  MemoryContextDelete(per_tuple_context);
4110 
4111  if (local_estate)
4112  {
4113  ExecCleanUpTriggerState(estate);
4114  FreeExecutorState(estate);
4115  }
4116 
4117  return all_fired;
4118 }
4119 
4120 
4121 /* ----------
4122  * AfterTriggerBeginXact()
4123  *
4124  * Called at transaction start (either BEGIN or implicit for single
4125  * statement outside of transaction block).
4126  * ----------
4127  */
4128 void
4130 {
4131  /*
4132  * Initialize after-trigger state structure to empty
4133  */
4134  afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4135  afterTriggers.query_depth = -1;
4136 
4137  /*
4138  * Verify that there is no leftover state remaining. If these assertions
4139  * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
4140  * up properly.
4141  */
4142  Assert(afterTriggers.state == NULL);
4143  Assert(afterTriggers.query_stack == NULL);
4144  Assert(afterTriggers.fdw_tuplestores == NULL);
4145  Assert(afterTriggers.old_tuplestores == NULL);
4146  Assert(afterTriggers.new_tuplestores == NULL);
4147  Assert(afterTriggers.maxquerydepth == 0);
4148  Assert(afterTriggers.event_cxt == NULL);
4149  Assert(afterTriggers.events.head == NULL);
4150  Assert(afterTriggers.state_stack == NULL);
4151  Assert(afterTriggers.events_stack == NULL);
4152  Assert(afterTriggers.depth_stack == NULL);
4153  Assert(afterTriggers.firing_stack == NULL);
4154  Assert(afterTriggers.maxtransdepth == 0);
4155 }
4156 
4157 
4158 /* ----------
4159  * AfterTriggerBeginQuery()
4160  *
4161  * Called just before we start processing a single query within a
4162  * transaction (or subtransaction). Most of the real work gets deferred
4163  * until somebody actually tries to queue a trigger event.
4164  * ----------
4165  */
4166 void
4168 {
4169  /* Increase the query stack depth */
4170  afterTriggers.query_depth++;
4171 }
4172 
4173 
4174 /* ----------
4175  * AfterTriggerEndQuery()
4176  *
4177  * Called after one query has been completely processed. At this time
4178  * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
4179  * transfer deferred trigger events to the global deferred-trigger list.
4180  *
4181  * Note that this must be called BEFORE closing down the executor
4182  * with ExecutorEnd, because we make use of the EState's info about
4183  * target relations. Normally it is called from ExecutorFinish.
4184  * ----------
4185  */
4186 void
/* NOTE(review): signature line (4187) elided by extraction; the parameter is
 * an EState *estate — it is passed to afterTriggerInvokeEvents below. */
4188 {
4189  AfterTriggerEventList *events;
4190  Tuplestorestate *fdw_tuplestore;
4191  Tuplestorestate *old_tuplestore;
4192  Tuplestorestate *new_tuplestore;
4193 
4194  /* Must be inside a query, too */
4195  Assert(afterTriggers.query_depth >= 0);
4196 
4197  /*
4198  * If we never even got as far as initializing the event stack, there
4199  * certainly won't be any events, so exit quickly.
4200  */
4201  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
4202  {
4203  afterTriggers.query_depth--;
4204  return;
4205  }
4206 
4207  /*
4208  * Process all immediate-mode triggers queued by the query, and move the
4209  * deferred ones to the main list of deferred events.
4210  *
4211  * Notice that we decide which ones will be fired, and put the deferred
4212  * ones on the main list, before anything is actually fired. This ensures
4213  * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
4214  * IMMEDIATE: all events we have decided to defer will be available for it
4215  * to fire.
4216  *
4217  * We loop in case a trigger queues more events at the same query level.
4218  * Ordinary trigger functions, including all PL/pgSQL trigger functions,
4219  * will instead fire any triggers in a dedicated query level. Foreign key
4220  * enforcement triggers do add to the current query level, thanks to their
4221  * passing fire_triggers = false to SPI_execute_snapshot(). Other
4222  * C-language triggers might do likewise. Be careful here: firing a
4223  * trigger could result in query_stack being repalloc'd, so we can't save
4224  * its address across afterTriggerInvokeEvents calls.
4225  *
4226  * If we find no firable events, we don't have to increment
4227  * firing_counter.
4228  */
4229  for (;;)
4230  {
 /* Re-fetch the list pointer each iteration: query_stack may move. */
4231  events = &afterTriggers.query_stack[afterTriggers.query_depth];
4232  if (afterTriggerMarkEvents(events, &afterTriggers.events, true))
4233  {
4234  CommandId firing_id = afterTriggers.firing_counter++;
4235 
4236  /* OK to delete the immediate events after processing them */
4237  if (afterTriggerInvokeEvents(events, firing_id, estate, true))
4238  break; /* all fired */
4239  }
4240  else
4241  break;
4242  }
4243 
4244  /* Release query-local storage for events, including tuplestore if any */
4245  fdw_tuplestore = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4246  if (fdw_tuplestore)
4247  {
4248  tuplestore_end(fdw_tuplestore);
4249  afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4250  }
4251  old_tuplestore = afterTriggers.old_tuplestores[afterTriggers.query_depth];
4252  if (old_tuplestore)
4253  {
4254  tuplestore_end(old_tuplestore);
4255  afterTriggers.old_tuplestores[afterTriggers.query_depth] = NULL;
4256  }
4257  new_tuplestore = afterTriggers.new_tuplestores[afterTriggers.query_depth];
4258  if (new_tuplestore)
4259  {
4260  tuplestore_end(new_tuplestore);
4261  afterTriggers.new_tuplestores[afterTriggers.query_depth] = NULL;
4262  }
4263  afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4264 
4265  afterTriggers.query_depth--;
4266 }
4267 
4268 
4269 /* ----------
4270  * AfterTriggerFireDeferred()
4271  *
4272  * Called just before the current transaction is committed. At this
4273  * time we invoke all pending DEFERRED triggers.
4274  *
4275  * It is possible for other modules to queue additional deferred triggers
4276  * during pre-commit processing; therefore xact.c may have to call this
4277  * multiple times.
4278  * ----------
4279  */
4280 void
/* NOTE(review): signature line (4281, AfterTriggerFireDeferred(void)) elided
 * in this listing. */
4282 {
4283  AfterTriggerEventList *events;
4284  bool snap_pushed = false;
4285 
4286  /* Must not be inside a query */
4287  Assert(afterTriggers.query_depth == -1);
4288 
4289  /*
4290  * If there are any triggers to fire, make sure we have set a snapshot for
4291  * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
4292  * can't assume ActiveSnapshot is valid on entry.)
4293  */
4294  events = &afterTriggers.events;
4295  if (events->head != NULL)
4296  {
 /* NOTE(review): line 4297 elided — per the comment above it pushes an
  * active snapshot (presumably PushActiveSnapshot(GetTransactionSnapshot());
  * confirm against the full source). */
4298  snap_pushed = true;
4299  }
4300 
4301  /*
4302  * Run all the remaining triggers. Loop until they are all gone, in case
4303  * some trigger queues more for us to do.
4304  */
4305  while (afterTriggerMarkEvents(events, NULL, false))
4306  {
4307  CommandId firing_id = afterTriggers.firing_counter++;
4308 
4309  if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
4310  break; /* all fired */
4311  }
4312 
4313  /*
4314  * We don't bother freeing the event list, since it will go away anyway
4315  * (and more efficiently than via pfree) in AfterTriggerEndXact.
4316  */
4317 
4318  if (snap_pushed)
 /* NOTE(review): line 4319 elided — the matching snapshot pop
  * (presumably PopActiveSnapshot(); confirm against the full source). */
4320 }
4321 
4322 
4323 /* ----------
4324  * AfterTriggerEndXact()
4325  *
4326  * The current transaction is finishing.
4327  *
4328  * Any unfired triggers are canceled so we simply throw
4329  * away anything we know.
4330  *
4331  * Note: it is possible for this to be called repeatedly in case of
4332  * error during transaction abort; therefore, do not complain if
4333  * already closed down.
4334  * ----------
4335  */
4336 void
4337 AfterTriggerEndXact(bool isCommit)
4338 {
4339  /*
4340  * Forget the pending-events list.
4341  *
4342  * Since all the info is in TopTransactionContext or children thereof, we
4343  * don't really need to do anything to reclaim memory. However, the
4344  * pending-events list could be large, and so it's useful to discard it as
4345  * soon as possible --- especially if we are aborting because we ran out
4346  * of memory for the list!
4347  */
4348  if (afterTriggers.event_cxt)
4349  {
4350  MemoryContextDelete(afterTriggers.event_cxt);
4351  afterTriggers.event_cxt = NULL;
4352  afterTriggers.events.head = NULL;
4353  afterTriggers.events.tail = NULL;
4354  afterTriggers.events.tailfree = NULL;
4355  }
4356 
4357  /*
4358  * Forget any subtransaction state as well. Since this can't be very
4359  * large, we let the eventual reset of TopTransactionContext free the
4360  * memory instead of doing it here.
4361  */
4362  afterTriggers.state_stack = NULL;
4363  afterTriggers.events_stack = NULL;
4364  afterTriggers.depth_stack = NULL;
4365  afterTriggers.firing_stack = NULL;
4366  afterTriggers.maxtransdepth = 0;
4367 
4368 
4369  /*
4370  * Forget the query stack and constraint-related state information. As
4371  * with the subtransaction state information, we don't bother freeing the
4372  * memory here.
4373  */
4374  afterTriggers.query_stack = NULL;
4375  afterTriggers.fdw_tuplestores = NULL;
4376  afterTriggers.old_tuplestores = NULL;
4377  afterTriggers.new_tuplestores = NULL;
4378  afterTriggers.maxquerydepth = 0;
4379  afterTriggers.state = NULL;
4380 
4381  /* No more afterTriggers manipulation until next transaction starts. */
4382  afterTriggers.query_depth = -1;
4383 }
4384 
4385 /*
4386  * AfterTriggerBeginSubXact()
4387  *
4388  * Start a subtransaction.
4389  */
4390 void
/* NOTE(review): signature line (4391, AfterTriggerBeginSubXact(void)) elided
 * in this listing. */
4392 {
4393  int my_level = GetCurrentTransactionNestLevel();
4394 
4395  /*
4396  * Allocate more space in the stacks if needed. (Note: because the
4397  * minimum nest level of a subtransaction is 2, we waste the first couple
4398  * entries of each array; not worth the notational effort to avoid it.)
4399  */
4400  while (my_level >= afterTriggers.maxtransdepth)
4401  {
4402  if (afterTriggers.maxtransdepth == 0)
4403  {
4404  MemoryContext old_cxt;
4405 
 /* NOTE(review): line 4406 elided — it records the prior context into
  * old_cxt while switching (old_cxt is restored via MemoryContextSwitchTo
  * below; presumably switching to TopTransactionContext — confirm). */
4407 
4408 #define DEFTRIG_INITALLOC 8
4409  afterTriggers.state_stack = (SetConstraintState *)
4410  palloc(DEFTRIG_INITALLOC * sizeof(SetConstraintState));
4411  afterTriggers.events_stack = (AfterTriggerEventList *)
 /* NOTE(review): line 4412 elided — by symmetry with the neighboring
  * allocations, palloc(DEFTRIG_INITALLOC * sizeof(AfterTriggerEventList));
  * confirm against the full source. */
4413  afterTriggers.depth_stack = (int *)
4414  palloc(DEFTRIG_INITALLOC * sizeof(int));
4415  afterTriggers.firing_stack = (CommandId *)
4416  palloc(DEFTRIG_INITALLOC * sizeof(CommandId));
4417  afterTriggers.maxtransdepth = DEFTRIG_INITALLOC;
4418 
4419  MemoryContextSwitchTo(old_cxt);
4420  }
4421  else
4422  {
4423  /* repalloc will keep the stacks in the same context */
4424  int new_alloc = afterTriggers.maxtransdepth * 2;
4425 
4426  afterTriggers.state_stack = (SetConstraintState *)
4427  repalloc(afterTriggers.state_stack,
4428  new_alloc * sizeof(SetConstraintState));
4429  afterTriggers.events_stack = (AfterTriggerEventList *)
4430  repalloc(afterTriggers.events_stack,
4431  new_alloc * sizeof(AfterTriggerEventList));
4432  afterTriggers.depth_stack = (int *)
4433  repalloc(afterTriggers.depth_stack,
4434  new_alloc * sizeof(int));
4435  afterTriggers.firing_stack = (CommandId *)
4436  repalloc(afterTriggers.firing_stack,
4437  new_alloc * sizeof(CommandId));
4438  afterTriggers.maxtransdepth = new_alloc;
4439  }
4440  }
4441 
4442  /*
4443  * Push the current information into the stack. The SET CONSTRAINTS state
4444  * is not saved until/unless changed. Likewise, we don't make a
4445  * per-subtransaction event context until needed.
4446  */
4447  afterTriggers.state_stack[my_level] = NULL;
4448  afterTriggers.events_stack[my_level] = afterTriggers.events;
4449  afterTriggers.depth_stack[my_level] = afterTriggers.query_depth;
4450  afterTriggers.firing_stack[my_level] = afterTriggers.firing_counter;
4451 }
4452 
4453 /*
4454  * AfterTriggerEndSubXact()
4455  *
4456  * The current subtransaction is ending.
4457  */
4458 void
/* NOTE(review): signature line (4459) elided; the parameter is bool isCommit,
 * tested immediately below. */
4460 {
4461  int my_level = GetCurrentTransactionNestLevel();
4462  SetConstraintState state;
4463  AfterTriggerEvent event;
4464  AfterTriggerEventChunk *chunk;
4465  CommandId subxact_firing_id;
4466 
4467  /*
4468  * Pop the prior state if needed.
4469  */
4470  if (isCommit)
4471  {
4472  Assert(my_level < afterTriggers.maxtransdepth);
4473  /* If we saved a prior state, we don't need it anymore */
4474  state = afterTriggers.state_stack[my_level];
4475  if (state != NULL)
4476  pfree(state);
4477  /* this avoids double pfree if error later: */
4478  afterTriggers.state_stack[my_level] = NULL;
4479  Assert(afterTriggers.query_depth ==
4480  afterTriggers.depth_stack[my_level]);
4481  }
4482  else
4483  {
4484  /*
4485  * Aborting. It is possible subxact start failed before calling
4486  * AfterTriggerBeginSubXact, in which case we mustn't risk touching
4487  * stack levels that aren't there.
4488  */
4489  if (my_level >= afterTriggers.maxtransdepth)
4490  return;
4491 
4492  /*
4493  * Release any event lists from queries being aborted, and restore
4494  * query_depth to its pre-subxact value. This assumes that a
4495  * subtransaction will not add events to query levels started in a
4496  * earlier transaction state.
4497  */
4498  while (afterTriggers.query_depth > afterTriggers.depth_stack[my_level])
4499  {
 /* Only levels below maxquerydepth ever had storage allocated. */
4500  if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
4501  {
4502  Tuplestorestate *ts;
4503 
4504  ts = afterTriggers.fdw_tuplestores[afterTriggers.query_depth];
4505  if (ts)
4506  {
4507  tuplestore_end(ts);
4508  afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL;
4509  }
4510  ts = afterTriggers.old_tuplestores[afterTriggers.query_depth];
4511  if (ts)
4512  {
4513  tuplestore_end(ts);
4514  afterTriggers.old_tuplestores[afterTriggers.query_depth] = NULL;
4515  }
4516  ts = afterTriggers.new_tuplestores[afterTriggers.query_depth];
4517  if (ts)
4518  {
4519  tuplestore_end(ts);
4520  afterTriggers.new_tuplestores[afterTriggers.query_depth] = NULL;
4521  }
4522 
4523  afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]);
4524  }
4525 
4526  afterTriggers.query_depth--;
4527  }
4528  Assert(afterTriggers.query_depth ==
4529  afterTriggers.depth_stack[my_level]);
4530 
4531  /*
4532  * Restore the global deferred-event list to its former length,
4533  * discarding any events queued by the subxact.
4534  */
4535  afterTriggerRestoreEventList(&afterTriggers.events,
4536  &afterTriggers.events_stack[my_level]);
4537 
4538  /*
4539  * Restore the trigger state. If the saved state is NULL, then this
4540  * subxact didn't save it, so it doesn't need restoring.
4541  */
4542  state = afterTriggers.state_stack[my_level];
4543  if (state != NULL)
4544  {
4545  pfree(afterTriggers.state);
4546  afterTriggers.state = state;
4547  }
4548  /* this avoids double pfree if error later: */
4549  afterTriggers.state_stack[my_level] = NULL;
4550 
4551  /*
4552  * Scan for any remaining deferred events that were marked DONE or IN
4553  * PROGRESS by this subxact or a child, and un-mark them. We can
4554  * recognize such events because they have a firing ID greater than or
4555  * equal to the firing_counter value we saved at subtransaction start.
4556  * (This essentially assumes that the current subxact includes all
4557  * subxacts started after it.)
4558  */
4559  subxact_firing_id = afterTriggers.firing_stack[my_level];
4560  for_each_event_chunk(event, chunk, afterTriggers.events)
4561  {
4562  AfterTriggerShared evtshared = GetTriggerSharedData(event);
4563 
4564  if (event->ate_flags &
 /* NOTE(review): the flag-mask operand (line 4565) was elided here — per
  * the comment above, the DONE / IN PROGRESS flag bits; confirm the exact
  * constants against the full source. */
4566  {
4567  if (evtshared->ats_firing_id >= subxact_firing_id)
4568  event->ate_flags &=
 /* NOTE(review): line 4569 (the complemented flag mask) likewise elided. */
4570  }
4571  }
4572  }
4573 }
4574 
4575 /* ----------
4576  * AfterTriggerEnlargeQueryState()
4577  *
4578  * Prepare the necessary state so that we can record AFTER trigger events
4579  * queued by a query. It is allowed to have nested queries within a
4580  * (sub)transaction, so we need to have separate state for each query
4581  * nesting level.
4582  * ----------
4583  */
4584 static void
/* NOTE(review): signature line (4585, AfterTriggerEnlargeQueryState(void))
 * elided in this listing. */
4586 {
4587  int init_depth = afterTriggers.maxquerydepth;
4588 
4589  Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
4590 
4591  if (afterTriggers.maxquerydepth == 0)
4592  {
 /* First allocation: size for the current depth, with a floor of 8. */
4593  int new_alloc = Max(afterTriggers.query_depth + 1, 8);
4594 
4595  afterTriggers.query_stack = (AfterTriggerEventList *)
 /* NOTE(review): the allocator-call lines (4596, 4599, 4602, 4605) were
  * elided from this listing for each of the four arrays below; only the
  * size expressions survive. Confirm the allocation calls (and their
  * memory context) against the full source. */
4597  new_alloc * sizeof(AfterTriggerEventList));
4598  afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4600  new_alloc * sizeof(Tuplestorestate *));
4601  afterTriggers.old_tuplestores = (Tuplestorestate **)
4603  new_alloc * sizeof(Tuplestorestate *));
4604  afterTriggers.new_tuplestores = (Tuplestorestate **)
4606  new_alloc * sizeof(Tuplestorestate *));
4607  afterTriggers.maxquerydepth = new_alloc;
4608  }
4609  else
4610  {
4611  /* repalloc will keep the stack in the same context */
4612  int old_alloc = afterTriggers.maxquerydepth;
4613  int new_alloc = Max(afterTriggers.query_depth + 1,
4614  old_alloc * 2);
4615 
4616  afterTriggers.query_stack = (AfterTriggerEventList *)
4617  repalloc(afterTriggers.query_stack,
4618  new_alloc * sizeof(AfterTriggerEventList));
4619  afterTriggers.fdw_tuplestores = (Tuplestorestate **)
4620  repalloc(afterTriggers.fdw_tuplestores,
4621  new_alloc * sizeof(Tuplestorestate *));
4622  afterTriggers.old_tuplestores = (Tuplestorestate **)
4623  repalloc(afterTriggers.old_tuplestores,
4624  new_alloc * sizeof(Tuplestorestate *));
4625  afterTriggers.new_tuplestores = (Tuplestorestate **)
4626  repalloc(afterTriggers.new_tuplestores,
4627  new_alloc * sizeof(Tuplestorestate *));
4628  /* Clear newly-allocated slots for subsequent lazy initialization. */
4629  memset(afterTriggers.fdw_tuplestores + old_alloc,
4630  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4631  memset(afterTriggers.old_tuplestores + old_alloc,
4632  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4633  memset(afterTriggers.new_tuplestores + old_alloc,
4634  0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *));
4635  afterTriggers.maxquerydepth = new_alloc;
4636  }
4637 
4638  /* Initialize new query lists to empty */
4639  while (init_depth < afterTriggers.maxquerydepth)
4640  {
4641  AfterTriggerEventList *events;
4642 
4643  events = &afterTriggers.query_stack[init_depth];
4644  events->head = NULL;
4645  events->tail = NULL;
4646  events->tailfree = NULL;
4647 
4648  ++init_depth;
4649  }
4650 }
4651 
4652 /*
4653  * Create an empty SetConstraintState with room for numalloc trigstates
4654  */
4655 static SetConstraintState
/* NOTE(review): signature line (4656, SetConstraintStateCreate(int numalloc))
 * elided in this listing. */
4657 {
4658  SetConstraintState state;
4659 
4660  /* Behave sanely with numalloc == 0 */
4661  if (numalloc <= 0)
4662  numalloc = 1;
4663 
4664  /*
4665  * We assume that zeroing will correctly initialize the state values.
4666  */
4667  state = (SetConstraintState)
 /* NOTE(review): the allocator-call line (4668) was elided — per the comment
  * above, a zeroing allocation of the computed size; confirm the exact call
  * (and memory context) against the full source. */
4669  offsetof(SetConstraintStateData, trigstates) +
4670  numalloc * sizeof(SetConstraintTriggerData));
4671 
4672  state->numalloc = numalloc;
4673 
4674  return state;
4675 }
4676 
4677 /*
4678  * Copy a SetConstraintState
4679  */
4680 static SetConstraintState
4681 SetConstraintStateCopy(SetConstraintState origstate)
4682 {
4683  SetConstraintState state;
4684 
4685  state = SetConstraintStateCreate(origstate->numstates);
4686 
4687  state->all_isset = origstate->all_isset;
4688  state->all_isdeferred = origstate->all_isdeferred;
4689  state->numstates = origstate->numstates;
4690  memcpy(state->trigstates, origstate->trigstates,
4691  origstate->numstates * sizeof(SetConstraintTriggerData));
4692 
4693  return state;
4694 }
4695 
4696 /*
4697  * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
4698  * pointer to the state object (it will change if we have to repalloc).
4699  */
4700 static SetConstraintState
/* NOTE(review): the first signature line (4701,
 * SetConstraintStateAddItem(SetConstraintState state,) was elided; the
 * continuation line with the remaining parameters follows. */
4702  Oid tgoid, bool tgisdeferred)
4703 {
 /* Grow the trigstates array (doubling) when it is full. */
4704  if (state->numstates >= state->numalloc)
4705  {
4706  int newalloc = state->numalloc * 2;
4707 
4708  newalloc = Max(newalloc, 8); /* in case original has size 0 */
4709  state = (SetConstraintState)
4710  repalloc(state,
4711  offsetof(SetConstraintStateData, trigstates) +
4712  newalloc * sizeof(SetConstraintTriggerData));
4713  state->numalloc = newalloc;
4714  Assert(state->numstates < state->numalloc);
4715  }
4716 
4717  state->trigstates[state->numstates].sct_tgoid = tgoid;
4718  state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
4719  state->numstates++;
4720 
4721  return state;
4722 }
4723 
4724 /* ----------
4725  * AfterTriggerSetState()
4726  *
4727  * Execute the SET CONSTRAINTS ... utility command.
4728  * ----------
4729  */
4730 void
/* NOTE(review): signature line (4731) elided; the parameter is a
 * ConstraintsSetStmt *stmt — its constraints list and deferred flag are
 * used throughout. */
4732 {
4733  int my_level = GetCurrentTransactionNestLevel();
4734 
4735  /* If we haven't already done so, initialize our state. */
4736  if (afterTriggers.state == NULL)
4737  afterTriggers.state = SetConstraintStateCreate(8);
4738 
4739  /*
4740  * If in a subtransaction, and we didn't save the current state already,
4741  * save it so it can be restored if the subtransaction aborts.
4742  */
4743  if (my_level > 1 &&
4744  afterTriggers.state_stack[my_level] == NULL)
4745  {
4746  afterTriggers.state_stack[my_level] =
4747  SetConstraintStateCopy(afterTriggers.state);
4748  }
4749 
4750  /*
4751  * Handle SET CONSTRAINTS ALL ...
4752  */
4753  if (stmt->constraints == NIL)
4754  {
4755  /*
4756  * Forget any previous SET CONSTRAINTS commands in this transaction.
4757  */
4758  afterTriggers.state->numstates = 0;
4759 
4760  /*
4761  * Set the per-transaction ALL state to known.
4762  */
4763  afterTriggers.state->all_isset = true;
4764  afterTriggers.state->all_isdeferred = stmt->deferred;
4765  }
4766  else
4767  {
4768  Relation conrel;
4769  Relation tgrel;
4770  List *conoidlist = NIL;
4771  List *tgoidlist = NIL;
4772  ListCell *lc;
4773 
4774  /*
4775  * Handle SET CONSTRAINTS constraint-name [, ...]
4776  *
4777  * First, identify all the named constraints and make a list of their
4778  * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
4779  * the same name within a schema, the specifications are not
4780  * necessarily unique. Our strategy is to target all matching
4781  * constraints within the first search-path schema that has any
4782  * matches, but disregard matches in schemas beyond the first match.
4783  * (This is a bit odd but it's the historical behavior.)
4784  */
 /* NOTE(review): line 4785 elided — it must open conrel (the pg_constraint
  * catalog scanned below and closed via heap_close with AccessShareLock);
  * confirm against the full source. */
4786 
4787  foreach(lc, stmt->constraints)
4788  {
4789  RangeVar *constraint = lfirst(lc);
4790  bool found;
4791  List *namespacelist;
4792  ListCell *nslc;
4793 
4794  if (constraint->catalogname)
4795  {
4796  if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
4797  ereport(ERROR,
4798  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4799  errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
4800  constraint->catalogname, constraint->schemaname,
4801  constraint->relname)));
4802  }
4803 
4804  /*
4805  * If we're given the schema name with the constraint, look only
4806  * in that schema. If given a bare constraint name, use the
4807  * search path to find the first matching constraint.
4808  */
4809  if (constraint->schemaname)
4810  {
4811  Oid namespaceId = LookupExplicitNamespace(constraint->schemaname,
4812  false);
4813 
4814  namespacelist = list_make1_oid(namespaceId);
4815  }
4816  else
4817  {
4818  namespacelist = fetch_search_path(true);
4819  }
4820 
4821  found = false;
4822  foreach(nslc, namespacelist)
4823  {
4824  Oid namespaceId = lfirst_oid(nslc);
4825  SysScanDesc conscan;
4826  ScanKeyData skey[2];
4827  HeapTuple tup;
4828 
4829  ScanKeyInit(&skey[0],
 /* NOTE(review): the attribute-number argument lines (4830 and 4834) of
  * these two ScanKeyInit calls were elided — they key the scan on the
  * constraint name and namespace columns; confirm the exact attribute
  * constants against the full source. */
4831  BTEqualStrategyNumber, F_NAMEEQ,
4832  CStringGetDatum(constraint->relname));
4833  ScanKeyInit(&skey[1],
4835  BTEqualStrategyNumber, F_OIDEQ,
4836  ObjectIdGetDatum(namespaceId));
4837 
4838  conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
4839  true, NULL, 2, skey);
4840 
4841  while (HeapTupleIsValid(tup = systable_getnext(conscan)))
4842  {
 /* NOTE(review): line 4843 elided — it declares con from the tuple
  * (presumably Form_pg_constraint con = (Form_pg_constraint)
  * GETSTRUCT(tup); con->condeferrable is read just below). */
4844 
4845  if (con->condeferrable)
4846  conoidlist = lappend_oid(conoidlist,
4847  HeapTupleGetOid(tup));
4848  else if (stmt->deferred)
4849  ereport(ERROR,
4850  (errcode(ERRCODE_WRONG_OBJECT_TYPE),
4851  errmsg("constraint \"%s\" is not deferrable",
4852  constraint->relname)));
4853  found = true;
4854  }
4855 
4856  systable_endscan(conscan);
4857 
4858  /*
4859  * Once we've found a matching constraint we do not search
4860  * later parts of the search path.
4861  */
4862  if (found)
4863  break;
4864  }
4865 
4866  list_free(namespacelist);
4867 
4868  /*
4869  * Not found ?
4870  */
4871  if (!found)
4872  ereport(ERROR,
4873  (errcode(ERRCODE_UNDEFINED_OBJECT),
4874  errmsg("constraint \"%s\" does not exist",
4875  constraint->relname)));
4876  }
4877 
4878  heap_close(conrel, AccessShareLock);
4879 
4880  /*
4881  * Now, locate the trigger(s) implementing each of these constraints,
4882  * and make a list of their OIDs.
4883  */
 /* NOTE(review): line 4884 elided — it must open tgrel (the pg_trigger
  * catalog scanned below and closed via heap_close); confirm against the
  * full source. */
4885 
4886  foreach(lc, conoidlist)
4887  {
4888  Oid conoid = lfirst_oid(lc);
4889  bool found;
4890  ScanKeyData skey;
4891  SysScanDesc tgscan;
4892  HeapTuple htup;
4893 
4894  found = false;
4895 
4896  ScanKeyInit(&skey,
 /* NOTE(review): the attribute-number argument line (4897) elided — keys
  * the scan on the trigger's owning-constraint column; confirm. */
4898  BTEqualStrategyNumber, F_OIDEQ,
4899  ObjectIdGetDatum(conoid));
4900 
4901  tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
4902  NULL, 1, &skey);
4903 
4904  while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
4905  {
4906  Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
4907 
4908  /*
4909  * Silently skip triggers that are marked as non-deferrable in
4910  * pg_trigger. This is not an error condition, since a
4911  * deferrable RI constraint may have some non-deferrable
4912  * actions.
4913  */
4914  if (pg_trigger->tgdeferrable)
4915  tgoidlist = lappend_oid(tgoidlist,
4916  HeapTupleGetOid(htup));
4917 
4918  found = true;
4919  }
4920 
4921  systable_endscan(tgscan);
4922 
4923  /* Safety check: a deferrable constraint should have triggers */
4924  if (!found)
4925  elog(ERROR, "no triggers found for constraint with OID %u",
4926  conoid);
4927  }
4928 
4929  heap_close(tgrel, AccessShareLock);
4930 
4931  /*
4932  * Now we can set the trigger states of individual triggers for this
4933  * xact.
4934  */
4935  foreach(lc, tgoidlist)
4936  {
4937  Oid tgoid = lfirst_oid(lc);
4938  SetConstraintState state = afterTriggers.state;
4939  bool found = false;
4940  int i;
4941 
4942  for (i = 0; i < state->numstates; i++)
4943  {
4944  if (state->trigstates[i].sct_tgoid == tgoid)
4945  {
4946  state->trigstates[i].sct_tgisdeferred = stmt->deferred;
4947  found = true;
4948  break;
4949  }
4950  }
4951  if (!found)
4952  {
4953  afterTriggers.state =
4954  SetConstraintStateAddItem(state, tgoid, stmt->deferred);
4955  }
4956  }
4957  }
4958 
4959  /*
4960  * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
4961  * checks against that constraint must be made when the SET CONSTRAINTS
4962  * command is executed -- i.e. the effects of the SET CONSTRAINTS command
4963  * apply retroactively. We've updated the constraints state, so scan the
4964  * list of previously deferred events to fire any that have now become
4965  * immediate.
4966  *
4967  * Obviously, if this was SET ... DEFERRED then it can't have converted
4968  * any unfired events to immediate, so we need do nothing in that case.
4969  */
4970  if (!stmt->deferred)
4971  {
4972  AfterTriggerEventList *events = &afterTriggers.events;
4973  bool snapshot_set = false;
4974 
4975  while (afterTriggerMarkEvents(events, NULL, true))
4976  {
4977  CommandId firing_id = afterTriggers.firing_counter++;
4978 
4979  /*
4980  * Make sure a snapshot has been established in case trigger
4981  * functions need one. Note that we avoid setting a snapshot if
4982  * we don't find at least one trigger that has to be fired now.
4983  * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
4984  * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
4985  * at the start of a transaction it's not possible for any trigger
4986  * events to be queued yet.)
4987  */
4988  if (!snapshot_set)
4989  {
 /* NOTE(review): line 4990 elided — per the comment above it pushes an
  * active snapshot; confirm the exact call against the full source. */
4991  snapshot_set = true;
4992  }
4993 
4994  /*
4995  * We can delete fired events if we are at top transaction level,
4996  * but we'd better not if inside a subtransaction, since the
4997  * subtransaction could later get rolled back.
4998  */
4999  if (afterTriggerInvokeEvents(events, firing_id, NULL,
5000  !IsSubTransaction()))
5001  break; /* all fired */
5002  }
5003 
5004  if (snapshot_set)
 /* NOTE(review): line 5005 elided — the matching snapshot pop. */
5006  }
5007 }
5008 
5009 /* ----------
5010  * AfterTriggerPendingOnRel()
5011  * Test to see if there are any pending after-trigger events for rel.
5012  *
5013  * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5014  * it is unsafe to perform major surgery on a relation. Note that only
5015  * local pending events are examined. We assume that having exclusive lock
5016  * on a rel guarantees there are no unserviced events in other backends ---
5017  * but having a lock does not prevent there being such events in our own.
5018  *
5019  * In some scenarios it'd be reasonable to remove pending events (more
5020  * specifically, mark them DONE by the current subxact) but without a lot
5021  * of knowledge of the trigger semantics we can't do this in general.
5022  * ----------
5023  */
5024 bool
/* NOTE(review): signature line (5025, AfterTriggerPendingOnRel(Oid relid))
 * elided in this listing; relid is compared against event relations below. */
5026 {
5027  AfterTriggerEvent event;
5028  AfterTriggerEventChunk *chunk;
5029  int depth;
5030 
5031  /* Scan queued events */
5032  for_each_event_chunk(event, chunk, afterTriggers.events)
5033  {
5034  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5035 
5036  /*
5037  * We can ignore completed events. (Even if a DONE flag is rolled
5038  * back by subxact abort, it's OK because the effects of the TRUNCATE
5039  * or whatever must get rolled back too.)
5040  */
5041  if (event->ate_flags & AFTER_TRIGGER_DONE)
5042  continue;
5043 
5044  if (evtshared->ats_relid == relid)
5045  return true;
5046  }
5047 
5048  /*
5049  * Also scan events queued by incomplete queries. This could only matter
5050  * if TRUNCATE/etc is executed by a function or trigger within an updating
5051  * query on the same relation, which is pretty perverse, but let's check.
5052  */
 /* Levels at or above maxquerydepth have no allocated storage, hence the
  * second bound on the loop. */
5053  for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
5054  {
5055  for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth])
5056  {
5057  AfterTriggerShared evtshared = GetTriggerSharedData(event);
5058 
5059  if (event->ate_flags & AFTER_TRIGGER_DONE)
5060  continue;
5061 
5062  if (evtshared->ats_relid == relid)
5063  return true;
5064  }
5065  }
5066 
5067  return false;
5068 }
5069 
5070 
5071 /* ----------
5072  * AfterTriggerSaveEvent()
5073  *
5074  * Called by ExecA[RS]...Triggers() to queue up the triggers that should
5075  * be fired for an event.
5076  *
5077  * NOTE: this is called whenever there are any triggers associated with
5078  * the event (even if they are disabled). This function decides which
5079  * triggers actually need to be queued. It is also called after each row,
5080  * even if there are no triggers for that event, if there are any AFTER
5081  * STATEMENT triggers for the statement which use transition tables, so that
5082  * the transition tuplestores can be built.
5083  *
5084  * Transition tuplestores are built now, rather than when events are pulled
5085  * off of the queue because AFTER ROW triggers are allowed to select from the
5086  * transition tables for the statement.
5087  * ----------
5088  */
5089 static void
5091  int event, bool row_trigger,
5092  HeapTuple oldtup, HeapTuple newtup,
5093  List *recheckIndexes, Bitmapset *modifiedCols)
5094 {
5095  Relation rel = relinfo->ri_RelationDesc;
5096  TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
5097  AfterTriggerEventData new_event;
5098  AfterTriggerSharedData new_shared;
5099  char relkind = relinfo->ri_RelationDesc->rd_rel->relkind;
5100  int tgtype_event;
5101  int tgtype_level;
5102  int i;
5103  Tuplestorestate *fdw_tuplestore = NULL;
5104 
5105  /*
5106  * Check state. We use a normal test not Assert because it is possible to
5107  * reach here in the wrong state given misconfigured RI triggers, in
5108  * particular deferring a cascade action trigger.
5109  */
5110  if (afterTriggers.query_depth < 0)
5111  elog(ERROR, "AfterTriggerSaveEvent() called outside of query");
5112 
5113  /* Be sure we have enough space to record events at this query depth. */
5114  if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
5116 
5117  /*
5118  * If the relation has AFTER ... FOR EACH ROW triggers, capture rows into
5119  * transition tuplestores for this depth.
5120  */
5121  if (row_trigger)
5122  {
5123  if ((event == TRIGGER_EVENT_DELETE &&
5124  trigdesc->trig_delete_old_table) ||
5125  (event == TRIGGER_EVENT_UPDATE &&
5126  trigdesc->trig_update_old_table))
5127  {
5128  Tuplestorestate *old_tuplestore;
5129 
5130  Assert(oldtup != NULL);
5131  old_tuplestore =
5133  (afterTriggers.old_tuplestores);
5134  tuplestore_puttuple(old_tuplestore, oldtup);
5135  }
5136  if ((event == TRIGGER_EVENT_INSERT &&
5137  trigdesc->trig_insert_new_table) ||
5138  (event == TRIGGER_EVENT_UPDATE &&
5139  trigdesc->trig_update_new_table))
5140  {
5141  Tuplestorestate *new_tuplestore;
5142 
5143  Assert(newtup != NULL);
5144  new_tuplestore =
5146  (afterTriggers.new_tuplestores);
5147  tuplestore_puttuple(new_tuplestore, newtup);
5148  }
5149 
5150  /* If transition tables are the only reason we're here, return. */
5151  if ((event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
5152  (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
5153  (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
5154  return;
5155  }
5156 
5157  /*
5158  * Validate the event code and collect the associated tuple CTIDs.
5159  *
5160  * The event code will be used both as a bitmask and an array offset, so
5161  * validation is important to make sure we don't walk off the edge of our
5162  * arrays.
5163  */
5164  switch (event)
5165  {
5166  case TRIGGER_EVENT_INSERT:
5167  tgtype_event = TRIGGER_TYPE_INSERT;
5168  if (row_trigger)
5169  {
5170  Assert(oldtup == NULL);
5171  Assert(newtup != NULL);
5172  ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid1));
5173  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5174  }
5175  else
5176  {
5177  Assert(oldtup == NULL);
5178  Assert(newtup == NULL);
5179  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5180  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5181  }
5182  break;
5183  case TRIGGER_EVENT_DELETE:
5184  tgtype_event = TRIGGER_TYPE_DELETE;
5185  if (row_trigger)
5186  {
5187  Assert(oldtup != NULL);
5188  Assert(newtup == NULL);
5189  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5190  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5191  }
5192  else
5193  {
5194  Assert(oldtup == NULL);
5195  Assert(newtup == NULL);
5196  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5197  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5198  }
5199  break;
5200  case TRIGGER_EVENT_UPDATE:
5201  tgtype_event = TRIGGER_TYPE_UPDATE;
5202  if (row_trigger)
5203  {
5204  Assert(oldtup != NULL);
5205  Assert(newtup != NULL);
5206  ItemPointerCopy(&(oldtup->t_self), &(new_event.ate_ctid1));
5207  ItemPointerCopy(&(newtup->t_self), &(new_event.ate_ctid2));
5208  }
5209  else
5210  {
5211  Assert(oldtup == NULL);
5212  Assert(newtup == NULL);
5213  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5214  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5215  }
5216  break;
5218  tgtype_event = TRIGGER_TYPE_TRUNCATE;
5219  Assert(oldtup == NULL);
5220  Assert(newtup == NULL);
5221  ItemPointerSetInvalid(&(new_event.ate_ctid1));
5222  ItemPointerSetInvalid(&(new_event.ate_ctid2));
5223  break;
5224  default:
5225  elog(ERROR, "invalid after-trigger event code: %d", event);
5226  tgtype_event = 0; /* keep compiler quiet */
5227  break;
5228  }
5229 
5230  if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
5231  new_event.ate_flags = (row_trigger && event == TRIGGER_EVENT_UPDATE) ?
5233  /* else, we'll initialize ate_flags for each trigger */
5234 
5235  tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);
5236 
5237  for (i = 0; i < trigdesc->numtriggers; i++)
5238  {
5239  Trigger *trigger = &trigdesc->triggers[i];
5240 
5241  if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
5242  tgtype_level,
5244  tgtype_event))
5245  continue;
5246  if (!TriggerEnabled(estate, relinfo, trigger, event,
5247  modifiedCols, oldtup, newtup))
5248  continue;
5249 
5250  if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
5251  {
5252  if (fdw_tuplestore == NULL)
5253  {
5254  fdw_tuplestore =
5256  (afterTriggers.fdw_tuplestores);
5257  new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
5258  }
5259  else
5260  /* subsequent event for the same tuple */
5261  new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
5262  }
5263 
5264  /*
5265  * If the trigger is a foreign key enforcement trigger, there are
5266  * certain cases where we can skip queueing the event because we can
5267  * tell by inspection that the FK constraint will still pass.
5268  */
5269  if (TRIGGER_FIRED_BY_UPDATE(event))
5270  {
5271  switch (RI_FKey_trigger_type(trigger->tgfoid))
5272  {
5273  case RI_TRIGGER_PK:
5274  /* Update on trigger's PK table */
5275  if (!RI_FKey_pk_upd_check_required(trigger, rel,
5276  oldtup, newtup))
5277  {
5278  /* skip queuing this event */
5279  continue;
5280  }
5281  break;
5282 
5283  case RI_TRIGGER_FK:
5284  /* Update on trigger's FK table */
5285  if (!RI_FKey_fk_upd_check_required(trigger, rel,
5286  oldtup, newtup))
5287  {
5288  /* skip queuing this event */
5289  continue;
5290  }
5291  break;
5292 
5293  case RI_TRIGGER_NONE:
5294  /* Not an FK trigger */
5295  break;
5296  }
5297  }
5298 
5299  /*
5300  * If the trigger is a deferred unique constraint check trigger, only
5301  * queue it if the unique constraint was potentially violated, which
5302  * we know from index insertion time.
5303  */
5304  if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
5305  {
5306  if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
5307  continue; /* Uniqueness definitely not violated */
5308  }
5309 
5310  /*
5311  * Fill in event structure and add it to the current query's queue.
5312  */
5313  new_shared.ats_event =
5314  (event & TRIGGER_EVENT_OPMASK) |
5315  (row_trigger ? TRIGGER_EVENT_ROW : 0) |
5316  (trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
5317  (trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
5318  new_shared.ats_tgoid = trigger->tgoid;
5319  new_shared.ats_relid = RelationGetRelid(rel);
5320  new_shared.ats_firing_id = 0;
5321 
5322  afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth],
5323  &new_event, &new_shared);
5324  }
5325 
5326  /*
5327  * Finally, spool any foreign tuple(s). The tuplestore squashes them to
5328  * minimal tuples, so this loses any system columns. The executor lost
5329  * those columns before us, for an unrelated reason, so this is fine.
5330  */
5331  if (fdw_tuplestore)
5332  {
5333  if (oldtup != NULL)
5334  tuplestore_puttuple(fdw_tuplestore, oldtup);
5335  if (newtup != NULL)
5336  tuplestore_puttuple(fdw_tuplestore, newtup);
5337  }
5338 }
5339 
5340 Datum
5342 {
5344 }
void RemoveTriggerById(Oid trigOid)
Definition: trigger.c:1216
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
signed short int16
Definition: c.h:255
#define TRIGGER_EVENT_ROW
Definition: trigger.h:58
HeapTuple heap_copytuple(HeapTuple tuple)
Definition: heaptuple.c:608
#define NIL
Definition: pg_list.h:69
void ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
Definition: trigger.c:2395
uint32 CommandId
Definition: c.h:411
TriggerEvent ats_event
Definition: trigger.c:3272
#define Anum_pg_trigger_tgdeferrable
Definition: pg_trigger.h:88
void InstrStopNode(Instrumentation *instr, double nTuples)
Definition: instrument.c:80
Tuplestorestate ** old_tuplestores
Definition: trigger.c:3416
TupleTableSlot * ExecStoreTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree)
Definition: execTuples.c:320
#define FKCONSTR_MATCH_SIMPLE
Definition: parsenodes.h:2060
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
JunkFilter * ri_junkFilter
Definition: execnodes.h:396
Definition: fmgr.h:56
void * stringToNode(char *str)
Definition: read.c:38
Relation ri_RelationDesc
Definition: execnodes.h:354
#define TRIGGER_FOR_DELETE(type)
Definition: pg_trigger.h:135
bool ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo, HeapTuple trigtuple)
Definition: trigger.c:2504
#define NameGetDatum(X)
Definition: postgres.h:601
int RI_FKey_trigger_type(Oid tgfoid)
Definition: ri_triggers.c:3701
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:200
Datum namein(PG_FUNCTION_ARGS)
Definition: name.c:46
TupleTableSlot * ExecInitExtraTupleSlot(EState *estate)
Definition: execTuples.c:852
#define AFTER_TRIGGER_FDW_REUSE
Definition: trigger.c:3262
#define TriggerOidIndexId
Definition: indexing.h:251
#define AFTER_TRIGGER_INITDEFERRED
Definition: trigger.h:68
Oid LookupExplicitNamespace(const char *nspname, bool missing_ok)
Definition: namespace.c:2853
int errhint(const char *fmt,...)
Definition: elog.c:987
#define VARDATA_ANY(PTR)
Definition: postgres.h:347
void ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
Definition: trigger.c:2343
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:499
#define GETSTRUCT(TUP)
Definition: htup_details.h:656
#define fastgetattr(tup, attnum, tupleDesc, isnull)
Definition: htup_details.h:719
MemoryContext TopTransactionContext
Definition: mcxt.c:48
CommandId es_output_cid
Definition: execnodes.h:438
static void test(void)
bool IsSystemRelation(Relation relation)
Definition: catalog.c:63
char * subname
Definition: parsenodes.h:2790
const char * quote_identifier(const char *ident)
Definition: ruleutils.c:10280
ItemPointerData ate_ctid2
Definition: trigger.c:3284
#define TRIGGER_TYPE_DELETE
Definition: pg_trigger.h:101
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:2962
#define RelationGetDescr(relation)
Definition: rel.h:428
#define TRIGGER_EVENT_DELETE
Definition: trigger.h:53
Oid GetUserId(void)
Definition: miscinit.c:283
SetConstraintStateData * SetConstraintState
Definition: trigger.c:3223
TupleTableSlot * es_trig_newtup_slot
Definition: execnodes.h:459
#define ObjectIdAttributeNumber
Definition: sysattr.h:22
Oid tgfoid
Definition: reltrigger.h:28
#define MIN_CHUNK_SIZE
TriggerFlags ate_flags
Definition: trigger.c:3282
HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, HeapUpdateFailureData *hufd)
Definition: heapam.c:4540
Oid RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, bool missing_ok, bool nowait, RangeVarGetRelidCallback callback, void *callback_arg)
Definition: namespace.c:218
#define AFTER_TRIGGER_DEFERRABLE
Definition: trigger.h:67
ResourceOwner TopTransactionResourceOwner
Definition: resowner.c:140
void ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
Definition: trigger.c:2606